mirror of https://github.com/containers/podman.git
Merge pull request #18835 from Luap99/update-container-deps
update c/{common,image,storage} to latest
This commit is contained in:
commit
c83589a852
2
Makefile
2
Makefile
|
@ -57,6 +57,7 @@ BUILDTAGS ?= \
|
|||
$(shell hack/libsubid_tag.sh) \
|
||||
exclude_graphdriver_devicemapper \
|
||||
seccomp
|
||||
ROOTLESSPORTTAGS ?= exclude_graphdriver_aufs exclude_graphdriver_btrfs exclude_graphdriver_devicemapper exclude_graphdriver_overlay exclude_graphdriver_zfs
|
||||
PYTHON ?= $(shell command -v python3 python|head -n1)
|
||||
PKG_MANAGER ?= $(shell command -v dnf yum|head -n1)
|
||||
# ~/.local/bin is not in PATH on all systems
|
||||
|
@ -401,6 +402,7 @@ bin/rootlessport: $(SOURCES) go.mod go.sum
|
|||
CGO_ENABLED=$(CGO_ENABLED) \
|
||||
$(GO) build \
|
||||
$(BUILDFLAGS) \
|
||||
-tags "$(ROOTLESSPORTTAGS)" \
|
||||
-o $@ ./cmd/rootlessport
|
||||
|
||||
.PHONY: rootlessport
|
||||
|
|
|
@ -4,6 +4,8 @@ import (
|
|||
"errors"
|
||||
"fmt"
|
||||
"net"
|
||||
"strconv"
|
||||
"strings"
|
||||
|
||||
"github.com/containers/common/libnetwork/types"
|
||||
"github.com/containers/common/libnetwork/util"
|
||||
|
@ -77,6 +79,10 @@ func networkCreateFlags(cmd *cobra.Command) {
|
|||
flags.StringArrayVar(&networkCreateOptions.Subnets, subnetFlagName, nil, "subnets in CIDR format")
|
||||
_ = cmd.RegisterFlagCompletionFunc(subnetFlagName, completion.AutocompleteNone)
|
||||
|
||||
routeFlagName := "route"
|
||||
flags.StringArrayVar(&networkCreateOptions.Routes, routeFlagName, nil, "static routes")
|
||||
_ = cmd.RegisterFlagCompletionFunc(routeFlagName, completion.AutocompleteNone)
|
||||
|
||||
interfaceFlagName := "interface-name"
|
||||
flags.StringVar(&networkCreateOptions.InterfaceName, interfaceFlagName, "", "interface name which is used by the driver")
|
||||
_ = cmd.RegisterFlagCompletionFunc(interfaceFlagName, completion.AutocompleteNone)
|
||||
|
@ -176,6 +182,16 @@ func networkCreate(cmd *cobra.Command, args []string) error {
|
|||
return errors.New("cannot set gateway or range without subnet")
|
||||
}
|
||||
|
||||
for i := range networkCreateOptions.Routes {
|
||||
route, err := parseRoute(networkCreateOptions.Routes[i])
|
||||
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
network.Routes = append(network.Routes, *route)
|
||||
}
|
||||
|
||||
extraCreateOptions := types.NetworkCreateOptions{
|
||||
IgnoreIfExists: networkCreateOptions.IgnoreIfExists,
|
||||
}
|
||||
|
@ -188,6 +204,46 @@ func networkCreate(cmd *cobra.Command, args []string) error {
|
|||
return nil
|
||||
}
|
||||
|
||||
func parseRoute(routeStr string) (*types.Route, error) {
|
||||
s := strings.Split(routeStr, ",")
|
||||
var metric *uint32
|
||||
|
||||
if len(s) == 2 || len(s) == 3 {
|
||||
dstStr := s[0]
|
||||
gwStr := s[1]
|
||||
|
||||
destination, err := types.ParseCIDR(dstStr)
|
||||
gateway := net.ParseIP(gwStr)
|
||||
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("invalid route destination %s", dstStr)
|
||||
}
|
||||
|
||||
if gateway == nil {
|
||||
return nil, fmt.Errorf("invalid route gateway %s", gwStr)
|
||||
}
|
||||
|
||||
if len(s) == 3 {
|
||||
mtr, err := strconv.ParseUint(s[2], 10, 32)
|
||||
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("invalid route metric %s", s[2])
|
||||
}
|
||||
x := uint32(mtr)
|
||||
metric = &x
|
||||
}
|
||||
|
||||
r := types.Route{
|
||||
Destination: destination,
|
||||
Gateway: gateway,
|
||||
Metric: metric,
|
||||
}
|
||||
|
||||
return &r, nil
|
||||
}
|
||||
return nil, fmt.Errorf("invalid route: %s\nFormat: --route <destination in CIDR>,<gateway>,<metric (optional)>", routeStr)
|
||||
}
|
||||
|
||||
func parseRange(iprange string) (*types.LeaseRange, error) {
|
||||
_, subnet, err := net.ParseCIDR(iprange)
|
||||
if err != nil {
|
||||
|
|
|
@ -95,10 +95,12 @@ Set metadata for a network (e.g., --label mykey=value).
|
|||
|
||||
Set driver specific options.
|
||||
|
||||
All drivers accept the `mtu` and `metric` options.
|
||||
All drivers accept the `mtu`, `metric`, `no_default_route` and options.
|
||||
|
||||
- `mtu`: Sets the Maximum Transmission Unit (MTU) and takes an integer value.
|
||||
- `metric` Sets the Route Metric for the default route created in every container joined to this network. Accepts a positive integer value. Can only be used with the Netavark network backend.
|
||||
- `no_default_route`: If set to 1, Podman will not automatically add a default route to subnets. Routes can still be added
|
||||
manually by creating a custom route using `--route`.
|
||||
|
||||
Additionally the `bridge` driver supports the following options:
|
||||
|
||||
|
@ -114,6 +116,10 @@ The `macvlan` and `ipvlan` driver support the following options:
|
|||
- Supported values for `macvlan` are `bridge`, `private`, `vepa`, `passthru`. Defaults to `bridge`.
|
||||
- Supported values for `ipvlan` are `l2`, `l3`, `l3s`. Defaults to `l2`.
|
||||
|
||||
#### **--route**=*route*
|
||||
|
||||
A static route in the format <destination in CIDR notation>,<gateway>,<route metric (optional)>. This route will be added to every container in this network. Only available with the netavark backend. It can be specified multiple times if more than one static route is desired.
|
||||
|
||||
#### **--subnet**=*subnet*
|
||||
|
||||
The subnet in CIDR notation. Can be specified multiple times to allocate more than one subnet for this network.
|
||||
|
@ -158,6 +164,17 @@ $ podman network create --subnet 192.168.55.0/24 --gateway 192.168.55.3 --subnet
|
|||
podman4
|
||||
```
|
||||
|
||||
Create a network with a static subnet and a static route.
|
||||
```
|
||||
$ podman network create --subnet 192.168.33.0/24 --route 10.1.0.0/24,192.168.33.10 newnet
|
||||
```
|
||||
|
||||
Create a network with a static subnet and a static route without a default
|
||||
route.
|
||||
```
|
||||
$ podman network create --subnet 192.168.33.0/24 --route 10.1.0.0/24,192.168.33.10 --opt no_default_route=1 newnet
|
||||
```
|
||||
|
||||
Create a Macvlan based network using the host interface eth0. Macvlan networks can only be used as root.
|
||||
```
|
||||
$ sudo podman network create -d macvlan -o parent=eth0 --subnet 192.5.0.0/16 newnet
|
||||
|
|
|
@ -28,6 +28,7 @@ Pretty-print networks to JSON or using a Go template.
|
|||
| .NetworkDNSServers | Array of DNS servers used in this network |
|
||||
| .NetworkInterface | Name of the network interface on the host |
|
||||
| .Options | Network options |
|
||||
| .Routes | List of static routes for this network |
|
||||
| .Subnets | List of subnets on this network |
|
||||
|
||||
## EXAMPLE
|
||||
|
|
|
@ -56,6 +56,7 @@ Valid placeholders for the Go template are listed below:
|
|||
| .NetworkDNSServers | Array of DNS servers used in this network |
|
||||
| .NetworkInterface | Name of the network interface on the host |
|
||||
| .Options | Network options |
|
||||
| .Routes | List of static routes for this network |
|
||||
| .Subnets | List of subnets on this network |
|
||||
|
||||
#### **--no-trunc**
|
||||
|
|
43
go.mod
43
go.mod
|
@ -13,19 +13,19 @@ require (
|
|||
github.com/containernetworking/cni v1.1.2
|
||||
github.com/containernetworking/plugins v1.3.0
|
||||
github.com/containers/buildah v1.30.1-0.20230504052500-e925b5852e07
|
||||
github.com/containers/common v0.53.1-0.20230506101404-3e93a76d461c
|
||||
github.com/containers/common v0.53.1-0.20230608150349-2c1849f43e14
|
||||
github.com/containers/conmon v2.0.20+incompatible
|
||||
github.com/containers/image/v5 v5.25.1-0.20230505072505-dc4a4be9cc1e
|
||||
github.com/containers/image/v5 v5.25.1-0.20230608153337-8ad019310ff2
|
||||
github.com/containers/libhvee v0.0.5
|
||||
github.com/containers/ocicrypt v1.1.7
|
||||
github.com/containers/psgo v1.8.0
|
||||
github.com/containers/storage v1.46.1
|
||||
github.com/containers/storage v1.46.2-0.20230530174214-1dc289a244ce
|
||||
github.com/coreos/go-systemd/v22 v22.5.0
|
||||
github.com/coreos/stream-metadata-go v0.4.2
|
||||
github.com/crc-org/vfkit v0.0.5-0.20230602131541-3d57f09010c9
|
||||
github.com/cyphar/filepath-securejoin v0.2.3
|
||||
github.com/digitalocean/go-qemu v0.0.0-20221209210016-f035778c97f7
|
||||
github.com/docker/docker v24.0.0+incompatible
|
||||
github.com/docker/docker v24.0.2+incompatible
|
||||
github.com/docker/go-connections v0.4.1-0.20210727194412-58542c764a11
|
||||
github.com/docker/go-plugins-helpers v0.0.0-20211224144127-6eecb7beb651
|
||||
github.com/docker/go-units v0.5.0
|
||||
|
@ -43,7 +43,7 @@ require (
|
|||
github.com/mattn/go-sqlite3 v1.14.17
|
||||
github.com/moby/term v0.5.0
|
||||
github.com/nxadm/tail v1.4.8
|
||||
github.com/onsi/ginkgo/v2 v2.9.7
|
||||
github.com/onsi/ginkgo/v2 v2.10.0
|
||||
github.com/onsi/gomega v1.27.8
|
||||
github.com/opencontainers/go-digest v1.0.0
|
||||
github.com/opencontainers/image-spec v1.1.0-rc3
|
||||
|
@ -74,19 +74,19 @@ require (
|
|||
)
|
||||
|
||||
require (
|
||||
github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1 // indirect
|
||||
github.com/Microsoft/hcsshim v0.10.0-rc.7 // indirect
|
||||
github.com/Azure/go-ansiterm v0.0.0-20230124172434-306776ec8161 // indirect
|
||||
github.com/Microsoft/hcsshim v0.10.0-rc.8 // indirect
|
||||
github.com/VividCortex/ewma v1.2.0 // indirect
|
||||
github.com/acarl005/stripansi v0.0.0-20180116102854-5a71ef0e047d // indirect
|
||||
github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2 // indirect
|
||||
github.com/chzyer/readline v1.5.1 // indirect
|
||||
github.com/containerd/cgroups v1.1.0 // indirect
|
||||
github.com/containerd/containerd v1.7.0 // indirect
|
||||
github.com/containerd/containerd v1.7.2 // indirect
|
||||
github.com/containerd/stargz-snapshotter/estargz v0.14.3 // indirect
|
||||
github.com/containers/libtrust v0.0.0-20230121012942-c1716e8a8d01 // indirect
|
||||
github.com/coreos/go-oidc/v3 v3.5.0 // indirect
|
||||
github.com/coreos/go-oidc/v3 v3.6.0 // indirect
|
||||
github.com/coreos/go-systemd v0.0.0-20190719114852-fd7a80b32e1f // indirect
|
||||
github.com/cyberphone/json-canonicalization v0.0.0-20220623050100-57a0ce2678a7 // indirect
|
||||
github.com/cyberphone/json-canonicalization v0.0.0-20230514072755-504adb8a8af1 // indirect
|
||||
github.com/davecgh/go-spew v1.1.1 // indirect
|
||||
github.com/digitalocean/go-libvirt v0.0.0-20220804181439-8648fbde413e // indirect
|
||||
github.com/disiqueira/gotree/v3 v3.0.2 // indirect
|
||||
|
@ -113,14 +113,13 @@ require (
|
|||
github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect
|
||||
github.com/golang/protobuf v1.5.3 // indirect
|
||||
github.com/google/go-cmp v0.5.9 // indirect
|
||||
github.com/google/go-containerregistry v0.14.0 // indirect
|
||||
github.com/google/go-containerregistry v0.15.2 // indirect
|
||||
github.com/google/go-intervals v0.0.2 // indirect
|
||||
github.com/google/pprof v0.0.0-20230323073829-e72429f035bd // indirect
|
||||
github.com/google/trillian v1.5.1 // indirect
|
||||
github.com/hashicorp/errwrap v1.1.0 // indirect
|
||||
github.com/hashicorp/go-cleanhttp v0.5.2 // indirect
|
||||
github.com/hashicorp/go-retryablehttp v0.7.2 // indirect
|
||||
github.com/imdario/mergo v0.3.15 // indirect
|
||||
github.com/hashicorp/go-retryablehttp v0.7.4 // indirect
|
||||
github.com/imdario/mergo v0.3.16 // indirect
|
||||
github.com/inconshreveable/mousetrap v1.1.0 // indirect
|
||||
github.com/jinzhu/copier v0.3.5 // indirect
|
||||
github.com/josharian/intern v1.0.0 // indirect
|
||||
|
@ -132,7 +131,7 @@ require (
|
|||
github.com/manifoldco/promptui v0.9.0 // indirect
|
||||
github.com/mattn/go-runewidth v0.0.14 // indirect
|
||||
github.com/miekg/pkcs11 v1.1.1 // indirect
|
||||
github.com/mistifyio/go-zfs/v3 v3.0.0 // indirect
|
||||
github.com/mistifyio/go-zfs/v3 v3.0.1 // indirect
|
||||
github.com/mitchellh/mapstructure v1.5.0 // indirect
|
||||
github.com/moby/patternmatcher v0.5.0 // indirect
|
||||
github.com/moby/sys/mountinfo v0.6.2 // indirect
|
||||
|
@ -151,11 +150,11 @@ require (
|
|||
github.com/seccomp/libseccomp-golang v0.10.0 // indirect
|
||||
github.com/segmentio/ksuid v1.0.4 // indirect
|
||||
github.com/sigstore/fulcio v1.3.1 // indirect
|
||||
github.com/sigstore/rekor v1.1.2-0.20230508234306-ad288b385a44 // indirect
|
||||
github.com/sigstore/sigstore v1.6.4 // indirect
|
||||
github.com/sigstore/rekor v1.2.2-0.20230601122533-4c81ff246d12 // indirect
|
||||
github.com/sigstore/sigstore v1.6.5 // indirect
|
||||
github.com/skratchdot/open-golang v0.0.0-20200116055534-eef842397966 // indirect
|
||||
github.com/stefanberger/go-pkcs11uri v0.0.0-20201008174630-78d3cae3a980 // indirect
|
||||
github.com/sylabs/sif/v2 v2.11.3 // indirect
|
||||
github.com/sylabs/sif/v2 v2.11.4 // indirect
|
||||
github.com/tchap/go-patricia/v2 v2.3.1 // indirect
|
||||
github.com/theupdateframework/go-tuf v0.5.2 // indirect
|
||||
github.com/titanous/rocacheck v0.0.0-20171023193734-afe73141d399 // indirect
|
||||
|
@ -166,11 +165,11 @@ require (
|
|||
go.opencensus.io v0.24.0 // indirect
|
||||
go.opentelemetry.io/otel v1.15.0 // indirect
|
||||
go.opentelemetry.io/otel/trace v1.15.0 // indirect
|
||||
golang.org/x/crypto v0.8.0 // indirect
|
||||
golang.org/x/exp v0.0.0-20230425010034-47ecfdc1ba53 // indirect
|
||||
golang.org/x/crypto v0.9.0 // indirect
|
||||
golang.org/x/exp v0.0.0-20230522175609-2e198f4a06a1 // indirect
|
||||
golang.org/x/mod v0.10.0 // indirect
|
||||
golang.org/x/oauth2 v0.7.0 // indirect
|
||||
golang.org/x/tools v0.9.1 // indirect
|
||||
golang.org/x/oauth2 v0.8.0 // indirect
|
||||
golang.org/x/tools v0.9.3 // indirect
|
||||
google.golang.org/appengine v1.6.7 // indirect
|
||||
google.golang.org/genproto v0.0.0-20230410155749-daa745c078e1 // indirect
|
||||
google.golang.org/grpc v1.55.0 // indirect
|
||||
|
|
115
go.sum
115
go.sum
|
@ -14,7 +14,6 @@ cloud.google.com/go v0.57.0/go.mod h1:oXiQ6Rzq3RAkkY7N6t3TcE6jE+CIBBbA36lwQ1JyzZ
|
|||
cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o=
|
||||
cloud.google.com/go/bigquery v1.3.0/go.mod h1:PjpwJnslEMmckchkHFfq+HTD2DmtT67aNFKH1/VBDHE=
|
||||
cloud.google.com/go/bigquery v1.4.0/go.mod h1:S8dzgnTigyfTmLBfrtrhyYhwRxG72rYxvftPBK2Dvzc=
|
||||
cloud.google.com/go/compute/metadata v0.2.0/go.mod h1:zFmK7XCadkQkj6TtorcaGlCW1hT1fIilQDwofLpJ20k=
|
||||
cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE=
|
||||
cloud.google.com/go/datastore v1.1.0/go.mod h1:umbIZjpQpHh4hmRpGhH4tLFup+FVzqBi1b3c64qFpCk=
|
||||
cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I=
|
||||
|
@ -28,8 +27,8 @@ github.com/14rcole/gopopulate v0.0.0-20180821133914-b175b219e774 h1:SCbEWT58NSt7
|
|||
github.com/AdaLogics/go-fuzz-headers v0.0.0-20230106234847-43070de90fa1 h1:EKPd1INOIyr5hWOWhvpmQpY6tKjeG0hT1s3AMC/9fic=
|
||||
github.com/Azure/azure-sdk-for-go v16.2.1+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc=
|
||||
github.com/Azure/go-ansiterm v0.0.0-20170929234023-d6e3b3328b78/go.mod h1:LmzpDX56iTiv29bbRTIsUNlaFfuhWRQBWjQdVyAevI8=
|
||||
github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1 h1:UQHMgLO+TxOElx5B5HZ4hJQsoJ/PvUvKRhJHDQXO8P8=
|
||||
github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1/go.mod h1:xomTg63KZ2rFqZQzSB4Vz2SUXa1BpHTVz9L5PTmPC4E=
|
||||
github.com/Azure/go-ansiterm v0.0.0-20230124172434-306776ec8161 h1:L/gRVlceqvL25UVaW/CKtUDjefjrs0SPonmDGUVOYP0=
|
||||
github.com/Azure/go-ansiterm v0.0.0-20230124172434-306776ec8161/go.mod h1:xomTg63KZ2rFqZQzSB4Vz2SUXa1BpHTVz9L5PTmPC4E=
|
||||
github.com/Azure/go-autorest v10.8.1+incompatible/go.mod h1:r+4oMnoxhatjLLJ6zxSWATqVooLgysK6ZNox3g/xq24=
|
||||
github.com/Azure/go-autorest v14.2.0+incompatible/go.mod h1:r+4oMnoxhatjLLJ6zxSWATqVooLgysK6ZNox3g/xq24=
|
||||
github.com/Azure/go-autorest/autorest v0.11.1/go.mod h1:JFgpikqFJ/MleTTxwepExTKnFUKKszPS8UavbQYUMuw=
|
||||
|
@ -66,8 +65,8 @@ github.com/Microsoft/hcsshim v0.8.15/go.mod h1:x38A4YbHbdxJtc0sF6oIz+RG0npwSCAvn
|
|||
github.com/Microsoft/hcsshim v0.8.16/go.mod h1:o5/SZqmR7x9JNKsW3pu+nqHm0MF8vbA+VxGOoXdC600=
|
||||
github.com/Microsoft/hcsshim v0.8.21/go.mod h1:+w2gRZ5ReXQhFOrvSQeNfhrYB/dg3oDwTOcER2fw4I4=
|
||||
github.com/Microsoft/hcsshim v0.9.4/go.mod h1:7pLA8lDk46WKDWlVsENo92gC0XFa8rbKfyFRBqxEbCc=
|
||||
github.com/Microsoft/hcsshim v0.10.0-rc.7 h1:HBytQPxcv8Oy4244zbQbe6hnOnx544eL5QPUqhJldz8=
|
||||
github.com/Microsoft/hcsshim v0.10.0-rc.7/go.mod h1:ILuwjA+kNW+MrN/w5un7n3mTqkwsFu4Bp05/okFUZlE=
|
||||
github.com/Microsoft/hcsshim v0.10.0-rc.8 h1:YSZVvlIIDD1UxQpJp0h+dnpLUw+TrY0cx8obKsp3bek=
|
||||
github.com/Microsoft/hcsshim v0.10.0-rc.8/go.mod h1:OEthFdQv/AD2RAdzR6Mm1N1KPCztGKDurW1Z8b8VGMM=
|
||||
github.com/Microsoft/hcsshim/test v0.0.0-20201218223536-d3e5debf77da/go.mod h1:5hlzMzRKMLyo42nCZ9oml8AdTlq/0cvIaBv6tK1RehU=
|
||||
github.com/Microsoft/hcsshim/test v0.0.0-20210227013316-43a75bb4edd3/go.mod h1:mw7qgWloBUl75W/gVH3cQszUg1+gUITj7D6NY7ywVnY=
|
||||
github.com/NYTimes/gziphandler v0.0.0-20170623195520-56545f4a5d46/go.mod h1:3wb06e3pkSAbeQ52E9H9iFoQsEEwGN64994WTCIhntQ=
|
||||
|
@ -181,8 +180,8 @@ github.com/containerd/containerd v1.5.0-beta.4/go.mod h1:GmdgZd2zA2GYIBZ0w09Zvgq
|
|||
github.com/containerd/containerd v1.5.0-rc.0/go.mod h1:V/IXoMqNGgBlabz3tHD2TWDoTJseu1FGOKuoA4nNb2s=
|
||||
github.com/containerd/containerd v1.5.1/go.mod h1:0DOxVqwDy2iZvrZp2JUx/E+hS0UNTVn7dJnIOwtYR4g=
|
||||
github.com/containerd/containerd v1.5.7/go.mod h1:gyvv6+ugqY25TiXxcZC3L5yOeYgEw0QMhscqVp1AR9c=
|
||||
github.com/containerd/containerd v1.7.0 h1:G/ZQr3gMZs6ZT0qPUZ15znx5QSdQdASW11nXTLTM2Pg=
|
||||
github.com/containerd/containerd v1.7.0/go.mod h1:QfR7Efgb/6X2BDpTPJRvPTYDE9rsF0FsXX9J8sIs/sc=
|
||||
github.com/containerd/containerd v1.7.2 h1:UF2gdONnxO8I6byZXDi5sXWiWvlW3D/sci7dTQimEJo=
|
||||
github.com/containerd/containerd v1.7.2/go.mod h1:afcz74+K10M/+cjGHIVQrCt3RAQhUSCAjJ9iMYhhkuI=
|
||||
github.com/containerd/continuity v0.0.0-20190426062206-aaeac12a7ffc/go.mod h1:GL3xCUCBDV3CZiTSEKksMWbLE66hEyuu9qyDOOqM47Y=
|
||||
github.com/containerd/continuity v0.0.0-20190815185530-f2a389ac0a02/go.mod h1:GL3xCUCBDV3CZiTSEKksMWbLE66hEyuu9qyDOOqM47Y=
|
||||
github.com/containerd/continuity v0.0.0-20191127005431-f65d91d395eb/go.mod h1:GL3xCUCBDV3CZiTSEKksMWbLE66hEyuu9qyDOOqM47Y=
|
||||
|
@ -240,12 +239,12 @@ github.com/containernetworking/plugins v1.3.0 h1:QVNXMT6XloyMUoO2wUOqWTC1hWFV62Q
|
|||
github.com/containernetworking/plugins v1.3.0/go.mod h1:Pc2wcedTQQCVuROOOaLBPPxrEXqqXBFt3cZ+/yVg6l0=
|
||||
github.com/containers/buildah v1.30.1-0.20230504052500-e925b5852e07 h1:Bs2sNFh/fSYr4J6JJLFqzyn3dp6HhlA6ewFwRYUpeIE=
|
||||
github.com/containers/buildah v1.30.1-0.20230504052500-e925b5852e07/go.mod h1:6A/BK0YJLXL8+AqlbceKJrhUT+NtEgsvAc51F7TAllc=
|
||||
github.com/containers/common v0.53.1-0.20230506101404-3e93a76d461c h1:NPf//8NAa6xjlj62eBbEBabu8LWVqxPRuweNAFCAYxs=
|
||||
github.com/containers/common v0.53.1-0.20230506101404-3e93a76d461c/go.mod h1:vAG2WNLK9d4umy56l413SS9xiJVe5m7LwOUSoi1x10k=
|
||||
github.com/containers/common v0.53.1-0.20230608150349-2c1849f43e14 h1:THwcUhmyEiq/W7S0eUtGLcm6EB8ih1iQpd7OCV4xuT8=
|
||||
github.com/containers/common v0.53.1-0.20230608150349-2c1849f43e14/go.mod h1:xBDye1c2T/Ms2zGhMeamNi6u158M3FbRLEsWdAwmSkk=
|
||||
github.com/containers/conmon v2.0.20+incompatible h1:YbCVSFSCqFjjVwHTPINGdMX1F6JXHGTUje2ZYobNrkg=
|
||||
github.com/containers/conmon v2.0.20+incompatible/go.mod h1:hgwZ2mtuDrppv78a/cOBNiCm6O0UMWGx1mu7P00nu5I=
|
||||
github.com/containers/image/v5 v5.25.1-0.20230505072505-dc4a4be9cc1e h1:9rH8hFLJjmwMkNAdFfXP3O6cAODKujsTn8JurumYz6I=
|
||||
github.com/containers/image/v5 v5.25.1-0.20230505072505-dc4a4be9cc1e/go.mod h1:fGnQk2T+xmEk1/yL9Ky6djJ2F86vBIeo6X14zZQ33iM=
|
||||
github.com/containers/image/v5 v5.25.1-0.20230608153337-8ad019310ff2 h1:Fh+hVnBC+/oCVACYwBttfNz0ZEiTaXbempEJPc0CkM4=
|
||||
github.com/containers/image/v5 v5.25.1-0.20230608153337-8ad019310ff2/go.mod h1:lbnQxsf2S4lhn6fiBH2ucjiQu5kfBuGkGCif+AICyx0=
|
||||
github.com/containers/libhvee v0.0.5 h1:5tUiF2eVe8XbVSPD/Os4dIU1gJWoQgtkQHIjQ5X7wpE=
|
||||
github.com/containers/libhvee v0.0.5/go.mod h1:AYsyMe44w9ylWWEZNW+IOzA7oZ2i/P9TChNljavhYMI=
|
||||
github.com/containers/libtrust v0.0.0-20230121012942-c1716e8a8d01 h1:Qzk5C6cYglewc+UyGf6lc8Mj2UaPTHy/iF2De0/77CA=
|
||||
|
@ -258,15 +257,15 @@ github.com/containers/ocicrypt v1.1.7/go.mod h1:7CAhjcj2H8AYp5YvEie7oVSK2AhBY8Ns
|
|||
github.com/containers/psgo v1.8.0 h1:2loGekmGAxM9ir5OsXWEfGwFxorMPYnc6gEDsGFQvhY=
|
||||
github.com/containers/psgo v1.8.0/go.mod h1:T8ZxnX3Ur4RvnhxFJ7t8xJ1F48RhiZB4rSrOaR/qGHc=
|
||||
github.com/containers/storage v1.43.0/go.mod h1:uZ147thiIFGdVTjMmIw19knttQnUCl3y9zjreHrg11s=
|
||||
github.com/containers/storage v1.46.1 h1:GcAe8J0Y6T2CF70fXPojUpnme6zXamuzGCrNujVtIGE=
|
||||
github.com/containers/storage v1.46.1/go.mod h1:81vNDX4h+nXJ2o0D6Yqy6JGXDYJGVpHZpz0nr09iJuQ=
|
||||
github.com/containers/storage v1.46.2-0.20230530174214-1dc289a244ce h1:who8o0q9CLMCOs8DunR66IfWno2eLwgNH8u7JsJP69A=
|
||||
github.com/containers/storage v1.46.2-0.20230530174214-1dc289a244ce/go.mod h1:ke6qnPYu0t2bUfYvBSWI7R8dNitNsS97t3z3hveOINY=
|
||||
github.com/coreos/bbolt v1.3.2/go.mod h1:iRUV2dpdMOn7Bo10OQBFzIJO9kkE559Wcmn+qkEiiKk=
|
||||
github.com/coreos/etcd v3.3.10+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE=
|
||||
github.com/coreos/go-iptables v0.4.5/go.mod h1:/mVI274lEDI2ns62jHCDnCyBF9Iwsmekav8Dbxlm1MU=
|
||||
github.com/coreos/go-iptables v0.5.0/go.mod h1:/mVI274lEDI2ns62jHCDnCyBF9Iwsmekav8Dbxlm1MU=
|
||||
github.com/coreos/go-oidc v2.1.0+incompatible/go.mod h1:CgnwVTmzoESiwO9qyAFEMiHoZ1nMCKZlZ9V6mm3/LKc=
|
||||
github.com/coreos/go-oidc/v3 v3.5.0 h1:VxKtbccHZxs8juq7RdJntSqtXFtde9YpNpGn0yqgEHw=
|
||||
github.com/coreos/go-oidc/v3 v3.5.0/go.mod h1:ecXRtV4romGPeO6ieExAsUK9cb/3fp9hXNz1tlv8PIM=
|
||||
github.com/coreos/go-oidc/v3 v3.6.0 h1:AKVxfYw1Gmkn/w96z0DbT/B/xFnzTd3MkZvWLjF4n/o=
|
||||
github.com/coreos/go-oidc/v3 v3.6.0/go.mod h1:ZpHUsHBucTUj6WOkrP4E20UPynbLZzhTQ1XKCXkxyPc=
|
||||
github.com/coreos/go-semver v0.2.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk=
|
||||
github.com/coreos/go-semver v0.3.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk=
|
||||
github.com/coreos/go-systemd v0.0.0-20161114122254-48702e0da86b/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4=
|
||||
|
@ -291,8 +290,8 @@ github.com/crc-org/vfkit v0.0.5-0.20230602131541-3d57f09010c9/go.mod h1:sluZ7Q1Z
|
|||
github.com/creack/pty v1.1.7/go.mod h1:lj5s0c3V2DBrqTV7llrYr5NG6My20zk30Fl46Y7DoTY=
|
||||
github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E=
|
||||
github.com/creack/pty v1.1.18 h1:n56/Zwd5o6whRC5PMGretI4IdRLlmBXYNjScPaBgsbY=
|
||||
github.com/cyberphone/json-canonicalization v0.0.0-20220623050100-57a0ce2678a7 h1:vU+EP9ZuFUCYE0NYLwTSob+3LNEJATzNfP/DC7SWGWI=
|
||||
github.com/cyberphone/json-canonicalization v0.0.0-20220623050100-57a0ce2678a7/go.mod h1:uzvlm1mxhHkdfqitSA92i7Se+S9ksOn3a3qmv/kyOCw=
|
||||
github.com/cyberphone/json-canonicalization v0.0.0-20230514072755-504adb8a8af1 h1:8Pq5UNTC+/UfvcOPKQGZoKCkeF+ZaKa4wJ9OS2gsQQM=
|
||||
github.com/cyberphone/json-canonicalization v0.0.0-20230514072755-504adb8a8af1/go.mod h1:uzvlm1mxhHkdfqitSA92i7Se+S9ksOn3a3qmv/kyOCw=
|
||||
github.com/cyphar/filepath-securejoin v0.2.3 h1:YX6ebbZCZP7VkM3scTTokDgBL2TY741X51MTk3ycuNI=
|
||||
github.com/cyphar/filepath-securejoin v0.2.3/go.mod h1:aPGpWjXOXUn2NCNjFvBE6aRxGGx79pTxQpKOJNYHHl4=
|
||||
github.com/d2g/dhcp4 v0.0.0-20170904100407-a1d1b6c41b1c/go.mod h1:Ct2BUK8SB0YC1SMSibvLzxjeJLnrYEVLULFNiHY9YfQ=
|
||||
|
@ -320,8 +319,8 @@ github.com/docker/distribution v2.7.1+incompatible/go.mod h1:J2gT2udsDAN96Uj4Kfc
|
|||
github.com/docker/distribution v2.8.2+incompatible h1:T3de5rq0dB1j30rp0sA2rER+m322EBzniBPB6ZIzuh8=
|
||||
github.com/docker/distribution v2.8.2+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w=
|
||||
github.com/docker/docker v1.4.2-0.20190924003213-a8608b5b67c7/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
|
||||
github.com/docker/docker v24.0.0+incompatible h1:z4bf8HvONXX9Tde5lGBMQ7yCJgNahmJumdrStZAbeY4=
|
||||
github.com/docker/docker v24.0.0+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
|
||||
github.com/docker/docker v24.0.2+incompatible h1:eATx+oLz9WdNVkQrr0qjQ8HvRJ4bOOxfzEo8R+dA3cg=
|
||||
github.com/docker/docker v24.0.2+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
|
||||
github.com/docker/docker-credential-helpers v0.6.3/go.mod h1:WRaJzqw3CTB9bk10avuGsjVBZsD05qeibJ1/TYlvc0Y=
|
||||
github.com/docker/docker-credential-helpers v0.7.0 h1:xtCHsjxogADNZcdv1pKUHXryefjlVRqWqIhk/uXJp0A=
|
||||
github.com/docker/docker-credential-helpers v0.7.0/go.mod h1:rETQfLdHNT3foU5kuNkFR1R1V12OJRRO5lzt2D1b5X0=
|
||||
|
@ -356,7 +355,6 @@ github.com/facebookgo/clock v0.0.0-20150410010913-600d898af40a h1:yDWHCSQ40h88yi
|
|||
github.com/facebookgo/limitgroup v0.0.0-20150612190941-6abd8d71ec01 h1:IeaD1VDVBPlx3viJT9Md8if8IxxJnO+x0JCGb054heg=
|
||||
github.com/facebookgo/muster v0.0.0-20150708232844-fd3d7953fd52 h1:a4DFiKFJiDRGFD1qIcqGLX/WlUMD9dyLSLDt+9QZgt8=
|
||||
github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4=
|
||||
github.com/fatih/color v1.13.0 h1:8LOYc1KYPPmyKMuN8QV2DNRWNbLo6LZ0iLs8+mlH53w=
|
||||
github.com/felixge/httpsnoop v1.0.1/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U=
|
||||
github.com/felixge/httpsnoop v1.0.3 h1:s/nj+GCswXYzN5v2DpNMuMQYe+0DDwt5WVCU6CWBdXk=
|
||||
github.com/felixge/httpsnoop v1.0.3/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U=
|
||||
|
@ -436,7 +434,7 @@ github.com/go-openapi/swag v0.22.3 h1:yMBqmnQ0gyZvEb/+KzuWZOXgllrXT4SADYbvDaXHv/
|
|||
github.com/go-openapi/swag v0.22.3/go.mod h1:UzaqsxGiab7freDnrUUra0MwWfN/q7tE4j+VcZ0yl14=
|
||||
github.com/go-openapi/validate v0.22.1 h1:G+c2ub6q47kfX1sOBLwIQwzBVt8qmOAARyo/9Fqs9NU=
|
||||
github.com/go-openapi/validate v0.22.1/go.mod h1:rjnrwK57VJ7A8xqfpAOEKRH8yQSGUriMu5/zuPSQ1hg=
|
||||
github.com/go-rod/rod v0.112.9 h1:uA/yLbB+t0UlqJcLJtK2pZrCNPzd15dOKRUEOnmnt9k=
|
||||
github.com/go-rod/rod v0.113.1 h1:+Qb4K/vkR7BOhW6FhfhtLzUD3l11+0XlF4do+27sOQk=
|
||||
github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY=
|
||||
github.com/go-task/slim-sprig v0.0.0-20210107165309-348f09dbbbc0/go.mod h1:fyg7847qk6SyHyPtNmDHnmrv/HOrqktSC+C9fM+CJOE=
|
||||
github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572 h1:tfuBGBXKqDEevZMzYi5KSi8KkcZtzBcTgAUUtapy0OI=
|
||||
|
@ -530,12 +528,11 @@ github.com/google/go-cmp v0.5.3/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/
|
|||
github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
|
||||
github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
|
||||
github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
|
||||
github.com/google/go-cmp v0.5.8/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
|
||||
github.com/google/go-cmp v0.5.9 h1:O2Tfq5qg4qc4AmwVlvv0oLiVAGB7enBSJ2x2DqQFi38=
|
||||
github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
|
||||
github.com/google/go-containerregistry v0.5.1/go.mod h1:Ct15B4yir3PLOP5jsy0GNeYVaIZs/MK/Jz5any1wFW0=
|
||||
github.com/google/go-containerregistry v0.14.0 h1:z58vMqHxuwvAsVwvKEkmVBz2TlgBgH5k6koEXBtlYkw=
|
||||
github.com/google/go-containerregistry v0.14.0/go.mod h1:aiJ2fp/SXvkWgmYHioXnbMdlgB8eXiiYOY55gfN91Wk=
|
||||
github.com/google/go-containerregistry v0.15.2 h1:MMkSh+tjSdnmJZO7ljvEqV1DjfekB6VUEAZgy3a+TQE=
|
||||
github.com/google/go-containerregistry v0.15.2/go.mod h1:wWK+LnOv4jXMM23IT/F1wdYftGWGr47Is8CG+pmHK1Q=
|
||||
github.com/google/go-intervals v0.0.2 h1:FGrVEiUnTRKR8yE04qzXYaJMtnIYqobR5QbblK3ixcM=
|
||||
github.com/google/go-intervals v0.0.2/go.mod h1:MkaR3LNRfeKLPmqgJYs4E66z5InYjmCjbbr4TQlcT6Y=
|
||||
github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
|
||||
|
@ -555,8 +552,6 @@ github.com/google/pprof v0.0.0-20230323073829-e72429f035bd/go.mod h1:79YE0hCXdHa
|
|||
github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI=
|
||||
github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510 h1:El6M4kTTCOh6aBiKaUGG7oYTSPP8MxqL4YI3kZKwcP4=
|
||||
github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510/go.mod h1:pupxD2MaaD3pAXIBCelhxNneeOaAeabZDe5s4K6zSpQ=
|
||||
github.com/google/trillian v1.5.1 h1:2p1l13f0eWd7eOShwarwIxutYYnGzY/5S+xYewQIPkU=
|
||||
github.com/google/trillian v1.5.1/go.mod h1:EcDttN8nf+EoAiyLigBAp9ebncZI6rhJPyxZ+dQ6HSo=
|
||||
github.com/google/uuid v1.0.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
|
||||
github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
|
||||
github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
|
||||
|
@ -592,14 +587,14 @@ github.com/hashicorp/errwrap v1.1.0 h1:OxrOeh75EUXMY8TBjag2fzXGZ40LB6IKw45YeGUDY
|
|||
github.com/hashicorp/errwrap v1.1.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4=
|
||||
github.com/hashicorp/go-cleanhttp v0.5.2 h1:035FKYIWjmULyFRBKPs8TBQoi0x6d9G4xc9neXJWAZQ=
|
||||
github.com/hashicorp/go-cleanhttp v0.5.2/go.mod h1:kO/YDlP8L1346E6Sodw+PrpBSV4/SoxCXGY6BqNFT48=
|
||||
github.com/hashicorp/go-hclog v0.9.2 h1:CG6TE5H9/JXsFWJCfoIVpKFIkFe6ysEuHirp4DxCsHI=
|
||||
github.com/hashicorp/go-hclog v0.9.2/go.mod h1:5CU+agLiy3J7N7QjHK5d05KxGsuXiQLrjA0H7acj2lQ=
|
||||
github.com/hashicorp/go-hclog v1.3.1 h1:vDwF1DFNZhntP4DAjuTpOw3uEgMUpXh1pB5fW9DqHpo=
|
||||
github.com/hashicorp/go-multierror v0.0.0-20161216184304-ed905158d874/go.mod h1:JMRHfdO9jKNzS/+BTlxCjKNQHg/jZAft8U7LloJvN7I=
|
||||
github.com/hashicorp/go-multierror v1.0.0/go.mod h1:dHtQlpGsu+cZNNAkkCN/P3hoUDHhCYQXV3UM06sGGrk=
|
||||
github.com/hashicorp/go-multierror v1.1.1 h1:H5DkEtf6CXdFp0N0Em5UCwQpXMWke8IA0+lD48awMYo=
|
||||
github.com/hashicorp/go-multierror v1.1.1/go.mod h1:iw975J/qwKPdAO1clOe2L8331t/9/fmwbPZ6JB6eMoM=
|
||||
github.com/hashicorp/go-retryablehttp v0.7.2 h1:AcYqCvkpalPnPF2pn0KamgwamS42TqUDDYFRKq/RAd0=
|
||||
github.com/hashicorp/go-retryablehttp v0.7.2/go.mod h1:Jy/gPYAdjqffZ/yFGCFV2doI5wjtH1ewM9u8iYVjtX8=
|
||||
github.com/hashicorp/go-retryablehttp v0.7.4 h1:ZQgVdpTdAL7WpMIwLzCfbalOcSUdkDZnpUv3/+BxzFA=
|
||||
github.com/hashicorp/go-retryablehttp v0.7.4/go.mod h1:Jy/gPYAdjqffZ/yFGCFV2doI5wjtH1ewM9u8iYVjtX8=
|
||||
github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
|
||||
github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
|
||||
github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ=
|
||||
|
@ -613,8 +608,8 @@ github.com/imdario/mergo v0.3.8/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJ
|
|||
github.com/imdario/mergo v0.3.10/go.mod h1:jmQim1M+e3UYxmgPu/WyfjB3N3VflVyUjjjwH0dnCYA=
|
||||
github.com/imdario/mergo v0.3.11/go.mod h1:jmQim1M+e3UYxmgPu/WyfjB3N3VflVyUjjjwH0dnCYA=
|
||||
github.com/imdario/mergo v0.3.12/go.mod h1:jmQim1M+e3UYxmgPu/WyfjB3N3VflVyUjjjwH0dnCYA=
|
||||
github.com/imdario/mergo v0.3.15 h1:M8XP7IuFNsqUx6VPK2P9OSmsYsI/YFaGil0uD21V3dM=
|
||||
github.com/imdario/mergo v0.3.15/go.mod h1:WBLT9ZmE3lPoWsEzCh9LPo3TiwVN+ZKEjmz+hD27ysY=
|
||||
github.com/imdario/mergo v0.3.16 h1:wwQJbIsHYGMUyLSPrEq1CT16AhnhNJQ51+4fdHUnCl4=
|
||||
github.com/imdario/mergo v0.3.16/go.mod h1:WBLT9ZmE3lPoWsEzCh9LPo3TiwVN+ZKEjmz+hD27ysY=
|
||||
github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8=
|
||||
github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8=
|
||||
github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw=
|
||||
|
@ -687,9 +682,7 @@ github.com/markbates/oncer v0.0.0-20181203154359-bf2de49a0be2/go.mod h1:Ld9puTsI
|
|||
github.com/markbates/safe v1.0.1/go.mod h1:nAqgmRi7cY2nqMc92/bSEeQA+R4OheNU2T1kNSCBdG0=
|
||||
github.com/marstr/guid v1.1.0/go.mod h1:74gB1z2wpxxInTG6yaqA7KrtM0NZ+RbrcqDvYHefzho=
|
||||
github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU=
|
||||
github.com/mattn/go-colorable v0.1.13 h1:fFA4WZxdEF4tXPZVKMLwD8oUnCTTo08duU7wxecdEvA=
|
||||
github.com/mattn/go-isatty v0.0.4/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4=
|
||||
github.com/mattn/go-isatty v0.0.17 h1:BTarxUcIeDqL27Mc+vyvdWYSL28zpIhv3RoTdsLMPng=
|
||||
github.com/mattn/go-runewidth v0.0.2/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU=
|
||||
github.com/mattn/go-runewidth v0.0.14 h1:+xnbZSEeDbOIg5/mE6JF0w6n9duR1l3/WmbinWVwUuU=
|
||||
github.com/mattn/go-runewidth v0.0.14/go.mod h1:Jdepj2loyihRzMpdS35Xk/zdY8IAYHsh153qUoGf23w=
|
||||
|
@ -707,8 +700,9 @@ github.com/miekg/pkcs11 v1.0.3/go.mod h1:XsNlhZGX73bx86s2hdc/FuaLm2CPZJemRLMA+WT
|
|||
github.com/miekg/pkcs11 v1.1.1 h1:Ugu9pdy6vAYku5DEpVWVFPYnzV+bxB+iRdbuFSu7TvU=
|
||||
github.com/miekg/pkcs11 v1.1.1/go.mod h1:XsNlhZGX73bx86s2hdc/FuaLm2CPZJemRLMA+WTFxgs=
|
||||
github.com/mistifyio/go-zfs v2.1.2-0.20190413222219-f784269be439+incompatible/go.mod h1:8AuVvqP/mXw1px98n46wfvcGfQ4ci2FwoAjKYxuo3Z4=
|
||||
github.com/mistifyio/go-zfs/v3 v3.0.0 h1:J5QK618xRcXnQYZ2GE5FdmpS1ufIrWue+lR/mpe6/14=
|
||||
github.com/mistifyio/go-zfs/v3 v3.0.0/go.mod h1:CzVgeB0RvF2EGzQnytKVvVSDwmKJXxkOTUGbNrTja/k=
|
||||
github.com/mistifyio/go-zfs/v3 v3.0.1 h1:YaoXgBePoMA12+S1u/ddkv+QqxcfiZK4prI6HPnkFiU=
|
||||
github.com/mistifyio/go-zfs/v3 v3.0.1/go.mod h1:CzVgeB0RvF2EGzQnytKVvVSDwmKJXxkOTUGbNrTja/k=
|
||||
github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0=
|
||||
github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y=
|
||||
github.com/mitchellh/mapstructure v1.3.3/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo=
|
||||
|
@ -763,8 +757,8 @@ github.com/onsi/ginkgo v1.12.0/go.mod h1:oUhWkIvk5aDxtKvDDuw8gItl8pKl42LzjC9KZE0
|
|||
github.com/onsi/ginkgo v1.12.1/go.mod h1:zj2OWP4+oCPe1qIXoGWkgMRwljMUYCdkwsT2108oapk=
|
||||
github.com/onsi/ginkgo v1.16.4/go.mod h1:dX+/inL/fNMqNlz0e9LfyB9TswhZpCVdJM/Z6Vvnwo0=
|
||||
github.com/onsi/ginkgo/v2 v2.1.3/go.mod h1:vw5CSIxN1JObi/U8gcbwft7ZxR2dgaR70JSE3/PpL4c=
|
||||
github.com/onsi/ginkgo/v2 v2.9.7 h1:06xGQy5www2oN160RtEZoTvnP2sPhEfePYmCDc2szss=
|
||||
github.com/onsi/ginkgo/v2 v2.9.7/go.mod h1:cxrmXWykAwTwhQsJOPfdIDiJ+l2RYq7U8hFU+M/1uw0=
|
||||
github.com/onsi/ginkgo/v2 v2.10.0 h1:sfUl4qgLdvkChZrWCYndY2EAu9BRIw1YphNAzy1VNWs=
|
||||
github.com/onsi/ginkgo/v2 v2.10.0/go.mod h1:UDQOh5wbQUlMnkLfVaIUMtQ1Vus92oM+P2JX1aulgcE=
|
||||
github.com/onsi/gomega v0.0.0-20151007035656-2152b45fa28a/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA=
|
||||
github.com/onsi/gomega v0.0.0-20170829124025-dcabb60a477c/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA=
|
||||
github.com/onsi/gomega v1.5.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY=
|
||||
|
@ -840,7 +834,7 @@ github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:
|
|||
github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
|
||||
github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
|
||||
github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
|
||||
github.com/prometheus/client_model v0.3.0 h1:UBgGFHqYdG/TPFD1B1ogZywDqEkwp3fBMvqdiQ7Xew4=
|
||||
github.com/prometheus/client_model v0.4.0 h1:5lQXD3cAg1OXBf4Wq03gTrXHeaV0TQvGfUooCfx1yqY=
|
||||
github.com/prometheus/common v0.0.0-20180110214958-89604d197083/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro=
|
||||
github.com/prometheus/common v0.0.0-20181113130724-41aa239b4cce/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro=
|
||||
github.com/prometheus/common v0.4.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4=
|
||||
|
@ -887,10 +881,10 @@ github.com/sergi/go-diff v1.2.0 h1:XU+rvMAioB0UC3q1MFrIQy4Vo5/4VsRDQQXHsEya6xQ=
|
|||
github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc=
|
||||
github.com/sigstore/fulcio v1.3.1 h1:0ntW9VbQbt2JytoSs8BOGB84A65eeyvGSavWteYp29Y=
|
||||
github.com/sigstore/fulcio v1.3.1/go.mod h1:/XfqazOec45ulJZpyL9sq+OsVQ8g2UOVoNVi7abFgqU=
|
||||
github.com/sigstore/rekor v1.1.2-0.20230508234306-ad288b385a44 h1:03iBveStHVfP97eB54lMjkgrSMYWW7F7X9IlOI9N+A0=
|
||||
github.com/sigstore/rekor v1.1.2-0.20230508234306-ad288b385a44/go.mod h1:4/htUXiP2z4KwvZOXTalCkh5C9I7msJbVWrdjWyD+fU=
|
||||
github.com/sigstore/sigstore v1.6.4 h1:jH4AzR7qlEH/EWzm+opSpxCfuUcjHL+LJPuQE7h40WE=
|
||||
github.com/sigstore/sigstore v1.6.4/go.mod h1:pjR64lBxnjoSrAr+Ydye/FV73IfrgtoYlAI11a8xMfA=
|
||||
github.com/sigstore/rekor v1.2.2-0.20230601122533-4c81ff246d12 h1:x/WnxasgR40qGY67IHwioakXLuhDxJ10vF8/INuOTiI=
|
||||
github.com/sigstore/rekor v1.2.2-0.20230601122533-4c81ff246d12/go.mod h1:8c+a8Yo7r8gKuYbIaz+c3oOdw9iMXx+tMdOg2+b+2jQ=
|
||||
github.com/sigstore/sigstore v1.6.5 h1:/liHIo7YPJp6sN31DzBYDOuRPmN1xbzROMBE5DLllYM=
|
||||
github.com/sigstore/sigstore v1.6.5/go.mod h1:h+EoQsf9+6UKgNYxKhBcPgo4PZeEVfzAJxKRRIYhyN4=
|
||||
github.com/sirupsen/logrus v1.0.4-0.20170822132746-89742aefa4b2/go.mod h1:pMByvHTf9Beacp5x1UXfOR9xyW/9antXMhjMPG0dEzc=
|
||||
github.com/sirupsen/logrus v1.0.6/go.mod h1:pMByvHTf9Beacp5x1UXfOR9xyW/9antXMhjMPG0dEzc=
|
||||
github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo=
|
||||
|
@ -946,8 +940,8 @@ github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO
|
|||
github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4=
|
||||
github.com/stretchr/testify v1.8.4 h1:CcVxjf3Q8PM0mHUKJCdn+eZZtm5yQwehR5yeSVQQcUk=
|
||||
github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo=
|
||||
github.com/sylabs/sif/v2 v2.11.3 h1:EQxi5zl6i5DsbVal9HHpk/zuSx7aNLeZBy8vmvFz838=
|
||||
github.com/sylabs/sif/v2 v2.11.3/go.mod h1:0ryivqvvsncJOJjU5QQIEc77a5zKK46F+urBXMdA07w=
|
||||
github.com/sylabs/sif/v2 v2.11.4 h1:4dRvsRFVkyS7e8oD8AEL0HrJocnet05+EFW+DhVb/Ic=
|
||||
github.com/sylabs/sif/v2 v2.11.4/go.mod h1:83kqbKZFRFfFLe1ui5BH+rAxF2obICM/i3zto4ivM7s=
|
||||
github.com/syndtr/gocapability v0.0.0-20170704070218-db04d3cc01c8/go.mod h1:hkRG7XYTFWNJGYcbNJQlaLq0fg1yr4J4t/NcTQtrfww=
|
||||
github.com/syndtr/gocapability v0.0.0-20180916011248-d98352740cb2/go.mod h1:hkRG7XYTFWNJGYcbNJQlaLq0fg1yr4J4t/NcTQtrfww=
|
||||
github.com/syndtr/gocapability v0.0.0-20200815063812-42c35b437635 h1:kdXcSzyDtseVEc4yCz2qF8ZrQvIDBJLl4S1c3GCXmoI=
|
||||
|
@ -1008,14 +1002,14 @@ github.com/xeipuuv/gojsonschema v1.2.0 h1:LhYJRs+L4fBtjZUfuSZIKGeVu0QRy8e5Xi7D17
|
|||
github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU=
|
||||
github.com/xordataexchange/crypt v0.0.3-0.20170626215501-b2862e3d0a77/go.mod h1:aYKd//L2LvnjZzWKhF00oedf4jCCReLcmhLdhm1A27Q=
|
||||
github.com/youmark/pkcs8 v0.0.0-20181117223130-1be2e3e5546d/go.mod h1:rHwXgn7JulP+udvsHwJoVG1YGAP6VLg4y9I5dyZdqmA=
|
||||
github.com/ysmood/fetchup v0.2.2 h1:Qn8/q5uDW7szclt4sVXCFJ1TXup3hogz94OaLf6kloo=
|
||||
github.com/ysmood/fetchup v0.2.3 h1:ulX+SonA0Vma5zUFXtv52Kzip/xe7aj4vqT5AJwQ+ZQ=
|
||||
github.com/ysmood/goob v0.4.0 h1:HsxXhyLBeGzWXnqVKtmT9qM7EuVs/XOgkX7T6r1o1AQ=
|
||||
github.com/ysmood/got v0.34.1 h1:IrV2uWLs45VXNvZqhJ6g2nIhY+pgIG1CUoOcqfXFl1s=
|
||||
github.com/ysmood/gson v0.7.3 h1:QFkWbTH8MxyUTKPkVWAENJhxqdBa4lYTQWqZCiLG6kE=
|
||||
github.com/ysmood/leakless v0.8.0 h1:BzLrVoiwxikpgEQR0Lk8NyBN5Cit2b1z+u0mgL4ZJak=
|
||||
github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
|
||||
github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
|
||||
github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k=
|
||||
github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY=
|
||||
github.com/yvasiyarov/go-metrics v0.0.0-20140926110328-57bccd1ccd43/go.mod h1:aX5oPXxHm3bOH+xeAttToC8pqch2ScQN/JoXYupl6xs=
|
||||
github.com/yvasiyarov/gorelic v0.0.0-20141212073537-a9bba5b9ab50/go.mod h1:NUSPSUX/bi6SeDMUh6brw0nXpxHnc96TguQh0+r/ssA=
|
||||
github.com/yvasiyarov/newrelic_platform_go v0.0.0-20140908184405-b21fdbd4370f/go.mod h1:GlGEuHIJweS1mbCqG+7vt2nvWLzLLnRHbXz5JKd/Qbg=
|
||||
|
@ -1067,11 +1061,10 @@ golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPh
|
|||
golang.org/x/crypto v0.0.0-20200728195943-123391ffb6de/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
|
||||
golang.org/x/crypto v0.0.0-20201002170205-7f63de1d35b0/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
|
||||
golang.org/x/crypto v0.0.0-20210322153248-0c34fe9e7dc2/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4=
|
||||
golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
|
||||
golang.org/x/crypto v0.0.0-20211215153901-e495a2d5b3d3/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4=
|
||||
golang.org/x/crypto v0.0.0-20220622213112-05595931fe9d/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4=
|
||||
golang.org/x/crypto v0.8.0 h1:pd9TJtTueMTVQXzk8E2XESSMQDj/U7OUu0PqJqPXQjQ=
|
||||
golang.org/x/crypto v0.8.0/go.mod h1:mRqEX+O9/h5TFCrQhkgjo2yKi0yYA+9ecGkdQoHrywE=
|
||||
golang.org/x/crypto v0.9.0 h1:LF6fAI+IutBocDJ2OT0Q1g8plpYljMZ4+lty+dsqw3g=
|
||||
golang.org/x/crypto v0.9.0/go.mod h1:yrmDGqONDYtNj3tH8X9dzUun2m2lzPa9ngI6/RUPGR0=
|
||||
golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
|
||||
golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
|
||||
golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8=
|
||||
|
@ -1082,8 +1075,8 @@ golang.org/x/exp v0.0.0-20191227195350-da58074b4299/go.mod h1:2RIsYlXP63K8oxa1u0
|
|||
golang.org/x/exp v0.0.0-20200119233911-0405dc783f0a/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4=
|
||||
golang.org/x/exp v0.0.0-20200207192155-f17229e696bd/go.mod h1:J/WKrq2StrnmMY6+EHIKF9dgMWnmCNThgcyBT1FY9mM=
|
||||
golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6/go.mod h1:3jZMyOhIsHpP37uCMkUooju7aAi5cS1Q23tOzKc+0MU=
|
||||
golang.org/x/exp v0.0.0-20230425010034-47ecfdc1ba53 h1:5llv2sWeaMSnA3w2kS57ouQQ4pudlXrR0dCgw51QK9o=
|
||||
golang.org/x/exp v0.0.0-20230425010034-47ecfdc1ba53/go.mod h1:V1LtkGg67GoY2N1AnLN78QLrzxkLyJw7RJb1gzOOz9w=
|
||||
golang.org/x/exp v0.0.0-20230522175609-2e198f4a06a1 h1:k/i9J1pBpvlfR+9QsetwPyERsqu1GIbi967PQMq3Ivc=
|
||||
golang.org/x/exp v0.0.0-20230522175609-2e198f4a06a1/go.mod h1:V1LtkGg67GoY2N1AnLN78QLrzxkLyJw7RJb1gzOOz9w=
|
||||
golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js=
|
||||
golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0=
|
||||
golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
|
||||
|
@ -1105,7 +1098,6 @@ golang.org/x/mod v0.1.1-0.20191107180719-034126e5016b/go.mod h1:QqPTAvyqsEbceGzB
|
|||
golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
|
||||
golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
|
||||
golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
|
||||
golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4=
|
||||
golang.org/x/mod v0.10.0 h1:lFO9qtOdlre5W1jxS3r/4szv2/6iXxScdzjoBMXNhYk=
|
||||
golang.org/x/mod v0.10.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs=
|
||||
golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||
|
@ -1151,9 +1143,6 @@ golang.org/x/net v0.0.0-20210421230115-4e50805a0758/go.mod h1:72T/g9IO56b78aLF+1
|
|||
golang.org/x/net v0.0.0-20210428140749-89ef3d95e781/go.mod h1:OJAsFXCWl8Ukc7SiCT/9KSuxbyM7479/AVlXFRxuMCk=
|
||||
golang.org/x/net v0.0.0-20210825183410-e898025ed96a/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
|
||||
golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
|
||||
golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c=
|
||||
golang.org/x/net v0.3.0/go.mod h1:MBQ8lrhLObU/6UmLb4fmbmk5OcyYmqtbGd/9yIeKjEE=
|
||||
golang.org/x/net v0.4.0/go.mod h1:MBQ8lrhLObU/6UmLb4fmbmk5OcyYmqtbGd/9yIeKjEE=
|
||||
golang.org/x/net v0.10.0 h1:X2//UzNDwYmtCLn7To6G58Wr6f5ahEAQgKNzv9Y951M=
|
||||
golang.org/x/net v0.10.0/go.mod h1:0qNGK6F8kojg2nk9dLZ2mShWaEBan6FAoqfSigmmuDg=
|
||||
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
|
||||
|
@ -1161,9 +1150,8 @@ golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4Iltr
|
|||
golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
|
||||
golang.org/x/oauth2 v0.0.0-20191202225959-858c2ad4c8b6/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
|
||||
golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
|
||||
golang.org/x/oauth2 v0.3.0/go.mod h1:rQrIauxkUhJ6CuwEXwymO2/eh4xz2ZWF1nBkcxS+tGk=
|
||||
golang.org/x/oauth2 v0.7.0 h1:qe6s0zUXlPX80/dITx3440hWZ7GwMwgDDyrSGTPJG/g=
|
||||
golang.org/x/oauth2 v0.7.0/go.mod h1:hPLQkd9LyjfXTiRohC/41GhcFqxisoUQ99sCUOHO9x4=
|
||||
golang.org/x/oauth2 v0.8.0 h1:6dkIjl3j3LtZ/O3sTgZTMsLKSftL/B8Zgq4huOIIUu8=
|
||||
golang.org/x/oauth2 v0.8.0/go.mod h1:yr7u4HXZRm1R1kBWqr/xKNqewf0plRYoB7sla+BCIXE=
|
||||
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
|
@ -1176,7 +1164,6 @@ golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208/go.mod h1:RxMgew5VJxzue5/jJ
|
|||
golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.2.0 h1:PUR+T4wwASmuSTYdKjYHI5TD22Wy5ogLU5qZCOLxBrI=
|
||||
golang.org/x/sync v0.2.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
|
@ -1259,17 +1246,13 @@ golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e/go.mod h1:oPkhp1MJrh7nUepCBc
|
|||
golang.org/x/sys v0.0.0-20220310020820-b874c991c1a5/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20220817070843-5a390386f1f2/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20220823224334-20c2bfdbfe24/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20220906165534-d0df966e6959/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20220908164124-27713097b956/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.3.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.8.0 h1:EBmGv8NaZBZTWvrbjNoL6HVt+IVy3QDQpJs7VRIw3tU=
|
||||
golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
|
||||
golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
|
||||
golang.org/x/term v0.3.0/go.mod h1:q750SLmJuPmVoN1blW3UFBPREJfb1KmY3vwxfr+nFDA=
|
||||
golang.org/x/term v0.8.0 h1:n5xxQn2i3PC0yLAbjTpNT85q/Kgzcr2gIoX9OrJUols=
|
||||
golang.org/x/term v0.8.0/go.mod h1:xPskH00ivmX89bAKVGSKKtLOWNx2+17Eiy94tnKShWo=
|
||||
golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
|
||||
|
@ -1281,7 +1264,6 @@ golang.org/x/text v0.3.4/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
|
|||
golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
|
||||
golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
|
||||
golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ=
|
||||
golang.org/x/text v0.5.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
|
||||
golang.org/x/text v0.9.0 h1:2sjJmO8cDvYveuX97RDLsxlyUxLl+GHoLxBiRdHllBE=
|
||||
golang.org/x/text v0.9.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8=
|
||||
golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
|
||||
|
@ -1340,9 +1322,8 @@ golang.org/x/tools v0.0.0-20200916195026-c9a70fc28ce3/go.mod h1:z6u4i615ZeAfBE4X
|
|||
golang.org/x/tools v0.0.0-20201224043029-2b0845dc783e/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
|
||||
golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
|
||||
golang.org/x/tools v0.1.1/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=
|
||||
golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc=
|
||||
golang.org/x/tools v0.9.1 h1:8WMNJAz3zrtPmnYC7ISf5dEn3MT0gY7jBJfw27yrrLo=
|
||||
golang.org/x/tools v0.9.1/go.mod h1:owI94Op576fPu3cIGQeHs3joujW/2Oc6MtlxbF5dfNc=
|
||||
golang.org/x/tools v0.9.3 h1:Gn1I8+64MsuTb/HpH+LmQtNas23LhUVr3rYZ0eKuaMM=
|
||||
golang.org/x/tools v0.9.3/go.mod h1:owI94Op576fPu3cIGQeHs3joujW/2Oc6MtlxbF5dfNc=
|
||||
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||
|
@ -1509,11 +1490,9 @@ k8s.io/cri-api v0.20.6/go.mod h1:ew44AjNXwyn1s0U4xCKGodU7J1HzBeZ1MpGrpa5r8Yc=
|
|||
k8s.io/gengo v0.0.0-20200413195148-3a45101e95ac/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0=
|
||||
k8s.io/gengo v0.0.0-20200428234225-8167cfdcfc14/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0=
|
||||
k8s.io/gengo v0.0.0-20201113003025-83324d819ded/go.mod h1:FiNAH4ZV3gBg2Kwh89tzAEV2be7d5xI0vBa/VySYy3E=
|
||||
k8s.io/klog v1.0.0 h1:Pt+yjF5aB1xDSVbau4VsWe+dQNzA0qv1LlXdC2dF6Q8=
|
||||
k8s.io/klog/v2 v2.0.0/go.mod h1:PBfzABfn139FHAV07az/IF9Wp1bkk3vpT2XSJ76fSDE=
|
||||
k8s.io/klog/v2 v2.2.0/go.mod h1:Od+F08eJP+W3HUb4pSrPpgp9DGU4GzlpG/TmITuYh/Y=
|
||||
k8s.io/klog/v2 v2.4.0/go.mod h1:Od+F08eJP+W3HUb4pSrPpgp9DGU4GzlpG/TmITuYh/Y=
|
||||
k8s.io/klog/v2 v2.90.1 h1:m4bYOKall2MmOiRaR1J+We67Do7vm9KiQVlT96lnHUw=
|
||||
k8s.io/kube-openapi v0.0.0-20200805222855-6aeccd4b50c6/go.mod h1:UuqjUnNftUyPE5H64/qeyjQoUZhGpeFDVdxjTeEVN2o=
|
||||
k8s.io/kube-openapi v0.0.0-20201113171705-d219536bb9fd/go.mod h1:WOJ3KddDSol4tAGcJo0Tvi+dK12EcqSLqcWsryKMpfM=
|
||||
k8s.io/kubernetes v1.13.0/go.mod h1:ocZa8+6APFNC2tX1DZASIbocyYT5jHzqFVsY5aoB7Jk=
|
||||
|
|
|
@ -50,6 +50,7 @@ type NetworkCreateOptions struct {
|
|||
NetworkDNSServers []string
|
||||
Ranges []string
|
||||
Subnets []string
|
||||
Routes []string
|
||||
IPv6 bool
|
||||
// Mapping of driver options and values.
|
||||
Options map[string]string
|
||||
|
|
|
@ -277,6 +277,7 @@ ln -s vendor src
|
|||
LDFLAGS="-X %{import_path}/libpod/define.buildInfo=$(date +%s)"
|
||||
|
||||
# build rootlessport first
|
||||
export BUILDTAGS="exclude_graphdriver_aufs exclude_graphdriver_btrfs exclude_graphdriver_devicemapper exclude_graphdriver_overlay exclude_graphdriver_zfs"
|
||||
%gobuild -o bin/rootlessport %{import_path}/cmd/rootlessport
|
||||
|
||||
export BASEBUILDTAGS="seccomp exclude_graphdriver_devicemapper $(hack/systemd_tag.sh) $(hack/libsubid_tag.sh)"
|
||||
|
|
|
@ -62,6 +62,215 @@ var _ = Describe("Podman network create", func() {
|
|||
Expect(subnet.Contains(containerIP)).To(BeTrue(), "subnet contains containerIP")
|
||||
})
|
||||
|
||||
It("podman network create with name and subnet and static route", func() {
|
||||
SkipIfCNI(podmanTest)
|
||||
netName := "subnet-" + stringid.GenerateRandomID()
|
||||
nc := podmanTest.Podman([]string{
|
||||
"network",
|
||||
"create",
|
||||
"--subnet",
|
||||
"10.19.12.0/24",
|
||||
"--route",
|
||||
"10.21.0.0/24,10.19.12.250",
|
||||
netName,
|
||||
})
|
||||
nc.WaitWithDefaultTimeout()
|
||||
defer podmanTest.removeNetwork(netName)
|
||||
Expect(nc).Should(Exit(0))
|
||||
|
||||
// Inspect the network configuration
|
||||
inspect := podmanTest.Podman([]string{"network", "inspect", netName})
|
||||
inspect.WaitWithDefaultTimeout()
|
||||
Expect(inspect).Should(Exit(0))
|
||||
|
||||
// JSON the network configuration into something usable
|
||||
var results []types.Network
|
||||
err := json.Unmarshal([]byte(inspect.OutputToString()), &results)
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
Expect(results).To(HaveLen(1))
|
||||
result := results[0]
|
||||
Expect(result).To(HaveField("Name", netName))
|
||||
Expect(result.Subnets).To(HaveLen(1))
|
||||
Expect(result.Subnets[0].Subnet.String()).To(Equal("10.19.12.0/24"))
|
||||
Expect(result.Subnets[0].Gateway.String()).To(Equal("10.19.12.1"))
|
||||
Expect(result.Routes[0].Destination.String()).To(Equal("10.21.0.0/24"))
|
||||
Expect(result.Routes[0].Gateway.String()).To(Equal("10.19.12.250"))
|
||||
Expect(result.Routes[0].Metric).To(BeNil())
|
||||
|
||||
// Once a container executes a new network, the nic will be created. We should clean those up
|
||||
// best we can
|
||||
defer removeNetworkDevice(result.NetworkInterface)
|
||||
|
||||
})
|
||||
|
||||
It("podman network create with name and subnet and static route and metric", func() {
|
||||
SkipIfCNI(podmanTest)
|
||||
netName := "subnet-" + stringid.GenerateRandomID()
|
||||
nc := podmanTest.Podman([]string{
|
||||
"network",
|
||||
"create",
|
||||
"--subnet",
|
||||
"10.19.13.0/24",
|
||||
"--route",
|
||||
"10.21.1.0/24,10.19.13.250,120",
|
||||
netName,
|
||||
})
|
||||
nc.WaitWithDefaultTimeout()
|
||||
defer podmanTest.removeNetwork(netName)
|
||||
Expect(nc).Should(Exit(0))
|
||||
|
||||
// Inspect the network configuration
|
||||
inspect := podmanTest.Podman([]string{"network", "inspect", netName})
|
||||
inspect.WaitWithDefaultTimeout()
|
||||
Expect(inspect).Should(Exit(0))
|
||||
|
||||
// JSON the network configuration into something usable
|
||||
var results []types.Network
|
||||
err := json.Unmarshal([]byte(inspect.OutputToString()), &results)
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
Expect(results).To(HaveLen(1))
|
||||
result := results[0]
|
||||
Expect(result).To(HaveField("Name", netName))
|
||||
Expect(result.Subnets).To(HaveLen(1))
|
||||
Expect(result.Subnets[0].Subnet.String()).To(Equal("10.19.13.0/24"))
|
||||
Expect(result.Subnets[0].Gateway.String()).To(Equal("10.19.13.1"))
|
||||
Expect(result.Routes[0].Destination.String()).To(Equal("10.21.1.0/24"))
|
||||
Expect(result.Routes[0].Gateway.String()).To(Equal("10.19.13.250"))
|
||||
Expect(*result.Routes[0].Metric).To(Equal(uint32(120)))
|
||||
|
||||
// Once a container executes a new network, the nic will be created. We should clean those up
|
||||
// best we can
|
||||
defer removeNetworkDevice(result.NetworkInterface)
|
||||
|
||||
})
|
||||
|
||||
It("podman network create with name and subnet and two static routes", func() {
|
||||
SkipIfCNI(podmanTest)
|
||||
netName := "subnet-" + stringid.GenerateRandomID()
|
||||
nc := podmanTest.Podman([]string{
|
||||
"network",
|
||||
"create",
|
||||
"--subnet",
|
||||
"10.19.14.0/24",
|
||||
"--route",
|
||||
"10.21.2.0/24,10.19.14.250",
|
||||
"--route",
|
||||
"10.21.3.0/24,10.19.14.251,120",
|
||||
netName,
|
||||
})
|
||||
nc.WaitWithDefaultTimeout()
|
||||
defer podmanTest.removeNetwork(netName)
|
||||
Expect(nc).Should(Exit(0))
|
||||
|
||||
// Inspect the network configuration
|
||||
inspect := podmanTest.Podman([]string{"network", "inspect", netName})
|
||||
inspect.WaitWithDefaultTimeout()
|
||||
Expect(inspect).Should(Exit(0))
|
||||
|
||||
// Unmarshal the network configuration into something usable
|
||||
var results []types.Network
|
||||
err := json.Unmarshal([]byte(inspect.OutputToString()), &results)
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
Expect(results).To(HaveLen(1))
|
||||
result := results[0]
|
||||
Expect(result).To(HaveField("Name", netName))
|
||||
Expect(result.Subnets).To(HaveLen(1))
|
||||
Expect(result.Subnets[0].Subnet.String()).To(Equal("10.19.14.0/24"))
|
||||
Expect(result.Subnets[0].Gateway.String()).To(Equal("10.19.14.1"))
|
||||
Expect(result.Routes).To(HaveLen(2))
|
||||
Expect(result.Routes[0].Destination.String()).To(Equal("10.21.2.0/24"))
|
||||
Expect(result.Routes[0].Gateway.String()).To(Equal("10.19.14.250"))
|
||||
Expect(result.Routes[0].Metric).To(BeNil())
|
||||
Expect(result.Routes[1].Destination.String()).To(Equal("10.21.3.0/24"))
|
||||
Expect(result.Routes[1].Gateway.String()).To(Equal("10.19.14.251"))
|
||||
Expect(*result.Routes[1].Metric).To(Equal(uint32(120)))
|
||||
|
||||
// Once a container uses a new network, the nic will be created. We should clean those up
|
||||
// as best we can
|
||||
defer removeNetworkDevice(result.NetworkInterface)
|
||||
|
||||
})
|
||||
|
||||
It("podman network create with name and subnet and static route (ipv6)", func() {
|
||||
SkipIfCNI(podmanTest)
|
||||
netName := "subnet-" + stringid.GenerateRandomID()
|
||||
nc := podmanTest.Podman([]string{
|
||||
"network",
|
||||
"create",
|
||||
"--subnet",
|
||||
"fd:ab04::/64",
|
||||
"--route",
|
||||
"fd:1::/64,fd::1,120",
|
||||
netName,
|
||||
})
|
||||
nc.WaitWithDefaultTimeout()
|
||||
defer podmanTest.removeNetwork(netName)
|
||||
Expect(nc).Should(Exit(0))
|
||||
|
||||
// Inspect the network configuration
|
||||
inspect := podmanTest.Podman([]string{"network", "inspect", netName})
|
||||
inspect.WaitWithDefaultTimeout()
|
||||
Expect(inspect).Should(Exit(0))
|
||||
|
||||
// Unmarshal the network configuration into something usable
|
||||
var results []types.Network
|
||||
err := json.Unmarshal([]byte(inspect.OutputToString()), &results)
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
Expect(results).To(HaveLen(1))
|
||||
result := results[0]
|
||||
Expect(result).To(HaveField("Name", netName))
|
||||
Expect(result.Subnets).To(HaveLen(1))
|
||||
Expect(result.Subnets[0].Subnet.String()).To(Equal("fd:ab04::/64"))
|
||||
Expect(result.Subnets[0].Gateway.String()).To(Equal("fd:ab04::1"))
|
||||
Expect(result.Routes[0].Destination.String()).To(Equal("fd:1::/64"))
|
||||
Expect(result.Routes[0].Gateway.String()).To(Equal("fd::1"))
|
||||
Expect(*result.Routes[0].Metric).To(Equal(uint32(120)))
|
||||
|
||||
// Once a container uses a new network, the nic will be created. We should clean those up
|
||||
// as best we can
|
||||
defer removeNetworkDevice(result.NetworkInterface)
|
||||
|
||||
})
|
||||
|
||||
It("podman network create with name and subnet with --opt no_default_route=1", func() {
|
||||
SkipIfCNI(podmanTest)
|
||||
netName := "subnet-" + stringid.GenerateRandomID()
|
||||
nc := podmanTest.Podman([]string{
|
||||
"network",
|
||||
"create",
|
||||
"--subnet",
|
||||
"10.19.15.0/24",
|
||||
"--opt",
|
||||
"no_default_route=1",
|
||||
netName,
|
||||
})
|
||||
nc.WaitWithDefaultTimeout()
|
||||
defer podmanTest.removeNetwork(netName)
|
||||
Expect(nc).Should(Exit(0))
|
||||
|
||||
// Inspect the network configuration
|
||||
inspect := podmanTest.Podman([]string{"network", "inspect", netName})
|
||||
inspect.WaitWithDefaultTimeout()
|
||||
Expect(inspect).Should(Exit(0))
|
||||
|
||||
// Unmarshal the network configuration into something usable
|
||||
var results []types.Network
|
||||
err := json.Unmarshal([]byte(inspect.OutputToString()), &results)
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
Expect(results).To(HaveLen(1))
|
||||
result := results[0]
|
||||
Expect(result).To(HaveField("Name", netName))
|
||||
Expect(result.Subnets).To(HaveLen(1))
|
||||
Expect(result.Subnets[0].Subnet.String()).To(Equal("10.19.15.0/24"))
|
||||
Expect(result.Subnets[0].Gateway.String()).To(Equal("10.19.15.1"))
|
||||
Expect(result.Options[types.NoDefaultRoute]).To(Equal("true"))
|
||||
|
||||
// Once a container uses a new network, the nic will be created. We should clean those up
|
||||
// as best we can
|
||||
defer removeNetworkDevice(result.NetworkInterface)
|
||||
|
||||
})
|
||||
|
||||
It("podman network create with name and IPv6 subnet", func() {
|
||||
netName := "ipv6-" + stringid.GenerateRandomID()
|
||||
nc := podmanTest.Podman([]string{"network", "create", "--subnet", "fd00:1:2:3:4::/64", netName})
|
||||
|
|
|
@ -43,7 +43,9 @@ setup() {
|
|||
fi
|
||||
|
||||
# cgroup-manager=systemd does not work inside a container
|
||||
export _PODMAN_TEST_OPTS="--cgroup-manager=cgroupfs --root=$PODMAN_UPGRADE_WORKDIR/root --runroot=$PODMAN_UPGRADE_WORKDIR/runroot --tmpdir=$PODMAN_UPGRADE_WORKDIR/tmp"
|
||||
# skip_mount_home=true is required so we can share the storage mounts between host and container;
|
||||
# the default c/storage behavior is to make the mount propagation private.
|
||||
export _PODMAN_TEST_OPTS="--storage-opt=skip_mount_home=true --cgroup-manager=cgroupfs --root=$PODMAN_UPGRADE_WORKDIR/root --runroot=$PODMAN_UPGRADE_WORKDIR/runroot --tmpdir=$PODMAN_UPGRADE_WORKDIR/tmp"
|
||||
}
|
||||
|
||||
###############################################################################
|
||||
|
@ -180,7 +182,7 @@ EOF
|
|||
-v /var/lib/cni:/var/lib/cni \
|
||||
-v /etc/cni/net.d:/etc/cni/net.d \
|
||||
-v /dev/shm:/dev/shm \
|
||||
-v $pmroot:$pmroot \
|
||||
-v $pmroot:$pmroot:rshared \
|
||||
$OLD_PODMAN $pmroot/setup
|
||||
|
||||
_PODMAN_TEST_OPTS= wait_for_ready podman_parent
|
||||
|
|
|
@ -0,0 +1,41 @@
|
|||
<!-- BEGIN MICROSOFT SECURITY.MD V0.0.8 BLOCK -->
|
||||
|
||||
## Security
|
||||
|
||||
Microsoft takes the security of our software products and services seriously, which includes all source code repositories managed through our GitHub organizations, which include [Microsoft](https://github.com/microsoft), [Azure](https://github.com/Azure), [DotNet](https://github.com/dotnet), [AspNet](https://github.com/aspnet), [Xamarin](https://github.com/xamarin), and [our GitHub organizations](https://opensource.microsoft.com/).
|
||||
|
||||
If you believe you have found a security vulnerability in any Microsoft-owned repository that meets [Microsoft's definition of a security vulnerability](https://aka.ms/opensource/security/definition), please report it to us as described below.
|
||||
|
||||
## Reporting Security Issues
|
||||
|
||||
**Please do not report security vulnerabilities through public GitHub issues.**
|
||||
|
||||
Instead, please report them to the Microsoft Security Response Center (MSRC) at [https://msrc.microsoft.com/create-report](https://aka.ms/opensource/security/create-report).
|
||||
|
||||
If you prefer to submit without logging in, send email to [secure@microsoft.com](mailto:secure@microsoft.com). If possible, encrypt your message with our PGP key; please download it from the [Microsoft Security Response Center PGP Key page](https://aka.ms/opensource/security/pgpkey).
|
||||
|
||||
You should receive a response within 24 hours. If for some reason you do not, please follow up via email to ensure we received your original message. Additional information can be found at [microsoft.com/msrc](https://aka.ms/opensource/security/msrc).
|
||||
|
||||
Please include the requested information listed below (as much as you can provide) to help us better understand the nature and scope of the possible issue:
|
||||
|
||||
* Type of issue (e.g. buffer overflow, SQL injection, cross-site scripting, etc.)
|
||||
* Full paths of source file(s) related to the manifestation of the issue
|
||||
* The location of the affected source code (tag/branch/commit or direct URL)
|
||||
* Any special configuration required to reproduce the issue
|
||||
* Step-by-step instructions to reproduce the issue
|
||||
* Proof-of-concept or exploit code (if possible)
|
||||
* Impact of the issue, including how an attacker might exploit the issue
|
||||
|
||||
This information will help us triage your report more quickly.
|
||||
|
||||
If you are reporting for a bug bounty, more complete reports can contribute to a higher bounty award. Please visit our [Microsoft Bug Bounty Program](https://aka.ms/opensource/security/bounty) page for more details about our active programs.
|
||||
|
||||
## Preferred Languages
|
||||
|
||||
We prefer all communications to be in English.
|
||||
|
||||
## Policy
|
||||
|
||||
Microsoft follows the principle of [Coordinated Vulnerability Disclosure](https://aka.ms/opensource/security/cvd).
|
||||
|
||||
<!-- END MICROSOFT SECURITY.MD BLOCK -->
|
|
@ -94,23 +94,9 @@ out/delta.tar.gz: bin/init bin/vsockexec bin/cmd/gcs bin/cmd/gcstools bin/cmd/ho
|
|||
tar -zcf $@ -C rootfs .
|
||||
rm -rf rootfs
|
||||
|
||||
-include deps/cmd/gcs.gomake
|
||||
-include deps/cmd/gcstools.gomake
|
||||
-include deps/cmd/hooks/wait-paths.gomake
|
||||
-include deps/cmd/tar2ext4.gomake
|
||||
-include deps/internal/tools/snp-report.gomake
|
||||
|
||||
# Implicit rule for includes that define Go targets.
|
||||
%.gomake: $(SRCROOT)/Makefile
|
||||
bin/cmd/gcs bin/cmd/gcstools bin/cmd/hooks/wait-paths bin/cmd/tar2ext4 bin/internal/tools/snp-report:
|
||||
@mkdir -p $(dir $@)
|
||||
@/bin/echo $(@:deps/%.gomake=bin/%): $(SRCROOT)/hack/gomakedeps.sh > $@.new
|
||||
@/bin/echo -e '\t@mkdir -p $$(dir $$@) $(dir $@)' >> $@.new
|
||||
@/bin/echo -e '\t$$(GO_BUILD) -o $$@.new $$(SRCROOT)/$$(@:bin/%=%)' >> $@.new
|
||||
@/bin/echo -e '\tGO="$(GO)" $$(SRCROOT)/hack/gomakedeps.sh $$@ $$(SRCROOT)/$$(@:bin/%=%) $$(GO_FLAGS) $$(GO_FLAGS_EXTRA) > $(@:%.gomake=%.godeps).new' >> $@.new
|
||||
@/bin/echo -e '\tmv $(@:%.gomake=%.godeps).new $(@:%.gomake=%.godeps)' >> $@.new
|
||||
@/bin/echo -e '\tmv $$@.new $$@' >> $@.new
|
||||
@/bin/echo -e '-include $(@:%.gomake=%.godeps)' >> $@.new
|
||||
mv $@.new $@
|
||||
GOOS=linux $(GO_BUILD) -o $@ $(SRCROOT)/$(@:bin/%=%)
|
||||
|
||||
bin/vsockexec: vsockexec/vsockexec.o vsockexec/vsock.o
|
||||
@mkdir -p bin
|
||||
|
|
|
@ -0,0 +1,85 @@
|
|||
package log
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"net"
|
||||
"reflect"
|
||||
"time"
|
||||
|
||||
"github.com/containerd/containerd/log"
|
||||
)
|
||||
|
||||
const TimeFormat = log.RFC3339NanoFixed
|
||||
|
||||
func FormatTime(t time.Time) string {
|
||||
return t.Format(TimeFormat)
|
||||
}
|
||||
|
||||
// DurationFormat formats a [time.Duration] log entry.
|
||||
//
|
||||
// A nil value signals an error with the formatting.
|
||||
type DurationFormat func(time.Duration) interface{}
|
||||
|
||||
func DurationFormatString(d time.Duration) interface{} { return d.String() }
|
||||
func DurationFormatSeconds(d time.Duration) interface{} { return d.Seconds() }
|
||||
func DurationFormatMilliseconds(d time.Duration) interface{} { return d.Milliseconds() }
|
||||
|
||||
// FormatIO formats net.Conn and other types that have an `Addr()` or `Name()`.
|
||||
//
|
||||
// See FormatEnabled for more information.
|
||||
func FormatIO(ctx context.Context, v interface{}) string {
|
||||
m := make(map[string]string)
|
||||
m["type"] = reflect.TypeOf(v).String()
|
||||
|
||||
switch t := v.(type) {
|
||||
case net.Conn:
|
||||
m["localAddress"] = formatAddr(t.LocalAddr())
|
||||
m["remoteAddress"] = formatAddr(t.RemoteAddr())
|
||||
case interface{ Addr() net.Addr }:
|
||||
m["address"] = formatAddr(t.Addr())
|
||||
default:
|
||||
return Format(ctx, t)
|
||||
}
|
||||
|
||||
return Format(ctx, m)
|
||||
}
|
||||
|
||||
func formatAddr(a net.Addr) string {
|
||||
return a.Network() + "://" + a.String()
|
||||
}
|
||||
|
||||
// Format formats an object into a JSON string, without any indentation or
|
||||
// HTML escapes.
|
||||
// Context is used to output a log warning if the conversion fails.
|
||||
//
|
||||
// This is intended primarily for `trace.StringAttribute()`
|
||||
func Format(ctx context.Context, v interface{}) string {
|
||||
b, err := encode(v)
|
||||
if err != nil {
|
||||
G(ctx).WithError(err).Warning("could not format value")
|
||||
return ""
|
||||
}
|
||||
|
||||
return string(b)
|
||||
}
|
||||
|
||||
func encode(v interface{}) ([]byte, error) {
|
||||
return encodeBuffer(&bytes.Buffer{}, v)
|
||||
}
|
||||
|
||||
func encodeBuffer(buf *bytes.Buffer, v interface{}) ([]byte, error) {
|
||||
enc := json.NewEncoder(buf)
|
||||
enc.SetEscapeHTML(false)
|
||||
enc.SetIndent("", "")
|
||||
|
||||
if err := enc.Encode(v); err != nil {
|
||||
err = fmt.Errorf("could not marshal %T to JSON for logging: %w", v, err)
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// encoder.Encode appends a newline to the end
|
||||
return bytes.TrimSpace(buf.Bytes()), nil
|
||||
}
|
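The Format helper above is intended primarily for feeding structured values into span attributes. As a rough, hedged sketch of that use (the helper name below is hypothetical, and internal/log is only importable from inside the hcsshim module):

package example

import (
	"context"

	"github.com/Microsoft/hcsshim/internal/log" // internal: only importable within hcsshim
	"go.opencensus.io/trace"
)

// addPayloadAttribute renders v as compact JSON (no indentation, no HTML
// escaping) and attaches it to the span as a string attribute.
func addPayloadAttribute(ctx context.Context, span *trace.Span, v interface{}) {
	span.AddAttributes(trace.StringAttribute("payload", log.Format(ctx, v)))
}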
|
@ -1,23 +1,58 @@
|
|||
package log
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"reflect"
|
||||
"time"
|
||||
|
||||
"github.com/Microsoft/hcsshim/internal/logfields"
|
||||
"github.com/containerd/containerd/log"
|
||||
"github.com/sirupsen/logrus"
|
||||
"go.opencensus.io/trace"
|
||||
)
|
||||
|
||||
// Hook serves to intercept and format `logrus.Entry`s before they are passed
|
||||
// to the ETW hook.
|
||||
const nullString = "null"
|
||||
|
||||
// Hook intercepts and formats a [logrus.Entry] before it is logged.
|
||||
//
|
||||
// The containerd shim discards the (formatted) logrus output, and outputs only via ETW.
|
||||
// The Linux GCS outputs logrus entries over stdout, which is consumed by the shim and
|
||||
// then re-output via the ETW hook.
|
||||
type Hook struct{}
|
||||
// The shim either outputs the logs through an ETW hook, discarding the (formatted) output
|
||||
// or logs output to a pipe for logging binaries to consume.
|
||||
// The Linux GCS outputs logrus entries over stdout, which is then consumed and re-output
|
||||
// by the shim.
|
||||
type Hook struct {
|
||||
// EncodeAsJSON formats structs, maps, arrays, slices, and [bytes.Buffer] as JSON.
|
||||
// Variables of [bytes.Buffer] will be converted to []byte.
|
||||
//
|
||||
// Default is false.
|
||||
EncodeAsJSON bool
|
||||
|
||||
// FormatTime specifies the format for [time.Time] variables.
|
||||
// An empty string disables formatting.
|
||||
// When disabled, the fallback will be the JSON encoding, if enabled.
|
||||
//
|
||||
// Default is [github.com/containerd/containerd/log.RFC3339NanoFixed].
|
||||
TimeFormat string
|
||||
|
||||
// DurationFormat converts a [time.Duration] field to an appropriate encoding.
|
||||
// nil disables formatting.
|
||||
// When disabled, the fallback will be the JSON encoding, if enabled.
|
||||
//
|
||||
// Default is [DurationFormatString], which appends a duration unit after the value.
|
||||
DurationFormat DurationFormat
|
||||
|
||||
// AddSpanContext adds [logfields.TraceID] and [logfields.SpanID] fields to
|
||||
// the entry from the span context stored in [logrus.Entry.Context], if it exists.
|
||||
AddSpanContext bool
|
||||
}
|
||||
|
||||
var _ logrus.Hook = &Hook{}
|
||||
|
||||
func NewHook() *Hook {
|
||||
return &Hook{}
|
||||
return &Hook{
|
||||
TimeFormat: log.RFC3339NanoFixed,
|
||||
DurationFormat: DurationFormatString,
|
||||
AddSpanContext: true,
|
||||
}
|
||||
}
|
||||
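A minimal sketch of wiring the hook into logrus, assuming it is done from inside the hcsshim module; the package and option choices here are illustrative, not the project's actual setup:

package example

import (
	"github.com/Microsoft/hcsshim/internal/log" // internal: only importable within hcsshim
	"github.com/sirupsen/logrus"
)

func init() {
	// NewHook enables time/duration formatting and span-context fields by
	// default; JSON encoding of complex fields remains opt-in.
	h := log.NewHook()
	h.EncodeAsJSON = true
	logrus.AddHook(h)
}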
|
||||
func (h *Hook) Levels() []logrus.Level {
|
||||
|
@ -25,14 +60,108 @@ func (h *Hook) Levels() []logrus.Level {
|
|||
}
|
||||
|
||||
func (h *Hook) Fire(e *logrus.Entry) (err error) {
|
||||
// JSON encode, if necessary, then add span information
|
||||
h.encode(e)
|
||||
h.addSpanContext(e)
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// encode loops through all the fields in the [logrus.Entry] and encodes them according to
|
||||
// the settings in [Hook].
|
||||
// If [Hook.TimeFormat] is non-empty, it will be passed to [time.Time.Format] for
|
||||
// fields of type [time.Time].
|
||||
//
|
||||
// If [Hook.EncodeAsJSON] is true, then fields that are not numeric, boolean, strings, or
|
||||
// errors will be encoded via a [json.Marshal] (with HTML escaping disabled).
|
||||
// Channel- and function-typed fields, as well as unsafe pointers, are left alone and not encoded.
|
||||
//
|
||||
// If [Hook.TimeFormat] and [Hook.DurationFormat] are empty and [Hook.EncodeAsJSON] is false,
|
||||
// then this is a no-op.
|
||||
func (h *Hook) encode(e *logrus.Entry) {
|
||||
d := e.Data
|
||||
|
||||
formatTime := h.TimeFormat != ""
|
||||
formatDuration := h.DurationFormat != nil
|
||||
if !(h.EncodeAsJSON || formatTime || formatDuration) {
|
||||
return
|
||||
}
|
||||
|
||||
for k, v := range d {
|
||||
// encode types with dedicated formatting options first
|
||||
|
||||
if vv, ok := v.(time.Time); formatTime && ok {
|
||||
d[k] = vv.Format(h.TimeFormat)
|
||||
continue
|
||||
}
|
||||
|
||||
if vv, ok := v.(time.Duration); formatDuration && ok {
|
||||
d[k] = h.DurationFormat(vv)
|
||||
continue
|
||||
}
|
||||
|
||||
// general case JSON encoding
|
||||
|
||||
if !h.EncodeAsJSON {
|
||||
continue
|
||||
}
|
||||
|
||||
switch vv := v.(type) {
|
||||
// built in types
|
||||
// "json" marshals errors as "{}", so leave alone here
|
||||
case bool, string, error, uintptr,
|
||||
int8, int16, int32, int64, int,
|
||||
uint8, uint32, uint64, uint,
|
||||
float32, float64:
|
||||
continue
|
||||
|
||||
// Rather than setting d[k] = vv.String(), JSON encode []byte value, since it
|
||||
// may be a binary payload and not representable as a string.
|
||||
// `case bytes.Buffer,*bytes.Buffer:` resolves `vv` to `interface{}`,
|
||||
// so cannot use `vv.Bytes`.
|
||||
// Could move to below the `reflect.Indirect()` call below, but
|
||||
// that would require additional typematching and dereferencing.
|
||||
// Easier to keep these duplicate branches here.
|
||||
case bytes.Buffer:
|
||||
v = vv.Bytes()
|
||||
case *bytes.Buffer:
|
||||
v = vv.Bytes()
|
||||
}
|
||||
|
||||
// dereference pointer or interface variables
|
||||
rv := reflect.Indirect(reflect.ValueOf(v))
|
||||
// check if `v` is a null pointer
|
||||
if !rv.IsValid() {
|
||||
d[k] = nullString
|
||||
continue
|
||||
}
|
||||
|
||||
switch rv.Kind() {
|
||||
case reflect.Map, reflect.Struct, reflect.Array, reflect.Slice:
|
||||
default:
|
||||
// Bool, [U]?Int*, Float*, Complex*, Uintptr, String: encoded as normal
|
||||
// Chan, Func: not supported by json
|
||||
// Interface, Pointer: dereferenced above
|
||||
// UnsafePointer: not supported by json, not safe to de-reference; leave alone
|
||||
continue
|
||||
}
|
||||
|
||||
b, err := encode(v)
|
||||
if err != nil {
|
||||
// Errors are written to stderr (i.e., to `panic.log`) and stop the remaining
|
||||
// hooks (i.e., exporting to ETW) from firing. So add encoding errors to
|
||||
// the entry data to be written out, but keep on processing.
|
||||
d[k+"-"+logrus.ErrorKey] = err.Error()
|
||||
// keep the original `v` as the value,
|
||||
continue
|
||||
}
|
||||
d[k] = string(b)
|
||||
}
|
||||
}
|
||||
|
||||
func (h *Hook) addSpanContext(e *logrus.Entry) {
|
||||
ctx := e.Context
|
||||
if ctx == nil {
|
||||
if !h.AddSpanContext || ctx == nil {
|
||||
return
|
||||
}
|
||||
span := trace.FromContext(ctx)
|
||||
|
|
|
@ -4,7 +4,6 @@ import (
|
|||
"bytes"
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"strings"
|
||||
"sync/atomic"
|
||||
|
||||
hcsschema "github.com/Microsoft/hcsshim/internal/hcs/schema2"
|
||||
|
@ -56,11 +55,11 @@ func ScrubProcessParameters(s string) (string, error) {
|
|||
}
|
||||
pp.Environment = map[string]string{_scrubbedReplacement: _scrubbedReplacement}
|
||||
|
||||
buf := bytes.NewBuffer(b[:0])
|
||||
if err := encode(buf, pp); err != nil {
|
||||
b, err := encodeBuffer(bytes.NewBuffer(b[:0]), pp)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
return strings.TrimSpace(buf.String()), nil
|
||||
return string(b), nil
|
||||
}
|
||||
|
||||
// ScrubBridgeCreate scrubs requests sent over the bridge of type
|
||||
|
@ -150,21 +149,12 @@ func scrubBytes(b []byte, scrub scrubberFunc) ([]byte, error) {
|
|||
return nil, err
|
||||
}
|
||||
|
||||
buf := &bytes.Buffer{}
|
||||
if err := encode(buf, m); err != nil {
|
||||
b, err := encode(m)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return bytes.TrimSpace(buf.Bytes()), nil
|
||||
}
|
||||
|
||||
func encode(buf *bytes.Buffer, v interface{}) error {
|
||||
enc := json.NewEncoder(buf)
|
||||
enc.SetEscapeHTML(false)
|
||||
if err := enc.Encode(v); err != nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
return b, nil
|
||||
}
|
||||
|
||||
func isRequestBase(m genMap) bool {
|
||||
|
|
|
@ -0,0 +1,69 @@
|
|||
package oc
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"io"
|
||||
"net"
|
||||
"os"
|
||||
|
||||
"github.com/containerd/containerd/errdefs"
|
||||
"google.golang.org/grpc/codes"
|
||||
"google.golang.org/grpc/status"
|
||||
)
|
||||
|
||||
// todo: break import cycle with "internal/hcs/errors.go" and reference errors defined there
|
||||
// todo: add errors defined in "internal/guest/gcserror" (Hresult does not implement error)
|
||||
|
||||
func toStatusCode(err error) codes.Code {
|
||||
// checks if err implements GRPCStatus() *"google.golang.org/grpc/status".Status,
|
||||
// wraps an error defined in "github.com/containerd/containerd/errdefs", or is a
|
||||
// context timeout or cancelled error
|
||||
if s, ok := status.FromError(errdefs.ToGRPC(err)); ok {
|
||||
return s.Code()
|
||||
}
|
||||
|
||||
switch {
|
||||
// case isAny(err):
|
||||
// return codes.Cancelled
|
||||
case isAny(err, os.ErrInvalid):
|
||||
return codes.InvalidArgument
|
||||
case isAny(err, os.ErrDeadlineExceeded):
|
||||
return codes.DeadlineExceeded
|
||||
case isAny(err, os.ErrNotExist):
|
||||
return codes.NotFound
|
||||
case isAny(err, os.ErrExist):
|
||||
return codes.AlreadyExists
|
||||
case isAny(err, os.ErrPermission):
|
||||
return codes.PermissionDenied
|
||||
// case isAny(err):
|
||||
// return codes.ResourceExhausted
|
||||
case isAny(err, os.ErrClosed, net.ErrClosed, io.ErrClosedPipe, io.ErrShortBuffer):
|
||||
return codes.FailedPrecondition
|
||||
// case isAny(err):
|
||||
// return codes.Aborted
|
||||
// case isAny(err):
|
||||
// return codes.OutOfRange
|
||||
// case isAny(err):
|
||||
// return codes.Unimplemented
|
||||
case isAny(err, io.ErrNoProgress):
|
||||
return codes.Internal
|
||||
// case isAny(err):
|
||||
// return codes.Unavailable
|
||||
case isAny(err, io.ErrShortWrite, io.ErrUnexpectedEOF):
|
||||
return codes.DataLoss
|
||||
// case isAny(err):
|
||||
// return codes.Unauthenticated
|
||||
default:
|
||||
return codes.Unknown
|
||||
}
|
||||
}
|
||||
|
||||
// isAny returns true if errors.Is is true for any of the provided errors, errs.
|
||||
func isAny(err error, errs ...error) bool {
|
||||
for _, e := range errs {
|
||||
if errors.Is(err, e) {
|
||||
return true
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
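A hedged illustration of the mapping above; toStatusCode is unexported, so this only compiles inside internal/oc, and the sample errors are chosen purely for illustration:

// os.ErrNotExist (or anything wrapping it) maps to codes.NotFound,
// os.ErrPermission to codes.PermissionDenied, and an unrecognized error
// falls through to codes.Unknown.
func exampleStatusCodes() []codes.Code {
	return []codes.Code{
		toStatusCode(os.ErrNotExist),        // codes.NotFound
		toStatusCode(os.ErrPermission),      // codes.PermissionDenied
		toStatusCode(errors.New("unknown")), // codes.Unknown (default case)
	}
}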
|
@ -3,19 +3,26 @@ package oc
|
|||
import (
|
||||
"github.com/sirupsen/logrus"
|
||||
"go.opencensus.io/trace"
|
||||
"google.golang.org/grpc/codes"
|
||||
|
||||
"github.com/Microsoft/hcsshim/internal/log"
|
||||
"github.com/Microsoft/hcsshim/internal/logfields"
|
||||
)
|
||||
|
||||
var _ = (trace.Exporter)(&LogrusExporter{})
|
||||
const spanMessage = "Span"
|
||||
|
||||
var _errorCodeKey = logrus.ErrorKey + "Code"
|
||||
|
||||
// LogrusExporter is an OpenCensus `trace.Exporter` that exports
|
||||
// `trace.SpanData` to logrus output.
|
||||
type LogrusExporter struct {
|
||||
}
|
||||
type LogrusExporter struct{}
|
||||
|
||||
var _ trace.Exporter = &LogrusExporter{}
|
||||
|
||||
// ExportSpan exports `s` based on the following rules:
|
||||
//
|
||||
// 1. All output will contain `s.Attributes`, `s.TraceID`, `s.SpanID`,
|
||||
// `s.ParentSpanID` for correlation
|
||||
// 1. All output will contain `s.Attributes`, `s.SpanKind`, `s.TraceID`,
|
||||
// `s.SpanID`, and `s.ParentSpanID` for correlation
|
||||
//
|
||||
// 2. Any calls to .Annotate will not be supported.
|
||||
//
|
||||
|
@ -23,21 +30,57 @@ type LogrusExporter struct {
|
|||
// `s.Status.Code != 0` in which case it will be written at `logrus.ErrorLevel`
|
||||
// providing `s.Status.Message` as the error value.
|
||||
func (le *LogrusExporter) ExportSpan(s *trace.SpanData) {
|
||||
// Combine all span annotations with traceID, spanID, parentSpanID
|
||||
baseEntry := logrus.WithFields(logrus.Fields(s.Attributes))
|
||||
baseEntry.Data["traceID"] = s.TraceID.String()
|
||||
baseEntry.Data["spanID"] = s.SpanID.String()
|
||||
baseEntry.Data["parentSpanID"] = s.ParentSpanID.String()
|
||||
baseEntry.Data["startTime"] = s.StartTime
|
||||
baseEntry.Data["endTime"] = s.EndTime
|
||||
baseEntry.Data["duration"] = s.EndTime.Sub(s.StartTime).String()
|
||||
baseEntry.Data["name"] = s.Name
|
||||
baseEntry.Time = s.StartTime
|
||||
if s.DroppedAnnotationCount > 0 {
|
||||
logrus.WithFields(logrus.Fields{
|
||||
"name": s.Name,
|
||||
logfields.TraceID: s.TraceID.String(),
|
||||
logfields.SpanID: s.SpanID.String(),
|
||||
"dropped": s.DroppedAttributeCount,
|
||||
"maxAttributes": len(s.Attributes),
|
||||
}).Warning("span had dropped attributes")
|
||||
}
|
||||
|
||||
entry := log.L.Dup()
|
||||
// Combine all span annotations with span data (eg, trace ID, span ID, parent span ID,
|
||||
// error, status code)
|
||||
// (OC) Span attributes are guaranteed to be strings, bools, or int64s, so we
|
||||
// can skip overhead in entry.WithFields() and add them directly to entry.Data.
|
||||
// Preallocate ahead of time, since we should add, at most, 10 additional entries
|
||||
data := make(logrus.Fields, len(entry.Data)+len(s.Attributes)+10)
|
||||
|
||||
// Default log entry may have preexisting/application-wide data
|
||||
for k, v := range entry.Data {
|
||||
data[k] = v
|
||||
}
|
||||
for k, v := range s.Attributes {
|
||||
data[k] = v
|
||||
}
|
||||
|
||||
data[logfields.Name] = s.Name
|
||||
data[logfields.TraceID] = s.TraceID.String()
|
||||
data[logfields.SpanID] = s.SpanID.String()
|
||||
data[logfields.ParentSpanID] = s.ParentSpanID.String()
|
||||
data[logfields.StartTime] = s.StartTime
|
||||
data[logfields.EndTime] = s.EndTime
|
||||
data[logfields.Duration] = s.EndTime.Sub(s.StartTime)
|
||||
if sk := spanKindToString(s.SpanKind); sk != "" {
|
||||
data["spanKind"] = sk
|
||||
}
|
||||
|
||||
level := logrus.InfoLevel
|
||||
if s.Status.Code != 0 {
|
||||
level = logrus.ErrorLevel
|
||||
baseEntry.Data[logrus.ErrorKey] = s.Status.Message
|
||||
|
||||
// don't overwrite existing "error" or "errorCode" attributes
|
||||
if _, ok := data[logrus.ErrorKey]; !ok {
|
||||
data[logrus.ErrorKey] = s.Status.Message
|
||||
}
|
||||
baseEntry.Log(level, "Span")
|
||||
if _, ok := data[_errorCodeKey]; !ok {
|
||||
data[_errorCodeKey] = codes.Code(s.Status.Code).String()
|
||||
}
|
||||
}
|
||||
|
||||
entry.Data = data
|
||||
entry.Time = s.StartTime
|
||||
entry.Log(level, spanMessage)
|
||||
}
|
||||
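A small, hedged sketch of registering the exporter with OpenCensus (again only possible from within hcsshim, since internal/oc is an internal package):

package example

import (
	"go.opencensus.io/trace"

	"github.com/Microsoft/hcsshim/internal/oc" // internal: only importable within hcsshim
)

func init() {
	// Every exported span is re-emitted as a logrus entry with message "Span",
	// at Info level, or Error level when the span status code is non-zero.
	trace.RegisterExporter(&oc.LogrusExporter{})
	trace.ApplyConfig(trace.Config{DefaultSampler: trace.AlwaysSample()})
}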
|
|
|
@ -14,8 +14,7 @@ var DefaultSampler = trace.AlwaysSample()
|
|||
func SetSpanStatus(span *trace.Span, err error) {
|
||||
status := trace.Status{}
|
||||
if err != nil {
|
||||
// TODO: JTERRY75 - Handle errors in a non-generic way
|
||||
status.Code = trace.StatusCodeUnknown
|
||||
status.Code = int32(toStatusCode(err))
|
||||
status.Message = err.Error()
|
||||
}
|
||||
span.SetStatus(status)
|
||||
|
@ -46,3 +45,14 @@ func update(ctx context.Context, s *trace.Span) (context.Context, *trace.Span) {
|
|||
|
||||
var WithServerSpanKind = trace.WithSpanKind(trace.SpanKindServer)
|
||||
var WithClientSpanKind = trace.WithSpanKind(trace.SpanKindClient)
|
||||
|
||||
func spanKindToString(sk int) string {
|
||||
switch sk {
|
||||
case trace.SpanKindClient:
|
||||
return "client"
|
||||
case trace.SpanKindServer:
|
||||
return "server"
|
||||
default:
|
||||
return ""
|
||||
}
|
||||
}
|
||||
|
|
|
@ -23,20 +23,14 @@ type (
|
|||
)
|
||||
|
||||
type explicitAccess struct {
|
||||
//nolint:structcheck
|
||||
accessPermissions accessMask
|
||||
//nolint:structcheck
|
||||
accessMode accessMode
|
||||
//nolint:structcheck
|
||||
inheritance inheritMode
|
||||
//nolint:structcheck
|
||||
trustee trustee
|
||||
}
|
||||
|
||||
type trustee struct {
|
||||
//nolint:unused,structcheck
|
||||
multipleTrustee *trustee
|
||||
//nolint:unused,structcheck
|
||||
multipleTrusteeOperation int32
|
||||
trusteeForm trusteeForm
|
||||
trusteeType trusteeType
|
||||
|
|
|
@ -17,6 +17,7 @@ import (
|
|||
"github.com/containernetworking/cni/libcni"
|
||||
"github.com/containers/common/libnetwork/types"
|
||||
"github.com/containers/common/pkg/config"
|
||||
cutil "github.com/containers/common/pkg/util"
|
||||
"github.com/containers/storage/pkg/lockfile"
|
||||
"github.com/containers/storage/pkg/unshare"
|
||||
"github.com/sirupsen/logrus"
|
||||
|
@ -295,6 +296,43 @@ func (n *cniNetwork) DefaultInterfaceName() string {
|
|||
return cniDeviceName
|
||||
}
|
||||
|
||||
// NetworkInfo returns the network information about binary path,
|
||||
// package version and program version.
|
||||
func (n *cniNetwork) NetworkInfo() types.NetworkInfo {
|
||||
path := ""
|
||||
packageVersion := ""
|
||||
for _, p := range n.cniPluginDirs {
|
||||
ver := cutil.PackageVersion(p)
|
||||
if ver != cutil.UnknownPackage {
|
||||
path = p
|
||||
packageVersion = ver
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
info := types.NetworkInfo{
|
||||
Backend: types.CNI,
|
||||
Package: packageVersion,
|
||||
Path: path,
|
||||
}
|
||||
|
||||
dnsPath := filepath.Join(path, "dnsname")
|
||||
dnsPackage := cutil.PackageVersion(dnsPath)
|
||||
dnsProgram, err := cutil.ProgramVersionDnsname(dnsPath)
|
||||
if err != nil {
|
||||
logrus.Infof("Failed to get the dnsname plugin version: %v", err)
|
||||
}
|
||||
if _, err := os.Stat(dnsPath); err == nil {
|
||||
info.DNS = types.DNSNetworkInfo{
|
||||
Path: dnsPath,
|
||||
Package: dnsPackage,
|
||||
Version: dnsProgram,
|
||||
}
|
||||
}
|
||||
|
||||
return info
|
||||
}
|
||||
|
||||
func (n *cniNetwork) Network(nameOrID string) (*types.Network, error) {
|
||||
network, err := n.getNetwork(nameOrID)
|
||||
if err != nil {
|
||||
|
|
|
@ -81,6 +81,43 @@ func ValidateSubnets(network *types.Network, addGateway bool, usedNetworks []*ne
|
|||
return nil
|
||||
}
|
||||
|
||||
func ValidateRoutes(routes []types.Route) error {
|
||||
for _, route := range routes {
|
||||
err := ValidateRoute(route)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func ValidateRoute(route types.Route) error {
|
||||
if route.Destination.IP == nil {
|
||||
return fmt.Errorf("route destination ip nil")
|
||||
}
|
||||
|
||||
if route.Destination.Mask == nil {
|
||||
return fmt.Errorf("route destination mask nil")
|
||||
}
|
||||
|
||||
if route.Gateway == nil {
|
||||
return fmt.Errorf("route gateway nil")
|
||||
}
|
||||
|
||||
// Reparse to ensure destination is valid.
|
||||
ip, ipNet, err := net.ParseCIDR(route.Destination.String())
|
||||
if err != nil {
|
||||
return fmt.Errorf("route destination invalid: %w", err)
|
||||
}
|
||||
|
||||
// check that destination is a network and not an address
|
||||
if !ip.Equal(ipNet.IP) {
|
||||
return fmt.Errorf("route destination invalid")
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
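The reparse-and-compare step above is what rejects host addresses as route destinations; a quick standalone sketch of the underlying net.ParseCIDR behavior:

package main

import (
	"fmt"
	"net"
)

func main() {
	// The parsed IP differs from the masked network IP for a host address...
	ip, ipNet, _ := net.ParseCIDR("10.21.0.5/24")
	fmt.Println(ip.Equal(ipNet.IP)) // false -> rejected as "route destination invalid"
	// ...and matches it for a proper network prefix.
	ip, ipNet, _ = net.ParseCIDR("10.21.0.0/24")
	fmt.Println(ip.Equal(ipNet.IP)) // true -> accepted
}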
|
||||
func ValidateSetupOptions(n NetUtil, namespacePath string, options types.SetupOptions) error {
|
||||
if namespacePath == "" {
|
||||
return errors.New("namespacePath is empty")
|
||||
|
|
|
@ -198,6 +198,13 @@ func (n *netavarkNetwork) networkCreate(newNetwork *types.Network, defaultNet bo
|
|||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
case types.NoDefaultRoute:
|
||||
val, err := strconv.ParseBool(value)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
// Rust only supports "true" or "false" while Go can parse 1 and 0 as well, so we need to convert it
|
||||
newNetwork.Options[types.NoDefaultRoute] = strconv.FormatBool(val)
|
||||
|
||||
default:
|
||||
return nil, fmt.Errorf("unsupported bridge network option %s", key)
|
||||
|
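The normalization noted in the comment above boils down to the difference between Go's and netavark's boolean parsing; a tiny standalone sketch:

package main

import (
	"fmt"
	"strconv"
)

func main() {
	// Go accepts "1", "t", "TRUE", etc., but netavark (Rust) only accepts
	// "true"/"false", so the parsed value is re-serialized before it is stored.
	v, _ := strconv.ParseBool("1")
	fmt.Println(strconv.FormatBool(v)) // "true"
}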
@ -237,6 +244,12 @@ func (n *netavarkNetwork) networkCreate(newNetwork *types.Network, defaultNet bo
|
|||
return nil, err
|
||||
}
|
||||
|
||||
// validate routes
|
||||
err = internalutil.ValidateRoutes(newNetwork.Routes)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
newNetwork.Created = time.Now()
|
||||
|
||||
if !defaultNet {
|
||||
|
@ -317,6 +330,24 @@ func createIpvlanOrMacvlan(network *types.Network) error {
|
|||
if err != nil {
|
||||
return err
|
||||
}
|
||||
case types.NoDefaultRoute:
|
||||
val, err := strconv.ParseBool(value)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
// Rust only supports "true" or "false" while Go can parse 1 and 0 as well, so we need to convert it
|
||||
network.Options[types.NoDefaultRoute] = strconv.FormatBool(val)
|
||||
case types.BclimOption:
|
||||
if isMacVlan {
|
||||
_, err := strconv.ParseInt(value, 10, 32)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to parse %q option: %w", key, err)
|
||||
}
|
||||
// do not fallthrough for macvlan
|
||||
break
|
||||
}
|
||||
// bclim is only valid for macvlan, not ipvlan, so fall through to the error case
|
||||
fallthrough
|
||||
default:
|
||||
return fmt.Errorf("unsupported %s network option %s", driver, key)
|
||||
}
|
||||
|
|
|
@ -15,6 +15,7 @@ import (
|
|||
"github.com/containers/common/libnetwork/internal/util"
|
||||
"github.com/containers/common/libnetwork/types"
|
||||
"github.com/containers/common/pkg/config"
|
||||
cutil "github.com/containers/common/pkg/util"
|
||||
"github.com/containers/storage/pkg/lockfile"
|
||||
"github.com/containers/storage/pkg/unshare"
|
||||
"github.com/sirupsen/logrus"
|
||||
|
@ -336,6 +337,37 @@ func (n *netavarkNetwork) DefaultInterfaceName() string {
|
|||
return defaultBridgeName
|
||||
}
|
||||
|
||||
// NetworkInfo returns the network information about binary path,
|
||||
// package version and program version.
|
||||
func (n *netavarkNetwork) NetworkInfo() types.NetworkInfo {
|
||||
path := n.netavarkBinary
|
||||
packageVersion := cutil.PackageVersion(path)
|
||||
programVersion, err := cutil.ProgramVersion(path)
|
||||
if err != nil {
|
||||
logrus.Infof("Failed to get the netavark version: %v", err)
|
||||
}
|
||||
info := types.NetworkInfo{
|
||||
Backend: types.Netavark,
|
||||
Version: programVersion,
|
||||
Package: packageVersion,
|
||||
Path: path,
|
||||
}
|
||||
|
||||
dnsPath := n.aardvarkBinary
|
||||
dnsPackage := cutil.PackageVersion(dnsPath)
|
||||
dnsProgram, err := cutil.ProgramVersion(dnsPath)
|
||||
if err != nil {
|
||||
logrus.Infof("Failed to get the aardvark version: %v", err)
|
||||
}
|
||||
info.DNS = types.DNSNetworkInfo{
|
||||
Package: dnsPackage,
|
||||
Path: dnsPath,
|
||||
Version: dnsProgram,
|
||||
}
|
||||
|
||||
return info
|
||||
}
|
||||
|
||||
func (n *netavarkNetwork) Network(nameOrID string) (*types.Network, error) {
|
||||
network, err := n.getNetwork(nameOrID)
|
||||
if err != nil {
|
||||
|
|
|
@ -41,6 +41,8 @@ const (
|
|||
ModeOption = "mode"
|
||||
IsolateOption = "isolate"
|
||||
MetricOption = "metric"
|
||||
NoDefaultRoute = "no_default_route"
|
||||
BclimOption = "bclim"
|
||||
)
|
||||
|
||||
type NetworkBackend string
|
||||
|
|
|
@ -34,6 +34,10 @@ type ContainerNetwork interface {
|
|||
// DefaultNetworkName will return the default network name
|
||||
// for this interface.
|
||||
DefaultNetworkName() string
|
||||
|
||||
// NetworkInfo returns the network information about backend type,
|
||||
// binary path, package version and so on.
|
||||
NetworkInfo() NetworkInfo
|
||||
}
|
||||
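A hedged sketch of consuming the new interface method; the helper below is hypothetical and works with any ContainerNetwork implementation (CNI or netavark):

package example

import (
	"fmt"

	"github.com/containers/common/libnetwork/types"
)

// printBackendInfo reports which network backend is in use and where its
// binaries come from, including the DNS plugin when one is found.
func printBackendInfo(n types.ContainerNetwork) {
	info := n.NetworkInfo()
	fmt.Printf("backend=%s path=%s package=%s version=%s\n",
		info.Backend, info.Path, info.Package, info.Version)
	if info.DNS.Path != "" {
		fmt.Printf("dns plugin: %s (%s)\n", info.DNS.Path, info.DNS.Version)
	}
}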
|
||||
// Network describes the Network attributes.
|
||||
|
@ -50,6 +54,8 @@ type Network struct {
|
|||
Created time.Time `json:"created,omitempty"`
|
||||
// Subnets to use for this network.
|
||||
Subnets []Subnet `json:"subnets,omitempty"`
|
||||
// Routes to use for this network.
|
||||
Routes []Route `json:"routes,omitempty"`
|
||||
// IPv6Enabled if set to true an ipv6 subnet should be created for this net.
|
||||
IPv6Enabled bool `json:"ipv6_enabled"`
|
||||
// Internal is whether the Network should not have external routes
|
||||
|
@ -80,6 +86,22 @@ type NetworkUpdateOptions struct {
|
|||
RemoveDNSServers []string `json:"remove_dns_servers,omitempty"`
|
||||
}
|
||||
|
||||
// NetworkInfo contains the network information.
|
||||
type NetworkInfo struct {
|
||||
Backend NetworkBackend `json:"backend"`
|
||||
Version string `json:"version,omitempty"`
|
||||
Package string `json:"package,omitempty"`
|
||||
Path string `json:"path,omitempty"`
|
||||
DNS DNSNetworkInfo `json:"dns,omitempty"`
|
||||
}
|
||||
|
||||
// DNSNetworkInfo contains the DNS information.
|
||||
type DNSNetworkInfo struct {
|
||||
Version string `json:"version,omitempty"`
|
||||
Package string `json:"package,omitempty"`
|
||||
Path string `json:"path,omitempty"`
|
||||
}
|
||||
|
||||
// IPNet is used as custom net.IPNet type to add Marshal/Unmarshal methods.
|
||||
type IPNet struct {
|
||||
net.IPNet
|
||||
|
@ -169,6 +191,17 @@ type Subnet struct {
|
|||
LeaseRange *LeaseRange `json:"lease_range,omitempty"`
|
||||
}
|
||||
|
||||
type Route struct {
|
||||
// Destination for this route in CIDR form.
|
||||
// swagger:strfmt string
|
||||
Destination IPNet `json:"destination"`
|
||||
// Gateway IP for this route.
|
||||
// swagger:strfmt string
|
||||
Gateway net.IP `json:"gateway"`
|
||||
// Metric for this route. Optional.
|
||||
Metric *uint32 `json:"metric,omitempty"`
|
||||
}
|
||||
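A short, hedged sketch of building a Route programmatically, reusing the values from the e2e test above and assuming the package's ParseCIDR helper (error handling elided for brevity):

package example

import (
	"net"

	"github.com/containers/common/libnetwork/types"
)

func exampleRoute() types.Route {
	metric := uint32(120)
	dest, _ := types.ParseCIDR("10.21.1.0/24")
	return types.Route{
		Destination: dest,
		Gateway:     net.ParseIP("10.19.13.250"),
		Metric:      &metric, // nil means no explicit metric
	}
}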
|
||||
// LeaseRange contains the range where IPs are leased.
|
||||
type LeaseRange struct {
|
||||
// StartIP is the first IP in the subnet which should be used to assign IPs.
|
||||
|
|
|
@ -584,6 +584,10 @@ type NetworkConfig struct {
|
|||
// are always assigned randomly.
|
||||
DefaultSubnetPools []SubnetPool `toml:"default_subnet_pools,omitempty"`
|
||||
|
||||
// DefaultRootlessNetworkCmd is used to set the default rootless network
|
||||
// program, either "slirp4netns" (default) or "pasta".
|
||||
DefaultRootlessNetworkCmd string `toml:"default_rootless_network_cmd,omitempty"`
|
||||
|
||||
// NetworkConfigDir is where network configuration files are stored.
|
||||
NetworkConfigDir string `toml:"network_config_dir,omitempty"`
|
||||
|
||||
|
@ -591,6 +595,10 @@ type NetworkConfig struct {
|
|||
// for netavark rootful bridges with dns enabled. This can be necessary
|
||||
// when other dns forwarders run on the machine. 53 is used if unset.
|
||||
DNSBindPort uint16 `toml:"dns_bind_port,omitempty,omitzero"`
|
||||
|
||||
// PastaOptions contains a default list of pasta(1) options that should
|
||||
// be used when running pasta.
|
||||
PastaOptions []string `toml:"pasta_options,omitempty"`
|
||||
}
|
||||
|
||||
type SubnetPool struct {
|
||||
|
|
|
@ -344,6 +344,13 @@ default_sysctls = [
|
|||
# {"base" = "10.128.0.0/9", "size" = 24},
|
||||
#]
|
||||
|
||||
|
||||
|
||||
# Configure which rootless network program to use by default. Valid options are
|
||||
# `slirp4netns` (default) and `pasta`.
|
||||
#
|
||||
#default_rootless_network_cmd = "slirp4netns"
|
||||
|
||||
# Path to the directory where network configuration files are located.
|
||||
# For the CNI backend the default is "/etc/cni/net.d" as root
|
||||
# and "$HOME/.config/cni/net.d" as rootless.
|
||||
|
@ -359,6 +366,11 @@ default_sysctls = [
|
|||
#
|
||||
#dns_bind_port = 53
|
||||
|
||||
# A list of default pasta options that should be used when running pasta.
|
||||
# It accepts the pasta cli options, see pasta(1) for the full list of options.
|
||||
#
|
||||
#pasta_options = []
|
||||
|
||||
[engine]
|
||||
# Index to the active service
|
||||
#
|
||||
|
@ -407,7 +419,7 @@ default_sysctls = [
|
|||
# Format is a single character [a-Z] or a comma separated sequence of
|
||||
# `ctrl-<value>`, where `<value>` is one of:
|
||||
# `a-z`, `@`, `^`, `[`, `\`, `]`, `^` or `_`
|
||||
#
|
||||
# Specifying "" disables this feature.
|
||||
#detach_keys = "ctrl-p,ctrl-q"
|
||||
|
||||
# Determines whether engine will reserve ports on the host when they are
|
||||
|
|
|
@ -218,6 +218,7 @@ func DefaultConfig() (*Config, error) {
|
|||
DefaultNetwork: "podman",
|
||||
DefaultSubnet: DefaultSubnet,
|
||||
DefaultSubnetPools: DefaultSubnetPools,
|
||||
DefaultRootlessNetworkCmd: "slirp4netns",
|
||||
DNSBindPort: 0,
|
||||
CNIPluginDirs: DefaultCNIPluginDirs,
|
||||
NetavarkPluginDirs: DefaultNetavarkPluginDirs,
|
||||
|
@ -283,6 +284,7 @@ func defaultConfigFromMemory() (*EngineConfig, error) {
|
|||
c.VolumePath = filepath.Join(storeOpts.GraphRoot, "volumes")
|
||||
|
||||
c.VolumePluginTimeout = DefaultVolumePluginTimeout
|
||||
c.CompressionFormat = "gzip"
|
||||
|
||||
c.HelperBinariesDir = defaultHelperBinariesDir
|
||||
if additionalHelperBinariesDir != "" {
|
||||
|
|
|
@ -1,6 +1,94 @@
|
|||
package util
|
||||
|
||||
import "regexp"
|
||||
import (
|
||||
"bytes"
|
||||
"fmt"
|
||||
"os/exec"
|
||||
"regexp"
|
||||
"strings"
|
||||
)
|
||||
|
||||
const (
|
||||
UnknownPackage = "Unknown"
|
||||
)
|
||||
|
||||
// Note: This function is copied from containers/podman libpod/util.go
|
||||
// Please see https://github.com/containers/common/pull/1460
|
||||
func queryPackageVersion(cmdArg ...string) string {
|
||||
output := UnknownPackage
|
||||
if 1 < len(cmdArg) {
|
||||
cmd := exec.Command(cmdArg[0], cmdArg[1:]...)
|
||||
if outp, err := cmd.Output(); err == nil {
|
||||
output = string(outp)
|
||||
if cmdArg[0] == "/usr/bin/dpkg" {
|
||||
r := strings.Split(output, ": ")
|
||||
queryFormat := `${Package}_${Version}_${Architecture}`
|
||||
cmd = exec.Command("/usr/bin/dpkg-query", "-f", queryFormat, "-W", r[0])
|
||||
if outp, err := cmd.Output(); err == nil {
|
||||
output = string(outp)
|
||||
}
|
||||
}
|
||||
}
|
||||
if cmdArg[0] == "/sbin/apk" {
|
||||
prefix := cmdArg[len(cmdArg)-1] + " is owned by "
|
||||
output = strings.Replace(output, prefix, "", 1)
|
||||
}
|
||||
}
|
||||
return strings.Trim(output, "\n")
|
||||
}
|
||||
|
||||
// Note: This function is copied from containers/podman libpod/util.go
|
||||
// Please see https://github.com/containers/common/pull/1460
|
||||
func PackageVersion(program string) string { // program is full path
|
||||
packagers := [][]string{
|
||||
{"/usr/bin/rpm", "-q", "-f"},
|
||||
{"/usr/bin/dpkg", "-S"}, // Debian, Ubuntu
|
||||
{"/usr/bin/pacman", "-Qo"}, // Arch
|
||||
{"/usr/bin/qfile", "-qv"}, // Gentoo (quick)
|
||||
{"/usr/bin/equery", "b"}, // Gentoo (slow)
|
||||
{"/sbin/apk", "info", "-W"}, // Alpine
|
||||
{"/usr/local/sbin/pkg", "which", "-q"}, // FreeBSD
|
||||
}
|
||||
|
||||
for _, cmd := range packagers {
|
||||
cmd = append(cmd, program)
|
||||
if out := queryPackageVersion(cmd...); out != UnknownPackage {
|
||||
return out
|
||||
}
|
||||
}
|
||||
return UnknownPackage
|
||||
}
|
||||
|
||||
// Note: This function is copied from containers/podman libpod/util.go
|
||||
// Please see https://github.com/containers/common/pull/1460
|
||||
func ProgramVersion(program string) (string, error) {
|
||||
return programVersion(program, false)
|
||||
}
|
||||
|
||||
func ProgramVersionDnsname(program string) (string, error) {
|
||||
return programVersion(program, true)
|
||||
}
|
||||
|
||||
func programVersion(program string, dnsname bool) (string, error) {
|
||||
cmd := exec.Command(program, "--version")
|
||||
var stdout bytes.Buffer
|
||||
var stderr bytes.Buffer
|
||||
cmd.Stdout = &stdout
|
||||
cmd.Stderr = &stderr
|
||||
|
||||
err := cmd.Run()
|
||||
if err != nil {
|
||||
return "", fmt.Errorf("`%v --version` failed: %v %v (%v)", program, stderr.String(), stdout.String(), err)
|
||||
}
|
||||
|
||||
output := strings.TrimSuffix(stdout.String(), "\n")
|
||||
// dnsname --version returns the information to stderr
|
||||
if dnsname {
|
||||
output = strings.TrimSuffix(stderr.String(), "\n")
|
||||
}
|
||||
|
||||
return output, nil
|
||||
}
|
||||
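A hedged usage sketch of the helpers above; the binary path below is just an illustrative assumption:

package example

import (
	"fmt"

	cutil "github.com/containers/common/pkg/util"
)

func reportNetavarkVersion() {
	const bin = "/usr/libexec/podman/netavark" // assumed path, for illustration only
	pkg := cutil.PackageVersion(bin)      // "Unknown" when no packager owns the file
	ver, err := cutil.ProgramVersion(bin) // runs "<bin> --version"
	if err != nil {
		ver = "unknown"
	}
	fmt.Printf("netavark: package=%s version=%s\n", pkg, ver)
}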
|
||||
// StringInSlice determines if a string is in a string slice, returns bool
|
||||
func StringInSlice(s string, sl []string) bool {
|
||||
|
|
|
@ -12,11 +12,41 @@ import (
|
|||
internalManifest "github.com/containers/image/v5/internal/manifest"
|
||||
"github.com/containers/image/v5/manifest"
|
||||
"github.com/containers/image/v5/signature"
|
||||
digest "github.com/opencontainers/go-digest"
|
||||
imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1"
|
||||
"github.com/sirupsen/logrus"
|
||||
"golang.org/x/exp/slices"
|
||||
)
|
||||
|
||||
type instanceCopyKind int
|
||||
|
||||
const (
|
||||
instanceCopyCopy instanceCopyKind = iota
|
||||
instanceCopyClone
|
||||
)
|
||||
|
||||
type instanceCopy struct {
|
||||
op instanceCopyKind
|
||||
sourceDigest digest.Digest
|
||||
}
|
||||
|
||||
// prepareInstanceCopies prepares a list of instances which need to be copied to the manifest list.
|
||||
func prepareInstanceCopies(instanceDigests []digest.Digest, options *Options) []instanceCopy {
|
||||
res := []instanceCopy{}
|
||||
for i, instanceDigest := range instanceDigests {
|
||||
if options.ImageListSelection == CopySpecificImages &&
|
||||
!slices.Contains(options.Instances, instanceDigest) {
|
||||
logrus.Debugf("Skipping instance %s (%d/%d)", instanceDigest, i+1, len(instanceDigests))
|
||||
continue
|
||||
}
|
||||
res = append(res, instanceCopy{
|
||||
op: instanceCopyCopy,
|
||||
sourceDigest: instanceDigest,
|
||||
})
|
||||
}
|
||||
return res
|
||||
}
|
||||
|
||||
// copyMultipleImages copies some or all of an image list's instances, using
|
||||
// policyContext to validate source image admissibility.
|
||||
func (c *copier) copyMultipleImages(ctx context.Context, policyContext *signature.PolicyContext, options *Options, unparsedToplevel *image.UnparsedImage) (copiedManifest []byte, retErr error) {
|
||||
|
@ -88,44 +118,35 @@ func (c *copier) copyMultipleImages(ctx context.Context, policyContext *signatur
|
|||
|
||||
// Copy each image, or just the ones we want to copy, in turn.
|
||||
instanceDigests := updatedList.Instances()
|
||||
imagesToCopy := len(instanceDigests)
|
||||
if options.ImageListSelection == CopySpecificImages {
|
||||
imagesToCopy = len(options.Instances)
|
||||
}
|
||||
c.Printf("Copying %d of %d images in list\n", imagesToCopy, len(instanceDigests))
|
||||
updates := make([]manifest.ListUpdate, len(instanceDigests))
|
||||
instancesCopied := 0
|
||||
for i, instanceDigest := range instanceDigests {
|
||||
if options.ImageListSelection == CopySpecificImages &&
|
||||
!slices.Contains(options.Instances, instanceDigest) {
|
||||
update, err := updatedList.Instance(instanceDigest)
|
||||
instanceEdits := []internalManifest.ListEdit{}
|
||||
instanceCopyList := prepareInstanceCopies(instanceDigests, options)
|
||||
c.Printf("Copying %d of %d images in list\n", len(instanceCopyList), len(instanceDigests))
|
||||
for i, instance := range instanceCopyList {
|
||||
// Update instances to be edited by their `ListOperation` and
|
||||
// populate necessary fields.
|
||||
switch instance.op {
|
||||
case instanceCopyCopy:
|
||||
logrus.Debugf("Copying instance %s (%d/%d)", instance.sourceDigest, i+1, len(instanceCopyList))
|
||||
c.Printf("Copying image %s (%d/%d)\n", instance.sourceDigest, i+1, len(instanceCopyList))
|
||||
unparsedInstance := image.UnparsedInstance(c.rawSource, &instanceCopyList[i].sourceDigest)
|
||||
updatedManifest, updatedManifestType, updatedManifestDigest, err := c.copySingleImage(ctx, policyContext, options, unparsedToplevel, unparsedInstance, &instanceCopyList[i].sourceDigest)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
return nil, fmt.Errorf("copying image %d/%d from manifest list: %w", i+1, len(instanceCopyList), err)
|
||||
}
|
||||
logrus.Debugf("Skipping instance %s (%d/%d)", instanceDigest, i+1, len(instanceDigests))
|
||||
// Record the digest/size/type of the manifest that we didn't copy.
|
||||
updates[i] = update
|
||||
continue
|
||||
}
|
||||
logrus.Debugf("Copying instance %s (%d/%d)", instanceDigest, i+1, len(instanceDigests))
|
||||
c.Printf("Copying image %s (%d/%d)\n", instanceDigest, instancesCopied+1, imagesToCopy)
|
||||
unparsedInstance := image.UnparsedInstance(c.rawSource, &instanceDigest)
|
||||
updatedManifest, updatedManifestType, updatedManifestDigest, err := c.copySingleImage(ctx, policyContext, options, unparsedToplevel, unparsedInstance, &instanceDigest)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("copying image %d/%d from manifest list: %w", instancesCopied+1, imagesToCopy, err)
|
||||
}
|
||||
instancesCopied++
|
||||
// Record the result of a possible conversion here.
|
||||
update := manifest.ListUpdate{
|
||||
Digest: updatedManifestDigest,
|
||||
Size: int64(len(updatedManifest)),
|
||||
MediaType: updatedManifestType,
|
||||
instanceEdits = append(instanceEdits, internalManifest.ListEdit{
|
||||
ListOperation: internalManifest.ListOpUpdate,
|
||||
UpdateOldDigest: instance.sourceDigest,
|
||||
UpdateDigest: updatedManifestDigest,
|
||||
UpdateSize: int64(len(updatedManifest)),
|
||||
UpdateMediaType: updatedManifestType})
|
||||
default:
|
||||
return nil, fmt.Errorf("copying image: invalid copy operation %d", instance.op)
|
||||
}
|
||||
updates[i] = update
|
||||
}
|
||||
|
||||
// Now reset the digest/size/types of the manifests in the list to account for any conversions that we made.
|
||||
if err = updatedList.UpdateInstances(updates); err != nil {
|
||||
if err = updatedList.EditInstances(instanceEdits); err != nil {
|
||||
return nil, fmt.Errorf("updating manifest list: %w", err)
|
||||
}
|
||||
|
||||
|
|
70
vendor/github.com/containers/image/v5/internal/manifest/docker_schema2_list.go
generated
vendored
|
@ -69,27 +69,71 @@ func (list *Schema2ListPublic) Instance(instanceDigest digest.Digest) (ListUpdat
|
|||
|
||||
// UpdateInstances updates the sizes, digests, and media types of the manifests
|
||||
// which the list catalogs.
|
||||
func (list *Schema2ListPublic) UpdateInstances(updates []ListUpdate) error {
|
||||
if len(updates) != len(list.Manifests) {
|
||||
return fmt.Errorf("incorrect number of update entries passed to Schema2List.UpdateInstances: expected %d, got %d", len(list.Manifests), len(updates))
|
||||
func (index *Schema2ListPublic) UpdateInstances(updates []ListUpdate) error {
|
||||
editInstances := []ListEdit{}
|
||||
for i, instance := range updates {
|
||||
editInstances = append(editInstances, ListEdit{
|
||||
UpdateOldDigest: index.Manifests[i].Digest,
|
||||
UpdateDigest: instance.Digest,
|
||||
UpdateSize: instance.Size,
|
||||
UpdateMediaType: instance.MediaType,
|
||||
ListOperation: ListOpUpdate})
|
||||
}
|
||||
for i := range updates {
|
||||
if err := updates[i].Digest.Validate(); err != nil {
|
||||
return fmt.Errorf("update %d of %d passed to Schema2List.UpdateInstances contained an invalid digest: %w", i+1, len(updates), err)
|
||||
return index.editInstances(editInstances)
|
||||
}
|
||||
list.Manifests[i].Digest = updates[i].Digest
|
||||
if updates[i].Size < 0 {
|
||||
return fmt.Errorf("update %d of %d passed to Schema2List.UpdateInstances had an invalid size (%d)", i+1, len(updates), updates[i].Size)
|
||||
|
||||
func (index *Schema2ListPublic) editInstances(editInstances []ListEdit) error {
|
||||
addedEntries := []Schema2ManifestDescriptor{}
|
||||
for i, editInstance := range editInstances {
|
||||
switch editInstance.ListOperation {
|
||||
case ListOpUpdate:
|
||||
if err := editInstance.UpdateOldDigest.Validate(); err != nil {
|
||||
return fmt.Errorf("Schema2List.EditInstances: Attempting to update %s which is an invalid digest: %w", editInstance.UpdateOldDigest, err)
|
||||
}
|
||||
list.Manifests[i].Size = updates[i].Size
|
||||
if updates[i].MediaType == "" {
|
||||
return fmt.Errorf("update %d of %d passed to Schema2List.UpdateInstances had no media type (was %q)", i+1, len(updates), list.Manifests[i].MediaType)
|
||||
if err := editInstance.UpdateDigest.Validate(); err != nil {
|
||||
return fmt.Errorf("Schema2List.EditInstances: Modified digest %s is an invalid digest: %w", editInstance.UpdateDigest, err)
|
||||
}
|
||||
list.Manifests[i].MediaType = updates[i].MediaType
|
||||
targetIndex := slices.IndexFunc(index.Manifests, func(m Schema2ManifestDescriptor) bool {
|
||||
return m.Digest == editInstance.UpdateOldDigest
|
||||
})
|
||||
if targetIndex == -1 {
|
||||
return fmt.Errorf("Schema2List.EditInstances: digest %s not found", editInstance.UpdateOldDigest)
|
||||
}
|
||||
index.Manifests[targetIndex].Digest = editInstance.UpdateDigest
|
||||
if editInstance.UpdateSize < 0 {
|
||||
return fmt.Errorf("update %d of %d passed to Schema2List.UpdateInstances had an invalid size (%d)", i+1, len(editInstances), editInstance.UpdateSize)
|
||||
}
|
||||
index.Manifests[targetIndex].Size = editInstance.UpdateSize
|
||||
if editInstance.UpdateMediaType == "" {
|
||||
return fmt.Errorf("update %d of %d passed to Schema2List.UpdateInstances had no media type (was %q)", i+1, len(editInstances), index.Manifests[i].MediaType)
|
||||
}
|
||||
index.Manifests[targetIndex].MediaType = editInstance.UpdateMediaType
|
||||
case ListOpAdd:
|
||||
addInstance := Schema2ManifestDescriptor{
|
||||
Schema2Descriptor{Digest: editInstance.AddDigest, Size: editInstance.AddSize, MediaType: editInstance.AddMediaType},
|
||||
Schema2PlatformSpec{
|
||||
OS: editInstance.AddPlatform.OS,
|
||||
Architecture: editInstance.AddPlatform.Architecture,
|
||||
OSVersion: editInstance.AddPlatform.OSVersion,
|
||||
OSFeatures: editInstance.AddPlatform.OSFeatures,
|
||||
Variant: editInstance.AddPlatform.Variant,
|
||||
},
|
||||
}
|
||||
addedEntries = append(addedEntries, addInstance)
|
||||
default:
|
||||
return fmt.Errorf("internal error: invalid operation: %d", editInstance.ListOperation)
|
||||
}
|
||||
}
|
||||
if len(addedEntries) != 0 {
|
||||
index.Manifests = append(index.Manifests, addedEntries...)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (index *Schema2List) EditInstances(editInstances []ListEdit) error {
|
||||
return index.editInstances(editInstances)
|
||||
}
|
||||
|
||||
func (list *Schema2ListPublic) ChooseInstanceByCompression(ctx *types.SystemContext, preferGzip types.OptionalBool) (digest.Digest, error) {
|
||||
// ChooseInstanceByCompression is same as ChooseInstance for schema2 manifest list.
|
||||
return list.ChooseInstance(ctx)
|
||||
|
|
|
@ -55,6 +55,10 @@ type List interface {
|
|||
// SystemContext ( or for the current platform if the SystemContext doesn't specify any detail ) and preferGzip for compression which
|
||||
// when configured to OptionalBoolTrue and chooses best available compression when it is OptionalBoolFalse or left OptionalBoolUndefined.
|
||||
ChooseInstanceByCompression(ctx *types.SystemContext, preferGzip types.OptionalBool) (digest.Digest, error)
|
||||
// EditInstances edits information about the list's instances. It takes a slice of ListEdit where each element
|
||||
// is responsible for either modifying or adding a new instance to the manifest. The operation is
|
||||
// selected on the basis of the configured ListOperation field.
|
||||
EditInstances([]ListEdit) error
|
||||
}
|
||||
|
||||
// ListUpdate includes the fields which a List's UpdateInstances() method will modify.
|
||||
|
@ -65,6 +69,32 @@ type ListUpdate struct {
|
|||
MediaType string
|
||||
}
|
||||
|
||||
type ListOp int
|
||||
|
||||
const (
|
||||
listOpInvalid ListOp = iota
|
||||
ListOpAdd
|
||||
ListOpUpdate
|
||||
)
|
||||
|
||||
// ListEdit includes the fields which a List's EditInstances() method will modify.
|
||||
type ListEdit struct {
|
||||
ListOperation ListOp
|
||||
|
||||
// If Op == ListOpUpdate (basically the previous UpdateInstances), all fields must be set.
|
||||
UpdateOldDigest digest.Digest
|
||||
UpdateDigest digest.Digest
|
||||
UpdateSize int64
|
||||
UpdateMediaType string
|
||||
|
||||
// If Op == ListOpAdd, all fields must be set.
|
||||
AddDigest digest.Digest
|
||||
AddSize int64
|
||||
AddMediaType string
|
||||
AddPlatform *imgspecv1.Platform
|
||||
AddAnnotations map[string]string
|
||||
}
|
||||
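A hedged, in-module sketch of building edits for EditInstances (internal/manifest is not importable outside c/image; the digests and sizes below are caller-supplied placeholders):

package example

import (
	internalManifest "github.com/containers/image/v5/internal/manifest"
	digest "github.com/opencontainers/go-digest"
	imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1"
)

// editList rewrites one existing instance and appends another in a single call.
func editList(list internalManifest.List, old, updated, added digest.Digest, updatedSize, addedSize int64) error {
	return list.EditInstances([]internalManifest.ListEdit{
		{
			ListOperation:   internalManifest.ListOpUpdate,
			UpdateOldDigest: old,
			UpdateDigest:    updated,
			UpdateSize:      updatedSize,
			UpdateMediaType: imgspecv1.MediaTypeImageManifest,
		},
		{
			ListOperation: internalManifest.ListOpAdd,
			AddDigest:     added,
			AddSize:       addedSize,
			AddMediaType:  imgspecv1.MediaTypeImageManifest,
			AddPlatform:   &imgspecv1.Platform{OS: "linux", Architecture: "arm64"},
		},
	})
}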
|
||||
// ListPublicFromBlob parses a list of manifests.
|
||||
// This is publicly visible as c/image/manifest.ListFromBlob.
|
||||
func ListPublicFromBlob(manifest []byte, manifestMIMEType string) (ListPublic, error) {
|
||||
|
|
|
@ -64,26 +64,68 @@ func (index *OCI1IndexPublic) Instance(instanceDigest digest.Digest) (ListUpdate
|
|||
// UpdateInstances updates the sizes, digests, and media types of the manifests
|
||||
// which the list catalogs.
|
||||
func (index *OCI1IndexPublic) UpdateInstances(updates []ListUpdate) error {
|
||||
if len(updates) != len(index.Manifests) {
|
||||
return fmt.Errorf("incorrect number of update entries passed to OCI1Index.UpdateInstances: expected %d, got %d", len(index.Manifests), len(updates))
|
||||
editInstances := []ListEdit{}
|
||||
for i, instance := range updates {
|
||||
editInstances = append(editInstances, ListEdit{
|
||||
UpdateOldDigest: index.Manifests[i].Digest,
|
||||
UpdateDigest: instance.Digest,
|
||||
UpdateSize: instance.Size,
|
||||
UpdateMediaType: instance.MediaType,
|
||||
ListOperation: ListOpUpdate})
|
||||
}
|
||||
for i := range updates {
|
||||
if err := updates[i].Digest.Validate(); err != nil {
|
||||
return fmt.Errorf("update %d of %d passed to OCI1Index.UpdateInstances contained an invalid digest: %w", i+1, len(updates), err)
|
||||
return index.editInstances(editInstances)
|
||||
}
|
||||
index.Manifests[i].Digest = updates[i].Digest
|
||||
if updates[i].Size < 0 {
|
||||
return fmt.Errorf("update %d of %d passed to OCI1Index.UpdateInstances had an invalid size (%d)", i+1, len(updates), updates[i].Size)
|
||||
|
||||
func (index *OCI1IndexPublic) editInstances(editInstances []ListEdit) error {
|
||||
addedEntries := []imgspecv1.Descriptor{}
|
||||
for i, editInstance := range editInstances {
|
||||
switch editInstance.ListOperation {
|
||||
case ListOpUpdate:
|
||||
if err := editInstance.UpdateOldDigest.Validate(); err != nil {
|
||||
return fmt.Errorf("OCI1Index.EditInstances: Attempting to update %s which is an invalid digest: %w", editInstance.UpdateOldDigest, err)
|
||||
}
|
||||
index.Manifests[i].Size = updates[i].Size
|
||||
if updates[i].MediaType == "" {
|
||||
return fmt.Errorf("update %d of %d passed to OCI1Index.UpdateInstances had no media type (was %q)", i+1, len(updates), index.Manifests[i].MediaType)
|
||||
if err := editInstance.UpdateDigest.Validate(); err != nil {
|
||||
return fmt.Errorf("OCI1Index.EditInstances: Modified digest %s is an invalid digest: %w", editInstance.UpdateDigest, err)
|
||||
}
|
||||
index.Manifests[i].MediaType = updates[i].MediaType
|
||||
targetIndex := slices.IndexFunc(index.Manifests, func(m imgspecv1.Descriptor) bool {
|
||||
return m.Digest == editInstance.UpdateOldDigest
|
||||
})
|
||||
if targetIndex == -1 {
|
||||
return fmt.Errorf("OCI1Index.EditInstances: digest %s not found", editInstance.UpdateOldDigest)
|
||||
}
|
||||
index.Manifests[targetIndex].Digest = editInstance.UpdateDigest
|
||||
if editInstance.UpdateSize < 0 {
|
||||
return fmt.Errorf("update %d of %d passed to OCI1Index.UpdateInstances had an invalid size (%d)", i+1, len(editInstances), editInstance.UpdateSize)
|
||||
}
|
||||
index.Manifests[targetIndex].Size = editInstance.UpdateSize
|
||||
if editInstance.UpdateMediaType == "" {
|
||||
return fmt.Errorf("update %d of %d passed to OCI1Index.UpdateInstances had no media type (was %q)", i+1, len(editInstances), index.Manifests[i].MediaType)
|
||||
}
|
||||
index.Manifests[targetIndex].MediaType = editInstance.UpdateMediaType
|
||||
case ListOpAdd:
|
||||
addedEntries = append(addedEntries, imgspecv1.Descriptor{
|
||||
MediaType: editInstance.AddMediaType,
|
||||
Size: editInstance.AddSize,
|
||||
Digest: editInstance.AddDigest,
|
||||
Platform: editInstance.AddPlatform,
|
||||
Annotations: editInstance.AddAnnotations})
|
||||
default:
|
||||
return fmt.Errorf("internal error: invalid operation: %d", editInstance.ListOperation)
|
||||
}
|
||||
}
|
||||
if len(addedEntries) != 0 {
|
||||
index.Manifests = append(index.Manifests, addedEntries...)
|
||||
slices.SortStableFunc(index.Manifests, func(a, b imgspecv1.Descriptor) bool {
|
||||
return !instanceIsZstd(a) && instanceIsZstd(b)
|
||||
})
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (index *OCI1Index) EditInstances(editInstances []ListEdit) error {
|
||||
return index.editInstances(editInstances)
|
||||
}
|
||||
|
||||
// instanceIsZstd returns true if the instance is a zstd instance, otherwise false.
|
||||
func instanceIsZstd(manifest imgspecv1.Descriptor) bool {
|
||||
if value, ok := manifest.Annotations[OCI1InstanceAnnotationCompressionZSTD]; ok && value == "true" {
|
||||
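A hedged illustration of the ordering used after additions above: with the less function !instanceIsZstd(a) && instanceIsZstd(b), a stable sort keeps the existing relative order but places all non-zstd descriptors before zstd ones. The digests below are placeholders.

func exampleZstdOrdering() {
	manifests := []imgspecv1.Descriptor{
		{Digest: "sha256:aaa", Annotations: map[string]string{OCI1InstanceAnnotationCompressionZSTD: "true"}},
		{Digest: "sha256:bbb"}, // unannotated, i.e. treated as non-zstd (gzip)
	}
	slices.SortStableFunc(manifests, func(a, b imgspecv1.Descriptor) bool {
		return !instanceIsZstd(a) && instanceIsZstd(b)
	})
	// manifests now lists sha256:bbb (non-zstd) before sha256:aaa (zstd).
}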
|
@ -131,8 +173,6 @@ func (index *OCI1IndexPublic) chooseInstance(ctx *types.SystemContext, preferGzi
|
|||
for manifestIndex, d := range index.Manifests {
|
||||
candidate := instanceCandidate{platformIndex: math.MaxInt, manifestPosition: manifestIndex, isZstd: instanceIsZstd(d), digest: d.Digest}
|
||||
if d.Platform != nil {
|
||||
foundPlatform := false
|
||||
for platformIndex, wantedPlatform := range wantedPlatforms {
|
||||
imagePlatform := imgspecv1.Platform{
|
||||
Architecture: d.Platform.Architecture,
|
||||
OS: d.Platform.OS,
|
||||
|
@ -140,15 +180,13 @@ func (index *OCI1IndexPublic) chooseInstance(ctx *types.SystemContext, preferGzi
|
|||
OSFeatures: slices.Clone(d.Platform.OSFeatures),
|
||||
Variant: d.Platform.Variant,
|
||||
}
|
||||
if platform.MatchesPlatform(imagePlatform, wantedPlatform) {
|
||||
foundPlatform = true
|
||||
candidate.platformIndex = platformIndex
|
||||
break
|
||||
}
|
||||
}
|
||||
if !foundPlatform {
|
||||
platformIndex := slices.IndexFunc(wantedPlatforms, func(wantedPlatform imgspecv1.Platform) bool {
|
||||
return platform.MatchesPlatform(imagePlatform, wantedPlatform)
|
||||
})
|
||||
if platformIndex == -1 {
|
||||
continue
|
||||
}
|
||||
candidate.platformIndex = platformIndex
|
||||
}
|
||||
if bestMatch == nil || candidate.isPreferredOver(bestMatch, didPreferGzip) {
|
||||
bestMatch = &candidate
|
||||
|
|
|
@ -24,11 +24,11 @@ func NewWithValues[E comparable](values ...E) *Set[E] {
|
|||
return s
|
||||
}
|
||||
|
||||
func (s Set[E]) Add(v E) {
|
||||
func (s *Set[E]) Add(v E) {
|
||||
s.m[v] = struct{}{} // Possibly writing the same struct{}{} presence marker again.
|
||||
}
|
||||
|
||||
func (s Set[E]) Delete(v E) {
|
||||
func (s *Set[E]) Delete(v E) {
|
||||
delete(s.m, v)
|
||||
}
|
||||
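A hedged usage sketch for this generic set type; New and Contains are assumed to exist alongside Add and Delete and are not part of this hunk.

func exampleSet() {
	s := set.New[string]()
	s.Add("docker.io")
	s.Delete("docker.io")
	fmt.Println(s.Contains("docker.io")) // false
}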
|
||||
|
|
|
@ -48,9 +48,9 @@ var (
|
|||
ErrNotSupported = errors.New("not supported")
|
||||
)
|
||||
|
||||
// authPath combines a path to a file with container registry access keys,
|
||||
// along with expected properties of that path (currently just whether it's)
|
||||
// legacy format or not.
|
||||
// authPath combines a path to a file with container registry credentials,
|
||||
// along with expected properties of that path (currently just whether it's
|
||||
// legacy format or not).
|
||||
type authPath struct {
|
||||
path string
|
||||
legacyFormat bool
|
||||
|
@ -87,12 +87,12 @@ func SetCredentials(sys *types.SystemContext, key, username, password string) (s
|
|||
switch helper {
|
||||
// Special-case the built-in helpers for auth files.
|
||||
case sysregistriesv2.AuthenticationFileHelper:
|
||||
desc, err = modifyJSON(sys, func(auths *dockerConfigFile) (bool, string, error) {
|
||||
if ch, exists := auths.CredHelpers[key]; exists {
|
||||
desc, err = modifyJSON(sys, func(fileContents *dockerConfigFile) (bool, string, error) {
|
||||
if ch, exists := fileContents.CredHelpers[key]; exists {
|
||||
if isNamespaced {
|
||||
return false, "", unsupportedNamespaceErr(ch)
|
||||
}
|
||||
desc, err := setAuthToCredHelper(ch, key, username, password)
|
||||
desc, err := setCredsInCredHelper(ch, key, username, password)
|
||||
if err != nil {
|
||||
return false, "", err
|
||||
}
|
||||
|
@ -100,7 +100,7 @@ func SetCredentials(sys *types.SystemContext, key, username, password string) (s
|
|||
}
|
||||
creds := base64.StdEncoding.EncodeToString([]byte(username + ":" + password))
|
||||
newCreds := dockerAuthConfig{Auth: creds}
|
||||
auths.AuthConfigs[key] = newCreds
|
||||
fileContents.AuthConfigs[key] = newCreds
|
||||
return true, "", nil
|
||||
})
|
||||
// External helpers.
|
||||
|
@ -108,7 +108,7 @@ func SetCredentials(sys *types.SystemContext, key, username, password string) (s
|
|||
if isNamespaced {
|
||||
err = unsupportedNamespaceErr(helper)
|
||||
} else {
|
||||
desc, err = setAuthToCredHelper(helper, key, username, password)
|
||||
desc, err = setCredsInCredHelper(helper, key, username, password)
|
||||
}
|
||||
}
|
||||
if err != nil {
|
||||
|
@ -156,17 +156,17 @@ func GetAllCredentials(sys *types.SystemContext) (map[string]types.DockerAuthCon
|
|||
case sysregistriesv2.AuthenticationFileHelper:
|
||||
for _, path := range getAuthFilePaths(sys, homedir.Get()) {
|
||||
// parse returns an empty map in case the path doesn't exist.
|
||||
auths, err := path.parse()
|
||||
fileContents, err := path.parse()
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("reading JSON file %q: %w", path.path, err)
|
||||
}
|
||||
// Credential helpers in the auth file have a
|
||||
// direct mapping to a registry, so we can just
|
||||
// walk the map.
|
||||
for registry := range auths.CredHelpers {
|
||||
for registry := range fileContents.CredHelpers {
|
||||
allKeys.Add(registry)
|
||||
}
|
||||
for key := range auths.AuthConfigs {
|
||||
for key := range fileContents.AuthConfigs {
|
||||
key := normalizeAuthFileKey(key, path.legacyFormat)
|
||||
if key == normalizedDockerIORegistry {
|
||||
key = "docker.io"
|
||||
|
@ -176,7 +176,7 @@ func GetAllCredentials(sys *types.SystemContext) (map[string]types.DockerAuthCon
|
|||
}
|
||||
// External helpers.
|
||||
default:
|
||||
creds, err := listAuthsFromCredHelper(helper)
|
||||
creds, err := listCredsInCredHelper(helper)
|
||||
if err != nil {
|
||||
logrus.Debugf("Error listing credentials stored in credential helper %s: %v", helper, err)
|
||||
if errors.Is(err, exec.ErrNotFound) {
|
||||
|
@ -193,19 +193,19 @@ func GetAllCredentials(sys *types.SystemContext) (map[string]types.DockerAuthCon
|
|||
|
||||
// Now use `GetCredentials` to get the specific auth configs for each
|
||||
// previously listed registry.
|
||||
authConfigs := make(map[string]types.DockerAuthConfig)
|
||||
allCreds := make(map[string]types.DockerAuthConfig)
|
||||
for _, key := range allKeys.Values() {
|
||||
authConf, err := GetCredentials(sys, key)
|
||||
creds, err := GetCredentials(sys, key)
|
||||
if err != nil {
|
||||
// Note: we rely on the logging in `GetCredentials`.
|
||||
return nil, err
|
||||
}
|
||||
if authConf != (types.DockerAuthConfig{}) {
|
||||
authConfigs[key] = authConf
|
||||
if creds != (types.DockerAuthConfig{}) {
|
||||
allCreds[key] = creds
|
||||
}
|
||||
}
|
||||
|
||||
return authConfigs, nil
|
||||
return allCreds, nil
|
||||
}
|
||||
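A hedged caller sketch for the function above; the docker/config package is assumed to be imported as config, and sys may be nil to use the default search paths.

func dumpKnownRegistries(sys *types.SystemContext) error {
	allCreds, err := config.GetAllCredentials(sys)
	if err != nil {
		return err
	}
	for registry, cred := range allCreds {
		fmt.Printf("%s -> user %q\n", registry, cred.Username)
	}
	return nil
}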
|
||||
// getAuthFilePaths returns a slice of authPaths based on the system context
|
||||
|
@ -285,13 +285,13 @@ func getCredentialsWithHomeDir(sys *types.SystemContext, key, homeDir string) (t
|
|||
// Anonymous function to query credentials from auth files.
|
||||
getCredentialsFromAuthFiles := func() (types.DockerAuthConfig, string, error) {
|
||||
for _, path := range getAuthFilePaths(sys, homeDir) {
|
||||
authConfig, err := findCredentialsInFile(key, registry, path)
|
||||
creds, err := findCredentialsInFile(key, registry, path)
|
||||
if err != nil {
|
||||
return types.DockerAuthConfig{}, "", err
|
||||
}
|
||||
|
||||
if authConfig != (types.DockerAuthConfig{}) {
|
||||
return authConfig, path.path, nil
|
||||
if creds != (types.DockerAuthConfig{}) {
|
||||
return creds, path.path, nil
|
||||
}
|
||||
}
|
||||
return types.DockerAuthConfig{}, "", nil
|
||||
|
@ -320,7 +320,7 @@ func getCredentialsWithHomeDir(sys *types.SystemContext, key, homeDir string) (t
|
|||
// This intentionally uses "registry", not "key"; we don't support namespaced
|
||||
// credentials in helpers, but a "registry" is a valid parent of "key".
|
||||
helperKey = registry
|
||||
creds, err = getAuthFromCredHelper(helper, registry)
|
||||
creds, err = getCredsFromCredHelper(helper, registry)
|
||||
}
|
||||
if err != nil {
|
||||
logrus.Debugf("Error looking up credentials for %s in credential helper %s: %v", helperKey, helper, err)
|
||||
|
@ -360,14 +360,14 @@ func GetAuthentication(sys *types.SystemContext, key string) (string, string, er
|
|||
// getAuthenticationWithHomeDir is an internal implementation detail of GetAuthentication,
|
||||
// it exists only to allow testing it with an artificial home directory.
|
||||
func getAuthenticationWithHomeDir(sys *types.SystemContext, key, homeDir string) (string, string, error) {
|
||||
auth, err := getCredentialsWithHomeDir(sys, key, homeDir)
|
||||
creds, err := getCredentialsWithHomeDir(sys, key, homeDir)
|
||||
if err != nil {
|
||||
return "", "", err
|
||||
}
|
||||
if auth.IdentityToken != "" {
|
||||
if creds.IdentityToken != "" {
|
||||
return "", "", fmt.Errorf("non-empty identity token found and this API doesn't support it: %w", ErrNotSupported)
|
||||
}
|
||||
return auth.Username, auth.Password, nil
|
||||
return creds.Username, creds.Password, nil
|
||||
}
|
||||
|
||||
// RemoveAuthentication removes credentials for `key` from all possible
|
||||
|
@ -393,7 +393,7 @@ func RemoveAuthentication(sys *types.SystemContext, key string) error {
|
|||
logrus.Debugf("Not removing credentials because namespaced keys are not supported for the credential helper: %s", helper)
|
||||
return
|
||||
}
|
||||
err := deleteAuthFromCredHelper(helper, key)
|
||||
err := deleteCredsFromCredHelper(helper, key)
|
||||
if err == nil {
|
||||
logrus.Debugf("Credentials for %q were deleted from credential helper %s", key, helper)
|
||||
isLoggedIn = true
|
||||
|
@ -411,13 +411,13 @@ func RemoveAuthentication(sys *types.SystemContext, key string) error {
|
|||
switch helper {
|
||||
// Special-case the built-in helper for auth files.
|
||||
case sysregistriesv2.AuthenticationFileHelper:
|
||||
_, err = modifyJSON(sys, func(auths *dockerConfigFile) (bool, string, error) {
|
||||
if innerHelper, exists := auths.CredHelpers[key]; exists {
|
||||
_, err = modifyJSON(sys, func(fileContents *dockerConfigFile) (bool, string, error) {
|
||||
if innerHelper, exists := fileContents.CredHelpers[key]; exists {
|
||||
removeFromCredHelper(innerHelper)
|
||||
}
|
||||
if _, ok := auths.AuthConfigs[key]; ok {
|
||||
if _, ok := fileContents.AuthConfigs[key]; ok {
|
||||
isLoggedIn = true
|
||||
delete(auths.AuthConfigs, key)
|
||||
delete(fileContents.AuthConfigs, key)
|
||||
}
|
||||
return true, "", multiErr
|
||||
})
|
||||
|
@ -454,23 +454,23 @@ func RemoveAllAuthentication(sys *types.SystemContext) error {
|
|||
switch helper {
|
||||
// Special-case the built-in helper for auth files.
|
||||
case sysregistriesv2.AuthenticationFileHelper:
|
||||
_, err = modifyJSON(sys, func(auths *dockerConfigFile) (bool, string, error) {
|
||||
for registry, helper := range auths.CredHelpers {
|
||||
_, err = modifyJSON(sys, func(fileContents *dockerConfigFile) (bool, string, error) {
|
||||
for registry, helper := range fileContents.CredHelpers {
|
||||
// Helpers in auth files are expected
|
||||
// to exist, so no special treatment
|
||||
// for them.
|
||||
if err := deleteAuthFromCredHelper(helper, registry); err != nil {
|
||||
if err := deleteCredsFromCredHelper(helper, registry); err != nil {
|
||||
return false, "", err
|
||||
}
|
||||
}
|
||||
auths.CredHelpers = make(map[string]string)
|
||||
auths.AuthConfigs = make(map[string]dockerAuthConfig)
|
||||
fileContents.CredHelpers = make(map[string]string)
|
||||
fileContents.AuthConfigs = make(map[string]dockerAuthConfig)
|
||||
return true, "", nil
|
||||
})
|
||||
// External helpers.
|
||||
default:
|
||||
var creds map[string]string
|
||||
creds, err = listAuthsFromCredHelper(helper)
|
||||
creds, err = listCredsInCredHelper(helper)
|
||||
if err != nil {
|
||||
if errors.Is(err, exec.ErrNotFound) {
|
||||
// It's okay if the helper doesn't exist.
|
||||
|
@ -480,7 +480,7 @@ func RemoveAllAuthentication(sys *types.SystemContext) error {
|
|||
}
|
||||
}
|
||||
for registry := range creds {
|
||||
err = deleteAuthFromCredHelper(helper, registry)
|
||||
err = deleteCredsFromCredHelper(helper, registry)
|
||||
if err != nil {
|
||||
break
|
||||
}
|
||||
|
@ -497,7 +497,7 @@ func RemoveAllAuthentication(sys *types.SystemContext) error {
|
|||
return multiErr
|
||||
}
|
||||
|
||||
func listAuthsFromCredHelper(credHelper string) (map[string]string, error) {
|
||||
func listCredsInCredHelper(credHelper string) (map[string]string, error) {
|
||||
helperName := fmt.Sprintf("docker-credential-%s", credHelper)
|
||||
p := helperclient.NewShellProgramFunc(helperName)
|
||||
return helperclient.List(p)
|
||||
|
@ -543,40 +543,40 @@ func getPathToAuthWithOS(sys *types.SystemContext, goOS string) (authPath, bool,
|
|||
return newAuthPathDefault(fmt.Sprintf(defaultPerUIDPathFormat, os.Getuid())), false, nil
|
||||
}
|
||||
|
||||
// parse unmarshals the authentications stored in the auth.json file and returns it
|
||||
// parse unmarshals the credentials stored in the auth.json file and returns it
|
||||
// or returns an empty dockerConfigFile data structure if auth.json does not exist
|
||||
// if the file exists and is empty, this function returns an error.
|
||||
func (path authPath) parse() (dockerConfigFile, error) {
|
||||
var auths dockerConfigFile
|
||||
var fileContents dockerConfigFile
|
||||
|
||||
raw, err := os.ReadFile(path.path)
|
||||
if err != nil {
|
||||
if os.IsNotExist(err) {
|
||||
auths.AuthConfigs = map[string]dockerAuthConfig{}
|
||||
return auths, nil
|
||||
fileContents.AuthConfigs = map[string]dockerAuthConfig{}
|
||||
return fileContents, nil
|
||||
}
|
||||
return dockerConfigFile{}, err
|
||||
}
|
||||
|
||||
if path.legacyFormat {
|
||||
if err = json.Unmarshal(raw, &auths.AuthConfigs); err != nil {
|
||||
if err = json.Unmarshal(raw, &fileContents.AuthConfigs); err != nil {
|
||||
return dockerConfigFile{}, fmt.Errorf("unmarshaling JSON at %q: %w", path.path, err)
|
||||
}
|
||||
return auths, nil
|
||||
return fileContents, nil
|
||||
}
|
||||
|
||||
if err = json.Unmarshal(raw, &auths); err != nil {
|
||||
if err = json.Unmarshal(raw, &fileContents); err != nil {
|
||||
return dockerConfigFile{}, fmt.Errorf("unmarshaling JSON at %q: %w", path.path, err)
|
||||
}
|
||||
|
||||
if auths.AuthConfigs == nil {
|
||||
auths.AuthConfigs = map[string]dockerAuthConfig{}
|
||||
if fileContents.AuthConfigs == nil {
|
||||
fileContents.AuthConfigs = map[string]dockerAuthConfig{}
|
||||
}
|
||||
if auths.CredHelpers == nil {
|
||||
auths.CredHelpers = make(map[string]string)
|
||||
if fileContents.CredHelpers == nil {
|
||||
fileContents.CredHelpers = make(map[string]string)
|
||||
}
|
||||
|
||||
return auths, nil
|
||||
return fileContents, nil
|
||||
}
|
||||
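Illustrative only: the two on-disk shapes parse() accepts, following the dockerConfigFile/dockerAuthConfig field tags in this package. The registry name and the base64 value (here "user:pass") are placeholders.

// Modern auth.json / config.json layout: credentials nested under "auths".
const modernAuthJSON = `{"auths": {"registry.example.com": {"auth": "dXNlcjpwYXNz"}}}`

// Legacy .dockercfg layout (path.legacyFormat == true): a bare map of registries.
const legacyDockercfg = `{"registry.example.com": {"auth": "dXNlcjpwYXNz"}}`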
|
||||
// modifyJSON finds an auth.json file, calls editor on the contents, and
|
||||
|
@ -585,7 +585,7 @@ func (path authPath) parse() (dockerConfigFile, error) {
|
|||
//
|
||||
// The editor may also return a human-readable description of the updated location; if it is "",
|
||||
// the file itself is used.
|
||||
func modifyJSON(sys *types.SystemContext, editor func(auths *dockerConfigFile) (bool, string, error)) (string, error) {
|
||||
func modifyJSON(sys *types.SystemContext, editor func(fileContents *dockerConfigFile) (bool, string, error)) (string, error) {
|
||||
path, _, err := getPathToAuth(sys)
|
||||
if err != nil {
|
||||
return "", err
|
||||
|
@ -599,17 +599,17 @@ func modifyJSON(sys *types.SystemContext, editor func(auths *dockerConfigFile) (
|
|||
return "", err
|
||||
}
|
||||
|
||||
auths, err := path.parse()
|
||||
fileContents, err := path.parse()
|
||||
if err != nil {
|
||||
return "", fmt.Errorf("reading JSON file %q: %w", path.path, err)
|
||||
}
|
||||
|
||||
updated, description, err := editor(&auths)
|
||||
updated, description, err := editor(&fileContents)
|
||||
if err != nil {
|
||||
return "", fmt.Errorf("updating %q: %w", path.path, err)
|
||||
}
|
||||
if updated {
|
||||
newData, err := json.MarshalIndent(auths, "", "\t")
|
||||
newData, err := json.MarshalIndent(fileContents, "", "\t")
|
||||
if err != nil {
|
||||
return "", fmt.Errorf("marshaling JSON %q: %w", path.path, err)
|
||||
}
|
||||
|
@ -625,7 +625,7 @@ func modifyJSON(sys *types.SystemContext, editor func(auths *dockerConfigFile) (
|
|||
return description, nil
|
||||
}
|
||||
|
||||
func getAuthFromCredHelper(credHelper, registry string) (types.DockerAuthConfig, error) {
|
||||
func getCredsFromCredHelper(credHelper, registry string) (types.DockerAuthConfig, error) {
|
||||
helperName := fmt.Sprintf("docker-credential-%s", credHelper)
|
||||
p := helperclient.NewShellProgramFunc(helperName)
|
||||
creds, err := helperclient.Get(p, registry)
|
||||
|
@ -650,9 +650,9 @@ func getAuthFromCredHelper(credHelper, registry string) (types.DockerAuthConfig,
|
|||
}
|
||||
}
|
||||
|
||||
// setAuthToCredHelper stores (username, password) for registry in credHelper.
|
||||
// setCredsInCredHelper stores (username, password) for registry in credHelper.
|
||||
// Returns a human-readable description of the destination, to be returned by SetCredentials.
|
||||
func setAuthToCredHelper(credHelper, registry, username, password string) (string, error) {
|
||||
func setCredsInCredHelper(credHelper, registry, username, password string) (string, error) {
|
||||
helperName := fmt.Sprintf("docker-credential-%s", credHelper)
|
||||
p := helperclient.NewShellProgramFunc(helperName)
|
||||
creds := &credentials.Credentials{
|
||||
|
@ -666,7 +666,7 @@ func setAuthToCredHelper(credHelper, registry, username, password string) (strin
|
|||
return fmt.Sprintf("credential helper: %s", credHelper), nil
|
||||
}
|
||||
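A hedged sketch of the public entry point whose helper path ends up in setCredsInCredHelper above; the registry name and credentials are placeholders, and config is the assumed import alias for this package.

func storeLogin(sys *types.SystemContext) error {
	desc, err := config.SetCredentials(sys, "registry.example.com", "user", "pass")
	if err != nil {
		return err
	}
	fmt.Println("credentials written to", desc)
	return nil
}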
|
||||
func deleteAuthFromCredHelper(credHelper, registry string) error {
|
||||
func deleteCredsFromCredHelper(credHelper, registry string) error {
|
||||
helperName := fmt.Sprintf("docker-credential-%s", credHelper)
|
||||
p := helperclient.NewShellProgramFunc(helperName)
|
||||
return helperclient.Erase(p, registry)
|
||||
|
@ -675,7 +675,7 @@ func deleteAuthFromCredHelper(credHelper, registry string) error {
|
|||
// findCredentialsInFile looks for credentials matching "key"
|
||||
// (which is "registry" or a namespace in "registry") in "path".
|
||||
func findCredentialsInFile(key, registry string, path authPath) (types.DockerAuthConfig, error) {
|
||||
auths, err := path.parse()
|
||||
fileContents, err := path.parse()
|
||||
if err != nil {
|
||||
return types.DockerAuthConfig{}, fmt.Errorf("reading JSON file %q: %w", path.path, err)
|
||||
}
|
||||
|
@ -683,9 +683,9 @@ func findCredentialsInFile(key, registry string, path authPath) (types.DockerAut
|
|||
// First try cred helpers. They should always be normalized.
|
||||
// This intentionally uses "registry", not "key"; we don't support namespaced
|
||||
// credentials in helpers.
|
||||
if ch, exists := auths.CredHelpers[registry]; exists {
|
||||
if ch, exists := fileContents.CredHelpers[registry]; exists {
|
||||
logrus.Debugf("Looking up in credential helper %s based on credHelpers entry in %s", ch, path.path)
|
||||
return getAuthFromCredHelper(ch, registry)
|
||||
return getCredsFromCredHelper(ch, registry)
|
||||
}
|
||||
|
||||
// Support sub-registry namespaces in auth.
|
||||
|
@ -701,7 +701,7 @@ func findCredentialsInFile(key, registry string, path authPath) (types.DockerAut
|
|||
// Repo or namespace keys are only supported as exact matches. For registry
|
||||
// keys we prefer exact matches as well.
|
||||
for _, key := range keys {
|
||||
if val, exists := auths.AuthConfigs[key]; exists {
|
||||
if val, exists := fileContents.AuthConfigs[key]; exists {
|
||||
return decodeDockerAuth(path.path, key, val)
|
||||
}
|
||||
}
|
||||
|
@ -715,7 +715,7 @@ func findCredentialsInFile(key, registry string, path authPath) (types.DockerAut
|
|||
// The docker.io registry still uses the /v1/ key with a special host name,
|
||||
// so account for that as well.
|
||||
registry = normalizeRegistry(registry)
|
||||
for k, v := range auths.AuthConfigs {
|
||||
for k, v := range fileContents.AuthConfigs {
|
||||
if normalizeAuthFileKey(k, path.legacyFormat) == registry {
|
||||
return decodeDockerAuth(path.path, k, v)
|
||||
}
|
||||
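A hedged sketch of the lookup order described above: a namespaced key falls back from the most specific auth entry to the bare registry. The key below is an example only.

func lookupNamespaced(sys *types.SystemContext) error {
	creds, err := config.GetCredentials(sys, "registry.example.com/team/project")
	if err != nil {
		return err
	}
	if creds == (types.DockerAuthConfig{}) {
		fmt.Println("no credentials stored for the key or any of its parents")
	}
	return nil
}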
|
|
|
@ -585,9 +585,9 @@ type SystemContext struct {
|
|||
// resolving to Docker Hub in the Docker-compatible REST API of Podman; it should never be used outside this
|
||||
// specific context.
|
||||
PodmanOnlyShortNamesIgnoreRegistriesConfAndForceDockerHub bool
|
||||
// If not "", overrides the default path for the authentication file, but only new format files
|
||||
// If not "", overrides the default path for the registry authentication file, but only new format files
|
||||
AuthFilePath string
|
||||
// if not "", overrides the default path for the authentication file, but with the legacy format;
|
||||
// if not "", overrides the default path for the registry authentication file, but with the legacy format;
|
||||
// the code currently will by default look for legacy format files like .dockercfg in the $HOME dir;
|
||||
// but in addition to the home dir, openshift may mount .dockercfg files (via secret mount)
|
||||
// in locations other than the home dir; openshift components should then set this field in those cases;
|
||||
|
|
|
@ -17,13 +17,13 @@ env:
|
|||
####
|
||||
#### Cache-image names to test with (double-quotes around names are critical)
|
||||
###
|
||||
FEDORA_NAME: "fedora-37"
|
||||
FEDORA_NAME: "fedora-38"
|
||||
DEBIAN_NAME: "debian-12"
|
||||
|
||||
# GCE project where images live
|
||||
IMAGE_PROJECT: "libpod-218412"
|
||||
# VM Image built in containers/automation_images
|
||||
IMAGE_SUFFIX: "c20230405t152256z-f37f36d12"
|
||||
IMAGE_SUFFIX: "c20230517t144652z-f38f37d12"
|
||||
FEDORA_CACHE_IMAGE_NAME: "fedora-${IMAGE_SUFFIX}"
|
||||
DEBIAN_CACHE_IMAGE_NAME: "debian-${IMAGE_SUFFIX}"
|
||||
|
||||
|
@ -56,7 +56,6 @@ gce_instance:
|
|||
linux_testing: &linux_testing
|
||||
depends_on:
|
||||
- lint
|
||||
only_if: $CIRRUS_CHANGE_TITLE !=~ '.*CI:DOCS.*'
|
||||
gce_instance: # Only need to specify differences from defaults (above)
|
||||
image_name: "${VM_IMAGE}"
|
||||
|
||||
|
@ -127,10 +126,12 @@ lint_task:
|
|||
fingerprint_script: cat go.sum
|
||||
folder: $GOPATH/pkg/mod
|
||||
build_script: |
|
||||
echo "deb http://deb.debian.org/debian stretch-backports main" > /etc/apt/sources.list.d/backports.list
|
||||
apt-get update
|
||||
apt-get install -y libbtrfs-dev libdevmapper-dev
|
||||
test_script: make TAGS=regex_precompile local-validate && make lint && make clean
|
||||
test_script: |
|
||||
make TAGS=regex_precompile local-validate
|
||||
make lint
|
||||
make clean
|
||||
|
||||
|
||||
# Update metadata on VM images referenced by this repository state
|
||||
|
@ -168,7 +169,7 @@ vendor_task:
|
|||
|
||||
cross_task:
|
||||
container:
|
||||
image: golang:1.17
|
||||
image: golang:1.19
|
||||
build_script: make cross
|
||||
|
||||
|
||||
|
@ -182,6 +183,6 @@ success_task:
|
|||
- vendor
|
||||
- cross
|
||||
container:
|
||||
image: golang:1.17
|
||||
image: golang:1.19
|
||||
clone_script: 'mkdir -p "$CIRRUS_WORKING_DIR"' # Source code not needed
|
||||
script: /bin/true
|
||||
|
|
|
@ -4,68 +4,8 @@ run:
|
|||
deadline: 5m
|
||||
skip-dirs-use-default: true
|
||||
linters:
|
||||
enable-all: true
|
||||
disable:
|
||||
- cyclop
|
||||
- deadcode
|
||||
- dogsled
|
||||
- dupl
|
||||
- errcheck
|
||||
- errname
|
||||
- errorlint
|
||||
- exhaustive
|
||||
- exhaustivestruct
|
||||
- exhaustruct
|
||||
- forbidigo
|
||||
- forcetypeassert
|
||||
- funlen
|
||||
- gci
|
||||
- gochecknoglobals
|
||||
- gochecknoinits
|
||||
- gocognit
|
||||
- gocritic
|
||||
- gocyclo
|
||||
- godot
|
||||
- godox
|
||||
- goerr113
|
||||
enable:
|
||||
- gofumpt
|
||||
- golint
|
||||
- gomnd
|
||||
- gosec
|
||||
- gosimple
|
||||
- govet
|
||||
- ifshort
|
||||
- ineffassign
|
||||
- interfacer
|
||||
- interfacebloat
|
||||
- ireturn
|
||||
- lll
|
||||
- maintidx
|
||||
- maligned
|
||||
- misspell
|
||||
- musttag
|
||||
- nakedret
|
||||
- nestif
|
||||
- nlreturn
|
||||
- nolintlint
|
||||
- nonamedreturns
|
||||
- nosnakecase
|
||||
- paralleltest
|
||||
- prealloc
|
||||
- predeclared
|
||||
- rowserrcheck
|
||||
- scopelint
|
||||
disable:
|
||||
- errcheck
|
||||
- staticcheck
|
||||
- structcheck
|
||||
- stylecheck
|
||||
- tagliatelle
|
||||
- testpackage
|
||||
- thelper
|
||||
- unconvert
|
||||
- unparam
|
||||
- varcheck
|
||||
- varnamelen
|
||||
- wastedassign
|
||||
- whitespace
|
||||
- wrapcheck
|
||||
- wsl
|
||||
|
|
|
@ -1,13 +1,18 @@
|
|||
export GO111MODULE=off
|
||||
export GOPROXY=https://proxy.golang.org
|
||||
|
||||
.PHONY: \
|
||||
all \
|
||||
binary \
|
||||
clean \
|
||||
codespell \
|
||||
containers-storage \
|
||||
cross \
|
||||
default \
|
||||
docs \
|
||||
gccgo \
|
||||
help \
|
||||
install \
|
||||
install.docs \
|
||||
install.tools \
|
||||
lint \
|
||||
local-binary \
|
||||
local-cross \
|
||||
local-gccgo \
|
||||
|
@ -15,33 +20,25 @@ export GOPROXY=https://proxy.golang.org
|
|||
local-test-integration \
|
||||
local-test-unit \
|
||||
local-validate \
|
||||
lint \
|
||||
vendor
|
||||
test-integration \
|
||||
test-unit \
|
||||
validate \
|
||||
vendor \
|
||||
vendor-in-container
|
||||
|
||||
PACKAGE := github.com/containers/storage
|
||||
GIT_BRANCH := $(shell git rev-parse --abbrev-ref HEAD 2>/dev/null)
|
||||
GIT_BRANCH_CLEAN := $(shell echo $(GIT_BRANCH) | sed -e "s/[^[:alnum:]]/-/g")
|
||||
EPOCH_TEST_COMMIT := 0418ebf59f9e1f564831c0ba9378b7f8e40a1c73
|
||||
NATIVETAGS :=
|
||||
AUTOTAGS := $(shell ./hack/btrfs_tag.sh) $(shell ./hack/libdm_tag.sh) $(shell ./hack/libsubid_tag.sh)
|
||||
BUILDFLAGS := -tags "$(AUTOTAGS) $(TAGS)" $(FLAGS)
|
||||
GO ?= go
|
||||
TESTFLAGS := $(shell $(GO) test -race $(BUILDFLAGS) ./pkg/stringutils 2>&1 > /dev/null && echo -race)
|
||||
|
||||
# Go module support: set `-mod=vendor` to use the vendored sources
|
||||
ifeq ($(shell $(GO) help mod >/dev/null 2>&1 && echo true), true)
|
||||
GO:=GO111MODULE=on $(GO)
|
||||
MOD_VENDOR=-mod=vendor
|
||||
endif
|
||||
|
||||
default all: local-binary docs local-validate local-cross ## validate all checks, build and cross-build\nbinaries and docs
|
||||
|
||||
clean: ## remove all built files
|
||||
$(RM) -f containers-storage containers-storage.* docs/*.1 docs/*.5
|
||||
|
||||
sources := $(wildcard *.go cmd/containers-storage/*.go drivers/*.go drivers/*/*.go internal/*/*.go pkg/*/*.go pkg/*/*/*.go types/*.go)
|
||||
containers-storage: $(sources) ## build using gc on the host
|
||||
$(GO) build $(MOD_VENDOR) -compiler gc $(BUILDFLAGS) ./cmd/containers-storage
|
||||
containers-storage: ## build using gc on the host
|
||||
$(GO) build -compiler gc $(BUILDFLAGS) ./cmd/containers-storage
|
||||
|
||||
codespell:
|
||||
codespell -S Makefile,build,buildah,buildah.spec,imgtype,copy,AUTHORS,bin,vendor,.git,go.sum,CHANGELOG.md,changelog.txt,seccomp.json,.cirrus.yml,"*.xz,*.gz,*.tar,*.tgz,*ico,*.png,*.1,*.5,*.orig,*.rej" -L worl,flate,uint,iff,od,ERRO -w
|
||||
|
@ -49,15 +46,15 @@ codespell:
|
|||
binary local-binary: containers-storage
|
||||
|
||||
local-gccgo gccgo: ## build using gccgo on the host
|
||||
GCCGO=$(PWD)/hack/gccgo-wrapper.sh $(GO) build $(MOD_VENDOR) -compiler gccgo $(BUILDFLAGS) -o containers-storage.gccgo ./cmd/containers-storage
|
||||
GCCGO=$(PWD)/hack/gccgo-wrapper.sh $(GO) build -compiler gccgo $(BUILDFLAGS) -o containers-storage.gccgo ./cmd/containers-storage
|
||||
|
||||
local-cross cross: ## cross build the binaries for arm, darwin, and freebsd
|
||||
@for target in linux/amd64 linux/386 linux/arm linux/arm64 linux/ppc64 linux/ppc64le linux/s390x linux/mips linux/mipsle linux/mips64 linux/mips64le darwin/amd64 windows/amd64 freebsd/amd64 freebsd/arm64 ; do \
|
||||
os=`echo $${target} | cut -f1 -d/` ; \
|
||||
arch=`echo $${target} | cut -f2 -d/` ; \
|
||||
suffix=$${os}.$${arch} ; \
|
||||
echo env CGO_ENABLED=0 GOOS=$${os} GOARCH=$${arch} $(GO) build $(MOD_VENDOR) -compiler gc -tags \"$(NATIVETAGS) $(TAGS)\" $(FLAGS) -o containers-storage.$${suffix} ./cmd/containers-storage ; \
|
||||
env CGO_ENABLED=0 GOOS=$${os} GOARCH=$${arch} $(GO) build $(MOD_VENDOR) -compiler gc -tags "$(NATIVETAGS) $(TAGS)" $(FLAGS) -o containers-storage.$${suffix} ./cmd/containers-storage || exit 1 ; \
|
||||
echo env CGO_ENABLED=0 GOOS=$${os} GOARCH=$${arch} $(GO) build -compiler gc -tags \"$(NATIVETAGS) $(TAGS)\" $(FLAGS) -o containers-storage.$${suffix} ./cmd/containers-storage ; \
|
||||
env CGO_ENABLED=0 GOOS=$${os} GOARCH=$${arch} $(GO) build -compiler gc -tags "$(NATIVETAGS) $(TAGS)" $(FLAGS) -o containers-storage.$${suffix} ./cmd/containers-storage || exit 1 ; \
|
||||
done
|
||||
|
||||
docs: install.tools ## build the docs on the host
|
||||
|
@ -66,21 +63,17 @@ docs: install.tools ## build the docs on the host
|
|||
local-test: local-binary local-test-unit local-test-integration ## build the binaries and run the tests
|
||||
|
||||
local-test-unit test-unit: local-binary ## run the unit tests on the host (requires\nsuperuser privileges)
|
||||
@$(GO) test $(MOD_VENDOR) $(BUILDFLAGS) $(TESTFLAGS) $(shell $(GO) list ./... | grep -v ^$(PACKAGE)/vendor)
|
||||
@$(GO) test -count 1 $(BUILDFLAGS) $(TESTFLAGS) ./...
|
||||
|
||||
local-test-integration test-integration: local-binary ## run the integration tests on the host (requires\nsuperuser privileges)
|
||||
@cd tests; ./test_runner.bash
|
||||
|
||||
local-validate validate: install.tools ## validate DCO and gofmt on the host
|
||||
local-validate validate: install.tools ## validate DCO on the host
|
||||
@./hack/git-validation.sh
|
||||
@./hack/gofmt.sh
|
||||
|
||||
install.tools:
|
||||
$(MAKE) -C tests/tools
|
||||
|
||||
$(FFJSON):
|
||||
$(MAKE) -C tests/tools
|
||||
|
||||
install.docs: docs
|
||||
$(MAKE) -C docs install
|
||||
|
||||
|
|
|
@ -1 +1 @@
|
|||
1.46.1
|
||||
1.47.0-dev
|
||||
|
|
File diff suppressed because it is too large
|
@ -523,13 +523,20 @@ func (r *containerStore) load(lockedForWriting bool) (bool, error) {
|
|||
// The caller must hold r.inProcessLock for reading (but usually holds it for writing in order to make the desired changes).
|
||||
func (r *containerStore) save(saveLocations containerLocations) error {
|
||||
r.lockfile.AssertLockedForWriting()
|
||||
// This must be done before we write the file, because the process could be terminated
|
||||
// after the file is written but before the lock file is updated.
|
||||
lw, err := r.lockfile.RecordWrite()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
r.lastWrite = lw
|
||||
for locationIndex := 0; locationIndex < numContainerLocationIndex; locationIndex++ {
|
||||
location := containerLocationFromIndex(locationIndex)
|
||||
if location&saveLocations == 0 {
|
||||
continue
|
||||
}
|
||||
rpath := r.jsonPath[locationIndex]
|
||||
if err := os.MkdirAll(filepath.Dir(rpath), 0700); err != nil {
|
||||
if err := os.MkdirAll(filepath.Dir(rpath), 0o700); err != nil {
|
||||
return err
|
||||
}
|
||||
subsetContainers := make([]*Container, 0, len(r.containers))
|
||||
|
@ -549,15 +556,10 @@ func (r *containerStore) save(saveLocations containerLocations) error {
|
|||
NoSync: true,
|
||||
}
|
||||
}
|
||||
if err := ioutils.AtomicWriteFileWithOpts(rpath, jdata, 0600, opts); err != nil {
|
||||
if err := ioutils.AtomicWriteFileWithOpts(rpath, jdata, 0o600, opts); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
lw, err := r.lockfile.RecordWrite()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
r.lastWrite = lw
|
||||
return nil
|
||||
}
|
||||
|
||||
|
@ -569,12 +571,12 @@ func (r *containerStore) saveFor(modifiedContainer *Container) error {
|
|||
}
|
||||
|
||||
func newContainerStore(dir string, runDir string, transient bool) (rwContainerStore, error) {
|
||||
if err := os.MkdirAll(dir, 0700); err != nil {
|
||||
if err := os.MkdirAll(dir, 0o700); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
volatileDir := dir
|
||||
if transient {
|
||||
if err := os.MkdirAll(runDir, 0700); err != nil {
|
||||
if err := os.MkdirAll(runDir, 0o700); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
volatileDir = runDir
|
||||
|
@ -926,10 +928,10 @@ func (r *containerStore) SetBigData(id, key string, data []byte) error {
|
|||
if !ok {
|
||||
return ErrContainerUnknown
|
||||
}
|
||||
if err := os.MkdirAll(r.datadir(c.ID), 0700); err != nil {
|
||||
if err := os.MkdirAll(r.datadir(c.ID), 0o700); err != nil {
|
||||
return err
|
||||
}
|
||||
err := ioutils.AtomicWriteFile(r.datapath(c.ID, key), data, 0600)
|
||||
err := ioutils.AtomicWriteFile(r.datapath(c.ID, key), data, 0o600)
|
||||
if err == nil {
|
||||
save := false
|
||||
if c.BigDataSizes == nil {
|
||||
|
|
|
@ -64,7 +64,7 @@ var (
|
|||
enableDirperm bool
|
||||
)
|
||||
|
||||
const defaultPerms = os.FileMode(0555)
|
||||
const defaultPerms = os.FileMode(0o555)
|
||||
|
||||
func init() {
|
||||
graphdriver.MustRegister("aufs", Init)
|
||||
|
@ -87,11 +87,9 @@ type Driver struct {
|
|||
// Init returns a new AUFS driver.
|
||||
// An error is returned if AUFS is not supported.
|
||||
func Init(home string, options graphdriver.Options) (graphdriver.Driver, error) {
|
||||
|
||||
// Try to load the aufs kernel module
|
||||
if err := supportsAufs(); err != nil {
|
||||
return nil, fmt.Errorf("kernel does not support aufs: %w", graphdriver.ErrNotSupported)
|
||||
|
||||
}
|
||||
|
||||
fsMagic, err := graphdriver.GetFSMagic(home)
|
||||
|
@ -145,7 +143,7 @@ func Init(home string, options graphdriver.Options) (graphdriver.Driver, error)
|
|||
// Create the root aufs driver dir and return
|
||||
// if it already exists
|
||||
// If not populate the dir structure
|
||||
if err := idtools.MkdirAllAs(home, 0700, rootUID, rootGID); err != nil {
|
||||
if err := idtools.MkdirAllAs(home, 0o700, rootUID, rootGID); err != nil {
|
||||
if os.IsExist(err) {
|
||||
return a, nil
|
||||
}
|
||||
|
@ -158,7 +156,7 @@ func Init(home string, options graphdriver.Options) (graphdriver.Driver, error)
|
|||
|
||||
// Populate the dir structure
|
||||
for _, p := range paths {
|
||||
if err := idtools.MkdirAllAs(path.Join(home, p), 0700, rootUID, rootGID); err != nil {
|
||||
if err := idtools.MkdirAllAs(path.Join(home, p), 0o700, rootUID, rootGID); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
|
@ -251,9 +249,21 @@ func (a *Driver) Exists(id string) bool {
|
|||
return true
|
||||
}
|
||||
|
||||
// List layers (not including additional image stores)
|
||||
// ListLayers() returns all of the layers known to the driver.
|
||||
func (a *Driver) ListLayers() ([]string, error) {
|
||||
return nil, graphdriver.ErrNotSupported
|
||||
diffsDir := filepath.Join(a.rootPath(), "diff")
|
||||
entries, err := os.ReadDir(diffsDir)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
results := make([]string, 0, len(entries))
|
||||
for _, entry := range entries {
|
||||
if !entry.IsDir() {
|
||||
continue
|
||||
}
|
||||
results = append(results, entry.Name())
|
||||
}
|
||||
return results, nil
|
||||
}
|
||||
|
||||
// AdditionalImageStores returns additional image stores supported by the driver
|
||||
|
@ -278,7 +288,6 @@ func (a *Driver) CreateReadWrite(id, parent string, opts *graphdriver.CreateOpts
|
|||
// Create three folders for each id
|
||||
// mnt, layers, and diff
|
||||
func (a *Driver) Create(id, parent string, opts *graphdriver.CreateOpts) error {
|
||||
|
||||
if opts != nil && len(opts.StorageOpt) != 0 {
|
||||
return fmt.Errorf("--storage-opt is not supported for aufs")
|
||||
}
|
||||
|
|
|
@ -42,7 +42,7 @@ import (
|
|||
"golang.org/x/sys/unix"
|
||||
)
|
||||
|
||||
const defaultPerms = os.FileMode(0555)
|
||||
const defaultPerms = os.FileMode(0o555)
|
||||
|
||||
func init() {
|
||||
graphdriver.MustRegister("btrfs", Init)
|
||||
|
@ -56,7 +56,6 @@ type btrfsOptions struct {
|
|||
// Init returns a new BTRFS driver.
|
||||
// An error is returned if BTRFS is not supported.
|
||||
func Init(home string, options graphdriver.Options) (graphdriver.Driver, error) {
|
||||
|
||||
fsMagic, err := graphdriver.GetFSMagic(home)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
|
@ -70,7 +69,7 @@ func Init(home string, options graphdriver.Options) (graphdriver.Driver, error)
|
|||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if err := idtools.MkdirAllAs(home, 0700, rootUID, rootGID); err != nil {
|
||||
if err := idtools.MkdirAllAs(filepath.Join(home, "subvolumes"), 0o700, rootUID, rootGID); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
|
@ -119,7 +118,7 @@ func parseOptions(opt []string) (btrfsOptions, bool, error) {
|
|||
case "btrfs.mountopt":
|
||||
return options, userDiskQuota, fmt.Errorf("btrfs driver does not support mount options")
|
||||
default:
|
||||
return options, userDiskQuota, fmt.Errorf("unknown option %s", key)
|
||||
return options, userDiskQuota, fmt.Errorf("unknown option %s (%q)", key, option)
|
||||
}
|
||||
}
|
||||
return options, userDiskQuota, nil
|
||||
|
@ -226,7 +225,7 @@ func subvolSnapshot(src, dest, name string) error {
|
|||
var args C.struct_btrfs_ioctl_vol_args_v2
|
||||
args.fd = C.__s64(getDirFd(srcDir))
|
||||
|
||||
var cs = C.CString(name)
|
||||
cs := C.CString(name)
|
||||
C.set_name_btrfs_ioctl_vol_args_v2(&args, cs)
|
||||
C.free(unsafe.Pointer(cs))
|
||||
|
||||
|
@ -479,13 +478,13 @@ func (d *Driver) CreateReadWrite(id, parent string, opts *graphdriver.CreateOpts
|
|||
|
||||
// Create the filesystem with given id.
|
||||
func (d *Driver) Create(id, parent string, opts *graphdriver.CreateOpts) error {
|
||||
quotas := path.Join(d.home, "quotas")
|
||||
subvolumes := path.Join(d.home, "subvolumes")
|
||||
quotas := d.quotasDir()
|
||||
subvolumes := d.subvolumesDir()
|
||||
rootUID, rootGID, err := idtools.GetRootUIDGID(d.uidMaps, d.gidMaps)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if err := idtools.MkdirAllAs(subvolumes, 0700, rootUID, rootGID); err != nil {
|
||||
if err := idtools.MkdirAllAs(subvolumes, 0o700, rootUID, rootGID); err != nil {
|
||||
return err
|
||||
}
|
||||
if parent == "" {
|
||||
|
@ -523,10 +522,10 @@ func (d *Driver) Create(id, parent string, opts *graphdriver.CreateOpts) error {
|
|||
if err := d.setStorageSize(path.Join(subvolumes, id), driver); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := idtools.MkdirAllAs(quotas, 0700, rootUID, rootGID); err != nil {
|
||||
if err := idtools.MkdirAllAs(quotas, 0o700, rootUID, rootGID); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := os.WriteFile(path.Join(quotas, id), []byte(fmt.Sprint(driver.options.size)), 0644); err != nil {
|
||||
if err := os.WriteFile(path.Join(quotas, id), []byte(fmt.Sprint(driver.options.size)), 0o644); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
@ -560,7 +559,7 @@ func (d *Driver) parseStorageOpt(storageOpt map[string]string, driver *Driver) e
|
|||
}
|
||||
driver.options.size = uint64(size)
|
||||
default:
|
||||
return fmt.Errorf("unknown option %s", key)
|
||||
return fmt.Errorf("unknown option %s (%q)", key, storageOpt)
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -679,9 +678,21 @@ func (d *Driver) Exists(id string) bool {
|
|||
return err == nil
|
||||
}
|
||||
|
||||
// List layers (not including additional image stores)
|
||||
// List all of the layers known to the driver.
|
||||
func (d *Driver) ListLayers() ([]string, error) {
|
||||
return nil, graphdriver.ErrNotSupported
|
||||
subvolumesDir := filepath.Join(d.home, "subvolumes")
|
||||
entries, err := os.ReadDir(subvolumesDir)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
results := make([]string, 0, len(entries))
|
||||
for _, entry := range entries {
|
||||
if !entry.IsDir() {
|
||||
continue
|
||||
}
|
||||
results = append(results, entry.Name())
|
||||
}
|
||||
return results, nil
|
||||
}
|
||||
|
||||
// AdditionalImageStores returns additional image stores supported by the driver
|
||||
|
|
|
@ -10,8 +10,7 @@ import (
|
|||
"github.com/containers/storage/pkg/idtools"
|
||||
)
|
||||
|
||||
type platformChowner struct {
|
||||
}
|
||||
type platformChowner struct{}
|
||||
|
||||
func newLChowner() *platformChowner {
|
||||
return &platformChowner{}
|
||||
|
|
|
@ -11,6 +11,7 @@ package copy
|
|||
#endif
|
||||
*/
|
||||
import "C"
|
||||
|
||||
import (
|
||||
"container/list"
|
||||
"errors"
|
||||
|
|
|
@ -177,7 +177,7 @@ func writeLVMConfig(root string, cfg directLVMConfig) error {
|
|||
if err != nil {
|
||||
return fmt.Errorf("marshalling direct lvm config: %w", err)
|
||||
}
|
||||
if err := os.WriteFile(p, b, 0600); err != nil {
|
||||
if err := os.WriteFile(p, b, 0o600); err != nil {
|
||||
return fmt.Errorf("writing direct lvm config to file: %w", err)
|
||||
}
|
||||
return nil
|
||||
|
@ -193,7 +193,7 @@ func setupDirectLVM(cfg directLVMConfig) error {
|
|||
}
|
||||
}
|
||||
|
||||
err := os.MkdirAll(lvmProfileDir, 0755)
|
||||
err := os.MkdirAll(lvmProfileDir, 0o755)
|
||||
if err != nil {
|
||||
return fmt.Errorf("creating lvm profile directory: %w", err)
|
||||
}
|
||||
|
@ -241,7 +241,7 @@ func setupDirectLVM(cfg directLVMConfig) error {
|
|||
}
|
||||
|
||||
profile := fmt.Sprintf("activation{\nthin_pool_autoextend_threshold=%d\nthin_pool_autoextend_percent=%d\n}", cfg.AutoExtendThreshold, cfg.AutoExtendPercent)
|
||||
err = os.WriteFile(lvmProfileDir+"/storage-thinpool.profile", []byte(profile), 0600)
|
||||
err = os.WriteFile(lvmProfileDir+"/storage-thinpool.profile", []byte(profile), 0o600)
|
||||
if err != nil {
|
||||
return fmt.Errorf("writing storage thinp autoextend profile: %w", err)
|
||||
}
|
||||
|
|
|
@ -273,7 +273,7 @@ func (devices *DeviceSet) ensureImage(name string, size int64) (string, error) {
|
|||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
if err := idtools.MkdirAllAs(dirname, 0700, uid, gid); err != nil {
|
||||
if err := idtools.MkdirAllAs(dirname, 0o700, uid, gid); err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
||||
|
@ -282,7 +282,7 @@ func (devices *DeviceSet) ensureImage(name string, size int64) (string, error) {
|
|||
return "", err
|
||||
}
|
||||
logrus.Debugf("devmapper: Creating loopback file %s for device-manage use", filename)
|
||||
file, err := os.OpenFile(filename, os.O_RDWR|os.O_CREATE, 0600)
|
||||
file, err := os.OpenFile(filename, os.O_RDWR|os.O_CREATE, 0o600)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
@ -293,7 +293,7 @@ func (devices *DeviceSet) ensureImage(name string, size int64) (string, error) {
|
|||
}
|
||||
} else {
|
||||
if fi.Size() < size {
|
||||
file, err := os.OpenFile(filename, os.O_RDWR|os.O_CREATE, 0600)
|
||||
file, err := os.OpenFile(filename, os.O_RDWR|os.O_CREATE, 0o600)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
@ -421,7 +421,6 @@ func (devices *DeviceSet) constructDeviceIDMap() {
|
|||
}
|
||||
|
||||
func (devices *DeviceSet) deviceFileWalkFunction(path string, name string) error {
|
||||
|
||||
// Skip some of the meta files which are not device files.
|
||||
if strings.HasSuffix(name, ".migrated") {
|
||||
logrus.Debugf("devmapper: Skipping file %s", path)
|
||||
|
@ -458,7 +457,7 @@ func (devices *DeviceSet) loadDeviceFilesOnStart() error {
|
|||
logrus.Debug("devmapper: loadDeviceFilesOnStart()")
|
||||
defer logrus.Debug("devmapper: loadDeviceFilesOnStart() END")
|
||||
|
||||
var scan = func(path string, d fs.DirEntry, err error) error {
|
||||
scan := func(path string, d fs.DirEntry, err error) error {
|
||||
if err != nil {
|
||||
logrus.Debugf("devmapper: Can't walk the file %s: %v", path, err)
|
||||
return nil
|
||||
|
@ -1001,6 +1000,10 @@ func (devices *DeviceSet) verifyBaseDeviceUUIDFS(baseInfo *devInfo) error {
|
|||
devices.Lock()
|
||||
defer devices.Unlock()
|
||||
|
||||
if devices.filesystem == "" {
|
||||
devices.filesystem = determineDefaultFS()
|
||||
}
|
||||
|
||||
if err := devices.activateDeviceIfNeeded(baseInfo, false); err != nil {
|
||||
return err
|
||||
}
|
||||
|
@ -1152,7 +1155,6 @@ func (devices *DeviceSet) setupVerifyBaseImageUUIDFS(baseInfo *devInfo) error {
|
|||
}
|
||||
|
||||
func (devices *DeviceSet) checkGrowBaseDeviceFS(info *devInfo) error {
|
||||
|
||||
if !userBaseSize {
|
||||
return nil
|
||||
}
|
||||
|
@ -1191,7 +1193,7 @@ func (devices *DeviceSet) growFS(info *devInfo) error {
|
|||
|
||||
fsMountPoint := "/run/containers/storage/mnt"
|
||||
if _, err := os.Stat(fsMountPoint); os.IsNotExist(err) {
|
||||
if err := os.MkdirAll(fsMountPoint, 0700); err != nil {
|
||||
if err := os.MkdirAll(fsMountPoint, 0o700); err != nil {
|
||||
return err
|
||||
}
|
||||
defer os.RemoveAll(fsMountPoint)
|
||||
|
@ -1657,7 +1659,6 @@ func (devices *DeviceSet) loadThinPoolLoopBackInfo() error {
|
|||
}
|
||||
|
||||
func (devices *DeviceSet) enableDeferredRemovalDeletion() error {
|
||||
|
||||
// If user asked for deferred removal then check both libdm library
|
||||
// and kernel driver support deferred removal otherwise error out.
|
||||
if enableDeferredRemoval {
|
||||
|
@ -1701,10 +1702,13 @@ func (devices *DeviceSet) initDevmapper(doInit bool) (retErr error) {
|
|||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if err := idtools.MkdirAs(devices.root, 0700, uid, gid); err != nil {
|
||||
if err := idtools.MkdirAs(devices.root, 0o700, uid, gid); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := os.MkdirAll(devices.metadataDir(), 0700); err != nil {
|
||||
if err := os.MkdirAll(devices.metadataDir(), 0o700); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := idtools.MkdirAs(filepath.Join(devices.root, "mnt"), 0o700, uid, gid); err != nil && !errors.Is(err, os.ErrExist) {
|
||||
return err
|
||||
}
|
||||
|
||||
|
@ -1811,7 +1815,7 @@ func (devices *DeviceSet) initDevmapper(doInit bool) (retErr error) {
|
|||
devices.dataLoopFile = data
|
||||
devices.dataDevice = dataFile.Name()
|
||||
} else {
|
||||
dataFile, err = os.OpenFile(devices.dataDevice, os.O_RDWR, 0600)
|
||||
dataFile, err = os.OpenFile(devices.dataDevice, os.O_RDWR, 0o600)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
@ -1844,7 +1848,7 @@ func (devices *DeviceSet) initDevmapper(doInit bool) (retErr error) {
|
|||
devices.metadataLoopFile = metadata
|
||||
devices.metadataDevice = metadataFile.Name()
|
||||
} else {
|
||||
metadataFile, err = os.OpenFile(devices.metadataDevice, os.O_RDWR, 0600)
|
||||
metadataFile, err = os.OpenFile(devices.metadataDevice, os.O_RDWR, 0o600)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
@ -1966,7 +1970,6 @@ func (devices *DeviceSet) AddDevice(hash, baseHash string, storageOpt map[string
|
|||
}
|
||||
|
||||
func (devices *DeviceSet) parseStorageOpt(storageOpt map[string]string) (uint64, error) {
|
||||
|
||||
// Read size to change the block device size per container.
|
||||
for key, val := range storageOpt {
|
||||
key := strings.ToLower(key)
|
||||
|
@ -2483,6 +2486,26 @@ func (devices *DeviceSet) List() []string {
|
|||
return ids
|
||||
}
|
||||
|
||||
// ListLayers returns a list of device IDs, omitting the ""/"base" device and
|
||||
// any which have been marked as deleted.
|
||||
func (devices *DeviceSet) ListLayers() ([]string, error) {
|
||||
if err := devices.cleanupDeletedDevices(); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
devices.Lock()
|
||||
defer devices.Unlock()
|
||||
|
||||
ids := make([]string, 0, len(devices.Devices))
|
||||
for k, d := range devices.Devices {
|
||||
if k == "" || d.Deleted {
|
||||
continue
|
||||
}
|
||||
ids = append(ids, k)
|
||||
}
|
||||
return ids, nil
|
||||
}
|
||||
|
||||
func (devices *DeviceSet) deviceStatus(devName string) (sizeInSectors, mappedSectors, highestMappedSector uint64, err error) {
|
||||
var params string
|
||||
_, sizeInSectors, _, params, err = devicemapper.GetStatus(devName)
|
||||
|
@ -2520,7 +2543,6 @@ func (devices *DeviceSet) GetDeviceStatus(hash string) (*DevStatus, error) {
|
|||
}
|
||||
|
||||
sizeInSectors, mappedSectors, highestMappedSector, err := devices.deviceStatus(info.DevName())
|
||||
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
|
|
@ -20,7 +20,7 @@ import (
|
|||
"golang.org/x/sys/unix"
|
||||
)
|
||||
|
||||
const defaultPerms = os.FileMode(0555)
|
||||
const defaultPerms = os.FileMode(0o555)
|
||||
|
||||
func init() {
|
||||
graphdriver.MustRegister("devicemapper", Init)
|
||||
|
@ -55,7 +55,6 @@ func Init(home string, options graphdriver.Options) (graphdriver.Driver, error)
|
|||
ctr: graphdriver.NewRefCounter(graphdriver.NewDefaultChecker()),
|
||||
locker: locker.New(),
|
||||
}
|
||||
|
||||
return graphdriver.NewNaiveDiffDriver(d, graphdriver.NewNaiveLayerIDMapUpdater(d)), nil
|
||||
}
|
||||
|
||||
|
@ -103,7 +102,6 @@ func (d *Driver) Status() [][2]string {
|
|||
// Metadata returns a map of information about the device.
|
||||
func (d *Driver) Metadata(id string) (map[string]string, error) {
|
||||
m, err := d.DeviceSet.exportDeviceMetadata(id)
|
||||
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
@ -202,11 +200,11 @@ func (d *Driver) Get(id string, options graphdriver.MountOpts) (string, error) {
|
|||
}
|
||||
|
||||
// Create the target directories if they don't exist
|
||||
if err := idtools.MkdirAllAs(path.Join(d.home, "mnt"), 0755, uid, gid); err != nil {
|
||||
if err := idtools.MkdirAllAs(path.Join(d.home, "mnt"), 0o755, uid, gid); err != nil {
|
||||
d.ctr.Decrement(mp)
|
||||
return "", err
|
||||
}
|
||||
if err := idtools.MkdirAs(mp, 0755, uid, gid); err != nil && !os.IsExist(err) {
|
||||
if err := idtools.MkdirAs(mp, 0o755, uid, gid); err != nil && !os.IsExist(err) {
|
||||
d.ctr.Decrement(mp)
|
||||
return "", err
|
||||
}
|
||||
|
@ -227,7 +225,7 @@ func (d *Driver) Get(id string, options graphdriver.MountOpts) (string, error) {
|
|||
if _, err := os.Stat(idFile); err != nil && os.IsNotExist(err) {
|
||||
// Create an "id" file with the container/image id in it to help reconstruct this in case
|
||||
// of later problems
|
||||
if err := os.WriteFile(idFile, []byte(id), 0600); err != nil {
|
||||
if err := os.WriteFile(idFile, []byte(id), 0o600); err != nil {
|
||||
d.ctr.Decrement(mp)
|
||||
d.DeviceSet.UnmountDevice(id, mp)
|
||||
return "", err
|
||||
|
@ -267,11 +265,6 @@ func (d *Driver) Exists(id string) bool {
|
|||
return d.DeviceSet.HasDevice(id)
|
||||
}
|
||||
|
||||
// List layers (not including additional image stores)
|
||||
func (d *Driver) ListLayers() ([]string, error) {
|
||||
return nil, graphdriver.ErrNotSupported
|
||||
}
|
||||
|
||||
// AdditionalImageStores returns additional image stores supported by the driver
|
||||
func (d *Driver) AdditionalImageStores() []string {
|
||||
return nil
|
||||
|
|
|
@ -111,6 +111,10 @@ type ProtoDriver interface {
|
|||
Exists(id string) bool
|
||||
// Returns a list of layer ids that exist on this driver (does not include
|
||||
// additional storage layers). Not supported by all backends.
|
||||
// If the driver requires that layers be removed in a particular order,
|
||||
// usually due to parent-child relationships that it cares about, the
|
||||
// list should be sorted well enough so that if all layers need to be
|
||||
// removed, they can be removed in the order in which they're returned.
|
||||
ListLayers() ([]string, error)
|
||||
// Status returns a set of key-value pairs which give low
|
||||
// level diagnostic status about this driver.
|
||||
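A hedged caller sketch for the ListLayers contract described above: drivers may return graphdriver.ErrNotSupported when they cannot enumerate layers (the aufs, btrfs, and devicemapper implementations in this diff now can), and the ordering contract lets a caller delete in the returned order. Remove is assumed from the same ProtoDriver interface.

func removeAllDriverLayers(driver graphdriver.Driver) error {
	layers, err := driver.ListLayers()
	if err != nil {
		if errors.Is(err, graphdriver.ErrNotSupported) {
			return nil // fall back to the layer store's own records
		}
		return err
	}
	for _, id := range layers {
		if err := driver.Remove(id); err != nil {
			return err
		}
	}
	return nil
}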
|
@ -322,6 +326,7 @@ func getBuiltinDriver(name, home string, options Options) (Driver, error) {
|
|||
type Options struct {
|
||||
Root string
|
||||
RunRoot string
|
||||
ImageStore string
|
||||
DriverPriority []string
|
||||
DriverOptions []string
|
||||
UIDMaps []idtools.IDMap
|
||||
|
@ -337,12 +342,12 @@ func New(name string, config Options) (Driver, error) {
|
|||
}
|
||||
|
||||
// Guess for prior driver
|
||||
driversMap := scanPriorDrivers(config.Root)
|
||||
driversMap := ScanPriorDrivers(config.Root)
|
||||
|
||||
// use the supplied priority list unless it is empty
|
||||
prioList := config.DriverPriority
|
||||
if len(prioList) == 0 {
|
||||
prioList = priority
|
||||
prioList = Priority
|
||||
}
|
||||
|
||||
for _, name := range prioList {
|
||||
|
@ -414,12 +419,12 @@ func isDriverNotSupported(err error) bool {
|
|||
}
|
||||
|
||||
// scanPriorDrivers returns an un-ordered scan of directories of prior storage drivers
|
||||
func scanPriorDrivers(root string) map[string]bool {
|
||||
func ScanPriorDrivers(root string) map[string]bool {
|
||||
driversMap := make(map[string]bool)
|
||||
|
||||
for driver := range drivers {
|
||||
p := filepath.Join(root, driver)
|
||||
if _, err := os.Stat(p); err == nil && driver != "vfs" {
|
||||
if _, err := os.Stat(p); err == nil {
|
||||
driversMap[driver] = true
|
||||
}
|
||||
}
|
||||
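A hedged sketch: with scanPriorDrivers exported as ScanPriorDrivers (and the per-platform priority list as Priority), code outside the package can check which driver directories already exist under a storage root; the root path passed in is only an example.

func reportPriorDrivers(storageRoot string) {
	for driver := range graphdriver.ScanPriorDrivers(storageRoot) {
		fmt.Println("found prior driver state:", driver)
	}
}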
|
|
|
@ -1,11 +1,9 @@
|
|||
package graphdriver
|
||||
|
||||
var (
|
||||
// Slice of drivers that should be used in order
|
||||
priority = []string{
|
||||
var Priority = []string{
|
||||
"vfs",
|
||||
}
|
||||
)
|
||||
|
||||
// GetFSMagic returns the filesystem id given the path.
|
||||
func GetFSMagic(rootpath string) (FsMagic, error) {
|
||||
|
|
|
@ -13,7 +13,7 @@ const (
|
|||
|
||||
var (
|
||||
// Slice of drivers that should be used in an order
|
||||
priority = []string{
|
||||
Priority = []string{
|
||||
"zfs",
|
||||
"vfs",
|
||||
}
|
||||
|
@ -31,8 +31,7 @@ func NewDefaultChecker() Checker {
|
|||
return &defaultChecker{}
|
||||
}
|
||||
|
||||
type defaultChecker struct {
|
||||
}
|
||||
type defaultChecker struct{}
|
||||
|
||||
func (c *defaultChecker) IsMounted(path string) bool {
|
||||
m, _ := mount.Mounted(path)
|
||||
|
|
|
@ -90,7 +90,7 @@ const (
|
|||
|
||||
var (
|
||||
// Slice of drivers that should be used in an order
|
||||
priority = []string{
|
||||
Priority = []string{
|
||||
"overlay",
|
||||
// We don't support devicemapper without configuration
|
||||
// "devicemapper",
|
||||
|
@ -161,8 +161,7 @@ func NewDefaultChecker() Checker {
|
|||
return &defaultChecker{}
|
||||
}
|
||||
|
||||
type defaultChecker struct {
|
||||
}
|
||||
type defaultChecker struct{}
|
||||
|
||||
func (c *defaultChecker) IsMounted(path string) bool {
|
||||
m, _ := mount.Mounted(path)
|
||||
|
|
|
@ -16,6 +16,7 @@ static inline struct statvfs *getstatfs(char *s) {
|
|||
}
|
||||
*/
|
||||
import "C"
|
||||
|
||||
import (
|
||||
"path/filepath"
|
||||
"unsafe"
|
||||
|
@ -31,7 +32,7 @@ const (
|
|||
|
||||
var (
|
||||
// Slice of drivers that should be used in an order
|
||||
priority = []string{
|
||||
Priority = []string{
|
||||
"zfs",
|
||||
}
|
||||
|
||||
|
@ -69,8 +70,7 @@ func NewDefaultChecker() Checker {
|
|||
return &defaultChecker{}
|
||||
}
|
||||
|
||||
type defaultChecker struct {
|
||||
}
|
||||
type defaultChecker struct{}
|
||||
|
||||
func (c *defaultChecker) IsMounted(path string) bool {
|
||||
m, _ := mount.Mounted(path)
|
||||
|
@ -80,7 +80,6 @@ func (c *defaultChecker) IsMounted(path string) bool {
|
|||
// Mounted checks if the given path is mounted as the fs type
|
||||
// Solaris supports only ZFS for now
|
||||
func Mounted(fsType FsMagic, mountPath string) (bool, error) {
|
||||
|
||||
cs := C.CString(filepath.Dir(mountPath))
|
||||
defer C.free(unsafe.Pointer(cs))
|
||||
buf := C.getstatfs(cs)
|
||||
|
|
|
@ -3,12 +3,10 @@
|
|||
|
||||
package graphdriver
|
||||
|
||||
var (
|
||||
// Slice of drivers that should be used in an order
|
||||
priority = []string{
|
||||
var Priority = []string{
|
||||
"unsupported",
|
||||
}
|
||||
)
|
||||
|
||||
// GetFSMagic returns the filesystem id given the path.
|
||||
func GetFSMagic(rootpath string) (FsMagic, error) {
|
||||
|
|
|
@ -1,11 +1,9 @@
|
|||
package graphdriver
|
||||
|
||||
var (
|
||||
// Slice of drivers that should be used in order
|
||||
priority = []string{
|
||||
var Priority = []string{
|
||||
"windowsfilter",
|
||||
}
|
||||
)
|
||||
|
||||
// GetFSMagic returns the filesystem id given the path.
|
||||
func GetFSMagic(rootpath string) (FsMagic, error) {
|
||||
|
|
|
@ -14,11 +14,9 @@ import (
|
|||
"github.com/sirupsen/logrus"
|
||||
)
|
||||
|
||||
var (
|
||||
// ApplyUncompressedLayer defines the unpack method used by the graph
|
||||
// driver.
|
||||
ApplyUncompressedLayer = chrootarchive.ApplyUncompressedLayer
|
||||
)
|
||||
var ApplyUncompressedLayer = chrootarchive.ApplyUncompressedLayer
|
||||
|
||||
// NaiveDiffDriver takes a ProtoDriver and adds the
|
||||
// capability of the Diffing methods which it may or may not
|
||||
|
@ -173,7 +171,7 @@ func (gdw *NaiveDiffDriver) ApplyDiff(id, parent string, options ApplyDiffOpts)
|
|||
}
|
||||
defer driverPut(driver, id, &err)
|
||||
|
||||
defaultForceMask := os.FileMode(0700)
|
||||
defaultForceMask := os.FileMode(0o700)
|
||||
var forceMask *os.FileMode // = nil
|
||||
if runtime.GOOS == "darwin" {
|
||||
forceMask = &defaultForceMask
|
||||
|
|
|
@ -38,22 +38,22 @@ func doesSupportNativeDiff(d, mountOpts string) error {
|
|||
}()
|
||||
|
||||
// Make directories l1/d, l1/d1, l2/d, l3, work, merged
|
||||
if err := os.MkdirAll(filepath.Join(td, "l1", "d"), 0755); err != nil {
|
||||
if err := os.MkdirAll(filepath.Join(td, "l1", "d"), 0o755); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := os.MkdirAll(filepath.Join(td, "l1", "d1"), 0755); err != nil {
|
||||
if err := os.MkdirAll(filepath.Join(td, "l1", "d1"), 0o755); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := os.MkdirAll(filepath.Join(td, "l2", "d"), 0755); err != nil {
|
||||
if err := os.MkdirAll(filepath.Join(td, "l2", "d"), 0o755); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := os.Mkdir(filepath.Join(td, "l3"), 0755); err != nil {
|
||||
if err := os.Mkdir(filepath.Join(td, "l3"), 0o755); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := os.Mkdir(filepath.Join(td, "work"), 0755); err != nil {
|
||||
if err := os.Mkdir(filepath.Join(td, "work"), 0o755); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := os.Mkdir(filepath.Join(td, "merged"), 0755); err != nil {
|
||||
if err := os.Mkdir(filepath.Join(td, "merged"), 0o755); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
|
@ -82,7 +82,7 @@ func doesSupportNativeDiff(d, mountOpts string) error {
|
|||
}()
|
||||
|
||||
// Touch file in d to force copy up of opaque directory "d" from "l2" to "l3"
|
||||
if err := os.WriteFile(filepath.Join(td, "merged", "d", "f"), []byte{}, 0644); err != nil {
|
||||
if err := os.WriteFile(filepath.Join(td, "merged", "d", "f"), []byte{}, 0o644); err != nil {
|
||||
return fmt.Errorf("failed to write to merged directory: %w", err)
|
||||
}
|
||||
|
||||
|
@ -132,19 +132,19 @@ func doesMetacopy(d, mountOpts string) (bool, error) {
|
|||
}()
|
||||
|
||||
// Make directories l1, l2, work, merged
|
||||
if err := os.MkdirAll(filepath.Join(td, "l1"), 0755); err != nil {
|
||||
if err := os.MkdirAll(filepath.Join(td, "l1"), 0o755); err != nil {
|
||||
return false, err
|
||||
}
|
||||
if err := ioutils.AtomicWriteFile(filepath.Join(td, "l1", "f"), []byte{0xff}, 0700); err != nil {
|
||||
if err := ioutils.AtomicWriteFile(filepath.Join(td, "l1", "f"), []byte{0xff}, 0o700); err != nil {
|
||||
return false, err
|
||||
}
|
||||
if err := os.MkdirAll(filepath.Join(td, "l2"), 0755); err != nil {
|
||||
if err := os.MkdirAll(filepath.Join(td, "l2"), 0o755); err != nil {
|
||||
return false, err
|
||||
}
|
||||
if err := os.Mkdir(filepath.Join(td, "work"), 0755); err != nil {
|
||||
if err := os.Mkdir(filepath.Join(td, "work"), 0o755); err != nil {
|
||||
return false, err
|
||||
}
|
||||
if err := os.Mkdir(filepath.Join(td, "merged"), 0755); err != nil {
|
||||
if err := os.Mkdir(filepath.Join(td, "merged"), 0o755); err != nil {
|
||||
return false, err
|
||||
}
|
||||
// Mount using the mandatory options and configured options
|
||||
|
@ -170,7 +170,7 @@ func doesMetacopy(d, mountOpts string) (bool, error) {
|
|||
}()
|
||||
// Make a change that only impacts the inode, and check if the pulled-up copy is marked
|
||||
// as a metadata-only copy
|
||||
if err := os.Chmod(filepath.Join(td, "merged", "f"), 0600); err != nil {
|
||||
if err := os.Chmod(filepath.Join(td, "merged", "f"), 0o600); err != nil {
|
||||
return false, fmt.Errorf("changing permissions on file for metacopy check: %w", err)
|
||||
}
|
||||
metacopy, err := system.Lgetxattr(filepath.Join(td, "l2", "f"), archive.GetOverlayXattrName("metacopy"))
|
||||
|
@ -196,20 +196,23 @@ func doesVolatile(d string) (bool, error) {
|
|||
}
|
||||
}()
|
||||
|
||||
if err := os.MkdirAll(filepath.Join(td, "lower"), 0755); err != nil {
|
||||
if err := os.MkdirAll(filepath.Join(td, "lower"), 0o755); err != nil {
|
||||
return false, err
|
||||
}
|
||||
if err := os.MkdirAll(filepath.Join(td, "upper"), 0755); err != nil {
|
||||
if err := os.MkdirAll(filepath.Join(td, "upper"), 0o755); err != nil {
|
||||
return false, err
|
||||
}
|
||||
if err := os.Mkdir(filepath.Join(td, "work"), 0755); err != nil {
|
||||
if err := os.Mkdir(filepath.Join(td, "work"), 0o755); err != nil {
|
||||
return false, err
|
||||
}
|
||||
if err := os.Mkdir(filepath.Join(td, "merged"), 0755); err != nil {
|
||||
if err := os.Mkdir(filepath.Join(td, "merged"), 0o755); err != nil {
|
||||
return false, err
|
||||
}
|
||||
// Mount using the mandatory options and configured options
|
||||
opts := fmt.Sprintf("volatile,lowerdir=%s,upperdir=%s,workdir=%s", path.Join(td, "lower"), path.Join(td, "upper"), path.Join(td, "work"))
|
||||
if unshare.IsRootless() {
|
||||
opts = fmt.Sprintf("%s,userxattr", opts)
|
||||
}
|
||||
if err := unix.Mount("overlay", filepath.Join(td, "merged"), "overlay", 0, opts); err != nil {
|
||||
return false, fmt.Errorf("failed to mount overlay for volatile check: %w", err)
|
||||
}
|
||||
|
@ -238,11 +241,11 @@ func supportsIdmappedLowerLayers(home string) (bool, error) {
|
|||
upperDir := filepath.Join(layerDir, "upper")
|
||||
workDir := filepath.Join(layerDir, "work")
|
||||
|
||||
_ = idtools.MkdirAs(mergedDir, 0700, 0, 0)
|
||||
_ = idtools.MkdirAs(lowerDir, 0700, 0, 0)
|
||||
_ = idtools.MkdirAs(lowerMappedDir, 0700, 0, 0)
|
||||
_ = idtools.MkdirAs(upperDir, 0700, 0, 0)
|
||||
_ = idtools.MkdirAs(workDir, 0700, 0, 0)
|
||||
_ = idtools.MkdirAs(mergedDir, 0o700, 0, 0)
|
||||
_ = idtools.MkdirAs(lowerDir, 0o700, 0, 0)
|
||||
_ = idtools.MkdirAs(lowerMappedDir, 0o700, 0, 0)
|
||||
_ = idtools.MkdirAs(upperDir, 0o700, 0, 0)
|
||||
_ = idtools.MkdirAs(workDir, 0o700, 0, 0)
|
||||
|
||||
mapping := []idtools.IDMap{
|
||||
{
|
||||
|
|
|
@ -29,7 +29,6 @@ import (
|
|||
"github.com/containers/storage/pkg/idtools"
|
||||
"github.com/containers/storage/pkg/mount"
|
||||
"github.com/containers/storage/pkg/parsers"
|
||||
"github.com/containers/storage/pkg/stringid"
|
||||
"github.com/containers/storage/pkg/system"
|
||||
"github.com/containers/storage/pkg/unshare"
|
||||
units "github.com/docker/go-units"
|
||||
|
@ -41,13 +40,13 @@ import (
|
|||
"golang.org/x/sys/unix"
|
||||
)
|
||||
|
||||
var (
|
||||
// untar defines the untar method
|
||||
untar = chrootarchive.UntarUncompressed
|
||||
)
|
||||
var untar = chrootarchive.UntarUncompressed
|
||||
|
||||
const (
|
||||
defaultPerms = os.FileMode(0555)
|
||||
defaultPerms = os.FileMode(0o555)
|
||||
selinuxLabelTest = "system_u:object_r:container_file_t:s0"
|
||||
mountProgramFlagFile = ".has-mount-program"
|
||||
)
|
||||
|
||||
// This backend uses the overlay union filesystem for containers
|
||||
|
@ -79,6 +78,7 @@ const (
|
|||
|
||||
const (
|
||||
linkDir = "l"
|
||||
stagingDir = "staging"
|
||||
lowerFile = "lower"
|
||||
maxDepth = 500
|
||||
|
||||
|
@ -110,6 +110,7 @@ type Driver struct {
|
|||
name string
|
||||
home string
|
||||
runhome string
|
||||
imageStore string
|
||||
uidMaps []idtools.IDMap
|
||||
gidMaps []idtools.IDMap
|
||||
ctr *graphdriver.RefCounter
|
||||
|
@ -124,7 +125,6 @@ type Driver struct {
|
|||
}
|
||||
|
||||
type additionalLayerStore struct {
|
||||
|
||||
// path is the directory where this store is available on the host.
|
||||
path string
|
||||
|
||||
|
@ -175,11 +175,11 @@ func hasVolatileOption(opts []string) bool {
|
|||
}
|
||||
|
||||
func getMountProgramFlagFile(path string) string {
|
||||
return filepath.Join(path, ".has-mount-program")
|
||||
return filepath.Join(path, mountProgramFlagFile)
|
||||
}
|
||||
|
||||
func checkSupportVolatile(home, runhome string) (bool, error) {
|
||||
feature := fmt.Sprintf("volatile")
|
||||
const feature = "volatile"
|
||||
volatileCacheResult, _, err := cachedFeatureCheck(runhome, feature)
|
||||
var usingVolatile bool
|
||||
if err == nil {
|
||||
|
@ -200,6 +200,8 @@ func checkSupportVolatile(home, runhome string) (bool, error) {
|
|||
if err = cachedFeatureRecord(runhome, feature, usingVolatile, ""); err != nil {
|
||||
return false, fmt.Errorf("recording volatile-being-used status: %w", err)
|
||||
}
|
||||
} else {
|
||||
usingVolatile = false
|
||||
}
|
||||
}
|
||||
return usingVolatile, nil
|
||||
|
@ -303,6 +305,16 @@ func isNetworkFileSystem(fsMagic graphdriver.FsMagic) bool {
|
|||
// If overlay filesystem is not supported on the host, a wrapped graphdriver.ErrNotSupported is returned as error.
|
||||
// If an overlay filesystem is not supported over an existing filesystem then a wrapped graphdriver.ErrIncompatibleFS is returned.
|
||||
func Init(home string, options graphdriver.Options) (graphdriver.Driver, error) {
|
||||
// If custom --imagestore is selected never
|
||||
// ditch the original graphRoot, instead add it as
|
||||
// additionalImageStore so its images can still be
|
||||
// read and used.
|
||||
if options.ImageStore != "" {
|
||||
graphRootAsAdditionalStore := fmt.Sprintf("AdditionalImageStore=%s", options.ImageStore)
|
||||
options.DriverOptions = append(options.DriverOptions, graphRootAsAdditionalStore)
|
||||
// complete base name with driver name included
|
||||
options.ImageStore = filepath.Join(options.ImageStore, "overlay")
|
||||
}
|
||||
opts, err := parseOptions(options.DriverOptions)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
|
@ -325,11 +337,17 @@ func Init(home string, options graphdriver.Options) (graphdriver.Driver, error)
|
|||
}
|
||||
|
||||
// Create the driver home dir
|
||||
if err := idtools.MkdirAllAs(path.Join(home, linkDir), 0755, 0, 0); err != nil {
|
||||
if err := idtools.MkdirAllAs(path.Join(home, linkDir), 0o755, 0, 0); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if err := idtools.MkdirAllAs(runhome, 0700, rootUID, rootGID); err != nil {
|
||||
if options.ImageStore != "" {
|
||||
if err := idtools.MkdirAllAs(path.Join(options.ImageStore, linkDir), 0o755, 0, 0); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
|
||||
if err := idtools.MkdirAllAs(runhome, 0o700, rootUID, rootGID); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
|
@ -345,12 +363,12 @@ func Init(home string, options graphdriver.Options) (graphdriver.Driver, error)
|
|||
|
||||
if opts.mountProgram != "" {
|
||||
if unshare.IsRootless() && isNetworkFileSystem(fsMagic) && opts.forceMask == nil {
|
||||
m := os.FileMode(0700)
|
||||
m := os.FileMode(0o700)
|
||||
opts.forceMask = &m
|
||||
logrus.Warnf("Network file system detected as backing store. Enforcing overlay option `force_mask=\"%o\"`. Add it to storage.conf to silence this warning", m)
|
||||
}
|
||||
|
||||
if err := os.WriteFile(getMountProgramFlagFile(home), []byte("true"), 0600); err != nil {
|
||||
if err := os.WriteFile(getMountProgramFlagFile(home), []byte("true"), 0o600); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
} else {
|
||||
|
@ -420,6 +438,7 @@ func Init(home string, options graphdriver.Options) (graphdriver.Driver, error)
|
|||
d := &Driver{
|
||||
name: "overlay",
|
||||
home: home,
|
||||
imageStore: options.ImageStore,
|
||||
runhome: runhome,
|
||||
uidMaps: options.UIDMaps,
|
||||
gidMaps: options.GIDMaps,
|
||||
|
@ -560,9 +579,9 @@ func parseOptions(options []string) (*overlayOptions, error) {
|
|||
var mask int64
|
||||
switch val {
|
||||
case "shared":
|
||||
mask = 0755
|
||||
mask = 0o755
|
||||
case "private":
|
||||
mask = 0700
|
||||
mask = 0o700
|
||||
default:
|
||||
mask, err = strconv.ParseInt(val, 8, 32)
|
||||
if err != nil {
|
||||
|
@ -627,7 +646,7 @@ func SupportsNativeOverlay(home, runhome string) (bool, error) {
|
|||
if err != nil && !os.IsNotExist(err) {
|
||||
return false, err
|
||||
}
|
||||
if err := os.WriteFile(getMountProgramFlagFile(home), []byte(fmt.Sprintf("%t", needsMountProgram)), 0600); err != nil && !os.IsNotExist(err) {
|
||||
if err := os.WriteFile(getMountProgramFlagFile(home), []byte(fmt.Sprintf("%t", needsMountProgram)), 0o600); err != nil && !os.IsNotExist(err) {
|
||||
return false, err
|
||||
}
|
||||
if needsMountProgram {
|
||||
|
@ -640,7 +659,7 @@ func SupportsNativeOverlay(home, runhome string) (bool, error) {
|
|||
|
||||
for _, dir := range []string{home, runhome} {
|
||||
if _, err := os.Stat(dir); err != nil {
|
||||
_ = idtools.MkdirAllAs(dir, 0700, 0, 0)
|
||||
_ = idtools.MkdirAllAs(dir, 0o700, 0, 0)
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -700,12 +719,12 @@ func supportsOverlay(home string, homeMagic graphdriver.FsMagic, rootUID, rootGI
|
|||
_ = os.RemoveAll(layerDir)
|
||||
_ = os.Remove(home)
|
||||
}()
|
||||
_ = idtools.MkdirAs(mergedDir, 0700, rootUID, rootGID)
|
||||
_ = idtools.MkdirAs(lower1Dir, 0700, rootUID, rootGID)
|
||||
_ = idtools.MkdirAs(lower2Dir, 0700, rootUID, rootGID)
|
||||
_ = idtools.MkdirAs(lower2Subdir, 0700, rootUID, rootGID)
|
||||
_ = idtools.MkdirAs(upperDir, 0700, rootUID, rootGID)
|
||||
_ = idtools.MkdirAs(workDir, 0700, rootUID, rootGID)
|
||||
_ = idtools.MkdirAs(mergedDir, 0o700, rootUID, rootGID)
|
||||
_ = idtools.MkdirAs(lower1Dir, 0o700, rootUID, rootGID)
|
||||
_ = idtools.MkdirAs(lower2Dir, 0o700, rootUID, rootGID)
|
||||
_ = idtools.MkdirAs(lower2Subdir, 0o700, rootUID, rootGID)
|
||||
_ = idtools.MkdirAs(upperDir, 0o700, rootUID, rootGID)
|
||||
_ = idtools.MkdirAs(workDir, 0o700, rootUID, rootGID)
|
||||
f, err := os.Create(lower2SubdirFile)
|
||||
if err != nil {
|
||||
logrus.Debugf("Unable to create test file: %v", err)
|
||||
|
@ -723,7 +742,7 @@ func supportsOverlay(home string, homeMagic graphdriver.FsMagic, rootUID, rootGI
|
|||
if unshare.IsRootless() {
|
||||
flags = fmt.Sprintf("%s,userxattr", flags)
|
||||
}
|
||||
if err := syscall.Mknod(filepath.Join(upperDir, "whiteout"), syscall.S_IFCHR|0600, int(unix.Mkdev(0, 0))); err != nil {
|
||||
if err := syscall.Mknod(filepath.Join(upperDir, "whiteout"), syscall.S_IFCHR|0o600, int(unix.Mkdev(0, 0))); err != nil {
|
||||
logrus.Debugf("Unable to create kernel-style whiteout: %v", err)
|
||||
return supportsDType, fmt.Errorf("unable to create kernel-style whiteout: %w", err)
|
||||
}
|
||||
|
@ -806,15 +825,22 @@ func (d *Driver) Status() [][2]string {
|
|||
// Metadata returns meta data about the overlay driver such as
|
||||
// LowerDir, UpperDir, WorkDir and MergeDir used to store data.
|
||||
func (d *Driver) Metadata(id string) (map[string]string, error) {
|
||||
dir := d.dir(id)
|
||||
dir, imagestore, _ := d.dir2(id)
|
||||
if _, err := os.Stat(dir); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
workDirBase := dir
|
||||
if imagestore != "" {
|
||||
if _, err := os.Stat(dir); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
workDirBase = imagestore
|
||||
}
|
||||
|
||||
metadata := map[string]string{
|
||||
"WorkDir": path.Join(dir, "work"),
|
||||
"MergedDir": path.Join(dir, "merged"),
|
||||
"UpperDir": path.Join(dir, "diff"),
|
||||
"WorkDir": path.Join(workDirBase, "work"),
|
||||
"MergedDir": path.Join(workDirBase, "merged"),
|
||||
"UpperDir": path.Join(workDirBase, "diff"),
|
||||
}
|
||||
|
||||
lowerDirs, err := d.getLowerDirs(id)
|
||||
|
@ -929,7 +955,7 @@ func (d *Driver) Create(id, parent string, opts *graphdriver.CreateOpts) (retErr
|
|||
}
|
||||
|
||||
func (d *Driver) create(id, parent string, opts *graphdriver.CreateOpts, disableQuota bool) (retErr error) {
|
||||
dir := d.dir(id)
|
||||
dir, imageStore, _ := d.dir2(id)
|
||||
|
||||
uidMaps := d.uidMaps
|
||||
gidMaps := d.gidMaps
|
||||
|
@ -940,7 +966,7 @@ func (d *Driver) create(id, parent string, opts *graphdriver.CreateOpts, disable
|
|||
}
|
||||
|
||||
// Make the link directory if it does not exist
|
||||
if err := idtools.MkdirAllAs(path.Join(d.home, linkDir), 0755, 0, 0); err != nil {
|
||||
if err := idtools.MkdirAllAs(path.Join(d.home, linkDir), 0o755, 0, 0); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
|
@ -954,11 +980,22 @@ func (d *Driver) create(id, parent string, opts *graphdriver.CreateOpts, disable
|
|||
GID: rootGID,
|
||||
}
|
||||
|
||||
if err := idtools.MkdirAllAndChownNew(path.Dir(dir), 0755, idPair); err != nil {
|
||||
if err := idtools.MkdirAllAndChownNew(path.Dir(dir), 0o755, idPair); err != nil {
|
||||
return err
|
||||
}
|
||||
workDirBase := dir
|
||||
if imageStore != "" {
|
||||
workDirBase = imageStore
|
||||
if err := idtools.MkdirAllAndChownNew(path.Dir(imageStore), 0o755, idPair); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
if parent != "" {
|
||||
st, err := system.Stat(d.dir(parent))
|
||||
parentBase, parentImageStore, _ := d.dir2(parent)
|
||||
if parentImageStore != "" {
|
||||
parentBase = parentImageStore
|
||||
}
|
||||
st, err := system.Stat(filepath.Join(parentBase, "diff"))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
@ -975,9 +1012,14 @@ func (d *Driver) create(id, parent string, opts *graphdriver.CreateOpts, disable
|
|||
}
|
||||
}
|
||||
|
||||
if err := idtools.MkdirAllAndChownNew(dir, 0700, idPair); err != nil {
|
||||
if err := idtools.MkdirAllAndChownNew(dir, 0o700, idPair); err != nil {
|
||||
return err
|
||||
}
|
||||
if imageStore != "" {
|
||||
if err := idtools.MkdirAllAndChownNew(imageStore, 0o700, idPair); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
defer func() {
|
||||
// Clean up on failure
|
||||
|
@ -985,6 +1027,11 @@ func (d *Driver) create(id, parent string, opts *graphdriver.CreateOpts, disable
|
|||
if err2 := os.RemoveAll(dir); err2 != nil {
|
||||
logrus.Errorf("While recovering from a failure creating a layer, error deleting %#v: %v", dir, err2)
|
||||
}
|
||||
if imageStore != "" {
|
||||
if err2 := os.RemoveAll(workDirBase); err2 != nil {
|
||||
logrus.Errorf("While recovering from a failure creating a layer, error deleting %#v: %v", dir, err2)
|
||||
}
|
||||
}
|
||||
}
|
||||
}()
|
||||
|
||||
|
@ -1007,44 +1054,60 @@ func (d *Driver) create(id, parent string, opts *graphdriver.CreateOpts, disable
|
|||
if err := d.quotaCtl.SetQuota(dir, quota); err != nil {
|
||||
return err
|
||||
}
|
||||
if imageStore != "" {
|
||||
if err := d.quotaCtl.SetQuota(imageStore, quota); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
perms := defaultPerms
|
||||
if d.options.forceMask != nil {
|
||||
perms = *d.options.forceMask
|
||||
}
|
||||
|
||||
if parent != "" {
|
||||
st, err := system.Stat(filepath.Join(d.dir(parent), "diff"))
|
||||
parentDir, parentImageStore, _ := d.dir2(parent)
|
||||
base := parentDir
|
||||
if parentImageStore != "" {
|
||||
base = parentImageStore
|
||||
}
|
||||
st, err := system.Stat(filepath.Join(base, "diff"))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
perms = os.FileMode(st.Mode())
|
||||
}
|
||||
|
||||
if err := idtools.MkdirAs(path.Join(dir, "diff"), perms, rootUID, rootGID); err != nil {
|
||||
if err := idtools.MkdirAs(path.Join(workDirBase, "diff"), perms, rootUID, rootGID); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
lid := generateID(idLength)
|
||||
if err := os.Symlink(path.Join("..", id, "diff"), path.Join(d.home, linkDir, lid)); err != nil {
|
||||
|
||||
linkBase := path.Join("..", id, "diff")
|
||||
if imageStore != "" {
|
||||
linkBase = path.Join(imageStore, "diff")
|
||||
}
|
||||
if err := os.Symlink(linkBase, path.Join(d.home, linkDir, lid)); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Write link id to link file
|
||||
if err := os.WriteFile(path.Join(dir, "link"), []byte(lid), 0644); err != nil {
|
||||
if err := os.WriteFile(path.Join(dir, "link"), []byte(lid), 0o644); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if err := idtools.MkdirAs(path.Join(dir, "work"), 0700, rootUID, rootGID); err != nil {
|
||||
if err := idtools.MkdirAs(path.Join(workDirBase, "work"), 0o700, rootUID, rootGID); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := idtools.MkdirAs(path.Join(dir, "merged"), 0700, rootUID, rootGID); err != nil {
|
||||
if err := idtools.MkdirAs(path.Join(dir, "merged"), 0o700, rootUID, rootGID); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// if no parent directory, create a dummy lower directory and skip writing a "lowers" file
|
||||
if parent == "" {
|
||||
return idtools.MkdirAs(path.Join(dir, "empty"), 0700, rootUID, rootGID)
|
||||
return idtools.MkdirAs(path.Join(dir, "empty"), 0o700, rootUID, rootGID)
|
||||
}
|
||||
|
||||
lower, err := d.getLower(parent)
|
||||
|
@ -1052,7 +1115,7 @@ func (d *Driver) create(id, parent string, opts *graphdriver.CreateOpts, disable
|
|||
return err
|
||||
}
|
||||
if lower != "" {
|
||||
if err := os.WriteFile(path.Join(dir, lowerFile), []byte(lower), 0666); err != nil {
|
||||
if err := os.WriteFile(path.Join(dir, lowerFile), []byte(lower), 0o666); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
@ -1120,22 +1183,26 @@ func (d *Driver) getLower(parent string) (string, error) {
|
|||
}
|
||||
|
||||
func (d *Driver) dir(id string) string {
|
||||
p, _ := d.dir2(id)
|
||||
p, _, _ := d.dir2(id)
|
||||
return p
|
||||
}
|
||||
|
||||
func (d *Driver) dir2(id string) (string, bool) {
|
||||
func (d *Driver) dir2(id string) (string, string, bool) {
|
||||
newpath := path.Join(d.home, id)
|
||||
imageStore := ""
|
||||
if d.imageStore != "" {
|
||||
imageStore = path.Join(d.imageStore, id)
|
||||
}
|
||||
if _, err := os.Stat(newpath); err != nil {
|
||||
for _, p := range d.AdditionalImageStores() {
|
||||
l := path.Join(p, d.name, id)
|
||||
_, err = os.Stat(l)
|
||||
if err == nil {
|
||||
return l, true
|
||||
return l, imageStore, true
|
||||
}
|
||||
}
|
||||
}
|
||||
return newpath, false
|
||||
return newpath, imageStore, false
|
||||
}
|
||||
|
||||
func (d *Driver) getLowerDirs(id string) ([]string, error) {
|
||||
|
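dir2 now returns a second value: the layer's path under the separate --imagestore, when one is configured. Callers throughout this patch (Metadata, create, get, getDiffPath, UpdateLayerIDMap) consume it the same way, so the pattern reduces to the small helper below. The helper name is illustrative only and assumes the overlay Driver type defined in this file:

```go
// workDirBase reports where a layer's writable data ("diff", "work", ...)
// lives: under the configured image store when one is set, otherwise under
// the layer directory in the graph root. Illustrative sketch, not part of
// the patch.
func (d *Driver) workDirBase(id string) string {
	dir, imageStore, _ := d.dir2(id)
	if imageStore != "" {
		return imageStore
	}
	return dir
}
```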
@ -1223,6 +1290,9 @@ func (d *Driver) Remove(id string) error {
|
|||
}
|
||||
if d.quotaCtl != nil {
|
||||
d.quotaCtl.ClearQuota(dir)
|
||||
if d.imageStore != "" {
|
||||
d.quotaCtl.ClearQuota(d.imageStore)
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
@ -1240,7 +1310,7 @@ func (d *Driver) recreateSymlinks() error {
|
|||
return fmt.Errorf("reading driver home directory %q: %w", d.home, err)
|
||||
}
|
||||
// This makes the link directory if it doesn't exist
|
||||
if err := idtools.MkdirAllAs(path.Join(d.home, linkDir), 0755, 0, 0); err != nil {
|
||||
if err := idtools.MkdirAllAs(path.Join(d.home, linkDir), 0o755, 0, 0); err != nil {
|
||||
return err
|
||||
}
|
||||
// Keep looping as long as we take some corrective action in each iteration
|
||||
|
@ -1317,7 +1387,7 @@ func (d *Driver) recreateSymlinks() error {
|
|||
if err != nil || string(data) != link.Name() {
|
||||
// NOTE: If two or more links point to the same target, we will update linkFile
|
||||
// with every value of link.Name(), and set madeProgress = true every time.
|
||||
if err := os.WriteFile(linkFile, []byte(link.Name()), 0644); err != nil {
|
||||
if err := os.WriteFile(linkFile, []byte(link.Name()), 0o644); err != nil {
|
||||
errs = multierror.Append(errs, fmt.Errorf("correcting link for layer %s: %w", targetID, err))
|
||||
continue
|
||||
}
|
||||
|
@ -1342,10 +1412,14 @@ func (d *Driver) Get(id string, options graphdriver.MountOpts) (string, error) {
|
|||
}
|
||||
|
||||
func (d *Driver) get(id string, disableShifting bool, options graphdriver.MountOpts) (_ string, retErr error) {
|
||||
dir, inAdditionalStore := d.dir2(id)
|
||||
dir, imageStore, inAdditionalStore := d.dir2(id)
|
||||
if _, err := os.Stat(dir); err != nil {
|
||||
return "", err
|
||||
}
|
||||
workDirBase := dir
|
||||
if imageStore != "" {
|
||||
workDirBase = imageStore
|
||||
}
|
||||
readWrite := !inAdditionalStore
|
||||
|
||||
if !d.SupportsShifting() || options.DisableShifting {
|
||||
|
@ -1478,18 +1552,18 @@ func (d *Driver) get(id string, disableShifting bool, options graphdriver.MountO
|
|||
absLowers = append(absLowers, path.Join(dir, "empty"))
|
||||
}
|
||||
// user namespace requires this to move a directory from lower to upper.
|
||||
rootUID, rootGID, err := idtools.GetRootUIDGID(d.uidMaps, d.gidMaps)
|
||||
rootUID, rootGID, err := idtools.GetRootUIDGID(options.UidMaps, options.GidMaps)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
diffDir := path.Join(dir, "diff")
|
||||
diffDir := path.Join(workDirBase, "diff")
|
||||
if err := idtools.MkdirAllAs(diffDir, perms, rootUID, rootGID); err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
||||
mergedDir := path.Join(dir, "merged")
|
||||
// Create the driver merged dir
|
||||
if err := idtools.MkdirAs(mergedDir, 0700, rootUID, rootGID); err != nil && !os.IsExist(err) {
|
||||
if err := idtools.MkdirAs(mergedDir, 0o700, rootUID, rootGID); err != nil && !os.IsExist(err) {
|
||||
return "", err
|
||||
}
|
||||
if count := d.ctr.Increment(mergedDir); count > 1 {
|
||||
|
@ -1505,7 +1579,7 @@ func (d *Driver) get(id string, disableShifting bool, options graphdriver.MountO
|
|||
}
|
||||
}()
|
||||
|
||||
workdir := path.Join(dir, "work")
|
||||
workdir := path.Join(workDirBase, "work")
|
||||
|
||||
if d.options.mountProgram == "" && unshare.IsRootless() {
|
||||
optsList = append(optsList, "userxattr")
|
||||
|
@ -1525,7 +1599,7 @@ func (d *Driver) get(id string, disableShifting bool, options graphdriver.MountO
|
|||
if !disableShifting && len(options.UidMaps) > 0 && len(options.GidMaps) > 0 && d.options.mountProgram == "" {
|
||||
var newAbsDir []string
|
||||
mappedRoot := filepath.Join(d.home, id, "mapped")
|
||||
if err := os.MkdirAll(mappedRoot, 0700); err != nil {
|
||||
if err := os.MkdirAll(mappedRoot, 0o700); err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
||||
|
@ -1612,16 +1686,15 @@ func (d *Driver) get(id string, disableShifting bool, options graphdriver.MountO
|
|||
// Use mountFrom when the mount data has exceeded the page size. The mount syscall fails if
|
||||
// the mount data cannot fit within a page and relative links make the mount data much
|
||||
// smaller at the expense of requiring a fork exec to chdir().
|
||||
|
||||
workdir = path.Join(id, "work")
|
||||
if readWrite {
|
||||
diffDir := path.Join(id, "diff")
|
||||
opts = fmt.Sprintf("lowerdir=%s,upperdir=%s,workdir=%s", strings.Join(absLowers, ":"), diffDir, workdir)
|
||||
workDir := path.Join(id, "work")
|
||||
opts = fmt.Sprintf("lowerdir=%s,upperdir=%s,workdir=%s", strings.Join(absLowers, ":"), diffDir, workDir)
|
||||
} else {
|
||||
opts = fmt.Sprintf("lowerdir=%s:%s", diffDir, strings.Join(absLowers, ":"))
|
||||
}
|
||||
if len(optsList) > 0 {
|
||||
opts = fmt.Sprintf("%s,%s", opts, strings.Join(optsList, ","))
|
||||
opts = strings.Join(append([]string{opts}, optsList...), ",")
|
||||
}
|
||||
mountData = label.FormatMountLabel(opts, options.MountLabel)
|
||||
mountFunc = func(source string, target string, mType string, flags uintptr, label string) error {
|
||||
|
@ -1631,9 +1704,9 @@ func (d *Driver) get(id string, disableShifting bool, options graphdriver.MountO
|
|||
}
|
||||
|
||||
// overlay has a check in place to prevent mounting the same file system twice
|
||||
// if volatile was already specified.
|
||||
err = os.RemoveAll(filepath.Join(workdir, "work/incompat/volatile"))
|
||||
if err != nil && !os.IsNotExist(err) {
|
||||
// if volatile was already specified. Yes, the kernel repeats the "work" component.
|
||||
err = os.RemoveAll(filepath.Join(workdir, "work", "incompat", "volatile"))
|
||||
if err != nil && !errors.Is(err, os.ErrNotExist) {
|
||||
return "", err
|
||||
}
|
||||
|
||||
|
@ -1703,11 +1776,13 @@ func (d *Driver) Put(id string) error {
|
|||
if !unmounted {
|
||||
if err := unix.Unmount(mountpoint, unix.MNT_DETACH); err != nil && !os.IsNotExist(err) {
|
||||
logrus.Debugf("Failed to unmount %s overlay: %s - %v", id, mountpoint, err)
|
||||
return fmt.Errorf("unmounting %q: %w", mountpoint, err)
|
||||
}
|
||||
}
|
||||
|
||||
if err := unix.Rmdir(mountpoint); err != nil && !os.IsNotExist(err) {
|
||||
logrus.Debugf("Failed to remove mountpoint %s overlay: %s - %v", id, mountpoint, err)
|
||||
return fmt.Errorf("removing mount point %q: %w", mountpoint, err)
|
||||
}
|
||||
|
||||
return nil
|
||||
|
@ -1725,20 +1800,23 @@ func (d *Driver) ListLayers() ([]string, error) {
if err != nil {
return nil, err
}

layers := make([]string, 0)

for _, entry := range entries {
id := entry.Name()
switch id {
case linkDir, stagingDir, quota.BackingFsBlockDeviceLink, mountProgramFlagFile:
// expected, but not a layer. skip it
continue
default:
// Does it look like a datadir directory?
if !entry.IsDir() || stringid.ValidateID(id) != nil {
if !entry.IsDir() {
continue
}

layers = append(layers, id)
}

return layers, err
}
return layers, nil
}

// isParent returns if the passed in parent is the direct parent of the passed in layer
@ -1795,7 +1873,7 @@ func (g *overlayFileGetter) Close() error {
|
|||
}
|
||||
|
||||
func (d *Driver) getStagingDir() string {
|
||||
return filepath.Join(d.home, "staging")
|
||||
return filepath.Join(d.home, stagingDir)
|
||||
}
|
||||
|
||||
// DiffGetter returns a FileGetCloser that can read files from the directory that
|
||||
|
@ -1831,7 +1909,7 @@ func (d *Driver) ApplyDiffWithDiffer(id, parent string, options *graphdriver.App
|
|||
var applyDir string
|
||||
|
||||
if id == "" {
|
||||
err := os.MkdirAll(d.getStagingDir(), 0700)
|
||||
err := os.MkdirAll(d.getStagingDir(), 0o700)
|
||||
if err != nil && !os.IsExist(err) {
|
||||
return graphdriver.DriverWithDifferOutput{}, err
|
||||
}
|
||||
|
@ -1884,7 +1962,6 @@ func (d *Driver) DifferTarget(id string) (string, error) {
|
|||
|
||||
// ApplyDiff applies the new layer into a root
|
||||
func (d *Driver) ApplyDiff(id, parent string, options graphdriver.ApplyDiffOpts) (size int64, err error) {
|
||||
|
||||
if !d.isParent(id, parent) {
|
||||
if d.options.ignoreChownErrors {
|
||||
options.IgnoreChownErrors = d.options.ignoreChownErrors
|
||||
|
@ -1922,8 +1999,12 @@ func (d *Driver) ApplyDiff(id, parent string, options graphdriver.ApplyDiffOpts)
|
|||
}
|
||||
|
||||
func (d *Driver) getDiffPath(id string) (string, error) {
|
||||
dir := d.dir(id)
|
||||
return redirectDiffIfAdditionalLayer(path.Join(dir, "diff"))
|
||||
dir, imagestore, _ := d.dir2(id)
|
||||
base := dir
|
||||
if imagestore != "" {
|
||||
base = imagestore
|
||||
}
|
||||
return redirectDiffIfAdditionalLayer(path.Join(base, "diff"))
|
||||
}
|
||||
|
||||
func (d *Driver) getLowerDiffPaths(id string) ([]string, error) {
|
||||
|
@ -2014,8 +2095,12 @@ func (d *Driver) AdditionalImageStores() []string {
|
|||
// by toContainer to those specified by toHost.
|
||||
func (d *Driver) UpdateLayerIDMap(id string, toContainer, toHost *idtools.IDMappings, mountLabel string) error {
|
||||
var err error
|
||||
dir := d.dir(id)
|
||||
diffDir := filepath.Join(dir, "diff")
|
||||
dir, imagestore, _ := d.dir2(id)
|
||||
base := dir
|
||||
if imagestore != "" {
|
||||
base = imagestore
|
||||
}
|
||||
diffDir := filepath.Join(base, "diff")
|
||||
|
||||
rootUID, rootGID := 0, 0
|
||||
if toHost != nil {
|
||||
|
@ -2196,7 +2281,7 @@ func (al *additionalLayer) CreateAs(id, parent string) error {
|
|||
}
|
||||
// tell the additional layer store that we use this layer.
|
||||
// mark this layer as "additional layer"
|
||||
if err := os.WriteFile(path.Join(dir, "additionallayer"), []byte(al.path), 0644); err != nil {
|
||||
if err := os.WriteFile(path.Join(dir, "additionallayer"), []byte(al.path), 0o644); err != nil {
|
||||
return err
|
||||
}
|
||||
notifyUseAdditionalLayer(al.path)
|
||||
|
|
|
@ -50,6 +50,7 @@ struct fsxattr {
|
|||
#endif
|
||||
*/
|
||||
import "C"
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
|
@ -67,6 +68,10 @@ import (
|
|||
|
||||
const projectIDsAllocatedPerQuotaHome = 10000
|
||||
|
||||
// BackingFsBlockDeviceLink is the name of a file that we place in
|
||||
// the home directory of a driver that uses this package.
|
||||
const BackingFsBlockDeviceLink = "backingFsBlockDev"
|
||||
|
||||
// Quota limit params - currently we only control blocks hard limit and inodes
|
||||
type Quota struct {
|
||||
Size uint64
|
||||
|
@ -94,7 +99,6 @@ func generateUniqueProjectID(path string) (uint32, error) {
|
|||
stat, ok := fileinfo.Sys().(*syscall.Stat_t)
|
||||
if !ok {
|
||||
return 0, fmt.Errorf("not a syscall.Stat_t %s", path)
|
||||
|
||||
}
|
||||
projectID := projectIDsAllocatedPerQuotaHome + (stat.Ino*projectIDsAllocatedPerQuotaHome)%(math.MaxUint32-projectIDsAllocatedPerQuotaHome)
|
||||
return uint32(projectID), nil
|
||||
|
@ -187,7 +191,6 @@ func NewControl(basePath string) (*Control, error) {
|
|||
// SetQuota - assign a unique project id to directory and set the quota limits
|
||||
// for that project id
|
||||
func (q *Control) SetQuota(targetPath string, quota Quota) error {
|
||||
|
||||
projectID, ok := q.quotas[targetPath]
|
||||
if !ok {
|
||||
projectID = q.nextProjectID
|
||||
|
@ -235,7 +238,7 @@ func (q *Control) setProjectQuota(projectID uint32, quota Quota) error {
|
|||
d.d_ino_softlimit = d.d_ino_hardlimit
|
||||
}
|
||||
|
||||
var cs = C.CString(q.backingFsBlockDev)
|
||||
cs := C.CString(q.backingFsBlockDev)
|
||||
defer C.free(unsafe.Pointer(cs))
|
||||
|
||||
runQuotactl := func() syscall.Errno {
|
||||
|
@ -303,7 +306,7 @@ func (q *Control) fsDiskQuotaFromPath(targetPath string) (C.fs_disk_quota_t, err
|
|||
//
|
||||
// get the quota limit for the container's project id
|
||||
//
|
||||
var cs = C.CString(q.backingFsBlockDev)
|
||||
cs := C.CString(q.backingFsBlockDev)
|
||||
defer C.free(unsafe.Pointer(cs))
|
||||
|
||||
_, _, errno := unix.Syscall6(unix.SYS_QUOTACTL, C.Q_XGETPQUOTA,
|
||||
|
@ -395,9 +398,9 @@ func openDir(path string) (*C.DIR, error) {
|
|||
Cpath := C.CString(path)
|
||||
defer free(Cpath)
|
||||
|
||||
dir := C.opendir(Cpath)
|
||||
dir, errno := C.opendir(Cpath)
|
||||
if dir == nil {
|
||||
return nil, fmt.Errorf("can't open dir %v", Cpath)
|
||||
return nil, fmt.Errorf("can't open dir %v: %w", Cpath, errno)
|
||||
}
|
||||
return dir, nil
|
||||
}
|
||||
|
@ -421,10 +424,10 @@ func makeBackingFsDev(home string) (string, error) {
|
|||
return "", err
|
||||
}
|
||||
|
||||
backingFsBlockDev := path.Join(home, "backingFsBlockDev")
|
||||
backingFsBlockDev := path.Join(home, BackingFsBlockDeviceLink)
|
||||
backingFsBlockDevTmp := backingFsBlockDev + ".tmp"
|
||||
// Re-create just in case someone copied the home directory over to a new device
|
||||
if err := unix.Mknod(backingFsBlockDevTmp, unix.S_IFBLK|0600, int(stat.Dev)); err != nil {
|
||||
if err := unix.Mknod(backingFsBlockDevTmp, unix.S_IFBLK|0o600, int(stat.Dev)); err != nil {
|
||||
return "", fmt.Errorf("failed to mknod %s: %w", backingFsBlockDevTmp, err)
|
||||
}
|
||||
if err := unix.Rename(backingFsBlockDevTmp, backingFsBlockDev); err != nil {
|
||||
|
|
|
@ -15,8 +15,7 @@ type Quota struct {
|
|||
|
||||
// Control - Context to be used by storage driver (e.g. overlay)
|
||||
// who wants to apply project quotas to container dirs
|
||||
type Control struct {
|
||||
}
|
||||
type Control struct{}
|
||||
|
||||
func NewControl(basePath string) (*Control, error) {
|
||||
return nil, errors.New("filesystem does not support, or has not enabled quotas")
|
||||
|
|
|
@ -34,6 +34,7 @@ func NaiveCreateFromTemplate(d TemplateDriver, id, template string, templateIDMa
|
|||
}
|
||||
return err
|
||||
}
|
||||
defer diff.Close()
|
||||
|
||||
applyOptions := ApplyDiffOpts{
|
||||
Diff: diff,
|
||||
|
|
|
@ -14,19 +14,13 @@ import (
|
|||
"github.com/containers/storage/pkg/directory"
|
||||
"github.com/containers/storage/pkg/idtools"
|
||||
"github.com/containers/storage/pkg/parsers"
|
||||
"github.com/containers/storage/pkg/stringid"
|
||||
"github.com/containers/storage/pkg/system"
|
||||
"github.com/opencontainers/selinux/go-selinux/label"
|
||||
"github.com/sirupsen/logrus"
|
||||
"github.com/vbatts/tar-split/tar/storage"
|
||||
)
|
||||
|
||||
var (
|
||||
// CopyDir defines the copy method to use.
|
||||
CopyDir = dirCopy
|
||||
)
|
||||
|
||||
const defaultPerms = os.FileMode(0555)
|
||||
const defaultPerms = os.FileMode(0o555)
|
||||
|
||||
func init() {
|
||||
graphdriver.MustRegister("vfs", Init)
|
||||
|
@ -42,11 +36,10 @@ func Init(home string, options graphdriver.Options) (graphdriver.Driver, error)
|
|||
}
|
||||
|
||||
rootIDs := d.idMappings.RootPair()
|
||||
if err := idtools.MkdirAllAndChown(home, 0700, rootIDs); err != nil {
|
||||
if err := idtools.MkdirAllAndChown(filepath.Join(home, "dir"), 0o700, rootIDs); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
for _, option := range options.DriverOptions {
|
||||
|
||||
key, val, err := parsers.ParseKeyValueOpt(option)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
|
@ -69,6 +62,12 @@ func Init(home string, options graphdriver.Options) (graphdriver.Driver, error)
|
|||
return nil, fmt.Errorf("vfs driver does not support %s options", key)
|
||||
}
|
||||
}
|
||||
// If --imagestore is provided, lets add writable graphRoot
|
||||
// to vfs's additional image store, as it is done for
|
||||
// `overlay` driver.
|
||||
if options.ImageStore != "" {
|
||||
d.homes = append(d.homes, options.ImageStore)
|
||||
}
|
||||
d.updater = graphdriver.NewNaiveLayerIDMapUpdater(d)
|
||||
d.naiveDiff = graphdriver.NewNaiveDiffDriver(d, d.updater)
|
||||
|
||||
|
@ -161,7 +160,7 @@ func (d *Driver) create(id, parent string, opts *graphdriver.CreateOpts, ro bool
|
|||
|
||||
dir := d.dir(id)
|
||||
rootIDs := idMappings.RootPair()
|
||||
if err := idtools.MkdirAllAndChown(filepath.Dir(dir), 0700, rootIDs); err != nil {
|
||||
if err := idtools.MkdirAllAndChown(filepath.Dir(dir), 0o700, rootIDs); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
|
@ -173,7 +172,7 @@ func (d *Driver) create(id, parent string, opts *graphdriver.CreateOpts, ro bool
|
|||
|
||||
rootPerms := defaultPerms
|
||||
if runtime.GOOS == "darwin" {
|
||||
rootPerms = os.FileMode(0700)
|
||||
rootPerms = os.FileMode(0o700)
|
||||
}
|
||||
|
||||
if parent != "" {
|
||||
|
@ -203,7 +202,6 @@ func (d *Driver) create(id, parent string, opts *graphdriver.CreateOpts, ro bool
|
|||
}
|
||||
|
||||
return nil
|
||||
|
||||
}
|
||||
|
||||
func (d *Driver) dir(id string) string {
|
||||
|
@ -268,7 +266,7 @@ func (d *Driver) Exists(id string) bool {
|
|||
|
||||
// List layers (not including additional image stores)
|
||||
func (d *Driver) ListLayers() ([]string, error) {
|
||||
entries, err := os.ReadDir(d.homes[0])
|
||||
entries, err := os.ReadDir(filepath.Join(d.homes[0], "dir"))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
@ -278,7 +276,7 @@ func (d *Driver) ListLayers() ([]string, error) {
|
|||
for _, entry := range entries {
|
||||
id := entry.Name()
|
||||
// Does it look like a datadir directory?
|
||||
if !entry.IsDir() || stringid.ValidateID(id) != nil {
|
||||
if !entry.IsDir() {
|
||||
continue
|
||||
}
|
||||
|
||||
|
@ -304,7 +302,15 @@ func (d *Driver) SupportsShifting() bool {
|
|||
// UpdateLayerIDMap updates ID mappings in a from matching the ones specified
|
||||
// by toContainer to those specified by toHost.
|
||||
func (d *Driver) UpdateLayerIDMap(id string, toContainer, toHost *idtools.IDMappings, mountLabel string) error {
|
||||
return d.updater.UpdateLayerIDMap(id, toContainer, toHost, mountLabel)
|
||||
if err := d.updater.UpdateLayerIDMap(id, toContainer, toHost, mountLabel); err != nil {
|
||||
return err
|
||||
}
|
||||
dir := d.dir(id)
|
||||
rootIDs, err := toHost.ToHost(idtools.IDPair{UID: 0, GID: 0})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return os.Chown(dir, rootIDs.UID, rootIDs.GID)
|
||||
}
|
||||
|
||||
// Changes produces a list of changes between the specified layer
|
||||
|
|
|
@ -64,8 +64,7 @@ func init() {
|
|||
}
|
||||
}
|
||||
|
||||
type checker struct {
|
||||
}
|
||||
type checker struct{}
|
||||
|
||||
func (c *checker) IsMounted(path string) bool {
|
||||
return false
|
||||
|
@ -102,7 +101,7 @@ func InitFilter(home string, options graphdriver.Options) (graphdriver.Driver, e
|
|||
return nil, fmt.Errorf("%s is on an ReFS volume - ReFS volumes are not supported", home)
|
||||
}
|
||||
|
||||
if err := idtools.MkdirAllAs(home, 0700, 0, 0); err != nil {
|
||||
if err := idtools.MkdirAllAs(home, 0o700, 0, 0); err != nil {
|
||||
return nil, fmt.Errorf("windowsfilter failed to create '%s': %w", home, err)
|
||||
}
|
||||
|
||||
|
@ -885,7 +884,7 @@ func (d *Driver) resolveID(id string) (string, error) {
|
|||
|
||||
// setID stores the layerId in disk.
|
||||
func (d *Driver) setID(id, altID string) error {
|
||||
return os.WriteFile(filepath.Join(d.dir(id), "layerId"), []byte(altID), 0600)
|
||||
return os.WriteFile(filepath.Join(d.dir(id), "layerId"), []byte(altID), 0o600)
|
||||
}
|
||||
|
||||
// getLayerChain returns the layer chain information.
|
||||
|
@ -915,7 +914,7 @@ func (d *Driver) setLayerChain(id string, chain []string) error {
|
|||
}
|
||||
|
||||
jPath := filepath.Join(d.dir(id), "layerchain.json")
|
||||
err = os.WriteFile(jPath, content, 0600)
|
||||
err = os.WriteFile(jPath, content, 0o600)
|
||||
if err != nil {
|
||||
return fmt.Errorf("unable to write layerchain file - %s", err)
|
||||
}
|
||||
|
|
|
@ -30,7 +30,7 @@ type zfsOptions struct {
|
|||
mountOptions string
|
||||
}
|
||||
|
||||
const defaultPerms = os.FileMode(0555)
|
||||
const defaultPerms = os.FileMode(0o555)
|
||||
|
||||
func init() {
|
||||
graphdriver.MustRegister("zfs", Init)
|
||||
|
@ -57,7 +57,7 @@ func Init(base string, opt graphdriver.Options) (graphdriver.Driver, error) {
|
|||
return nil, fmt.Errorf("the 'zfs' command is not available: %w", graphdriver.ErrPrerequisites)
|
||||
}
|
||||
|
||||
file, err := unix.Open("/dev/zfs", unix.O_RDWR, 0600)
|
||||
file, err := unix.Open("/dev/zfs", unix.O_RDWR, 0o600)
|
||||
if err != nil {
|
||||
logger.Debugf("cannot open /dev/zfs: %v", err)
|
||||
return nil, fmt.Errorf("could not open /dev/zfs: %v: %w", err, graphdriver.ErrPrerequisites)
|
||||
|
@ -110,7 +110,7 @@ func Init(base string, opt graphdriver.Options) (graphdriver.Driver, error) {
|
|||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to get root uid/gid: %w", err)
|
||||
}
|
||||
if err := idtools.MkdirAllAs(base, 0700, rootUID, rootGID); err != nil {
|
||||
if err := idtools.MkdirAllAs(base, 0o700, rootUID, rootGID); err != nil {
|
||||
return nil, fmt.Errorf("failed to create '%s': %w", base, err)
|
||||
}
|
||||
|
||||
|
@ -409,7 +409,6 @@ func (d *Driver) Remove(id string) error {
|
|||
|
||||
// Get returns the mountpoint for the given id after creating the target directories if necessary.
|
||||
func (d *Driver) Get(id string, options graphdriver.MountOpts) (_ string, retErr error) {
|
||||
|
||||
mountpoint := d.mountPath(id)
|
||||
if count := d.ctr.Increment(mountpoint); count > 1 {
|
||||
return mountpoint, nil
|
||||
|
@ -454,7 +453,7 @@ func (d *Driver) Get(id string, options graphdriver.MountOpts) (_ string, retErr
|
|||
return "", err
|
||||
}
|
||||
// Create the target directories if they don't exist
|
||||
if err := idtools.MkdirAllAs(mountpoint, 0755, rootUID, rootGID); err != nil {
|
||||
if err := idtools.MkdirAllAs(mountpoint, 0o755, rootUID, rootGID); err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
||||
|
@ -506,7 +505,9 @@ func (d *Driver) Exists(id string) bool {
|
|||
return d.filesystemsCache[d.zfsPath(id)]
|
||||
}
|
||||
|
||||
// List layers (not including additional image stores)
|
||||
// List layers (not including additional image stores). Our layers aren't all
|
||||
// dependent on a single well-known dataset, so we can't reliably tell which
|
||||
// datasets are ours and which ones just look like they could be ours.
|
||||
func (d *Driver) ListLayers() ([]string, error) {
|
||||
return nil, graphdriver.ErrNotSupported
|
||||
}
|
||||
|
|
|
@ -568,26 +568,28 @@ func (r *imageStore) Save() error {
|
|||
}
|
||||
r.lockfile.AssertLockedForWriting()
|
||||
rpath := r.imagespath()
|
||||
if err := os.MkdirAll(filepath.Dir(rpath), 0700); err != nil {
|
||||
if err := os.MkdirAll(filepath.Dir(rpath), 0o700); err != nil {
|
||||
return err
|
||||
}
|
||||
jdata, err := json.Marshal(&r.images)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if err := ioutils.AtomicWriteFile(rpath, jdata, 0600); err != nil {
|
||||
return err
|
||||
}
|
||||
// This must be done before we write the file, because the process could be terminated
|
||||
// after the file is written but before the lock file is updated.
|
||||
lw, err := r.lockfile.RecordWrite()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
r.lastWrite = lw
|
||||
if err := ioutils.AtomicWriteFile(rpath, jdata, 0o600); err != nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func newImageStore(dir string) (rwImageStore, error) {
|
||||
if err := os.MkdirAll(dir, 0700); err != nil {
|
||||
if err := os.MkdirAll(dir, 0o700); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
lockfile, err := lockfile.GetLockFile(filepath.Join(dir, "images.lock"))
|
||||
|
@ -1015,11 +1017,11 @@ func (r *imageStore) setBigData(image *Image, key string, data []byte, newDigest
|
|||
if key == "" {
|
||||
return fmt.Errorf("can't set empty name for image big data item: %w", ErrInvalidBigDataName)
|
||||
}
|
||||
err := os.MkdirAll(r.datadir(image.ID), 0700)
|
||||
err := os.MkdirAll(r.datadir(image.ID), 0o700)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
err = ioutils.AtomicWriteFile(r.datapath(image.ID, key), data, 0600)
|
||||
err = ioutils.AtomicWriteFile(r.datapath(image.ID, key), data, 0o600)
|
||||
if err == nil {
|
||||
save := false
|
||||
if image.BigDataSizes == nil {
|
||||
|
|
|
@ -657,7 +657,6 @@ func (r *layerStore) Layers() ([]Layer, error) {
// Requires startWriting.
func (r *layerStore) GarbageCollect() error {
layers, err := r.driver.ListLayers()

if err != nil {
if errors.Is(err, drivers.ErrNotSupported) {
return nil

@ -864,7 +863,11 @@ func (r *layerStore) loadMounts() error {
return err
}
layerMounts := []layerMountPoint{}
if err = json.Unmarshal(data, &layerMounts); len(data) == 0 || err == nil {
if len(data) != 0 {
if err := json.Unmarshal(data, &layerMounts); err != nil {
return err
}
}
// Clear all of our mount information. If another process
// unmounted something, it (along with its zero count) won't
// have been encoded into the version of mountpoints.json that

@ -887,10 +890,8 @@ func (r *layerStore) loadMounts() error {
}
}
}
err = nil
}
r.bymount = mounts
return err
return nil
}

// save saves the contents of the store to disk.
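loadMounts now tolerates an empty mountpoints.json but refuses to ignore malformed JSON. A standalone illustration of that stricter decode path, using stand-in field names rather than the real layerMountPoint definition:

```go
package main

import (
	"encoding/json"
	"fmt"
	"os"
)

// mountRecord is a stand-in for the store's per-layer mount records.
type mountRecord struct {
	ID         string `json:"id"`
	MountPoint string `json:"path"`
	MountCount int    `json:"count"`
}

// readMounts accepts an empty file as "no mounts" but reports malformed
// JSON as an error instead of silently discarding it.
func readMounts(path string) ([]mountRecord, error) {
	data, err := os.ReadFile(path)
	if err != nil {
		return nil, err
	}
	var mounts []mountRecord
	if len(data) != 0 {
		if err := json.Unmarshal(data, &mounts); err != nil {
			return nil, fmt.Errorf("parsing %s: %w", path, err)
		}
	}
	return mounts, nil
}

func main() {
	mounts, err := readMounts("mountpoints.json")
	if err != nil {
		fmt.Println("error:", err)
		return
	}
	fmt.Printf("loaded %d mount records\n", len(mounts))
}
```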
@ -920,13 +921,21 @@ func (r *layerStore) saveLayers(saveLocations layerLocations) error {
}
r.lockfile.AssertLockedForWriting()

// This must be done before we write the file, because the process could be terminated
// after the file is written but before the lock file is updated.
lw, err := r.lockfile.RecordWrite()
if err != nil {
return err
}
r.lastWrite = lw

for locationIndex := 0; locationIndex < numLayerLocationIndex; locationIndex++ {
location := layerLocationFromIndex(locationIndex)
if location&saveLocations == 0 {
continue
}
rpath := r.jsonPath[locationIndex]
if err := os.MkdirAll(filepath.Dir(rpath), 0700); err != nil {
if err := os.MkdirAll(filepath.Dir(rpath), 0o700); err != nil {
return err
}
subsetLayers := make([]*Layer, 0, len(r.layers))

@ -944,16 +953,11 @@ func (r *layerStore) saveLayers(saveLocations layerLocations) error {
if location == volatileLayerLocation {
opts.NoSync = true
}
if err := ioutils.AtomicWriteFileWithOpts(rpath, jldata, 0600, &opts); err != nil {
if err := ioutils.AtomicWriteFileWithOpts(rpath, jldata, 0o600, &opts); err != nil {
return err
}
r.layerspathsModified[locationIndex] = opts.ModTime
}
lw, err := r.lockfile.RecordWrite()
if err != nil {
return err
}
r.lastWrite = lw
return nil
}
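The hunks above, and the matching Save and saveMounts changes elsewhere in this diff, move RecordWrite ahead of the data write. The reasoning, reduced to a sketch with placeholder types (the real code uses the lockfile package's last-write tokens):

```go
package storagesketch // illustrative only, not part of this change

// writeRecorder stands in for the store's lockfile; RecordWrite publishes
// "the on-disk data is about to change" so other processes reload it.
type writeRecorder interface {
	RecordWrite() (token uint64, err error)
}

// saveWithRecord records the write before replacing the data file. If the
// process dies between the two steps, other readers merely re-read an
// unchanged file; the old ordering could leave them trusting stale
// in-memory state after the file had already changed on disk.
func saveWithRecord(lf writeRecorder, writeData func() error) error {
	if _, err := lf.RecordWrite(); err != nil {
		return err
	}
	return writeData()
}
```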
@ -965,7 +969,7 @@ func (r *layerStore) saveMounts() error {
|
|||
}
|
||||
r.mountsLockfile.AssertLockedForWriting()
|
||||
mpath := r.mountspath()
|
||||
if err := os.MkdirAll(filepath.Dir(mpath), 0700); err != nil {
|
||||
if err := os.MkdirAll(filepath.Dir(mpath), 0o700); err != nil {
|
||||
return err
|
||||
}
|
||||
mounts := make([]layerMountPoint, 0, len(r.layers))
|
||||
|
@ -982,22 +986,26 @@ func (r *layerStore) saveMounts() error {
|
|||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if err = ioutils.AtomicWriteFile(mpath, jmdata, 0600); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// This must be done before we write the file, because the process could be terminated
|
||||
// after the file is written but before the lock file is updated.
|
||||
lw, err := r.mountsLockfile.RecordWrite()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
r.mountsLastWrite = lw
|
||||
|
||||
if err = ioutils.AtomicWriteFile(mpath, jmdata, 0o600); err != nil {
|
||||
return err
|
||||
}
|
||||
return r.loadMounts()
|
||||
}
|
||||
|
||||
func (s *store) newLayerStore(rundir string, layerdir string, driver drivers.Driver, transient bool) (rwLayerStore, error) {
|
||||
if err := os.MkdirAll(rundir, 0700); err != nil {
|
||||
if err := os.MkdirAll(rundir, 0o700); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if err := os.MkdirAll(layerdir, 0700); err != nil {
|
||||
if err := os.MkdirAll(layerdir, 0o700); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
// Note: While the containers.lock file is in rundir for transient stores
|
||||
|
@ -1213,10 +1221,10 @@ func (r *layerStore) create(id string, parentLayer *Layer, names []string, mount
|
|||
if !r.lockfile.IsReadWrite() {
|
||||
return nil, -1, fmt.Errorf("not allowed to create new layers at %q: %w", r.layerdir, ErrStoreIsReadOnly)
|
||||
}
|
||||
if err := os.MkdirAll(r.rundir, 0700); err != nil {
|
||||
if err := os.MkdirAll(r.rundir, 0o700); err != nil {
|
||||
return nil, -1, err
|
||||
}
|
||||
if err := os.MkdirAll(r.layerdir, 0700); err != nil {
|
||||
if err := os.MkdirAll(r.layerdir, 0o700); err != nil {
|
||||
return nil, -1, err
|
||||
}
|
||||
if id == "" {
|
||||
|
@ -1690,7 +1698,7 @@ func (r *layerStore) setBigData(layer *Layer, key string, data io.Reader) error
|
|||
if key == "" {
|
||||
return fmt.Errorf("can't set empty name for layer big data item: %w", ErrInvalidBigDataName)
|
||||
}
|
||||
err := os.MkdirAll(r.datadir(layer.ID), 0700)
|
||||
err := os.MkdirAll(r.datadir(layer.ID), 0o700)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
@ -1698,7 +1706,7 @@ func (r *layerStore) setBigData(layer *Layer, key string, data io.Reader) error
|
|||
// NewAtomicFileWriter doesn't overwrite/truncate the existing inode.
|
||||
// BigData() relies on this behaviour when opening the file for read
|
||||
// so that it is either accessing the old data or the new one.
|
||||
writer, err := ioutils.NewAtomicFileWriter(r.datapath(layer.ID, key), 0600)
|
||||
writer, err := ioutils.NewAtomicFileWriter(r.datapath(layer.ID, key), 0o600)
|
||||
if err != nil {
|
||||
return fmt.Errorf("opening bigdata file: %w", err)
|
||||
}
|
||||
|
@ -1922,6 +1930,18 @@ func (r *layerStore) Wipe() error {
|
|||
return err
|
||||
}
|
||||
}
|
||||
ids, err := r.driver.ListLayers()
|
||||
if err != nil {
|
||||
if !errors.Is(err, drivers.ErrNotSupported) {
|
||||
return err
|
||||
}
|
||||
ids = nil
|
||||
}
|
||||
for _, id := range ids {
|
||||
if err := r.driver.Remove(id); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
|
@ -2198,7 +2218,7 @@ func (r *layerStore) applyDiffWithOptions(to string, layerOptions *LayerOptions,
|
|||
return -1, err
|
||||
}
|
||||
compression := archive.DetectCompression(header[:n])
|
||||
defragmented := io.MultiReader(bytes.NewBuffer(header[:n]), diff)
|
||||
defragmented := io.MultiReader(bytes.NewReader(header[:n]), diff)
|
||||
|
||||
// Decide if we need to compute digests
|
||||
var compressedDigest, uncompressedDigest digest.Digest // = ""
|
||||
|
@ -2270,10 +2290,10 @@ func (r *layerStore) applyDiffWithOptions(to string, layerOptions *LayerOptions,
|
|||
return -1, err
|
||||
}
|
||||
compressor.Close()
|
||||
if err := os.MkdirAll(filepath.Dir(r.tspath(layer.ID)), 0700); err != nil {
|
||||
if err := os.MkdirAll(filepath.Dir(r.tspath(layer.ID)), 0o700); err != nil {
|
||||
return -1, err
|
||||
}
|
||||
if err := ioutils.AtomicWriteFile(r.tspath(layer.ID), tsdata.Bytes(), 0600); err != nil {
|
||||
if err := ioutils.AtomicWriteFile(r.tspath(layer.ID), tsdata.Bytes(), 0o600); err != nil {
|
||||
return -1, err
|
||||
}
|
||||
if compressedDigester != nil {
|
||||
|
|
|
@ -132,13 +132,13 @@ const (
)

const (
modeISDIR = 040000 // Directory
modeISFIFO = 010000 // FIFO
modeISREG = 0100000 // Regular file
modeISLNK = 0120000 // Symbolic link
modeISBLK = 060000 // Block special file
modeISCHR = 020000 // Character special file
modeISSOCK = 0140000 // Socket
modeISDIR = 0o40000 // Directory
modeISFIFO = 0o10000 // FIFO
modeISREG = 0o100000 // Regular file
modeISLNK = 0o120000 // Symbolic link
modeISBLK = 0o60000 // Block special file
modeISCHR = 0o20000 // Character special file
modeISSOCK = 0o140000 // Socket
)

// IsArchivePath checks if the (possibly compressed) file at the given path
@ -328,7 +328,6 @@ func ReplaceFileTarWrapper(inputTarStream io.ReadCloser, mods map[string]TarModi
}

pipeWriter.Close()

}()
return pipeReader
}
@ -702,7 +701,7 @@ func createTarFile(path, extractDir string, hdr *tar.Header, reader io.Reader, L
}

if forceMask != nil && (hdr.Typeflag != tar.TypeSymlink || runtime.GOOS == "darwin") {
value := fmt.Sprintf("%d:%d:0%o", hdr.Uid, hdr.Gid, hdrInfo.Mode()&07777)
value := fmt.Sprintf("%d:%d:0%o", hdr.Uid, hdr.Gid, hdrInfo.Mode()&0o7777)
if err := system.Lsetxattr(path, idtools.ContainersOverrideXattr, []byte(value), 0); err != nil {
return err
}
@ -800,7 +799,6 @@ func Tar(path string, compression Compression) (io.ReadCloser, error) {
// TarWithOptions creates an archive from the directory at `path`, only including files whose relative
// paths are included in `options.IncludeFiles` (if non-nil) or not in `options.ExcludePatterns`.
func TarWithOptions(srcPath string, options *TarOptions) (io.ReadCloser, error) {

// Fix the source path to work with long path names. This is a no-op
// on platforms other than Windows.
srcPath = fixVolumePathPrefix(srcPath)
@ -1032,7 +1030,7 @@ loop:
parent := filepath.Dir(hdr.Name)
parentPath := filepath.Join(dest, parent)
if _, err := os.Lstat(parentPath); err != nil && os.IsNotExist(err) {
err = idtools.MkdirAllAndChownNew(parentPath, 0777, rootIDs)
err = idtools.MkdirAllAndChownNew(parentPath, 0o777, rootIDs)
if err != nil {
return err
}
@ -1239,7 +1237,7 @@ func (archiver *Archiver) CopyWithTar(src, dst string) error {
}
// Create dst, copy src's content into it
logrus.Debugf("Creating dest directory: %s", dst)
if err := idtools.MkdirAllAndChownNew(dst, 0755, rootIDs); err != nil {
if err := idtools.MkdirAllAndChownNew(dst, 0o755, rootIDs); err != nil {
return err
}
logrus.Debugf("Calling TarUntar(%s, %s)", src, dst)
@ -1266,7 +1264,7 @@ func (archiver *Archiver) CopyFileWithTar(src, dst string) (err error) {
dst = filepath.Join(dst, filepath.Base(src))
}
// Create the holding directory if necessary
if err := os.MkdirAll(filepath.Dir(dst), 0700); err != nil {
if err := os.MkdirAll(filepath.Dir(dst), 0o700); err != nil {
return err
}
@ -1422,7 +1420,7 @@ func IsArchive(header []byte) bool {
if compression != Uncompressed {
return true
}
r := tar.NewReader(bytes.NewBuffer(header))
r := tar.NewReader(bytes.NewReader(header))
_, err := r.Next()
return err == nil
}
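
Most of the changes in this file, and throughout the rest of this diff, are mechanical: the 0o prefix (available since Go 1.13) is just a different spelling of the same octal value, so constants such as modeISDIR and permission masks such as 0o7777 are numerically unchanged. A small standalone check:

package main

import "fmt"

func main() {
	// Both spellings denote the same integer; only the notation differs.
	fmt.Println(040000 == 0o40000) // true (directory mode bit)
	fmt.Println(0755 == 0o755)     // true (rwxr-xr-x permissions)
	fmt.Println(07777 == 0o7777)   // true (permission bits plus setuid/setgid/sticky)
}
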
@ -153,8 +153,7 @@ func (overlayWhiteoutConverter) ConvertReadWithHandler(hdr *tar.Header, path str
return true, nil
}

type directHandler struct {
}
type directHandler struct{}

func (d directHandler) Setxattr(path, name string, value []byte) error {
return unix.Setxattr(path, name, value, 0)
@ -185,7 +184,7 @@ func GetFileOwner(path string) (uint32, uint32, uint32, error) {
}
s, ok := f.Sys().(*syscall.Stat_t)
if ok {
return s.Uid, s.Gid, s.Mode & 07777, nil
return s.Uid, s.Gid, s.Mode & 0o7777, nil
}
return 0, 0, uint32(f.Mode()), nil
}

@ -88,7 +88,7 @@ func minor(device uint64) uint64 {
// handleTarTypeBlockCharFifo is an OS-specific helper function used by
// createTarFile to handle the following types of header: Block; Char; Fifo
func handleTarTypeBlockCharFifo(hdr *tar.Header, path string) error {
mode := uint32(hdr.Mode & 07777)
mode := uint32(hdr.Mode & 0o7777)
switch hdr.Typeflag {
case tar.TypeBlock:
mode |= unix.S_IFBLK

@ -38,7 +38,6 @@ func CanonicalTarNameForPath(p string) (string, error) {
return "", fmt.Errorf("windows path contains forward slash: %s", p)
}
return strings.Replace(p, string(os.PathSeparator), "/", -1), nil

}

// chmodTarEntry is used to adjust the file permissions used in tar header based
@ -48,8 +47,8 @@ func chmodTarEntry(perm os.FileMode) os.FileMode {
permPart := perm & os.ModePerm
noPermPart := perm &^ os.ModePerm
// Add the x bit: make everything +x from windows
permPart |= 0111
permPart &= 0755
permPart |= 0o111
permPart &= 0o755

return noPermPart | permPart
}

@ -131,9 +131,11 @@ func isENOTDIR(err error) bool {
return false
}

type skipChange func(string) (bool, error)
type deleteChange func(string, string, os.FileInfo) (string, error)
type whiteoutChange func(string, string) (bool, error)
type (
skipChange func(string) (bool, error)
deleteChange func(string, string, os.FileInfo) (string, error)
whiteoutChange func(string, string) (bool, error)
)

func changes(layers []string, rw string, dc deleteChange, sc skipChange, wc whiteoutChange) ([]Change, error) {
var (
@ -299,7 +301,6 @@ func (info *FileInfo) path() string {
}

func (info *FileInfo) addChanges(oldInfo *FileInfo, changes *[]Change) {

sizeAtEntry := len(*changes)

if oldInfo == nil {
@ -373,7 +374,6 @@ func (info *FileInfo) addChanges(oldInfo *FileInfo, changes *[]Change) {
copy((*changes)[sizeAtEntry+1:], (*changes)[sizeAtEntry:])
(*changes)[sizeAtEntry] = change
}

}

// Changes add changes to file information.
@ -398,9 +398,7 @@ func newRootFileInfo(idMappings *idtools.IDMappings) *FileInfo {
// ChangesDirs compares two directories and generates an array of Change objects describing the changes.
// If oldDir is "", then all files in newDir will be Add-Changes.
func ChangesDirs(newDir string, newMappings *idtools.IDMappings, oldDir string, oldMappings *idtools.IDMappings) ([]Change, error) {
var (
oldRoot, newRoot *FileInfo
)
var oldRoot, newRoot *FileInfo
if oldDir == "" {
emptyDir, err := os.MkdirTemp("", "empty")
if err != nil {

@ -397,5 +397,4 @@ func overlayDeletedFile(layers []string, root, path string, fi os.FileInfo) (str

// We didn't find the same path in any older layers, so it was new in this one.
return "", nil

}

@ -7,7 +7,6 @@ import (
)

func statDifferent(oldStat *system.StatT, oldInfo *FileInfo, newStat *system.StatT, newInfo *FileInfo) bool {

// Don't look at size for dirs, its not a good measure of change
if oldStat.Mtim() != newStat.Mtim() ||
oldStat.Mode() != newStat.Mode() ||

@ -297,7 +297,6 @@ func PrepareArchiveCopy(srcContent io.Reader, srcInfo, dstInfo CopyInfo) (dstDir
}
return dstDir, RebaseArchiveEntries(srcContent, srcBase, dstBase), nil
}

}

// RebaseArchiveEntries rewrites the given srcContent archive replacing

@ -85,7 +85,7 @@ func UnpackLayer(dest string, layer io.Reader, options *TarOptions) (size int64,
parentPath := filepath.Join(dest, parent)

if _, err := os.Lstat(parentPath); err != nil && os.IsNotExist(err) {
err = os.MkdirAll(parentPath, 0755)
err = os.MkdirAll(parentPath, 0o755)
if err != nil {
return 0, err
}

@ -98,7 +98,7 @@ func parseFileFlags(fflags string) (uint32, uint32, error) {
}

func formatFileFlags(fflags uint32) (string, error) {
var res = []string{}
res := []string{}
for fflags != 0 {
// Extract lowest set bit
fflag := uint32(1) << bits.TrailingZeros32(fflags)

@ -77,7 +77,7 @@ func untarHandler(tarArchive io.Reader, dest string, options *archive.TarOptions

dest = filepath.Clean(dest)
if _, err := os.Stat(dest); os.IsNotExist(err) {
if err := idtools.MkdirAllAndChownNew(dest, 0755, rootIDs); err != nil {
if err := idtools.MkdirAllAndChownNew(dest, 0o755, rootIDs); err != nil {
return err
}
}

@ -8,7 +8,8 @@ import (

func invokeUnpack(decompressedArchive io.Reader,
dest string,
options *archive.TarOptions, root string) error {
options *archive.TarOptions, root string,
) error {
return archive.Unpack(decompressedArchive, dest, options)
}

@ -14,7 +14,8 @@ func chroot(path string) error {

func invokeUnpack(decompressedArchive io.Reader,
dest string,
options *archive.TarOptions, root string) error {
options *archive.TarOptions, root string,
) error {
// Windows is different to Linux here because Windows does not support
// chroot. Hence there is no point sandboxing a chrooted process to
// do the unpack. We call inline instead within the daemon process.

@ -26,7 +26,6 @@ type applyLayerResponse struct {
// used on Windows as it does not support chroot, hence no point sandboxing
// through chroot and rexec.
func applyLayer() {

var (
tmpDir string
err error

@ -48,8 +48,10 @@ type layersCache struct {
created time.Time
}

var cacheMutex sync.Mutex
var cache *layersCache
var (
cacheMutex sync.Mutex
cache *layersCache
)

func (c *layersCache) release() {
cacheMutex.Lock()

@ -15,8 +15,10 @@ import (
"github.com/vbatts/tar-split/archive/tar"
)

const RollsumBits = 16
const holesThreshold = int64(1 << 10)
const (
RollsumBits = 16
holesThreshold = int64(1 << 10)
)

type holesFinder struct {
reader *bufio.Reader

@ -25,11 +25,15 @@ import (
"math/bits"
)

const windowSize = 64 // Roll assumes windowSize is a power of 2
const charOffset = 31
const (
windowSize = 64 // Roll assumes windowSize is a power of 2
charOffset = 31
)

const blobBits = 13
const blobSize = 1 << blobBits // 8k
const (
blobBits = 13
blobSize = 1 << blobBits // 8k
)

type RollSum struct {
s1, s2 uint32

@ -134,7 +134,7 @@ func WriteZstdChunkedManifest(dest io.Writer, outMetadata map[string]string, off
Entries: metadata,
}

var json = jsoniter.ConfigCompatibleWithStandardLibrary
json := jsoniter.ConfigCompatibleWithStandardLibrary
// Generate the manifest
manifest, err := json.Marshal(toc)
if err != nil {

@ -558,7 +558,7 @@ func openFileUnderRootFallback(dirfd int, name string, flags uint64, mode os.Fil
func openFileUnderRootOpenat2(dirfd int, name string, flags uint64, mode os.FileMode) (int, error) {
how := unix.OpenHow{
Flags: flags,
Mode: uint64(mode & 07777),
Mode: uint64(mode & 0o7777),
Resolve: unix.RESOLVE_IN_ROOT,
}
return unix.Openat2(dirfd, name, &how)
@ -636,7 +636,7 @@ func openOrCreateDirUnderRoot(name string, dirfd int, mode os.FileMode) (*os.Fil

baseName := filepath.Base(name)

if err2 := unix.Mkdirat(int(pDir.Fd()), baseName, 0755); err2 != nil {
if err2 := unix.Mkdirat(int(pDir.Fd()), baseName, 0o755); err2 != nil {
return nil, err
}
@ -1384,7 +1384,7 @@ func (c *chunkedDiffer) ApplyDiff(dest string, options *archive.TarOptions) (gra
filesToWaitFor := 0
for i, r := range mergedEntries {
if options.ForceMask != nil {
value := fmt.Sprintf("%d:%d:0%o", r.UID, r.GID, r.Mode&07777)
value := fmt.Sprintf("%d:%d:0%o", r.UID, r.GID, r.Mode&0o7777)
r.Xattrs[containersOverrideXattr] = base64.StdEncoding.EncodeToString([]byte(value))
r.Mode = int64(*options.ForceMask)
}

@ -124,6 +124,11 @@ type OptionsConfig struct {
// for shared image content
AdditionalImageStores []string `toml:"additionalimagestores,omitempty"`

// ImageStore is the location of image store which is separated from the
// container store. Usually this is not recommended unless users wants
// separate store for image and containers.
ImageStore string `toml:"imagestore,omitempty"`

// AdditionalLayerStores is the location of additional read/only
// Layer stores. Usually used to access Networked File System
// for shared image content

@ -239,8 +239,8 @@ func (t *Task) getDriverVersion() (string, error) {
}

func (t *Task) getNextTarget(next unsafe.Pointer) (nextPtr unsafe.Pointer, start uint64,
length uint64, targetType string, params string) {

length uint64, targetType string, params string,
) {
return DmGetNextTarget(t.unmanaged, next, &start, &length,
&targetType, &params),
start, length, targetType, params
@ -345,8 +345,7 @@ func RemoveDeviceDeferred(name string) error {
// disable udev dm rules and delete the symlink under /dev/mapper by itself,
// even if the removal is deferred by the kernel.
cookie := new(uint)
var flags uint16
flags = DmUdevDisableLibraryFallback
flags := uint16(DmUdevDisableLibraryFallback)
if err := task.setCookie(cookie, flags); err != nil {
return fmt.Errorf("devicemapper: Can not set cookie: %s", err)
}
@ -384,7 +383,7 @@ func CancelDeferredRemove(deviceName string) error {
return fmt.Errorf("devicemapper: Can't set sector %s", err)
}

if err := task.setMessage(fmt.Sprintf("@cancel_deferred_remove")); err != nil {
if err := task.setMessage("@cancel_deferred_remove"); err != nil {
return fmt.Errorf("devicemapper: Can't set message %s", err)
}
@ -459,8 +458,7 @@ func CreatePool(poolName string, dataFile, metadataFile *os.File, poolBlockSize
}

cookie := new(uint)
var flags uint16
flags = DmUdevDisableSubsystemRulesFlag | DmUdevDisableDiskRulesFlag | DmUdevDisableOtherRulesFlag
flags := uint16(DmUdevDisableSubsystemRulesFlag | DmUdevDisableDiskRulesFlag | DmUdevDisableOtherRulesFlag)
if err := task.setCookie(cookie, flags); err != nil {
return fmt.Errorf("devicemapper: Can't set cookie %s", err)
}

@ -138,8 +138,8 @@ func dmTaskSetRoFct(task *cdmTask) int {
}

func dmTaskAddTargetFct(task *cdmTask,
start, size uint64, ttype, params string) int {

start, size uint64, ttype, params string,
) int {
Cttype := C.CString(ttype)
defer free(Cttype)
@ -156,12 +156,11 @@ func dmTaskGetDepsFct(task *cdmTask) *Deps {
}

// golang issue: https://github.com/golang/go/issues/11925
hdr := reflect.SliceHeader{
Data: uintptr(unsafe.Pointer(uintptr(unsafe.Pointer(Cdeps)) + unsafe.Sizeof(*Cdeps))),
Len: int(Cdeps.count),
Cap: int(Cdeps.count),
}
devices := *(*[]C.uint64_t)(unsafe.Pointer(&hdr))
var devices []C.uint64_t
devicesHdr := (*reflect.SliceHeader)(unsafe.Pointer(&devices))
devicesHdr.Data = uintptr(unsafe.Pointer(uintptr(unsafe.Pointer(Cdeps)) + unsafe.Sizeof(*Cdeps)))
devicesHdr.Len = int(Cdeps.count)
devicesHdr.Cap = int(Cdeps.count)

deps := &Deps{
Count: uint32(Cdeps.count),
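
The dmTaskGetDepsFct hunk above replaces a hand-built reflect.SliceHeader literal with a header patched through an existing slice variable, following the caveat in golang/go#11925 that a SliceHeader must not be constructed from scratch. On Go 1.17 and later the same view over foreign memory can be expressed with unsafe.Slice; a minimal sketch with a plain Go array standing in for the C-allocated device list (the names here are illustrative only, not the vendored code):

package main

import (
	"fmt"
	"unsafe"
)

func main() {
	// Pretend this backing array came from a C allocation that Go's GC
	// does not manage; here it is just a fixed array for illustration.
	raw := [4]uint64{10, 20, 30, 40}
	ptr := (*uint64)(unsafe.Pointer(&raw[0]))

	// unsafe.Slice(ptr, len) builds a []uint64 view over that memory,
	// avoiding the hand-rolled reflect.SliceHeader construction that
	// golang/go#11925 warns about.
	devices := unsafe.Slice(ptr, len(raw))
	fmt.Println(devices) // [10 20 30 40]
}

unsafe.Slice avoids the manual header surgery entirely; the vendored code keeps the SliceHeader form, which is valid when the header belongs to a real slice variable as it does after this change.
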
@ -183,7 +183,6 @@ func (p *Pattern) Exclusion() bool {
}

func (p *Pattern) match(path string) (bool, error) {

if p.regexp == nil {
if err := p.compile(); err != nil {
return false, filepath.ErrBadPattern
@ -356,12 +355,12 @@ func CreateIfNotExists(path string, isDir bool) error {
if _, err := os.Stat(path); err != nil {
if os.IsNotExist(err) {
if isDir {
return os.MkdirAll(path, 0755)
return os.MkdirAll(path, 0o755)
}
if err := os.MkdirAll(filepath.Dir(path), 0755); err != nil {
if err := os.MkdirAll(filepath.Dir(path), 0o755); err != nil {
return err
}
f, err := os.OpenFile(path, os.O_CREATE, 0755)
f, err := os.OpenFile(path, os.O_CREATE, 0o755)
if err != nil {
return err
}

@ -104,7 +104,7 @@ func CreateIDMappedMount(source, target string, pid int) error {
&attr, uint(unsafe.Sizeof(attr))); err != nil {
return err
}
if err := os.Mkdir(target, 0700); err != nil && !os.IsExist(err) {
if err := os.Mkdir(target, 0o700); err != nil && !os.IsExist(err) {
return err
}
return moveMount(targetDirFd, target)
@ -140,7 +140,7 @@ func CreateUsernsProcess(uidMaps []idtools.IDMap, gidMaps []idtools.IDMap) (int,
for _, m := range idmap {
mappings = mappings + fmt.Sprintf("%d %d %d\n", m.ContainerID, m.HostID, m.Size)
}
return os.WriteFile(fmt.Sprintf("/proc/%d/%s", pid, fname), []byte(mappings), 0600)
return os.WriteFile(fmt.Sprintf("/proc/%d/%s", pid, fname), []byte(mappings), 0o600)
}
if err := writeMappings("uid_map", uidMaps); err != nil {
cleanupFunc()
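
writeMappings above emits the kernel's uid_map/gid_map format, one "ID-inside-namespace ID-outside-namespace length" triple per line, written to /proc/<pid>/uid_map or gid_map. A small sketch of building such a mapping string (the idMap type and the numeric ranges are made-up stand-ins for idtools.IDMap values):

package main

import "fmt"

// idMap mirrors the ContainerID/HostID/Size triple used by writeMappings;
// it is a local stand-in, not the real idtools.IDMap type.
type idMap struct {
	ContainerID int
	HostID      int
	Size        int
}

func main() {
	// Typical rootless layout: root inside the user namespace maps to the
	// unprivileged user, and the rest of the range comes from /etc/subuid.
	maps := []idMap{
		{ContainerID: 0, HostID: 1000, Size: 1},
		{ContainerID: 1, HostID: 100000, Size: 65536},
	}
	mappings := ""
	for _, m := range maps {
		mappings += fmt.Sprintf("%d %d %d\n", m.ContainerID, m.HostID, m.Size)
	}
	// This string is what would be written to /proc/<pid>/uid_map.
	fmt.Print(mappings)
}
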
@ -91,13 +91,13 @@ func CanAccess(path string, pair IDPair) bool {
}

func accessible(isOwner, isGroup bool, perms os.FileMode) bool {
if isOwner && (perms&0100 == 0100) {
if isOwner && (perms&0o100 == 0o100) {
return true
}
if isGroup && (perms&0010 == 0010) {
if isGroup && (perms&0o010 == 0o010) {
return true
}
if perms&0001 == 0001 {
if perms&0o001 == 0o001 {
return true
}
return false
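
The accessible helper above tests single permission bits with octal masks: 0o100 is owner execute, 0o010 is group execute, and 0o001 is other execute. A quick standalone illustration:

package main

import (
	"fmt"
	"os"
)

func main() {
	perms := os.FileMode(0o754)       // rwxr-xr--
	fmt.Println(perms&0o100 == 0o100) // true: owner may execute
	fmt.Println(perms&0o010 == 0o010) // true: group may execute
	fmt.Println(perms&0o001 == 0o001) // false: others may not
}
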
@ -89,7 +89,6 @@ func addUser(userName string) error {
}

func createSubordinateRanges(name string) error {

// first, we should verify that ranges weren't automatically created
// by the distro tooling
ranges, err := readSubuid(name)

@ -135,7 +135,7 @@ func openLock(path string, ro bool) (fd int, err error) {

// the directory of the lockfile seems to be removed, try to create it
if os.IsNotExist(err) {
if err := os.MkdirAll(filepath.Dir(path), 0700); err != nil {
if err := os.MkdirAll(filepath.Dir(path), 0o700); err != nil {
return fd, fmt.Errorf("creating lock file directory: %w", err)
}

@ -138,6 +138,7 @@ func (l *LockFile) Modified() (bool, error) {
func (l *LockFile) Touch() error {
return nil
}

func (l *LockFile) IsReadWrite() bool {
return false
}

@ -26,7 +26,7 @@ func stringToLoopName(src string) [LoNameSize]uint8 {
}

func getNextFreeLoopbackIndex() (int, error) {
f, err := os.OpenFile("/dev/loop-control", os.O_RDONLY, 0644)
f, err := os.OpenFile("/dev/loop-control", os.O_RDONLY, 0o644)
if err != nil {
return 0, err
}
@ -67,7 +67,7 @@ func openNextAvailableLoopback(index int, sparseName string, sparseFile *os.File
}

// OpenFile adds O_CLOEXEC
loopFile, err = os.OpenFile(target, os.O_RDWR, 0644)
loopFile, err = os.OpenFile(target, os.O_RDWR, 0o644)
if err != nil {
logrus.Errorf("Opening loopback device: %s", err)
return nil, ErrAttachLoopbackDevice
@ -114,7 +114,6 @@ func openNextAvailableLoopback(index int, sparseName string, sparseFile *os.File
// AttachLoopDevice attaches the given sparse file to the next
// available loopback device. It returns an opened *os.File.
func AttachLoopDevice(sparseName string) (loop *os.File, err error) {

// Try to retrieve the next available loopback device via syscall.
// If it fails, we discard error and start looping for a
// loopback from index 0.
@ -124,7 +123,7 @@ func AttachLoopDevice(sparseName string) (loop *os.File, err error) {
}

// OpenFile adds O_CLOEXEC
sparseFile, err := os.OpenFile(sparseName, os.O_RDWR, 0644)
sparseFile, err := os.OpenFile(sparseName, os.O_RDWR, 0o644)
if err != nil {
logrus.Errorf("Opening sparse file: %v", err)
return nil, ErrAttachLoopbackDevice

@ -24,7 +24,6 @@ func (k *VersionInfo) String() string {

// GetKernelVersion gets the current kernel version.
func GetKernelVersion() (*VersionInfo, error) {

var (
h windows.Handle
dwVersion uint32

@ -11,7 +11,7 @@ import (
// of apps that want to use global regex variables. This library initializes them on
// first use as opposed to the start of the executable.
type Regexp struct {
once sync.Once
once *sync.Once
regexp *regexp.Regexp
val string
}
@ -22,7 +22,10 @@ func Delayed(val string) Regexp {
}
if precompile {
re.regexp = regexp.MustCompile(re.val)
} else {
re.once = &sync.Once{}
}

return re
}
@ -44,6 +47,7 @@ func (re *Regexp) ExpandString(dst []byte, template string, src string, match []
re.compile()
return re.regexp.ExpandString(dst, template, src, match)
}

func (re *Regexp) Find(b []byte) []byte {
re.compile()
return re.regexp.Find(b)
@ -153,6 +157,7 @@ func (re *Regexp) MatchReader(r io.RuneReader) bool {
re.compile()
return re.regexp.MatchReader(r)
}

func (re *Regexp) MatchString(s string) bool {
re.compile()
return re.regexp.MatchString(s)
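
The regexp hunks above implement lazy compilation: Delayed either compiles the pattern immediately (precompile) or allocates a *sync.Once so that the first accessor pays the regexp.MustCompile cost exactly once. A stripped-down sketch of that pattern (the lazyRegexp type and delayed function are hypothetical stand-ins, not the vendored API):

package main

import (
	"fmt"
	"regexp"
	"sync"
)

type lazyRegexp struct {
	once *sync.Once // nil when the pattern was precompiled
	re   *regexp.Regexp
	val  string
}

func delayed(val string, precompile bool) *lazyRegexp {
	l := &lazyRegexp{val: val}
	if precompile {
		l.re = regexp.MustCompile(val) // pay the cost up front
	} else {
		l.once = &sync.Once{} // compile on first use instead
	}
	return l
}

func (l *lazyRegexp) get() *regexp.Regexp {
	if l.once != nil {
		l.once.Do(func() { l.re = regexp.MustCompile(l.val) })
	}
	return l.re
}

func main() {
	re := delayed(`^[a-f0-9]{64}$`, false)
	fmt.Println(re.get().MatchString("not-a-digest")) // false
}
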
@ -63,7 +63,7 @@ func generateID(r io.Reader) string {
}
}

// GenerateRandomID returns a unique id.
// GenerateRandomID returns a pseudorandom 64-character hex string.
func GenerateRandomID() string {
return generateID(cryptorand.Reader)
}

Some files were not shown because too many files have changed in this diff.