mirror of https://github.com/containers/podman.git
Vendor in containers/(storage,image, common, buildah)
Signed-off-by: Daniel J Walsh <dwalsh@redhat.com>
This commit is contained in:
parent
5f848d89ed
commit
f67ab1eb20
12
go.mod
12
go.mod
|
@ -11,13 +11,13 @@ require (
|
|||
github.com/container-orchestrated-devices/container-device-interface v0.4.0
|
||||
github.com/containernetworking/cni v1.1.1
|
||||
github.com/containernetworking/plugins v1.1.1
|
||||
github.com/containers/buildah v1.26.1-0.20220609225314-e66309ebde8c
|
||||
github.com/containers/common v0.48.1-0.20220705175712-dd1c331887b9
|
||||
github.com/containers/buildah v1.26.1-0.20220716095526-d31d27c357ab
|
||||
github.com/containers/common v0.48.1-0.20220715075726-2ac10faca05a
|
||||
github.com/containers/conmon v2.0.20+incompatible
|
||||
github.com/containers/image/v5 v5.21.2-0.20220617075545-929f14a56f5c
|
||||
github.com/containers/image/v5 v5.21.2-0.20220714132403-2bb3f3e44c5c
|
||||
github.com/containers/ocicrypt v1.1.5
|
||||
github.com/containers/psgo v1.7.2
|
||||
github.com/containers/storage v1.41.1-0.20220616120034-7df64288ef35
|
||||
github.com/containers/storage v1.41.1-0.20220714115232-fc9b0ff5272a
|
||||
github.com/coreos/go-systemd/v22 v22.3.2
|
||||
github.com/coreos/stream-metadata-go v0.0.0-20210225230131-70edb9eb47b3
|
||||
github.com/cyphar/filepath-securejoin v0.2.3
|
||||
|
@ -64,8 +64,8 @@ require (
|
|||
github.com/vishvananda/netlink v1.1.1-0.20220115184804-dd687eb2f2d4
|
||||
go.etcd.io/bbolt v1.3.6
|
||||
golang.org/x/crypto v0.0.0-20220411220226-7b82a4e95df4
|
||||
golang.org/x/sync v0.0.0-20210220032951-036812b2e83c
|
||||
golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a
|
||||
golang.org/x/sync v0.0.0-20220601150217-0de741cfad7f
|
||||
golang.org/x/sys v0.0.0-20220624220833-87e55d714810
|
||||
golang.org/x/term v0.0.0-20220526004731-065cf7ba2467
|
||||
golang.org/x/text v0.3.7
|
||||
google.golang.org/protobuf v1.28.0
|
||||
|
|
|
@ -3,7 +3,7 @@ module github.com/containerd/stargz-snapshotter/estargz
|
|||
go 1.16
|
||||
|
||||
require (
|
||||
github.com/klauspost/compress v1.15.1
|
||||
github.com/klauspost/compress v1.15.7
|
||||
github.com/opencontainers/go-digest v1.0.0
|
||||
github.com/vbatts/tar-split v0.11.2
|
||||
golang.org/x/sync v0.0.0-20201207232520-09787c993a3a
|
||||
|
|
|
@ -1,8 +1,8 @@
|
|||
github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
|
||||
github.com/cpuguy83/go-md2man/v2 v2.0.0-20190314233015-f79a8a8ca69d/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU=
|
||||
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
||||
github.com/klauspost/compress v1.15.1 h1:y9FcTHGyrebwfP0ZZqFiaxTaiDnUrGkJkI+f583BL1A=
|
||||
github.com/klauspost/compress v1.15.1/go.mod h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47etQ6QUkpK6sk=
|
||||
github.com/klauspost/compress v1.15.7 h1:7cgTQxJCU/vy+oP/E3B9RGbQTgbiVzIJWIKOLoAsPok=
|
||||
github.com/klauspost/compress v1.15.7/go.mod h1:PhcZ0MbTNciWF3rruxRgKxI5NkcHHrHUDtV4Yw2GlzU=
|
||||
github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U=
|
||||
github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM=
|
||||
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
|
||||
|
|
|
@ -32,6 +32,7 @@ import (
|
|||
"fmt"
|
||||
"io"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"reflect"
|
||||
"sort"
|
||||
"strings"
|
||||
|
@ -1313,6 +1314,18 @@ func testWriteAndOpen(t *testing.T, controllers ...TestingController) {
|
|||
),
|
||||
wantFailOnLossLess: true,
|
||||
},
|
||||
{
|
||||
name: "hardlink should be replaced to the destination entry",
|
||||
in: tarOf(
|
||||
dir("foo/"),
|
||||
file("foo/foo1", "test"),
|
||||
link("foolink", "foo/foo1"),
|
||||
),
|
||||
wantNumGz: 4, // dir, foo1 + link, TOC, footer
|
||||
want: checks(
|
||||
mustSameEntry("foo/foo1", "foolink"),
|
||||
),
|
||||
},
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
|
@ -1730,6 +1743,60 @@ func hasEntryOwner(entry string, owner owner) stargzCheck {
|
|||
})
|
||||
}
|
||||
|
||||
func mustSameEntry(files ...string) stargzCheck {
|
||||
return stargzCheckFn(func(t *testing.T, r *Reader) {
|
||||
var first *TOCEntry
|
||||
for _, f := range files {
|
||||
if first == nil {
|
||||
var ok bool
|
||||
first, ok = r.Lookup(f)
|
||||
if !ok {
|
||||
t.Errorf("unknown first file on Lookup: %q", f)
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
// Test Lookup
|
||||
e, ok := r.Lookup(f)
|
||||
if !ok {
|
||||
t.Errorf("unknown file on Lookup: %q", f)
|
||||
return
|
||||
}
|
||||
if e != first {
|
||||
t.Errorf("Lookup: %+v(%p) != %+v(%p)", e, e, first, first)
|
||||
return
|
||||
}
|
||||
|
||||
// Test LookupChild
|
||||
pe, ok := r.Lookup(filepath.Dir(filepath.Clean(f)))
|
||||
if !ok {
|
||||
t.Errorf("failed to get parent of %q", f)
|
||||
return
|
||||
}
|
||||
e, ok = pe.LookupChild(filepath.Base(filepath.Clean(f)))
|
||||
if !ok {
|
||||
t.Errorf("failed to get %q as the child of %+v", f, pe)
|
||||
return
|
||||
}
|
||||
if e != first {
|
||||
t.Errorf("LookupChild: %+v(%p) != %+v(%p)", e, e, first, first)
|
||||
return
|
||||
}
|
||||
|
||||
// Test ForeachChild
|
||||
pe.ForeachChild(func(baseName string, e *TOCEntry) bool {
|
||||
if baseName == filepath.Base(filepath.Clean(f)) {
|
||||
if e != first {
|
||||
t.Errorf("ForeachChild: %+v(%p) != %+v(%p)", e, e, first, first)
|
||||
return false
|
||||
}
|
||||
}
|
||||
return true
|
||||
})
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
func tarOf(s ...tarEntry) []tarEntry { return s }
|
||||
|
||||
type tarEntry interface {
|
||||
|
|
|
@ -159,7 +159,8 @@ type TOCEntry struct {
|
|||
|
||||
// NumLink is the number of entry names pointing to this entry.
|
||||
// Zero means one name references this entry.
|
||||
NumLink int
|
||||
// This field is calculated during runtime and not recorded in TOC JSON.
|
||||
NumLink int `json:"-"`
|
||||
|
||||
// Xattrs are the extended attribute for the entry.
|
||||
Xattrs map[string][]byte `json:"xattrs,omitempty"`
|
||||
|
|
|
@ -51,7 +51,7 @@ gcp_credentials: ENCRYPTED[ae0bf7370f0b6e446bc61d0865a2c55d3e166b3fab9466eb0393e
|
|||
timeout_in: 120m
|
||||
|
||||
# Default VM to use unless set or modified by task
|
||||
gce_instance:
|
||||
gce_instance: &standardvm
|
||||
image_project: "${IMAGE_PROJECT}"
|
||||
zone: "us-central1-c" # Required by Cirrus for the time being
|
||||
cpu: 2
|
||||
|
@ -76,6 +76,7 @@ meta_task:
|
|||
${FEDORA_CACHE_IMAGE_NAME}
|
||||
${PRIOR_FEDORA_CACHE_IMAGE_NAME}
|
||||
${UBUNTU_CACHE_IMAGE_NAME}
|
||||
build-push-${IMAGE_SUFFIX}
|
||||
BUILDID: "${CIRRUS_BUILD_ID}"
|
||||
REPOREF: "${CIRRUS_CHANGE_IN_REPO}"
|
||||
GCPJSON: ENCRYPTED[d3614d6f5cc0e66be89d4252b3365fd84f14eee0259d4eb47e25fc0bc2842c7937f5ee8c882b7e547b4c5ec4b6733b14]
|
||||
|
@ -93,9 +94,8 @@ smoke_task:
|
|||
gce_instance:
|
||||
memory: "12Gb"
|
||||
|
||||
# N/B: Skip running this on branches due to multiple bugs in
|
||||
# the git-validate tool which are difficult to debug and fix.
|
||||
skip: $CIRRUS_PR == ''
|
||||
# Don't bother running on branches (including cron), or for tags.
|
||||
only_if: $CIRRUS_PR != ''
|
||||
|
||||
timeout_in: 30m
|
||||
|
||||
|
@ -111,6 +111,7 @@ smoke_task:
|
|||
vendor_task:
|
||||
name: "Test Vendoring"
|
||||
alias: vendor
|
||||
only_if: ¬_multiarch $CIRRUS_CRON != 'multiarch'
|
||||
|
||||
env:
|
||||
CIRRUS_WORKING_DIR: "/var/tmp/go/src/github.com/containers/buildah"
|
||||
|
@ -134,7 +135,9 @@ vendor_task:
|
|||
cross_build_task:
|
||||
name: "Cross Compile"
|
||||
alias: cross_build
|
||||
only_if: ¬_docs $CIRRUS_CHANGE_TITLE !=~ '.*CI:DOCS.*'
|
||||
only_if: >-
|
||||
$CIRRUS_CHANGE_TITLE !=~ '.*CI:DOCS.*' &&
|
||||
$CIRRUS_CRON != 'multiarch'
|
||||
|
||||
osx_instance:
|
||||
image: 'big-sur-base'
|
||||
|
@ -154,7 +157,10 @@ cross_build_task:
|
|||
unit_task:
|
||||
name: 'Unit tests w/ $STORAGE_DRIVER'
|
||||
alias: unit
|
||||
only_if: *not_docs
|
||||
only_if: ¬_build_docs >-
|
||||
$CIRRUS_CHANGE_TITLE !=~ '.*CI:DOCS.*' &&
|
||||
$CIRRUS_CHANGE_TITLE !=~ '.*CI:BUILD.*' &&
|
||||
$CIRRUS_CRON != 'multiarch'
|
||||
depends_on: &smoke_vendor_cross
|
||||
- smoke
|
||||
- vendor
|
||||
|
@ -179,7 +185,7 @@ unit_task:
|
|||
conformance_task:
|
||||
name: 'Build Conformance w/ $STORAGE_DRIVER'
|
||||
alias: conformance
|
||||
only_if: *not_docs
|
||||
only_if: *not_build_docs
|
||||
depends_on: *smoke_vendor_cross
|
||||
|
||||
gce_instance:
|
||||
|
@ -200,7 +206,7 @@ conformance_task:
|
|||
integration_task:
|
||||
name: "Integration $DISTRO_NV w/ $STORAGE_DRIVER"
|
||||
alias: integration
|
||||
only_if: *not_docs
|
||||
only_if: *not_build_docs
|
||||
depends_on: *smoke_vendor_cross
|
||||
|
||||
matrix:
|
||||
|
@ -255,7 +261,7 @@ integration_task:
|
|||
integration_rootless_task:
|
||||
name: "Integration rootless $DISTRO_NV w/ $STORAGE_DRIVER"
|
||||
alias: integration_rootless
|
||||
only_if: *not_docs
|
||||
only_if: *not_build_docs
|
||||
depends_on: *smoke_vendor_cross
|
||||
|
||||
matrix:
|
||||
|
@ -294,7 +300,7 @@ integration_rootless_task:
|
|||
in_podman_task:
|
||||
name: "Containerized Integration"
|
||||
alias: in_podman
|
||||
only_if: *not_docs
|
||||
only_if: *not_build_docs
|
||||
depends_on: *smoke_vendor_cross
|
||||
|
||||
env:
|
||||
|
@ -315,6 +321,52 @@ in_podman_task:
|
|||
<<: *standardlogs
|
||||
|
||||
|
||||
image_build_task: &image-build
|
||||
name: "Build multi-arch $CTXDIR"
|
||||
alias: image_build
|
||||
# Some of these container images take > 1h to build, limit
|
||||
# this task to a specific Cirrus-Cron entry with this name.
|
||||
only_if: $CIRRUS_CRON == 'multiarch'
|
||||
depends_on:
|
||||
- smoke
|
||||
timeout_in: 120m # emulation is sssllllooooowwww
|
||||
gce_instance:
|
||||
<<: *standardvm
|
||||
image_name: build-push-${IMAGE_SUFFIX}
|
||||
# More muscle required for parallel multi-arch build
|
||||
type: "n2-standard-4"
|
||||
matrix:
|
||||
- env:
|
||||
CTXDIR: contrib/buildahimage/upstream
|
||||
- env:
|
||||
CTXDIR: contrib/buildahimage/testing
|
||||
- env:
|
||||
CTXDIR: contrib/buildahimage/stable
|
||||
env:
|
||||
DISTRO_NV: "${FEDORA_NAME}" # Required for repo cache extraction
|
||||
BUILDAH_USERNAME: ENCRYPTED[70e1d4f026cba5d82fc067944baab10f7c71c64bb6b75fce4eeb5c106694b3bbc8e08f8a1b926d6e03e85cf4e21833bb]
|
||||
BUILDAH_PASSWORD: ENCRYPTED[2dc7f4f623bfc856e1d5030df263b9e48ddab39abacea7a8bc714179c188df15fc0a5bb5d3414a24637d4e39aa51b7b5]
|
||||
CONTAINERS_USERNAME: ENCRYPTED[88cd93c753f78d70e4beb5dbebd4402d682daf45793d7e0fe8b75b358f768e8734aef3f130ffb4ebca9bdea8d220a188]
|
||||
CONTAINERS_PASSWORD: ENCRYPTED[886cf4cc126e50b2fd7f2792235a22bb79e4b81db43f803a6214a38d3fd6c04cd4e64570b562cb32b04e5fbc435404b6]
|
||||
main_script:
|
||||
- source /etc/automation_environment
|
||||
- main.sh $CIRRUS_REPO_CLONE_URL $CTXDIR
|
||||
|
||||
|
||||
test_image_build_task:
|
||||
<<: *image-build
|
||||
alias: test_image_build
|
||||
# Allow this to run inside a PR w/ [CI:BUILD] only.
|
||||
only_if: $CIRRUS_PR != '' && $CIRRUS_CHANGE_TITLE =~ '.*CI:BUILD.*'
|
||||
# This takes a LONG time, only run when requested. N/B: Any task
|
||||
# made to depend on this one will block FOREVER unless triggered.
|
||||
# DO NOT ADD THIS TASK AS DEPENDENCY FOR `success_task`.
|
||||
trigger_type: manual
|
||||
# Overwrite all 'env', don't push anything, just do the build.
|
||||
env:
|
||||
DRYRUN: 1
|
||||
|
||||
|
||||
# Status aggregator for all tests. This task simply ensures a defined
|
||||
# set of tasks all passed, and allows confirming that based on the status
|
||||
# of this task.
|
||||
|
@ -331,6 +383,7 @@ success_task:
|
|||
- cross_build
|
||||
- integration
|
||||
- in_podman
|
||||
- image_build
|
||||
|
||||
container:
|
||||
image: "quay.io/libpod/alpine:latest"
|
||||
|
|
|
@ -78,8 +78,9 @@ ALL_CROSS_TARGETS := $(addprefix bin/buildah.,$(subst /,.,$(shell $(GO) tool dis
|
|||
LINUX_CROSS_TARGETS := $(filter bin/buildah.linux.%,$(ALL_CROSS_TARGETS))
|
||||
DARWIN_CROSS_TARGETS := $(filter bin/buildah.darwin.%,$(ALL_CROSS_TARGETS))
|
||||
WINDOWS_CROSS_TARGETS := $(addsuffix .exe,$(filter bin/buildah.windows.%,$(ALL_CROSS_TARGETS)))
|
||||
FREEBSD_CROSS_TARGETS := $(filter bin/buildah.freebsd.%,$(ALL_CROSS_TARGETS))
|
||||
.PHONY: cross
|
||||
cross: $(LINUX_CROSS_TARGETS) $(DARWIN_CROSS_TARGETS) $(WINDOWS_CROSS_TARGETS)
|
||||
cross: $(LINUX_CROSS_TARGETS) $(DARWIN_CROSS_TARGETS) $(WINDOWS_CROSS_TARGETS) $(FREEBSD_CROSS_TARGETS)
|
||||
|
||||
bin/buildah.%:
|
||||
mkdir -p ./bin
|
||||
|
@ -114,7 +115,6 @@ validate: install.tools
|
|||
./tests/validate/whitespace.sh
|
||||
./hack/xref-helpmsgs-manpages
|
||||
./tests/validate/pr-should-include-tests
|
||||
./tests/validate/buildahimages-are-sane
|
||||
|
||||
.PHONY: install.tools
|
||||
install.tools:
|
||||
|
@ -189,3 +189,9 @@ vendor:
|
|||
.PHONY: lint
|
||||
lint: install.tools
|
||||
./tests/tools/build/golangci-lint run $(LINTFLAGS)
|
||||
|
||||
# CAUTION: This is not a replacement for RPMs provided by your distro.
|
||||
# Only intended to build and test the latest unreleased changes.
|
||||
.PHONY: rpm
|
||||
rpm:
|
||||
rpkg local
|
||||
|
|
|
@ -2,6 +2,7 @@ package buildah
|
|||
|
||||
import (
|
||||
"archive/tar"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
|
@ -24,7 +25,6 @@ import (
|
|||
"github.com/hashicorp/go-multierror"
|
||||
"github.com/opencontainers/runc/libcontainer/userns"
|
||||
"github.com/opencontainers/runtime-spec/specs-go"
|
||||
"github.com/pkg/errors"
|
||||
"github.com/sirupsen/logrus"
|
||||
)
|
||||
|
||||
|
@ -88,6 +88,11 @@ func getURL(src string, chown *idtools.IDPair, mountpoint, renameTarget string,
|
|||
return err
|
||||
}
|
||||
defer response.Body.Close()
|
||||
|
||||
if response.StatusCode < http.StatusOK || response.StatusCode >= http.StatusBadRequest {
|
||||
return fmt.Errorf("invalid response status %d", response.StatusCode)
|
||||
}
|
||||
|
||||
// Figure out what to name the new content.
|
||||
name := renameTarget
|
||||
if name == "" {
|
||||
|
@ -100,7 +105,7 @@ func getURL(src string, chown *idtools.IDPair, mountpoint, renameTarget string,
|
|||
if lastModified != "" {
|
||||
d, err := time.Parse(time.RFC1123, lastModified)
|
||||
if err != nil {
|
||||
return errors.Wrapf(err, "error parsing last-modified time")
|
||||
return fmt.Errorf("error parsing last-modified time: %w", err)
|
||||
}
|
||||
date = d
|
||||
}
|
||||
|
@ -112,17 +117,17 @@ func getURL(src string, chown *idtools.IDPair, mountpoint, renameTarget string,
|
|||
// we can figure out how much content there is.
|
||||
f, err := ioutil.TempFile(mountpoint, "download")
|
||||
if err != nil {
|
||||
return errors.Wrapf(err, "error creating temporary file to hold %q", src)
|
||||
return fmt.Errorf("error creating temporary file to hold %q: %w", src, err)
|
||||
}
|
||||
defer os.Remove(f.Name())
|
||||
defer f.Close()
|
||||
size, err = io.Copy(f, response.Body)
|
||||
if err != nil {
|
||||
return errors.Wrapf(err, "error writing %q to temporary file %q", src, f.Name())
|
||||
return fmt.Errorf("error writing %q to temporary file %q: %w", src, f.Name(), err)
|
||||
}
|
||||
_, err = f.Seek(0, io.SeekStart)
|
||||
if err != nil {
|
||||
return errors.Wrapf(err, "error setting up to read %q from temporary file %q", src, f.Name())
|
||||
return fmt.Errorf("error setting up to read %q from temporary file %q: %w", src, f.Name(), err)
|
||||
}
|
||||
responseBody = f
|
||||
}
|
||||
|
@ -150,10 +155,14 @@ func getURL(src string, chown *idtools.IDPair, mountpoint, renameTarget string,
|
|||
}
|
||||
err = tw.WriteHeader(&hdr)
|
||||
if err != nil {
|
||||
return errors.Wrapf(err, "error writing header")
|
||||
return fmt.Errorf("error writing header: %w", err)
|
||||
}
|
||||
_, err = io.Copy(tw, responseBody)
|
||||
return errors.Wrapf(err, "error writing content from %q to tar stream", src)
|
||||
|
||||
if _, err := io.Copy(tw, responseBody); err != nil {
|
||||
return fmt.Errorf("error writing content from %q to tar stream: %w", src, err)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// includeDirectoryAnyway returns true if "path" is a prefix for an exception
|
||||
|
@ -199,13 +208,13 @@ func (b *Builder) Add(destination string, extract bool, options AddAndCopyOption
|
|||
contextDir = string(os.PathSeparator)
|
||||
currentDir, err = os.Getwd()
|
||||
if err != nil {
|
||||
return errors.Wrapf(err, "error determining current working directory")
|
||||
return fmt.Errorf("error determining current working directory: %w", err)
|
||||
}
|
||||
} else {
|
||||
if !filepath.IsAbs(options.ContextDir) {
|
||||
contextDir, err = filepath.Abs(options.ContextDir)
|
||||
if err != nil {
|
||||
return errors.Wrapf(err, "error converting context directory path %q to an absolute path", options.ContextDir)
|
||||
return fmt.Errorf("error converting context directory path %q to an absolute path: %w", options.ContextDir, err)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
@ -233,7 +242,7 @@ func (b *Builder) Add(destination string, extract bool, options AddAndCopyOption
|
|||
}
|
||||
localSourceStats, err = copier.Stat(contextDir, contextDir, statOptions, localSources)
|
||||
if err != nil {
|
||||
return errors.Wrapf(err, "checking on sources under %q", contextDir)
|
||||
return fmt.Errorf("checking on sources under %q: %w", contextDir, err)
|
||||
}
|
||||
}
|
||||
numLocalSourceItems := 0
|
||||
|
@ -247,15 +256,15 @@ func (b *Builder) Add(destination string, extract bool, options AddAndCopyOption
|
|||
if strings.HasPrefix(rel, ".."+string(os.PathSeparator)) {
|
||||
errorText = fmt.Sprintf("possible escaping context directory error: %s", errorText)
|
||||
}
|
||||
return errors.Errorf("checking on sources under %q: %v", contextDir, errorText)
|
||||
return fmt.Errorf("checking on sources under %q: %v", contextDir, errorText)
|
||||
}
|
||||
if len(localSourceStat.Globbed) == 0 {
|
||||
return errors.Wrapf(syscall.ENOENT, "checking source under %q: no glob matches", contextDir)
|
||||
return fmt.Errorf("checking source under %q: no glob matches: %w", contextDir, syscall.ENOENT)
|
||||
}
|
||||
numLocalSourceItems += len(localSourceStat.Globbed)
|
||||
}
|
||||
if numLocalSourceItems+len(remoteSources) == 0 {
|
||||
return errors.Wrapf(syscall.ENOENT, "no sources %v found", sources)
|
||||
return fmt.Errorf("no sources %v found: %w", sources, syscall.ENOENT)
|
||||
}
|
||||
|
||||
// Find out which user (and group) the destination should belong to.
|
||||
|
@ -264,14 +273,14 @@ func (b *Builder) Add(destination string, extract bool, options AddAndCopyOption
|
|||
if options.Chown != "" {
|
||||
userUID, userGID, err = b.userForCopy(mountPoint, options.Chown)
|
||||
if err != nil {
|
||||
return errors.Wrapf(err, "error looking up UID/GID for %q", options.Chown)
|
||||
return fmt.Errorf("error looking up UID/GID for %q: %w", options.Chown, err)
|
||||
}
|
||||
}
|
||||
var chmodDirsFiles *os.FileMode
|
||||
if options.Chmod != "" {
|
||||
p, err := strconv.ParseUint(options.Chmod, 8, 32)
|
||||
if err != nil {
|
||||
return errors.Wrapf(err, "error parsing chmod %q", options.Chmod)
|
||||
return fmt.Errorf("error parsing chmod %q: %w", options.Chmod, err)
|
||||
}
|
||||
perm := os.FileMode(p)
|
||||
chmodDirsFiles = &perm
|
||||
|
@ -323,7 +332,7 @@ func (b *Builder) Add(destination string, extract bool, options AddAndCopyOption
|
|||
}
|
||||
destStats, err := copier.Stat(mountPoint, filepath.Join(mountPoint, b.WorkDir()), statOptions, []string{extractDirectory})
|
||||
if err != nil {
|
||||
return errors.Wrapf(err, "error checking on destination %v", extractDirectory)
|
||||
return fmt.Errorf("error checking on destination %v: %w", extractDirectory, err)
|
||||
}
|
||||
if (len(destStats) == 0 || len(destStats[0].Globbed) == 0) && !destMustBeDirectory && destCanBeFile {
|
||||
// destination doesn't exist - extract to parent and rename the incoming file to the destination's name
|
||||
|
@ -339,7 +348,7 @@ func (b *Builder) Add(destination string, extract bool, options AddAndCopyOption
|
|||
|
||||
if len(destStats) == 1 && len(destStats[0].Globbed) == 1 && destStats[0].Results[destStats[0].Globbed[0]].IsRegular {
|
||||
if destMustBeDirectory {
|
||||
return errors.Errorf("destination %v already exists but is not a directory", destination)
|
||||
return fmt.Errorf("destination %v already exists but is not a directory", destination)
|
||||
}
|
||||
// destination exists - it's a file, we need to extract to parent and rename the incoming file to the destination's name
|
||||
renameTarget = filepath.Base(extractDirectory)
|
||||
|
@ -348,7 +357,7 @@ func (b *Builder) Add(destination string, extract bool, options AddAndCopyOption
|
|||
|
||||
pm, err := fileutils.NewPatternMatcher(options.Excludes)
|
||||
if err != nil {
|
||||
return errors.Wrapf(err, "error processing excludes list %v", options.Excludes)
|
||||
return fmt.Errorf("error processing excludes list %v: %w", options.Excludes, err)
|
||||
}
|
||||
|
||||
// Make sure that, if it's a symlink, we'll chroot to the target of the link;
|
||||
|
@ -356,7 +365,7 @@ func (b *Builder) Add(destination string, extract bool, options AddAndCopyOption
|
|||
evalOptions := copier.EvalOptions{}
|
||||
evaluated, err := copier.Eval(mountPoint, extractDirectory, evalOptions)
|
||||
if err != nil {
|
||||
return errors.Wrapf(err, "error checking on destination %v", extractDirectory)
|
||||
return fmt.Errorf("error checking on destination %v: %w", extractDirectory, err)
|
||||
}
|
||||
extractDirectory = evaluated
|
||||
|
||||
|
@ -374,7 +383,7 @@ func (b *Builder) Add(destination string, extract bool, options AddAndCopyOption
|
|||
ChownNew: chownDirs,
|
||||
}
|
||||
if err := copier.Mkdir(mountPoint, extractDirectory, mkdirOptions); err != nil {
|
||||
return errors.Wrapf(err, "error ensuring target directory exists")
|
||||
return fmt.Errorf("error ensuring target directory exists: %w", err)
|
||||
}
|
||||
|
||||
// Copy each source in turn.
|
||||
|
@ -418,10 +427,10 @@ func (b *Builder) Add(destination string, extract bool, options AddAndCopyOption
|
|||
}()
|
||||
wg.Wait()
|
||||
if getErr != nil {
|
||||
getErr = errors.Wrapf(getErr, "error reading %q", src)
|
||||
getErr = fmt.Errorf("error reading %q: %w", src, getErr)
|
||||
}
|
||||
if putErr != nil {
|
||||
putErr = errors.Wrapf(putErr, "error storing %q", src)
|
||||
putErr = fmt.Errorf("error storing %q: %w", src, putErr)
|
||||
}
|
||||
multiErr = multierror.Append(getErr, putErr)
|
||||
if multiErr != nil && multiErr.ErrorOrNil() != nil {
|
||||
|
@ -450,16 +459,16 @@ func (b *Builder) Add(destination string, extract bool, options AddAndCopyOption
|
|||
for _, glob := range localSourceStat.Globbed {
|
||||
rel, err := filepath.Rel(contextDir, glob)
|
||||
if err != nil {
|
||||
return errors.Wrapf(err, "error computing path of %q relative to %q", glob, contextDir)
|
||||
return fmt.Errorf("error computing path of %q relative to %q: %w", glob, contextDir, err)
|
||||
}
|
||||
if strings.HasPrefix(rel, ".."+string(os.PathSeparator)) {
|
||||
return errors.Errorf("possible escaping context directory error: %q is outside of %q", glob, contextDir)
|
||||
return fmt.Errorf("possible escaping context directory error: %q is outside of %q", glob, contextDir)
|
||||
}
|
||||
// Check for dockerignore-style exclusion of this item.
|
||||
if rel != "." {
|
||||
excluded, err := pm.Matches(filepath.ToSlash(rel)) // nolint:staticcheck
|
||||
if err != nil {
|
||||
return errors.Wrapf(err, "error checking if %q(%q) is excluded", glob, rel)
|
||||
return fmt.Errorf("error checking if %q(%q) is excluded: %w", glob, rel, err)
|
||||
}
|
||||
if excluded {
|
||||
// non-directories that are excluded are excluded, no question, but
|
||||
|
@ -515,7 +524,7 @@ func (b *Builder) Add(destination string, extract bool, options AddAndCopyOption
|
|||
getErr = copier.Get(contextDir, contextDir, getOptions, []string{glob}, writer)
|
||||
closeErr = writer.Close()
|
||||
if renameTarget != "" && renamedItems > 1 {
|
||||
renameErr = errors.Errorf("internal error: renamed %d items when we expected to only rename 1", renamedItems)
|
||||
renameErr = fmt.Errorf("internal error: renamed %d items when we expected to only rename 1", renamedItems)
|
||||
}
|
||||
wg.Done()
|
||||
}()
|
||||
|
@ -553,16 +562,16 @@ func (b *Builder) Add(destination string, extract bool, options AddAndCopyOption
|
|||
}()
|
||||
wg.Wait()
|
||||
if getErr != nil {
|
||||
getErr = errors.Wrapf(getErr, "error reading %q", src)
|
||||
getErr = fmt.Errorf("error reading %q: %w", src, getErr)
|
||||
}
|
||||
if closeErr != nil {
|
||||
closeErr = errors.Wrapf(closeErr, "error closing %q", src)
|
||||
closeErr = fmt.Errorf("error closing %q: %w", src, closeErr)
|
||||
}
|
||||
if renameErr != nil {
|
||||
renameErr = errors.Wrapf(renameErr, "error renaming %q", src)
|
||||
renameErr = fmt.Errorf("error renaming %q: %w", src, renameErr)
|
||||
}
|
||||
if putErr != nil {
|
||||
putErr = errors.Wrapf(putErr, "error storing %q", src)
|
||||
putErr = fmt.Errorf("error storing %q: %w", src, putErr)
|
||||
}
|
||||
multiErr = multierror.Append(getErr, closeErr, renameErr, putErr)
|
||||
if multiErr != nil && multiErr.ErrorOrNil() != nil {
|
||||
|
@ -577,7 +586,7 @@ func (b *Builder) Add(destination string, extract bool, options AddAndCopyOption
|
|||
if options.IgnoreFile != "" {
|
||||
excludesFile = " using " + options.IgnoreFile
|
||||
}
|
||||
return errors.Wrapf(syscall.ENOENT, "no items matching glob %q copied (%d filtered out%s)", localSourceStat.Glob, len(localSourceStat.Globbed), excludesFile)
|
||||
return fmt.Errorf("no items matching glob %q copied (%d filtered out%s): %w", localSourceStat.Glob, len(localSourceStat.Globbed), excludesFile, syscall.ENOENT)
|
||||
}
|
||||
}
|
||||
return nil
|
||||
|
@ -599,7 +608,7 @@ func (b *Builder) userForRun(mountPoint string, userspec string) (specs.User, st
|
|||
if !strings.Contains(userspec, ":") {
|
||||
groups, err2 := chrootuser.GetAdditionalGroupsForUser(mountPoint, uint64(u.UID))
|
||||
if err2 != nil {
|
||||
if errors.Cause(err2) != chrootuser.ErrNoSuchUser && err == nil {
|
||||
if !errors.Is(err2, chrootuser.ErrNoSuchUser) && err == nil {
|
||||
err = err2
|
||||
}
|
||||
} else {
|
||||
|
@ -629,7 +638,7 @@ func (b *Builder) userForCopy(mountPoint string, userspec string) (uint32, uint3
|
|||
|
||||
// If userspec did not specify any values for user or group, then fail
|
||||
if user == "" && group == "" {
|
||||
return 0, 0, errors.Errorf("can't find uid for user %s", userspec)
|
||||
return 0, 0, fmt.Errorf("can't find uid for user %s", userspec)
|
||||
}
|
||||
|
||||
// If userspec specifies values for user or group, check for numeric values
|
||||
|
|
|
@ -4,6 +4,7 @@
|
|||
package bind
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"os"
|
||||
"path/filepath"
|
||||
|
@ -14,7 +15,6 @@ import (
|
|||
"github.com/containers/storage/pkg/idtools"
|
||||
"github.com/containers/storage/pkg/mount"
|
||||
"github.com/opencontainers/runtime-spec/specs-go"
|
||||
"github.com/pkg/errors"
|
||||
"github.com/sirupsen/logrus"
|
||||
"golang.org/x/sys/unix"
|
||||
)
|
||||
|
@ -29,28 +29,28 @@ func SetupIntermediateMountNamespace(spec *specs.Spec, bundlePath string) (unmou
|
|||
|
||||
// We expect a root directory to be defined.
|
||||
if spec.Root == nil {
|
||||
return nil, errors.Errorf("configuration has no root filesystem?")
|
||||
return nil, errors.New("configuration has no root filesystem?")
|
||||
}
|
||||
rootPath := spec.Root.Path
|
||||
|
||||
// Create a new mount namespace in which to do the things we're doing.
|
||||
if err := unix.Unshare(unix.CLONE_NEWNS); err != nil {
|
||||
return nil, errors.Wrapf(err, "error creating new mount namespace for %v", spec.Process.Args)
|
||||
return nil, fmt.Errorf("error creating new mount namespace for %v: %w", spec.Process.Args, err)
|
||||
}
|
||||
|
||||
// Make all of our mounts private to our namespace.
|
||||
if err := mount.MakeRPrivate("/"); err != nil {
|
||||
return nil, errors.Wrapf(err, "error making mounts private to mount namespace for %v", spec.Process.Args)
|
||||
return nil, fmt.Errorf("error making mounts private to mount namespace for %v: %w", spec.Process.Args, err)
|
||||
}
|
||||
|
||||
// Make sure the bundle directory is searchable. We created it with
|
||||
// TempDir(), so it should have started with permissions set to 0700.
|
||||
info, err := os.Stat(bundlePath)
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "error checking permissions on %q", bundlePath)
|
||||
return nil, fmt.Errorf("error checking permissions on %q: %w", bundlePath, err)
|
||||
}
|
||||
if err = os.Chmod(bundlePath, info.Mode()|0111); err != nil {
|
||||
return nil, errors.Wrapf(err, "error loosening permissions on %q", bundlePath)
|
||||
return nil, fmt.Errorf("error loosening permissions on %q: %w", bundlePath, err)
|
||||
}
|
||||
|
||||
// Figure out who needs to be able to reach these bind mounts in order
|
||||
|
@ -117,23 +117,23 @@ func SetupIntermediateMountNamespace(spec *specs.Spec, bundlePath string) (unmou
|
|||
// access.
|
||||
mnt := filepath.Join(bundlePath, "mnt")
|
||||
if err = idtools.MkdirAndChown(mnt, 0100, idtools.IDPair{UID: int(rootUID), GID: int(rootGID)}); err != nil {
|
||||
return unmountAll, errors.Wrapf(err, "error creating %q owned by the container's root user", mnt)
|
||||
return unmountAll, fmt.Errorf("error creating %q owned by the container's root user: %w", mnt, err)
|
||||
}
|
||||
|
||||
// Make that directory private, and add it to the list of locations we
|
||||
// unmount at cleanup time.
|
||||
if err = mount.MakeRPrivate(mnt); err != nil {
|
||||
return unmountAll, errors.Wrapf(err, "error marking filesystem at %q as private", mnt)
|
||||
return unmountAll, fmt.Errorf("error marking filesystem at %q as private: %w", mnt, err)
|
||||
}
|
||||
unmount = append([]string{mnt}, unmount...)
|
||||
|
||||
// Create a bind mount for the root filesystem and add it to the list.
|
||||
rootfs := filepath.Join(mnt, "rootfs")
|
||||
if err = os.Mkdir(rootfs, 0000); err != nil {
|
||||
return unmountAll, errors.Wrapf(err, "error creating directory %q", rootfs)
|
||||
return unmountAll, fmt.Errorf("error creating directory %q: %w", rootfs, err)
|
||||
}
|
||||
if err = unix.Mount(rootPath, rootfs, "", unix.MS_BIND|unix.MS_REC|unix.MS_PRIVATE, ""); err != nil {
|
||||
return unmountAll, errors.Wrapf(err, "error bind mounting root filesystem from %q to %q", rootPath, rootfs)
|
||||
return unmountAll, fmt.Errorf("error bind mounting root filesystem from %q to %q: %w", rootPath, rootfs, err)
|
||||
}
|
||||
logrus.Debugf("bind mounted %q to %q", rootPath, rootfs)
|
||||
unmount = append([]string{rootfs}, unmount...)
|
||||
|
@ -154,28 +154,28 @@ func SetupIntermediateMountNamespace(spec *specs.Spec, bundlePath string) (unmou
|
|||
logrus.Warnf("couldn't find %q on host to bind mount into container", spec.Mounts[i].Source)
|
||||
continue
|
||||
}
|
||||
return unmountAll, errors.Wrapf(err, "error checking if %q is a directory", spec.Mounts[i].Source)
|
||||
return unmountAll, fmt.Errorf("error checking if %q is a directory: %w", spec.Mounts[i].Source, err)
|
||||
}
|
||||
stage := filepath.Join(mnt, fmt.Sprintf("buildah-bind-target-%d", i))
|
||||
if info.IsDir() {
|
||||
// If the source is a directory, make one to use as the
|
||||
// mount target.
|
||||
if err = os.Mkdir(stage, 0000); err != nil {
|
||||
return unmountAll, errors.Wrapf(err, "error creating directory %q", stage)
|
||||
return unmountAll, fmt.Errorf("error creating directory %q: %w", stage, err)
|
||||
}
|
||||
} else {
|
||||
// If the source is not a directory, create an empty
|
||||
// file to use as the mount target.
|
||||
file, err := os.OpenFile(stage, os.O_WRONLY|os.O_CREATE, 0000)
|
||||
if err != nil {
|
||||
return unmountAll, errors.Wrapf(err, "error creating file %q", stage)
|
||||
return unmountAll, fmt.Errorf("error creating file %q: %w", stage, err)
|
||||
}
|
||||
file.Close()
|
||||
}
|
||||
// Bind mount the source from wherever it is to a place where
|
||||
// we know the runtime helper will be able to get to it...
|
||||
if err = unix.Mount(spec.Mounts[i].Source, stage, "", unix.MS_BIND|unix.MS_REC|unix.MS_PRIVATE, ""); err != nil {
|
||||
return unmountAll, errors.Wrapf(err, "error bind mounting bind object from %q to %q", spec.Mounts[i].Source, stage)
|
||||
return unmountAll, fmt.Errorf("error bind mounting bind object from %q to %q: %w", spec.Mounts[i].Source, stage, err)
|
||||
}
|
||||
logrus.Debugf("bind mounted %q to %q", spec.Mounts[i].Source, stage)
|
||||
spec.Mounts[i].Source = stage
|
||||
|
@ -209,7 +209,7 @@ func leaveBindMountAlone(mount specs.Mount) bool {
|
|||
func UnmountMountpoints(mountpoint string, mountpointsToRemove []string) error {
|
||||
mounts, err := mount.GetMounts()
|
||||
if err != nil {
|
||||
return errors.Wrapf(err, "error retrieving list of mounts")
|
||||
return fmt.Errorf("error retrieving list of mounts: %w", err)
|
||||
}
|
||||
// getChildren returns the list of mount IDs that hang off of the
|
||||
// specified ID.
|
||||
|
@ -255,7 +255,10 @@ func UnmountMountpoints(mountpoint string, mountpointsToRemove []string) error {
|
|||
// find the top of the tree we're unmounting
|
||||
top := getMountByPoint(mountpoint)
|
||||
if top == nil {
|
||||
return errors.Wrapf(err, "%q is not mounted", mountpoint)
|
||||
if err != nil {
|
||||
return fmt.Errorf("%q is not mounted: %w", mountpoint, err)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
// add all of the mounts that are hanging off of it
|
||||
tree := getTree(top.ID)
|
||||
|
@ -270,7 +273,7 @@ func UnmountMountpoints(mountpoint string, mountpointsToRemove []string) error {
|
|||
logrus.Debugf("mountpoint %q is not present(?), skipping", mount.Mountpoint)
|
||||
continue
|
||||
}
|
||||
return errors.Wrapf(err, "error checking if %q is mounted", mount.Mountpoint)
|
||||
return fmt.Errorf("error checking if %q is mounted: %w", mount.Mountpoint, err)
|
||||
}
|
||||
if uint64(mount.Major) != uint64(st.Dev) || uint64(mount.Minor) != uint64(st.Dev) { //nolint:unconvert // (required for some OS/arch combinations)
|
||||
logrus.Debugf("%q is apparently not really mounted, skipping", mount.Mountpoint)
|
||||
|
@ -293,7 +296,7 @@ func UnmountMountpoints(mountpoint string, mountpointsToRemove []string) error {
|
|||
// if we're also supposed to remove this thing, do that, too
|
||||
if cutil.StringInSlice(mount.Mountpoint, mountpointsToRemove) {
|
||||
if err := os.Remove(mount.Mountpoint); err != nil {
|
||||
return errors.Wrapf(err, "error removing %q", mount.Mountpoint)
|
||||
return fmt.Errorf("error removing %q: %w", mount.Mountpoint, err)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
|
|
@ -19,7 +19,6 @@ import (
|
|||
"github.com/containers/storage"
|
||||
"github.com/containers/storage/pkg/ioutils"
|
||||
v1 "github.com/opencontainers/image-spec/specs-go/v1"
|
||||
"github.com/pkg/errors"
|
||||
"github.com/sirupsen/logrus"
|
||||
)
|
||||
|
||||
|
@ -408,10 +407,10 @@ func OpenBuilder(store storage.Store, container string) (*Builder, error) {
|
|||
}
|
||||
b := &Builder{}
|
||||
if err = json.Unmarshal(buildstate, &b); err != nil {
|
||||
return nil, errors.Wrapf(err, "error parsing %q, read from %q", string(buildstate), filepath.Join(cdir, stateFile))
|
||||
return nil, fmt.Errorf("error parsing %q, read from %q: %w", string(buildstate), filepath.Join(cdir, stateFile), err)
|
||||
}
|
||||
if b.Type != containerType {
|
||||
return nil, errors.Errorf("container %q is not a %s container (is a %q container)", container, define.Package, b.Type)
|
||||
return nil, fmt.Errorf("container %q is not a %s container (is a %q container)", container, define.Package, b.Type)
|
||||
}
|
||||
|
||||
netInt, err := getNetworkInterface(store, b.CNIConfigDir, b.CNIPluginPath)
|
||||
|
@ -520,7 +519,7 @@ func (b *Builder) Save() error {
|
|||
return err
|
||||
}
|
||||
if err = ioutils.AtomicWriteFile(filepath.Join(cdir, stateFile), buildstate, 0600); err != nil {
|
||||
return errors.Wrapf(err, "error saving builder state to %q", filepath.Join(cdir, stateFile))
|
||||
return fmt.Errorf("error saving builder state to %q: %w", filepath.Join(cdir, stateFile), err)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
|
|
@ -0,0 +1,168 @@
|
|||
# For automatic rebuilds in COPR
|
||||
|
||||
# The following tag is to get correct syntax highlighting for this file in vim text editor
|
||||
# vim: syntax=spec
|
||||
|
||||
# Any additinoal comments should go below this line or else syntax highlighting
|
||||
# may not work.
|
||||
|
||||
# CAUTION: This is not a replacement for RPMs provided by your distro.
|
||||
# Only intended to build and test the latest unreleased changes.
|
||||
|
||||
%global with_debug 1
|
||||
|
||||
%if 0%{?with_debug}
|
||||
%global _find_debuginfo_dwz_opts %{nil}
|
||||
%global _dwz_low_mem_die_limit 0
|
||||
%else
|
||||
%global debug_package %{nil}
|
||||
%endif
|
||||
|
||||
%if ! 0%{?gobuild:1}
|
||||
%define gobuild(o:) GO111MODULE=off go build -buildmode pie -compiler gc -tags="rpm_crashtraceback ${BUILDTAGS:-}" -ldflags "${LDFLAGS:-} -B 0x$(head -c20 /dev/urandom|od -An -tx1|tr -d ' \\n') -extldflags '-Wl,-z,relro -Wl,-z,now -specs=/usr/lib/rpm/redhat/redhat-hardened-ld '" -a -v -x %{?**};
|
||||
%endif
|
||||
|
||||
%global provider github
|
||||
%global provider_tld com
|
||||
%global project containers
|
||||
%global repo %{name}
|
||||
# https://github.com/containers/%%{name}
|
||||
%global import_path %{provider}.%{provider_tld}/%{project}/%{repo}
|
||||
%global git0 https://%{import_path}
|
||||
|
||||
Name: {{{ git_dir_name }}}
|
||||
Epoch: 101
|
||||
Version: {{{ git_dir_version }}}
|
||||
Release: 1%{?dist}
|
||||
Summary: Manage Pods, Containers and Container Images
|
||||
License: ASL 2.0
|
||||
URL: https://github.com/containers/buildah
|
||||
VCS: {{{ git_dir_vcs }}}
|
||||
Source: {{{ git_dir_pack }}}
|
||||
BuildRequires: device-mapper-devel
|
||||
BuildRequires: git-core
|
||||
BuildRequires: golang
|
||||
BuildRequires: glib2-devel
|
||||
BuildRequires: glibc-static
|
||||
BuildRequires: go-md2man
|
||||
%if 0%{?fedora} || 0%{?rhel} >= 9
|
||||
BuildRequires: go-rpm-macros
|
||||
%endif
|
||||
BuildRequires: gpgme-devel
|
||||
BuildRequires: libassuan-devel
|
||||
BuildRequires: make
|
||||
BuildRequires: ostree-devel
|
||||
BuildRequires: shadow-utils-subid-devel
|
||||
%if 0%{?fedora} && ! 0%{?rhel}
|
||||
BuildRequires: btrfs-progs-devel
|
||||
%endif
|
||||
%if 0%{?fedora} <= 35
|
||||
Requires: containers-common >= 4:1-39
|
||||
%else
|
||||
Requires: containers-common >= 4:1-46
|
||||
%endif
|
||||
%if 0%{?rhel}
|
||||
BuildRequires: libseccomp-devel
|
||||
%else
|
||||
BuildRequires: libseccomp-static
|
||||
%endif
|
||||
Requires: libseccomp
|
||||
Suggests: cpp
|
||||
Suggests: qemu-user-static
|
||||
|
||||
%description
|
||||
The %{name} package provides a command line tool which can be used to
|
||||
* create a working container from scratch
|
||||
or
|
||||
* create a working container from an image as a starting point
|
||||
* mount/umount a working container's root file system for manipulation
|
||||
* save container's root file system layer to create a new image
|
||||
* delete a working container or an image.
|
||||
|
||||
%package tests
|
||||
Summary: Tests for %{name}
|
||||
Requires: %{name} = %{version}-%{release}
|
||||
Requires: bats
|
||||
Requires: bzip2
|
||||
Requires: podman
|
||||
Requires: golang
|
||||
Requires: jq
|
||||
Requires: httpd-tools
|
||||
Requires: openssl
|
||||
Requires: nmap-ncat
|
||||
Requires: git-daemon
|
||||
|
||||
%description tests
|
||||
%{summary}
|
||||
|
||||
This package contains system tests for %{name}
|
||||
|
||||
%prep
|
||||
{{{ git_dir_setup_macro }}}
|
||||
|
||||
%build
|
||||
%set_build_flags
|
||||
export GO111MODULE=off
|
||||
export GOPATH=$(pwd)/_build:$(pwd)
|
||||
export CGO_CFLAGS=$CFLAGS
|
||||
# These extra flags present in $CFLAGS have been skipped for now as they break the build
|
||||
CGO_CFLAGS=$(echo $CGO_CFLAGS | sed 's/-flto=auto//g')
|
||||
CGO_CFLAGS=$(echo $CGO_CFLAGS | sed 's/-Wp,D_GLIBCXX_ASSERTIONS//g')
|
||||
CGO_CFLAGS=$(echo $CGO_CFLAGS | sed 's/-specs=\/usr\/lib\/rpm\/redhat\/redhat-annobin-cc1//g')
|
||||
|
||||
%ifarch x86_64
|
||||
export CGO_CFLAGS+=" -m64 -mtune=generic -fcf-protection=full"
|
||||
%endif
|
||||
mkdir _build
|
||||
pushd _build
|
||||
mkdir -p src/%{provider}.%{provider_tld}/%{project}
|
||||
ln -s $(dirs +1 -l) src/%{import_path}
|
||||
popd
|
||||
|
||||
mv vendor src
|
||||
|
||||
export CNI_VERSION=`grep '^# github.com/containernetworking/cni ' src/modules.txt | sed 's,.* ,,'`
|
||||
export LDFLAGS="-X main.buildInfo=`date +%s` -X main.cniVersion=${CNI_VERSION}"
|
||||
|
||||
export BUILDTAGS='seccomp libsubid selinux'
|
||||
%if 0%{?rhel}
|
||||
export BUILDTAGS='$BUILDTAGS exclude_graphdriver_btrfs btrfs_noversion'
|
||||
%endif
|
||||
|
||||
%gobuild -o bin/%{name} %{import_path}/cmd/%{name}
|
||||
%gobuild -o bin/imgtype %{import_path}/tests/imgtype
|
||||
%gobuild -o bin/copy %{import_path}/tests/copy
|
||||
GOMD2MAN=go-md2man %{__make} -C docs
|
||||
|
||||
# This will copy the files generated by the `make` command above into
|
||||
# the installable rpm package.
|
||||
%install
|
||||
export GOPATH=$(pwd)/_build:$(pwd):%{gopath}
|
||||
make DESTDIR=%{buildroot} PREFIX=%{_prefix} install install.completions
|
||||
make DESTDIR=%{buildroot} PREFIX=%{_prefix} -C docs install
|
||||
|
||||
install -d -p %{buildroot}/%{_datadir}/%{name}/test/system
|
||||
cp -pav tests/. %{buildroot}/%{_datadir}/%{name}/test/system
|
||||
cp bin/imgtype %{buildroot}/%{_bindir}/%{name}-imgtype
|
||||
cp bin/copy %{buildroot}/%{_bindir}/%{name}-copy
|
||||
|
||||
rm -f %{buildroot}%{_mandir}/man5/{Containerfile.5*,containerignore.5*}
|
||||
|
||||
|
||||
%files
|
||||
%license LICENSE
|
||||
%doc README.md
|
||||
%{_bindir}/%{name}
|
||||
%{_mandir}/man1/%{name}*
|
||||
%dir %{_datadir}/bash-completion
|
||||
%dir %{_datadir}/bash-completion/completions
|
||||
%{_datadir}/bash-completion/completions/%{name}
|
||||
|
||||
%files tests
|
||||
%license LICENSE
|
||||
%{_bindir}/%{name}-imgtype
|
||||
%{_bindir}/%{name}-copy
|
||||
%{_datadir}/%{name}/test
|
||||
|
||||
%changelog
|
||||
{{{ git_dir_changelog }}}
|
|
@ -1,3 +1,4 @@
|
|||
//go:build linux
|
||||
// +build linux
|
||||
|
||||
package chroot
|
||||
|
@ -29,7 +30,6 @@ import (
|
|||
"github.com/containers/storage/pkg/unshare"
|
||||
"github.com/opencontainers/runc/libcontainer/apparmor"
|
||||
"github.com/opencontainers/runtime-spec/specs-go"
|
||||
"github.com/pkg/errors"
|
||||
"github.com/sirupsen/logrus"
|
||||
"github.com/syndtr/gocapability/capability"
|
||||
"golang.org/x/sys/unix"
|
||||
|
@ -109,7 +109,7 @@ func RunUsingChroot(spec *specs.Spec, bundlePath, homeDir string, stdin io.Reade
|
|||
return err
|
||||
}
|
||||
if err = ioutils.AtomicWriteFile(filepath.Join(bundlePath, "config.json"), specbytes, 0600); err != nil {
|
||||
return errors.Wrapf(err, "error storing runtime configuration")
|
||||
return fmt.Errorf("error storing runtime configuration: %w", err)
|
||||
}
|
||||
logrus.Debugf("config = %v", string(specbytes))
|
||||
|
||||
|
@ -127,14 +127,14 @@ func RunUsingChroot(spec *specs.Spec, bundlePath, homeDir string, stdin io.Reade
|
|||
// Create a pipe for passing configuration down to the next process.
|
||||
preader, pwriter, err := os.Pipe()
|
||||
if err != nil {
|
||||
return errors.Wrapf(err, "error creating configuration pipe")
|
||||
return fmt.Errorf("error creating configuration pipe: %w", err)
|
||||
}
|
||||
config, conferr := json.Marshal(runUsingChrootSubprocOptions{
|
||||
Spec: spec,
|
||||
BundlePath: bundlePath,
|
||||
})
|
||||
if conferr != nil {
|
||||
return errors.Wrapf(conferr, "error encoding configuration for %q", runUsingChrootCommand)
|
||||
return fmt.Errorf("error encoding configuration for %q: %w", runUsingChrootCommand, conferr)
|
||||
}
|
||||
|
||||
// Set our terminal's mode to raw, to pass handling of special
|
||||
|
@ -551,7 +551,7 @@ func runUsingChroot(spec *specs.Spec, bundlePath string, ctty *os.File, stdin io
|
|||
// Create a pipe for passing configuration down to the next process.
|
||||
preader, pwriter, err := os.Pipe()
|
||||
if err != nil {
|
||||
return 1, errors.Wrapf(err, "error creating configuration pipe")
|
||||
return 1, fmt.Errorf("error creating configuration pipe: %w", err)
|
||||
}
|
||||
config, conferr := json.Marshal(runUsingChrootExecSubprocOptions{
|
||||
Spec: spec,
|
||||
|
@ -921,7 +921,7 @@ func setApparmorProfile(spec *specs.Spec) error {
|
|||
return nil
|
||||
}
|
||||
if err := apparmor.ApplyProfile(spec.Process.ApparmorProfile); err != nil {
|
||||
return errors.Wrapf(err, "error setting apparmor profile to %q", spec.Process.ApparmorProfile)
|
||||
return fmt.Errorf("error setting apparmor profile to %q: %w", spec.Process.ApparmorProfile, err)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
@ -930,14 +930,14 @@ func setApparmorProfile(spec *specs.Spec) error {
|
|||
func setCapabilities(spec *specs.Spec, keepCaps ...string) error {
|
||||
currentCaps, err := capability.NewPid2(0)
|
||||
if err != nil {
|
||||
return errors.Wrapf(err, "error reading capabilities of current process")
|
||||
return fmt.Errorf("error reading capabilities of current process: %w", err)
|
||||
}
|
||||
if err := currentCaps.Load(); err != nil {
|
||||
return errors.Wrapf(err, "error loading capabilities")
|
||||
return fmt.Errorf("error loading capabilities: %w", err)
|
||||
}
|
||||
caps, err := capability.NewPid2(0)
|
||||
if err != nil {
|
||||
return errors.Wrapf(err, "error reading capabilities of current process")
|
||||
return fmt.Errorf("error reading capabilities of current process: %w", err)
|
||||
}
|
||||
capMap := map[capability.CapType][]string{
|
||||
capability.BOUNDING: spec.Process.Capabilities.Bounding,
|
||||
|
@ -958,7 +958,7 @@ func setCapabilities(spec *specs.Spec, keepCaps ...string) error {
|
|||
}
|
||||
}
|
||||
if cap == noCap {
|
||||
return errors.Errorf("error mapping capability %q to a number", capToSet)
|
||||
return fmt.Errorf("error mapping capability %q to a number", capToSet)
|
||||
}
|
||||
caps.Set(capType, cap)
|
||||
}
|
||||
|
@ -971,7 +971,7 @@ func setCapabilities(spec *specs.Spec, keepCaps ...string) error {
|
|||
}
|
||||
}
|
||||
if cap == noCap {
|
||||
return errors.Errorf("error mapping capability %q to a number", capToSet)
|
||||
return fmt.Errorf("error mapping capability %q to a number", capToSet)
|
||||
}
|
||||
if currentCaps.Get(capType, cap) {
|
||||
caps.Set(capType, cap)
|
||||
|
@ -979,7 +979,7 @@ func setCapabilities(spec *specs.Spec, keepCaps ...string) error {
|
|||
}
|
||||
}
|
||||
if err = caps.Apply(capability.CAPS | capability.BOUNDS | capability.AMBS); err != nil {
|
||||
return errors.Wrapf(err, "error setting capabilities")
|
||||
return fmt.Errorf("error setting capabilities: %w", err)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
@ -994,7 +994,7 @@ func parseRlimits(spec *specs.Spec) (map[int]unix.Rlimit, error) {
|
|||
for _, limit := range spec.Process.Rlimits {
|
||||
resource, recognized := rlimitsMap[strings.ToUpper(limit.Type)]
|
||||
if !recognized {
|
||||
return nil, errors.Errorf("error parsing limit type %q", limit.Type)
|
||||
return nil, fmt.Errorf("error parsing limit type %q", limit.Type)
|
||||
}
|
||||
parsed[resource] = unix.Rlimit{Cur: limit.Soft, Max: limit.Hard}
|
||||
}
|
||||
|
@ -1011,7 +1011,7 @@ func setRlimits(spec *specs.Spec, onlyLower, onlyRaise bool) error {
|
|||
for resource, desired := range limits {
|
||||
var current unix.Rlimit
|
||||
if err := unix.Getrlimit(resource, ¤t); err != nil {
|
||||
return errors.Wrapf(err, "error reading %q limit", rlimitsReverseMap[resource])
|
||||
return fmt.Errorf("error reading %q limit: %w", rlimitsReverseMap[resource], err)
|
||||
}
|
||||
if desired.Max > current.Max && onlyLower {
|
||||
// this would raise a hard limit, and we're only here to lower them
|
||||
|
@ -1022,7 +1022,7 @@ func setRlimits(spec *specs.Spec, onlyLower, onlyRaise bool) error {
|
|||
continue
|
||||
}
|
||||
if err := unix.Setrlimit(resource, &desired); err != nil {
|
||||
return errors.Wrapf(err, "error setting %q limit to soft=%d,hard=%d (was soft=%d,hard=%d)", rlimitsReverseMap[resource], desired.Cur, desired.Max, current.Cur, current.Max)
|
||||
return fmt.Errorf("error setting %q limit to soft=%d,hard=%d (was soft=%d,hard=%d): %w", rlimitsReverseMap[resource], desired.Cur, desired.Max, current.Cur, current.Max, err)
|
||||
}
|
||||
}
|
||||
return nil
|
||||
|
@ -1032,11 +1032,11 @@ func makeReadOnly(mntpoint string, flags uintptr) error {
|
|||
var fs unix.Statfs_t
|
||||
// Make sure it's read-only.
|
||||
if err := unix.Statfs(mntpoint, &fs); err != nil {
|
||||
return errors.Wrapf(err, "error checking if directory %q was bound read-only", mntpoint)
|
||||
return fmt.Errorf("error checking if directory %q was bound read-only: %w", mntpoint, err)
|
||||
}
|
||||
if fs.Flags&unix.ST_RDONLY == 0 {
|
||||
if err := unix.Mount(mntpoint, mntpoint, "bind", flags|unix.MS_REMOUNT, ""); err != nil {
|
||||
return errors.Wrapf(err, "error remounting %s in mount namespace read-only", mntpoint)
|
||||
return fmt.Errorf("error remounting %s in mount namespace read-only: %w", mntpoint, err)
|
||||
}
|
||||
}
|
||||
return nil
|
||||
|
@ -1097,16 +1097,16 @@ func setupChrootBindMounts(spec *specs.Spec, bundlePath string) (undoBinds func(
|
|||
}
|
||||
}
|
||||
if err != nil {
|
||||
return undoBinds, errors.Wrapf(err, "error bind mounting /dev from host into mount namespace")
|
||||
return undoBinds, fmt.Errorf("error bind mounting /dev from host into mount namespace: %w", err)
|
||||
}
|
||||
}
|
||||
// Make sure it's read-only.
|
||||
if err = unix.Statfs(subDev, &fs); err != nil {
|
||||
return undoBinds, errors.Wrapf(err, "error checking if directory %q was bound read-only", subDev)
|
||||
return undoBinds, fmt.Errorf("error checking if directory %q was bound read-only: %w", subDev, err)
|
||||
}
|
||||
if fs.Flags&unix.ST_RDONLY == 0 {
|
||||
if err := unix.Mount(subDev, subDev, "bind", devFlags|unix.MS_REMOUNT, ""); err != nil {
|
||||
return undoBinds, errors.Wrapf(err, "error remounting /dev in mount namespace read-only")
|
||||
return undoBinds, fmt.Errorf("error remounting /dev in mount namespace read-only: %w", err)
|
||||
}
|
||||
}
|
||||
logrus.Debugf("bind mounted %q to %q", "/dev", filepath.Join(spec.Root.Path, "/dev"))
|
||||
|
@ -1121,7 +1121,7 @@ func setupChrootBindMounts(spec *specs.Spec, bundlePath string) (undoBinds func(
|
|||
}
|
||||
}
|
||||
if err != nil {
|
||||
return undoBinds, errors.Wrapf(err, "error bind mounting /proc from host into mount namespace")
|
||||
return undoBinds, fmt.Errorf("error bind mounting /proc from host into mount namespace: %w", err)
|
||||
}
|
||||
}
|
||||
logrus.Debugf("bind mounted %q to %q", "/proc", filepath.Join(spec.Root.Path, "/proc"))
|
||||
|
@ -1136,7 +1136,7 @@ func setupChrootBindMounts(spec *specs.Spec, bundlePath string) (undoBinds func(
|
|||
}
|
||||
}
|
||||
if err != nil {
|
||||
return undoBinds, errors.Wrapf(err, "error bind mounting /sys from host into mount namespace")
|
||||
return undoBinds, fmt.Errorf("error bind mounting /sys from host into mount namespace: %w", err)
|
||||
}
|
||||
}
|
||||
if err := makeReadOnly(subSys, sysFlags); err != nil {
|
||||
|
@ -1194,14 +1194,14 @@ func setupChrootBindMounts(spec *specs.Spec, bundlePath string) (undoBinds func(
|
|||
case "bind":
|
||||
srcinfo, err = os.Stat(m.Source)
|
||||
if err != nil {
|
||||
return undoBinds, errors.Wrapf(err, "error examining %q for mounting in mount namespace", m.Source)
|
||||
return undoBinds, fmt.Errorf("error examining %q for mounting in mount namespace: %w", m.Source, err)
|
||||
}
|
||||
case "overlay":
|
||||
fallthrough
|
||||
case "tmpfs":
|
||||
srcinfo, err = os.Stat("/")
|
||||
if err != nil {
|
||||
return undoBinds, errors.Wrapf(err, "error examining / to use as a template for a %s", m.Type)
|
||||
return undoBinds, fmt.Errorf("error examining / to use as a template for a %s: %w", m.Type, err)
|
||||
}
|
||||
}
|
||||
target := filepath.Join(spec.Root.Path, m.Destination)
|
||||
|
@ -1211,7 +1211,7 @@ func setupChrootBindMounts(spec *specs.Spec, bundlePath string) (undoBinds func(
|
|||
if err == nil && stat != nil && (stat.Mode()&os.ModeSymlink != 0) {
|
||||
target, err = copier.Eval(spec.Root.Path, m.Destination, copier.EvalOptions{})
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "evaluating symlink %q", target)
|
||||
return nil, fmt.Errorf("evaluating symlink %q: %w", target, err)
|
||||
}
|
||||
// Stat the destination of the evaluated symlink
|
||||
_, err = os.Stat(target)
|
||||
|
@ -1219,20 +1219,20 @@ func setupChrootBindMounts(spec *specs.Spec, bundlePath string) (undoBinds func(
|
|||
if err != nil {
|
||||
// If the target can't be stat()ted, check the error.
|
||||
if !os.IsNotExist(err) {
|
||||
return undoBinds, errors.Wrapf(err, "error examining %q for mounting in mount namespace", target)
|
||||
return undoBinds, fmt.Errorf("error examining %q for mounting in mount namespace: %w", target, err)
|
||||
}
|
||||
// The target isn't there yet, so create it.
|
||||
if srcinfo.IsDir() {
|
||||
if err = os.MkdirAll(target, 0755); err != nil {
|
||||
return undoBinds, errors.Wrapf(err, "error creating mountpoint %q in mount namespace", target)
|
||||
return undoBinds, fmt.Errorf("error creating mountpoint %q in mount namespace: %w", target, err)
|
||||
}
|
||||
} else {
|
||||
if err = os.MkdirAll(filepath.Dir(target), 0755); err != nil {
|
||||
return undoBinds, errors.Wrapf(err, "error ensuring parent of mountpoint %q (%q) is present in mount namespace", target, filepath.Dir(target))
|
||||
return undoBinds, fmt.Errorf("error ensuring parent of mountpoint %q (%q) is present in mount namespace: %w", target, filepath.Dir(target), err)
|
||||
}
|
||||
var file *os.File
|
||||
if file, err = os.OpenFile(target, os.O_WRONLY|os.O_CREATE, 0755); err != nil {
|
||||
return undoBinds, errors.Wrapf(err, "error creating mountpoint %q in mount namespace", target)
|
||||
return undoBinds, fmt.Errorf("error creating mountpoint %q in mount namespace: %w", target, err)
|
||||
}
|
||||
file.Close()
|
||||
}
|
||||
|
@ -1272,28 +1272,28 @@ func setupChrootBindMounts(spec *specs.Spec, bundlePath string) (undoBinds func(
|
|||
// Do the bind mount.
|
||||
logrus.Debugf("bind mounting %q on %q", m.Destination, filepath.Join(spec.Root.Path, m.Destination))
|
||||
if err := unix.Mount(m.Source, target, "", requestFlags, ""); err != nil {
|
||||
return undoBinds, errors.Wrapf(err, "error bind mounting %q from host to %q in mount namespace (%q)", m.Source, m.Destination, target)
|
||||
return undoBinds, fmt.Errorf("error bind mounting %q from host to %q in mount namespace (%q): %w", m.Source, m.Destination, target, err)
|
||||
}
|
||||
logrus.Debugf("bind mounted %q to %q", m.Source, target)
|
||||
case "tmpfs":
|
||||
// Mount a tmpfs.
|
||||
if err := mount.Mount(m.Source, target, m.Type, strings.Join(append(m.Options, "private"), ",")); err != nil {
|
||||
return undoBinds, errors.Wrapf(err, "error mounting tmpfs to %q in mount namespace (%q, %q)", m.Destination, target, strings.Join(m.Options, ","))
|
||||
return undoBinds, fmt.Errorf("error mounting tmpfs to %q in mount namespace (%q, %q): %w", m.Destination, target, strings.Join(m.Options, ","), err)
|
||||
}
|
||||
logrus.Debugf("mounted a tmpfs to %q", target)
|
||||
case "overlay":
|
||||
// Mount a overlay.
|
||||
if err := mount.Mount(m.Source, target, m.Type, strings.Join(append(m.Options, "private"), ",")); err != nil {
|
||||
return undoBinds, errors.Wrapf(err, "error mounting overlay to %q in mount namespace (%q, %q)", m.Destination, target, strings.Join(m.Options, ","))
|
||||
return undoBinds, fmt.Errorf("error mounting overlay to %q in mount namespace (%q, %q): %w", m.Destination, target, strings.Join(m.Options, ","), err)
|
||||
}
|
||||
logrus.Debugf("mounted a overlay to %q", target)
|
||||
}
|
||||
if err = unix.Statfs(target, &fs); err != nil {
|
||||
return undoBinds, errors.Wrapf(err, "error checking if directory %q was bound read-only", target)
|
||||
return undoBinds, fmt.Errorf("error checking if directory %q was bound read-only: %w", target, err)
|
||||
}
|
||||
if uintptr(fs.Flags)&expectedFlags != expectedFlags {
|
||||
if err := unix.Mount(target, target, "bind", requestFlags|unix.MS_REMOUNT, ""); err != nil {
|
||||
return undoBinds, errors.Wrapf(err, "error remounting %q in mount namespace with expected flags", target)
|
||||
return undoBinds, fmt.Errorf("error remounting %q in mount namespace with expected flags: %w", target, err)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
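The hunks above, like the rest of this commit, replace github.com/pkg/errors wrapping (errors.Wrapf) with the standard library's fmt.Errorf and the %w verb. A minimal sketch, not buildah code, of why the two forms are interchangeable for callers that inspect the error chain:

package main

import (
	"errors"
	"fmt"
	"os"
)

// wrapMountErr stands in for the rewritten return statements above; with
// github.com/pkg/errors it would have been
// errors.Wrapf(err, "error creating mountpoint %q in mount namespace", target).
func wrapMountErr(target string, err error) error {
	return fmt.Errorf("error creating mountpoint %q in mount namespace: %w", target, err)
}

func main() {
	err := wrapMountErr("/tmp/x", os.ErrNotExist)
	fmt.Println(err)                            // message keeps the same shape as before
	fmt.Println(errors.Is(err, os.ErrNotExist)) // true: %w preserves the wrapped cause
}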
@ -1308,7 +1308,7 @@ func setupChrootBindMounts(spec *specs.Spec, bundlePath string) (undoBinds func(
|
|||
// No target, no problem.
|
||||
continue
|
||||
}
|
||||
return undoBinds, errors.Wrapf(err, "error checking %q for symlinks before marking it read-only", r)
|
||||
return undoBinds, fmt.Errorf("error checking %q for symlinks before marking it read-only: %w", r, err)
|
||||
}
|
||||
// Check if the location is already read-only.
|
||||
var fs unix.Statfs_t
|
||||
|
@ -1317,7 +1317,7 @@ func setupChrootBindMounts(spec *specs.Spec, bundlePath string) (undoBinds func(
|
|||
// No target, no problem.
|
||||
continue
|
||||
}
|
||||
return undoBinds, errors.Wrapf(err, "error checking if directory %q is already read-only", target)
|
||||
return undoBinds, fmt.Errorf("error checking if directory %q is already read-only: %w", target, err)
|
||||
}
|
||||
if fs.Flags&unix.ST_RDONLY != 0 {
|
||||
continue
|
||||
|
@ -1329,23 +1329,23 @@ func setupChrootBindMounts(spec *specs.Spec, bundlePath string) (undoBinds func(
|
|||
// No target, no problem.
|
||||
continue
|
||||
}
|
||||
return undoBinds, errors.Wrapf(err, "error bind mounting %q onto itself in preparation for making it read-only", target)
|
||||
return undoBinds, fmt.Errorf("error bind mounting %q onto itself in preparation for making it read-only: %w", target, err)
|
||||
}
|
||||
// Remount the location read-only.
|
||||
if err = unix.Statfs(target, &fs); err != nil {
|
||||
return undoBinds, errors.Wrapf(err, "error checking if directory %q was bound read-only", target)
|
||||
return undoBinds, fmt.Errorf("error checking if directory %q was bound read-only: %w", target, err)
|
||||
}
|
||||
if fs.Flags&unix.ST_RDONLY == 0 {
|
||||
if err := unix.Mount(target, target, "", roFlags|unix.MS_BIND|unix.MS_REMOUNT, ""); err != nil {
|
||||
return undoBinds, errors.Wrapf(err, "error remounting %q in mount namespace read-only", target)
|
||||
return undoBinds, fmt.Errorf("error remounting %q in mount namespace read-only: %w", target, err)
|
||||
}
|
||||
}
|
||||
// Check again.
|
||||
if err = unix.Statfs(target, &fs); err != nil {
|
||||
return undoBinds, errors.Wrapf(err, "error checking if directory %q was remounted read-only", target)
|
||||
return undoBinds, fmt.Errorf("error checking if directory %q was remounted read-only: %w", target, err)
|
||||
}
|
||||
if fs.Flags&unix.ST_RDONLY == 0 {
|
||||
return undoBinds, errors.Wrapf(err, "error verifying that %q in mount namespace was remounted read-only", target)
|
||||
return undoBinds, fmt.Errorf("error verifying that %q in mount namespace was remounted read-only: %w", target, err)
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -1353,7 +1353,7 @@ func setupChrootBindMounts(spec *specs.Spec, bundlePath string) (undoBinds func(
|
|||
roEmptyDir := filepath.Join(bundlePath, "empty")
|
||||
if len(spec.Linux.MaskedPaths) > 0 {
|
||||
if err := os.Mkdir(roEmptyDir, 0700); err != nil {
|
||||
return undoBinds, errors.Wrapf(err, "error creating empty directory %q", roEmptyDir)
|
||||
return undoBinds, fmt.Errorf("error creating empty directory %q: %w", roEmptyDir, err)
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -1374,19 +1374,19 @@ func setupChrootBindMounts(spec *specs.Spec, bundlePath string) (undoBinds func(
|
|||
// No target, no problem.
|
||||
continue
|
||||
}
|
||||
return undoBinds, errors.Wrapf(err, "error examining %q for masking in mount namespace", target)
|
||||
return undoBinds, fmt.Errorf("error examining %q for masking in mount namespace: %w", target, err)
|
||||
}
|
||||
if targetinfo.IsDir() {
|
||||
// The target's a directory. Check if it's a read-only filesystem.
|
||||
var statfs unix.Statfs_t
|
||||
if err = unix.Statfs(target, &statfs); err != nil {
|
||||
return undoBinds, errors.Wrapf(err, "error checking if directory %q is a mountpoint", target)
|
||||
return undoBinds, fmt.Errorf("error checking if directory %q is a mountpoint: %w", target, err)
|
||||
}
|
||||
isReadOnly := statfs.Flags&unix.MS_RDONLY != 0
|
||||
// Check if any of the IDs we're mapping could read it.
|
||||
var stat unix.Stat_t
|
||||
if err = unix.Stat(target, &stat); err != nil {
|
||||
return undoBinds, errors.Wrapf(err, "error checking permissions on directory %q", target)
|
||||
return undoBinds, fmt.Errorf("error checking permissions on directory %q: %w", target, err)
|
||||
}
|
||||
isAccessible := false
|
||||
if stat.Mode&unix.S_IROTH|unix.S_IXOTH != 0 {
|
||||
|
@ -1417,13 +1417,13 @@ func setupChrootBindMounts(spec *specs.Spec, bundlePath string) (undoBinds func(
|
|||
directory, err := os.Open(target)
|
||||
if err != nil {
|
||||
if !os.IsPermission(err) {
|
||||
return undoBinds, errors.Wrapf(err, "error opening directory %q", target)
|
||||
return undoBinds, fmt.Errorf("error opening directory %q: %w", target, err)
|
||||
}
|
||||
} else {
|
||||
names, err := directory.Readdirnames(0)
|
||||
directory.Close()
|
||||
if err != nil {
|
||||
return undoBinds, errors.Wrapf(err, "error reading contents of directory %q", target)
|
||||
return undoBinds, fmt.Errorf("error reading contents of directory %q: %w", target, err)
|
||||
}
|
||||
hasContent = false
|
||||
for _, name := range names {
|
||||
|
@ -1442,14 +1442,14 @@ func setupChrootBindMounts(spec *specs.Spec, bundlePath string) (undoBinds func(
|
|||
roFlags := uintptr(syscall.MS_BIND | syscall.MS_NOSUID | syscall.MS_NODEV | syscall.MS_NOEXEC | syscall.MS_RDONLY)
|
||||
if !isReadOnly || (hasContent && isAccessible) {
|
||||
if err = unix.Mount(roEmptyDir, target, "bind", roFlags, ""); err != nil {
|
||||
return undoBinds, errors.Wrapf(err, "error masking directory %q in mount namespace", target)
|
||||
return undoBinds, fmt.Errorf("error masking directory %q in mount namespace: %w", target, err)
|
||||
}
|
||||
if err = unix.Statfs(target, &fs); err != nil {
|
||||
return undoBinds, errors.Wrapf(err, "error checking if directory %q was mounted read-only in mount namespace", target)
|
||||
return undoBinds, fmt.Errorf("error checking if directory %q was mounted read-only in mount namespace: %w", target, err)
|
||||
}
|
||||
if fs.Flags&unix.ST_RDONLY == 0 {
|
||||
if err = unix.Mount(target, target, "", roFlags|syscall.MS_REMOUNT, ""); err != nil {
|
||||
return undoBinds, errors.Wrapf(err, "error making sure directory %q in mount namespace is read only", target)
|
||||
return undoBinds, fmt.Errorf("error making sure directory %q in mount namespace is read only: %w", target, err)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
@ -1457,7 +1457,7 @@ func setupChrootBindMounts(spec *specs.Spec, bundlePath string) (undoBinds func(
|
|||
// If the target is not a directory or os.DevNull, bind mount os.DevNull over it.
|
||||
if !isDevNull(targetinfo) {
|
||||
if err = unix.Mount(os.DevNull, target, "", uintptr(syscall.MS_BIND|syscall.MS_RDONLY|syscall.MS_PRIVATE), ""); err != nil {
|
||||
return undoBinds, errors.Wrapf(err, "error masking non-directory %q in mount namespace", target)
|
||||
return undoBinds, fmt.Errorf("error masking non-directory %q in mount namespace: %w", target, err)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
|
|
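The masking hunks above keep the existing behaviour: directories listed in spec.Linux.MaskedPaths are hidden by bind mounting an empty read-only directory over them, and anything else by bind mounting os.DevNull. A hedged sketch of that pattern using golang.org/x/sys/unix with hypothetical paths (the real code also rechecks the mount with Statfs and remounts read-only if needed):

package masksketch

import (
	"fmt"
	"os"

	"golang.org/x/sys/unix"
)

// maskPath hides target inside the mount namespace. roEmptyDir is an empty
// directory created for this purpose (the code above uses bundlePath/"empty").
func maskPath(target, roEmptyDir string, isDir bool) error {
	roFlags := uintptr(unix.MS_BIND | unix.MS_NOSUID | unix.MS_NODEV | unix.MS_NOEXEC | unix.MS_RDONLY)
	if isDir {
		// Hide a directory by bind mounting an empty read-only directory over it.
		if err := unix.Mount(roEmptyDir, target, "bind", roFlags, ""); err != nil {
			return fmt.Errorf("error masking directory %q in mount namespace: %w", target, err)
		}
		return nil
	}
	// Hide anything else by bind mounting /dev/null over it.
	if err := unix.Mount(os.DevNull, target, "", uintptr(unix.MS_BIND|unix.MS_RDONLY|unix.MS_PRIVATE), ""); err != nil {
		return fmt.Errorf("error masking non-directory %q in mount namespace: %w", target, err)
	}
	return nil
}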
@ -4,11 +4,11 @@
|
|||
package chroot
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
|
||||
"github.com/containers/common/pkg/seccomp"
|
||||
specs "github.com/opencontainers/runtime-spec/specs-go"
|
||||
"github.com/pkg/errors"
|
||||
libseccomp "github.com/seccomp/libseccomp-golang"
|
||||
"github.com/sirupsen/logrus"
|
||||
)
|
||||
|
@ -109,13 +109,13 @@ func setSeccomp(spec *specs.Spec) error {
|
|||
return libseccomp.CompareInvalid
|
||||
}
|
||||
|
||||
filter, err := libseccomp.NewFilter(mapAction(spec.Linux.Seccomp.DefaultAction, nil))
|
||||
filter, err := libseccomp.NewFilter(mapAction(spec.Linux.Seccomp.DefaultAction, spec.Linux.Seccomp.DefaultErrnoRet))
|
||||
if err != nil {
|
||||
return errors.Wrapf(err, "error creating seccomp filter with default action %q", spec.Linux.Seccomp.DefaultAction)
|
||||
return fmt.Errorf("error creating seccomp filter with default action %q: %w", spec.Linux.Seccomp.DefaultAction, err)
|
||||
}
|
||||
for _, arch := range spec.Linux.Seccomp.Architectures {
|
||||
if err = filter.AddArch(mapArch(arch)); err != nil {
|
||||
return errors.Wrapf(err, "error adding architecture %q(%q) to seccomp filter", arch, mapArch(arch))
|
||||
return fmt.Errorf("error adding architecture %q(%q) to seccomp filter: %w", arch, mapArch(arch), err)
|
||||
}
|
||||
}
|
||||
for _, rule := range spec.Linux.Seccomp.Syscalls {
|
||||
|
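Besides the error rewrapping, the hunk above now passes spec.Linux.Seccomp.DefaultErrnoRet into mapAction instead of nil, so a profile's default errno is honoured. mapAction is buildah's own helper; a hedged sketch of how an SCMP_ACT_ERRNO action plus an optional errno value might map onto libseccomp-golang:

package seccompsketch

import (
	specs "github.com/opencontainers/runtime-spec/specs-go"
	libseccomp "github.com/seccomp/libseccomp-golang"
)

// mapErrnoAction only illustrates the errnoRet plumbing; it is not the
// actual mapAction implementation from the diff.
func mapErrnoAction(action specs.LinuxSeccompAction, errnoRet *uint) libseccomp.ScmpAction {
	if action != specs.ActErrno {
		return libseccomp.ActInvalid
	}
	if errnoRet != nil {
		// Attach the errno value the kernel should return to the caller.
		return libseccomp.ActErrno.SetReturnCode(int16(*errnoRet))
	}
	return libseccomp.ActErrno // no explicit errno requested
}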
@ -131,7 +131,7 @@ func setSeccomp(spec *specs.Spec) error {
|
|||
for scnum := range scnames {
|
||||
if len(rule.Args) == 0 {
|
||||
if err = filter.AddRule(scnum, mapAction(rule.Action, rule.ErrnoRet)); err != nil {
|
||||
return errors.Wrapf(err, "error adding a rule (%q:%q) to seccomp filter", scnames[scnum], rule.Action)
|
||||
return fmt.Errorf("error adding a rule (%q:%q) to seccomp filter: %w", scnames[scnum], rule.Action, err)
|
||||
}
|
||||
continue
|
||||
}
|
||||
|
@ -140,7 +140,7 @@ func setSeccomp(spec *specs.Spec) error {
|
|||
for _, arg := range rule.Args {
|
||||
condition, err := libseccomp.MakeCondition(arg.Index, mapOp(arg.Op), arg.Value, arg.ValueTwo)
|
||||
if err != nil {
|
||||
return errors.Wrapf(err, "error building a seccomp condition %d:%v:%d:%d", arg.Index, arg.Op, arg.Value, arg.ValueTwo)
|
||||
return fmt.Errorf("error building a seccomp condition %d:%v:%d:%d: %w", arg.Index, arg.Op, arg.Value, arg.ValueTwo, err)
|
||||
}
|
||||
if arg.Op != specs.OpEqualTo {
|
||||
opsAreAllEquality = false
|
||||
|
@ -156,22 +156,22 @@ func setSeccomp(spec *specs.Spec) error {
|
|||
if len(rule.Args) > 1 && opsAreAllEquality && err.Error() == "two checks on same syscall argument" {
|
||||
for i := range conditions {
|
||||
if err = filter.AddRuleConditional(scnum, mapAction(rule.Action, rule.ErrnoRet), conditions[i:i+1]); err != nil {
|
||||
return errors.Wrapf(err, "error adding a conditional rule (%q:%q[%d]) to seccomp filter", scnames[scnum], rule.Action, i)
|
||||
return fmt.Errorf("error adding a conditional rule (%q:%q[%d]) to seccomp filter: %w", scnames[scnum], rule.Action, i, err)
|
||||
}
|
||||
}
|
||||
} else {
|
||||
return errors.Wrapf(err, "error adding a conditional rule (%q:%q) to seccomp filter", scnames[scnum], rule.Action)
|
||||
return fmt.Errorf("error adding a conditional rule (%q:%q) to seccomp filter: %w", scnames[scnum], rule.Action, err)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
if err = filter.SetNoNewPrivsBit(spec.Process.NoNewPrivileges); err != nil {
|
||||
return errors.Wrapf(err, "error setting no-new-privileges bit to %v", spec.Process.NoNewPrivileges)
|
||||
return fmt.Errorf("error setting no-new-privileges bit to %v: %w", spec.Process.NoNewPrivileges, err)
|
||||
}
|
||||
err = filter.Load()
|
||||
filter.Release()
|
||||
if err != nil {
|
||||
return errors.Wrapf(err, "error activating seccomp filter")
|
||||
return fmt.Errorf("error activating seccomp filter: %w", err)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
@ -183,17 +183,17 @@ func setupSeccomp(spec *specs.Spec, seccompProfilePath string) error {
|
|||
case "":
|
||||
seccompConfig, err := seccomp.GetDefaultProfile(spec)
|
||||
if err != nil {
|
||||
return errors.Wrapf(err, "loading default seccomp profile failed")
|
||||
return fmt.Errorf("loading default seccomp profile failed: %w", err)
|
||||
}
|
||||
spec.Linux.Seccomp = seccompConfig
|
||||
default:
|
||||
seccompProfile, err := ioutil.ReadFile(seccompProfilePath)
|
||||
if err != nil {
|
||||
return errors.Wrapf(err, "opening seccomp profile (%s) failed", seccompProfilePath)
|
||||
return fmt.Errorf("opening seccomp profile (%s) failed: %w", seccompProfilePath, err)
|
||||
}
|
||||
seccompConfig, err := seccomp.LoadProfile(string(seccompProfile), spec)
|
||||
if err != nil {
|
||||
return errors.Wrapf(err, "loading seccomp profile (%s) failed", seccompProfilePath)
|
||||
return fmt.Errorf("loading seccomp profile (%s) failed: %w", seccompProfilePath, err)
|
||||
}
|
||||
spec.Linux.Seccomp = seccompConfig
|
||||
}
|
||||
|
|
|
@ -1,10 +1,12 @@
|
|||
//go:build !linux || !seccomp
|
||||
// +build !linux !seccomp
|
||||
|
||||
package chroot
|
||||
|
||||
import (
|
||||
"errors"
|
||||
|
||||
"github.com/opencontainers/runtime-spec/specs-go"
|
||||
"github.com/pkg/errors"
|
||||
)
|
||||
|
||||
func setSeccomp(spec *specs.Spec) error {
|
||||
|
|
|
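The build-tag hunks in these files add the Go 1.17 //go:build form above the legacy // +build comment; the two lines must express the same constraint (in the old syntax a space means OR and a comma means AND, so //go:build !linux || !seccomp pairs with // +build !linux !seccomp). For example, the inverse constraint would be written as:

//go:build linux && seccomp
// +build linux,seccomp

package chroot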
@ -1,12 +1,14 @@
|
|||
//go:build linux
|
||||
// +build linux
|
||||
|
||||
package chroot
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
|
||||
"github.com/opencontainers/runtime-spec/specs-go"
|
||||
selinux "github.com/opencontainers/selinux/go-selinux"
|
||||
"github.com/opencontainers/selinux/go-selinux/label"
|
||||
"github.com/pkg/errors"
|
||||
"github.com/sirupsen/logrus"
|
||||
)
|
||||
|
||||
|
@ -15,7 +17,7 @@ func setSelinuxLabel(spec *specs.Spec) error {
|
|||
logrus.Debugf("setting selinux label")
|
||||
if spec.Process.SelinuxLabel != "" && selinux.GetEnabled() {
|
||||
if err := label.SetProcessLabel(spec.Process.SelinuxLabel); err != nil {
|
||||
return errors.Wrapf(err, "error setting process label to %q", spec.Process.SelinuxLabel)
|
||||
return fmt.Errorf("error setting process label to %q: %w", spec.Process.SelinuxLabel, err)
|
||||
}
|
||||
}
|
||||
return nil
|
||||
|
|
|
@ -1,10 +1,12 @@
|
|||
//go:build !linux
|
||||
// +build !linux
|
||||
|
||||
package chroot
|
||||
|
||||
import (
|
||||
"errors"
|
||||
|
||||
"github.com/opencontainers/runtime-spec/specs-go"
|
||||
"github.com/pkg/errors"
|
||||
)
|
||||
|
||||
func setSelinuxLabel(spec *specs.Spec) error {
|
||||
|
|
|
@ -1,15 +1,16 @@
|
|||
//go:build !linux
|
||||
// +build !linux
|
||||
|
||||
package chroot
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"io"
|
||||
|
||||
"github.com/opencontainers/runtime-spec/specs-go"
|
||||
"github.com/pkg/errors"
|
||||
)
|
||||
|
||||
// RunUsingChroot is not supported.
|
||||
func RunUsingChroot(spec *specs.Spec, bundlePath string, stdin io.Reader, stdout, stderr io.Writer) (err error) {
|
||||
return errors.Errorf("--isolation chroot is not supported on this platform")
|
||||
func RunUsingChroot(spec *specs.Spec, bundlePath, homeDir string, stdin io.Reader, stdout, stderr io.Writer) (err error) {
|
||||
return fmt.Errorf("--isolation chroot is not supported on this platform")
|
||||
}
|
||||
|
|
|
@ -3,6 +3,8 @@ package buildah
|
|||
import (
|
||||
"context"
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"os"
|
||||
|
@ -25,7 +27,6 @@ import (
|
|||
"github.com/containers/storage/pkg/archive"
|
||||
"github.com/containers/storage/pkg/stringid"
|
||||
digest "github.com/opencontainers/go-digest"
|
||||
"github.com/pkg/errors"
|
||||
"github.com/sirupsen/logrus"
|
||||
)
|
||||
|
||||
|
@ -145,7 +146,7 @@ func checkRegistrySourcesAllows(forWhat string, dest types.ImageReference) (inse
|
|||
AllowedRegistries []string `json:"allowedRegistries,omitempty"`
|
||||
}
|
||||
if err := json.Unmarshal([]byte(registrySources), &sources); err != nil {
|
||||
return false, errors.Wrapf(err, "error parsing $BUILD_REGISTRY_SOURCES (%q) as JSON", registrySources)
|
||||
return false, fmt.Errorf("error parsing $BUILD_REGISTRY_SOURCES (%q) as JSON: %w", registrySources, err)
|
||||
}
|
||||
blocked := false
|
||||
if len(sources.BlockedRegistries) > 0 {
|
||||
|
@ -156,7 +157,7 @@ func checkRegistrySourcesAllows(forWhat string, dest types.ImageReference) (inse
|
|||
}
|
||||
}
|
||||
if blocked {
|
||||
return false, errors.Errorf("%s registry at %q denied by policy: it is in the blocked registries list", forWhat, reference.Domain(dref))
|
||||
return false, fmt.Errorf("%s registry at %q denied by policy: it is in the blocked registries list", forWhat, reference.Domain(dref))
|
||||
}
|
||||
allowed := true
|
||||
if len(sources.AllowedRegistries) > 0 {
|
||||
|
@ -168,7 +169,7 @@ func checkRegistrySourcesAllows(forWhat string, dest types.ImageReference) (inse
|
|||
}
|
||||
}
|
||||
if !allowed {
|
||||
return false, errors.Errorf("%s registry at %q denied by policy: not in allowed registries list", forWhat, reference.Domain(dref))
|
||||
return false, fmt.Errorf("%s registry at %q denied by policy: not in allowed registries list", forWhat, reference.Domain(dref))
|
||||
}
|
||||
if len(sources.InsecureRegistries) > 0 {
|
||||
return true, nil
|
||||
|
@ -204,7 +205,7 @@ func (b *Builder) addManifest(ctx context.Context, manifestName string, imageSpe
|
|||
|
||||
names, err := util.ExpandNames([]string{manifestName}, systemContext, b.store)
|
||||
if err != nil {
|
||||
return "", errors.Wrapf(err, "error encountered while expanding manifest list name %q", manifestName)
|
||||
return "", fmt.Errorf("error encountered while expanding manifest list name %q: %w", manifestName, err)
|
||||
}
|
||||
|
||||
ref, err := util.VerifyTagName(imageSpec)
|
||||
|
@ -247,7 +248,7 @@ func (b *Builder) Commit(ctx context.Context, dest types.ImageReference, options
|
|||
// work twice.
|
||||
if options.OmitTimestamp {
|
||||
if options.HistoryTimestamp != nil {
|
||||
return imgID, nil, "", errors.Errorf("OmitTimestamp ahd HistoryTimestamp can not be used together")
|
||||
return imgID, nil, "", fmt.Errorf("OmitTimestamp ahd HistoryTimestamp can not be used together")
|
||||
}
|
||||
timestamp := time.Unix(0, 0).UTC()
|
||||
options.HistoryTimestamp = &timestamp
|
||||
|
@ -257,7 +258,7 @@ func (b *Builder) Commit(ctx context.Context, dest types.ImageReference, options
|
|||
nameToRemove = stringid.GenerateRandomID() + "-tmp"
|
||||
dest2, err := is.Transport.ParseStoreReference(b.store, nameToRemove)
|
||||
if err != nil {
|
||||
return imgID, nil, "", errors.Wrapf(err, "error creating temporary destination reference for image")
|
||||
return imgID, nil, "", fmt.Errorf("error creating temporary destination reference for image: %w", err)
|
||||
}
|
||||
dest = dest2
|
||||
}
|
||||
|
@ -266,23 +267,23 @@ func (b *Builder) Commit(ctx context.Context, dest types.ImageReference, options
|
|||
|
||||
blocked, err := isReferenceBlocked(dest, systemContext)
|
||||
if err != nil {
|
||||
return "", nil, "", errors.Wrapf(err, "error checking if committing to registry for %q is blocked", transports.ImageName(dest))
|
||||
return "", nil, "", fmt.Errorf("error checking if committing to registry for %q is blocked: %w", transports.ImageName(dest), err)
|
||||
}
|
||||
if blocked {
|
||||
return "", nil, "", errors.Errorf("commit access to registry for %q is blocked by configuration", transports.ImageName(dest))
|
||||
return "", nil, "", fmt.Errorf("commit access to registry for %q is blocked by configuration", transports.ImageName(dest))
|
||||
}
|
||||
|
||||
// Load the system signing policy.
|
||||
commitPolicy, err := signature.DefaultPolicy(systemContext)
|
||||
if err != nil {
|
||||
return "", nil, "", errors.Wrapf(err, "error obtaining default signature policy")
|
||||
return "", nil, "", fmt.Errorf("error obtaining default signature policy: %w", err)
|
||||
}
|
||||
// Override the settings for local storage to make sure that we can always read the source "image".
|
||||
commitPolicy.Transports[is.Transport.Name()] = storageAllowedPolicyScopes
|
||||
|
||||
policyContext, err := signature.NewPolicyContext(commitPolicy)
|
||||
if err != nil {
|
||||
return imgID, nil, "", errors.Wrapf(err, "error creating new signature policy context")
|
||||
return imgID, nil, "", fmt.Errorf("error creating new signature policy context: %w", err)
|
||||
}
|
||||
defer func() {
|
||||
if err2 := policyContext.Destroy(); err2 != nil {
|
||||
|
@ -297,7 +298,7 @@ func (b *Builder) Commit(ctx context.Context, dest types.ImageReference, options
|
|||
}
|
||||
if insecure {
|
||||
if systemContext.DockerInsecureSkipTLSVerify == types.OptionalBoolFalse {
|
||||
return imgID, nil, "", errors.Errorf("can't require tls verification on an insecured registry")
|
||||
return imgID, nil, "", fmt.Errorf("can't require tls verification on an insecured registry")
|
||||
}
|
||||
systemContext.DockerInsecureSkipTLSVerify = types.OptionalBoolTrue
|
||||
systemContext.OCIInsecureSkipTLSVerify = true
|
||||
|
@ -308,7 +309,7 @@ func (b *Builder) Commit(ctx context.Context, dest types.ImageReference, options
|
|||
// Build an image reference from which we can copy the finished image.
|
||||
src, err = b.makeContainerImageRef(options)
|
||||
if err != nil {
|
||||
return imgID, nil, "", errors.Wrapf(err, "error computing layer digests and building metadata for container %q", b.ContainerID)
|
||||
return imgID, nil, "", fmt.Errorf("error computing layer digests and building metadata for container %q: %w", b.ContainerID, err)
|
||||
}
|
||||
// In case we're using caching, decide how to handle compression for a cache.
|
||||
// If we're using blob caching, set it up for the source.
|
||||
|
@ -321,12 +322,12 @@ func (b *Builder) Commit(ctx context.Context, dest types.ImageReference, options
|
|||
}
|
||||
cache, err := blobcache.NewBlobCache(src, options.BlobDirectory, compress)
|
||||
if err != nil {
|
||||
return imgID, nil, "", errors.Wrapf(err, "error wrapping image reference %q in blob cache at %q", transports.ImageName(src), options.BlobDirectory)
|
||||
return imgID, nil, "", fmt.Errorf("error wrapping image reference %q in blob cache at %q: %w", transports.ImageName(src), options.BlobDirectory, err)
|
||||
}
|
||||
maybeCachedSrc = cache
|
||||
cache, err = blobcache.NewBlobCache(dest, options.BlobDirectory, compress)
|
||||
if err != nil {
|
||||
return imgID, nil, "", errors.Wrapf(err, "error wrapping image reference %q in blob cache at %q", transports.ImageName(dest), options.BlobDirectory)
|
||||
return imgID, nil, "", fmt.Errorf("error wrapping image reference %q in blob cache at %q: %w", transports.ImageName(dest), options.BlobDirectory, err)
|
||||
}
|
||||
maybeCachedDest = cache
|
||||
}
|
||||
|
@ -347,7 +348,7 @@ func (b *Builder) Commit(ctx context.Context, dest types.ImageReference, options
|
|||
|
||||
var manifestBytes []byte
|
||||
if manifestBytes, err = retryCopyImage(ctx, policyContext, maybeCachedDest, maybeCachedSrc, dest, getCopyOptions(b.store, options.ReportWriter, nil, systemContext, "", false, options.SignBy, options.OciEncryptLayers, options.OciEncryptConfig, nil), options.MaxRetries, options.RetryDelay); err != nil {
|
||||
return imgID, nil, "", errors.Wrapf(err, "error copying layers and metadata for container %q", b.ContainerID)
|
||||
return imgID, nil, "", fmt.Errorf("error copying layers and metadata for container %q: %w", b.ContainerID, err)
|
||||
}
|
||||
// If we've got more names to attach, and we know how to do that for
|
||||
// the transport that we're writing the new image to, add them now.
|
||||
|
@ -356,10 +357,10 @@ func (b *Builder) Commit(ctx context.Context, dest types.ImageReference, options
|
|||
case is.Transport.Name():
|
||||
img, err := is.Transport.GetStoreImage(b.store, dest)
|
||||
if err != nil {
|
||||
return imgID, nil, "", errors.Wrapf(err, "error locating just-written image %q", transports.ImageName(dest))
|
||||
return imgID, nil, "", fmt.Errorf("error locating just-written image %q: %w", transports.ImageName(dest), err)
|
||||
}
|
||||
if err = util.AddImageNames(b.store, "", systemContext, img, options.AdditionalTags); err != nil {
|
||||
return imgID, nil, "", errors.Wrapf(err, "error setting image names to %v", append(img.Names, options.AdditionalTags...))
|
||||
return imgID, nil, "", fmt.Errorf("error setting image names to %v: %w", append(img.Names, options.AdditionalTags...), err)
|
||||
}
|
||||
logrus.Debugf("assigned names %v to image %q", img.Names, img.ID)
|
||||
default:
|
||||
|
@ -368,8 +369,8 @@ func (b *Builder) Commit(ctx context.Context, dest types.ImageReference, options
|
|||
}
|
||||
|
||||
img, err := is.Transport.GetStoreImage(b.store, dest)
|
||||
if err != nil && errors.Cause(err) != storage.ErrImageUnknown {
|
||||
return imgID, nil, "", errors.Wrapf(err, "error locating image %q in local storage", transports.ImageName(dest))
|
||||
if err != nil && !errors.Is(err, storage.ErrImageUnknown) {
|
||||
return imgID, nil, "", fmt.Errorf("error locating image %q in local storage: %w", transports.ImageName(dest), err)
|
||||
}
|
||||
if err == nil {
|
||||
imgID = img.ID
|
||||
|
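This hunk also swaps errors.Cause(err) != storage.ErrImageUnknown for !errors.Is(err, storage.ErrImageUnknown). With wrapping now done via %w, errors.Is walks a chain that pkg/errors.Cause cannot unwind; a small sketch with a stand-in sentinel:

package main

import (
	"errors"
	"fmt"
)

var errImageUnknown = errors.New("image not known") // stand-in for storage.ErrImageUnknown

func main() {
	err := fmt.Errorf("error locating image %q in local storage: %w", "example", errImageUnknown)
	fmt.Println(errors.Is(err, errImageUnknown)) // true: matches through the %w chain
	fmt.Println(err == errImageUnknown)          // false: direct comparison is not enough
}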
@ -381,12 +382,12 @@ func (b *Builder) Commit(ctx context.Context, dest types.ImageReference, options
|
|||
}
|
||||
if len(prunedNames) < len(img.Names) {
|
||||
if err = b.store.SetNames(imgID, prunedNames); err != nil {
|
||||
return imgID, nil, "", errors.Wrapf(err, "failed to prune temporary name from image %q", imgID)
|
||||
return imgID, nil, "", fmt.Errorf("failed to prune temporary name from image %q: %w", imgID, err)
|
||||
}
|
||||
logrus.Debugf("reassigned names %v to image %q", prunedNames, img.ID)
|
||||
dest2, err := is.Transport.ParseStoreReference(b.store, "@"+imgID)
|
||||
if err != nil {
|
||||
return imgID, nil, "", errors.Wrapf(err, "error creating unnamed destination reference for image")
|
||||
return imgID, nil, "", fmt.Errorf("error creating unnamed destination reference for image: %w", err)
|
||||
}
|
||||
dest = dest2
|
||||
}
|
||||
|
@ -399,7 +400,7 @@ func (b *Builder) Commit(ctx context.Context, dest types.ImageReference, options
|
|||
|
||||
manifestDigest, err := manifest.Digest(manifestBytes)
|
||||
if err != nil {
|
||||
return imgID, nil, "", errors.Wrapf(err, "error computing digest of manifest of new image %q", transports.ImageName(dest))
|
||||
return imgID, nil, "", fmt.Errorf("error computing digest of manifest of new image %q: %w", transports.ImageName(dest), err)
|
||||
}
|
||||
|
||||
var ref reference.Canonical
|
||||
|
|
|
@ -3,6 +3,7 @@ package buildah
|
|||
import (
|
||||
"context"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"os"
|
||||
"runtime"
|
||||
"strings"
|
||||
|
@ -18,7 +19,6 @@ import (
|
|||
"github.com/containers/image/v5/types"
|
||||
"github.com/containers/storage/pkg/stringid"
|
||||
ociv1 "github.com/opencontainers/image-spec/specs-go/v1"
|
||||
"github.com/pkg/errors"
|
||||
"github.com/sirupsen/logrus"
|
||||
)
|
||||
|
||||
|
@ -28,7 +28,7 @@ import (
|
|||
func unmarshalConvertedConfig(ctx context.Context, dest interface{}, img types.Image, wantedManifestMIMEType string) error {
|
||||
_, actualManifestMIMEType, err := img.Manifest(ctx)
|
||||
if err != nil {
|
||||
return errors.Wrapf(err, "error getting manifest MIME type for %q", transports.ImageName(img.Reference()))
|
||||
return fmt.Errorf("error getting manifest MIME type for %q: %w", transports.ImageName(img.Reference()), err)
|
||||
}
|
||||
if wantedManifestMIMEType != actualManifestMIMEType {
|
||||
layerInfos := img.LayerInfos()
|
||||
|
@ -40,22 +40,22 @@ func unmarshalConvertedConfig(ctx context.Context, dest interface{}, img types.I
|
|||
LayerInfos: layerInfos,
|
||||
})
|
||||
if err != nil {
|
||||
return errors.Wrapf(err, "resetting recorded compression for %q", transports.ImageName(img.Reference()))
|
||||
return fmt.Errorf("resetting recorded compression for %q: %w", transports.ImageName(img.Reference()), err)
|
||||
}
|
||||
secondUpdatedImg, err := updatedImg.UpdatedImage(ctx, types.ManifestUpdateOptions{
|
||||
ManifestMIMEType: wantedManifestMIMEType,
|
||||
})
|
||||
if err != nil {
|
||||
return errors.Wrapf(err, "error converting image %q from %q to %q", transports.ImageName(img.Reference()), actualManifestMIMEType, wantedManifestMIMEType)
|
||||
return fmt.Errorf("error converting image %q from %q to %q: %w", transports.ImageName(img.Reference()), actualManifestMIMEType, wantedManifestMIMEType, err)
|
||||
}
|
||||
img = secondUpdatedImg
|
||||
}
|
||||
config, err := img.ConfigBlob(ctx)
|
||||
if err != nil {
|
||||
return errors.Wrapf(err, "error reading %s config from %q", wantedManifestMIMEType, transports.ImageName(img.Reference()))
|
||||
return fmt.Errorf("error reading %s config from %q: %w", wantedManifestMIMEType, transports.ImageName(img.Reference()), err)
|
||||
}
|
||||
if err := json.Unmarshal(config, dest); err != nil {
|
||||
return errors.Wrapf(err, "error parsing %s configuration %q from %q", wantedManifestMIMEType, string(config), transports.ImageName(img.Reference()))
|
||||
return fmt.Errorf("error parsing %s configuration %q from %q: %w", wantedManifestMIMEType, string(config), transports.ImageName(img.Reference()), err)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
@ -64,11 +64,11 @@ func (b *Builder) initConfig(ctx context.Context, img types.Image, sys *types.Sy
|
|||
if img != nil { // A pre-existing image, as opposed to a "FROM scratch" new one.
|
||||
rawManifest, manifestMIMEType, err := img.Manifest(ctx)
|
||||
if err != nil {
|
||||
return errors.Wrapf(err, "error reading image manifest for %q", transports.ImageName(img.Reference()))
|
||||
return fmt.Errorf("error reading image manifest for %q: %w", transports.ImageName(img.Reference()), err)
|
||||
}
|
||||
rawConfig, err := img.ConfigBlob(ctx)
|
||||
if err != nil {
|
||||
return errors.Wrapf(err, "error reading image configuration for %q", transports.ImageName(img.Reference()))
|
||||
return fmt.Errorf("error reading image configuration for %q: %w", transports.ImageName(img.Reference()), err)
|
||||
}
|
||||
b.Manifest = rawManifest
|
||||
b.Config = rawConfig
|
||||
|
@ -89,7 +89,7 @@ func (b *Builder) initConfig(ctx context.Context, img types.Image, sys *types.Sy
|
|||
// Attempt to recover format-specific data from the manifest.
|
||||
v1Manifest := ociv1.Manifest{}
|
||||
if err := json.Unmarshal(b.Manifest, &v1Manifest); err != nil {
|
||||
return errors.Wrapf(err, "error parsing OCI manifest %q", string(b.Manifest))
|
||||
return fmt.Errorf("error parsing OCI manifest %q: %w", string(b.Manifest), err)
|
||||
}
|
||||
for k, v := range v1Manifest.Annotations {
|
||||
// NOTE: do not override annotations that are
|
||||
|
|
|
@ -4,7 +4,7 @@ import (
|
|||
"archive/tar"
|
||||
"bytes"
|
||||
"encoding/json"
|
||||
stderrors "errors"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"io/fs"
|
||||
|
@ -25,7 +25,6 @@ import (
|
|||
"github.com/containers/storage/pkg/fileutils"
|
||||
"github.com/containers/storage/pkg/idtools"
|
||||
"github.com/containers/storage/pkg/reexec"
|
||||
"github.com/pkg/errors"
|
||||
"github.com/sirupsen/logrus"
|
||||
)
|
||||
|
||||
|
@ -454,17 +453,17 @@ func cleanerReldirectory(candidate string) string {
|
|||
// the two directories are on different volumes
|
||||
func convertToRelSubdirectory(root, directory string) (relative string, err error) {
|
||||
if root == "" || !filepath.IsAbs(root) {
|
||||
return "", errors.Errorf("expected root directory to be an absolute path, got %q", root)
|
||||
return "", fmt.Errorf("expected root directory to be an absolute path, got %q", root)
|
||||
}
|
||||
if directory == "" || !filepath.IsAbs(directory) {
|
||||
return "", errors.Errorf("expected directory to be an absolute path, got %q", root)
|
||||
return "", fmt.Errorf("expected directory to be an absolute path, got %q", root)
|
||||
}
|
||||
if filepath.VolumeName(root) != filepath.VolumeName(directory) {
|
||||
return "", errors.Errorf("%q and %q are on different volumes", root, directory)
|
||||
return "", fmt.Errorf("%q and %q are on different volumes", root, directory)
|
||||
}
|
||||
rel, err := filepath.Rel(root, directory)
|
||||
if err != nil {
|
||||
return "", errors.Wrapf(err, "error computing path of %q relative to %q", directory, root)
|
||||
return "", fmt.Errorf("error computing path of %q relative to %q: %w", directory, root, err)
|
||||
}
|
||||
return cleanerReldirectory(rel), nil
|
||||
}
|
||||
|
@ -472,7 +471,7 @@ func convertToRelSubdirectory(root, directory string) (relative string, err erro
|
|||
func currentVolumeRoot() (string, error) {
|
||||
cwd, err := os.Getwd()
|
||||
if err != nil {
|
||||
return "", errors.Wrapf(err, "error getting current working directory")
|
||||
return "", fmt.Errorf("error getting current working directory: %w", err)
|
||||
}
|
||||
return filepath.VolumeName(cwd) + string(os.PathSeparator), nil
|
||||
}
|
||||
|
@ -480,7 +479,7 @@ func currentVolumeRoot() (string, error) {
|
|||
func isVolumeRoot(candidate string) (bool, error) {
|
||||
abs, err := filepath.Abs(candidate)
|
||||
if err != nil {
|
||||
return false, errors.Wrapf(err, "error converting %q to an absolute path", candidate)
|
||||
return false, fmt.Errorf("error converting %q to an absolute path: %w", candidate, err)
|
||||
}
|
||||
return abs == filepath.VolumeName(abs)+string(os.PathSeparator), nil
|
||||
}
|
||||
|
@ -494,7 +493,7 @@ func copier(bulkReader io.Reader, bulkWriter io.Writer, req request) (*response,
|
|||
if req.Root == "" {
|
||||
wd, err := os.Getwd()
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "error getting current working directory")
|
||||
return nil, fmt.Errorf("error getting current working directory: %w", err)
|
||||
}
|
||||
req.Directory = wd
|
||||
} else {
|
||||
|
@ -504,19 +503,19 @@ func copier(bulkReader io.Reader, bulkWriter io.Writer, req request) (*response,
|
|||
if req.Root == "" {
|
||||
root, err := currentVolumeRoot()
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "error determining root of current volume")
|
||||
return nil, fmt.Errorf("error determining root of current volume: %w", err)
|
||||
}
|
||||
req.Root = root
|
||||
}
|
||||
if filepath.IsAbs(req.Directory) {
|
||||
_, err := convertToRelSubdirectory(req.Root, req.Directory)
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "error rewriting %q to be relative to %q", req.Directory, req.Root)
|
||||
return nil, fmt.Errorf("error rewriting %q to be relative to %q: %w", req.Directory, req.Root, err)
|
||||
}
|
||||
}
|
||||
isAlreadyRoot, err := isVolumeRoot(req.Root)
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "error checking if %q is a root directory", req.Root)
|
||||
return nil, fmt.Errorf("error checking if %q is a root directory: %w", req.Root, err)
|
||||
}
|
||||
if !isAlreadyRoot && canChroot {
|
||||
return copierWithSubprocess(bulkReader, bulkWriter, req)
|
||||
|
@ -578,27 +577,27 @@ func copierWithSubprocess(bulkReader io.Reader, bulkWriter io.Writer, req reques
|
|||
cmd := reexec.Command(copierCommand)
|
||||
stdinRead, stdinWrite, err := os.Pipe()
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "pipe")
|
||||
return nil, fmt.Errorf("pipe: %w", err)
|
||||
}
|
||||
defer closeIfNotNilYet(&stdinRead, "stdin pipe reader")
|
||||
defer closeIfNotNilYet(&stdinWrite, "stdin pipe writer")
|
||||
encoder := json.NewEncoder(stdinWrite)
|
||||
stdoutRead, stdoutWrite, err := os.Pipe()
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "pipe")
|
||||
return nil, fmt.Errorf("pipe: %w", err)
|
||||
}
|
||||
defer closeIfNotNilYet(&stdoutRead, "stdout pipe reader")
|
||||
defer closeIfNotNilYet(&stdoutWrite, "stdout pipe writer")
|
||||
decoder := json.NewDecoder(stdoutRead)
|
||||
bulkReaderRead, bulkReaderWrite, err := os.Pipe()
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "pipe")
|
||||
return nil, fmt.Errorf("pipe: %w", err)
|
||||
}
|
||||
defer closeIfNotNilYet(&bulkReaderRead, "child bulk content reader pipe, read end")
|
||||
defer closeIfNotNilYet(&bulkReaderWrite, "child bulk content reader pipe, write end")
|
||||
bulkWriterRead, bulkWriterWrite, err := os.Pipe()
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "pipe")
|
||||
return nil, fmt.Errorf("pipe: %w", err)
|
||||
}
|
||||
defer closeIfNotNilYet(&bulkWriterRead, "child bulk content writer pipe, read end")
|
||||
defer closeIfNotNilYet(&bulkWriterWrite, "child bulk content writer pipe, write end")
|
||||
|
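Only the error wrapping changes here, but for context the surrounding code hands the bulk pipes to a re-exec'd copier child through cmd.ExtraFiles. A hedged, minimal sketch of that mechanism with a hypothetical helper binary (the real code uses storage's reexec package and four pipe pairs):

package main

import (
	"fmt"
	"os"
	"os/exec"
)

func runHelper() error {
	bulkRead, bulkWrite, err := os.Pipe()
	if err != nil {
		return fmt.Errorf("pipe: %w", err)
	}
	defer bulkRead.Close()
	defer bulkWrite.Close()

	cmd := exec.Command("/usr/libexec/copier-helper") // hypothetical path
	cmd.Stdin, cmd.Stdout, cmd.Stderr = os.Stdin, os.Stdout, os.Stderr
	cmd.ExtraFiles = []*os.File{bulkRead} // becomes fd 3 in the child, fd 4 next, and so on
	if err := cmd.Start(); err != nil {
		return fmt.Errorf("error starting subprocess: %w", err)
	}
	return cmd.Wait()
}

func main() {
	if err := runHelper(); err != nil {
		fmt.Fprintln(os.Stderr, err)
	}
}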
@ -611,7 +610,7 @@ func copierWithSubprocess(bulkReader io.Reader, bulkWriter io.Writer, req reques
|
|||
cmd.Stderr = &errorBuffer
|
||||
cmd.ExtraFiles = []*os.File{bulkReaderRead, bulkWriterWrite}
|
||||
if err = cmd.Start(); err != nil {
|
||||
return nil, errors.Wrapf(err, "error starting subprocess")
|
||||
return nil, fmt.Errorf("error starting subprocess: %w", err)
|
||||
}
|
||||
cmdToWaitFor := cmd
|
||||
defer func() {
|
||||
|
@ -633,9 +632,9 @@ func copierWithSubprocess(bulkReader io.Reader, bulkWriter io.Writer, req reques
|
|||
bulkWriterWrite = nil
|
||||
killAndReturn := func(err error, step string) (*response, error) { // nolint: unparam
|
||||
if err2 := cmd.Process.Kill(); err2 != nil {
|
||||
return nil, errors.Wrapf(err, "error killing subprocess: %v; %s", err2, step)
|
||||
return nil, fmt.Errorf("error killing subprocess: %v; %s: %w", err2, step, err)
|
||||
}
|
||||
return nil, errors.Wrap(err, step)
|
||||
return nil, fmt.Errorf("%v: %w", step, err)
|
||||
}
|
||||
if err = encoder.Encode(req); err != nil {
|
||||
return killAndReturn(err, "error encoding request for copier subprocess")
|
||||
|
@ -691,10 +690,10 @@ func copierWithSubprocess(bulkReader io.Reader, bulkWriter io.Writer, req reques
|
|||
}
|
||||
}
|
||||
if readError != nil {
|
||||
return nil, errors.Wrapf(readError, "error passing bulk input to subprocess")
|
||||
return nil, fmt.Errorf("error passing bulk input to subprocess: %w", readError)
|
||||
}
|
||||
if writeError != nil {
|
||||
return nil, errors.Wrapf(writeError, "error passing bulk output from subprocess")
|
||||
return nil, fmt.Errorf("error passing bulk output from subprocess: %w", writeError)
|
||||
}
|
||||
return resp, nil
|
||||
}
|
||||
|
@ -846,7 +845,7 @@ func copierHandler(bulkReader io.Reader, bulkWriter io.Writer, req request) (*re
|
|||
excludes := req.Excludes()
|
||||
pm, err := fileutils.NewPatternMatcher(excludes)
|
||||
if err != nil {
|
||||
return nil, nil, errors.Wrapf(err, "error processing excludes list %v", excludes)
|
||||
return nil, nil, fmt.Errorf("error processing excludes list %v: %w", excludes, err)
|
||||
}
|
||||
|
||||
var idMappings *idtools.IDMappings
|
||||
|
@ -857,7 +856,7 @@ func copierHandler(bulkReader io.Reader, bulkWriter io.Writer, req request) (*re
|
|||
|
||||
switch req.Request {
|
||||
default:
|
||||
return nil, nil, errors.Errorf("not an implemented request type: %q", req.Request)
|
||||
return nil, nil, fmt.Errorf("not an implemented request type: %q", req.Request)
|
||||
case requestEval:
|
||||
resp := copierHandlerEval(req)
|
||||
return resp, nil, nil
|
||||
|
@ -884,7 +883,7 @@ func copierHandler(bulkReader io.Reader, bulkWriter io.Writer, req request) (*re
|
|||
func pathIsExcluded(root, path string, pm *fileutils.PatternMatcher) (string, bool, error) {
|
||||
rel, err := convertToRelSubdirectory(root, path)
|
||||
if err != nil {
|
||||
return "", false, errors.Wrapf(err, "copier: error computing path of %q relative to root %q", path, root)
|
||||
return "", false, fmt.Errorf("copier: error computing path of %q relative to root %q: %w", path, root, err)
|
||||
}
|
||||
if pm == nil {
|
||||
return rel, false, nil
|
||||
|
@ -898,7 +897,7 @@ func pathIsExcluded(root, path string, pm *fileutils.PatternMatcher) (string, bo
|
|||
// it expects Unix-style paths.
|
||||
matches, err := pm.Matches(filepath.ToSlash(rel)) // nolint:staticcheck
|
||||
if err != nil {
|
||||
return rel, false, errors.Wrapf(err, "copier: error checking if %q is excluded", rel)
|
||||
return rel, false, fmt.Errorf("copier: error checking if %q is excluded: %w", rel, err)
|
||||
}
|
||||
if matches {
|
||||
return rel, true, nil
|
||||
|
@ -916,7 +915,7 @@ func pathIsExcluded(root, path string, pm *fileutils.PatternMatcher) (string, bo
|
|||
func resolvePath(root, path string, evaluateFinalComponent bool, pm *fileutils.PatternMatcher) (string, error) {
|
||||
rel, err := convertToRelSubdirectory(root, path)
|
||||
if err != nil {
|
||||
return "", errors.Errorf("error making path %q relative to %q", path, root)
|
||||
return "", fmt.Errorf("error making path %q relative to %q", path, root)
|
||||
}
|
||||
workingPath := root
|
||||
followed := 0
|
||||
|
@ -953,7 +952,7 @@ func resolvePath(root, path string, evaluateFinalComponent bool, pm *fileutils.P
|
|||
// resolve the remaining components
|
||||
rel, err := convertToRelSubdirectory(root, filepath.Join(workingPath, target))
|
||||
if err != nil {
|
||||
return "", errors.Errorf("error making path %q relative to %q", filepath.Join(workingPath, target), root)
|
||||
return "", fmt.Errorf("error making path %q relative to %q", filepath.Join(workingPath, target), root)
|
||||
}
|
||||
workingPath = root
|
||||
components = append(strings.Split(filepath.Clean(string(os.PathSeparator)+rel), string(os.PathSeparator)), components[1:]...)
|
||||
|
@ -1101,11 +1100,10 @@ func copierHandlerStat(req request, pm *fileutils.PatternMatcher) *response {
|
|||
}
|
||||
|
||||
func errorIsPermission(err error) bool {
|
||||
err = errors.Cause(err)
|
||||
if err == nil {
|
||||
return false
|
||||
}
|
||||
return os.IsPermission(err) || strings.Contains(err.Error(), "permission denied")
|
||||
return errors.Is(err, os.ErrPermission) || strings.Contains(err.Error(), "permission denied")
|
||||
}
|
||||
|
||||
func copierHandlerGet(bulkWriter io.Writer, req request, pm *fileutils.PatternMatcher, idMappings *idtools.IDMappings) (*response, func() error, error) {
|
||||
|
@ -1154,7 +1152,7 @@ func copierHandlerGet(bulkWriter io.Writer, req request, pm *fileutils.PatternMa
|
|||
// if the named thing-to-read is a symlink, dereference it
|
||||
info, err := os.Lstat(item)
|
||||
if err != nil {
|
||||
return errors.Wrapf(err, "copier: get: lstat %q", item)
|
||||
return fmt.Errorf("copier: get: lstat %q: %w", item, err)
|
||||
}
|
||||
// chase links. if we hit a dead end, we should just fail
|
||||
followedLinks := 0
|
||||
|
@ -1171,15 +1169,15 @@ func copierHandlerGet(bulkWriter io.Writer, req request, pm *fileutils.PatternMa
|
|||
}
|
||||
item = path
|
||||
if _, err = convertToRelSubdirectory(req.Root, item); err != nil {
|
||||
return errors.Wrapf(err, "copier: get: computing path of %q(%q) relative to %q", queue[i], item, req.Root)
|
||||
return fmt.Errorf("copier: get: computing path of %q(%q) relative to %q: %w", queue[i], item, req.Root, err)
|
||||
}
|
||||
if info, err = os.Lstat(item); err != nil {
|
||||
return errors.Wrapf(err, "copier: get: lstat %q(%q)", queue[i], item)
|
||||
return fmt.Errorf("copier: get: lstat %q(%q): %w", queue[i], item, err)
|
||||
}
|
||||
followedLinks++
|
||||
}
|
||||
if followedLinks >= maxFollowedLinks {
|
||||
return errors.Wrapf(syscall.ELOOP, "copier: get: resolving symlink %q(%q)", queue[i], item)
|
||||
return fmt.Errorf("copier: get: resolving symlink %q(%q): %w", queue[i], item, syscall.ELOOP)
|
||||
}
|
||||
// evaluate excludes relative to the root directory
|
||||
if info.Mode().IsDir() {
|
||||
|
@ -1193,11 +1191,11 @@ func copierHandlerGet(bulkWriter io.Writer, req request, pm *fileutils.PatternMa
|
|||
return filepath.SkipDir
|
||||
}
|
||||
return nil
|
||||
} else if os.IsNotExist(errors.Cause(err)) {
|
||||
} else if errors.Is(err, os.ErrNotExist) {
|
||||
logrus.Warningf("copier: file disappeared while reading: %q", path)
|
||||
return nil
|
||||
}
|
||||
return errors.Wrapf(err, "copier: get: error reading %q", path)
|
||||
return fmt.Errorf("copier: get: error reading %q: %w", path, err)
|
||||
}
|
||||
if d.Type() == os.ModeSocket {
|
||||
logrus.Warningf("copier: skipping socket %q", d.Name())
|
||||
|
@ -1208,7 +1206,7 @@ func copierHandlerGet(bulkWriter io.Writer, req request, pm *fileutils.PatternMa
|
|||
// for the tar header
|
||||
rel, relErr := convertToRelSubdirectory(item, path)
|
||||
if relErr != nil {
|
||||
return errors.Wrapf(relErr, "copier: get: error computing path of %q relative to top directory %q", path, item)
|
||||
return fmt.Errorf("copier: get: error computing path of %q relative to top directory %q: %w", path, item, relErr)
|
||||
}
|
||||
// prefix the original item's name if we're keeping it
|
||||
if relNamePrefix != "" {
|
||||
|
@ -1264,7 +1262,7 @@ func copierHandlerGet(bulkWriter io.Writer, req request, pm *fileutils.PatternMa
|
|||
if d.Type() == os.ModeSymlink {
|
||||
target, err := os.Readlink(path)
|
||||
if err != nil {
|
||||
return errors.Wrapf(err, "copier: get: readlink(%q(%q))", rel, path)
|
||||
return fmt.Errorf("copier: get: readlink(%q(%q)): %w", rel, path, err)
|
||||
}
|
||||
symlinkTarget = target
|
||||
}
|
||||
|
@ -1284,7 +1282,7 @@ func copierHandlerGet(bulkWriter io.Writer, req request, pm *fileutils.PatternMa
|
|||
if err := copierHandlerGetOne(info, symlinkTarget, rel, path, options, tw, hardlinkChecker, idMappings); err != nil {
|
||||
if req.GetOptions.IgnoreUnreadable && errorIsPermission(err) {
|
||||
return ok
|
||||
} else if os.IsNotExist(errors.Cause(err)) {
|
||||
} else if errors.Is(err, os.ErrNotExist) {
|
||||
logrus.Warningf("copier: file disappeared while reading: %q", path)
|
||||
return nil
|
||||
}
|
||||
|
@ -1294,7 +1292,7 @@ func copierHandlerGet(bulkWriter io.Writer, req request, pm *fileutils.PatternMa
|
|||
}
|
||||
// walk the directory tree, checking/adding items individually
|
||||
if err := filepath.WalkDir(item, walkfn); err != nil {
|
||||
return errors.Wrapf(err, "copier: get: %q(%q)", queue[i], item)
|
||||
return fmt.Errorf("copier: get: %q(%q): %w", queue[i], item, err)
|
||||
}
|
||||
itemsCopied++
|
||||
} else {
|
||||
|
@ -1313,13 +1311,13 @@ func copierHandlerGet(bulkWriter io.Writer, req request, pm *fileutils.PatternMa
|
|||
if req.GetOptions.IgnoreUnreadable && errorIsPermission(err) {
|
||||
continue
|
||||
}
|
||||
return errors.Wrapf(err, "copier: get: %q", queue[i])
|
||||
return fmt.Errorf("copier: get: %q: %w", queue[i], err)
|
||||
}
|
||||
itemsCopied++
|
||||
}
|
||||
}
|
||||
if itemsCopied == 0 {
|
||||
return errors.Wrapf(syscall.ENOENT, "copier: get: copied no items")
|
||||
return fmt.Errorf("copier: get: copied no items: %w", syscall.ENOENT)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
@ -1359,7 +1357,7 @@ func copierHandlerGetOne(srcfi os.FileInfo, symlinkTarget, name, contentPath str
|
|||
// build the header using the name provided
|
||||
hdr, err := tar.FileInfoHeader(srcfi, symlinkTarget)
|
||||
if err != nil {
|
||||
return errors.Wrapf(err, "error generating tar header for %s (%s)", contentPath, symlinkTarget)
|
||||
return fmt.Errorf("error generating tar header for %s (%s): %w", contentPath, symlinkTarget, err)
|
||||
}
|
||||
if name != "" {
|
||||
hdr.Name = filepath.ToSlash(name)
|
||||
|
@ -1381,7 +1379,7 @@ func copierHandlerGetOne(srcfi os.FileInfo, symlinkTarget, name, contentPath str
|
|||
if !options.StripXattrs {
|
||||
xattrs, err = Lgetxattrs(contentPath)
|
||||
if err != nil {
|
||||
return errors.Wrapf(err, "error getting extended attributes for %q", contentPath)
|
||||
return fmt.Errorf("error getting extended attributes for %q: %w", contentPath, err)
|
||||
}
|
||||
}
|
||||
hdr.Xattrs = xattrs // nolint:staticcheck
|
||||
|
@ -1393,12 +1391,12 @@ func copierHandlerGetOne(srcfi os.FileInfo, symlinkTarget, name, contentPath str
|
|||
if options.ExpandArchives && isArchivePath(contentPath) {
|
||||
f, err := os.Open(contentPath)
|
||||
if err != nil {
|
||||
return errors.Wrapf(err, "error opening file for reading archive contents")
|
||||
return fmt.Errorf("error opening file for reading archive contents: %w", err)
|
||||
}
|
||||
defer f.Close()
|
||||
rc, _, err := compression.AutoDecompress(f)
|
||||
if err != nil {
|
||||
return errors.Wrapf(err, "error decompressing %s", contentPath)
|
||||
return fmt.Errorf("error decompressing %s: %w", contentPath, err)
|
||||
}
|
||||
defer rc.Close()
|
||||
tr := tar.NewReader(rc)
|
||||
|
@ -1408,22 +1406,22 @@ func copierHandlerGetOne(srcfi os.FileInfo, symlinkTarget, name, contentPath str
|
|||
hdr.Name = handleRename(options.Rename, hdr.Name)
|
||||
}
|
||||
if err = tw.WriteHeader(hdr); err != nil {
|
||||
return errors.Wrapf(err, "error writing tar header from %q to pipe", contentPath)
|
||||
return fmt.Errorf("error writing tar header from %q to pipe: %w", contentPath, err)
|
||||
}
|
||||
if hdr.Size != 0 {
|
||||
n, err := io.Copy(tw, tr)
|
||||
if err != nil {
|
||||
return errors.Wrapf(err, "error extracting content from archive %s: %s", contentPath, hdr.Name)
|
||||
return fmt.Errorf("error extracting content from archive %s: %s: %w", contentPath, hdr.Name, err)
|
||||
}
|
||||
if n != hdr.Size {
|
||||
return errors.Errorf("error extracting contents of archive %s: incorrect length for %q", contentPath, hdr.Name)
|
||||
return fmt.Errorf("error extracting contents of archive %s: incorrect length for %q", contentPath, hdr.Name)
|
||||
}
|
||||
tw.Flush()
|
||||
}
|
||||
hdr, err = tr.Next()
|
||||
}
|
||||
if err != io.EOF {
|
||||
return errors.Wrapf(err, "error extracting contents of archive %s", contentPath)
|
||||
return fmt.Errorf("error extracting contents of archive %s: %w", contentPath, err)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
@ -1445,7 +1443,7 @@ func copierHandlerGetOne(srcfi os.FileInfo, symlinkTarget, name, contentPath str
|
|||
hostPair := idtools.IDPair{UID: hdr.Uid, GID: hdr.Gid}
|
||||
hdr.Uid, hdr.Gid, err = idMappings.ToContainer(hostPair)
|
||||
if err != nil {
|
||||
return errors.Wrapf(err, "error mapping host filesystem owners %#v to container filesystem owners", hostPair)
|
||||
return fmt.Errorf("error mapping host filesystem owners %#v to container filesystem owners: %w", hostPair, err)
|
||||
}
|
||||
}
|
||||
// force ownership and/or permissions, if requested
|
||||
|
@ -1469,29 +1467,29 @@ func copierHandlerGetOne(srcfi os.FileInfo, symlinkTarget, name, contentPath str
|
|||
// open the file first so that we don't write a header for it if we can't actually read it
|
||||
f, err = os.Open(contentPath)
|
||||
if err != nil {
|
||||
return errors.Wrapf(err, "error opening file for adding its contents to archive")
|
||||
return fmt.Errorf("error opening file for adding its contents to archive: %w", err)
|
||||
}
|
||||
defer f.Close()
|
||||
} else if hdr.Typeflag == tar.TypeDir {
|
||||
// open the directory file first to make sure we can access it.
|
||||
f, err = os.Open(contentPath)
|
||||
if err != nil {
|
||||
return errors.Wrapf(err, "error opening directory for adding its contents to archive")
|
||||
return fmt.Errorf("error opening directory for adding its contents to archive: %w", err)
|
||||
}
|
||||
defer f.Close()
|
||||
}
|
||||
// output the header
|
||||
if err = tw.WriteHeader(hdr); err != nil {
|
||||
return errors.Wrapf(err, "error writing header for %s (%s)", contentPath, hdr.Name)
|
||||
return fmt.Errorf("error writing header for %s (%s): %w", contentPath, hdr.Name, err)
|
||||
}
|
||||
if hdr.Typeflag == tar.TypeReg {
|
||||
// output the content
|
||||
n, err := io.Copy(tw, f)
|
||||
if err != nil {
|
||||
return errors.Wrapf(err, "error copying %s", contentPath)
|
||||
return fmt.Errorf("error copying %s: %w", contentPath, err)
|
||||
}
|
||||
if n != hdr.Size {
|
||||
return errors.Errorf("error copying %s: incorrect size (expected %d bytes, read %d bytes)", contentPath, n, hdr.Size)
|
||||
return fmt.Errorf("error copying %s: incorrect size (expected %d bytes, read %d bytes)", contentPath, n, hdr.Size)
|
||||
}
|
||||
tw.Flush()
|
||||
}
|
||||
|
@ -1542,7 +1540,7 @@ func copierHandlerPut(bulkReader io.Reader, req request, idMappings *idtools.IDM
|
|||
ensureDirectoryUnderRoot := func(directory string) error {
|
||||
rel, err := convertToRelSubdirectory(req.Root, directory)
|
||||
if err != nil {
|
||||
return errors.Wrapf(err, "%q is not a subdirectory of %q", directory, req.Root)
|
||||
return fmt.Errorf("%q is not a subdirectory of %q: %w", directory, req.Root, err)
|
||||
}
|
||||
subdir := ""
|
||||
for _, component := range strings.Split(rel, string(os.PathSeparator)) {
|
||||
|
@ -1550,7 +1548,7 @@ func copierHandlerPut(bulkReader io.Reader, req request, idMappings *idtools.IDM
|
|||
path := filepath.Join(req.Root, subdir)
|
||||
if err := os.Mkdir(path, 0700); err == nil {
|
||||
if err = lchown(path, defaultDirUID, defaultDirGID); err != nil {
|
||||
return errors.Wrapf(err, "copier: put: error setting owner of %q to %d:%d", path, defaultDirUID, defaultDirGID)
|
||||
return fmt.Errorf("copier: put: error setting owner of %q to %d:%d: %w", path, defaultDirUID, defaultDirGID, err)
|
||||
}
|
||||
// make a conditional note to set this directory's permissions
|
||||
// later, but not if we already had an explictly-provided mode
|
||||
|
@ -1558,8 +1556,10 @@ func copierHandlerPut(bulkReader io.Reader, req request, idMappings *idtools.IDM
|
|||
directoryModes[path] = defaultDirMode
|
||||
}
|
||||
} else {
|
||||
if !os.IsExist(err) {
|
||||
return errors.Wrapf(err, "copier: put: error checking directory %q", path)
|
||||
// FreeBSD can return EISDIR for "mkdir /":
|
||||
// https://bugs.freebsd.org/bugzilla/show_bug.cgi?id=59739.
|
||||
if !os.IsExist(err) && !errors.Is(err, syscall.EISDIR) {
|
||||
return fmt.Errorf("copier: put: error checking directory %q: %w", path, err)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
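The new check above accepts EISDIR in addition to EEXIST so that ensuring a parent directory does not fail when the path is "/" on FreeBSD. A sketch of the tolerant mkdir pattern in isolation:

package main

import (
	"errors"
	"fmt"
	"os"
	"syscall"
)

func ensureDir(path string) error {
	if err := os.Mkdir(path, 0o700); err != nil {
		// Already existing directories are fine; FreeBSD reports EISDIR for "mkdir /".
		if !os.IsExist(err) && !errors.Is(err, syscall.EISDIR) {
			return fmt.Errorf("error checking directory %q: %w", path, err)
		}
	}
	return nil
}

func main() {
	fmt.Println(ensureDir("/")) // <nil>: "/" already exists
}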
@ -1568,14 +1568,14 @@ func copierHandlerPut(bulkReader io.Reader, req request, idMappings *idtools.IDM
|
|||
makeDirectoryWriteable := func(directory string) error {
|
||||
st, err := os.Lstat(directory)
|
||||
if err != nil {
|
||||
return errors.Wrapf(err, "copier: put: error reading permissions of directory %q", directory)
|
||||
return fmt.Errorf("copier: put: error reading permissions of directory %q: %w", directory, err)
|
||||
}
|
||||
mode := st.Mode() & os.ModePerm
|
||||
if _, ok := directoryModes[directory]; !ok {
|
||||
directoryModes[directory] = mode
|
||||
}
|
||||
if err = os.Chmod(directory, 0o700); err != nil {
|
||||
return errors.Wrapf(err, "copier: put: error making directory %q writable", directory)
|
||||
return fmt.Errorf("copier: put: error making directory %q writable: %w", directory, err)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
@ -1584,7 +1584,7 @@ func copierHandlerPut(bulkReader io.Reader, req request, idMappings *idtools.IDM
|
|||
if err != nil && os.IsExist(err) {
|
||||
if req.PutOptions.NoOverwriteDirNonDir {
|
||||
if st, err2 := os.Lstat(path); err2 == nil && st.IsDir() {
|
||||
return 0, errors.Wrapf(err, "copier: put: error creating file at %q", path)
|
||||
return 0, fmt.Errorf("copier: put: error creating file at %q: %w", path, err)
|
||||
}
|
||||
}
|
||||
if err = os.RemoveAll(path); err != nil {
|
||||
|
@ -1595,7 +1595,7 @@ func copierHandlerPut(bulkReader io.Reader, req request, idMappings *idtools.IDM
|
|||
err = os.RemoveAll(path)
|
||||
}
|
||||
if err != nil {
|
||||
return 0, errors.Wrapf(err, "copier: put: error removing item to be overwritten %q", path)
|
||||
return 0, fmt.Errorf("copier: put: error removing item to be overwritten %q: %w", path, err)
|
||||
}
|
||||
}
|
||||
f, err = os.OpenFile(path, os.O_CREATE|os.O_WRONLY|os.O_TRUNC|os.O_EXCL, 0600)
|
||||
|
@ -1607,12 +1607,12 @@ func copierHandlerPut(bulkReader io.Reader, req request, idMappings *idtools.IDM
|
|||
f, err = os.OpenFile(path, os.O_CREATE|os.O_WRONLY|os.O_TRUNC|os.O_EXCL, 0600)
|
||||
}
|
||||
if err != nil {
|
||||
return 0, errors.Wrapf(err, "copier: put: error opening file %q for writing", path)
|
||||
return 0, fmt.Errorf("copier: put: error opening file %q for writing: %w", path, err)
|
||||
}
|
||||
defer f.Close()
|
||||
n, err := io.Copy(f, tr)
|
||||
if err != nil {
|
||||
return n, errors.Wrapf(err, "copier: put: error writing file %q", path)
|
||||
return n, fmt.Errorf("copier: put: error writing file %q: %w", path, err)
|
||||
}
|
||||
return n, nil
|
||||
}
|
||||
|
@ -1671,7 +1671,7 @@ func copierHandlerPut(bulkReader io.Reader, req request, idMappings *idtools.IDM
|
|||
containerPair := idtools.IDPair{UID: hdr.Uid, GID: hdr.Gid}
|
||||
hostPair, err := idMappings.ToHost(containerPair)
|
||||
if err != nil {
|
||||
return errors.Wrapf(err, "error mapping container filesystem owner 0,0 to host filesystem owners")
|
||||
return fmt.Errorf("error mapping container filesystem owner 0,0 to host filesystem owners: %w", err)
|
||||
}
|
||||
hdr.Uid, hdr.Gid = hostPair.UID, hostPair.GID
|
||||
}
|
||||
|
@ -1716,14 +1716,14 @@ func copierHandlerPut(bulkReader io.Reader, req request, idMappings *idtools.IDM
|
|||
switch hdr.Typeflag {
|
||||
// no type flag for sockets
|
||||
default:
|
||||
return errors.Errorf("unrecognized Typeflag %c", hdr.Typeflag)
|
||||
return fmt.Errorf("unrecognized Typeflag %c", hdr.Typeflag)
|
||||
case tar.TypeReg, tar.TypeRegA:
|
||||
var written int64
|
||||
written, err = createFile(path, tr)
|
||||
// only check the length if there wasn't an error, which we'll
|
||||
// check along with errors for other types of entries
|
||||
if err == nil && written != hdr.Size {
|
||||
return errors.Errorf("copier: put: error creating regular file %q: incorrect length (%d != %d)", path, written, hdr.Size)
|
||||
return fmt.Errorf("copier: put: error creating regular file %q: incorrect length (%d != %d)", path, written, hdr.Size)
|
||||
}
|
||||
case tar.TypeLink:
|
||||
var linkTarget string
|
||||
|
@ -1736,7 +1736,7 @@ func copierHandlerPut(bulkReader io.Reader, req request, idMappings *idtools.IDM
|
|||
hdr.Linkname = handleRename(req.PutOptions.Rename, hdr.Linkname)
|
||||
}
|
||||
if linkTarget, err = resolvePath(targetDirectory, filepath.Join(req.Root, filepath.FromSlash(hdr.Linkname)), true, nil); err != nil {
|
||||
return errors.Errorf("error resolving hardlink target path %q under root %q", hdr.Linkname, req.Root)
|
||||
return fmt.Errorf("error resolving hardlink target path %q under root %q", hdr.Linkname, req.Root)
|
||||
}
|
||||
if err = os.Link(linkTarget, path); err != nil && os.IsExist(err) {
|
||||
if req.PutOptions.NoOverwriteDirNonDir {
|
||||
|
@ -1841,11 +1841,11 @@ func copierHandlerPut(bulkReader io.Reader, req request, idMappings *idtools.IDM
|
|||
}
|
||||
// check for errors
|
||||
if err != nil {
|
||||
return errors.Wrapf(err, "copier: put: error creating %q", path)
|
||||
return fmt.Errorf("copier: put: error creating %q: %w", path, err)
|
||||
}
|
||||
// set ownership
|
||||
if err = lchown(path, hdr.Uid, hdr.Gid); err != nil {
|
||||
return errors.Wrapf(err, "copier: put: error setting ownership of %q to %d:%d", path, hdr.Uid, hdr.Gid)
|
||||
return fmt.Errorf("copier: put: error setting ownership of %q to %d:%d: %w", path, hdr.Uid, hdr.Gid, err)
|
||||
}
|
||||
// set permissions, except for symlinks, since we don't
|
||||
// have an lchmod, and directories, which we'll fix up
|
||||
|
@ -1854,7 +1854,7 @@ func copierHandlerPut(bulkReader io.Reader, req request, idMappings *idtools.IDM
|
|||
// write to, but which we'll need to create content in
|
||||
if hdr.Typeflag != tar.TypeSymlink && hdr.Typeflag != tar.TypeDir {
|
||||
if err = os.Chmod(path, mode); err != nil {
|
||||
return errors.Wrapf(err, "copier: put: error setting permissions on %q to 0%o", path, mode)
|
||||
return fmt.Errorf("copier: put: error setting permissions on %q to 0%o: %w", path, mode, err)
|
||||
}
|
||||
}
|
||||
// set other bits that might have been reset by chown()
|
||||
|
@ -1869,14 +1869,14 @@ func copierHandlerPut(bulkReader io.Reader, req request, idMappings *idtools.IDM
|
|||
mode |= syscall.S_ISVTX
|
||||
}
|
||||
if err = syscall.Chmod(path, uint32(mode)); err != nil {
|
||||
return errors.Wrapf(err, "error setting additional permissions on %q to 0%o", path, mode)
|
||||
return fmt.Errorf("error setting additional permissions on %q to 0%o: %w", path, mode, err)
|
||||
}
|
||||
}
|
||||
// set xattrs, including some that might have been reset by chown()
|
||||
if !req.PutOptions.StripXattrs {
|
||||
if err = Lsetxattrs(path, hdr.Xattrs); err != nil { // nolint:staticcheck
|
||||
if !req.PutOptions.IgnoreXattrErrors {
|
||||
return errors.Wrapf(err, "copier: put: error setting extended attributes on %q", path)
|
||||
return fmt.Errorf("copier: put: error setting extended attributes on %q: %w", path, err)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
@ -1885,13 +1885,13 @@ func copierHandlerPut(bulkReader io.Reader, req request, idMappings *idtools.IDM
|
|||
hdr.AccessTime = hdr.ModTime
|
||||
}
|
||||
if err = lutimes(hdr.Typeflag == tar.TypeSymlink, path, hdr.AccessTime, hdr.ModTime); err != nil {
|
||||
return errors.Wrapf(err, "error setting access and modify timestamps on %q to %s and %s", path, hdr.AccessTime, hdr.ModTime)
|
||||
return fmt.Errorf("error setting access and modify timestamps on %q to %s and %s: %w", path, hdr.AccessTime, hdr.ModTime, err)
|
||||
}
|
||||
nextHeader:
|
||||
hdr, err = tr.Next()
|
||||
}
|
||||
if err != io.EOF {
|
||||
return errors.Wrapf(err, "error reading tar stream: expected EOF")
|
||||
return fmt.Errorf("error reading tar stream: expected EOF: %w", err)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
@ -1941,7 +1941,9 @@ func copierHandlerMkdir(req request, idMappings *idtools.IDMappings) (*response,
|
|||
return errorResponse("copier: mkdir: error setting permissions on %q to 0%o: %v", path, dirMode)
|
||||
}
|
||||
} else {
|
||||
if !os.IsExist(err) {
|
||||
// FreeBSD can return EISDIR for "mkdir /":
|
||||
// https://bugs.freebsd.org/bugzilla/show_bug.cgi?id=59739.
|
||||
if !os.IsExist(err) && !errors.Is(err, syscall.EISDIR) {
|
||||
return errorResponse("copier: mkdir: error checking directory %q: %v", path, err)
|
||||
}
|
||||
}
|
||||
|
@ -1968,15 +1970,3 @@ func copierHandlerRemove(req request) *response {
|
|||
}
|
||||
return &response{Error: "", Remove: removeResponse{}}
|
||||
}
|
||||
|
||||
func unwrapError(err error) error {
|
||||
e := errors.Cause(err)
|
||||
for e != nil {
|
||||
err = e
|
||||
e = stderrors.Unwrap(err)
|
||||
if e == err {
|
||||
break
|
||||
}
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
|
|
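unwrapError above walks both pkg/errors and standard wrap chains down to the innermost error. A stand-alone sketch of the same loop using only the standard library (rootCause is an illustrative name, not the vendored function):

package main

import (
	"errors"
	"fmt"
	"syscall"
)

// rootCause follows %w chains until Unwrap returns nil or stops making progress.
func rootCause(err error) error {
	for {
		next := errors.Unwrap(err)
		if next == nil || next == err {
			return err
		}
		err = next
	}
}

func main() {
	wrapped := fmt.Errorf("outer: %w", fmt.Errorf("inner: %w", syscall.ERANGE))
	fmt.Println(rootCause(wrapped) == syscall.ERANGE) // true
}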
@ -0,0 +1,12 @@
//go:build !windows && !freebsd
// +build !windows,!freebsd

package copier

import (
	"golang.org/x/sys/unix"
)

func mknod(path string, mode uint32, dev int) error {
	return unix.Mknod(path, mode, dev)
}
@ -0,0 +1,12 @@
//go:build freebsd
// +build freebsd

package copier

import (
	"golang.org/x/sys/unix"
)

func mknod(path string, mode uint32, dev int) error {
	return unix.Mknod(path, mode, uint64(dev))
}
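The two new files above exist only because unix.Mknod takes the device number as int on Linux and most platforms but as uint64 on FreeBSD. A Linux-shaped caller sketch (the path and major/minor numbers are made up for illustration; creating device nodes normally requires root):

package main

import (
	"fmt"

	"golang.org/x/sys/unix"
)

func main() {
	// unix.Mkdev builds a device number from major/minor; 1,3 is /dev/null on Linux.
	dev := int(unix.Mkdev(1, 3))
	// S_IFCHR marks a character device; 0o600 are the permission bits.
	if err := unix.Mknod("/tmp/example-null", unix.S_IFCHR|0o600, dev); err != nil {
		fmt.Println("mknod failed (normally requires root):", err)
	}
}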
@ -1,13 +1,14 @@
|
|||
//go:build !windows
|
||||
// +build !windows
|
||||
|
||||
package copier
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"os"
|
||||
"syscall"
|
||||
"time"
|
||||
|
||||
"github.com/pkg/errors"
|
||||
"golang.org/x/sys/unix"
|
||||
)
|
||||
|
||||
|
@ -16,13 +17,13 @@ var canChroot = os.Getuid() == 0
|
|||
func chroot(root string) (bool, error) {
|
||||
if canChroot {
|
||||
if err := os.Chdir(root); err != nil {
|
||||
return false, errors.Wrapf(err, "error changing to intended-new-root directory %q", root)
|
||||
return false, fmt.Errorf("error changing to intended-new-root directory %q: %w", root, err)
|
||||
}
|
||||
if err := unix.Chroot(root); err != nil {
|
||||
return false, errors.Wrapf(err, "error chrooting to directory %q", root)
|
||||
return false, fmt.Errorf("error chrooting to directory %q: %w", root, err)
|
||||
}
|
||||
if err := os.Chdir(string(os.PathSeparator)); err != nil {
|
||||
return false, errors.Wrapf(err, "error changing to just-became-root directory %q", root)
|
||||
return false, fmt.Errorf("error changing to just-became-root directory %q: %w", root, err)
|
||||
}
|
||||
return true, nil
|
||||
}
|
||||
|
@ -45,10 +46,6 @@ func mkfifo(path string, mode uint32) error {
|
|||
return unix.Mkfifo(path, mode)
|
||||
}
|
||||
|
||||
func mknod(path string, mode uint32, dev int) error {
|
||||
return unix.Mknod(path, mode, dev)
|
||||
}
|
||||
|
||||
func chmod(path string, mode os.FileMode) error {
|
||||
return os.Chmod(path, mode)
|
||||
}
|
||||
|
|
|
@ -1,13 +1,15 @@
|
|||
//go:build linux || netbsd || freebsd || darwin
|
||||
// +build linux netbsd freebsd darwin
|
||||
|
||||
package copier
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
"syscall"
|
||||
|
||||
"github.com/pkg/errors"
|
||||
"github.com/containers/buildah/util"
|
||||
"golang.org/x/sys/unix"
|
||||
)
|
||||
|
||||
|
@ -43,22 +45,22 @@ func Lgetxattrs(path string) (map[string]string, error) {
|
|||
list = make([]byte, listSize)
|
||||
size, err := unix.Llistxattr(path, list)
|
||||
if err != nil {
|
||||
if unwrapError(err) == syscall.ERANGE {
|
||||
if util.Cause(err) == syscall.ERANGE {
|
||||
listSize *= 2
|
||||
continue
|
||||
}
|
||||
if (unwrapError(err) == syscall.ENOTSUP) || (unwrapError(err) == syscall.ENOSYS) {
|
||||
if (util.Cause(err) == syscall.ENOTSUP) || (util.Cause(err) == syscall.ENOSYS) {
|
||||
// treat these errors listing xattrs as equivalent to "no xattrs"
|
||||
list = list[:0]
|
||||
break
|
||||
}
|
||||
return nil, errors.Wrapf(err, "error listing extended attributes of %q", path)
|
||||
return nil, fmt.Errorf("error listing extended attributes of %q: %w", path, err)
|
||||
}
|
||||
list = list[:size]
|
||||
break
|
||||
}
|
||||
if listSize >= maxSize {
|
||||
return nil, errors.Errorf("unable to read list of attributes for %q: size would have been too big", path)
|
||||
return nil, fmt.Errorf("unable to read list of attributes for %q: size would have been too big", path)
|
||||
}
|
||||
m := make(map[string]string)
|
||||
for _, attribute := range strings.Split(string(list), string('\000')) {
|
||||
|
@ -69,17 +71,17 @@ func Lgetxattrs(path string) (map[string]string, error) {
|
|||
attributeValue = make([]byte, attributeSize)
|
||||
size, err := unix.Lgetxattr(path, attribute, attributeValue)
|
||||
if err != nil {
|
||||
if unwrapError(err) == syscall.ERANGE {
|
||||
if util.Cause(err) == syscall.ERANGE {
|
||||
attributeSize *= 2
|
||||
continue
|
||||
}
|
||||
return nil, errors.Wrapf(err, "error getting value of extended attribute %q on %q", attribute, path)
|
||||
return nil, fmt.Errorf("error getting value of extended attribute %q on %q: %w", attribute, path, err)
|
||||
}
|
||||
m[attribute] = string(attributeValue[:size])
|
||||
break
|
||||
}
|
||||
if attributeSize >= maxSize {
|
||||
return nil, errors.Errorf("unable to read attribute %q of %q: size would have been too big", attribute, path)
|
||||
return nil, fmt.Errorf("unable to read attribute %q of %q: size would have been too big", attribute, path)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
@ -91,7 +93,7 @@ func Lsetxattrs(path string, xattrs map[string]string) error {
|
|||
for attribute, value := range xattrs {
|
||||
if isRelevantXattr(attribute) {
|
||||
if err := unix.Lsetxattr(path, attribute, []byte(value), 0); err != nil {
|
||||
return errors.Wrapf(err, "error setting value of extended attribute %q on %q", attribute, path)
|
||||
return fmt.Errorf("error setting value of extended attribute %q on %q: %w", attribute, path, err)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
|
|
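Lgetxattrs above retries with a doubled buffer whenever the kernel reports ERANGE, and treats ENOTSUP/ENOSYS as "no xattrs". A stand-alone sketch of that grow-and-retry loop (the buffer sizes and sanity limit are assumptions of this sketch, not the vendored values):

package main

import (
	"errors"
	"fmt"
	"syscall"

	"golang.org/x/sys/unix"
)

func listXattrNames(path string) ([]byte, error) {
	const maxSize = 64 * 1024 * 1024 // assumed sanity limit for this sketch
	for listSize := 64 * 1024; listSize < maxSize; listSize *= 2 {
		list := make([]byte, listSize)
		size, err := unix.Llistxattr(path, list)
		if errors.Is(err, syscall.ERANGE) {
			continue // buffer too small: double it and retry
		}
		if errors.Is(err, syscall.ENOTSUP) || errors.Is(err, syscall.ENOSYS) {
			return nil, nil // filesystem has no xattr support: treat as "no xattrs"
		}
		if err != nil {
			return nil, fmt.Errorf("error listing extended attributes of %q: %w", path, err)
		}
		return list[:size], nil
	}
	return nil, fmt.Errorf("unable to read list of attributes for %q: size would have been too big", path)
}

func main() {
	names, err := listXattrNames("/tmp")
	fmt.Println(len(names), err)
}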
@ -101,6 +101,8 @@ type CommonBuildOptions struct {
|
|||
Secrets []string
|
||||
// SSHSources is the available ssh agent connections to forward in the build
|
||||
SSHSources []string
|
||||
// OCIHooksDir is the location of OCI hooks for the build containers
|
||||
OCIHooksDir []string
|
||||
}
|
||||
|
||||
// BuildOptions can be used to alter how an image is built.
|
||||
|
|
|
@ -0,0 +1,17 @@
//go:build freebsd
// +build freebsd

package define

const (
	// TypeBind is the type for mounting a host directory
	TypeBind = "nullfs"

	// TempDir is the default for storing temporary files
	TempDir = "/var/tmp"
)

var (
	// Mount options for bind
	BindOptions = []string{}
)
@ -0,0 +1,17 @@
//go:build linux
// +build linux

package define

const (
	// TypeBind is the type for mounting a host directory
	TypeBind = "bind"

	// TempDir is the default for storing temporary files
	TempDir = "/dev/shm"
)

var (
	// Mount options for bind
	BindOptions = []string{"bind"}
)
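A hypothetical consumer of the per-platform constants above: the same call site gets a "bind" mount on Linux and a "nullfs" mount on FreeBSD without extra conditionals. hostDirMount is illustrative only and not part of the vendored API:

package define

import (
	specs "github.com/opencontainers/runtime-spec/specs-go"
)

// hostDirMount builds a runtime-spec mount for a host directory using the
// platform-selected mount type and default bind options.
func hostDirMount(source, destination string) specs.Mount {
	return specs.Mount{
		Destination: destination,
		Type:        TypeBind, // "bind" on Linux, "nullfs" on FreeBSD
		Source:      source,
		Options:     append([]string{"rw"}, BindOptions...),
	}
}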
@ -3,6 +3,7 @@ package define
|
|||
import (
|
||||
"bufio"
|
||||
"bytes"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"net/http"
|
||||
|
@ -17,9 +18,9 @@ import (
|
|||
"github.com/containers/storage/pkg/archive"
|
||||
"github.com/containers/storage/pkg/chrootarchive"
|
||||
"github.com/containers/storage/pkg/ioutils"
|
||||
"github.com/containers/storage/types"
|
||||
v1 "github.com/opencontainers/image-spec/specs-go/v1"
|
||||
"github.com/opencontainers/runtime-spec/specs-go"
|
||||
"github.com/pkg/errors"
|
||||
"github.com/sirupsen/logrus"
|
||||
)
|
||||
|
||||
|
@ -87,6 +88,8 @@ type IDMappingOptions struct {
|
|||
HostGIDMapping bool
|
||||
UIDMap []specs.LinuxIDMapping
|
||||
GIDMap []specs.LinuxIDMapping
|
||||
AutoUserNs bool
|
||||
AutoUserNsOpts types.AutoUserNsOptions
|
||||
}
|
||||
|
||||
// Secret is a secret source that can be used in a RUN
|
||||
|
@ -120,11 +123,11 @@ func TempDirForURL(dir, prefix, url string) (name string, subdir string, err err
|
|||
}
|
||||
name, err = ioutil.TempDir(dir, prefix)
|
||||
if err != nil {
|
||||
return "", "", errors.Wrapf(err, "error creating temporary directory for %q", url)
|
||||
return "", "", fmt.Errorf("error creating temporary directory for %q: %w", url, err)
|
||||
}
|
||||
urlParsed, err := urlpkg.Parse(url)
|
||||
if err != nil {
|
||||
return "", "", errors.Wrapf(err, "error parsing url %q", url)
|
||||
return "", "", fmt.Errorf("error parsing url %q: %w", url, err)
|
||||
}
|
||||
if strings.HasPrefix(url, "git://") || strings.HasSuffix(urlParsed.Path, ".git") {
|
||||
combinedOutput, gitSubDir, err := cloneToDirectory(url, name)
|
||||
|
@ -132,7 +135,7 @@ func TempDirForURL(dir, prefix, url string) (name string, subdir string, err err
|
|||
if err2 := os.RemoveAll(name); err2 != nil {
|
||||
logrus.Debugf("error removing temporary directory %q: %v", name, err2)
|
||||
}
|
||||
return "", "", errors.Wrapf(err, "cloning %q to %q:\n%s", url, name, string(combinedOutput))
|
||||
return "", "", fmt.Errorf("cloning %q to %q:\n%s: %w", url, name, string(combinedOutput), err)
|
||||
}
|
||||
// Check if git url specifies any subdir
|
||||
// if subdir is there switch to subdir.
|
||||
|
@ -172,7 +175,7 @@ func TempDirForURL(dir, prefix, url string) (name string, subdir string, err err
|
|||
if err2 := os.Remove(name); err2 != nil {
|
||||
logrus.Debugf("error removing temporary directory %q: %v", name, err2)
|
||||
}
|
||||
return "", "", errors.Errorf("unreachable code reached")
|
||||
return "", "", errors.New("unreachable code reached")
|
||||
}
|
||||
|
||||
func cloneToDirectory(url, dir string) ([]byte, string, error) {
|
||||
|
@ -207,8 +210,11 @@ func downloadToDirectory(url, dir string) error {
|
|||
return err
|
||||
}
|
||||
defer resp.Body.Close()
|
||||
if resp.StatusCode < http.StatusOK || resp.StatusCode >= http.StatusBadRequest {
|
||||
return fmt.Errorf("invalid response status %d", resp.StatusCode)
|
||||
}
|
||||
if resp.ContentLength == 0 {
|
||||
return errors.Errorf("no contents in %q", url)
|
||||
return fmt.Errorf("no contents in %q", url)
|
||||
}
|
||||
if err := chrootarchive.Untar(resp.Body, dir, nil); err != nil {
|
||||
resp1, err := http.Get(url)
|
||||
|
@ -223,7 +229,7 @@ func downloadToDirectory(url, dir string) error {
|
|||
dockerfile := filepath.Join(dir, "Dockerfile")
|
||||
// Assume this is a Dockerfile
|
||||
if err := ioutils.AtomicWriteFile(dockerfile, body, 0600); err != nil {
|
||||
return errors.Wrapf(err, "Failed to write %q to %q", url, dockerfile)
|
||||
return fmt.Errorf("failed to write %q to %q: %w", url, dockerfile, err)
|
||||
}
|
||||
}
|
||||
return nil
|
||||
|
@ -234,14 +240,14 @@ func stdinToDirectory(dir string) error {
|
|||
r := bufio.NewReader(os.Stdin)
|
||||
b, err := ioutil.ReadAll(r)
|
||||
if err != nil {
|
||||
return errors.Wrapf(err, "Failed to read from stdin")
|
||||
return fmt.Errorf("failed to read from stdin: %w", err)
|
||||
}
|
||||
reader := bytes.NewReader(b)
|
||||
if err := chrootarchive.Untar(reader, dir, nil); err != nil {
|
||||
dockerfile := filepath.Join(dir, "Dockerfile")
|
||||
// Assume this is a Dockerfile
|
||||
if err := ioutils.AtomicWriteFile(dockerfile, b, 0600); err != nil {
|
||||
return errors.Wrapf(err, "Failed to write bytes to %q", dockerfile)
|
||||
return fmt.Errorf("failed to write bytes to %q: %w", dockerfile, err)
|
||||
}
|
||||
}
|
||||
return nil
|
||||
|
|
|
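downloadToDirectory above now rejects non-success HTTP statuses before looking at the body. A small stand-alone sketch of the same check (fetch and the example URL are illustrative):

package main

import (
	"fmt"
	"net/http"
)

func fetch(url string) error {
	resp, err := http.Get(url)
	if err != nil {
		return err
	}
	defer resp.Body.Close()
	// Anything outside the 2xx/3xx range is treated as a failed download.
	if resp.StatusCode < http.StatusOK || resp.StatusCode >= http.StatusBadRequest {
		return fmt.Errorf("invalid response status %d", resp.StatusCode)
	}
	if resp.ContentLength == 0 {
		return fmt.Errorf("no contents in %q", url)
	}
	return nil
}

func main() {
	fmt.Println(fetch("https://example.com/Dockerfile"))
}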
@ -1,14 +1,12 @@
|
|||
package buildah
|
||||
|
||||
import (
|
||||
"github.com/pkg/errors"
|
||||
)
|
||||
import "fmt"
|
||||
|
||||
// Delete removes the working container. The buildah.Builder object should not
|
||||
// be used after this method is called.
|
||||
func (b *Builder) Delete() error {
|
||||
if err := b.store.DeleteContainer(b.ContainerID); err != nil {
|
||||
return errors.Wrapf(err, "error deleting build container %q", b.ContainerID)
|
||||
return fmt.Errorf("error deleting build container %q: %w", b.ContainerID, err)
|
||||
}
|
||||
b.MountPoint = ""
|
||||
b.Container = ""
|
||||
|
|
|
@ -2,6 +2,7 @@ package buildah
|
|||
|
||||
import (
|
||||
"archive/tar"
|
||||
"errors"
|
||||
"fmt"
|
||||
"hash"
|
||||
"io"
|
||||
|
@ -9,7 +10,6 @@ import (
|
|||
"time"
|
||||
|
||||
digest "github.com/opencontainers/go-digest"
|
||||
"github.com/pkg/errors"
|
||||
)
|
||||
|
||||
type digester interface {
|
||||
|
@ -68,14 +68,14 @@ func (t *tarFilterer) Close() error {
|
|||
t.closedLock.Lock()
|
||||
if t.closed {
|
||||
t.closedLock.Unlock()
|
||||
return errors.Errorf("tar filter is already closed")
|
||||
return errors.New("tar filter is already closed")
|
||||
}
|
||||
t.closed = true
|
||||
t.closedLock.Unlock()
|
||||
err := t.pipeWriter.Close()
|
||||
t.wg.Wait()
|
||||
if err != nil {
|
||||
return errors.Wrapf(err, "error closing filter pipe")
|
||||
return fmt.Errorf("error closing filter pipe: %w", err)
|
||||
}
|
||||
return t.err
|
||||
}
|
||||
|
@ -110,7 +110,7 @@ func newTarFilterer(writeCloser io.WriteCloser, filter func(hdr *tar.Header) (sk
|
|||
if !skip {
|
||||
err = tarWriter.WriteHeader(hdr)
|
||||
if err != nil {
|
||||
err = errors.Wrapf(err, "error filtering tar header for %q", hdr.Name)
|
||||
err = fmt.Errorf("error filtering tar header for %q: %w", hdr.Name, err)
|
||||
break
|
||||
}
|
||||
if hdr.Size != 0 {
|
||||
|
@ -122,11 +122,11 @@ func newTarFilterer(writeCloser io.WriteCloser, filter func(hdr *tar.Header) (sk
|
|||
n, copyErr = io.Copy(tarWriter, tarReader)
|
||||
}
|
||||
if copyErr != nil {
|
||||
err = errors.Wrapf(copyErr, "error copying content for %q", hdr.Name)
|
||||
err = fmt.Errorf("error copying content for %q: %w", hdr.Name, copyErr)
|
||||
break
|
||||
}
|
||||
if n != hdr.Size {
|
||||
err = errors.Errorf("error filtering content for %q: expected %d bytes, got %d bytes", hdr.Name, hdr.Size, n)
|
||||
err = fmt.Errorf("error filtering content for %q: expected %d bytes, got %d bytes", hdr.Name, hdr.Size, n)
|
||||
break
|
||||
}
|
||||
}
|
||||
|
@ -134,7 +134,7 @@ func newTarFilterer(writeCloser io.WriteCloser, filter func(hdr *tar.Header) (sk
|
|||
hdr, err = tarReader.Next()
|
||||
}
|
||||
if err != io.EOF {
|
||||
filterer.err = errors.Wrapf(err, "error reading tar archive")
|
||||
filterer.err = fmt.Errorf("error reading tar archive: %w", err)
|
||||
break
|
||||
}
|
||||
filterer.closedLock.Lock()
|
||||
|
|
|
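newTarFilterer above pumps a tar stream through a header callback on a pipe. A simplified, synchronous sketch of the same filtering idea (filterTar is illustrative; the real code also supports header rewriting and runs in a goroutine):

package main

import (
	"archive/tar"
	"bytes"
	"fmt"
	"io"
)

// filterTar copies a tar stream entry by entry, dropping entries for which
// skip returns true.
func filterTar(w io.Writer, r io.Reader, skip func(*tar.Header) bool) error {
	tw := tar.NewWriter(w)
	tr := tar.NewReader(r)
	for {
		hdr, err := tr.Next()
		if err == io.EOF {
			return tw.Close()
		}
		if err != nil {
			return fmt.Errorf("error reading tar archive: %w", err)
		}
		if skip(hdr) {
			continue // tr.Next() skips the remaining content automatically
		}
		if err := tw.WriteHeader(hdr); err != nil {
			return fmt.Errorf("error filtering tar header for %q: %w", hdr.Name, err)
		}
		if hdr.Size != 0 {
			if _, err := io.Copy(tw, tr); err != nil {
				return fmt.Errorf("error copying content for %q: %w", hdr.Name, err)
			}
		}
	}
}

func main() {
	var in, out bytes.Buffer
	tw := tar.NewWriter(&in)
	tw.WriteHeader(&tar.Header{Name: "keep.txt", Typeflag: tar.TypeReg, Mode: 0o644, Size: 2})
	tw.Write([]byte("hi"))
	tw.WriteHeader(&tar.Header{Name: "drop.txt", Typeflag: tar.TypeReg, Mode: 0o644})
	tw.Close()

	err := filterTar(&out, &in, func(h *tar.Header) bool { return h.Name == "drop.txt" })
	fmt.Println(err, out.Len())
}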
@ -5,10 +5,10 @@ go 1.17
|
|||
require (
|
||||
github.com/containerd/containerd v1.6.6
|
||||
github.com/containernetworking/cni v1.1.1
|
||||
github.com/containers/common v0.48.1-0.20220608111710-dbecabbe82c9
|
||||
github.com/containers/image/v5 v5.21.2-0.20220520105616-e594853d6471
|
||||
github.com/containers/ocicrypt v1.1.4-0.20220428134531-566b808bdf6f
|
||||
github.com/containers/storage v1.41.1-0.20220607143333-8951d0153bf6
|
||||
github.com/containers/common v0.48.1-0.20220715075726-2ac10faca05a
|
||||
github.com/containers/image/v5 v5.21.2-0.20220714132403-2bb3f3e44c5c
|
||||
github.com/containers/ocicrypt v1.1.5
|
||||
github.com/containers/storage v1.41.1-0.20220714115232-fc9b0ff5272a
|
||||
github.com/docker/distribution v2.8.1+incompatible
|
||||
github.com/docker/docker v20.10.17+incompatible
|
||||
github.com/docker/go-units v0.4.0
|
||||
|
@ -19,23 +19,22 @@ require (
|
|||
github.com/onsi/ginkgo v1.16.5
|
||||
github.com/onsi/gomega v1.19.0
|
||||
github.com/opencontainers/go-digest v1.0.0
|
||||
github.com/opencontainers/image-spec v1.0.3-0.20211202193544-a5463b7f9c84
|
||||
github.com/opencontainers/image-spec v1.0.3-0.20220114050600-8b9d41f48198
|
||||
github.com/opencontainers/runc v1.1.3
|
||||
github.com/opencontainers/runtime-spec v1.0.3-0.20210326190908-1c3f411f0417
|
||||
github.com/opencontainers/runtime-tools v0.9.0
|
||||
github.com/opencontainers/selinux v1.10.1
|
||||
github.com/openshift/imagebuilder v1.2.4-0.20220502172744-009dbc6cb805
|
||||
github.com/pkg/errors v0.9.1
|
||||
github.com/seccomp/libseccomp-golang v0.9.2-0.20220502022130-f33da4d89646
|
||||
github.com/openshift/imagebuilder v1.2.4-0.20220711175835-4151e43600df
|
||||
github.com/seccomp/libseccomp-golang v0.10.0
|
||||
github.com/sirupsen/logrus v1.8.1
|
||||
github.com/spf13/cobra v1.4.0
|
||||
github.com/spf13/cobra v1.5.0
|
||||
github.com/spf13/pflag v1.0.5
|
||||
github.com/stretchr/testify v1.7.2
|
||||
github.com/stretchr/testify v1.8.0
|
||||
github.com/syndtr/gocapability v0.0.0-20200815063812-42c35b437635
|
||||
go.etcd.io/bbolt v1.3.6
|
||||
golang.org/x/crypto v0.0.0-20220411220226-7b82a4e95df4
|
||||
golang.org/x/sync v0.0.0-20210220032951-036812b2e83c
|
||||
golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a
|
||||
golang.org/x/sync v0.0.0-20220601150217-0de741cfad7f
|
||||
golang.org/x/sys v0.0.0-20220624220833-87e55d714810
|
||||
golang.org/x/term v0.0.0-20220526004731-065cf7ba2467
|
||||
)
|
||||
|
||||
|
@ -51,7 +50,7 @@ require (
|
|||
github.com/cespare/xxhash/v2 v2.1.2 // indirect
|
||||
github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e // indirect
|
||||
github.com/containerd/cgroups v1.0.3 // indirect
|
||||
github.com/containerd/stargz-snapshotter/estargz v0.11.4 // indirect
|
||||
github.com/containerd/stargz-snapshotter/estargz v0.12.0 // indirect
|
||||
github.com/containernetworking/plugins v1.1.1 // indirect
|
||||
github.com/containers/libtrust v0.0.0-20200511145503-9c3a6c22cd9a // indirect
|
||||
github.com/cyphar/filepath-securejoin v0.2.3 // indirect
|
||||
|
@ -60,20 +59,22 @@ require (
|
|||
github.com/docker/docker-credential-helpers v0.6.4 // indirect
|
||||
github.com/docker/go-connections v0.4.1-0.20210727194412-58542c764a11 // indirect
|
||||
github.com/docker/go-metrics v0.0.1 // indirect
|
||||
github.com/fsnotify/fsnotify v1.4.9 // indirect
|
||||
github.com/fsnotify/fsnotify v1.5.4 // indirect
|
||||
github.com/gogo/protobuf v1.3.2 // indirect
|
||||
github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect
|
||||
github.com/golang/protobuf v1.5.2 // indirect
|
||||
github.com/google/go-containerregistry v0.10.0 // indirect
|
||||
github.com/google/go-intervals v0.0.2 // indirect
|
||||
github.com/google/uuid v1.3.0 // indirect
|
||||
github.com/gorilla/mux v1.8.0 // indirect
|
||||
github.com/hashicorp/errwrap v1.1.0 // indirect
|
||||
github.com/imdario/mergo v0.3.12 // indirect
|
||||
github.com/imdario/mergo v0.3.13 // indirect
|
||||
github.com/inconshreveable/mousetrap v1.0.0 // indirect
|
||||
github.com/jinzhu/copier v0.3.5 // indirect
|
||||
github.com/json-iterator/go v1.1.12 // indirect
|
||||
github.com/klauspost/compress v1.15.6 // indirect
|
||||
github.com/klauspost/compress v1.15.8 // indirect
|
||||
github.com/klauspost/pgzip v1.2.5 // indirect
|
||||
github.com/letsencrypt/boulder v0.0.0-20220331220046-b23ab962616e // indirect
|
||||
github.com/manifoldco/promptui v0.9.0 // indirect
|
||||
github.com/mattn/go-runewidth v0.0.13 // indirect
|
||||
github.com/matttproud/golang_protobuf_extensions v1.0.2-0.20181231171920-c182affec369 // indirect
|
||||
|
@ -87,19 +88,23 @@ require (
|
|||
github.com/morikuni/aec v1.0.0 // indirect
|
||||
github.com/nxadm/tail v1.4.8 // indirect
|
||||
github.com/ostreedev/ostree-go v0.0.0-20210805093236-719684c64e4f // indirect
|
||||
github.com/pkg/errors v0.9.1 // indirect
|
||||
github.com/pmezard/go-difflib v1.0.0 // indirect
|
||||
github.com/proglottis/gpgme v0.1.2 // indirect
|
||||
github.com/prometheus/client_golang v1.11.1 // indirect
|
||||
github.com/proglottis/gpgme v0.1.3 // indirect
|
||||
github.com/prometheus/client_golang v1.12.1 // indirect
|
||||
github.com/prometheus/client_model v0.2.0 // indirect
|
||||
github.com/prometheus/common v0.30.0 // indirect
|
||||
github.com/prometheus/common v0.32.1 // indirect
|
||||
github.com/prometheus/procfs v0.7.3 // indirect
|
||||
github.com/rivo/uniseg v0.2.0 // indirect
|
||||
github.com/sigstore/sigstore v1.3.1-0.20220629021053-b95fc0d626c1 // indirect
|
||||
github.com/stefanberger/go-pkcs11uri v0.0.0-20201008174630-78d3cae3a980 // indirect
|
||||
github.com/sylabs/sif/v2 v2.7.0 // indirect
|
||||
github.com/sylabs/sif/v2 v2.7.1 // indirect
|
||||
github.com/tchap/go-patricia v2.3.0+incompatible // indirect
|
||||
github.com/theupdateframework/go-tuf v0.3.0 // indirect
|
||||
github.com/titanous/rocacheck v0.0.0-20171023193734-afe73141d399 // indirect
|
||||
github.com/ulikunitz/xz v0.5.10 // indirect
|
||||
github.com/vbatts/tar-split v0.11.2 // indirect
|
||||
github.com/vbauerster/mpb/v7 v7.4.1 // indirect
|
||||
github.com/vbauerster/mpb/v7 v7.4.2 // indirect
|
||||
github.com/vishvananda/netlink v1.1.1-0.20210330154013-f5de75959ad5 // indirect
|
||||
github.com/vishvananda/netns v0.0.0-20210104183010-2eb08e3e575f // indirect
|
||||
github.com/xeipuuv/gojsonpointer v0.0.0-20190905194746-02993c407bfb // indirect
|
||||
|
@ -107,12 +112,12 @@ require (
|
|||
github.com/xeipuuv/gojsonschema v1.2.0 // indirect
|
||||
go.mozilla.org/pkcs7 v0.0.0-20200128120323-432b2356ecb1 // indirect
|
||||
go.opencensus.io v0.23.0 // indirect
|
||||
golang.org/x/net v0.0.0-20220225172249-27dd8689420f // indirect
|
||||
golang.org/x/net v0.0.0-20220624214902-1bab6f366d9e // indirect
|
||||
golang.org/x/text v0.3.7 // indirect
|
||||
google.golang.org/genproto v0.0.0-20220304144024-325a89244dc8 // indirect
|
||||
google.golang.org/grpc v1.44.0 // indirect
|
||||
google.golang.org/genproto v0.0.0-20220624142145-8cd45d7dbd1f // indirect
|
||||
google.golang.org/grpc v1.47.0 // indirect
|
||||
google.golang.org/protobuf v1.28.0 // indirect
|
||||
gopkg.in/square/go-jose.v2 v2.5.1 // indirect
|
||||
gopkg.in/square/go-jose.v2 v2.6.0 // indirect
|
||||
gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 // indirect
|
||||
gopkg.in/yaml.v2 v2.4.0 // indirect
|
||||
gopkg.in/yaml.v3 v3.0.1 // indirect
|
||||
|
|
File diff suppressed because it is too large
|
@ -5,6 +5,7 @@ import (
|
|||
"bytes"
|
||||
"context"
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
|
@ -28,7 +29,6 @@ import (
|
|||
digest "github.com/opencontainers/go-digest"
|
||||
specs "github.com/opencontainers/image-spec/specs-go"
|
||||
v1 "github.com/opencontainers/image-spec/specs-go/v1"
|
||||
"github.com/pkg/errors"
|
||||
"github.com/sirupsen/logrus"
|
||||
)
|
||||
|
||||
|
@ -167,7 +167,7 @@ func (i *containerImageRef) extractRootfs(opts ExtractRootfsOptions) (io.ReadClo
|
|||
var uidMap, gidMap []idtools.IDMap
|
||||
mountPoint, err := i.store.Mount(i.containerID, i.mountLabel)
|
||||
if err != nil {
|
||||
return nil, nil, errors.Wrapf(err, "error mounting container %q", i.containerID)
|
||||
return nil, nil, fmt.Errorf("error mounting container %q: %w", i.containerID, err)
|
||||
}
|
||||
pipeReader, pipeWriter := io.Pipe()
|
||||
errChan := make(chan error, 1)
|
||||
|
@ -190,11 +190,11 @@ func (i *containerImageRef) extractRootfs(opts ExtractRootfsOptions) (io.ReadClo
|
|||
}()
|
||||
return ioutils.NewReadCloserWrapper(pipeReader, func() error {
|
||||
if err = pipeReader.Close(); err != nil {
|
||||
err = errors.Wrapf(err, "error closing tar archive of container %q", i.containerID)
|
||||
err = fmt.Errorf("error closing tar archive of container %q: %w", i.containerID, err)
|
||||
}
|
||||
if _, err2 := i.store.Unmount(i.containerID, false); err == nil {
|
||||
if err2 != nil {
|
||||
err2 = errors.Wrapf(err2, "error unmounting container %q", i.containerID)
|
||||
err2 = fmt.Errorf("error unmounting container %q: %w", i.containerID, err2)
|
||||
}
|
||||
err = err2
|
||||
}
|
||||
|
@ -282,7 +282,7 @@ func (i *containerImageRef) NewImageSource(ctx context.Context, sc *types.System
|
|||
manifestType := i.preferredManifestType
|
||||
// If it's not a format we support, return an error.
|
||||
if manifestType != v1.MediaTypeImageManifest && manifestType != manifest.DockerV2Schema2MediaType {
|
||||
return nil, errors.Errorf("no supported manifest types (attempted to use %q, only know %q and %q)",
|
||||
return nil, fmt.Errorf("no supported manifest types (attempted to use %q, only know %q and %q)",
|
||||
manifestType, v1.MediaTypeImageManifest, manifest.DockerV2Schema2MediaType)
|
||||
}
|
||||
// Start building the list of layers using the read-write layer.
|
||||
|
@ -290,7 +290,7 @@ func (i *containerImageRef) NewImageSource(ctx context.Context, sc *types.System
|
|||
layerID := i.layerID
|
||||
layer, err := i.store.Layer(layerID)
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "unable to read layer %q", layerID)
|
||||
return nil, fmt.Errorf("unable to read layer %q: %w", layerID, err)
|
||||
}
|
||||
// Walk the list of parent layers, prepending each as we go. If we're squashing,
|
||||
// stop at the layer ID of the top layer, which we won't really be using anyway.
|
||||
|
@ -303,7 +303,7 @@ func (i *containerImageRef) NewImageSource(ctx context.Context, sc *types.System
|
|||
}
|
||||
layer, err = i.store.Layer(layerID)
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "unable to read layer %q", layerID)
|
||||
return nil, fmt.Errorf("unable to read layer %q: %w", layerID, err)
|
||||
}
|
||||
}
|
||||
logrus.Debugf("layer list: %q", layers)
|
||||
|
@ -311,7 +311,7 @@ func (i *containerImageRef) NewImageSource(ctx context.Context, sc *types.System
|
|||
// Make a temporary directory to hold blobs.
|
||||
path, err := ioutil.TempDir(os.TempDir(), define.Package)
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "error creating temporary directory to hold layer blobs")
|
||||
return nil, fmt.Errorf("error creating temporary directory to hold layer blobs: %w", err)
|
||||
}
|
||||
logrus.Debugf("using %q to hold temporary data", path)
|
||||
defer func() {
|
||||
|
@ -343,7 +343,7 @@ func (i *containerImageRef) NewImageSource(ctx context.Context, sc *types.System
|
|||
// Look up this layer.
|
||||
layer, err := i.store.Layer(layerID)
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "unable to locate layer %q", layerID)
|
||||
return nil, fmt.Errorf("unable to locate layer %q: %w", layerID, err)
|
||||
}
|
||||
// If we're up to the final layer, but we don't want to include
|
||||
// a diff for it, we're done.
|
||||
|
@ -400,7 +400,7 @@ func (i *containerImageRef) NewImageSource(ctx context.Context, sc *types.System
|
|||
// Extract this layer, one of possibly many.
|
||||
rc, err = i.store.Diff("", layerID, diffOptions)
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "error extracting %s", what)
|
||||
return nil, fmt.Errorf("error extracting %s: %w", what, err)
|
||||
}
|
||||
}
|
||||
srcHasher := digest.Canonical.Digester()
|
||||
|
@ -408,7 +408,7 @@ func (i *containerImageRef) NewImageSource(ctx context.Context, sc *types.System
|
|||
layerFile, err := os.OpenFile(filepath.Join(path, "layer"), os.O_CREATE|os.O_WRONLY, 0600)
|
||||
if err != nil {
|
||||
rc.Close()
|
||||
return nil, errors.Wrapf(err, "error opening file for %s", what)
|
||||
return nil, fmt.Errorf("error opening file for %s: %w", what, err)
|
||||
}
|
||||
|
||||
counter := ioutils.NewWriteCounter(layerFile)
|
||||
|
@ -427,7 +427,7 @@ func (i *containerImageRef) NewImageSource(ctx context.Context, sc *types.System
|
|||
if err != nil {
|
||||
layerFile.Close()
|
||||
rc.Close()
|
||||
return nil, errors.Wrapf(err, "error compressing %s", what)
|
||||
return nil, fmt.Errorf("error compressing %s: %w", what, err)
|
||||
}
|
||||
writer := io.MultiWriter(writeCloser, srcHasher.Hash())
|
||||
// Use specified timestamps in the layer, if we're doing that for
|
||||
|
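Around this hunk the layer is hashed twice: srcHasher sees the uncompressed stream via io.MultiWriter, while a second digester sees the compressed output. A stand-alone sketch of that flow (the gzip writer stands in for the pluggable compressor used by the vendored code):

package main

import (
	"compress/gzip"
	"fmt"
	"io"
	"strings"

	digest "github.com/opencontainers/go-digest"
)

func main() {
	layer := strings.NewReader("example layer contents")

	srcHasher := digest.Canonical.Digester()  // digest of the uncompressed bytes
	destHasher := digest.Canonical.Digester() // digest of the compressed blob

	gz := gzip.NewWriter(io.MultiWriter(io.Discard, destHasher.Hash()))
	writer := io.MultiWriter(gz, srcHasher.Hash())

	if _, err := io.Copy(writer, layer); err != nil {
		fmt.Println("error compressing layer:", err)
		return
	}
	gz.Close()

	fmt.Println("uncompressed digest:", srcHasher.Digest())
	fmt.Println("compressed digest:  ", destHasher.Digest())
}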
@ -468,11 +468,11 @@ func (i *containerImageRef) NewImageSource(ctx context.Context, sc *types.System
|
|||
}
|
||||
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "error storing %s to file", what)
|
||||
return nil, fmt.Errorf("error storing %s to file: %w", what, err)
|
||||
}
|
||||
if i.compression == archive.Uncompressed {
|
||||
if size != counter.Count {
|
||||
return nil, errors.Errorf("error storing %s to file: inconsistent layer size (copied %d, wrote %d)", what, size, counter.Count)
|
||||
return nil, fmt.Errorf("error storing %s to file: inconsistent layer size (copied %d, wrote %d)", what, size, counter.Count)
|
||||
}
|
||||
} else {
|
||||
size = counter.Count
|
||||
|
@ -481,7 +481,7 @@ func (i *containerImageRef) NewImageSource(ctx context.Context, sc *types.System
|
|||
// Rename the layer so that we can more easily find it by digest later.
|
||||
finalBlobName := filepath.Join(path, destHasher.Digest().String())
|
||||
if err = os.Rename(filepath.Join(path, "layer"), finalBlobName); err != nil {
|
||||
return nil, errors.Wrapf(err, "error storing %s to file while renaming %q to %q", what, filepath.Join(path, "layer"), finalBlobName)
|
||||
return nil, fmt.Errorf("error storing %s to file while renaming %q to %q: %w", what, filepath.Join(path, "layer"), finalBlobName, err)
|
||||
}
|
||||
// Add a note in the manifest about the layer. The blobs are identified by their possibly-
|
||||
// compressed blob digests.
|
||||
|
@ -574,11 +574,11 @@ func (i *containerImageRef) NewImageSource(ctx context.Context, sc *types.System
|
|||
if baseImageHistoryLen != 0 {
|
||||
expectedDiffIDs := expectedOCIDiffIDs(oimage)
|
||||
if len(oimage.RootFS.DiffIDs) != expectedDiffIDs {
|
||||
return nil, errors.Errorf("internal error: history lists %d non-empty layers, but we have %d layers on disk", expectedDiffIDs, len(oimage.RootFS.DiffIDs))
|
||||
return nil, fmt.Errorf("internal error: history lists %d non-empty layers, but we have %d layers on disk", expectedDiffIDs, len(oimage.RootFS.DiffIDs))
|
||||
}
|
||||
expectedDiffIDs = expectedDockerDiffIDs(dimage)
|
||||
if len(dimage.RootFS.DiffIDs) != expectedDiffIDs {
|
||||
return nil, errors.Errorf("internal error: history lists %d non-empty layers, but we have %d layers on disk", expectedDiffIDs, len(dimage.RootFS.DiffIDs))
|
||||
return nil, fmt.Errorf("internal error: history lists %d non-empty layers, but we have %d layers on disk", expectedDiffIDs, len(dimage.RootFS.DiffIDs))
|
||||
}
|
||||
}
|
||||
}
|
||||
|
@ -586,7 +586,7 @@ func (i *containerImageRef) NewImageSource(ctx context.Context, sc *types.System
|
|||
// Encode the image configuration blob.
|
||||
oconfig, err := json.Marshal(&oimage)
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "error encoding %#v as json", oimage)
|
||||
return nil, fmt.Errorf("error encoding %#v as json: %w", oimage, err)
|
||||
}
|
||||
logrus.Debugf("OCIv1 config = %s", oconfig)
|
||||
|
||||
|
@ -598,14 +598,14 @@ func (i *containerImageRef) NewImageSource(ctx context.Context, sc *types.System
|
|||
// Encode the manifest.
|
||||
omanifestbytes, err := json.Marshal(&omanifest)
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "error encoding %#v as json", omanifest)
|
||||
return nil, fmt.Errorf("error encoding %#v as json: %w", omanifest, err)
|
||||
}
|
||||
logrus.Debugf("OCIv1 manifest = %s", omanifestbytes)
|
||||
|
||||
// Encode the image configuration blob.
|
||||
dconfig, err := json.Marshal(&dimage)
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "error encoding %#v as json", dimage)
|
||||
return nil, fmt.Errorf("error encoding %#v as json: %w", dimage, err)
|
||||
}
|
||||
logrus.Debugf("Docker v2s2 config = %s", dconfig)
|
||||
|
||||
|
@ -617,7 +617,7 @@ func (i *containerImageRef) NewImageSource(ctx context.Context, sc *types.System
|
|||
// Encode the manifest.
|
||||
dmanifestbytes, err := json.Marshal(&dmanifest)
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "error encoding %#v as json", dmanifest)
|
||||
return nil, fmt.Errorf("error encoding %#v as json: %w", dmanifest, err)
|
||||
}
|
||||
logrus.Debugf("Docker v2s2 manifest = %s", dmanifestbytes)
|
||||
|
||||
|
@ -654,7 +654,7 @@ func (i *containerImageRef) NewImageSource(ctx context.Context, sc *types.System
|
|||
}
|
||||
|
||||
func (i *containerImageRef) NewImageDestination(ctx context.Context, sc *types.SystemContext) (types.ImageDestination, error) {
|
||||
return nil, errors.Errorf("can't write to a container")
|
||||
return nil, errors.New("can't write to a container")
|
||||
}
|
||||
|
||||
func (i *containerImageRef) DockerReference() reference.Named {
|
||||
|
@ -688,7 +688,7 @@ func (i *containerImageRef) Transport() types.ImageTransport {
|
|||
func (i *containerImageSource) Close() error {
|
||||
err := os.RemoveAll(i.path)
|
||||
if err != nil {
|
||||
return errors.Wrapf(err, "error removing layer blob directory")
|
||||
return fmt.Errorf("error removing layer blob directory: %w", err)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
@ -754,13 +754,13 @@ func (i *containerImageSource) GetBlob(ctx context.Context, blob types.BlobInfo,
|
|||
}
|
||||
if err != nil || layerReadCloser == nil || size == -1 {
|
||||
logrus.Debugf("error reading layer %q: %v", blob.Digest.String(), err)
|
||||
return nil, -1, errors.Wrap(err, "error opening layer blob")
|
||||
return nil, -1, fmt.Errorf("error opening layer blob: %w", err)
|
||||
}
|
||||
logrus.Debugf("reading layer %q", blob.Digest.String())
|
||||
closer := func() error {
|
||||
logrus.Debugf("finished reading layer %q", blob.Digest.String())
|
||||
if err := layerReadCloser.Close(); err != nil {
|
||||
return errors.Wrapf(err, "error closing layer %q after reading", blob.Digest.String())
|
||||
return fmt.Errorf("error closing layer %q after reading: %w", blob.Digest.String(), err)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
@ -771,7 +771,7 @@ func (b *Builder) makeContainerImageRef(options CommitOptions) (*containerImageR
|
|||
var name reference.Named
|
||||
container, err := b.store.Container(b.ContainerID)
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "error locating container %q", b.ContainerID)
|
||||
return nil, fmt.Errorf("error locating container %q: %w", b.ContainerID, err)
|
||||
}
|
||||
if len(container.Names) > 0 {
|
||||
if parsed, err2 := reference.ParseNamed(container.Names[0]); err2 == nil {
|
||||
|
@ -788,11 +788,11 @@ func (b *Builder) makeContainerImageRef(options CommitOptions) (*containerImageR
|
|||
}
|
||||
oconfig, err := json.Marshal(&b.OCIv1)
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "error encoding OCI-format image configuration %#v", b.OCIv1)
|
||||
return nil, fmt.Errorf("error encoding OCI-format image configuration %#v: %w", b.OCIv1, err)
|
||||
}
|
||||
dconfig, err := json.Marshal(&b.Docker)
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "error encoding docker-format image configuration %#v", b.Docker)
|
||||
return nil, fmt.Errorf("error encoding docker-format image configuration %#v: %w", b.Docker, err)
|
||||
}
|
||||
var created *time.Time
|
||||
if options.HistoryTimestamp != nil {
|
||||
|
@ -848,7 +848,7 @@ func (b *Builder) makeContainerImageRef(options CommitOptions) (*containerImageR
|
|||
func (b *Builder) ExtractRootfs(options CommitOptions, opts ExtractRootfsOptions) (io.ReadCloser, chan error, error) {
|
||||
src, err := b.makeContainerImageRef(options)
|
||||
if err != nil {
|
||||
return nil, nil, errors.Wrapf(err, "error creating image reference for container %q to extract its contents", b.ContainerID)
|
||||
return nil, nil, fmt.Errorf("error creating image reference for container %q to extract its contents: %w", b.ContainerID, err)
|
||||
}
|
||||
return src.extractRootfs(opts)
|
||||
}
|
||||
|
|
|
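extractRootfs above returns a ReadCloser whose Close both closes the pipe and unmounts the container, keeping whichever error occurs. A stand-alone sketch of that wrapper pattern (readCloserWithCleanup is illustrative, not the ioutils type used by the vendored code):

package main

import (
	"fmt"
	"io"
	"strings"
)

type readCloserWithCleanup struct {
	io.Reader
	close   func() error
	cleanup func() error
}

// Close runs both steps and reports the first error encountered.
func (r *readCloserWithCleanup) Close() error {
	err := r.close()
	if err2 := r.cleanup(); err == nil {
		err = err2
	}
	return err
}

func main() {
	rc := &readCloserWithCleanup{
		Reader:  strings.NewReader("layer data"),
		close:   func() error { return nil },
		cleanup: func() error { fmt.Println("unmount container"); return nil },
	}
	io.Copy(io.Discard, rc)
	fmt.Println(rc.Close())
}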
@ -3,6 +3,7 @@ package imagebuildah
|
|||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
|
@ -33,7 +34,6 @@ import (
|
|||
specs "github.com/opencontainers/runtime-spec/specs-go"
|
||||
"github.com/openshift/imagebuilder"
|
||||
"github.com/openshift/imagebuilder/dockerfile/parser"
|
||||
"github.com/pkg/errors"
|
||||
"github.com/sirupsen/logrus"
|
||||
"golang.org/x/sync/semaphore"
|
||||
)
|
||||
|
@ -68,10 +68,10 @@ func BuildDockerfiles(ctx context.Context, store storage.Store, options define.B
|
|||
}
|
||||
|
||||
if len(paths) == 0 {
|
||||
return "", nil, errors.Errorf("error building: no dockerfiles specified")
|
||||
return "", nil, errors.New("error building: no dockerfiles specified")
|
||||
}
|
||||
if len(options.Platforms) > 1 && options.IIDFile != "" {
|
||||
return "", nil, errors.Errorf("building multiple images, but iidfile %q can only be used to store one image ID", options.IIDFile)
|
||||
return "", nil, fmt.Errorf("building multiple images, but iidfile %q can only be used to store one image ID", options.IIDFile)
|
||||
}
|
||||
|
||||
logger := logrus.New()
|
||||
|
@ -94,7 +94,7 @@ func BuildDockerfiles(ctx context.Context, store storage.Store, options define.B
|
|||
continue
|
||||
}
|
||||
if _, err := util.VerifyTagName(tag); err != nil {
|
||||
return "", nil, errors.Wrapf(err, "tag %s", tag)
|
||||
return "", nil, fmt.Errorf("tag %s: %w", tag, err)
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -109,7 +109,7 @@ func BuildDockerfiles(ctx context.Context, store storage.Store, options define.B
|
|||
}
|
||||
if resp.ContentLength == 0 {
|
||||
resp.Body.Close()
|
||||
return "", nil, errors.Errorf("no contents in %q", dfile)
|
||||
return "", nil, fmt.Errorf("no contents in %q", dfile)
|
||||
}
|
||||
data = resp.Body
|
||||
} else {
|
||||
|
@ -127,31 +127,22 @@ func BuildDockerfiles(ctx context.Context, store storage.Store, options define.B
|
|||
}
|
||||
|
||||
var contents *os.File
|
||||
// If given a directory, add '/Dockerfile' to it.
|
||||
// If given a directory, error out since `-f` does not support a path to a directory
|
||||
if dinfo.Mode().IsDir() {
|
||||
for _, file := range []string{"Containerfile", "Dockerfile"} {
|
||||
f := filepath.Join(dfile, file)
|
||||
logger.Debugf("reading local %q", f)
|
||||
contents, err = os.Open(f)
|
||||
if err == nil {
|
||||
break
|
||||
}
|
||||
}
|
||||
} else {
|
||||
contents, err = os.Open(dfile)
|
||||
return "", nil, fmt.Errorf("containerfile: %q cannot be path to a directory", dfile)
|
||||
}
|
||||
|
||||
contents, err = os.Open(dfile)
|
||||
if err != nil {
|
||||
return "", nil, err
|
||||
}
|
||||
dinfo, err = contents.Stat()
|
||||
if err != nil {
|
||||
contents.Close()
|
||||
return "", nil, errors.Wrapf(err, "error reading info about %q", dfile)
|
||||
return "", nil, fmt.Errorf("error reading info about %q: %w", dfile, err)
|
||||
}
|
||||
if dinfo.Mode().IsRegular() && dinfo.Size() == 0 {
|
||||
contents.Close()
|
||||
return "", nil, errors.Errorf("no contents in %q", dfile)
|
||||
return "", nil, fmt.Errorf("no contents in %q", dfile)
|
||||
}
|
||||
data = contents
|
||||
}
|
||||
|
@ -258,7 +249,7 @@ func BuildDockerfiles(ctx context.Context, store storage.Store, options define.B
|
|||
logFile := platformOptions.LogFile + "_" + platformOptions.OS + "_" + platformOptions.Architecture
|
||||
f, err := os.OpenFile(logFile, os.O_CREATE|os.O_TRUNC|os.O_WRONLY, 0600)
|
||||
if err != nil {
|
||||
return errors.Wrapf(err, "opening logfile: %q", logFile)
|
||||
return fmt.Errorf("opening logfile: %q: %w", logFile, err)
|
||||
}
|
||||
defer f.Close()
|
||||
loggerPerPlatform = logrus.New()
|
||||
|
@ -302,7 +293,7 @@ func BuildDockerfiles(ctx context.Context, store storage.Store, options define.B
|
|||
// partially-populated state at any point if we're creating it
|
||||
// fresh.
|
||||
list, err := rt.LookupManifestList(manifestList)
|
||||
if err != nil && errors.Cause(err) == storage.ErrImageUnknown {
|
||||
if err != nil && errors.Is(err, storage.ErrImageUnknown) {
|
||||
list, err = rt.CreateManifestList(manifestList)
|
||||
}
|
||||
if err != nil {
|
||||
|
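The hunk above also switches sentinel checks from errors.Cause(err) == ... to errors.Is(err, ...), which works through %w chains without the pkg/errors helper. A stand-alone sketch (errImageUnknown stands in for storage.ErrImageUnknown):

package main

import (
	"errors"
	"fmt"
)

// errImageUnknown is an illustrative sentinel for this sketch.
var errImageUnknown = errors.New("image not known")

func lookupManifestList(name string) error {
	return fmt.Errorf("looking up manifest list %q: %w", name, errImageUnknown)
}

func main() {
	err := lookupManifestList("localhost/list")
	// old: errors.Cause(err) == storage.ErrImageUnknown
	// new: errors.Is(err, storage.ErrImageUnknown)
	fmt.Println(errors.Is(err, errImageUnknown)) // true
}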
@ -363,7 +354,7 @@ func BuildDockerfiles(ctx context.Context, store storage.Store, options define.B
|
|||
func buildDockerfilesOnce(ctx context.Context, store storage.Store, logger *logrus.Logger, logPrefix string, options define.BuildOptions, dockerfiles []string, dockerfilecontents [][]byte) (string, reference.Canonical, error) {
|
||||
mainNode, err := imagebuilder.ParseDockerfile(bytes.NewReader(dockerfilecontents[0]))
|
||||
if err != nil {
|
||||
return "", nil, errors.Wrapf(err, "error parsing main Dockerfile: %s", dockerfiles[0])
|
||||
return "", nil, fmt.Errorf("error parsing main Dockerfile: %s: %w", dockerfiles[0], err)
|
||||
}
|
||||
|
||||
warnOnUnsetBuildArgs(logger, mainNode, options.Args)
|
||||
|
@ -405,7 +396,7 @@ func buildDockerfilesOnce(ctx context.Context, store storage.Store, logger *logr
|
|||
for i, d := range dockerfilecontents[1:] {
|
||||
additionalNode, err := imagebuilder.ParseDockerfile(bytes.NewReader(d))
|
||||
if err != nil {
|
||||
return "", nil, errors.Wrapf(err, "error parsing additional Dockerfile %s", dockerfiles[i])
|
||||
return "", nil, fmt.Errorf("error parsing additional Dockerfile %s: %w", dockerfiles[i], err)
|
||||
}
|
||||
mainNode.Children = append(mainNode.Children, additionalNode.Children...)
|
||||
}
|
||||
|
@ -431,7 +422,7 @@ func buildDockerfilesOnce(ctx context.Context, store storage.Store, logger *logr
|
|||
labelLine = fmt.Sprintf("LABEL %q=%q\n", key, value)
|
||||
additionalNode, err := imagebuilder.ParseDockerfile(strings.NewReader(labelLine))
|
||||
if err != nil {
|
||||
return "", nil, errors.Wrapf(err, "error while adding additional LABEL steps")
|
||||
return "", nil, fmt.Errorf("error while adding additional LABEL steps: %w", err)
|
||||
}
|
||||
mainNode.Children = append(mainNode.Children, additionalNode.Children...)
|
||||
}
|
||||
|
@ -440,22 +431,22 @@ func buildDockerfilesOnce(ctx context.Context, store storage.Store, logger *logr
|
|||
|
||||
exec, err := newExecutor(logger, logPrefix, store, options, mainNode)
|
||||
if err != nil {
|
||||
return "", nil, errors.Wrapf(err, "error creating build executor")
|
||||
return "", nil, fmt.Errorf("error creating build executor: %w", err)
|
||||
}
|
||||
b := imagebuilder.NewBuilder(options.Args)
|
||||
defaultContainerConfig, err := config.Default()
|
||||
if err != nil {
|
||||
return "", nil, errors.Wrapf(err, "failed to get container config")
|
||||
return "", nil, fmt.Errorf("failed to get container config: %w", err)
|
||||
}
|
||||
b.Env = append(defaultContainerConfig.GetDefaultEnv(), b.Env...)
|
||||
stages, err := imagebuilder.NewStages(mainNode, b)
|
||||
if err != nil {
|
||||
return "", nil, errors.Wrap(err, "error reading multiple stages")
|
||||
return "", nil, fmt.Errorf("error reading multiple stages: %w", err)
|
||||
}
|
||||
if options.Target != "" {
|
||||
stagesTargeted, ok := stages.ThroughTarget(options.Target)
|
||||
if !ok {
|
||||
return "", nil, errors.Errorf("The target %q was not found in the provided Dockerfile", options.Target)
|
||||
return "", nil, fmt.Errorf("The target %q was not found in the provided Dockerfile", options.Target)
|
||||
}
|
||||
stages = stagesTargeted
|
||||
}
|
||||
|
@ -506,7 +497,7 @@ func preprocessContainerfileContents(logger *logrus.Logger, containerfile string
|
|||
if flags, ok := os.LookupEnv("BUILDAH_CPPFLAGS"); ok {
|
||||
args, err := shellwords.Parse(flags)
|
||||
if err != nil {
|
||||
return nil, errors.Errorf("error parsing BUILDAH_CPPFLAGS %q: %v", flags, err)
|
||||
return nil, fmt.Errorf("error parsing BUILDAH_CPPFLAGS %q: %v", flags, err)
|
||||
}
|
||||
cppArgs = append(cppArgs, args...)
|
||||
}
|
||||
|
@ -517,14 +508,14 @@ func preprocessContainerfileContents(logger *logrus.Logger, containerfile string
|
|||
cmd.Stderr = &stderrBuffer
|
||||
|
||||
if err = cmd.Start(); err != nil {
|
||||
return nil, errors.Wrapf(err, "preprocessing %s", containerfile)
|
||||
return nil, fmt.Errorf("preprocessing %s: %w", containerfile, err)
|
||||
}
|
||||
if err = cmd.Wait(); err != nil {
|
||||
if stderrBuffer.Len() != 0 {
|
||||
logger.Warnf("Ignoring %s\n", stderrBuffer.String())
|
||||
}
|
||||
if stdoutBuffer.Len() == 0 {
|
||||
return nil, errors.Wrapf(err, "error preprocessing %s: preprocessor produced no output", containerfile)
|
||||
return nil, fmt.Errorf("error preprocessing %s: preprocessor produced no output: %w", containerfile, err)
|
||||
}
|
||||
}
|
||||
return &stdoutBuffer, nil
|
||||
|
@ -536,18 +527,18 @@ func preprocessContainerfileContents(logger *logrus.Logger, containerfile string
|
|||
func platformsForBaseImages(ctx context.Context, logger *logrus.Logger, dockerfilepaths []string, dockerfiles [][]byte, from string, args map[string]string, additionalBuildContext map[string]*define.AdditionalBuildContext, systemContext *types.SystemContext) ([]struct{ OS, Arch, Variant string }, error) {
|
||||
baseImages, err := baseImages(dockerfilepaths, dockerfiles, from, args, additionalBuildContext)
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "determining list of base images")
|
||||
return nil, fmt.Errorf("determining list of base images: %w", err)
|
||||
}
|
||||
logrus.Debugf("unresolved base images: %v", baseImages)
|
||||
if len(baseImages) == 0 {
|
||||
return nil, errors.Wrapf(err, "build uses no non-scratch base images")
|
||||
return nil, fmt.Errorf("build uses no non-scratch base images: %w", err)
|
||||
}
|
||||
targetPlatforms := make(map[string]struct{})
|
||||
var platformList []struct{ OS, Arch, Variant string }
|
||||
for baseImageIndex, baseImage := range baseImages {
|
||||
resolved, err := shortnames.Resolve(systemContext, baseImage)
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "resolving image name %q", baseImage)
|
||||
return nil, fmt.Errorf("resolving image name %q: %w", baseImage, err)
|
||||
}
|
||||
var manifestBytes []byte
|
||||
var manifestType string
|
||||
|
@ -582,27 +573,27 @@ func platformsForBaseImages(ctx context.Context, logger *logrus.Logger, dockerfi
|
|||
}
|
||||
if len(manifestBytes) == 0 {
|
||||
if len(resolved.PullCandidates) > 0 {
|
||||
return nil, errors.Errorf("base image name %q didn't resolve to a manifest list", baseImage)
|
||||
return nil, fmt.Errorf("base image name %q didn't resolve to a manifest list", baseImage)
|
||||
}
|
||||
return nil, errors.Errorf("base image name %q didn't resolve to anything", baseImage)
|
||||
return nil, fmt.Errorf("base image name %q didn't resolve to anything", baseImage)
|
||||
}
|
||||
if manifestType != v1.MediaTypeImageIndex {
|
||||
list, err := manifest.ListFromBlob(manifestBytes, manifestType)
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "parsing manifest list from base image %q", baseImage)
|
||||
return nil, fmt.Errorf("parsing manifest list from base image %q: %w", baseImage, err)
|
||||
}
|
||||
list, err = list.ConvertToMIMEType(v1.MediaTypeImageIndex)
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "converting manifest list from base image %q to v2s2 list", baseImage)
|
||||
return nil, fmt.Errorf("converting manifest list from base image %q to v2s2 list: %w", baseImage, err)
|
||||
}
|
||||
manifestBytes, err = list.Serialize()
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "encoding converted v2s2 manifest list for base image %q", baseImage)
|
||||
return nil, fmt.Errorf("encoding converted v2s2 manifest list for base image %q: %w", baseImage, err)
|
||||
}
|
||||
}
|
||||
index, err := manifest.OCI1IndexFromManifest(manifestBytes)
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "decoding manifest list for base image %q", baseImage)
|
||||
return nil, fmt.Errorf("decoding manifest list for base image %q: %w", baseImage, err)
|
||||
}
|
||||
if baseImageIndex == 0 {
|
||||
// populate the list with the first image's normalized platforms
|
||||
|
@ -641,7 +632,7 @@ func platformsForBaseImages(ctx context.Context, logger *logrus.Logger, dockerfi
|
|||
for platform := range targetPlatforms {
|
||||
platform, err := platforms.Parse(platform)
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "parsing platform double/triple %q", platform)
|
||||
return nil, fmt.Errorf("parsing platform double/triple %q: %w", platform, err)
|
||||
}
|
||||
platformList = append(platformList, struct{ OS, Arch, Variant string }{
|
||||
OS: platform.OS,
|
||||
|
@ -665,13 +656,13 @@ func platformsForBaseImages(ctx context.Context, logger *logrus.Logger, dockerfi
|
|||
func baseImages(dockerfilenames []string, dockerfilecontents [][]byte, from string, args map[string]string, additionalBuildContext map[string]*define.AdditionalBuildContext) ([]string, error) {
|
||||
mainNode, err := imagebuilder.ParseDockerfile(bytes.NewReader(dockerfilecontents[0]))
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "error parsing main Dockerfile: %s", dockerfilenames[0])
|
||||
return nil, fmt.Errorf("error parsing main Dockerfile: %s: %w", dockerfilenames[0], err)
|
||||
}
|
||||
|
||||
for i, d := range dockerfilecontents[1:] {
|
||||
additionalNode, err := imagebuilder.ParseDockerfile(bytes.NewReader(d))
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "error parsing additional Dockerfile %s", dockerfilenames[i])
|
||||
return nil, fmt.Errorf("error parsing additional Dockerfile %s: %w", dockerfilenames[i], err)
|
||||
}
|
||||
mainNode.Children = append(mainNode.Children, additionalNode.Children...)
|
||||
}
|
||||
|
@ -679,12 +670,12 @@ func baseImages(dockerfilenames []string, dockerfilecontents [][]byte, from stri
|
|||
b := imagebuilder.NewBuilder(args)
|
||||
defaultContainerConfig, err := config.Default()
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "failed to get container config")
|
||||
return nil, fmt.Errorf("failed to get container config: %w", err)
|
||||
}
|
||||
b.Env = defaultContainerConfig.GetDefaultEnv()
|
||||
stages, err := imagebuilder.NewStages(mainNode, b)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "error reading multiple stages")
|
||||
return nil, fmt.Errorf("error reading multiple stages: %w", err)
|
||||
}
|
||||
var baseImages []string
|
||||
nicknames := make(map[string]bool)
|
||||
|
|
|
@ -2,6 +2,7 @@ package imagebuildah
|
|||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
|
@ -34,7 +35,6 @@ import (
|
|||
v1 "github.com/opencontainers/image-spec/specs-go/v1"
|
||||
"github.com/openshift/imagebuilder"
|
||||
"github.com/openshift/imagebuilder/dockerfile/parser"
|
||||
"github.com/pkg/errors"
|
||||
"github.com/sirupsen/logrus"
|
||||
"golang.org/x/sync/semaphore"
|
||||
)
|
||||
|
@ -151,7 +151,7 @@ type imageTypeAndHistoryAndDiffIDs struct {
|
|||
func newExecutor(logger *logrus.Logger, logPrefix string, store storage.Store, options define.BuildOptions, mainNode *parser.Node) (*Executor, error) {
|
||||
defaultContainerConfig, err := config.Default()
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "failed to get container config")
|
||||
return nil, fmt.Errorf("failed to get container config: %w", err)
|
||||
}
|
||||
|
||||
excludes := options.Excludes
|
||||
|
@ -396,7 +396,7 @@ func (b *Executor) waitForStage(ctx context.Context, name string, stages imagebu
|
|||
b.stagesSemaphore.Release(1)
|
||||
time.Sleep(time.Millisecond * 10)
|
||||
if err := b.stagesSemaphore.Acquire(ctx, 1); err != nil {
|
||||
return true, errors.Wrapf(err, "error reacquiring job semaphore")
|
||||
return true, fmt.Errorf("error reacquiring job semaphore: %w", err)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
@ -411,20 +411,20 @@ func (b *Executor) getImageTypeAndHistoryAndDiffIDs(ctx context.Context, imageID
|
|||
}
|
||||
imageRef, err := is.Transport.ParseStoreReference(b.store, "@"+imageID)
|
||||
if err != nil {
|
||||
return "", nil, nil, errors.Wrapf(err, "error getting image reference %q", imageID)
|
||||
return "", nil, nil, fmt.Errorf("error getting image reference %q: %w", imageID, err)
|
||||
}
|
||||
ref, err := imageRef.NewImage(ctx, nil)
|
||||
if err != nil {
|
||||
return "", nil, nil, errors.Wrapf(err, "error creating new image from reference to image %q", imageID)
|
||||
return "", nil, nil, fmt.Errorf("error creating new image from reference to image %q: %w", imageID, err)
|
||||
}
|
||||
defer ref.Close()
|
||||
oci, err := ref.OCIConfig(ctx)
|
||||
if err != nil {
|
||||
return "", nil, nil, errors.Wrapf(err, "error getting possibly-converted OCI config of image %q", imageID)
|
||||
return "", nil, nil, fmt.Errorf("error getting possibly-converted OCI config of image %q: %w", imageID, err)
|
||||
}
|
||||
manifestBytes, manifestFormat, err := ref.Manifest(ctx)
|
||||
if err != nil {
|
||||
return "", nil, nil, errors.Wrapf(err, "error getting manifest of image %q", imageID)
|
||||
return "", nil, nil, fmt.Errorf("error getting manifest of image %q: %w", imageID, err)
|
||||
}
|
||||
if manifestFormat == "" && len(manifestBytes) > 0 {
|
||||
manifestFormat = manifest.GuessMIMEType(manifestBytes)
|
||||
|
@ -510,6 +510,25 @@ func (b *Executor) buildStage(ctx context.Context, cleanupStages map[int]*StageE
return imageID, ref, nil
}
type stageDependencyInfo struct {
Name string
Position int
Needs []string
NeededByTarget bool
}
// Marks `NeededByTarget` as true for the given stage and all its dependency stages as true recursively.
func markDependencyStagesForTarget(dependencyMap map[string]*stageDependencyInfo, stage string) {
if stageDependencyInfo, ok := dependencyMap[stage]; ok {
if !stageDependencyInfo.NeededByTarget {
stageDependencyInfo.NeededByTarget = true
for _, need := range stageDependencyInfo.Needs {
markDependencyStagesForTarget(dependencyMap, need)
}
}
}
}
// Build takes care of the details of running Prepare/Execute/Commit/Delete
// over each of the one or more parsed Dockerfiles and stages.
func (b *Executor) Build(ctx context.Context, stages imagebuilder.Stages) (imageID string, ref reference.Canonical, err error) {
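The hunk above is the core of the new stage-skipping logic: Build() records, for every stage, which other stages it needs, and then marks the target stage plus its transitive dependencies. A minimal, self-contained sketch of that marking step follows; the stage names are hypothetical, while the type and function bodies are copied from the hunk above:

package main

import "fmt"

type stageDependencyInfo struct {
	Name           string
	Position       int
	Needs          []string
	NeededByTarget bool
}

func markDependencyStagesForTarget(dependencyMap map[string]*stageDependencyInfo, stage string) {
	if stageDependencyInfo, ok := dependencyMap[stage]; ok {
		if !stageDependencyInfo.NeededByTarget {
			stageDependencyInfo.NeededByTarget = true
			for _, need := range stageDependencyInfo.Needs {
				markDependencyStagesForTarget(dependencyMap, need)
			}
		}
	}
}

func main() {
	// Hypothetical three-stage build: "final" (the target) copies from "builder";
	// "unused" is never referenced by the target.
	deps := map[string]*stageDependencyInfo{
		"builder": {Name: "builder", Position: 0},
		"unused":  {Name: "unused", Position: 1},
		"final":   {Name: "final", Position: 2, Needs: []string{"builder"}},
	}
	// The last stage is always the target, so marking starts there.
	markDependencyStagesForTarget(deps, "final")
	for _, name := range []string{"builder", "unused", "final"} {
		fmt.Printf("%s neededByTarget=%v\n", name, deps[name].NeededByTarget)
	}
	// Output:
	// builder neededByTarget=true
	// unused neededByTarget=false
	// final neededByTarget=true
}

Stages left with NeededByTarget == false are skipped later by the per-stage workers, as shown further down in this diff.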
@ -566,7 +585,7 @@ func (b *Executor) Build(ctx context.Context, stages imagebuilder.Stages) (image
}
if _, err := b.store.DeleteImage(removeID, true); err != nil {
logrus.Debugf("failed to remove intermediate image %q: %v", removeID, err)
if b.forceRmIntermediateCtrs || errors.Cause(err) != storage.ErrImageUsedByContainer {
if b.forceRmIntermediateCtrs || !errors.Is(err, storage.ErrImageUsedByContainer) {
lastErr = err
}
}
@ -588,15 +607,20 @@ func (b *Executor) Build(ctx context.Context, stages imagebuilder.Stages) (image
if err == nil {
err = cleanupErr
} else {
err = errors.Wrap(err, cleanupErr.Error())
err = fmt.Errorf("%v: %w", cleanupErr.Error(), err)
}
}
}()
// dependencyMap contains dependencyInfo for each stage,
// dependencyInfo is used later to mark if a particular
// stage is needed by target or not.
dependencyMap := make(map[string]*stageDependencyInfo)
// Build maps of every named base image and every referenced stage root
// filesystem. Individual stages can use them to determine whether or
// not they can skip certain steps near the end of their stages.
for stageIndex, stage := range stages {
dependencyMap[stage.Name] = &stageDependencyInfo{Name: stage.Name, Position: stage.Position}
node := stage.Node // first line
for node != nil { // each line
for _, child := range node.Children { // tokens on this line, though we only care about the first
@ -620,10 +644,20 @@ func (b *Executor) Build(ctx context.Context, stages imagebuilder.Stages) (image
|
|||
userArgs := argsMapToSlice(stage.Builder.Args)
|
||||
baseWithArg, err := imagebuilder.ProcessWord(base, userArgs)
|
||||
if err != nil {
|
||||
return "", nil, errors.Wrapf(err, "while replacing arg variables with values for format %q", base)
|
||||
return "", nil, fmt.Errorf("while replacing arg variables with values for format %q: %w", base, err)
|
||||
}
|
||||
b.baseMap[baseWithArg] = true
|
||||
logrus.Debugf("base for stage %d: %q", stageIndex, base)
|
||||
// Check if selected base is not an additional
|
||||
// build context and if base is a valid stage
|
||||
// add it to current stage's dependency tree.
|
||||
if _, ok := b.additionalBuildContexts[baseWithArg]; !ok {
|
||||
if _, ok := dependencyMap[baseWithArg]; ok {
|
||||
// update current stage's dependency info
|
||||
currentStageInfo := dependencyMap[stage.Name]
|
||||
currentStageInfo.Needs = append(currentStageInfo.Needs, baseWithArg)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
case "ADD", "COPY":
@ -636,12 +670,68 @@ func (b *Executor) Build(ctx context.Context, stages imagebuilder.Stages) (image
|
|||
rootfs := strings.TrimPrefix(flag, "--from=")
|
||||
b.rootfsMap[rootfs] = true
|
||||
logrus.Debugf("rootfs needed for COPY in stage %d: %q", stageIndex, rootfs)
|
||||
// Populate dependency tree and check
|
||||
// if following ADD or COPY needs any other
|
||||
// stage.
|
||||
stageName := rootfs
|
||||
// If --from=<index> convert index to name
|
||||
if index, err := strconv.Atoi(stageName); err == nil {
|
||||
stageName = stages[index].Name
|
||||
}
|
||||
// Check if selected base is not an additional
|
||||
// build context and if base is a valid stage
|
||||
// add it to current stage's dependency tree.
|
||||
if _, ok := b.additionalBuildContexts[stageName]; !ok {
|
||||
if _, ok := dependencyMap[stageName]; ok {
|
||||
// update current stage's dependency info
|
||||
currentStageInfo := dependencyMap[stage.Name]
|
||||
currentStageInfo.Needs = append(currentStageInfo.Needs, stageName)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
case "RUN":
|
||||
for _, flag := range child.Flags { // flags for this instruction
|
||||
// We need to populate dependency tree of stages
|
||||
// if it is using `--mount` and `from=` field is set
|
||||
// and `from=` points to a stage consider it in
|
||||
// dependency calculation.
|
||||
if strings.HasPrefix(flag, "--mount=") && strings.Contains(flag, "from") {
|
||||
mountFlags := strings.TrimPrefix(flag, "--mount=")
|
||||
fields := strings.Split(mountFlags, ",")
|
||||
for _, field := range fields {
|
||||
if strings.HasPrefix(field, "from=") {
|
||||
fromField := strings.SplitN(field, "=", 2)
|
||||
if len(fromField) > 1 {
|
||||
mountFrom := fromField[1]
|
||||
// Check if this base is a stage if yes
|
||||
// add base to current stage's dependency tree
|
||||
// but also confirm if this is not in additional context.
|
||||
if _, ok := b.additionalBuildContexts[mountFrom]; !ok {
|
||||
if _, ok := dependencyMap[mountFrom]; ok {
|
||||
// update current stage's dependency info
|
||||
currentStageInfo := dependencyMap[stage.Name]
|
||||
currentStageInfo.Needs = append(currentStageInfo.Needs, mountFrom)
|
||||
}
|
||||
}
|
||||
} else {
|
||||
return "", nil, fmt.Errorf("invalid value for field `from=`: %q", fromField[1])
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
node = node.Next // next line
|
||||
}
|
||||
// Last stage is always target stage.
|
||||
// Since last/target stage is processed
|
||||
// let's calculate dependency map of stages
|
||||
// so we can mark stages which can be skipped.
|
||||
if stage.Position == (len(stages) - 1) {
|
||||
markDependencyStagesForTarget(dependencyMap, stage.Name)
|
||||
}
|
||||
}
|
||||
|
||||
type Result struct {
@ -685,9 +775,9 @@ func (b *Executor) Build(ctx context.Context, stages imagebuilder.Stages) (image
|
|||
if cancel || cleanupStages == nil {
|
||||
var err error
|
||||
if stages[index].Name != strconv.Itoa(index) {
|
||||
err = errors.Errorf("not building stage %d: build canceled", index)
|
||||
err = fmt.Errorf("not building stage %d: build canceled", index)
|
||||
} else {
|
||||
err = errors.Errorf("not building stage %d (%s): build canceled", index, stages[index].Name)
|
||||
err = fmt.Errorf("not building stage %d (%s): build canceled", index, stages[index].Name)
|
||||
}
|
||||
ch <- Result{
|
||||
Index: index,
@ -695,6 +785,18 @@ func (b *Executor) Build(ctx context.Context, stages imagebuilder.Stages) (image
|
|||
}
|
||||
return
|
||||
}
|
||||
// Skip stage if it is not needed by TargetStage
|
||||
// or any of its dependency stages.
|
||||
if stageDependencyInfo, ok := dependencyMap[stages[index].Name]; ok {
|
||||
if !stageDependencyInfo.NeededByTarget {
|
||||
logrus.Debugf("Skipping stage with Name %q and index %d since its not needed by the target stage", stages[index].Name, index)
|
||||
ch <- Result{
|
||||
Index: index,
|
||||
Error: nil,
|
||||
}
|
||||
return
|
||||
}
|
||||
}
|
||||
stageID, stageRef, stageErr := b.buildStage(ctx, cleanupStages, stages, index)
|
||||
if stageErr != nil {
|
||||
cancel = true
@ -765,18 +867,18 @@ func (b *Executor) Build(ctx context.Context, stages imagebuilder.Stages) (image
|
|||
case is.Transport.Name():
|
||||
img, err := is.Transport.GetStoreImage(b.store, dest)
|
||||
if err != nil {
|
||||
return imageID, ref, errors.Wrapf(err, "error locating just-written image %q", transports.ImageName(dest))
|
||||
return imageID, ref, fmt.Errorf("error locating just-written image %q: %w", transports.ImageName(dest), err)
|
||||
}
|
||||
if len(b.additionalTags) > 0 {
|
||||
if err = util.AddImageNames(b.store, "", b.systemContext, img, b.additionalTags); err != nil {
|
||||
return imageID, ref, errors.Wrapf(err, "error setting image names to %v", append(img.Names, b.additionalTags...))
|
||||
return imageID, ref, fmt.Errorf("error setting image names to %v: %w", append(img.Names, b.additionalTags...), err)
|
||||
}
|
||||
logrus.Debugf("assigned names %v to image %q", img.Names, img.ID)
|
||||
}
|
||||
// Report back the caller the tags applied, if any.
|
||||
img, err = is.Transport.GetStoreImage(b.store, dest)
|
||||
if err != nil {
|
||||
return imageID, ref, errors.Wrapf(err, "error locating just-written image %q", transports.ImageName(dest))
|
||||
return imageID, ref, fmt.Errorf("error locating just-written image %q: %w", transports.ImageName(dest), err)
|
||||
}
|
||||
for _, name := range img.Names {
|
||||
fmt.Fprintf(b.out, "Successfully tagged %s\n", name)
@ -795,11 +897,11 @@ func (b *Executor) Build(ctx context.Context, stages imagebuilder.Stages) (image
|
|||
logrus.Debugf("printing final image id %q", imageID)
|
||||
if b.iidfile != "" {
|
||||
if err = ioutil.WriteFile(b.iidfile, []byte("sha256:"+imageID), 0644); err != nil {
|
||||
return imageID, ref, errors.Wrapf(err, "failed to write image ID to file %q", b.iidfile)
|
||||
return imageID, ref, fmt.Errorf("failed to write image ID to file %q: %w", b.iidfile, err)
|
||||
}
|
||||
} else {
|
||||
if _, err := stdout.Write([]byte(imageID + "\n")); err != nil {
|
||||
return imageID, ref, errors.Wrapf(err, "failed to write image ID to stdout")
|
||||
return imageID, ref, fmt.Errorf("failed to write image ID to stdout: %w", err)
|
||||
}
|
||||
}
|
||||
return imageID, ref, nil
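The file-level churn above follows one pattern that runs through the whole commit: github.com/pkg/errors wrapping (errors.Wrapf, errors.Wrap, errors.Errorf, errors.Cause) is replaced by stdlib wrapping (fmt.Errorf with %w, errors.New, errors.Is). A minimal sketch of the equivalence, assuming an illustrative sentinel in place of the real storage.ErrImageUsedByContainer used above:

package main

import (
	"errors"
	"fmt"
)

// errImageUsedByContainer stands in for storage.ErrImageUsedByContainer.
var errImageUsedByContainer = errors.New("image is in use by a container")

func main() {
	// Old style: errors.Wrapf(err, "failed to remove intermediate image %q", id)
	// New style: fmt.Errorf("failed to remove intermediate image %q: %w", id, err)
	wrapped := fmt.Errorf("failed to remove intermediate image %q: %w", "deadbeef", errImageUsedByContainer)

	// errors.Cause(err) != sentinel (pkg/errors, unwraps only its own wrappers)
	// becomes
	// !errors.Is(err, sentinel) (stdlib, follows any %w chain)
	fmt.Println(errors.Is(wrapped, errImageUsedByContainer)) // true
}

The practical difference is that errors.Is walks any %w chain, so sentinel checks keep working even after intermediate layers re-wrap the error.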
@ -36,7 +36,6 @@ import (
|
|||
"github.com/opencontainers/runtime-spec/specs-go"
|
||||
"github.com/openshift/imagebuilder"
|
||||
"github.com/openshift/imagebuilder/dockerfile/parser"
|
||||
"github.com/pkg/errors"
|
||||
"github.com/sirupsen/logrus"
|
||||
)
@ -90,10 +89,10 @@ func (s *StageExecutor) Preserve(path string) error {
|
|||
// except ensure that it exists.
|
||||
createdDirPerms := os.FileMode(0755)
|
||||
if err := copier.Mkdir(s.mountPoint, filepath.Join(s.mountPoint, path), copier.MkdirOptions{ChmodNew: &createdDirPerms}); err != nil {
|
||||
return errors.Wrapf(err, "error ensuring volume path exists")
|
||||
return fmt.Errorf("error ensuring volume path exists: %w", err)
|
||||
}
|
||||
if err := s.volumeCacheInvalidate(path); err != nil {
|
||||
return errors.Wrapf(err, "error ensuring volume path %q is preserved", filepath.Join(s.mountPoint, path))
|
||||
return fmt.Errorf("error ensuring volume path %q is preserved: %w", filepath.Join(s.mountPoint, path), err)
|
||||
}
|
||||
return nil
|
||||
}
@ -101,7 +100,7 @@ func (s *StageExecutor) Preserve(path string) error {
|
|||
s.preserved++
|
||||
cacheDir, err := s.executor.store.ContainerDirectory(s.builder.ContainerID)
|
||||
if err != nil {
|
||||
return errors.Errorf("unable to locate temporary directory for container")
|
||||
return fmt.Errorf("unable to locate temporary directory for container")
|
||||
}
|
||||
cacheFile := filepath.Join(cacheDir, fmt.Sprintf("volume%d.tar", s.preserved))
|
||||
// Save info about the top level of the location that we'll be archiving.
@ -112,22 +111,22 @@ func (s *StageExecutor) Preserve(path string) error {
|
|||
if evaluated, err := copier.Eval(s.mountPoint, filepath.Join(s.mountPoint, path), copier.EvalOptions{}); err == nil {
|
||||
symLink, err := filepath.Rel(s.mountPoint, evaluated)
|
||||
if err != nil {
|
||||
return errors.Wrapf(err, "making evaluated path %q relative to %q", evaluated, s.mountPoint)
|
||||
return fmt.Errorf("making evaluated path %q relative to %q: %w", evaluated, s.mountPoint, err)
|
||||
}
|
||||
if strings.HasPrefix(symLink, ".."+string(os.PathSeparator)) {
|
||||
return errors.Errorf("evaluated path %q was not below %q", evaluated, s.mountPoint)
|
||||
return fmt.Errorf("evaluated path %q was not below %q", evaluated, s.mountPoint)
|
||||
}
|
||||
archivedPath = evaluated
|
||||
path = string(os.PathSeparator) + symLink
|
||||
} else {
|
||||
return errors.Wrapf(err, "error evaluating path %q", path)
|
||||
return fmt.Errorf("error evaluating path %q: %w", path, err)
|
||||
}
|
||||
|
||||
st, err := os.Stat(archivedPath)
|
||||
if os.IsNotExist(err) {
|
||||
createdDirPerms := os.FileMode(0755)
|
||||
if err = copier.Mkdir(s.mountPoint, archivedPath, copier.MkdirOptions{ChmodNew: &createdDirPerms}); err != nil {
|
||||
return errors.Wrapf(err, "error ensuring volume path exists")
|
||||
return fmt.Errorf("error ensuring volume path exists: %w", err)
|
||||
}
|
||||
st, err = os.Stat(archivedPath)
|
||||
}
@ -139,7 +138,7 @@ func (s *StageExecutor) Preserve(path string) error {
|
|||
if !s.volumes.Add(path) {
|
||||
// This path is not a subdirectory of a volume path that we're
|
||||
// already preserving, so adding it to the list should work.
|
||||
return errors.Errorf("error adding %q to the volume cache", path)
|
||||
return fmt.Errorf("error adding %q to the volume cache", path)
|
||||
}
|
||||
s.volumeCache[path] = cacheFile
|
||||
// Now prune cache files for volumes that are now supplanted by this one.
@ -204,14 +203,14 @@ func (s *StageExecutor) volumeCacheSaveVFS() (mounts []specs.Mount, err error) {
|
|||
for cachedPath, cacheFile := range s.volumeCache {
|
||||
archivedPath, err := copier.Eval(s.mountPoint, filepath.Join(s.mountPoint, cachedPath), copier.EvalOptions{})
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "error evaluating volume path")
|
||||
return nil, fmt.Errorf("error evaluating volume path: %w", err)
|
||||
}
|
||||
relativePath, err := filepath.Rel(s.mountPoint, archivedPath)
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "error converting %q into a path relative to %q", archivedPath, s.mountPoint)
|
||||
return nil, fmt.Errorf("error converting %q into a path relative to %q: %w", archivedPath, s.mountPoint, err)
|
||||
}
|
||||
if strings.HasPrefix(relativePath, ".."+string(os.PathSeparator)) {
|
||||
return nil, errors.Errorf("error converting %q into a path relative to %q", archivedPath, s.mountPoint)
|
||||
return nil, fmt.Errorf("error converting %q into a path relative to %q", archivedPath, s.mountPoint)
|
||||
}
|
||||
_, err = os.Stat(cacheFile)
|
||||
if err == nil {
@ -223,7 +222,7 @@ func (s *StageExecutor) volumeCacheSaveVFS() (mounts []specs.Mount, err error) {
|
|||
}
|
||||
createdDirPerms := os.FileMode(0755)
|
||||
if err := copier.Mkdir(s.mountPoint, archivedPath, copier.MkdirOptions{ChmodNew: &createdDirPerms}); err != nil {
|
||||
return nil, errors.Wrapf(err, "error ensuring volume path exists")
|
||||
return nil, fmt.Errorf("error ensuring volume path exists: %w", err)
|
||||
}
|
||||
logrus.Debugf("caching contents of volume %q in %q", archivedPath, cacheFile)
|
||||
cache, err := os.Create(cacheFile)
@ -233,12 +232,12 @@ func (s *StageExecutor) volumeCacheSaveVFS() (mounts []specs.Mount, err error) {
|
|||
defer cache.Close()
|
||||
rc, err := chrootarchive.Tar(archivedPath, nil, s.mountPoint)
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "error archiving %q", archivedPath)
|
||||
return nil, fmt.Errorf("error archiving %q: %w", archivedPath, err)
|
||||
}
|
||||
defer rc.Close()
|
||||
_, err = io.Copy(cache, rc)
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "error archiving %q to %q", archivedPath, cacheFile)
|
||||
return nil, fmt.Errorf("error archiving %q to %q: %w", archivedPath, cacheFile, err)
|
||||
}
|
||||
mount := specs.Mount{
|
||||
Source: archivedPath,
@ -256,7 +255,7 @@ func (s *StageExecutor) volumeCacheRestoreVFS() (err error) {
|
|||
for cachedPath, cacheFile := range s.volumeCache {
|
||||
archivedPath, err := copier.Eval(s.mountPoint, filepath.Join(s.mountPoint, cachedPath), copier.EvalOptions{})
|
||||
if err != nil {
|
||||
return errors.Wrapf(err, "error evaluating volume path")
|
||||
return fmt.Errorf("error evaluating volume path: %w", err)
|
||||
}
|
||||
logrus.Debugf("restoring contents of volume %q from %q", archivedPath, cacheFile)
|
||||
cache, err := os.Open(cacheFile)
@ -273,7 +272,7 @@ func (s *StageExecutor) volumeCacheRestoreVFS() (err error) {
|
|||
}
|
||||
err = chrootarchive.Untar(cache, archivedPath, nil)
|
||||
if err != nil {
|
||||
return errors.Wrapf(err, "error extracting archive at %q", archivedPath)
|
||||
return fmt.Errorf("error extracting archive at %q: %w", archivedPath, err)
|
||||
}
|
||||
if st, ok := s.volumeCacheInfo[cachedPath]; ok {
|
||||
if err := os.Chmod(archivedPath, st.Mode()); err != nil {
@ -302,7 +301,7 @@ func (s *StageExecutor) volumeCacheSaveOverlay() (mounts []specs.Mount, err erro
|
|||
for cachedPath := range s.volumeCache {
|
||||
err = copier.Mkdir(s.mountPoint, filepath.Join(s.mountPoint, cachedPath), copier.MkdirOptions{})
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "ensuring volume exists")
|
||||
return nil, fmt.Errorf("ensuring volume exists: %w", err)
|
||||
}
|
||||
volumePath := filepath.Join(s.mountPoint, cachedPath)
|
||||
mount := specs.Mount{
@ -367,7 +366,7 @@ func (s *StageExecutor) Copy(excludes []string, copies ...imagebuilder.Copy) err
|
|||
// value. Otherwise just return the value found.
|
||||
from, fromErr := imagebuilder.ProcessWord(copy.From, s.stage.Builder.Arguments())
|
||||
if fromErr != nil {
|
||||
return errors.Wrapf(fromErr, "unable to resolve argument %q", copy.From)
|
||||
return fmt.Errorf("unable to resolve argument %q: %w", copy.From, fromErr)
|
||||
}
|
||||
var additionalBuildContext *define.AdditionalBuildContext
|
||||
if foundContext, ok := s.executor.additionalBuildContexts[from]; ok {
@ -378,12 +377,11 @@ func (s *StageExecutor) Copy(excludes []string, copies ...imagebuilder.Copy) err
|
|||
// exists and if stage short_name matches any
|
||||
// additionalContext replace stage with additional
|
||||
// build context.
|
||||
if _, err := strconv.Atoi(from); err == nil {
|
||||
if stage, ok := s.executor.stages[from]; ok {
|
||||
if foundContext, ok := s.executor.additionalBuildContexts[stage.name]; ok {
|
||||
additionalBuildContext = foundContext
|
||||
}
|
||||
}
|
||||
if index, err := strconv.Atoi(from); err == nil {
|
||||
from = s.stages[index].Name
|
||||
}
|
||||
if foundContext, ok := s.executor.additionalBuildContexts[from]; ok {
|
||||
additionalBuildContext = foundContext
|
||||
}
|
||||
}
|
||||
if additionalBuildContext != nil {
@ -400,7 +398,7 @@ func (s *StageExecutor) Copy(excludes []string, copies ...imagebuilder.Copy) err
|
|||
// temp and point context to that.
|
||||
path, subdir, err := define.TempDirForURL(internalUtil.GetTempDir(), internal.BuildahExternalArtifactsDir, additionalBuildContext.Value)
|
||||
if err != nil {
|
||||
return errors.Wrapf(err, "unable to download context from external source %q", additionalBuildContext.Value)
|
||||
return fmt.Errorf("unable to download context from external source %q: %w", additionalBuildContext.Value, err)
|
||||
}
|
||||
// point context dir to the extracted path
|
||||
contextDir = filepath.Join(path, subdir)
@ -425,7 +423,7 @@ func (s *StageExecutor) Copy(excludes []string, copies ...imagebuilder.Copy) err
|
|||
contextDir = builder.MountPoint
|
||||
idMappingOptions = &builder.IDMappingOptions
|
||||
} else {
|
||||
return errors.Errorf("the stage %q has not been built", copy.From)
|
||||
return fmt.Errorf("the stage %q has not been built", copy.From)
|
||||
}
|
||||
} else if additionalBuildContext.IsImage {
|
||||
// Image was selected as additionalContext so only process image.
@ -450,7 +448,7 @@ func (s *StageExecutor) Copy(excludes []string, copies ...imagebuilder.Copy) err
|
|||
sources = append(sources, src)
|
||||
} else {
|
||||
// returns an error to be compatible with docker
|
||||
return errors.Errorf("source can't be a URL for COPY")
|
||||
return fmt.Errorf("source can't be a URL for COPY")
|
||||
}
|
||||
} else {
|
||||
sources = append(sources, filepath.Join(contextDir, src))
@ -484,7 +482,7 @@ func (s *StageExecutor) runStageMountPoints(mountList []string) (map[string]inte
|
|||
if strings.Contains(flag, "from") {
|
||||
arr := strings.SplitN(flag, ",", 2)
|
||||
if len(arr) < 2 {
|
||||
return nil, errors.Errorf("Invalid --mount command: %s", flag)
|
||||
return nil, fmt.Errorf("Invalid --mount command: %s", flag)
|
||||
}
|
||||
tokens := strings.Split(arr[1], ",")
|
||||
for _, val := range tokens {
@ -492,14 +490,14 @@ func (s *StageExecutor) runStageMountPoints(mountList []string) (map[string]inte
|
|||
switch kv[0] {
|
||||
case "from":
|
||||
if len(kv) == 1 {
|
||||
return nil, errors.Errorf("unable to resolve argument for `from=`: bad argument")
|
||||
return nil, fmt.Errorf("unable to resolve argument for `from=`: bad argument")
|
||||
}
|
||||
if kv[1] == "" {
|
||||
return nil, errors.Errorf("unable to resolve argument for `from=`: from points to an empty value")
|
||||
return nil, fmt.Errorf("unable to resolve argument for `from=`: from points to an empty value")
|
||||
}
|
||||
from, fromErr := imagebuilder.ProcessWord(kv[1], s.stage.Builder.Arguments())
|
||||
if fromErr != nil {
|
||||
return nil, errors.Wrapf(fromErr, "unable to resolve argument %q", kv[1])
|
||||
return nil, fmt.Errorf("unable to resolve argument %q: %w", kv[1], fromErr)
|
||||
}
|
||||
// If additional buildContext contains this
|
||||
// give priority to that and break if additional
@ -508,7 +506,7 @@ func (s *StageExecutor) runStageMountPoints(mountList []string) (map[string]inte
|
|||
if additionalBuildContext.IsImage {
|
||||
mountPoint, err := s.getImageRootfs(s.ctx, additionalBuildContext.Value)
|
||||
if err != nil {
|
||||
return nil, errors.Errorf("%s from=%s: image found with that name", flag, from)
|
||||
return nil, fmt.Errorf("%s from=%s: image found with that name", flag, from)
|
||||
}
|
||||
// The `from` in stageMountPoints should point
|
||||
// to `mountPoint` replaced from additional
@ -536,7 +534,7 @@ func (s *StageExecutor) runStageMountPoints(mountList []string) (map[string]inte
|
|||
// temp and point context to that.
|
||||
path, subdir, err := define.TempDirForURL(internalUtil.GetTempDir(), internal.BuildahExternalArtifactsDir, additionalBuildContext.Value)
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "unable to download context from external source %q", additionalBuildContext.Value)
|
||||
return nil, fmt.Errorf("unable to download context from external source %q: %w", additionalBuildContext.Value, err)
|
||||
}
|
||||
// point context dir to the extracted path
|
||||
mountPoint = filepath.Join(path, subdir)
@ -562,7 +560,7 @@ func (s *StageExecutor) runStageMountPoints(mountList []string) (map[string]inte
|
|||
} else {
|
||||
mountPoint, err := s.getImageRootfs(s.ctx, from)
|
||||
if err != nil {
|
||||
return nil, errors.Errorf("%s from=%s: no stage or image found with that name", flag, from)
|
||||
return nil, fmt.Errorf("%s from=%s: no stage or image found with that name", flag, from)
|
||||
}
|
||||
stageMountPoints[from] = internal.StageMountDetails{IsStage: false, MountPoint: mountPoint}
|
||||
break
@ -585,13 +583,13 @@ func (s *StageExecutor) Run(run imagebuilder.Run, config docker.Config) error {
|
|||
return err
|
||||
}
|
||||
if s.builder == nil {
|
||||
return errors.Errorf("no build container available")
|
||||
return fmt.Errorf("no build container available")
|
||||
}
|
||||
stdin := s.executor.in
|
||||
if stdin == nil {
|
||||
devNull, err := os.Open(os.DevNull)
|
||||
if err != nil {
|
||||
return errors.Errorf("error opening %q for reading: %v", os.DevNull, err)
|
||||
return fmt.Errorf("error opening %q for reading: %v", os.DevNull, err)
|
||||
}
|
||||
defer devNull.Close()
|
||||
stdin = devNull
@ -669,7 +667,7 @@ func (s *StageExecutor) UnrecognizedInstruction(step *imagebuilder.Step) error {
|
|||
s.executor.logger.Errorf("+(UNHANDLED LOGLEVEL) %#v", step)
|
||||
}
|
||||
|
||||
return errors.Errorf(err)
|
||||
return fmt.Errorf(err)
|
||||
}
|
||||
|
||||
// prepare creates a working container based on the specified image, or if one
@ -684,7 +682,7 @@ func (s *StageExecutor) prepare(ctx context.Context, from string, initializeIBCo
|
|||
base, err := ib.From(node)
|
||||
if err != nil {
|
||||
logrus.Debugf("prepare(node.Children=%#v)", node.Children)
|
||||
return nil, errors.Wrapf(err, "error determining starting point for build")
|
||||
return nil, fmt.Errorf("error determining starting point for build: %w", err)
|
||||
}
|
||||
from = base
|
||||
}
@ -710,7 +708,7 @@ func (s *StageExecutor) prepare(ctx context.Context, from string, initializeIBCo
|
|||
if stage.Builder.Platform != "" {
|
||||
os, arch, variant, err := parse.Platform(stage.Builder.Platform)
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "unable to parse platform %q", stage.Builder.Platform)
|
||||
return nil, fmt.Errorf("unable to parse platform %q: %w", stage.Builder.Platform, err)
|
||||
}
|
||||
if arch != "" || variant != "" {
|
||||
builderSystemContext.ArchitectureChoice = arch
@ -753,7 +751,7 @@ func (s *StageExecutor) prepare(ctx context.Context, from string, initializeIBCo
|
|||
|
||||
builder, err = buildah.NewBuilder(ctx, s.executor.store, builderOptions)
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "error creating build container")
|
||||
return nil, fmt.Errorf("error creating build container: %w", err)
|
||||
}
|
||||
|
||||
// If executor's ProcessLabel and MountLabel is empty means this is the first stage
@ -815,7 +813,7 @@ func (s *StageExecutor) prepare(ctx context.Context, from string, initializeIBCo
|
|||
if err2 := builder.Delete(); err2 != nil {
|
||||
logrus.Debugf("error deleting container which we failed to update: %v", err2)
|
||||
}
|
||||
return nil, errors.Wrapf(err, "error updating build context")
|
||||
return nil, fmt.Errorf("error updating build context: %w", err)
|
||||
}
|
||||
}
|
||||
mountPoint, err := builder.Mount(builder.MountLabel)
@ -823,7 +821,7 @@ func (s *StageExecutor) prepare(ctx context.Context, from string, initializeIBCo
|
|||
if err2 := builder.Delete(); err2 != nil {
|
||||
logrus.Debugf("error deleting container which we failed to mount: %v", err2)
|
||||
}
|
||||
return nil, errors.Wrapf(err, "error mounting new container")
|
||||
return nil, fmt.Errorf("error mounting new container: %w", err)
|
||||
}
|
||||
if rebase {
|
||||
// Make this our "current" working container.
@ -970,7 +968,7 @@ func (s *StageExecutor) Execute(ctx context.Context, base string) (imgID string,
|
|||
// the case, we need to commit() to create a new image.
|
||||
logCommit(s.output, -1)
|
||||
if imgID, ref, err = s.commit(ctx, s.getCreatedBy(nil, ""), false, s.output, s.executor.squash); err != nil {
|
||||
return "", nil, errors.Wrapf(err, "error committing base container")
|
||||
return "", nil, fmt.Errorf("error committing base container: %w", err)
|
||||
}
|
||||
} else if len(s.executor.labels) > 0 || len(s.executor.annotations) > 0 {
|
||||
// The image would be modified by the labels passed
@ -987,6 +985,22 @@ func (s *StageExecutor) Execute(ctx context.Context, base string) (imgID string,
|
|||
if imgID, ref, err = s.tagExistingImage(ctx, s.builder.FromImageID, s.output); err != nil {
|
||||
return "", nil, err
|
||||
}
|
||||
if s.executor.buildOutput != "" && lastStage {
|
||||
// If we have reached this point then our build is just performing a tag
|
||||
// and it contains no steps or instructions (i.e Containerfile only contains
|
||||
// `FROM <imagename> and nothing else so we will never end up committing this
|
||||
// but instead just re-tag image. For such use-cases if `-o` or `--output` was
|
||||
// specified honor that and export the contents of the current build anyways.
|
||||
logrus.Debugf("Generating custom build output with options %q", s.executor.buildOutput)
|
||||
buildOutputOption, err := parse.GetBuildOutput(s.executor.buildOutput)
|
||||
if err != nil {
|
||||
return "", nil, fmt.Errorf("failed to parse build output: %w", err)
|
||||
}
|
||||
if err := s.generateBuildOutput(buildah.CommitOptions{}, buildOutputOption); err != nil {
|
||||
return "", nil, err
|
||||
}
|
||||
}
|
||||
|
||||
}
|
||||
logImageID(imgID)
|
||||
}
@ -998,7 +1012,7 @@ func (s *StageExecutor) Execute(ctx context.Context, base string) (imgID string,
|
|||
// Resolve any arguments in this instruction.
|
||||
step := ib.Step()
|
||||
if err := step.Resolve(node); err != nil {
|
||||
return "", nil, errors.Wrapf(err, "error resolving step %+v", *node)
|
||||
return "", nil, fmt.Errorf("error resolving step %+v: %w", *node, err)
|
||||
}
|
||||
logrus.Debugf("Parsed Step: %+v", *step)
|
||||
if !s.executor.quiet {
@ -1011,21 +1025,29 @@ func (s *StageExecutor) Execute(ctx context.Context, base string) (imgID string,
|
|||
command := strings.ToUpper(step.Command)
|
||||
// chmod, chown and from flags should have an '=' sign, '--chmod=', '--chown=' or '--from='
|
||||
if command == "COPY" && (flag == "--chmod" || flag == "--chown" || flag == "--from") {
|
||||
return "", nil, errors.Errorf("COPY only supports the --chmod=<permissions> --chown=<uid:gid> and the --from=<image|stage> flags")
|
||||
return "", nil, fmt.Errorf("COPY only supports the --chmod=<permissions> --chown=<uid:gid> and the --from=<image|stage> flags")
|
||||
}
|
||||
if command == "ADD" && (flag == "--chmod" || flag == "--chown") {
|
||||
return "", nil, errors.Errorf("ADD only supports the --chmod=<permissions> and the --chown=<uid:gid> flags")
|
||||
return "", nil, fmt.Errorf("ADD only supports the --chmod=<permissions> and the --chown=<uid:gid> flags")
|
||||
}
|
||||
if strings.Contains(flag, "--from") && command == "COPY" {
|
||||
arr := strings.Split(flag, "=")
|
||||
if len(arr) != 2 {
|
||||
return "", nil, errors.Errorf("%s: invalid --from flag, should be --from=<name|stage>", command)
|
||||
return "", nil, fmt.Errorf("%s: invalid --from flag, should be --from=<name|stage>", command)
|
||||
}
|
||||
// If arr[1] has an argument within it, resolve it to its
|
||||
// value. Otherwise just return the value found.
|
||||
from, fromErr := imagebuilder.ProcessWord(arr[1], s.stage.Builder.Arguments())
|
||||
if fromErr != nil {
|
||||
return "", nil, errors.Wrapf(fromErr, "unable to resolve argument %q", arr[1])
|
||||
return "", nil, fmt.Errorf("unable to resolve argument %q: %w", arr[1], fromErr)
|
||||
}
|
||||
|
||||
// Before looking into additional context
|
||||
// also account if the index is given instead
|
||||
// of name so convert index in --from=<index>
|
||||
// to name.
|
||||
if index, err := strconv.Atoi(from); err == nil {
|
||||
from = s.stages[index].Name
|
||||
}
|
||||
// If additional buildContext contains this
|
||||
// give priority to that and break if additional
@ -1040,7 +1062,7 @@ func (s *StageExecutor) Execute(ctx context.Context, base string) (imgID string,
|
|||
// replace with image set in build context
|
||||
from = additionalBuildContext.Value
|
||||
if _, err := s.getImageRootfs(ctx, from); err != nil {
|
||||
return "", nil, errors.Errorf("%s --from=%s: no stage or image found with that name", command, from)
|
||||
return "", nil, fmt.Errorf("%s --from=%s: no stage or image found with that name", command, from)
|
||||
}
|
||||
break
|
||||
}
@ -1055,7 +1077,7 @@ func (s *StageExecutor) Execute(ctx context.Context, base string) (imgID string,
|
|||
if otherStage, ok := s.executor.stages[from]; ok && otherStage.index < s.index {
|
||||
break
|
||||
} else if _, err = s.getImageRootfs(ctx, from); err != nil {
|
||||
return "", nil, errors.Errorf("%s --from=%s: no stage or image found with that name", command, from)
|
||||
return "", nil, fmt.Errorf("%s --from=%s: no stage or image found with that name", command, from)
|
||||
}
|
||||
break
|
||||
}
@ -1075,8 +1097,8 @@ func (s *StageExecutor) Execute(ctx context.Context, base string) (imgID string,
|
|||
if !s.executor.layers {
|
||||
err := ib.Run(step, s, noRunsRemaining)
|
||||
if err != nil {
|
||||
logrus.Debugf("%v", errors.Wrapf(err, "error building at step %+v", *step))
|
||||
return "", nil, errors.Wrapf(err, "error building at STEP \"%s\"", step.Message)
|
||||
logrus.Debugf("Error building at step %+v: %v", *step, err)
|
||||
return "", nil, fmt.Errorf("error building at STEP \"%s\": %w", step.Message, err)
|
||||
}
|
||||
// In case we added content, retrieve its digest.
|
||||
addedContentType, addedContentDigest := s.builder.ContentDigester.Digest()
@ -1109,7 +1131,7 @@ func (s *StageExecutor) Execute(ctx context.Context, base string) (imgID string,
|
|||
logCommit(s.output, i)
|
||||
imgID, ref, err = s.commit(ctx, s.getCreatedBy(node, addedContentSummary), false, s.output, s.executor.squash)
|
||||
if err != nil {
|
||||
return "", nil, errors.Wrapf(err, "error committing container for step %+v", *step)
|
||||
return "", nil, fmt.Errorf("error committing container for step %+v: %w", *step, err)
|
||||
}
|
||||
logImageID(imgID)
|
||||
} else {
@ -1144,7 +1166,7 @@ func (s *StageExecutor) Execute(ctx context.Context, base string) (imgID string,
|
|||
if checkForLayers && step.Command != "arg" && !(s.executor.squash && lastInstruction && lastStage) {
|
||||
cacheID, err = s.intermediateImageExists(ctx, node, addedContentSummary, s.stepRequiresLayer(step))
|
||||
if err != nil {
|
||||
return "", nil, errors.Wrap(err, "error checking if cached image exists from a previous build")
|
||||
return "", nil, fmt.Errorf("error checking if cached image exists from a previous build: %w", err)
|
||||
}
|
||||
}
@ -1155,8 +1177,8 @@ func (s *StageExecutor) Execute(ctx context.Context, base string) (imgID string,
|
|||
if cacheID == "" {
|
||||
// Process the instruction directly.
|
||||
if err = ib.Run(step, s, noRunsRemaining); err != nil {
|
||||
logrus.Debugf("%v", errors.Wrapf(err, "error building at step %+v", *step))
|
||||
return "", nil, errors.Wrapf(err, "error building at STEP \"%s\"", step.Message)
|
||||
logrus.Debugf("Error building at step %+v: %v", *step, err)
|
||||
return "", nil, fmt.Errorf("error building at STEP \"%s\": %w", step.Message, err)
|
||||
}
|
||||
|
||||
// In case we added content, retrieve its digest.
@ -1175,7 +1197,7 @@ func (s *StageExecutor) Execute(ctx context.Context, base string) (imgID string,
|
|||
if checkForLayers {
|
||||
cacheID, err = s.intermediateImageExists(ctx, node, addedContentSummary, s.stepRequiresLayer(step))
|
||||
if err != nil {
|
||||
return "", nil, errors.Wrap(err, "error checking if cached image exists from a previous build")
|
||||
return "", nil, fmt.Errorf("error checking if cached image exists from a previous build: %w", err)
|
||||
}
|
||||
}
|
||||
} else {
@ -1188,8 +1210,8 @@ func (s *StageExecutor) Execute(ctx context.Context, base string) (imgID string,
|
|||
if !s.stepRequiresLayer(step) {
|
||||
err := ib.Run(step, s, noRunsRemaining)
|
||||
if err != nil {
|
||||
logrus.Debugf("%v", errors.Wrapf(err, "error building at step %+v", *step))
|
||||
return "", nil, errors.Wrapf(err, "error building at STEP \"%s\"", step.Message)
|
||||
logrus.Debugf("Error building at step %+v: %v", *step, err)
|
||||
return "", nil, fmt.Errorf("error building at STEP \"%s\": %w", step.Message, err)
|
||||
}
|
||||
}
|
||||
}
@ -1222,7 +1244,7 @@ func (s *StageExecutor) Execute(ctx context.Context, base string) (imgID string,
|
|||
// can be part of build-cache.
|
||||
imgID, ref, err = s.commit(ctx, s.getCreatedBy(node, addedContentSummary), !s.stepRequiresLayer(step), commitName, false)
|
||||
if err != nil {
|
||||
return "", nil, errors.Wrapf(err, "error committing container for step %+v", *step)
|
||||
return "", nil, fmt.Errorf("error committing container for step %+v: %w", *step, err)
|
||||
}
|
||||
}
@ -1232,7 +1254,7 @@ func (s *StageExecutor) Execute(ctx context.Context, base string) (imgID string,
|
|||
if s.executor.squash && lastInstruction && lastStage {
|
||||
imgID, ref, err = s.commit(ctx, s.getCreatedBy(node, addedContentSummary), !s.stepRequiresLayer(step), commitName, true)
|
||||
if err != nil {
|
||||
return "", nil, errors.Wrapf(err, "error committing final squash step %+v", *step)
|
||||
return "", nil, fmt.Errorf("error committing final squash step %+v: %w", *step, err)
|
||||
}
|
||||
}
@ -1261,7 +1283,7 @@ func (s *StageExecutor) Execute(ctx context.Context, base string) (imgID string,
|
|||
// ID that we really should not be pulling anymore (see
|
||||
// containers/podman/issues/10307).
|
||||
if _, err := s.prepare(ctx, imgID, false, true, define.PullNever); err != nil {
|
||||
return "", nil, errors.Wrap(err, "error preparing container for next step")
|
||||
return "", nil, fmt.Errorf("error preparing container for next step: %w", err)
|
||||
}
|
||||
}
|
||||
}
@ -1465,7 +1487,7 @@ func (s *StageExecutor) tagExistingImage(ctx context.Context, cacheID, output st
|
|||
if err == nil {
|
||||
err = destroyErr
|
||||
} else {
|
||||
err = errors.Wrap(err, destroyErr.Error())
|
||||
err = fmt.Errorf("%v: %w", destroyErr.Error(), err)
|
||||
}
|
||||
}
|
||||
}()
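The deferred-cleanup hunk above combines two failures with fmt.Errorf("%v: %w", destroyErr.Error(), err), the same shape used earlier with cleanupErr in Build(). A minimal sketch of why the wrapping order matters; the error values here are illustrative, not taken from the code:

package main

import (
	"errors"
	"fmt"
)

var errPrimary = errors.New("commit failed")

func main() {
	destroyErr := errors.New("destroying signature policy context: already closed")

	// Keep the primary error matchable; prepend the cleanup failure as text only.
	err := fmt.Errorf("%v: %w", destroyErr.Error(), errPrimary)

	fmt.Println(err)                        // cleanup text, then "commit failed"
	fmt.Println(errors.Is(err, errPrimary)) // true: the primary error is the one wrapped with %w
	fmt.Println(errors.Is(err, destroyErr)) // false: the cleanup error was flattened to text
}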
@ -1473,27 +1495,27 @@ func (s *StageExecutor) tagExistingImage(ctx context.Context, cacheID, output st
|
|||
// Look up the source image, expecting it to be in local storage
|
||||
src, err := is.Transport.ParseStoreReference(s.executor.store, cacheID)
|
||||
if err != nil {
|
||||
return "", nil, errors.Wrapf(err, "error getting source imageReference for %q", cacheID)
|
||||
return "", nil, fmt.Errorf("error getting source imageReference for %q: %w", cacheID, err)
|
||||
}
|
||||
options := cp.Options{
|
||||
RemoveSignatures: true, // more like "ignore signatures", since they don't get removed when src and dest are the same image
|
||||
}
|
||||
manifestBytes, err := cp.Image(ctx, policyContext, dest, src, &options)
|
||||
if err != nil {
|
||||
return "", nil, errors.Wrapf(err, "error copying image %q", cacheID)
|
||||
return "", nil, fmt.Errorf("error copying image %q: %w", cacheID, err)
|
||||
}
|
||||
manifestDigest, err := manifest.Digest(manifestBytes)
|
||||
if err != nil {
|
||||
return "", nil, errors.Wrapf(err, "error computing digest of manifest for image %q", cacheID)
|
||||
return "", nil, fmt.Errorf("error computing digest of manifest for image %q: %w", cacheID, err)
|
||||
}
|
||||
img, err := is.Transport.GetStoreImage(s.executor.store, dest)
|
||||
if err != nil {
|
||||
return "", nil, errors.Wrapf(err, "error locating new copy of image %q (i.e., %q)", cacheID, transports.ImageName(dest))
|
||||
return "", nil, fmt.Errorf("error locating new copy of image %q (i.e., %q): %w", cacheID, transports.ImageName(dest), err)
|
||||
}
|
||||
var ref reference.Canonical
|
||||
if dref := dest.DockerReference(); dref != nil {
|
||||
if ref, err = reference.WithDigest(dref, manifestDigest); err != nil {
|
||||
return "", nil, errors.Wrapf(err, "error computing canonical reference for new image %q (i.e., %q)", cacheID, transports.ImageName(dest))
|
||||
return "", nil, fmt.Errorf("error computing canonical reference for new image %q (i.e., %q): %w", cacheID, transports.ImageName(dest), err)
|
||||
}
|
||||
}
|
||||
return img.ID, ref, nil
@ -1505,14 +1527,14 @@ func (s *StageExecutor) intermediateImageExists(ctx context.Context, currNode *p
|
|||
// Get the list of images available in the image store
|
||||
images, err := s.executor.store.Images()
|
||||
if err != nil {
|
||||
return "", errors.Wrap(err, "error getting image list from store")
|
||||
return "", fmt.Errorf("error getting image list from store: %w", err)
|
||||
}
|
||||
var baseHistory []v1.History
|
||||
var baseDiffIDs []digest.Digest
|
||||
if s.builder.FromImageID != "" {
|
||||
_, baseHistory, baseDiffIDs, err = s.executor.getImageTypeAndHistoryAndDiffIDs(ctx, s.builder.FromImageID)
|
||||
if err != nil {
|
||||
return "", errors.Wrapf(err, "error getting history of base image %q", s.builder.FromImageID)
|
||||
return "", fmt.Errorf("error getting history of base image %q: %w", s.builder.FromImageID, err)
|
||||
}
|
||||
}
|
||||
for _, image := range images {
@ -1521,7 +1543,7 @@ func (s *StageExecutor) intermediateImageExists(ctx context.Context, currNode *p
|
|||
if image.TopLayer != "" {
|
||||
imageTopLayer, err = s.executor.store.Layer(image.TopLayer)
|
||||
if err != nil {
|
||||
return "", errors.Wrapf(err, "error getting top layer info")
|
||||
return "", fmt.Errorf("error getting top layer info: %w", err)
|
||||
}
|
||||
// Figure out which layer from this image we should
|
||||
// compare our container's base layer to.
@ -1574,7 +1596,7 @@ func (s *StageExecutor) commit(ctx context.Context, createdBy string, emptyLayer
|
|||
logrus.Debugf("Generating custom build output with options %q", s.executor.buildOutput)
|
||||
buildOutputOption, err = parse.GetBuildOutput(s.executor.buildOutput)
|
||||
if err != nil {
|
||||
return "", nil, errors.Wrapf(err, "failed to parse build output")
|
||||
return "", nil, fmt.Errorf("failed to parse build output: %w", err)
|
||||
}
|
||||
}
|
||||
var imageRef types.ImageReference
@ -1719,36 +1741,9 @@ func (s *StageExecutor) commit(ctx context.Context, createdBy string, emptyLayer
|
|||
}
|
||||
// generate build output
|
||||
if s.executor.buildOutput != "" {
|
||||
extractRootfsOpts := buildah.ExtractRootfsOptions{}
|
||||
if unshare.IsRootless() {
|
||||
// In order to maintain as much parity as possible
|
||||
// with buildkit's version of --output and to avoid
|
||||
// unsafe invocation of exported executables it was
|
||||
// decided to strip setuid,setgid and extended attributes.
|
||||
// Since modes like setuid,setgid leaves room for executable
|
||||
// to get invoked with different file-system permission its safer
|
||||
// to strip them off for unpriviledged invocation.
|
||||
// See: https://github.com/containers/buildah/pull/3823#discussion_r829376633
|
||||
extractRootfsOpts.StripSetuidBit = true
|
||||
extractRootfsOpts.StripSetgidBit = true
|
||||
extractRootfsOpts.StripXattrs = true
|
||||
if err := s.generateBuildOutput(buildah.CommitOptions{}, buildOutputOption); err != nil {
|
||||
return "", nil, err
|
||||
}
|
||||
rc, errChan, err := s.builder.ExtractRootfs(options, extractRootfsOpts)
|
||||
if err != nil {
|
||||
return "", nil, errors.Wrapf(err, "failed to extract rootfs from given container image")
|
||||
}
|
||||
defer rc.Close()
|
||||
err = internalUtil.ExportFromReader(rc, buildOutputOption)
|
||||
if err != nil {
|
||||
return "", nil, errors.Wrapf(err, "failed to export build output")
|
||||
}
|
||||
if errChan != nil {
|
||||
err = <-errChan
|
||||
if err != nil {
|
||||
return "", nil, err
|
||||
}
|
||||
}
|
||||
|
||||
}
|
||||
imgID, _, manifestDigest, err := s.builder.Commit(ctx, imageRef, options)
|
||||
if err != nil {
@ -1758,13 +1753,46 @@ func (s *StageExecutor) commit(ctx context.Context, createdBy string, emptyLayer
|
|||
if imageRef != nil {
|
||||
if dref := imageRef.DockerReference(); dref != nil {
|
||||
if ref, err = reference.WithDigest(dref, manifestDigest); err != nil {
|
||||
return "", nil, errors.Wrapf(err, "error computing canonical reference for new image %q", imgID)
|
||||
return "", nil, fmt.Errorf("error computing canonical reference for new image %q: %w", imgID, err)
|
||||
}
|
||||
}
|
||||
}
|
||||
return imgID, ref, nil
|
||||
}
|
||||
|
||||
func (s *StageExecutor) generateBuildOutput(commitOpts buildah.CommitOptions, buildOutputOpts define.BuildOutputOption) error {
|
||||
extractRootfsOpts := buildah.ExtractRootfsOptions{}
|
||||
if unshare.IsRootless() {
|
||||
// In order to maintain as much parity as possible
|
||||
// with buildkit's version of --output and to avoid
|
||||
// unsafe invocation of exported executables it was
|
||||
// decided to strip setuid,setgid and extended attributes.
|
||||
// Since modes like setuid,setgid leaves room for executable
|
||||
// to get invoked with different file-system permission its safer
|
||||
// to strip them off for unpriviledged invocation.
|
||||
// See: https://github.com/containers/buildah/pull/3823#discussion_r829376633
|
||||
extractRootfsOpts.StripSetuidBit = true
|
||||
extractRootfsOpts.StripSetgidBit = true
|
||||
extractRootfsOpts.StripXattrs = true
|
||||
}
|
||||
rc, errChan, err := s.builder.ExtractRootfs(commitOpts, extractRootfsOpts)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to extract rootfs from given container image: %w", err)
|
||||
}
|
||||
defer rc.Close()
|
||||
err = internalUtil.ExportFromReader(rc, buildOutputOpts)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to export build output: %w", err)
|
||||
}
|
||||
if errChan != nil {
|
||||
err = <-errChan
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (s *StageExecutor) EnsureContainerPath(path string) error {
|
||||
return s.builder.EnsureContainerPathAs(path, "", nil)
|
||||
}
@ -2,6 +2,8 @@ package buildah
|
|||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"fmt"
|
||||
|
||||
"github.com/containers/buildah/define"
|
||||
"github.com/containers/buildah/docker"
@ -13,12 +15,11 @@ import (
|
|||
"github.com/containers/image/v5/types"
|
||||
"github.com/containers/storage"
|
||||
digest "github.com/opencontainers/go-digest"
|
||||
"github.com/pkg/errors"
|
||||
)
|
||||
|
||||
func importBuilderDataFromImage(ctx context.Context, store storage.Store, systemContext *types.SystemContext, imageID, containerName, containerID string) (*Builder, error) {
|
||||
if imageID == "" {
|
||||
return nil, errors.Errorf("Internal error: imageID is empty in importBuilderDataFromImage")
|
||||
return nil, errors.New("Internal error: imageID is empty in importBuilderDataFromImage")
|
||||
}
|
||||
|
||||
storeopts, err := storage.DefaultStoreOptions(false, 0)
@ -29,18 +30,18 @@ func importBuilderDataFromImage(ctx context.Context, store storage.Store, system
|
|||
|
||||
ref, err := is.Transport.ParseStoreReference(store, imageID)
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "no such image %q", imageID)
|
||||
return nil, fmt.Errorf("no such image %q: %w", imageID, err)
|
||||
}
|
||||
src, err := ref.NewImageSource(ctx, systemContext)
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "error instantiating image source")
|
||||
return nil, fmt.Errorf("error instantiating image source: %w", err)
|
||||
}
|
||||
defer src.Close()
|
||||
|
||||
imageDigest := ""
|
||||
manifestBytes, manifestType, err := src.GetManifest(ctx, nil)
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "error loading image manifest for %q", transports.ImageName(ref))
|
||||
return nil, fmt.Errorf("error loading image manifest for %q: %w", transports.ImageName(ref), err)
|
||||
}
|
||||
if manifestDigest, err := manifest.Digest(manifestBytes); err == nil {
|
||||
imageDigest = manifestDigest.String()
@ -50,18 +51,18 @@ func importBuilderDataFromImage(ctx context.Context, store storage.Store, system
|
|||
if manifest.MIMETypeIsMultiImage(manifestType) {
|
||||
list, err := manifest.ListFromBlob(manifestBytes, manifestType)
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "error parsing image manifest for %q as list", transports.ImageName(ref))
|
||||
return nil, fmt.Errorf("error parsing image manifest for %q as list: %w", transports.ImageName(ref), err)
|
||||
}
|
||||
instance, err := list.ChooseInstance(systemContext)
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "error finding an appropriate image in manifest list %q", transports.ImageName(ref))
|
||||
return nil, fmt.Errorf("error finding an appropriate image in manifest list %q: %w", transports.ImageName(ref), err)
|
||||
}
|
||||
instanceDigest = &instance
|
||||
}
|
||||
|
||||
image, err := image.FromUnparsedImage(ctx, systemContext, image.UnparsedInstance(src, instanceDigest))
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "error instantiating image for %q instance %q", transports.ImageName(ref), instanceDigest)
|
||||
return nil, fmt.Errorf("error instantiating image for %q instance %q: %w", transports.ImageName(ref), instanceDigest, err)
|
||||
}
|
||||
|
||||
imageName := ""
@ -72,7 +73,7 @@ func importBuilderDataFromImage(ctx context.Context, store storage.Store, system
|
|||
if img.TopLayer != "" {
|
||||
layer, err4 := store.Layer(img.TopLayer)
|
||||
if err4 != nil {
|
||||
return nil, errors.Wrapf(err4, "error reading information about image's top layer")
|
||||
return nil, fmt.Errorf("error reading information about image's top layer: %w", err4)
|
||||
}
|
||||
uidmap, gidmap = convertStorageIDMaps(layer.UIDMap, layer.GIDMap)
|
||||
}
@ -109,7 +110,7 @@ func importBuilderDataFromImage(ctx context.Context, store storage.Store, system
|
|||
}
|
||||
|
||||
if err := builder.initConfig(ctx, image, systemContext); err != nil {
|
||||
return nil, errors.Wrapf(err, "error preparing image configuration")
|
||||
return nil, fmt.Errorf("error preparing image configuration: %w", err)
|
||||
}
|
||||
|
||||
return builder, nil
@ -117,7 +118,7 @@ func importBuilderDataFromImage(ctx context.Context, store storage.Store, system
|
|||
|
||||
func importBuilder(ctx context.Context, store storage.Store, options ImportOptions) (*Builder, error) {
|
||||
if options.Container == "" {
|
||||
return nil, errors.Errorf("container name must be specified")
|
||||
return nil, errors.New("container name must be specified")
|
||||
}
|
||||
|
||||
c, err := store.Container(options.Container)
@ -146,7 +147,7 @@ func importBuilder(ctx context.Context, store storage.Store, options ImportOptio
|
|||
|
||||
err = builder.Save()
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "error saving builder state")
|
||||
return nil, fmt.Errorf("error saving builder state: %w", err)
|
||||
}
|
||||
|
||||
return builder, nil
@ -154,19 +155,19 @@ func importBuilder(ctx context.Context, store storage.Store, options ImportOptio
|
|||
|
||||
func importBuilderFromImage(ctx context.Context, store storage.Store, options ImportFromImageOptions) (*Builder, error) {
|
||||
if options.Image == "" {
|
||||
return nil, errors.Errorf("image name must be specified")
|
||||
return nil, errors.New("image name must be specified")
|
||||
}
|
||||
|
||||
systemContext := getSystemContext(store, options.SystemContext, options.SignaturePolicyPath)
|
||||
|
||||
_, img, err := util.FindImage(store, "", systemContext, options.Image)
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "importing settings")
|
||||
return nil, fmt.Errorf("importing settings: %w", err)
|
||||
}
|
||||
|
||||
builder, err := importBuilderDataFromImage(ctx, store, systemContext, img.ID, "", "")
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "error importing build settings from image %q", options.Image)
|
||||
return nil, fmt.Errorf("error importing build settings from image %q: %w", options.Image, err)
|
||||
}
|
||||
|
||||
builder.setupLogger()
@ -3,6 +3,7 @@ package buildah
|
|||
import (
|
||||
"bufio"
|
||||
"bytes"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"os"
@ -17,7 +18,6 @@ import (
|
|||
"github.com/containers/storage/pkg/system"
|
||||
"github.com/containers/storage/pkg/unshare"
|
||||
v1 "github.com/opencontainers/image-spec/specs-go/v1"
|
||||
"github.com/pkg/errors"
|
||||
"github.com/sirupsen/logrus"
|
||||
)
@ -189,7 +189,7 @@ func readUptime() (string, error) {
|
|||
}
|
||||
f := bytes.Fields(buf)
|
||||
if len(f) < 1 {
|
||||
return "", errors.Errorf("invalid uptime")
|
||||
return "", errors.New("invalid uptime")
|
||||
}
|
||||
return string(f[0]), nil
|
||||
}
@ -8,6 +8,8 @@ import (
|
|||
"strconv"
|
||||
"strings"
|
||||
|
||||
"errors"
|
||||
|
||||
"github.com/containers/buildah/internal"
|
||||
internalUtil "github.com/containers/buildah/internal/util"
|
||||
"github.com/containers/common/pkg/parse"
@ -16,7 +18,6 @@ import (
|
|||
"github.com/containers/storage/pkg/idtools"
|
||||
"github.com/containers/storage/pkg/lockfile"
|
||||
specs "github.com/opencontainers/runtime-spec/specs-go"
|
||||
"github.com/pkg/errors"
|
||||
)
|
||||
|
||||
const (
@ -76,22 +77,22 @@ func GetBindMount(ctx *types.SystemContext, args []string, contextDir string, st
|
|||
newMount.Options = append(newMount.Options, kv[0])
|
||||
case "from":
|
||||
if len(kv) == 1 {
|
||||
return newMount, "", errors.Wrapf(errBadOptionArg, kv[0])
|
||||
return newMount, "", fmt.Errorf("%v: %w", kv[0], errBadOptionArg)
|
||||
}
|
||||
fromImage = kv[1]
|
||||
case "bind-propagation":
|
||||
if len(kv) == 1 {
|
||||
return newMount, "", errors.Wrapf(errBadOptionArg, kv[0])
|
||||
return newMount, "", fmt.Errorf("%v: %w", kv[0], errBadOptionArg)
|
||||
}
|
||||
newMount.Options = append(newMount.Options, kv[1])
|
||||
case "src", "source":
|
||||
if len(kv) == 1 {
|
||||
return newMount, "", errors.Wrapf(errBadOptionArg, kv[0])
|
||||
return newMount, "", fmt.Errorf("%v: %w", kv[0], errBadOptionArg)
|
||||
}
|
||||
newMount.Source = kv[1]
|
||||
case "target", "dst", "destination":
|
||||
if len(kv) == 1 {
|
||||
return newMount, "", errors.Wrapf(errBadOptionArg, kv[0])
|
||||
return newMount, "", fmt.Errorf("%v: %w", kv[0], errBadOptionArg)
|
||||
}
|
||||
if err := parse.ValidateVolumeCtrDir(kv[1]); err != nil {
|
||||
return newMount, "", err
@ -103,7 +104,7 @@ func GetBindMount(ctx *types.SystemContext, args []string, contextDir string, st
|
|||
// and can thus be safely ignored.
|
||||
// See also the handling of the equivalent "delegated" and "cached" in ValidateVolumeOpts
|
||||
default:
|
||||
return newMount, "", errors.Wrapf(errBadMntOption, kv[0])
|
||||
return newMount, "", fmt.Errorf("%v: %w", kv[0], errBadMntOption)
|
||||
}
|
||||
}
@ -223,22 +224,22 @@ func GetCacheMount(args []string, store storage.Store, imageMountLabel string, a
|
|||
sharing = kv[1]
|
||||
case "bind-propagation":
|
||||
if len(kv) == 1 {
|
||||
return newMount, lockedTargets, errors.Wrapf(errBadOptionArg, kv[0])
|
||||
return newMount, lockedTargets, fmt.Errorf("%v: %w", kv[0], errBadOptionArg)
|
||||
}
|
||||
newMount.Options = append(newMount.Options, kv[1])
|
||||
case "id":
|
||||
if len(kv) == 1 {
|
||||
return newMount, lockedTargets, errors.Wrapf(errBadOptionArg, kv[0])
|
||||
return newMount, lockedTargets, fmt.Errorf("%v: %w", kv[0], errBadOptionArg)
|
||||
}
|
||||
id = kv[1]
|
||||
case "from":
|
||||
if len(kv) == 1 {
|
||||
return newMount, lockedTargets, errors.Wrapf(errBadOptionArg, kv[0])
|
||||
return newMount, lockedTargets, fmt.Errorf("%v: %w", kv[0], errBadOptionArg)
|
||||
}
|
||||
fromStage = kv[1]
|
||||
case "target", "dst", "destination":
|
||||
if len(kv) == 1 {
|
||||
return newMount, lockedTargets, errors.Wrapf(errBadOptionArg, kv[0])
|
||||
return newMount, lockedTargets, fmt.Errorf("%v: %w", kv[0], errBadOptionArg)
|
||||
}
|
||||
if err := parse.ValidateVolumeCtrDir(kv[1]); err != nil {
|
||||
return newMount, lockedTargets, err
@ -247,35 +248,35 @@ func GetCacheMount(args []string, store storage.Store, imageMountLabel string, a
|
|||
setDest = true
|
||||
case "src", "source":
|
||||
if len(kv) == 1 {
|
||||
return newMount, lockedTargets, errors.Wrapf(errBadOptionArg, kv[0])
|
||||
return newMount, lockedTargets, fmt.Errorf("%v: %w", kv[0], errBadOptionArg)
|
||||
}
|
||||
newMount.Source = kv[1]
|
||||
case "mode":
|
||||
if len(kv) == 1 {
|
||||
return newMount, lockedTargets, errors.Wrapf(errBadOptionArg, kv[0])
|
||||
return newMount, lockedTargets, fmt.Errorf("%v: %w", kv[0], errBadOptionArg)
|
||||
}
|
||||
mode, err = strconv.ParseUint(kv[1], 8, 32)
|
||||
if err != nil {
|
||||
return newMount, lockedTargets, errors.Wrapf(err, "Unable to parse cache mode")
|
||||
return newMount, lockedTargets, fmt.Errorf("unable to parse cache mode: %w", err)
|
||||
}
|
||||
case "uid":
|
||||
if len(kv) == 1 {
|
||||
return newMount, lockedTargets, errors.Wrapf(errBadOptionArg, kv[0])
|
||||
return newMount, lockedTargets, fmt.Errorf("%v: %w", kv[0], errBadOptionArg)
|
||||
}
|
||||
uid, err = strconv.Atoi(kv[1])
|
||||
if err != nil {
|
||||
return newMount, lockedTargets, errors.Wrapf(err, "Unable to parse cache uid")
|
||||
return newMount, lockedTargets, fmt.Errorf("unable to parse cache uid: %w", err)
|
||||
}
|
||||
case "gid":
|
||||
if len(kv) == 1 {
|
||||
return newMount, lockedTargets, errors.Wrapf(errBadOptionArg, kv[0])
|
||||
return newMount, lockedTargets, fmt.Errorf("%v: %w", kv[0], errBadOptionArg)
|
||||
}
|
||||
gid, err = strconv.Atoi(kv[1])
|
||||
if err != nil {
|
||||
return newMount, lockedTargets, errors.Wrapf(err, "Unable to parse cache gid")
|
||||
return newMount, lockedTargets, fmt.Errorf("unable to parse cache gid: %w", err)
|
||||
}
|
||||
default:
|
||||
return newMount, lockedTargets, errors.Wrapf(errBadMntOption, kv[0])
|
||||
return newMount, lockedTargets, fmt.Errorf("%v: %w", kv[0], errBadMntOption)
|
||||
}
|
||||
}
|
||||
|
||||
|
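For context, the cache-mount mode value in the hunk above is parsed as an octal string (base 8) while uid and gid are plain decimals. A small stand-alone sketch of that parsing; the helper name is illustrative, not the vendored function:

package main

import (
	"fmt"
	"os"
	"strconv"
)

// parseCacheIDs mirrors the option handling above: mode is octal, uid/gid decimal.
func parseCacheIDs(modeStr, uidStr, gidStr string) (os.FileMode, int, int, error) {
	mode, err := strconv.ParseUint(modeStr, 8, 32) // e.g. "0755"
	if err != nil {
		return 0, 0, 0, fmt.Errorf("unable to parse cache mode: %w", err)
	}
	uid, err := strconv.Atoi(uidStr)
	if err != nil {
		return 0, 0, 0, fmt.Errorf("unable to parse cache uid: %w", err)
	}
	gid, err := strconv.Atoi(gidStr)
	if err != nil {
		return 0, 0, 0, fmt.Errorf("unable to parse cache gid: %w", err)
	}
	return os.FileMode(mode), uid, gid, nil
}

func main() {
	mode, uid, gid, _ := parseCacheIDs("0755", "1000", "1000")
	fmt.Println(mode, uid, gid) // -rwxr-xr-x 1000 1000
}
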
@ -313,7 +314,7 @@ func GetCacheMount(args []string, store storage.Store, imageMountLabel string, a
|
|||
// create cache on host if not present
|
||||
err = os.MkdirAll(cacheParent, os.FileMode(0755))
|
||||
if err != nil {
|
||||
return newMount, lockedTargets, errors.Wrapf(err, "Unable to create build cache directory")
|
||||
return newMount, lockedTargets, fmt.Errorf("unable to create build cache directory: %w", err)
|
||||
}
|
||||
|
||||
if id != "" {
|
||||
|
@ -328,7 +329,7 @@ func GetCacheMount(args []string, store storage.Store, imageMountLabel string, a
|
|||
//buildkit parity: change uid and gid if specified otheriwise keep `0`
|
||||
err = idtools.MkdirAllAndChownNew(newMount.Source, os.FileMode(mode), idPair)
|
||||
if err != nil {
|
||||
return newMount, lockedTargets, errors.Wrapf(err, "Unable to change uid,gid of cache directory")
|
||||
return newMount, lockedTargets, fmt.Errorf("unable to change uid,gid of cache directory: %w", err)
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -337,7 +338,7 @@ func GetCacheMount(args []string, store storage.Store, imageMountLabel string, a
|
|||
// lock parent cache
|
||||
lockfile, err := lockfile.GetLockfile(filepath.Join(newMount.Source, BuildahCacheLockfile))
|
||||
if err != nil {
|
||||
return newMount, lockedTargets, errors.Wrapf(err, "Unable to acquire lock when sharing mode is locked")
|
||||
return newMount, lockedTargets, fmt.Errorf("unable to acquire lock when sharing mode is locked: %w", err)
|
||||
}
|
||||
// Will be unlocked after the RUN step is executed.
|
||||
lockfile.Lock()
|
||||
|
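The locked sharing mode serializes concurrent RUN steps that target the same cache directory. A hedged sketch of that idea using github.com/containers/storage/pkg/lockfile, which is what the hunk itself calls; the lockfile name here is illustrative rather than the real constant:

package main

import (
	"fmt"
	"path/filepath"

	"github.com/containers/storage/pkg/lockfile"
)

// lockCacheDir takes an exclusive lock on a cache directory.
func lockCacheDir(cacheDir string) (lockfile.Locker, error) {
	lf, err := lockfile.GetLockfile(filepath.Join(cacheDir, "buildah-cache-lockfile"))
	if err != nil {
		return nil, fmt.Errorf("unable to acquire lock when sharing mode is locked: %w", err)
	}
	lf.Lock() // held for the duration of the RUN step, then released with Unlock()
	return lf, nil
}
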
@ -347,7 +348,7 @@ func GetCacheMount(args []string, store storage.Store, imageMountLabel string, a
|
|||
break
|
||||
default:
|
||||
// error out for unknown values
|
||||
return newMount, lockedTargets, errors.Wrapf(err, "Unrecognized value %q for field `sharing`", sharing)
|
||||
return newMount, lockedTargets, fmt.Errorf("unrecognized value %q for field `sharing`: %w", sharing, err)
|
||||
}
|
||||
|
||||
// buildkit parity: default sharing should be shared
|
||||
|
@ -375,10 +376,10 @@ func GetCacheMount(args []string, store storage.Store, imageMountLabel string, a
|
|||
// ValidateVolumeMountHostDir validates the host path of buildah --volume
|
||||
func ValidateVolumeMountHostDir(hostDir string) error {
|
||||
if !filepath.IsAbs(hostDir) {
|
||||
return errors.Errorf("invalid host path, must be an absolute path %q", hostDir)
|
||||
return fmt.Errorf("invalid host path, must be an absolute path %q", hostDir)
|
||||
}
|
||||
if _, err := os.Stat(hostDir); err != nil {
|
||||
return errors.WithStack(err)
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
@ -421,7 +422,7 @@ func getVolumeMounts(volumes []string) (map[string]specs.Mount, error) {
|
|||
return nil, err
|
||||
}
|
||||
if _, ok := finalVolumeMounts[volumeMount.Destination]; ok {
|
||||
return nil, errors.Wrapf(errDuplicateDest, volumeMount.Destination)
|
||||
return nil, fmt.Errorf("%v: %w", volumeMount.Destination, errDuplicateDest)
|
||||
}
|
||||
finalVolumeMounts[volumeMount.Destination] = volumeMount
|
||||
}
|
||||
|
@ -433,7 +434,7 @@ func Volume(volume string) (specs.Mount, error) {
|
|||
mount := specs.Mount{}
|
||||
arr := SplitStringWithColonEscape(volume)
|
||||
if len(arr) < 2 {
|
||||
return mount, errors.Errorf("incorrect volume format %q, should be host-dir:ctr-dir[:option]", volume)
|
||||
return mount, fmt.Errorf("incorrect volume format %q, should be host-dir:ctr-dir[:option]", volume)
|
||||
}
|
||||
if err := ValidateVolumeMountHostDir(arr[0]); err != nil {
|
||||
return mount, err
|
||||
|
@ -468,7 +469,7 @@ func GetVolumes(ctx *types.SystemContext, store storage.Store, volumes []string,
|
|||
}
|
||||
for dest, mount := range volumeMounts {
|
||||
if _, ok := unifiedMounts[dest]; ok {
|
||||
return nil, mountedImages, lockedTargets, errors.Wrapf(errDuplicateDest, dest)
|
||||
return nil, mountedImages, lockedTargets, fmt.Errorf("%v: %w", dest, errDuplicateDest)
|
||||
}
|
||||
unifiedMounts[dest] = mount
|
||||
}
|
||||
|
@ -489,7 +490,7 @@ func getMounts(ctx *types.SystemContext, store storage.Store, mounts []string, c
|
|||
mountedImages := make([]string, 0)
|
||||
lockedTargets := make([]string, 0)
|
||||
|
||||
errInvalidSyntax := errors.Errorf("incorrect mount format: should be --mount type=<bind|tmpfs>,[src=<host-dir>,]target=<ctr-dir>[,options]")
|
||||
errInvalidSyntax := errors.New("incorrect mount format: should be --mount type=<bind|tmpfs>,[src=<host-dir>,]target=<ctr-dir>[,options]")
|
||||
|
||||
// TODO(vrothberg): the manual parsing can be replaced with a regular expression
|
||||
// to allow a more robust parsing of the mount format and to give
|
||||
|
@ -497,13 +498,13 @@ func getMounts(ctx *types.SystemContext, store storage.Store, mounts []string, c
|
|||
for _, mount := range mounts {
|
||||
arr := strings.SplitN(mount, ",", 2)
|
||||
if len(arr) < 2 {
|
||||
return nil, mountedImages, lockedTargets, errors.Wrapf(errInvalidSyntax, "%q", mount)
|
||||
return nil, mountedImages, lockedTargets, fmt.Errorf("%q: %w", mount, errInvalidSyntax)
|
||||
}
|
||||
kv := strings.Split(arr[0], "=")
|
||||
// TODO: type is not explicitly required in Docker.
|
||||
// If not specified, it defaults to "volume".
|
||||
if len(kv) != 2 || kv[0] != "type" {
|
||||
return nil, mountedImages, lockedTargets, errors.Wrapf(errInvalidSyntax, "%q", mount)
|
||||
return nil, mountedImages, lockedTargets, fmt.Errorf("%q: %w", mount, errInvalidSyntax)
|
||||
}
|
||||
|
||||
tokens := strings.Split(arr[1], ",")
|
||||
|
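As a rough illustration of the parsing these hunks adjust: the --mount value is split into comma-separated key=value tokens, and the first token must name the type. The helper below is a simplified stand-in, not the vendored implementation:

package main

import (
	"errors"
	"fmt"
	"strings"
)

var errInvalidSyntax = errors.New("incorrect mount format: should be --mount type=<bind|tmpfs>,[src=<host-dir>,]target=<ctr-dir>[,options]")

// parseMountSpec extracts the mount type and the remaining key=value tokens.
func parseMountSpec(mount string) (string, []string, error) {
	arr := strings.SplitN(mount, ",", 2)
	if len(arr) < 2 {
		return "", nil, fmt.Errorf("%q: %w", mount, errInvalidSyntax)
	}
	kv := strings.Split(arr[0], "=")
	if len(kv) != 2 || kv[0] != "type" {
		return "", nil, fmt.Errorf("%q: %w", mount, errInvalidSyntax)
	}
	return kv[1], strings.Split(arr[1], ","), nil
}

func main() {
	typ, tokens, err := parseMountSpec("type=bind,src=/src,target=/dst,ro")
	fmt.Println(typ, tokens, err) // bind [src=/src target=/dst ro] <nil>
}
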
@ -514,7 +515,7 @@ func getMounts(ctx *types.SystemContext, store storage.Store, mounts []string, c
|
|||
return nil, mountedImages, lockedTargets, err
|
||||
}
|
||||
if _, ok := finalMounts[mount.Destination]; ok {
|
||||
return nil, mountedImages, lockedTargets, errors.Wrapf(errDuplicateDest, mount.Destination)
|
||||
return nil, mountedImages, lockedTargets, fmt.Errorf("%v: %w", mount.Destination, errDuplicateDest)
|
||||
}
|
||||
finalMounts[mount.Destination] = mount
|
||||
mountedImages = append(mountedImages, image)
|
||||
|
@ -525,7 +526,7 @@ func getMounts(ctx *types.SystemContext, store storage.Store, mounts []string, c
|
|||
return nil, mountedImages, lockedTargets, err
|
||||
}
|
||||
if _, ok := finalMounts[mount.Destination]; ok {
|
||||
return nil, mountedImages, lockedTargets, errors.Wrapf(errDuplicateDest, mount.Destination)
|
||||
return nil, mountedImages, lockedTargets, fmt.Errorf("%v: %w", mount.Destination, errDuplicateDest)
|
||||
}
|
||||
finalMounts[mount.Destination] = mount
|
||||
case TypeTmpfs:
|
||||
|
@ -534,11 +535,11 @@ func getMounts(ctx *types.SystemContext, store storage.Store, mounts []string, c
|
|||
return nil, mountedImages, lockedTargets, err
|
||||
}
|
||||
if _, ok := finalMounts[mount.Destination]; ok {
|
||||
return nil, mountedImages, lockedTargets, errors.Wrapf(errDuplicateDest, mount.Destination)
|
||||
return nil, mountedImages, lockedTargets, fmt.Errorf("%v: %w", mount.Destination, errDuplicateDest)
|
||||
}
|
||||
finalMounts[mount.Destination] = mount
|
||||
default:
|
||||
return nil, mountedImages, lockedTargets, errors.Errorf("invalid filesystem type %q", kv[1])
|
||||
return nil, mountedImages, lockedTargets, fmt.Errorf("invalid filesystem type %q", kv[1])
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -567,19 +568,19 @@ func GetTmpfsMount(args []string) (specs.Mount, error) {
|
|||
newMount.Options = append(newMount.Options, kv[0])
|
||||
case "tmpfs-mode":
|
||||
if len(kv) == 1 {
|
||||
return newMount, errors.Wrapf(errBadOptionArg, kv[0])
|
||||
return newMount, fmt.Errorf("%v: %w", kv[0], errBadOptionArg)
|
||||
}
|
||||
newMount.Options = append(newMount.Options, fmt.Sprintf("mode=%s", kv[1]))
|
||||
case "tmpfs-size":
|
||||
if len(kv) == 1 {
|
||||
return newMount, errors.Wrapf(errBadOptionArg, kv[0])
|
||||
return newMount, fmt.Errorf("%v: %w", kv[0], errBadOptionArg)
|
||||
}
|
||||
newMount.Options = append(newMount.Options, fmt.Sprintf("size=%s", kv[1]))
|
||||
case "src", "source":
|
||||
return newMount, errors.Errorf("source is not supported with tmpfs mounts")
|
||||
return newMount, errors.New("source is not supported with tmpfs mounts")
|
||||
case "target", "dst", "destination":
|
||||
if len(kv) == 1 {
|
||||
return newMount, errors.Wrapf(errBadOptionArg, kv[0])
|
||||
return newMount, fmt.Errorf("%v: %w", kv[0], errBadOptionArg)
|
||||
}
|
||||
if err := parse.ValidateVolumeCtrDir(kv[1]); err != nil {
|
||||
return newMount, err
|
||||
|
@ -587,7 +588,7 @@ func GetTmpfsMount(args []string) (specs.Mount, error) {
|
|||
newMount.Destination = kv[1]
|
||||
setDest = true
|
||||
default:
|
||||
return newMount, errors.Wrapf(errBadMntOption, kv[0])
|
||||
return newMount, fmt.Errorf("%v: %w", kv[0], errBadMntOption)
|
||||
}
|
||||
}
|
||||
|
||||
|
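The tmpfs branch above maps user-facing flags onto kernel tmpfs mount options; a tiny sketch of that mapping, with the option names taken from the hunk:

package main

import "fmt"

// tmpfsOptions converts the CLI-style settings into tmpfs mount options.
func tmpfsOptions(mode, size string) []string {
	opts := []string{}
	if mode != "" {
		opts = append(opts, fmt.Sprintf("mode=%s", mode)) // from tmpfs-mode
	}
	if size != "" {
		opts = append(opts, fmt.Sprintf("size=%s", size)) // from tmpfs-size
	}
	return opts
}

func main() {
	fmt.Println(tmpfsOptions("1777", "64m")) // [mode=1777 size=64m]
}
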
|
|
@@ -1,6 +1,7 @@
 package util
 
 import (
+	"fmt"
 	"io"
 	"os"
 	"path/filepath"
@@ -14,7 +15,6 @@ import (
 	"github.com/containers/storage/pkg/archive"
 	"github.com/containers/storage/pkg/chrootarchive"
 	"github.com/containers/storage/pkg/unshare"
-	"github.com/pkg/errors"
 )
 
 // LookupImage returns *Image to corresponding imagename or id
@ -66,25 +66,25 @@ func ExportFromReader(input io.Reader, opts define.BuildOutputOption) error {
|
|||
|
||||
err = os.MkdirAll(opts.Path, 0700)
|
||||
if err != nil {
|
||||
return errors.Wrapf(err, "failed while creating the destination path %q", opts.Path)
|
||||
return fmt.Errorf("failed while creating the destination path %q: %w", opts.Path, err)
|
||||
}
|
||||
|
||||
err = chrootarchive.Untar(input, opts.Path, &archive.TarOptions{NoLchown: noLChown})
|
||||
if err != nil {
|
||||
return errors.Wrapf(err, "failed while performing untar at %q", opts.Path)
|
||||
return fmt.Errorf("failed while performing untar at %q: %w", opts.Path, err)
|
||||
}
|
||||
} else {
|
||||
outFile := os.Stdout
|
||||
if !opts.IsStdout {
|
||||
outFile, err = os.Create(opts.Path)
|
||||
if err != nil {
|
||||
return errors.Wrapf(err, "failed while creating destination tar at %q", opts.Path)
|
||||
return fmt.Errorf("failed while creating destination tar at %q: %w", opts.Path, err)
|
||||
}
|
||||
defer outFile.Close()
|
||||
}
|
||||
_, err = io.Copy(outFile, input)
|
||||
if err != nil {
|
||||
return errors.Wrapf(err, "failed while performing copy to %q", opts.Path)
|
||||
return fmt.Errorf("failed while performing copy to %q: %w", opts.Path, err)
|
||||
}
|
||||
}
|
||||
return nil
|
||||
|
@ -97,7 +97,7 @@ func DecryptConfig(decryptionKeys []string) (*encconfig.DecryptConfig, error) {
|
|||
// decryption
|
||||
dcc, err := enchelpers.CreateCryptoConfig([]string{}, decryptionKeys)
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "invalid decryption keys")
|
||||
return nil, fmt.Errorf("invalid decryption keys: %w", err)
|
||||
}
|
||||
cc := encconfig.CombineCryptoConfigs([]encconfig.CryptoConfig{dcc})
|
||||
decryptConfig = cc.DecryptConfig
|
||||
|
@ -116,7 +116,7 @@ func EncryptConfig(encryptionKeys []string, encryptLayers []int) (*encconfig.Enc
|
|||
encLayers = &encryptLayers
|
||||
ecc, err := enchelpers.CreateCryptoConfig(encryptionKeys, []string{})
|
||||
if err != nil {
|
||||
return nil, nil, errors.Wrapf(err, "invalid encryption keys")
|
||||
return nil, nil, fmt.Errorf("invalid encryption keys: %w", err)
|
||||
}
|
||||
cc := encconfig.CombineCryptoConfigs([]encconfig.CryptoConfig{ecc})
|
||||
encConfig = cc.EncryptConfig
|
||||
|
@ -132,6 +132,6 @@ func GetFormat(format string) (string, error) {
|
|||
case define.DOCKER:
|
||||
return define.Dockerv2ImageManifest, nil
|
||||
default:
|
||||
return "", errors.Errorf("unrecognized image type %q", format)
|
||||
return "", fmt.Errorf("unrecognized image type %q", format)
|
||||
}
|
||||
}
|
||||
|
|
|
@@ -1,21 +1,19 @@
 package buildah
 
-import (
-	"github.com/pkg/errors"
-)
+import "fmt"
 
 // Mount mounts a container's root filesystem in a location which can be
 // accessed from the host, and returns the location.
 func (b *Builder) Mount(label string) (string, error) {
 	mountpoint, err := b.store.Mount(b.ContainerID, label)
 	if err != nil {
-		return "", errors.Wrapf(err, "error mounting build container %q", b.ContainerID)
+		return "", fmt.Errorf("error mounting build container %q: %w", b.ContainerID, err)
 	}
 	b.MountPoint = mountpoint
 
 	err = b.Save()
 	if err != nil {
-		return "", errors.Wrapf(err, "error saving updated state for build container %q", b.ContainerID)
+		return "", fmt.Errorf("error saving updated state for build container %q: %w", b.ContainerID, err)
 	}
 	return mountpoint, nil
 }
@@ -23,7 +21,7 @@ func (b *Builder) Mount(label string) (string, error) {
 func (b *Builder) setMountPoint(mountPoint string) error {
 	b.MountPoint = mountPoint
 	if err := b.Save(); err != nil {
-		return errors.Wrapf(err, "error saving updated state for build container %q", b.ContainerID)
+		return fmt.Errorf("error saving updated state for build container %q: %w", b.ContainerID, err)
 	}
 	return nil
 }
@@ -32,17 +30,17 @@ func (b *Builder) setMountPoint(mountPoint string) error {
 func (b *Builder) Mounted() (bool, error) {
 	mountCnt, err := b.store.Mounted(b.ContainerID)
 	if err != nil {
-		return false, errors.Wrapf(err, "error determining if mounting build container %q is mounted", b.ContainerID)
+		return false, fmt.Errorf("error determining if mounting build container %q is mounted: %w", b.ContainerID, err)
 	}
 	mounted := mountCnt > 0
 	if mounted && b.MountPoint == "" {
 		ctr, err := b.store.Container(b.ContainerID)
 		if err != nil {
-			return mountCnt > 0, errors.Wrapf(err, "error determining if mounting build container %q is mounted", b.ContainerID)
+			return mountCnt > 0, fmt.Errorf("error determining if mounting build container %q is mounted: %w", b.ContainerID, err)
 		}
 		layer, err := b.store.Layer(ctr.LayerID)
 		if err != nil {
-			return mountCnt > 0, errors.Wrapf(err, "error determining if mounting build container %q is mounted", b.ContainerID)
+			return mountCnt > 0, fmt.Errorf("error determining if mounting build container %q is mounted: %w", b.ContainerID, err)
 		}
 		return mounted, b.setMountPoint(layer.MountPoint)
 	}

@@ -2,6 +2,7 @@ package buildah
 
 import (
 	"context"
+	"errors"
 	"fmt"
 	"math/rand"
 	"strings"
@@ -19,7 +20,6 @@ import (
 	digest "github.com/opencontainers/go-digest"
 	v1 "github.com/opencontainers/image-spec/specs-go/v1"
 	"github.com/openshift/imagebuilder"
-	"github.com/pkg/errors"
 	"github.com/sirupsen/logrus"
 )
 
@ -76,15 +76,20 @@ func imageNamePrefix(imageName string) string {
|
|||
func newContainerIDMappingOptions(idmapOptions *define.IDMappingOptions) storage.IDMappingOptions {
|
||||
var options storage.IDMappingOptions
|
||||
if idmapOptions != nil {
|
||||
options.HostUIDMapping = idmapOptions.HostUIDMapping
|
||||
options.HostGIDMapping = idmapOptions.HostGIDMapping
|
||||
uidmap, gidmap := convertRuntimeIDMaps(idmapOptions.UIDMap, idmapOptions.GIDMap)
|
||||
if len(uidmap) > 0 && len(gidmap) > 0 {
|
||||
options.UIDMap = uidmap
|
||||
options.GIDMap = gidmap
|
||||
if idmapOptions.AutoUserNs {
|
||||
options.AutoUserNs = true
|
||||
options.AutoUserNsOpts = idmapOptions.AutoUserNsOpts
|
||||
} else {
|
||||
options.HostUIDMapping = true
|
||||
options.HostGIDMapping = true
|
||||
options.HostUIDMapping = idmapOptions.HostUIDMapping
|
||||
options.HostGIDMapping = idmapOptions.HostGIDMapping
|
||||
uidmap, gidmap := convertRuntimeIDMaps(idmapOptions.UIDMap, idmapOptions.GIDMap)
|
||||
if len(uidmap) > 0 && len(gidmap) > 0 {
|
||||
options.UIDMap = uidmap
|
||||
options.GIDMap = gidmap
|
||||
} else {
|
||||
options.HostUIDMapping = true
|
||||
options.HostGIDMapping = true
|
||||
}
|
||||
}
|
||||
}
|
||||
return options
|
||||
|
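This hunk makes the container's ID-mapping options honor auto user namespaces before falling back to explicit maps or host mappings. A hedged sketch of that decision order, with simplified local types standing in for the define/storage structs:

package main

import "fmt"

// Simplified stand-ins for define.IDMappingOptions and storage.IDMappingOptions.
type idMapOptions struct {
	AutoUserNs                     bool
	HostUIDMapping, HostGIDMapping bool
	UIDMap, GIDMap                 []string
}

type storageIDMapOptions struct {
	AutoUserNs                     bool
	HostUIDMapping, HostGIDMapping bool
	UIDMap, GIDMap                 []string
}

// containerIDMappingOptions mirrors the new branch order: auto userns first,
// then explicit maps, then host mappings as the default.
func containerIDMappingOptions(in *idMapOptions) storageIDMapOptions {
	var out storageIDMapOptions
	if in == nil {
		return out
	}
	if in.AutoUserNs {
		out.AutoUserNs = true
		return out
	}
	out.HostUIDMapping = in.HostUIDMapping
	out.HostGIDMapping = in.HostGIDMapping
	if len(in.UIDMap) > 0 && len(in.GIDMap) > 0 {
		out.UIDMap, out.GIDMap = in.UIDMap, in.GIDMap
	} else {
		out.HostUIDMapping = true
		out.HostGIDMapping = true
	}
	return out
}

func main() {
	fmt.Printf("%+v\n", containerIDMappingOptions(&idMapOptions{AutoUserNs: true}))
}
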
@ -185,12 +190,12 @@ func newBuilder(ctx context.Context, store storage.Store, options BuilderOptions
|
|||
if ref != nil {
|
||||
srcSrc, err := ref.NewImageSource(ctx, systemContext)
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "error instantiating image for %q", transports.ImageName(ref))
|
||||
return nil, fmt.Errorf("error instantiating image for %q: %w", transports.ImageName(ref), err)
|
||||
}
|
||||
defer srcSrc.Close()
|
||||
manifestBytes, manifestType, err := srcSrc.GetManifest(ctx, nil)
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "error loading image manifest for %q", transports.ImageName(ref))
|
||||
return nil, fmt.Errorf("error loading image manifest for %q: %w", transports.ImageName(ref), err)
|
||||
}
|
||||
if manifestDigest, err := manifest.Digest(manifestBytes); err == nil {
|
||||
imageDigest = manifestDigest.String()
|
||||
|
@ -199,17 +204,17 @@ func newBuilder(ctx context.Context, store storage.Store, options BuilderOptions
|
|||
if manifest.MIMETypeIsMultiImage(manifestType) {
|
||||
list, err := manifest.ListFromBlob(manifestBytes, manifestType)
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "error parsing image manifest for %q as list", transports.ImageName(ref))
|
||||
return nil, fmt.Errorf("error parsing image manifest for %q as list: %w", transports.ImageName(ref), err)
|
||||
}
|
||||
instance, err := list.ChooseInstance(systemContext)
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "error finding an appropriate image in manifest list %q", transports.ImageName(ref))
|
||||
return nil, fmt.Errorf("error finding an appropriate image in manifest list %q: %w", transports.ImageName(ref), err)
|
||||
}
|
||||
instanceDigest = &instance
|
||||
}
|
||||
src, err = image.FromUnparsedImage(ctx, systemContext, image.UnparsedInstance(srcSrc, instanceDigest))
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "error instantiating image for %q instance %q", transports.ImageName(ref), instanceDigest)
|
||||
return nil, fmt.Errorf("error instantiating image for %q instance %q: %w", transports.ImageName(ref), instanceDigest, err)
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -229,7 +234,7 @@ func newBuilder(ctx context.Context, store storage.Store, options BuilderOptions
|
|||
if options.Container == "" {
|
||||
containers, err := store.Containers()
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "unable to check for container names")
|
||||
return nil, fmt.Errorf("unable to check for container names: %w", err)
|
||||
}
|
||||
tmpName = findUnusedContainer(tmpName, containers)
|
||||
}
|
||||
|
@ -257,8 +262,8 @@ func newBuilder(ctx context.Context, store storage.Store, options BuilderOptions
|
|||
name = tmpName
|
||||
break
|
||||
}
|
||||
if errors.Cause(err) != storage.ErrDuplicateName || options.Container != "" {
|
||||
return nil, errors.Wrapf(err, "error creating container")
|
||||
if !errors.Is(err, storage.ErrDuplicateName) || options.Container != "" {
|
||||
return nil, fmt.Errorf("error creating container: %w", err)
|
||||
}
|
||||
tmpName = fmt.Sprintf("%s-%d", name, rand.Int()%conflict)
|
||||
conflict = conflict * 10
|
||||
|
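The errors.Cause comparison above becomes errors.Is, which also matches sentinels wrapped with %w. A minimal sketch of the retry-on-duplicate-name idea, with a local sentinel standing in for storage.ErrDuplicateName:

package main

import (
	"errors"
	"fmt"
	"math/rand"
)

// errDuplicateName stands in for storage.ErrDuplicateName.
var errDuplicateName = errors.New("that name is already in use")

// createWithUniqueName retries with a numeric suffix while the name collides.
func createWithUniqueName(name string, create func(string) error) (string, error) {
	tmpName := name
	conflict := 100
	for {
		err := create(tmpName)
		if err == nil {
			return tmpName, nil
		}
		if !errors.Is(err, errDuplicateName) {
			return "", fmt.Errorf("error creating container: %w", err)
		}
		tmpName = fmt.Sprintf("%s-%d", name, rand.Int()%conflict)
		conflict *= 10
	}
}

func main() {
	taken := map[string]bool{"builder": true}
	got, _ := createWithUniqueName("builder", func(n string) error {
		if taken[n] {
			return fmt.Errorf("%s: %w", n, errDuplicateName)
		}
		return nil
	})
	fmt.Println(got) // e.g. builder-42
}
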
@ -328,16 +333,16 @@ func newBuilder(ctx context.Context, store storage.Store, options BuilderOptions
|
|||
if options.Mount {
|
||||
_, err = builder.Mount(container.MountLabel())
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "error mounting build container %q", builder.ContainerID)
|
||||
return nil, fmt.Errorf("error mounting build container %q: %w", builder.ContainerID, err)
|
||||
}
|
||||
}
|
||||
|
||||
if err := builder.initConfig(ctx, src, systemContext); err != nil {
|
||||
return nil, errors.Wrapf(err, "error preparing image configuration")
|
||||
return nil, fmt.Errorf("error preparing image configuration: %w", err)
|
||||
}
|
||||
err = builder.Save()
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "error saving builder state for container %q", builder.ContainerID)
|
||||
return nil, fmt.Errorf("error saving builder state for container %q: %w", builder.ContainerID, err)
|
||||
}
|
||||
|
||||
return builder, nil
|
||||
|
|
|
@ -1,11 +1,11 @@
|
|||
package chrootuser
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"os/user"
|
||||
"strconv"
|
||||
"strings"
|
||||
|
||||
"github.com/pkg/errors"
|
||||
)
|
||||
|
||||
var (
|
||||
|
@ -76,9 +76,9 @@ func GetUser(rootdir, userspec string) (uint32, uint32, string, error) {
|
|||
return uint32(uid64), uint32(gid64), homedir, nil
|
||||
}
|
||||
|
||||
err = errors.Wrapf(uerr, "error determining run uid")
|
||||
err = fmt.Errorf("error determining run uid: %w", uerr)
|
||||
if uerr == nil {
|
||||
err = errors.Wrapf(gerr, "error determining run gid")
|
||||
err = fmt.Errorf("error determining run gid: %w", gerr)
|
||||
}
|
||||
|
||||
return 0, 0, homedir, err
|
||||
|
@ -94,7 +94,7 @@ func GetGroup(rootdir, groupspec string) (uint32, error) {
|
|||
gid64, gerr = lookupGroupInContainer(rootdir, groupspec)
|
||||
}
|
||||
if gerr != nil {
|
||||
return 0, errors.Wrapf(gerr, "error looking up group for gid %q", groupspec)
|
||||
return 0, fmt.Errorf("error looking up group for gid %q: %w", groupspec, gerr)
|
||||
}
|
||||
return uint32(gid64), nil
|
||||
}
|
||||
|
@ -103,7 +103,7 @@ func GetGroup(rootdir, groupspec string) (uint32, error) {
|
|||
func GetAdditionalGroupsForUser(rootdir string, userid uint64) ([]uint32, error) {
|
||||
gids, err := lookupAdditionalGroupsForUIDInContainer(rootdir, userid)
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "error looking up supplemental groups for uid %d", userid)
|
||||
return nil, fmt.Errorf("error looking up supplemental groups for uid %d: %w", userid, err)
|
||||
}
|
||||
return gids, nil
|
||||
}
|
||||
|
|
|
@@ -1,9 +1,10 @@
-// +build !linux
+//go:build !linux && !freebsd
+// +build !linux,!freebsd
 
 package chrootuser
 
 import (
-	"github.com/pkg/errors"
+	"errors"
 )
 
 func lookupUserInContainer(rootdir, username string) (uint64, uint64, error) {

@@ -1,4 +1,5 @@
-// +build linux
+//go:build linux || freebsd
+// +build linux freebsd
 
 package chrootuser

@ -5,6 +5,7 @@ package cli
|
|||
// that vendor in this code can use them too.
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
|
@ -18,7 +19,6 @@ import (
|
|||
"github.com/containers/buildah/pkg/parse"
|
||||
"github.com/containers/buildah/pkg/util"
|
||||
"github.com/containers/common/pkg/auth"
|
||||
"github.com/pkg/errors"
|
||||
"github.com/sirupsen/logrus"
|
||||
"github.com/spf13/cobra"
|
||||
)
|
||||
|
@ -66,7 +66,7 @@ func GenBuildOptions(c *cobra.Command, inputArgs []string, iopts BuildOptions) (
|
|||
|
||||
if c.Flag("logsplit").Changed {
|
||||
if !c.Flag("logfile").Changed {
|
||||
return options, nil, nil, errors.Errorf("cannot use --logsplit without --logfile")
|
||||
return options, nil, nil, errors.New("cannot use --logsplit without --logfile")
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -114,7 +114,7 @@ func GenBuildOptions(c *cobra.Command, inputArgs []string, iopts BuildOptions) (
|
|||
if len(av) > 1 {
|
||||
parseAdditionalBuildContext, err := parse.GetAdditionalBuildContext(av[1])
|
||||
if err != nil {
|
||||
return options, nil, nil, errors.Wrapf(err, "while parsing additional build context")
|
||||
return options, nil, nil, fmt.Errorf("while parsing additional build context: %w", err)
|
||||
}
|
||||
additionalBuildContext[av[0]] = &parseAdditionalBuildContext
|
||||
} else {
|
||||
|
@ -140,13 +140,13 @@ func GenBuildOptions(c *cobra.Command, inputArgs []string, iopts BuildOptions) (
|
|||
if len(cliArgs) == 0 {
|
||||
contextDir, err = os.Getwd()
|
||||
if err != nil {
|
||||
return options, nil, nil, errors.Wrapf(err, "unable to choose current working directory as build context")
|
||||
return options, nil, nil, fmt.Errorf("unable to choose current working directory as build context: %w", err)
|
||||
}
|
||||
} else {
|
||||
// The context directory could be a URL. Try to handle that.
|
||||
tempDir, subDir, err := define.TempDirForURL("", "buildah", cliArgs[0])
|
||||
if err != nil {
|
||||
return options, nil, nil, errors.Wrapf(err, "error prepping temporary context directory")
|
||||
return options, nil, nil, fmt.Errorf("error prepping temporary context directory: %w", err)
|
||||
}
|
||||
if tempDir != "" {
|
||||
// We had to download it to a temporary directory.
|
||||
|
@ -157,7 +157,7 @@ func GenBuildOptions(c *cobra.Command, inputArgs []string, iopts BuildOptions) (
|
|||
// Nope, it was local. Use it as is.
|
||||
absDir, err := filepath.Abs(cliArgs[0])
|
||||
if err != nil {
|
||||
return options, nil, nil, errors.Wrapf(err, "error determining path to directory")
|
||||
return options, nil, nil, fmt.Errorf("error determining path to directory: %w", err)
|
||||
}
|
||||
contextDir = absDir
|
||||
}
|
||||
|
@ -175,7 +175,7 @@ func GenBuildOptions(c *cobra.Command, inputArgs []string, iopts BuildOptions) (
|
|||
|
||||
contextDir, err = filepath.EvalSymlinks(contextDir)
|
||||
if err != nil {
|
||||
return options, nil, nil, errors.Wrapf(err, "error evaluating symlinks in build context path")
|
||||
return options, nil, nil, fmt.Errorf("error evaluating symlinks in build context path: %w", err)
|
||||
}
|
||||
|
||||
var stdin io.Reader
|
||||
|
@ -196,7 +196,7 @@ func GenBuildOptions(c *cobra.Command, inputArgs []string, iopts BuildOptions) (
|
|||
|
||||
systemContext, err := parse.SystemContextFromOptions(c)
|
||||
if err != nil {
|
||||
return options, nil, nil, errors.Wrapf(err, "error building system context")
|
||||
return options, nil, nil, fmt.Errorf("error building system context: %w", err)
|
||||
}
|
||||
|
||||
isolation, err := parse.IsolationOption(iopts.Isolation)
|
||||
|
@ -226,11 +226,11 @@ func GenBuildOptions(c *cobra.Command, inputArgs []string, iopts BuildOptions) (
|
|||
}
|
||||
|
||||
if pullFlagsCount > 1 {
|
||||
return options, nil, nil, errors.Errorf("can only set one of 'pull' or 'pull-always' or 'pull-never'")
|
||||
return options, nil, nil, errors.New("can only set one of 'pull' or 'pull-always' or 'pull-never'")
|
||||
}
|
||||
|
||||
if (c.Flag("rm").Changed || c.Flag("force-rm").Changed) && (!c.Flag("layers").Changed && !c.Flag("no-cache").Changed) {
|
||||
return options, nil, nil, errors.Errorf("'rm' and 'force-rm' can only be set with either 'layers' or 'no-cache'")
|
||||
return options, nil, nil, errors.New("'rm' and 'force-rm' can only be set with either 'layers' or 'no-cache'")
|
||||
}
|
||||
|
||||
if c.Flag("cache-from").Changed {
|
||||
|
@ -256,7 +256,7 @@ func GenBuildOptions(c *cobra.Command, inputArgs []string, iopts BuildOptions) (
|
|||
}
|
||||
usernsOption, idmappingOptions, err := parse.IDMappingOptions(c, isolation)
|
||||
if err != nil {
|
||||
return options, nil, nil, errors.Wrapf(err, "error parsing ID mapping options")
|
||||
return options, nil, nil, fmt.Errorf("error parsing ID mapping options: %w", err)
|
||||
}
|
||||
namespaceOptions.AddOrReplace(usernsOption...)
|
||||
|
||||
|
@ -267,7 +267,7 @@ func GenBuildOptions(c *cobra.Command, inputArgs []string, iopts BuildOptions) (
|
|||
|
||||
decryptConfig, err := iutil.DecryptConfig(iopts.DecryptionKeys)
|
||||
if err != nil {
|
||||
return options, nil, nil, errors.Wrapf(err, "unable to obtain decrypt config")
|
||||
return options, nil, nil, fmt.Errorf("unable to obtain decrypt config: %w", err)
|
||||
}
|
||||
|
||||
var excludes []string
|
||||
|
|
|
@ -17,7 +17,6 @@ import (
|
|||
"github.com/containers/common/pkg/config"
|
||||
"github.com/containers/storage/pkg/unshare"
|
||||
"github.com/opencontainers/runtime-spec/specs-go"
|
||||
"github.com/pkg/errors"
|
||||
"github.com/spf13/pflag"
|
||||
)
|
||||
|
||||
|
@ -74,6 +73,7 @@ type BudResults struct {
|
|||
NoCache bool
|
||||
Timestamp int64
|
||||
OmitHistory bool
|
||||
OCIHooksDir []string
|
||||
Pull string
|
||||
PullAlways bool
|
||||
PullNever bool
|
||||
|
@ -194,6 +194,7 @@ func GetBudFlags(flags *BudResults) pflag.FlagSet {
|
|||
fs.String("arch", runtime.GOARCH, "set the ARCH of the image to the provided value instead of the architecture of the host")
|
||||
fs.StringArrayVar(&flags.Annotation, "annotation", []string{}, "set metadata for an image (default [])")
|
||||
fs.StringVar(&flags.Authfile, "authfile", "", "path of the authentication file.")
|
||||
fs.StringArrayVar(&flags.OCIHooksDir, "hooks-dir", []string{}, "set the OCI hooks directory path (may be set multiple times)")
|
||||
fs.StringArrayVar(&flags.BuildArg, "build-arg", []string{}, "`argument=value` to supply to the builder")
|
||||
fs.StringArrayVar(&flags.BuildContext, "build-context", []string{}, "`argument=value` to supply additional build context to the builder")
|
||||
fs.StringVar(&flags.CacheFrom, "cache-from", "", "images to utilise as potential cache sources. The build process does not currently support caching so this is a NOOP.")
|
||||
|
@ -282,6 +283,7 @@ func GetBudFlagsCompletions() commonComp.FlagCompletions {
|
|||
flagCompletion["file"] = commonComp.AutocompleteDefault
|
||||
flagCompletion["format"] = commonComp.AutocompleteNone
|
||||
flagCompletion["from"] = commonComp.AutocompleteDefault
|
||||
flagCompletion["hooks-dir"] = commonComp.AutocompleteNone
|
||||
flagCompletion["ignorefile"] = commonComp.AutocompleteDefault
|
||||
flagCompletion["iidfile"] = commonComp.AutocompleteDefault
|
||||
flagCompletion["jobs"] = commonComp.AutocompleteNone
|
||||
|
@ -311,7 +313,7 @@ func GetFromAndBudFlags(flags *FromAndBudResults, usernsResults *UserNSResults,
|
|||
fs := pflag.FlagSet{}
|
||||
defaultContainerConfig, err := config.Default()
|
||||
if err != nil {
|
||||
return fs, errors.Wrapf(err, "failed to get container config")
|
||||
return fs, fmt.Errorf("failed to get container config: %w", err)
|
||||
}
|
||||
|
||||
fs.StringSliceVar(&flags.AddHost, "add-host", []string{}, "add a custom host-to-IP mapping (`host:ip`) (default [])")
|
||||
|
@ -440,7 +442,7 @@ func DefaultHistory() bool {
|
|||
func VerifyFlagsArgsOrder(args []string) error {
|
||||
for _, arg := range args {
|
||||
if strings.HasPrefix(arg, "-") {
|
||||
return errors.Errorf("No options (%s) can be specified after the image or container name", arg)
|
||||
return fmt.Errorf("no options (%s) can be specified after the image or container name", arg)
|
||||
}
|
||||
}
|
||||
return nil
|
||||
|
|
|
@ -0,0 +1,180 @@
|
|||
//go:build freebsd
|
||||
// +build freebsd
|
||||
|
||||
package jail
|
||||
|
||||
import (
|
||||
"strings"
|
||||
"syscall"
|
||||
"unsafe"
|
||||
|
||||
"github.com/sirupsen/logrus"
|
||||
"golang.org/x/sys/unix"
|
||||
)
|
||||
|
||||
type NS int32
|
||||
|
||||
const (
|
||||
DISABLED NS = 0
|
||||
NEW NS = 1
|
||||
INHERIT NS = 2
|
||||
|
||||
JAIL_CREATE = 0x01
|
||||
JAIL_UPDATE = 0x02
|
||||
JAIL_ATTACH = 0x04
|
||||
)
|
||||
|
||||
type config struct {
|
||||
params map[string]interface{}
|
||||
}
|
||||
|
||||
func NewConfig() *config {
|
||||
return &config{
|
||||
params: make(map[string]interface{}),
|
||||
}
|
||||
}
|
||||
|
||||
func handleBoolSetting(key string, val bool) (string, interface{}) {
|
||||
// jail doesn't deal with booleans - it uses paired parameter
|
||||
// names, e.g. "persist"/"nopersist". If the key contains '.',
|
||||
// the "no" prefix is applied to the last element.
|
||||
if val == false {
|
||||
parts := strings.Split(key, ".")
|
||||
parts[len(parts)-1] = "no" + parts[len(parts)-1]
|
||||
key = strings.Join(parts, ".")
|
||||
}
|
||||
return key, nil
|
||||
}
|
||||
|
||||
func (c *config) Set(key string, value interface{}) {
|
||||
// Normalise integer types to int32
|
||||
switch v := value.(type) {
|
||||
case int:
|
||||
value = int32(v)
|
||||
case uint32:
|
||||
value = int32(v)
|
||||
}
|
||||
|
||||
switch key {
|
||||
case "jid", "devfs_ruleset", "enforce_statfs", "children.max", "securelevel":
|
||||
if _, ok := value.(int32); !ok {
|
||||
logrus.Fatalf("value for parameter %s must be an int32", key)
|
||||
}
|
||||
case "ip4", "ip6", "host", "vnet":
|
||||
nsval, ok := value.(NS)
|
||||
if !ok {
|
||||
logrus.Fatalf("value for parameter %s must be a jail.NS", key)
|
||||
}
|
||||
if (key == "host" || key == "vnet") && nsval == DISABLED {
|
||||
logrus.Fatalf("value for parameter %s cannot be DISABLED", key)
|
||||
}
|
||||
case "persist", "sysvmsg", "sysvsem", "sysvshm":
|
||||
bval, ok := value.(bool)
|
||||
if !ok {
|
||||
logrus.Fatalf("value for parameter %s must be bool", key)
|
||||
}
|
||||
key, value = handleBoolSetting(key, bval)
|
||||
default:
|
||||
if strings.HasPrefix(key, "allow.") {
|
||||
bval, ok := value.(bool)
|
||||
if !ok {
|
||||
logrus.Fatalf("value for parameter %s must be bool", key)
|
||||
}
|
||||
key, value = handleBoolSetting(key, bval)
|
||||
} else {
|
||||
if _, ok := value.(string); !ok {
|
||||
logrus.Fatalf("value for parameter %s must be a string", key)
|
||||
}
|
||||
}
|
||||
}
|
||||
c.params[key] = value
|
||||
}
|
||||
|
||||
func (c *config) getIovec() ([]syscall.Iovec, error) {
|
||||
jiov := make([]syscall.Iovec, 0)
|
||||
for key, value := range c.params {
|
||||
iov, err := stringToIovec(key)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
jiov = append(jiov, iov)
|
||||
switch v := value.(type) {
|
||||
case string:
|
||||
iov, err := stringToIovec(v)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
jiov = append(jiov, iov)
|
||||
case int32:
|
||||
jiov = append(jiov, syscall.Iovec{
|
||||
Base: (*byte)(unsafe.Pointer(&v)),
|
||||
Len: 4,
|
||||
})
|
||||
case NS:
|
||||
jiov = append(jiov, syscall.Iovec{
|
||||
Base: (*byte)(unsafe.Pointer(&v)),
|
||||
Len: 4,
|
||||
})
|
||||
default:
|
||||
jiov = append(jiov, syscall.Iovec{
|
||||
Base: nil,
|
||||
Len: 0,
|
||||
})
|
||||
}
|
||||
}
|
||||
return jiov, nil
|
||||
}
|
||||
|
||||
type jail struct {
|
||||
jid int32
|
||||
}
|
||||
|
||||
func jailSet(jconf *config, flags int) (*jail, error) {
|
||||
jiov, err := jconf.getIovec()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
jid, _, errno := syscall.Syscall(unix.SYS_JAIL_SET, uintptr(unsafe.Pointer(&jiov[0])), uintptr(len(jiov)), uintptr(flags))
|
||||
if errno != 0 {
|
||||
return nil, errno
|
||||
}
|
||||
return &jail{
|
||||
jid: int32(jid),
|
||||
}, nil
|
||||
}
|
||||
|
||||
func jailGet(jconf *config, flags int) (*jail, error) {
|
||||
jiov, err := jconf.getIovec()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
jid, _, errno := syscall.Syscall(unix.SYS_JAIL_GET, uintptr(unsafe.Pointer(&jiov[0])), uintptr(len(jiov)), uintptr(flags))
|
||||
if errno != 0 {
|
||||
return nil, errno
|
||||
}
|
||||
return &jail{
|
||||
jid: int32(jid),
|
||||
}, nil
|
||||
}
|
||||
|
||||
func Create(jconf *config) (*jail, error) {
|
||||
return jailSet(jconf, JAIL_CREATE)
|
||||
}
|
||||
|
||||
func CreateAndAttach(jconf *config) (*jail, error) {
|
||||
return jailSet(jconf, JAIL_CREATE|JAIL_ATTACH)
|
||||
}
|
||||
|
||||
func FindByName(name string) (*jail, error) {
|
||||
jconf := NewConfig()
|
||||
jconf.Set("name", name)
|
||||
return jailGet(jconf, 0)
|
||||
}
|
||||
|
||||
func (j *jail) Set(jconf *config) error {
|
||||
jconf.Set("jid", j.jid)
|
||||
_, err := jailSet(jconf, JAIL_UPDATE)
|
||||
return err
|
||||
}
|
|
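The new jail package above exposes a small configuration API (NewConfig, Set, Create, CreateAndAttach, FindByName) over the FreeBSD jail_set/jail_get syscalls. A hedged usage sketch; the import path is assumed to be buildah's pkg/jail, the parameter values are made up, and this would need to run as root on FreeBSD:

//go:build freebsd

package main

import (
	"fmt"

	"github.com/containers/buildah/pkg/jail" // path assumed; the package added above
)

func main() {
	jconf := jail.NewConfig()
	jconf.Set("name", "buildah-example")
	jconf.Set("host.hostname", "example")
	jconf.Set("persist", false) // becomes "nopersist" via the paired-name handling

	// Create the jail and attach the current process to it.
	if _, err := jail.CreateAndAttach(jconf); err != nil {
		fmt.Println(err)
	}
}
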
@@ -0,0 +1,20 @@
+//go:build (386 || arm) && freebsd
+// +build 386 arm
+// +build freebsd
+
+package jail
+
+import (
+	"syscall"
+)
+
+func stringToIovec(val string) (syscall.Iovec, error) {
+	bs, err := syscall.ByteSliceFromString(val)
+	if err != nil {
+		return syscall.Iovec{}, err
+	}
+	var res syscall.Iovec
+	res.Base = &bs[0]
+	res.Len = uint32(len(bs))
+	return res, nil
+}

@@ -0,0 +1,19 @@
+//go:build !(386 || arm) && freebsd
+// +build !386,!arm,freebsd
+
+package jail
+
+import (
+	"syscall"
+)
+
+func stringToIovec(val string) (syscall.Iovec, error) {
+	bs, err := syscall.ByteSliceFromString(val)
+	if err != nil {
+		return syscall.Iovec{}, err
+	}
+	var res syscall.Iovec
+	res.Base = &bs[0]
+	res.Len = uint64(len(bs))
+	return res, nil
+}

@ -9,11 +9,12 @@ import (
|
|||
"strings"
|
||||
"syscall"
|
||||
|
||||
"errors"
|
||||
|
||||
"github.com/containers/storage/pkg/idtools"
|
||||
"github.com/containers/storage/pkg/system"
|
||||
"github.com/containers/storage/pkg/unshare"
|
||||
"github.com/opencontainers/runtime-spec/specs-go"
|
||||
"github.com/pkg/errors"
|
||||
"github.com/sirupsen/logrus"
|
||||
"golang.org/x/sys/unix"
|
||||
)
|
||||
|
@ -56,12 +57,12 @@ type Options struct {
|
|||
func TempDir(containerDir string, rootUID, rootGID int) (string, error) {
|
||||
contentDir := filepath.Join(containerDir, "overlay")
|
||||
if err := idtools.MkdirAllAs(contentDir, 0700, rootUID, rootGID); err != nil {
|
||||
return "", errors.Wrapf(err, "failed to create the overlay %s directory", contentDir)
|
||||
return "", fmt.Errorf("failed to create the overlay %s directory: %w", contentDir, err)
|
||||
}
|
||||
|
||||
contentDir, err := ioutil.TempDir(contentDir, "")
|
||||
if err != nil {
|
||||
return "", errors.Wrapf(err, "failed to create the overlay tmpdir in %s directory", contentDir)
|
||||
return "", fmt.Errorf("failed to create the overlay tmpdir in %s directory: %w", contentDir, err)
|
||||
}
|
||||
|
||||
return generateOverlayStructure(contentDir, rootUID, rootGID)
|
||||
|
@ -71,7 +72,7 @@ func TempDir(containerDir string, rootUID, rootGID int) (string, error) {
|
|||
func GenerateStructure(containerDir, containerID, name string, rootUID, rootGID int) (string, error) {
|
||||
contentDir := filepath.Join(containerDir, "overlay-containers", containerID, name)
|
||||
if err := idtools.MkdirAllAs(contentDir, 0700, rootUID, rootGID); err != nil {
|
||||
return "", errors.Wrapf(err, "failed to create the overlay %s directory", contentDir)
|
||||
return "", fmt.Errorf("failed to create the overlay %s directory: %w", contentDir, err)
|
||||
}
|
||||
|
||||
return generateOverlayStructure(contentDir, rootUID, rootGID)
|
||||
|
@ -82,14 +83,14 @@ func generateOverlayStructure(containerDir string, rootUID, rootGID int) (string
|
|||
upperDir := filepath.Join(containerDir, "upper")
|
||||
workDir := filepath.Join(containerDir, "work")
|
||||
if err := idtools.MkdirAllAs(upperDir, 0700, rootUID, rootGID); err != nil {
|
||||
return "", errors.Wrapf(err, "failed to create the overlay %s directory", upperDir)
|
||||
return "", fmt.Errorf("failed to create the overlay %s directory: %w", upperDir, err)
|
||||
}
|
||||
if err := idtools.MkdirAllAs(workDir, 0700, rootUID, rootGID); err != nil {
|
||||
return "", errors.Wrapf(err, "failed to create the overlay %s directory", workDir)
|
||||
return "", fmt.Errorf("failed to create the overlay %s directory: %w", workDir, err)
|
||||
}
|
||||
mergeDir := filepath.Join(containerDir, "merge")
|
||||
if err := idtools.MkdirAllAs(mergeDir, 0700, rootUID, rootGID); err != nil {
|
||||
return "", errors.Wrapf(err, "failed to create the overlay %s directory", mergeDir)
|
||||
return "", fmt.Errorf("failed to create the overlay %s directory: %w", mergeDir, err)
|
||||
}
|
||||
|
||||
return containerDir, nil
|
||||
|
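These helpers lay out the per-container overlay scaffolding: an upper, work, and merge directory under a content directory. A rough, self-contained sketch of that layout using plain os.MkdirAll instead of the idtools helper, so ownership (rootUID/rootGID) handling is omitted:

package main

import (
	"fmt"
	"os"
	"path/filepath"
)

// generateOverlayDirs creates the upper/work/merge layout described above.
func generateOverlayDirs(contentDir string) error {
	for _, sub := range []string{"upper", "work", "merge"} {
		dir := filepath.Join(contentDir, sub)
		if err := os.MkdirAll(dir, 0o700); err != nil {
			return fmt.Errorf("failed to create the overlay %s directory: %w", dir, err)
		}
	}
	return nil
}

func main() {
	if err := generateOverlayDirs("/tmp/example-overlay"); err != nil {
		fmt.Println(err)
	}
}
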
@ -141,7 +142,7 @@ func mountWithMountProgram(mountProgram, overlayOptions, mergeDir string) error
|
|||
cmd := exec.Command(mountProgram, "-o", overlayOptions, mergeDir)
|
||||
|
||||
if err := cmd.Run(); err != nil {
|
||||
return errors.Wrapf(err, "exec %s", mountProgram)
|
||||
return fmt.Errorf("exec %s: %w", mountProgram, err)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
@ -238,7 +239,7 @@ func Unmount(contentDir string) error {
|
|||
// If they fail, fallback to unix.Unmount
|
||||
for _, v := range []string{"fusermount3", "fusermount"} {
|
||||
err := exec.Command(v, "-u", mergeDir).Run()
|
||||
if err != nil && errors.Cause(err) != exec.ErrNotFound {
|
||||
if err != nil && !errors.Is(err, exec.ErrNotFound) {
|
||||
logrus.Debugf("Error unmounting %s with %s - %v", mergeDir, v, err)
|
||||
}
|
||||
if err == nil {
|
||||
|
@ -250,7 +251,7 @@ func Unmount(contentDir string) error {
|
|||
|
||||
// Ignore EINVAL as the specified merge dir is not a mount point
|
||||
if err := unix.Unmount(mergeDir, 0); err != nil && !os.IsNotExist(err) && err != unix.EINVAL {
|
||||
return errors.Wrapf(err, "unmount overlay %s", mergeDir)
|
||||
return fmt.Errorf("unmount overlay %s: %w", mergeDir, err)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
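The unmount path above tries the FUSE helpers first and then falls back to a plain unmount, ignoring EINVAL when the directory was never a mount point. A hedged sketch of that order; the directory name is illustrative:

//go:build linux

package main

import (
	"errors"
	"fmt"
	"os"
	"os/exec"

	"golang.org/x/sys/unix"
)

// unmountMergeDir tries fusermount3/fusermount, then falls back to unix.Unmount.
func unmountMergeDir(mergeDir string) error {
	for _, tool := range []string{"fusermount3", "fusermount"} {
		err := exec.Command(tool, "-u", mergeDir).Run()
		if err == nil {
			return nil
		}
		if !errors.Is(err, exec.ErrNotFound) {
			fmt.Printf("error unmounting %s with %s: %v\n", mergeDir, tool, err)
		}
	}
	// EINVAL means mergeDir was not a mount point; that is not an error here.
	if err := unix.Unmount(mergeDir, 0); err != nil && !os.IsNotExist(err) && err != unix.EINVAL {
		return fmt.Errorf("unmount overlay %s: %w", mergeDir, err)
	}
	return nil
}

func main() {
	_ = unmountMergeDir("/tmp/example-overlay/merge")
}
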
@ -261,15 +262,15 @@ func recreate(contentDir string) error {
|
|||
if os.IsNotExist(err) {
|
||||
return nil
|
||||
}
|
||||
return errors.Wrap(err, "failed to stat overlay upper directory")
|
||||
return fmt.Errorf("failed to stat overlay upper directory: %w", err)
|
||||
}
|
||||
|
||||
if err := os.RemoveAll(contentDir); err != nil {
|
||||
return errors.WithStack(err)
|
||||
return err
|
||||
}
|
||||
|
||||
if err := idtools.MkdirAllAs(contentDir, os.FileMode(st.Mode()), int(st.UID()), int(st.GID())); err != nil {
|
||||
return errors.Wrap(err, "failed to create overlay directory")
|
||||
return fmt.Errorf("failed to create overlay directory: %w", err)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
@ -295,7 +296,7 @@ func CleanupContent(containerDir string) (Err error) {
|
|||
if os.IsNotExist(err) {
|
||||
return nil
|
||||
}
|
||||
return errors.Wrap(err, "read directory")
|
||||
return fmt.Errorf("read directory: %w", err)
|
||||
}
|
||||
for _, f := range files {
|
||||
dir := filepath.Join(contentDir, f.Name())
|
||||
|
@ -305,7 +306,7 @@ func CleanupContent(containerDir string) (Err error) {
|
|||
}
|
||||
|
||||
if err := os.RemoveAll(contentDir); err != nil && !os.IsNotExist(err) {
|
||||
return errors.Wrap(err, "failed to cleanup overlay directory")
|
||||
return fmt.Errorf("failed to cleanup overlay directory: %w", err)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
|
|
@ -5,6 +5,7 @@ package parse
|
|||
// would be useful to projects vendoring buildah
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"net"
|
||||
"os"
|
||||
|
@ -21,10 +22,10 @@ import (
|
|||
"github.com/containers/image/v5/types"
|
||||
"github.com/containers/storage/pkg/idtools"
|
||||
"github.com/containers/storage/pkg/unshare"
|
||||
storageTypes "github.com/containers/storage/types"
|
||||
units "github.com/docker/go-units"
|
||||
specs "github.com/opencontainers/runtime-spec/specs-go"
|
||||
"github.com/openshift/imagebuilder"
|
||||
"github.com/pkg/errors"
|
||||
"github.com/sirupsen/logrus"
|
||||
"github.com/spf13/cobra"
|
||||
"github.com/spf13/pflag"
|
||||
|
@ -65,7 +66,7 @@ func CommonBuildOptionsFromFlagSet(flags *pflag.FlagSet, findFlagFunc func(name
|
|||
if memVal != "" {
|
||||
memoryLimit, err = units.RAMInBytes(memVal)
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "invalid value for memory")
|
||||
return nil, fmt.Errorf("invalid value for memory: %w", err)
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -76,7 +77,7 @@ func CommonBuildOptionsFromFlagSet(flags *pflag.FlagSet, findFlagFunc func(name
|
|||
} else {
|
||||
memorySwap, err = units.RAMInBytes(memSwapValue)
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "invalid value for memory-swap")
|
||||
return nil, fmt.Errorf("invalid value for memory-swap: %w", err)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
@ -86,11 +87,11 @@ func CommonBuildOptionsFromFlagSet(flags *pflag.FlagSet, findFlagFunc func(name
|
|||
addHost, _ := flags.GetStringSlice("add-host")
|
||||
if len(addHost) > 0 {
|
||||
if noHosts {
|
||||
return nil, errors.Errorf("--no-hosts and --add-host conflict, can not be used together")
|
||||
return nil, errors.New("--no-hosts and --add-host conflict, can not be used together")
|
||||
}
|
||||
for _, host := range addHost {
|
||||
if err := validateExtraHost(host); err != nil {
|
||||
return nil, errors.Wrapf(err, "invalid value for add-host")
|
||||
return nil, fmt.Errorf("invalid value for add-host: %w", err)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
@ -105,7 +106,7 @@ func CommonBuildOptionsFromFlagSet(flags *pflag.FlagSet, findFlagFunc func(name
|
|||
}
|
||||
}
|
||||
if noDNS && len(dnsServers) > 1 {
|
||||
return nil, errors.Errorf("invalid --dns, --dns=none may not be used with any other --dns options")
|
||||
return nil, errors.New("invalid --dns, --dns=none may not be used with any other --dns options")
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -113,7 +114,7 @@ func CommonBuildOptionsFromFlagSet(flags *pflag.FlagSet, findFlagFunc func(name
|
|||
if flags.Changed("dns-search") {
|
||||
dnsSearch, _ = flags.GetStringSlice("dns-search")
|
||||
if noDNS && len(dnsSearch) > 0 {
|
||||
return nil, errors.Errorf("invalid --dns-search, --dns-search may not be used with --dns=none")
|
||||
return nil, errors.New("invalid --dns-search, --dns-search may not be used with --dns=none")
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -121,12 +122,12 @@ func CommonBuildOptionsFromFlagSet(flags *pflag.FlagSet, findFlagFunc func(name
|
|||
if flags.Changed("dns-option") {
|
||||
dnsOptions, _ = flags.GetStringSlice("dns-option")
|
||||
if noDNS && len(dnsOptions) > 0 {
|
||||
return nil, errors.Errorf("invalid --dns-option, --dns-option may not be used with --dns=none")
|
||||
return nil, errors.New("invalid --dns-option, --dns-option may not be used with --dns=none")
|
||||
}
|
||||
}
|
||||
|
||||
if _, err := units.FromHumanSize(findFlagFunc("shm-size").Value.String()); err != nil {
|
||||
return nil, errors.Wrapf(err, "invalid --shm-size")
|
||||
return nil, fmt.Errorf("invalid --shm-size: %w", err)
|
||||
}
|
||||
volumes, _ := flags.GetStringArray("volume")
|
||||
if err := Volumes(volumes); err != nil {
|
||||
|
@ -146,6 +147,7 @@ func CommonBuildOptionsFromFlagSet(flags *pflag.FlagSet, findFlagFunc func(name
|
|||
|
||||
secrets, _ := flags.GetStringArray("secret")
|
||||
sshsources, _ := flags.GetStringArray("ssh")
|
||||
ociHooks, _ := flags.GetStringArray("hooks-dir")
|
||||
|
||||
commonOpts := &define.CommonBuildOptions{
|
||||
AddHost: addHost,
|
||||
|
@ -169,6 +171,7 @@ func CommonBuildOptionsFromFlagSet(flags *pflag.FlagSet, findFlagFunc func(name
|
|||
Volumes: volumes,
|
||||
Secrets: secrets,
|
||||
SSHSources: sshsources,
|
||||
OCIHooksDir: ociHooks,
|
||||
}
|
||||
securityOpts, _ := flags.GetStringArray("security-opt")
|
||||
if err := parseSecurityOpts(securityOpts, commonOpts); err != nil {
|
||||
|
@ -195,7 +198,7 @@ func GetAdditionalBuildContext(value string) (define.AdditionalBuildContext, err
|
|||
} else {
|
||||
path, err := filepath.Abs(value)
|
||||
if err != nil {
|
||||
return define.AdditionalBuildContext{}, errors.Wrapf(err, "unable to convert additional build-context %q path to absolute", value)
|
||||
return define.AdditionalBuildContext{}, fmt.Errorf("unable to convert additional build-context %q path to absolute: %w", value, err)
|
||||
}
|
||||
ret.Value = path
|
||||
}
|
||||
|
@ -205,11 +208,11 @@ func GetAdditionalBuildContext(value string) (define.AdditionalBuildContext, err
|
|||
func parseSecurityOpts(securityOpts []string, commonOpts *define.CommonBuildOptions) error {
|
||||
for _, opt := range securityOpts {
|
||||
if opt == "no-new-privileges" {
|
||||
return errors.Errorf("no-new-privileges is not supported")
|
||||
return errors.New("no-new-privileges is not supported")
|
||||
}
|
||||
con := strings.SplitN(opt, "=", 2)
|
||||
if len(con) != 2 {
|
||||
return errors.Errorf("invalid --security-opt name=value pair: %q", opt)
|
||||
return fmt.Errorf("invalid --security-opt name=value pair: %q", opt)
|
||||
}
|
||||
|
||||
switch con[0] {
|
||||
|
@ -220,7 +223,7 @@ func parseSecurityOpts(securityOpts []string, commonOpts *define.CommonBuildOpti
|
|||
case "seccomp":
|
||||
commonOpts.SeccompProfilePath = con[1]
|
||||
default:
|
||||
return errors.Errorf("invalid --security-opt 2: %q", opt)
|
||||
return fmt.Errorf("invalid --security-opt 2: %q", opt)
|
||||
}
|
||||
|
||||
}
|
||||
|
@ -230,11 +233,11 @@ func parseSecurityOpts(securityOpts []string, commonOpts *define.CommonBuildOpti
|
|||
commonOpts.SeccompProfilePath = SeccompOverridePath
|
||||
} else {
|
||||
if !os.IsNotExist(err) {
|
||||
return errors.WithStack(err)
|
||||
return err
|
||||
}
|
||||
if _, err := os.Stat(SeccompDefaultPath); err != nil {
|
||||
if !os.IsNotExist(err) {
|
||||
return errors.WithStack(err)
|
||||
return err
|
||||
}
|
||||
} else {
|
||||
commonOpts.SeccompProfilePath = SeccompDefaultPath
|
||||
|
@ -289,10 +292,10 @@ func validateExtraHost(val string) error {
|
|||
// allow for IPv6 addresses in extra hosts by only splitting on first ":"
|
||||
arr := strings.SplitN(val, ":", 2)
|
||||
if len(arr) != 2 || len(arr[0]) == 0 {
|
||||
return errors.Errorf("bad format for add-host: %q", val)
|
||||
return fmt.Errorf("bad format for add-host: %q", val)
|
||||
}
|
||||
if _, err := validateIPAddress(arr[1]); err != nil {
|
||||
return errors.Errorf("invalid IP address in add-host: %q", arr[1])
|
||||
return fmt.Errorf("invalid IP address in add-host: %q", arr[1])
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
@ -304,7 +307,7 @@ func validateIPAddress(val string) (string, error) {
|
|||
if ip != nil {
|
||||
return ip.String(), nil
|
||||
}
|
||||
return "", errors.Errorf("%s is not an ip address", val)
|
||||
return "", fmt.Errorf("%s is not an ip address", val)
|
||||
}
|
||||
|
||||
// SystemContextFromOptions returns a SystemContext populated with values
|
||||
|
@ -393,7 +396,7 @@ func SystemContextFromFlagSet(flags *pflag.FlagSet, findFlagFunc func(name strin
|
|||
return nil, err
|
||||
}
|
||||
if len(specs) == 0 || specs[0] == "" {
|
||||
return nil, errors.Errorf("unable to parse --platform value %v", specs)
|
||||
return nil, fmt.Errorf("unable to parse --platform value %v", specs)
|
||||
}
|
||||
platform := specs[0]
|
||||
os, arch, variant, err := Platform(platform)
|
||||
|
@ -401,7 +404,7 @@ func SystemContextFromFlagSet(flags *pflag.FlagSet, findFlagFunc func(name strin
|
|||
return nil, err
|
||||
}
|
||||
if ctx.OSChoice != "" || ctx.ArchitectureChoice != "" || ctx.VariantChoice != "" {
|
||||
return nil, errors.Errorf("invalid --platform may not be used with --os, --arch, or --variant")
|
||||
return nil, errors.New("invalid --platform may not be used with --os, --arch, or --variant")
|
||||
}
|
||||
ctx.OSChoice = os
|
||||
ctx.ArchitectureChoice = arch
|
||||
|
@ -428,7 +431,7 @@ func PlatformFromOptions(c *cobra.Command) (os, arch string, err error) {
|
|||
return "", "", err
|
||||
}
|
||||
if len(platforms) < 1 {
|
||||
return "", "", errors.Errorf("invalid platform syntax for --platform (use OS/ARCH[/VARIANT])")
|
||||
return "", "", errors.New("invalid platform syntax for --platform (use OS/ARCH[/VARIANT])")
|
||||
}
|
||||
return platforms[0].OS, platforms[0].Arch, nil
|
||||
}
|
||||
|
@ -458,14 +461,14 @@ func PlatformsFromOptions(c *cobra.Command) (platforms []struct{ OS, Arch, Varia
|
|||
platforms = nil
|
||||
platformSpecs, err := c.Flags().GetStringSlice("platform")
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "unable to parse platform")
|
||||
return nil, fmt.Errorf("unable to parse platform: %w", err)
|
||||
}
|
||||
if os != "" || arch != "" || variant != "" {
|
||||
return nil, errors.Errorf("invalid --platform may not be used with --os, --arch, or --variant")
|
||||
return nil, fmt.Errorf("invalid --platform may not be used with --os, --arch, or --variant")
|
||||
}
|
||||
for _, pf := range platformSpecs {
|
||||
if os, arch, variant, err = Platform(pf); err != nil {
|
||||
return nil, errors.Wrapf(err, "unable to parse platform %q", pf)
|
||||
return nil, fmt.Errorf("unable to parse platform %q: %w", pf, err)
|
||||
}
|
||||
platforms = append(platforms, struct{ OS, Arch, Variant string }{os, arch, variant})
|
||||
}
|
||||
|
@ -497,7 +500,7 @@ func Platform(platform string) (os, arch, variant string, err error) {
|
|||
return Platform(DefaultPlatform())
|
||||
}
|
||||
}
|
||||
return "", "", "", errors.Errorf("invalid platform syntax for %q (use OS/ARCH[/VARIANT][,...])", platform)
|
||||
return "", "", "", fmt.Errorf("invalid platform syntax for %q (use OS/ARCH[/VARIANT][,...])", platform)
|
||||
}
|
||||
|
||||
func parseCreds(creds string) (string, string) {
|
||||
|
@ -526,7 +529,7 @@ func AuthConfig(creds string) (*types.DockerAuthConfig, error) {
|
|||
fmt.Print("Password: ")
|
||||
termPassword, err := term.ReadPassword(0)
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "could not read password from terminal")
|
||||
return nil, fmt.Errorf("could not read password from terminal: %w", err)
|
||||
}
|
||||
password = string(termPassword)
|
||||
}
|
||||
|
@ -567,7 +570,7 @@ func GetBuildOutput(buildOutput string) (define.BuildOutputOption, error) {
|
|||
switch arr[0] {
|
||||
case "type":
|
||||
if typeSelected {
|
||||
return define.BuildOutputOption{}, fmt.Errorf("Duplicate %q not supported", arr[0])
|
||||
return define.BuildOutputOption{}, fmt.Errorf("duplicate %q not supported", arr[0])
|
||||
}
|
||||
typeSelected = true
|
||||
if arr[1] == "local" {
|
||||
|
@ -579,12 +582,12 @@ func GetBuildOutput(buildOutput string) (define.BuildOutputOption, error) {
|
|||
}
|
||||
case "dest":
|
||||
if pathSelected {
|
||||
return define.BuildOutputOption{}, fmt.Errorf("Duplicate %q not supported", arr[0])
|
||||
return define.BuildOutputOption{}, fmt.Errorf("duplicate %q not supported", arr[0])
|
||||
}
|
||||
pathSelected = true
|
||||
path = arr[1]
|
||||
default:
|
||||
return define.BuildOutputOption{}, fmt.Errorf("Unrecognized key %q in build output option: %q", arr[0], buildOutput)
|
||||
return define.BuildOutputOption{}, fmt.Errorf("unrecognized key %q in build output option: %q", arr[0], buildOutput)
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -609,8 +612,52 @@ func IDMappingOptions(c *cobra.Command, isolation define.Isolation) (usernsOptio
|
|||
return IDMappingOptionsFromFlagSet(c.Flags(), c.PersistentFlags(), c.Flag)
|
||||
}
|
||||
|
||||
// GetAutoOptions returns a AutoUserNsOptions with the settings to setup automatically
|
||||
// a user namespace.
|
||||
func GetAutoOptions(base string) (*storageTypes.AutoUserNsOptions, error) {
|
||||
parts := strings.SplitN(base, ":", 2)
|
||||
if parts[0] != "auto" {
|
||||
return nil, errors.New("wrong user namespace mode")
|
||||
}
|
||||
options := storageTypes.AutoUserNsOptions{}
|
||||
if len(parts) == 1 {
|
||||
return &options, nil
|
||||
}
|
||||
for _, o := range strings.Split(parts[1], ",") {
|
||||
v := strings.SplitN(o, "=", 2)
|
||||
if len(v) != 2 {
|
||||
return nil, fmt.Errorf("invalid option specified: %q", o)
|
||||
}
|
||||
switch v[0] {
|
||||
case "size":
|
||||
s, err := strconv.ParseUint(v[1], 10, 32)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
options.Size = uint32(s)
|
||||
case "uidmapping":
|
||||
mapping, err := storageTypes.ParseIDMapping([]string{v[1]}, nil, "", "")
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
options.AdditionalUIDMappings = append(options.AdditionalUIDMappings, mapping.UIDMap...)
|
||||
case "gidmapping":
|
||||
mapping, err := storageTypes.ParseIDMapping(nil, []string{v[1]}, "", "")
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
options.AdditionalGIDMappings = append(options.AdditionalGIDMappings, mapping.GIDMap...)
|
||||
default:
|
||||
return nil, fmt.Errorf("unknown option specified: %q", v[0])
|
||||
}
|
||||
}
|
||||
return &options, nil
|
||||
}
|
||||
|
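GetAutoOptions, added above, parses the value of --userns=auto with optional size, uidmapping, and gidmapping settings. A small usage sketch assuming the function is exported from buildah's pkg/parse package; the flag value is made up:

package main

import (
	"fmt"

	"github.com/containers/buildah/pkg/parse"
)

func main() {
	// Parse a --userns=auto value; size/uidmapping/gidmapping are optional,
	// e.g. "auto", "auto:size=65536", or "auto:uidmapping=0:1000:1".
	opts, err := parse.GetAutoOptions("auto:size=65536")
	if err != nil {
		fmt.Println(err)
		return
	}
	fmt.Println(opts.Size) // 65536
}
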
||||
// IDMappingOptionsFromFlagSet parses the build options related to user namespaces and ID mapping.
|
||||
func IDMappingOptionsFromFlagSet(flags *pflag.FlagSet, persistentFlags *pflag.FlagSet, findFlagFunc func(name string) *pflag.Flag) (usernsOptions define.NamespaceOptions, idmapOptions *define.IDMappingOptions, err error) {
|
||||
isAuto := false
|
||||
autoOpts := &storageTypes.AutoUserNsOptions{}
|
||||
user := findFlagFunc("userns-uid-map-user").Value.String()
|
||||
group := findFlagFunc("userns-gid-map-group").Value.String()
|
||||
// If only the user or group was specified, use the same value for the
|
||||
|
@ -693,18 +740,27 @@ func IDMappingOptionsFromFlagSet(flags *pflag.FlagSet, persistentFlags *pflag.Fl
|
|||
// user namespaces, override that default.
|
||||
if findFlagFunc("userns").Changed {
|
||||
how := findFlagFunc("userns").Value.String()
|
||||
switch how {
|
||||
case "", "container", "private":
|
||||
usernsOption.Host = false
|
||||
case "host":
|
||||
usernsOption.Host = true
|
||||
default:
|
||||
how = strings.TrimPrefix(how, "ns:")
|
||||
if _, err := os.Stat(how); err != nil {
|
||||
return nil, nil, errors.Wrapf(err, "checking %s namespace", string(specs.UserNamespace))
|
||||
if strings.HasPrefix(how, "auto") {
|
||||
autoOpts, err = GetAutoOptions(how)
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
isAuto = true
|
||||
usernsOption.Host = false
|
||||
} else {
|
||||
switch how {
|
||||
case "", "container", "private":
|
||||
usernsOption.Host = false
|
||||
case "host":
|
||||
usernsOption.Host = true
|
||||
default:
|
||||
how = strings.TrimPrefix(how, "ns:")
|
||||
if _, err := os.Stat(how); err != nil {
|
||||
return nil, nil, fmt.Errorf("checking %s namespace: %w", string(specs.UserNamespace), err)
|
||||
}
|
||||
logrus.Debugf("setting %q namespace to %q", string(specs.UserNamespace), how)
|
||||
usernsOption.Path = how
|
||||
}
|
||||
logrus.Debugf("setting %q namespace to %q", string(specs.UserNamespace), how)
|
||||
usernsOption.Path = how
|
||||
}
|
||||
}
|
||||
usernsOptions = define.NamespaceOptions{usernsOption}
|
||||
|
@ -712,13 +768,15 @@ func IDMappingOptionsFromFlagSet(flags *pflag.FlagSet, persistentFlags *pflag.Fl
|
|||
// If the user requested that we use the host namespace, but also that
|
||||
// we use mappings, that's not going to work.
|
||||
if (len(uidmap) != 0 || len(gidmap) != 0) && usernsOption.Host {
|
||||
return nil, nil, errors.Errorf("can not specify ID mappings while using host's user namespace")
|
||||
return nil, nil, fmt.Errorf("can not specify ID mappings while using host's user namespace")
|
||||
}
|
||||
return usernsOptions, &define.IDMappingOptions{
|
||||
HostUIDMapping: usernsOption.Host,
|
||||
HostGIDMapping: usernsOption.Host,
|
||||
UIDMap: uidmap,
|
||||
GIDMap: gidmap,
|
||||
AutoUserNs: isAuto,
|
||||
AutoUserNsOpts: *autoOpts,
|
||||
}, nil
|
||||
}
|
||||
|
||||
|
@ -726,20 +784,20 @@ func parseIDMap(spec []string) (m [][3]uint32, err error) {
|
|||
for _, s := range spec {
|
||||
args := strings.FieldsFunc(s, func(r rune) bool { return !unicode.IsDigit(r) })
|
||||
if len(args)%3 != 0 {
|
||||
return nil, errors.Errorf("mapping %q is not in the form containerid:hostid:size[,...]", s)
|
||||
return nil, fmt.Errorf("mapping %q is not in the form containerid:hostid:size[,...]", s)
|
||||
}
|
||||
for len(args) >= 3 {
|
||||
cid, err := strconv.ParseUint(args[0], 10, 32)
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "error parsing container ID %q from mapping %q as a number", args[0], s)
|
||||
return nil, fmt.Errorf("error parsing container ID %q from mapping %q as a number: %w", args[0], s, err)
|
||||
}
|
||||
hostid, err := strconv.ParseUint(args[1], 10, 32)
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "error parsing host ID %q from mapping %q as a number", args[1], s)
|
||||
return nil, fmt.Errorf("error parsing host ID %q from mapping %q as a number: %w", args[1], s, err)
|
||||
}
|
||||
size, err := strconv.ParseUint(args[2], 10, 32)
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "error parsing %q from mapping %q as a number", args[2], s)
|
||||
return nil, fmt.Errorf("error parsing %q from mapping %q as a number: %w", args[2], s, err)
|
||||
}
|
||||
m = append(m, [3]uint32{uint32(cid), uint32(hostid), uint32(size)})
|
||||
args = args[3:]
|
||||
|
@ -793,7 +851,7 @@ func NamespaceOptionsFromFlagSet(flags *pflag.FlagSet, findFlagFunc func(name st
|
|||
// if not a path we assume it is a comma separated network list, see setupNamespaces() in run_linux.go
|
||||
if filepath.IsAbs(how) || what != string(specs.NetworkNamespace) {
|
||||
if _, err := os.Stat(how); err != nil {
|
||||
return nil, define.NetworkDefault, errors.Wrapf(err, "checking %s namespace", what)
|
||||
return nil, define.NetworkDefault, fmt.Errorf("checking %s namespace: %w", what, err)
|
||||
}
|
||||
}
|
||||
policy = define.NetworkEnabled
|
||||
|
@ -819,7 +877,7 @@ func defaultIsolation() (define.Isolation, error) {
|
|||
case "chroot":
|
||||
return define.IsolationChroot, nil
|
||||
default:
|
||||
return 0, errors.Errorf("unrecognized $BUILDAH_ISOLATION value %q", isolation)
|
||||
return 0, fmt.Errorf("unrecognized $BUILDAH_ISOLATION value %q", isolation)
|
||||
}
|
||||
}
|
||||
if unshare.IsRootless() {
|
||||
|
@ -839,7 +897,7 @@ func IsolationOption(isolation string) (define.Isolation, error) {
|
|||
case "chroot":
|
||||
return define.IsolationChroot, nil
|
||||
default:
|
||||
return 0, errors.Errorf("unrecognized isolation type %q", isolation)
|
||||
return 0, fmt.Errorf("unrecognized isolation type %q", isolation)
|
||||
}
|
||||
}
|
||||
return defaultIsolation()
|
||||
|
@ -859,7 +917,7 @@ func Device(device string) (string, string, string, error) {
|
|||
switch len(arr) {
|
||||
case 3:
|
||||
if !isValidDeviceMode(arr[2]) {
|
||||
return "", "", "", errors.Errorf("invalid device mode: %s", arr[2])
|
||||
return "", "", "", fmt.Errorf("invalid device mode: %s", arr[2])
|
||||
}
|
||||
permissions = arr[2]
|
||||
fallthrough
|
||||
|
@ -868,7 +926,7 @@ func Device(device string) (string, string, string, error) {
|
|||
permissions = arr[1]
|
||||
} else {
|
||||
if len(arr[1]) == 0 || arr[1][0] != '/' {
|
||||
return "", "", "", errors.Errorf("invalid device mode: %s", arr[1])
|
||||
return "", "", "", fmt.Errorf("invalid device mode: %s", arr[1])
|
||||
}
|
||||
dst = arr[1]
|
||||
}
|
||||
|
@ -880,7 +938,7 @@ func Device(device string) (string, string, string, error) {
|
|||
}
|
||||
fallthrough
|
||||
default:
|
||||
return "", "", "", errors.Errorf("invalid device specification: %s", device)
|
||||
return "", "", "", fmt.Errorf("invalid device specification: %s", device)
|
||||
}
|
||||
|
||||
if dst == "" {
|
||||
|
@ -918,7 +976,7 @@ func GetTempDir() string {
|
|||
|
||||
// Secrets parses the --secret flag
|
||||
func Secrets(secrets []string) (map[string]define.Secret, error) {
|
||||
invalidSyntax := errors.Errorf("incorrect secret flag format: should be --secret id=foo,src=bar[,env=ENV,type=file|env]")
|
||||
invalidSyntax := fmt.Errorf("incorrect secret flag format: should be --secret id=foo,src=bar[,env=ENV,type=file|env]")
|
||||
parsed := make(map[string]define.Secret)
|
||||
for _, secret := range secrets {
|
||||
tokens := strings.Split(secret, ",")
|
||||
|
@ -957,11 +1015,11 @@ func Secrets(secrets []string) (map[string]define.Secret, error) {
|
|||
if typ == "file" {
|
||||
fullPath, err := filepath.Abs(src)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "could not parse secrets")
|
||||
return nil, fmt.Errorf("could not parse secrets: %w", err)
|
||||
}
|
||||
_, err = os.Stat(fullPath)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "could not parse secrets")
|
||||
return nil, fmt.Errorf("could not parse secrets: %w", err)
|
||||
}
|
||||
src = fullPath
|
||||
}
|
||||
|
|
|
@ -1,14 +1,15 @@
|
|||
//go:build linux || darwin
|
||||
// +build linux darwin
|
||||
|
||||
package parse
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"os"
|
||||
"path/filepath"
|
||||
|
||||
"github.com/containers/buildah/define"
|
||||
"github.com/opencontainers/runc/libcontainer/devices"
|
||||
"github.com/pkg/errors"
|
||||
)
|
||||
|
||||
func DeviceFromPath(device string) (define.ContainerDevices, error) {
|
||||
|
@ -19,13 +20,13 @@ func DeviceFromPath(device string) (define.ContainerDevices, error) {
|
|||
}
|
||||
srcInfo, err := os.Stat(src)
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "error getting info of source device %s", src)
|
||||
return nil, fmt.Errorf("error getting info of source device %s: %w", src, err)
|
||||
}
|
||||
|
||||
if !srcInfo.IsDir() {
|
||||
dev, err := devices.DeviceFromPath(src, permissions)
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "%s is not a valid device", src)
|
||||
return nil, fmt.Errorf("%s is not a valid device: %w", src, err)
|
||||
}
|
||||
dev.Path = dst
|
||||
device := define.BuildahDevice{Device: *dev, Source: src, Destination: dst}
|
||||
|
@ -36,7 +37,7 @@ func DeviceFromPath(device string) (define.ContainerDevices, error) {
|
|||
// If source device is a directory
|
||||
srcDevices, err := devices.GetDevices(src)
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "error getting source devices from directory %s", src)
|
||||
return nil, fmt.Errorf("error getting source devices from directory %s: %w", src, err)
|
||||
}
|
||||
for _, d := range srcDevices {
|
||||
d.Path = filepath.Join(dst, filepath.Base(d.Path))
|
||||
|
|
|
@ -1,10 +1,12 @@
|
|||
//go:build !linux && !darwin
|
||||
// +build !linux,!darwin
|
||||
|
||||
package parse
|
||||
|
||||
import (
|
||||
"errors"
|
||||
|
||||
"github.com/containers/buildah/define"
|
||||
"github.com/pkg/errors"
|
||||
)
|
||||
|
||||
func getDefaultProcessLimits() []string {
|
||||
|
@ -12,5 +14,5 @@ func getDefaultProcessLimits() []string {
|
|||
}
|
||||
|
||||
func DeviceFromPath(device string) (define.ContainerDevices, error) {
|
||||
return nil, errors.Errorf("devices not supported")
|
||||
return nil, errors.New("devices not supported")
|
||||
}
|
||||
|
|
|
@ -1,12 +1,12 @@
|
|||
//go:build !windows
|
||||
// +build !windows
|
||||
|
||||
package rusage
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"syscall"
|
||||
"time"
|
||||
|
||||
"github.com/pkg/errors"
|
||||
)
|
||||
|
||||
func mkduration(tv syscall.Timeval) time.Duration {
|
||||
|
@ -17,7 +17,7 @@ func get() (Rusage, error) {
|
|||
var rusage syscall.Rusage
|
||||
err := syscall.Getrusage(syscall.RUSAGE_CHILDREN, &rusage)
|
||||
if err != nil {
|
||||
return Rusage{}, errors.Wrapf(err, "error getting resource usage")
|
||||
return Rusage{}, fmt.Errorf("error getting resource usage: %w", err)
|
||||
}
|
||||
r := Rusage{
|
||||
Date: time.Now(),
|
||||
|
|
|
@ -1,15 +1,15 @@
|
|||
//go:build windows
|
||||
// +build windows
|
||||
|
||||
package rusage
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"syscall"
|
||||
|
||||
"github.com/pkg/errors"
|
||||
)
|
||||
|
||||
func get() (Rusage, error) {
|
||||
return Rusage{}, errors.Wrapf(syscall.ENOTSUP, "error getting resource usage")
|
||||
return Rusage{}, fmt.Errorf("error getting resource usage: %w", syscall.ENOTSUP)
|
||||
}
|
||||
|
||||
// Supported returns true if resource usage counters are supported on this OS.
|
||||
|
|
|
@ -1,6 +1,8 @@
|
|||
package sshagent
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"net"
|
||||
|
@ -10,7 +12,6 @@ import (
|
|||
"time"
|
||||
|
||||
"github.com/opencontainers/selinux/go-selinux"
|
||||
"github.com/pkg/errors"
|
||||
"github.com/sirupsen/logrus"
|
||||
"golang.org/x/crypto/ssh"
|
||||
"golang.org/x/crypto/ssh/agent"
|
||||
|
@ -40,7 +41,7 @@ func newAgentServerKeyring(keys []interface{}) (*AgentServer, error) {
|
|||
a := agent.NewKeyring()
|
||||
for _, k := range keys {
|
||||
if err := a.Add(agent.AddedKey{PrivateKey: k}); err != nil {
|
||||
return nil, errors.Wrap(err, "failed to create ssh agent")
|
||||
return nil, fmt.Errorf("failed to create ssh agent: %w", err)
|
||||
}
|
||||
}
|
||||
return &AgentServer{
|
||||
|
@ -216,7 +217,7 @@ func NewSource(paths []string) (*Source, error) {
|
|||
|
||||
k, err := ssh.ParseRawPrivateKey(dt)
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "cannot parse ssh key")
|
||||
return nil, fmt.Errorf("cannot parse ssh key: %w", err)
|
||||
}
|
||||
keys = append(keys, k)
|
||||
}
|
||||
|
|
|
@ -1,12 +1,11 @@
|
|||
package util
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
|
||||
"github.com/pkg/errors"
|
||||
)
|
||||
|
||||
// Mirrors path to a tmpfile if path points to a
|
||||
|
@ -44,7 +43,7 @@ func DiscoverContainerfile(path string) (foundCtrFile string, err error) {
|
|||
// Test for existence of the file
|
||||
target, err := os.Stat(path)
|
||||
if err != nil {
|
||||
return "", errors.Wrap(err, "discovering Containerfile")
|
||||
return "", fmt.Errorf("discovering Containerfile: %w", err)
|
||||
}
|
||||
|
||||
switch mode := target.Mode(); {
|
||||
|
@ -61,7 +60,7 @@ func DiscoverContainerfile(path string) (foundCtrFile string, err error) {
|
|||
// Test for existence of the Dockerfile file
|
||||
file, err = os.Stat(ctrfile)
|
||||
if err != nil {
|
||||
return "", errors.Wrap(err, "cannot find Containerfile or Dockerfile in context directory")
|
||||
return "", fmt.Errorf("cannot find Containerfile or Dockerfile in context directory: %w", err)
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -69,7 +68,7 @@ func DiscoverContainerfile(path string) (foundCtrFile string, err error) {
|
|||
if mode := file.Mode(); mode.IsRegular() {
|
||||
foundCtrFile = ctrfile
|
||||
} else {
|
||||
return "", errors.Errorf("assumed Containerfile %q is not a file", ctrfile)
|
||||
return "", fmt.Errorf("assumed Containerfile %q is not a file", ctrfile)
|
||||
}
|
||||
|
||||
case mode.IsRegular():
|
||||
|
|
|
@ -2,6 +2,7 @@ package buildah
|
|||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"io"
|
||||
"time"
|
||||
|
||||
|
@ -11,7 +12,6 @@ import (
|
|||
"github.com/containers/image/v5/types"
|
||||
encconfig "github.com/containers/ocicrypt/config"
|
||||
"github.com/containers/storage"
|
||||
"github.com/pkg/errors"
|
||||
)
|
||||
|
||||
// PullOptions can be used to alter how an image is copied in from somewhere.
|
||||
|
@ -69,7 +69,6 @@ func Pull(ctx context.Context, imageName string, options PullOptions) (imageID s
|
|||
libimageOptions.MaxRetries = &retries
|
||||
}
|
||||
|
||||
|
||||
pullPolicy, err := config.ParsePullPolicy(options.PullPolicy.String())
|
||||
if err != nil {
|
||||
return "", err
|
||||
|
@ -94,7 +93,7 @@ func Pull(ctx context.Context, imageName string, options PullOptions) (imageID s
|
|||
}
|
||||
|
||||
if len(pulledImages) == 0 {
|
||||
return "", errors.Errorf("internal error pulling %s: no image pulled and no error", imageName)
|
||||
return "", fmt.Errorf("internal error pulling %s: no image pulled and no error", imageName)
|
||||
}
|
||||
|
||||
return pulledImages[0].ID(), nil
|
||||
|
|
|
@ -17,7 +17,6 @@ import (
|
|||
"github.com/containers/storage"
|
||||
"github.com/containers/storage/pkg/archive"
|
||||
digest "github.com/opencontainers/go-digest"
|
||||
"github.com/pkg/errors"
|
||||
"github.com/sirupsen/logrus"
|
||||
)
|
||||
|
||||
|
@ -33,7 +32,7 @@ func cacheLookupReferenceFunc(directory string, compress types.LayerCompression)
|
|||
}
|
||||
ref, err := blobcache.NewBlobCache(ref, directory, compress)
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "error using blobcache %q", directory)
|
||||
return nil, fmt.Errorf("error using blobcache %q: %w", directory, err)
|
||||
}
|
||||
return ref, nil
|
||||
}
|
||||
|
@ -136,7 +135,7 @@ func Push(ctx context.Context, image string, dest types.ImageReference, options
|
|||
|
||||
manifestDigest, err := manifest.Digest(manifestBytes)
|
||||
if err != nil {
|
||||
return nil, "", errors.Wrapf(err, "error computing digest of manifest of new image %q", transports.ImageName(dest))
|
||||
return nil, "", fmt.Errorf("error computing digest of manifest of new image %q: %w", transports.ImageName(dest), err)
|
||||
}
|
||||
|
||||
var ref reference.Canonical
|
||||
|
|
File diff suppressed because it is too large
|
@ -0,0 +1,548 @@
|
|||
//go:build freebsd
|
||||
// +build freebsd
|
||||
|
||||
package buildah
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"os/exec"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
"syscall"
|
||||
"unsafe"
|
||||
|
||||
"github.com/containers/buildah/bind"
|
||||
"github.com/containers/buildah/chroot"
|
||||
"github.com/containers/buildah/copier"
|
||||
"github.com/containers/buildah/define"
|
||||
"github.com/containers/buildah/internal"
|
||||
"github.com/containers/buildah/pkg/jail"
|
||||
"github.com/containers/buildah/util"
|
||||
"github.com/containers/common/libnetwork/resolvconf"
|
||||
nettypes "github.com/containers/common/libnetwork/types"
|
||||
"github.com/containers/common/pkg/config"
|
||||
"github.com/containers/storage/pkg/idtools"
|
||||
"github.com/containers/storage/pkg/stringid"
|
||||
"github.com/docker/go-units"
|
||||
"github.com/opencontainers/runtime-spec/specs-go"
|
||||
spec "github.com/opencontainers/runtime-spec/specs-go"
|
||||
"github.com/opencontainers/runtime-tools/generate"
|
||||
"github.com/sirupsen/logrus"
|
||||
"golang.org/x/sys/unix"
|
||||
)
|
||||
|
||||
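// FreeBSD procctl(2) identifiers: P_PID/P_PGID select the target by process ID
// or process group, and PROC_REAP_ACQUIRE/RELEASE acquire or release reaper
// status for the calling process.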
const (
|
||||
P_PID = 0
|
||||
P_PGID = 2
|
||||
PROC_REAP_ACQUIRE = 2
|
||||
PROC_REAP_RELEASE = 3
|
||||
)
|
||||
|
||||
var (
|
||||
// We don't want to remove destinations with /etc, /dev as
// rootfs already contains these files and unionfs will create
// a `whiteout`, i.e. `.wh`, file on removal of overlapping
// files from these directories. Everything other than these
// will be cleaned up.
|
||||
nonCleanablePrefixes = []string{
|
||||
"/etc", "/dev",
|
||||
}
|
||||
)
|
||||
|
||||
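// procctl is a thin wrapper around the FreeBSD procctl(2) system call.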
func procctl(idtype int, id int, cmd int, arg *byte) error {
|
||||
_, _, e1 := unix.Syscall6(
|
||||
unix.SYS_PROCCTL, uintptr(idtype), uintptr(id),
|
||||
uintptr(cmd), uintptr(unsafe.Pointer(arg)), 0, 0)
|
||||
if e1 != 0 {
|
||||
return unix.Errno(e1)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func setChildProcess() error {
|
||||
if err := procctl(P_PID, unix.Getpid(), PROC_REAP_ACQUIRE, nil); err != nil {
|
||||
fmt.Fprintf(os.Stderr, "procctl(PROC_REAP_ACQUIRE): %v\n", err)
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (b *Builder) Run(command []string, options RunOptions) error {
|
||||
p, err := ioutil.TempDir("", Package)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
// On some hosts like AH, /tmp is a symlink and we need an
|
||||
// absolute path.
|
||||
path, err := filepath.EvalSymlinks(p)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
logrus.Debugf("using %q to hold bundle data", path)
|
||||
defer func() {
|
||||
if err2 := os.RemoveAll(path); err2 != nil {
|
||||
logrus.Errorf("error removing %q: %v", path, err2)
|
||||
}
|
||||
}()
|
||||
|
||||
gp, err := generate.New("freebsd")
|
||||
if err != nil {
|
||||
return fmt.Errorf("error generating new 'freebsd' runtime spec: %w", err)
|
||||
}
|
||||
g := &gp
|
||||
|
||||
isolation := options.Isolation
|
||||
if isolation == IsolationDefault {
|
||||
isolation = b.Isolation
|
||||
if isolation == IsolationDefault {
|
||||
isolation = IsolationOCI
|
||||
}
|
||||
}
|
||||
if err := checkAndOverrideIsolationOptions(isolation, &options); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// hardwire the environment to match docker build to avoid subtle and hard-to-debug differences due to containers.conf
|
||||
b.configureEnvironment(g, options, []string{"PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin"})
|
||||
|
||||
if b.CommonBuildOpts == nil {
|
||||
return fmt.Errorf("invalid format on container you must recreate the container")
|
||||
}
|
||||
|
||||
if err := addCommonOptsToSpec(b.CommonBuildOpts, g); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if options.WorkingDir != "" {
|
||||
g.SetProcessCwd(options.WorkingDir)
|
||||
} else if b.WorkDir() != "" {
|
||||
g.SetProcessCwd(b.WorkDir())
|
||||
}
|
||||
mountPoint, err := b.Mount(b.MountLabel)
|
||||
if err != nil {
|
||||
return fmt.Errorf("error mounting container %q: %w", b.ContainerID, err)
|
||||
}
|
||||
defer func() {
|
||||
if err := b.Unmount(); err != nil {
|
||||
logrus.Errorf("error unmounting container: %v", err)
|
||||
}
|
||||
}()
|
||||
g.SetRootPath(mountPoint)
|
||||
if len(command) > 0 {
|
||||
command = runLookupPath(g, command)
|
||||
g.SetProcessArgs(command)
|
||||
} else {
|
||||
g.SetProcessArgs(nil)
|
||||
}
|
||||
|
||||
setupTerminal(g, options.Terminal, options.TerminalSize)
|
||||
|
||||
configureNetwork, configureNetworks, err := b.configureNamespaces(g, &options)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
containerName := Package + "-" + filepath.Base(path)
|
||||
if configureNetwork {
|
||||
g.AddAnnotation("org.freebsd.parentJail", containerName+"-vnet")
|
||||
}
|
||||
|
||||
homeDir, err := b.configureUIDGID(g, mountPoint, options)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Now grab the spec from the generator. Set the generator to nil so that future contributors
|
||||
// will quickly be able to tell that they're supposed to be modifying the spec directly from here.
|
||||
spec := g.Config
|
||||
g = nil
|
||||
|
||||
// Set the seccomp configuration using the specified profile name. Some syscalls are
|
||||
// allowed if certain capabilities are to be granted (example: CAP_SYS_CHROOT and chroot),
|
||||
// so we sorted out the capabilities lists first.
|
||||
if err = setupSeccomp(spec, b.CommonBuildOpts.SeccompProfilePath); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
uid, gid := spec.Process.User.UID, spec.Process.User.GID
|
||||
idPair := &idtools.IDPair{UID: int(uid), GID: int(gid)}
|
||||
|
||||
mode := os.FileMode(0755)
|
||||
coptions := copier.MkdirOptions{
|
||||
ChownNew: idPair,
|
||||
ChmodNew: &mode,
|
||||
}
|
||||
if err := copier.Mkdir(mountPoint, filepath.Join(mountPoint, spec.Process.Cwd), coptions); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
bindFiles := make(map[string]string)
|
||||
volumes := b.Volumes()
|
||||
|
||||
// Figure out who owns files that will appear to be owned by UID/GID 0 in the container.
|
||||
rootUID, rootGID, err := util.GetHostRootIDs(spec)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
rootIDPair := &idtools.IDPair{UID: int(rootUID), GID: int(rootGID)}
|
||||
|
||||
hostFile := ""
|
||||
if !options.NoHosts && !contains(volumes, config.DefaultHostsFile) && options.ConfigureNetwork != define.NetworkDisabled {
|
||||
hostFile, err = b.generateHosts(path, rootIDPair, mountPoint)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
bindFiles[config.DefaultHostsFile] = hostFile
|
||||
}
|
||||
|
||||
if !contains(volumes, resolvconf.DefaultResolvConf) && options.ConfigureNetwork != define.NetworkDisabled && !(len(b.CommonBuildOpts.DNSServers) == 1 && strings.ToLower(b.CommonBuildOpts.DNSServers[0]) == "none") {
|
||||
resolvFile, err := b.addResolvConf(path, rootIDPair, b.CommonBuildOpts.DNSServers, b.CommonBuildOpts.DNSSearch, b.CommonBuildOpts.DNSOptions, nil)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
bindFiles[resolvconf.DefaultResolvConf] = resolvFile
|
||||
}
|
||||
|
||||
runMountInfo := runMountInfo{
|
||||
ContextDir: options.ContextDir,
|
||||
Secrets: options.Secrets,
|
||||
SSHSources: options.SSHSources,
|
||||
StageMountPoints: options.StageMountPoints,
|
||||
SystemContext: options.SystemContext,
|
||||
}
|
||||
|
||||
runArtifacts, err := b.setupMounts(mountPoint, spec, path, options.Mounts, bindFiles, volumes, b.CommonBuildOpts.Volumes, options.RunMounts, runMountInfo)
|
||||
if err != nil {
|
||||
return fmt.Errorf("error resolving mountpoints for container %q: %w", b.ContainerID, err)
|
||||
}
|
||||
if runArtifacts.SSHAuthSock != "" {
|
||||
sshenv := "SSH_AUTH_SOCK=" + runArtifacts.SSHAuthSock
|
||||
spec.Process.Env = append(spec.Process.Env, sshenv)
|
||||
}
|
||||
|
||||
// following run was called from `buildah run`
|
||||
// and some images were mounted for this run
|
||||
// add them to cleanup artifacts
|
||||
if len(options.ExternalImageMounts) > 0 {
|
||||
runArtifacts.MountedImages = append(runArtifacts.MountedImages, options.ExternalImageMounts...)
|
||||
}
|
||||
|
||||
defer func() {
|
||||
if err := b.cleanupRunMounts(options.SystemContext, mountPoint, runArtifacts); err != nil {
|
||||
options.Logger.Errorf("unable to cleanup run mounts %v", err)
|
||||
}
|
||||
}()
|
||||
|
||||
defer b.cleanupTempVolumes()
|
||||
|
||||
// If we are creating a network, make the vnet here so that we
|
||||
// can execute the OCI runtime inside it.
|
||||
if configureNetwork {
|
||||
mynetns := containerName + "-vnet"
|
||||
|
||||
jconf := jail.NewConfig()
|
||||
jconf.Set("name", mynetns)
|
||||
jconf.Set("vnet", jail.NEW)
|
||||
jconf.Set("children.max", 1)
|
||||
jconf.Set("persist", true)
|
||||
jconf.Set("enforce_statfs", 0)
|
||||
jconf.Set("devfs_ruleset", 4)
|
||||
jconf.Set("allow.raw_sockets", true)
|
||||
jconf.Set("allow.mount", true)
|
||||
jconf.Set("allow.mount.devfs", true)
|
||||
jconf.Set("allow.mount.nullfs", true)
|
||||
jconf.Set("allow.mount.fdescfs", true)
|
||||
jconf.Set("securelevel", -1)
|
||||
netjail, err := jail.Create(jconf)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer func() {
|
||||
jconf := jail.NewConfig()
|
||||
jconf.Set("persist", false)
|
||||
err2 := netjail.Set(jconf)
|
||||
if err2 != nil {
|
||||
logrus.Errorf("error releasing vnet jail %q: %v", mynetns, err2)
|
||||
}
|
||||
}()
|
||||
}
|
||||
|
||||
switch isolation {
|
||||
case IsolationOCI:
|
||||
var moreCreateArgs []string
|
||||
if options.NoPivot {
|
||||
moreCreateArgs = []string{"--no-pivot"}
|
||||
} else {
|
||||
moreCreateArgs = nil
|
||||
}
|
||||
err = b.runUsingRuntimeSubproc(isolation, options, configureNetwork, configureNetworks, moreCreateArgs, spec, mountPoint, path, containerName, b.Container, hostFile)
|
||||
case IsolationChroot:
|
||||
err = chroot.RunUsingChroot(spec, path, homeDir, options.Stdin, options.Stdout, options.Stderr)
|
||||
default:
|
||||
err = errors.New("don't know how to run this command")
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
||||
func addCommonOptsToSpec(commonOpts *define.CommonBuildOptions, g *generate.Generator) error {
|
||||
defaultContainerConfig, err := config.Default()
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to get container config: %w", err)
|
||||
}
|
||||
// Other process resource limits
|
||||
if err := addRlimits(commonOpts.Ulimit, g, defaultContainerConfig.Containers.DefaultUlimits); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
logrus.Debugf("Resources: %#v", commonOpts)
|
||||
return nil
|
||||
}
|
||||
|
||||
// setupSpecialMountSpecChanges creates special mounts depending on the
// namespaces - nothing yet for FreeBSD.
|
||||
func setupSpecialMountSpecChanges(spec *spec.Spec, shmSize string) ([]specs.Mount, error) {
|
||||
return spec.Mounts, nil
|
||||
}
|
||||
|
||||
func (b *Builder) getCacheMount(tokens []string, stageMountPoints map[string]internal.StageMountDetails, idMaps IDMaps) (*spec.Mount, []string, error) {
|
||||
return nil, nil, errors.New("cache mounts not supported on freebsd")
|
||||
}
|
||||
|
||||
func (b *Builder) runSetupVolumeMounts(mountLabel string, volumeMounts []string, optionMounts []specs.Mount, idMaps IDMaps) (mounts []specs.Mount, Err error) {
|
||||
// Make sure the overlay directory is clean before running
|
||||
_, err := b.store.ContainerDirectory(b.ContainerID)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("error looking up container directory for %s: %w", b.ContainerID, err)
|
||||
}
|
||||
|
||||
parseMount := func(mountType, host, container string, options []string) (specs.Mount, error) {
|
||||
var foundrw, foundro bool
|
||||
for _, opt := range options {
|
||||
switch opt {
|
||||
case "rw":
|
||||
foundrw = true
|
||||
case "ro":
|
||||
foundro = true
|
||||
}
|
||||
}
|
||||
if !foundrw && !foundro {
|
||||
options = append(options, "rw")
|
||||
}
|
||||
if mountType == "bind" || mountType == "rbind" {
|
||||
mountType = "nullfs"
|
||||
}
|
||||
return specs.Mount{
|
||||
Destination: container,
|
||||
Type: mountType,
|
||||
Source: host,
|
||||
Options: options,
|
||||
}, nil
|
||||
}
|
||||
|
||||
// Bind mount volumes specified for this particular Run() invocation
|
||||
for _, i := range optionMounts {
|
||||
logrus.Debugf("setting up mounted volume at %q", i.Destination)
|
||||
mount, err := parseMount(i.Type, i.Source, i.Destination, i.Options)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
mounts = append(mounts, mount)
|
||||
}
|
||||
// Bind mount volumes given by the user when the container was created
|
||||
for _, i := range volumeMounts {
|
||||
var options []string
|
||||
spliti := strings.Split(i, ":")
|
||||
if len(spliti) > 2 {
|
||||
options = strings.Split(spliti[2], ",")
|
||||
}
|
||||
options = append(options, "bind")
|
||||
mount, err := parseMount("bind", spliti[0], spliti[1], options)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
mounts = append(mounts, mount)
|
||||
}
|
||||
return mounts, nil
|
||||
}
|
||||
|
||||
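// setupCapabilities is a no-op here: FreeBSD jails do not use Linux-style capability sets.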
func setupCapabilities(g *generate.Generator, defaultCapabilities, adds, drops []string) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (b *Builder) runConfigureNetwork(pid int, isolation define.Isolation, options RunOptions, configureNetworks []string, containerName string) (teardown func(), netStatus map[string]nettypes.StatusBlock, err error) {
|
||||
//if isolation == IsolationOCIRootless {
|
||||
//return setupRootlessNetwork(pid)
|
||||
//}
|
||||
|
||||
if len(configureNetworks) == 0 {
|
||||
configureNetworks = []string{b.NetworkInterface.DefaultNetworkName()}
|
||||
}
|
||||
logrus.Debugf("configureNetworks: %v", configureNetworks)
|
||||
|
||||
mynetns := containerName + "-vnet"
|
||||
|
||||
networks := make(map[string]nettypes.PerNetworkOptions, len(configureNetworks))
|
||||
for i, network := range configureNetworks {
|
||||
networks[network] = nettypes.PerNetworkOptions{
|
||||
InterfaceName: fmt.Sprintf("eth%d", i),
|
||||
}
|
||||
}
|
||||
|
||||
opts := nettypes.NetworkOptions{
|
||||
ContainerID: containerName,
|
||||
ContainerName: containerName,
|
||||
Networks: networks,
|
||||
}
|
||||
_, err = b.NetworkInterface.Setup(mynetns, nettypes.SetupOptions{NetworkOptions: opts})
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
|
||||
teardown = func() {
|
||||
err := b.NetworkInterface.Teardown(mynetns, nettypes.TeardownOptions{NetworkOptions: opts})
|
||||
if err != nil {
|
||||
logrus.Errorf("failed to cleanup network: %v", err)
|
||||
}
|
||||
}
|
||||
|
||||
return teardown, nil, nil
|
||||
}
|
||||
|
||||
func setupNamespaces(logger *logrus.Logger, g *generate.Generator, namespaceOptions define.NamespaceOptions, idmapOptions define.IDMappingOptions, policy define.NetworkConfigurationPolicy) (configureNetwork bool, configureNetworks []string, configureUTS bool, err error) {
|
||||
// Set namespace options in the container configuration.
|
||||
for _, namespaceOption := range namespaceOptions {
|
||||
switch namespaceOption.Name {
|
||||
case string(specs.NetworkNamespace):
|
||||
configureNetwork = false
|
||||
if !namespaceOption.Host && (namespaceOption.Path == "" || !filepath.IsAbs(namespaceOption.Path)) {
|
||||
if namespaceOption.Path != "" && !filepath.IsAbs(namespaceOption.Path) {
|
||||
configureNetworks = strings.Split(namespaceOption.Path, ",")
|
||||
namespaceOption.Path = ""
|
||||
}
|
||||
configureNetwork = (policy != define.NetworkDisabled)
|
||||
}
|
||||
case string(specs.UTSNamespace):
|
||||
configureUTS = false
|
||||
if !namespaceOption.Host && namespaceOption.Path == "" {
|
||||
configureUTS = true
|
||||
}
|
||||
}
|
||||
// TODO: re-visit this when there is consensus on a
|
||||
// FreeBSD runtime-spec. FreeBSD jails have rough
|
||||
// equivalents for UTS and network namespaces.
|
||||
}
|
||||
|
||||
return configureNetwork, configureNetworks, configureUTS, nil
|
||||
}
|
||||
|
||||
func (b *Builder) configureNamespaces(g *generate.Generator, options *RunOptions) (bool, []string, error) {
|
||||
defaultNamespaceOptions, err := DefaultNamespaceOptions()
|
||||
if err != nil {
|
||||
return false, nil, err
|
||||
}
|
||||
|
||||
namespaceOptions := defaultNamespaceOptions
|
||||
namespaceOptions.AddOrReplace(b.NamespaceOptions...)
|
||||
namespaceOptions.AddOrReplace(options.NamespaceOptions...)
|
||||
|
||||
networkPolicy := options.ConfigureNetwork
|
||||
// Nothing was specified explicitly, so the network policy should be inherited from the builder.
|
||||
if networkPolicy == NetworkDefault {
|
||||
networkPolicy = b.ConfigureNetwork
|
||||
|
||||
// If builder policy was NetworkDisabled and
|
||||
// we want to disable network for this run.
|
||||
// reset options.ConfigureNetwork to NetworkDisabled
|
||||
// since it will be treated as source of truth later.
|
||||
if networkPolicy == NetworkDisabled {
|
||||
options.ConfigureNetwork = networkPolicy
|
||||
}
|
||||
}
|
||||
|
||||
configureNetwork, configureNetworks, configureUTS, err := setupNamespaces(options.Logger, g, namespaceOptions, b.IDMappingOptions, networkPolicy)
|
||||
if err != nil {
|
||||
return false, nil, err
|
||||
}
|
||||
|
||||
if configureUTS {
|
||||
if options.Hostname != "" {
|
||||
g.SetHostname(options.Hostname)
|
||||
} else if b.Hostname() != "" {
|
||||
g.SetHostname(b.Hostname())
|
||||
} else {
|
||||
g.SetHostname(stringid.TruncateID(b.ContainerID))
|
||||
}
|
||||
} else {
|
||||
g.SetHostname("")
|
||||
}
|
||||
|
||||
found := false
|
||||
spec := g.Config
|
||||
for i := range spec.Process.Env {
|
||||
if strings.HasPrefix(spec.Process.Env[i], "HOSTNAME=") {
|
||||
found = true
|
||||
break
|
||||
}
|
||||
}
|
||||
if !found {
|
||||
spec.Process.Env = append(spec.Process.Env, fmt.Sprintf("HOSTNAME=%s", spec.Hostname))
|
||||
}
|
||||
|
||||
return configureNetwork, configureNetworks, nil
|
||||
}
|
||||
|
||||
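// runSetupBoundFiles converts the bind-file map into nullfs mounts, tagging
// sources that live inside the bundle directory with NoBindOption.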
func runSetupBoundFiles(bundlePath string, bindFiles map[string]string) (mounts []specs.Mount) {
|
||||
for dest, src := range bindFiles {
|
||||
options := []string{}
|
||||
if strings.HasPrefix(src, bundlePath) {
|
||||
options = append(options, bind.NoBindOption)
|
||||
}
|
||||
mounts = append(mounts, specs.Mount{
|
||||
Source: src,
|
||||
Destination: dest,
|
||||
Type: "nullfs",
|
||||
Options: options,
|
||||
})
|
||||
}
|
||||
return mounts
|
||||
}
|
||||
|
||||
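// addRlimits merges the default and user-supplied ulimits (name=SOFT:HARD)
// and adds them to the runtime spec as RLIMIT_* process rlimits.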
func addRlimits(ulimit []string, g *generate.Generator, defaultUlimits []string) error {
|
||||
var (
|
||||
ul *units.Ulimit
|
||||
err error
|
||||
)
|
||||
|
||||
ulimit = append(defaultUlimits, ulimit...)
|
||||
for _, u := range ulimit {
|
||||
if ul, err = units.ParseUlimit(u); err != nil {
|
||||
return fmt.Errorf("ulimit option %q requires name=SOFT:HARD, failed to be parsed: %w", u, err)
|
||||
}
|
||||
|
||||
g.AddProcessRlimits("RLIMIT_"+strings.ToUpper(ul.Name), uint64(ul.Hard), uint64(ul.Soft))
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// setPdeathsig sets a parent-death signal for the process
|
||||
func setPdeathsig(cmd *exec.Cmd) {
|
||||
if cmd.SysProcAttr == nil {
|
||||
cmd.SysProcAttr = &syscall.SysProcAttr{}
|
||||
}
|
||||
cmd.SysProcAttr.Pdeathsig = syscall.SIGKILL
|
||||
}
|
||||
|
||||
// Create pipes to use for relaying stdio.
|
||||
func runMakeStdioPipe(uid, gid int) ([][]int, error) {
|
||||
stdioPipe := make([][]int, 3)
|
||||
for i := range stdioPipe {
|
||||
stdioPipe[i] = make([]int, 2)
|
||||
if err := unix.Pipe(stdioPipe[i]); err != nil {
|
||||
return nil, fmt.Errorf("error creating pipe for container FD %d: %w", i, err)
|
||||
}
|
||||
}
|
||||
return stdioPipe, nil
|
||||
}
|
File diff suppressed because it is too large
|
@ -1,13 +1,15 @@
|
|||
//go:build darwin
|
||||
// +build darwin
|
||||
|
||||
package buildah
|
||||
|
||||
import (
|
||||
"errors"
|
||||
|
||||
"github.com/containers/buildah/define"
|
||||
nettypes "github.com/containers/common/libnetwork/types"
|
||||
"github.com/opencontainers/runtime-spec/specs-go"
|
||||
"github.com/containers/storage"
|
||||
"github.com/pkg/errors"
|
||||
"github.com/opencontainers/runtime-spec/specs-go"
|
||||
)
|
||||
|
||||
// ContainerDevices is an alias for a slice of github.com/opencontainers/runc/libcontainer/configs.Device structures.
|
||||
|
|
|
@ -1,11 +1,13 @@
|
|||
// +build !linux,!darwin
|
||||
//go:build !linux && !darwin && !freebsd
|
||||
// +build !linux,!darwin,!freebsd
|
||||
|
||||
package buildah
|
||||
|
||||
import (
|
||||
"errors"
|
||||
|
||||
nettypes "github.com/containers/common/libnetwork/types"
|
||||
"github.com/containers/storage"
|
||||
"github.com/pkg/errors"
|
||||
)
|
||||
|
||||
func setChildProcess() error {
|
||||
|
|
|
@ -1,13 +1,14 @@
|
|||
//go:build seccomp && linux
|
||||
// +build seccomp,linux
|
||||
|
||||
package buildah
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
|
||||
"github.com/containers/common/pkg/seccomp"
|
||||
"github.com/opencontainers/runtime-spec/specs-go"
|
||||
"github.com/pkg/errors"
|
||||
)
|
||||
|
||||
func setupSeccomp(spec *specs.Spec, seccompProfilePath string) error {
|
||||
|
@ -17,17 +18,17 @@ func setupSeccomp(spec *specs.Spec, seccompProfilePath string) error {
|
|||
case "":
|
||||
seccompConfig, err := seccomp.GetDefaultProfile(spec)
|
||||
if err != nil {
|
||||
return errors.Wrapf(err, "loading default seccomp profile failed")
|
||||
return fmt.Errorf("loading default seccomp profile failed: %w", err)
|
||||
}
|
||||
spec.Linux.Seccomp = seccompConfig
|
||||
default:
|
||||
seccompProfile, err := ioutil.ReadFile(seccompProfilePath)
|
||||
if err != nil {
|
||||
return errors.Wrapf(err, "opening seccomp profile (%s) failed", seccompProfilePath)
|
||||
return fmt.Errorf("opening seccomp profile (%s) failed: %w", seccompProfilePath, err)
|
||||
}
|
||||
seccompConfig, err := seccomp.LoadProfile(string(seccompProfile), spec)
|
||||
if err != nil {
|
||||
return errors.Wrapf(err, "loading seccomp profile (%s) failed", seccompProfilePath)
|
||||
return fmt.Errorf("loading seccomp profile (%s) failed: %w", seccompProfilePath, err)
|
||||
}
|
||||
spec.Linux.Seccomp = seccompConfig
|
||||
}
|
||||
|
|
|
@ -9,7 +9,6 @@ import (
|
|||
|
||||
"github.com/opencontainers/runtime-tools/generate"
|
||||
selinux "github.com/opencontainers/selinux/go-selinux"
|
||||
"github.com/pkg/errors"
|
||||
)
|
||||
|
||||
func selinuxGetEnabled() bool {
|
||||
|
@ -30,12 +29,12 @@ func runLabelStdioPipes(stdioPipe [][]int, processLabel, mountLabel string) erro
|
|||
}
|
||||
pipeContext, err := selinux.ComputeCreateContext(processLabel, mountLabel, "fifo_file")
|
||||
if err != nil {
|
||||
return errors.Wrapf(err, "computing file creation context for pipes")
|
||||
return fmt.Errorf("computing file creation context for pipes: %w", err)
|
||||
}
|
||||
for i := range stdioPipe {
|
||||
pipeFdName := fmt.Sprintf("/proc/self/fd/%d", stdioPipe[i][0])
|
||||
if err := selinux.SetFileLabel(pipeFdName, pipeContext); err != nil && !os.IsNotExist(err) {
|
||||
return errors.Wrapf(err, "setting file label on %q", pipeFdName)
|
||||
return fmt.Errorf("setting file label on %q: %w", pipeFdName, err)
|
||||
}
|
||||
}
|
||||
return nil
|
||||
|
|
|
@ -1,19 +1,17 @@
|
|||
package buildah
|
||||
|
||||
import (
|
||||
"github.com/pkg/errors"
|
||||
)
|
||||
import "fmt"
|
||||
|
||||
// Unmount unmounts a build container.
|
||||
func (b *Builder) Unmount() error {
|
||||
_, err := b.store.Unmount(b.ContainerID, false)
|
||||
if err != nil {
|
||||
return errors.Wrapf(err, "error unmounting build container %q", b.ContainerID)
|
||||
return fmt.Errorf("error unmounting build container %q: %w", b.ContainerID, err)
|
||||
}
|
||||
b.MountPoint = ""
|
||||
err = b.Save()
|
||||
if err != nil {
|
||||
return errors.Wrapf(err, "error saving updated state for build container %q", b.ContainerID)
|
||||
return fmt.Errorf("error saving updated state for build container %q: %w", b.ContainerID, err)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
|
|
@ -1,6 +1,8 @@
|
|||
package buildah
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"os"
|
||||
"path/filepath"
|
||||
|
@ -16,7 +18,6 @@ import (
|
|||
v1 "github.com/opencontainers/image-spec/specs-go/v1"
|
||||
rspec "github.com/opencontainers/runtime-spec/specs-go"
|
||||
"github.com/opencontainers/selinux/go-selinux/label"
|
||||
"github.com/pkg/errors"
|
||||
"github.com/sirupsen/logrus"
|
||||
)
|
||||
|
||||
|
@ -107,7 +108,7 @@ func convertRuntimeIDMaps(UIDMap, GIDMap []rspec.LinuxIDMapping) ([]idtools.IDMa
|
|||
func isRegistryBlocked(registry string, sc *types.SystemContext) (bool, error) {
|
||||
reginfo, err := sysregistriesv2.FindRegistry(sc, registry)
|
||||
if err != nil {
|
||||
return false, errors.Wrapf(err, "unable to parse the registries configuration (%s)", sysregistriesv2.ConfigPath(sc))
|
||||
return false, fmt.Errorf("unable to parse the registries configuration (%s): %w", sysregistriesv2.ConfigPath(sc), err)
|
||||
}
|
||||
if reginfo != nil {
|
||||
if reginfo.Blocked {
|
||||
|
@ -150,7 +151,7 @@ func ReserveSELinuxLabels(store storage.Store, id string) error {
|
|||
if selinuxGetEnabled() {
|
||||
containers, err := store.Containers()
|
||||
if err != nil {
|
||||
return errors.Wrapf(err, "error getting list of containers")
|
||||
return fmt.Errorf("error getting list of containers: %w", err)
|
||||
}
|
||||
|
||||
for _, c := range containers {
|
||||
|
@ -159,7 +160,7 @@ func ReserveSELinuxLabels(store storage.Store, id string) error {
|
|||
} else {
|
||||
b, err := OpenBuilder(store, c.ID)
|
||||
if err != nil {
|
||||
if os.IsNotExist(errors.Cause(err)) {
|
||||
if errors.Is(err, os.ErrNotExist) {
|
||||
// Ignore not exist errors since containers probably created by other tool
|
||||
// TODO, we need to read other containers json data to reserve their SELinux labels
|
||||
continue
|
||||
|
@ -168,7 +169,7 @@ func ReserveSELinuxLabels(store storage.Store, id string) error {
|
|||
}
|
||||
// Prevent different containers from using same MCS label
|
||||
if err := label.ReserveLabel(b.ProcessLabel); err != nil {
|
||||
return errors.Wrapf(err, "error reserving SELinux label %q", b.ProcessLabel)
|
||||
return fmt.Errorf("error reserving SELinux label %q: %w", b.ProcessLabel, err)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
@ -218,10 +219,10 @@ func extractWithTar(root, src, dest string) error {
|
|||
wg.Wait()
|
||||
|
||||
if getErr != nil {
|
||||
return errors.Wrapf(getErr, "error reading %q", src)
|
||||
return fmt.Errorf("error reading %q: %w", src, getErr)
|
||||
}
|
||||
if putErr != nil {
|
||||
return errors.Wrapf(putErr, "error copying contents of %q to %q", src, dest)
|
||||
return fmt.Errorf("error copying contents of %q to %q: %w", src, dest, putErr)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
|
|
@ -1,6 +1,7 @@
|
|||
package util
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"net/url"
|
||||
|
@ -24,7 +25,6 @@ import (
|
|||
"github.com/docker/distribution/registry/api/errcode"
|
||||
"github.com/opencontainers/go-digest"
|
||||
specs "github.com/opencontainers/runtime-spec/specs-go"
|
||||
"github.com/pkg/errors"
|
||||
"github.com/sirupsen/logrus"
|
||||
)
|
||||
|
||||
|
@ -118,18 +118,18 @@ func ExpandNames(names []string, systemContext *types.SystemContext, store stora
|
|||
var name reference.Named
|
||||
nameList, _, err := resolveName(n, systemContext, store)
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "error parsing name %q", n)
|
||||
return nil, fmt.Errorf("error parsing name %q: %w", n, err)
|
||||
}
|
||||
if len(nameList) == 0 {
|
||||
named, err := reference.ParseNormalizedNamed(n)
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "error parsing name %q", n)
|
||||
return nil, fmt.Errorf("error parsing name %q: %w", n, err)
|
||||
}
|
||||
name = named
|
||||
} else {
|
||||
named, err := reference.ParseNormalizedNamed(nameList[0])
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "error parsing name %q", nameList[0])
|
||||
return nil, fmt.Errorf("error parsing name %q: %w", nameList[0], err)
|
||||
}
|
||||
name = named
|
||||
}
|
||||
|
@ -169,7 +169,7 @@ func ResolveNameToReferences(
|
|||
) (refs []types.ImageReference, err error) {
|
||||
names, transport, err := resolveName(image, systemContext, store)
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "error parsing name %q", image)
|
||||
return nil, fmt.Errorf("error parsing name %q: %w", image, err)
|
||||
}
|
||||
|
||||
if transport != DefaultTransport {
|
||||
|
@ -185,7 +185,7 @@ func ResolveNameToReferences(
|
|||
refs = append(refs, ref)
|
||||
}
|
||||
if len(refs) == 0 {
|
||||
return nil, errors.Errorf("error locating images with names %v", names)
|
||||
return nil, fmt.Errorf("error locating images with names %v", names)
|
||||
}
|
||||
return refs, nil
|
||||
}
|
||||
|
@ -206,7 +206,7 @@ func AddImageNames(store storage.Store, firstRegistry string, systemContext *typ
|
|||
|
||||
for _, tag := range addNames {
|
||||
if err := localImage.Tag(tag); err != nil {
|
||||
return errors.Wrapf(err, "error tagging image %s", image.ID)
|
||||
return fmt.Errorf("error tagging image %s: %w", image.ID, err)
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -217,7 +217,7 @@ func AddImageNames(store storage.Store, firstRegistry string, systemContext *typ
|
|||
// error message that reflects the reason of the failure.
|
||||
// In case err type is not a familiar one the error "defaultError" is returned.
|
||||
func GetFailureCause(err, defaultError error) error {
|
||||
switch nErr := errors.Cause(err).(type) {
|
||||
switch nErr := err.(type) {
|
||||
case errcode.Errors:
|
||||
return err
|
||||
case errcode.Error, *url.Error:
|
||||
|
@ -263,7 +263,7 @@ func GetContainerIDs(uidmap, gidmap []specs.LinuxIDMapping, uid, gid uint32) (ui
|
|||
}
|
||||
}
|
||||
if !uidMapped {
|
||||
return 0, 0, errors.Errorf("container uses ID mappings (%#v), but doesn't map UID %d", uidmap, uid)
|
||||
return 0, 0, fmt.Errorf("container uses ID mappings (%#v), but doesn't map UID %d", uidmap, uid)
|
||||
}
|
||||
gidMapped := true
|
||||
for _, m := range gidmap {
|
||||
|
@ -275,7 +275,7 @@ func GetContainerIDs(uidmap, gidmap []specs.LinuxIDMapping, uid, gid uint32) (ui
|
|||
}
|
||||
}
|
||||
if !gidMapped {
|
||||
return 0, 0, errors.Errorf("container uses ID mappings (%#v), but doesn't map GID %d", gidmap, gid)
|
||||
return 0, 0, fmt.Errorf("container uses ID mappings (%#v), but doesn't map GID %d", gidmap, gid)
|
||||
}
|
||||
return uid, gid, nil
|
||||
}
|
||||
|
@ -293,7 +293,7 @@ func GetHostIDs(uidmap, gidmap []specs.LinuxIDMapping, uid, gid uint32) (uint32,
|
|||
}
|
||||
}
|
||||
if !uidMapped {
|
||||
return 0, 0, errors.Errorf("container uses ID mappings (%#v), but doesn't map UID %d", uidmap, uid)
|
||||
return 0, 0, fmt.Errorf("container uses ID mappings (%#v), but doesn't map UID %d", uidmap, uid)
|
||||
}
|
||||
gidMapped := true
|
||||
for _, m := range gidmap {
|
||||
|
@ -305,7 +305,7 @@ func GetHostIDs(uidmap, gidmap []specs.LinuxIDMapping, uid, gid uint32) (uint32,
|
|||
}
|
||||
}
|
||||
if !gidMapped {
|
||||
return 0, 0, errors.Errorf("container uses ID mappings (%#v), but doesn't map GID %d", gidmap, gid)
|
||||
return 0, 0, fmt.Errorf("container uses ID mappings (%#v), but doesn't map GID %d", gidmap, gid)
|
||||
}
|
||||
return uid, gid, nil
|
||||
}
|
||||
|
@ -460,3 +460,22 @@ func VerifyTagName(imageSpec string) (types.ImageReference, error) {
|
|||
}
|
||||
return ref, nil
|
||||
}
|
||||
|
||||
// Cause returns the most underlying error for the provided one. There is a
|
||||
// maximum error depth of 100 to avoid endless loops. An additional error log
|
||||
// message will be created if this maximum has been reached.
|
||||
func Cause(err error) (cause error) {
|
||||
cause = err
|
||||
|
||||
const maxDepth = 100
|
||||
for i := 0; i <= maxDepth; i++ {
|
||||
res := errors.Unwrap(cause)
|
||||
if res == nil {
|
||||
return cause
|
||||
}
|
||||
cause = res
|
||||
}
|
||||
|
||||
logrus.Errorf("Max error depth of %d reached, cannot unwrap until root cause: %v", maxDepth, err)
|
||||
return cause
|
||||
}
|
||||
|
|
|
@ -1,4 +1,5 @@
// +build linux,!mips,!mipsle,!mips64,!mips64le
//go:build (linux && !mips && !mipsle && !mips64 && !mips64le) || freebsd
// +build linux,!mips,!mipsle,!mips64,!mips64le freebsd

package util

|
|
|
@ -1,4 +1,5 @@
// +build linux darwin
//go:build linux || darwin || freebsd
// +build linux darwin freebsd

package util

|
|
|
@ -2,6 +2,8 @@ package libimage
|
|||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"os"
|
||||
"strings"
|
||||
|
@ -17,7 +19,6 @@ import (
|
|||
storageTransport "github.com/containers/image/v5/storage"
|
||||
"github.com/containers/image/v5/types"
|
||||
encconfig "github.com/containers/ocicrypt/config"
|
||||
"github.com/pkg/errors"
|
||||
"github.com/sirupsen/logrus"
|
||||
)
|
||||
|
||||
|
@ -343,12 +344,12 @@ func (c *copier) copy(ctx context.Context, source, destination types.ImageRefere
|
|||
// Sanity checks for Buildah.
|
||||
if sourceInsecure != nil && *sourceInsecure {
|
||||
if c.systemContext.DockerInsecureSkipTLSVerify == types.OptionalBoolFalse {
|
||||
return nil, errors.Errorf("can't require tls verification on an insecured registry")
|
||||
return nil, fmt.Errorf("can't require tls verification on an insecured registry")
|
||||
}
|
||||
}
|
||||
if destinationInsecure != nil && *destinationInsecure {
|
||||
if c.systemContext.DockerInsecureSkipTLSVerify == types.OptionalBoolFalse {
|
||||
return nil, errors.Errorf("can't require tls verification on an insecured registry")
|
||||
return nil, fmt.Errorf("can't require tls verification on an insecured registry")
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -402,7 +403,7 @@ func checkRegistrySourcesAllows(dest types.ImageReference) (insecure *bool, err
|
|||
AllowedRegistries []string `json:"allowedRegistries,omitempty"`
|
||||
}
|
||||
if err := json.Unmarshal([]byte(registrySources), &sources); err != nil {
|
||||
return nil, errors.Wrapf(err, "error parsing $BUILD_REGISTRY_SOURCES (%q) as JSON", registrySources)
|
||||
return nil, fmt.Errorf("error parsing $BUILD_REGISTRY_SOURCES (%q) as JSON: %w", registrySources, err)
|
||||
}
|
||||
blocked := false
|
||||
if len(sources.BlockedRegistries) > 0 {
|
||||
|
@ -413,7 +414,7 @@ func checkRegistrySourcesAllows(dest types.ImageReference) (insecure *bool, err
|
|||
}
|
||||
}
|
||||
if blocked {
|
||||
return nil, errors.Errorf("registry %q denied by policy: it is in the blocked registries list (%s)", reference.Domain(dref), registrySources)
|
||||
return nil, fmt.Errorf("registry %q denied by policy: it is in the blocked registries list (%s)", reference.Domain(dref), registrySources)
|
||||
}
|
||||
allowed := true
|
||||
if len(sources.AllowedRegistries) > 0 {
|
||||
|
@ -425,7 +426,7 @@ func checkRegistrySourcesAllows(dest types.ImageReference) (insecure *bool, err
|
|||
}
|
||||
}
|
||||
if !allowed {
|
||||
return nil, errors.Errorf("registry %q denied by policy: not in allowed registries list (%s)", reference.Domain(dref), registrySources)
|
||||
return nil, fmt.Errorf("registry %q denied by policy: not in allowed registries list (%s)", reference.Domain(dref), registrySources)
|
||||
}
|
||||
|
||||
for _, inseureDomain := range sources.InsecureRegistries {
|
||||
|
|
|
@ -11,7 +11,6 @@ import (
|
|||
filtersPkg "github.com/containers/common/pkg/filters"
|
||||
"github.com/containers/common/pkg/timetype"
|
||||
"github.com/containers/image/v5/docker/reference"
|
||||
"github.com/pkg/errors"
|
||||
"github.com/sirupsen/logrus"
|
||||
)
|
||||
|
||||
|
@ -102,7 +101,7 @@ func (r *Runtime) compileImageFilters(ctx context.Context, options *ListImagesOp
|
|||
} else {
|
||||
split = strings.SplitN(f, "=", 2)
|
||||
if len(split) != 2 {
|
||||
return nil, errors.Errorf("invalid image filter %q: must be in the format %q", f, "filter=value or filter!=value")
|
||||
return nil, fmt.Errorf("invalid image filter %q: must be in the format %q", f, "filter=value or filter!=value")
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -186,7 +185,7 @@ func (r *Runtime) compileImageFilters(ctx context.Context, options *ListImagesOp
|
|||
filter = filterBefore(until)
|
||||
|
||||
default:
|
||||
return nil, errors.Errorf("unsupported image filter %q", key)
|
||||
return nil, fmt.Errorf("unsupported image filter %q", key)
|
||||
}
|
||||
if negate {
|
||||
filter = negateFilter(filter)
|
||||
|
@ -206,7 +205,7 @@ func negateFilter(f filterFunc) filterFunc {
|
|||
|
||||
func (r *Runtime) containers(duplicate map[string]string, key, value string, externalFunc IsExternalContainerFunc) error {
|
||||
if exists, ok := duplicate[key]; ok && exists != value {
|
||||
return errors.Errorf("specifying %q filter more than once with different values is not supported", key)
|
||||
return fmt.Errorf("specifying %q filter more than once with different values is not supported", key)
|
||||
}
|
||||
duplicate[key] = value
|
||||
switch value {
|
||||
|
@ -237,19 +236,19 @@ func (r *Runtime) until(value string) (time.Time, error) {
|
|||
func (r *Runtime) time(key, value string) (*Image, error) {
|
||||
img, _, err := r.LookupImage(value, nil)
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "could not find local image for filter filter %q=%q", key, value)
|
||||
return nil, fmt.Errorf("could not find local image for filter filter %q=%q: %w", key, value, err)
|
||||
}
|
||||
return img, nil
|
||||
}
|
||||
|
||||
func (r *Runtime) bool(duplicate map[string]string, key, value string) (bool, error) {
|
||||
if exists, ok := duplicate[key]; ok && exists != value {
|
||||
return false, errors.Errorf("specifying %q filter more than once with different values is not supported", key)
|
||||
return false, fmt.Errorf("specifying %q filter more than once with different values is not supported", key)
|
||||
}
|
||||
duplicate[key] = value
|
||||
set, err := strconv.ParseBool(value)
|
||||
if err != nil {
|
||||
return false, errors.Wrapf(err, "non-boolean value %q for %s filter", key, value)
|
||||
return false, fmt.Errorf("non-boolean value %q for %s filter: %w", key, value, err)
|
||||
}
|
||||
return set, nil
|
||||
}
|
||||
|
|
|
@ -2,6 +2,7 @@ package libimage
|
|||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"fmt"
|
||||
"path/filepath"
|
||||
"sort"
|
||||
|
@ -16,7 +17,6 @@ import (
|
|||
"github.com/hashicorp/go-multierror"
|
||||
"github.com/opencontainers/go-digest"
|
||||
ociv1 "github.com/opencontainers/image-spec/specs-go/v1"
|
||||
"github.com/pkg/errors"
|
||||
"github.com/sirupsen/logrus"
|
||||
)
|
||||
|
||||
|
@ -54,7 +54,7 @@ func (i *Image) reload() error {
|
|||
logrus.Tracef("Reloading image %s", i.ID())
|
||||
img, err := i.runtime.store.Image(i.ID())
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "reloading image")
|
||||
return fmt.Errorf("reloading image: %w", err)
|
||||
}
|
||||
i.storageImage = img
|
||||
i.cached.imageSource = nil
|
||||
|
@ -81,7 +81,7 @@ func (i *Image) isCorrupted(name string) error {
|
|||
if name == "" {
|
||||
name = i.ID()[:12]
|
||||
}
|
||||
return errors.Errorf("Image %s exists in local storage but may be corrupted (remove the image to resolve the issue): %v", name, err)
|
||||
return fmt.Errorf("Image %s exists in local storage but may be corrupted (remove the image to resolve the issue): %v", name, err)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
@ -195,7 +195,7 @@ func (i *Image) Labels(ctx context.Context) (map[string]string, error) {
|
|||
if err != nil {
|
||||
isManifestList, listErr := i.IsManifestList(ctx)
|
||||
if listErr != nil {
|
||||
err = errors.Wrapf(err, "fallback error checking whether image is a manifest list: %v", err)
|
||||
err = fmt.Errorf("fallback error checking whether image is a manifest list: %v: %w", err, err)
|
||||
} else if isManifestList {
|
||||
logrus.Debugf("Ignoring error: cannot return labels for manifest list or image index %s", i.ID())
|
||||
return nil, nil
|
||||
|
@ -305,7 +305,7 @@ func (i *Image) removeContainers(options *RemoveImagesOptions) error {
|
|||
for _, cID := range containers {
|
||||
if err := i.runtime.store.DeleteContainer(cID); err != nil {
|
||||
// If the container does not exist anymore, we're good.
|
||||
if errors.Cause(err) != storage.ErrContainerUnknown {
|
||||
if !errors.Is(err, storage.ErrContainerUnknown) {
|
||||
multiE = multierror.Append(multiE, err)
|
||||
}
|
||||
}
|
||||
|
@ -361,7 +361,7 @@ func (i *Image) removeRecursive(ctx context.Context, rmMap map[string]*RemoveIma
|
|||
logrus.Debugf("Removing image %s", i.ID())
|
||||
|
||||
if i.IsReadOnly() {
|
||||
return processedIDs, errors.Errorf("cannot remove read-only image %q", i.ID())
|
||||
return processedIDs, fmt.Errorf("cannot remove read-only image %q", i.ID())
|
||||
}
|
||||
|
||||
if i.runtime.eventChannel != nil {
|
||||
|
@ -384,15 +384,12 @@ func (i *Image) removeRecursive(ctx context.Context, rmMap map[string]*RemoveIma
|
|||
// have a closer look at the errors. On top, image removal should be
|
||||
// tolerant toward corrupted images.
|
||||
handleError := func(err error) error {
|
||||
switch errors.Cause(err) {
|
||||
case storage.ErrImageUnknown, storage.ErrNotAnImage, storage.ErrLayerUnknown:
|
||||
// The image or layers of the image may already
|
||||
// have been removed in which case we consider
|
||||
// the image to be removed.
|
||||
if errors.Is(err, storage.ErrImageUnknown) || errors.Is(err, storage.ErrNotAnImage) || errors.Is(err, storage.ErrLayerUnknown) {
|
||||
// The image or layers of the image may already have been removed
|
||||
// in which case we consider the image to be removed.
|
||||
return nil
|
||||
default:
|
||||
return err
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
||||
// Calculate the size if requested. `podman-image-prune` likes to
|
||||
|
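The same pass rewrites `errors.Cause(err) == X` comparisons as `errors.Is(err, X)`. `errors.Cause` only unwraps errors produced by `github.com/pkg/errors`, so once call sites wrap with `fmt.Errorf("...: %w", err)` those comparisons would quietly stop matching; `errors.Is` follows any `Unwrap` chain. A small sketch with a stand-in for the storage sentinels used above:

```go
package main

import (
	"errors"
	"fmt"
)

// errLayerUnknown stands in for storage.ErrLayerUnknown in this sketch.
var errLayerUnknown = errors.New("layer unknown")

func main() {
	err := fmt.Errorf("removing image: %w", errLayerUnknown)

	// A direct comparison (or pkg/errors' Cause) misses the stdlib-wrapped sentinel.
	fmt.Println(err == errLayerUnknown) // false

	// errors.Is walks the %w chain and still matches.
	fmt.Println(errors.Is(err, errLayerUnknown)) // true
}
```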
@ -421,11 +418,11 @@ func (i *Image) removeRecursive(ctx context.Context, rmMap map[string]*RemoveIma
|
|||
byDigest := strings.HasPrefix(referencedBy, "sha256:")
|
||||
if !options.Force {
|
||||
if byID && numNames > 1 {
|
||||
return processedIDs, errors.Errorf("unable to delete image %q by ID with more than one tag (%s): please force removal", i.ID(), i.Names())
|
||||
return processedIDs, fmt.Errorf("unable to delete image %q by ID with more than one tag (%s): please force removal", i.ID(), i.Names())
|
||||
} else if byDigest && numNames > 1 {
|
||||
// FIXME - Docker will remove the digest but containers storage
|
||||
// does not support that yet, so our hands are tied.
|
||||
return processedIDs, errors.Errorf("unable to delete image %q by digest with more than one tag (%s): please force removal", i.ID(), i.Names())
|
||||
return processedIDs, fmt.Errorf("unable to delete image %q by digest with more than one tag (%s): please force removal", i.ID(), i.Names())
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -509,16 +506,16 @@ var errTagDigest = errors.New("tag by digest not supported")
|
|||
// storage. The name is normalized according to the rules of NormalizeName.
|
||||
func (i *Image) Tag(name string) error {
|
||||
if strings.HasPrefix(name, "sha256:") { // ambiguous input
|
||||
return errors.Wrap(errTagDigest, name)
|
||||
return fmt.Errorf("%s: %w", name, errTagDigest)
|
||||
}
|
||||
|
||||
ref, err := NormalizeName(name)
|
||||
if err != nil {
|
||||
return errors.Wrapf(err, "normalizing name %q", name)
|
||||
return fmt.Errorf("normalizing name %q: %w", name, err)
|
||||
}
|
||||
|
||||
if _, isDigested := ref.(reference.Digested); isDigested {
|
||||
return errors.Wrap(errTagDigest, name)
|
||||
return fmt.Errorf("%s: %w", name, errTagDigest)
|
||||
}
|
||||
|
||||
logrus.Debugf("Tagging image %s with %q", i.ID(), ref.String())
|
||||
|
@ -546,12 +543,12 @@ var errUntagDigest = errors.New("untag by digest not supported")
|
|||
// of NormalizeName.
|
||||
func (i *Image) Untag(name string) error {
|
||||
if strings.HasPrefix(name, "sha256:") { // ambiguous input
|
||||
return errors.Wrap(errUntagDigest, name)
|
||||
return fmt.Errorf("%s: %w", name, errUntagDigest)
|
||||
}
|
||||
|
||||
ref, err := NormalizeName(name)
|
||||
if err != nil {
|
||||
return errors.Wrapf(err, "normalizing name %q", name)
|
||||
return fmt.Errorf("normalizing name %q: %w", name, err)
|
||||
}
|
||||
|
||||
// FIXME: this is breaking Podman CI but must be re-enabled once
|
||||
|
@ -560,9 +557,9 @@ func (i *Image) Untag(name string) error {
|
|||
//
|
||||
// !!! Also make sure to re-enable the tests !!!
|
||||
//
|
||||
// if _, isDigested := ref.(reference.Digested); isDigested {
|
||||
// return errors.Wrap(errUntagDigest, name)
|
||||
// }
|
||||
// if _, isDigested := ref.(reference.Digested); isDigested {
|
||||
// return fmt.Errorf("%s: %w", name, errUntagDigest)
|
||||
// }
|
||||
|
||||
name = ref.String()
|
||||
|
||||
|
@ -582,7 +579,7 @@ func (i *Image) Untag(name string) error {
|
|||
}
|
||||
|
||||
if !removedName {
|
||||
return errors.Wrap(errTagUnknown, name)
|
||||
return fmt.Errorf("%s: %w", name, errTagUnknown)
|
||||
}
|
||||
|
||||
if err := i.runtime.store.SetNames(i.ID(), newNames); err != nil {
|
||||
|
@ -731,7 +728,7 @@ func (i *Image) Mount(ctx context.Context, mountOptions []string, mountLabel str
|
|||
func (i *Image) Mountpoint() (string, error) {
|
||||
mountedTimes, err := i.runtime.store.Mounted(i.TopLayer())
|
||||
if err != nil || mountedTimes == 0 {
|
||||
if errors.Cause(err) == storage.ErrLayerUnknown {
|
||||
if errors.Is(err, storage.ErrLayerUnknown) {
|
||||
// Can happen, Podman did it, but there's no
|
||||
// explanation why.
|
||||
err = nil
|
||||
|
@ -943,7 +940,7 @@ func getImageID(ctx context.Context, src types.ImageReference, sys *types.System
|
|||
}()
|
||||
imageDigest := newImg.ConfigInfo().Digest
|
||||
if err = imageDigest.Validate(); err != nil {
|
||||
return "", errors.Wrapf(err, "getting config info")
|
||||
return "", fmt.Errorf("getting config info: %w", err)
|
||||
}
|
||||
return "@" + imageDigest.Encoded(), nil
|
||||
}
|
||||
|
|
|
@ -8,7 +8,6 @@ import (
|
|||
|
||||
"github.com/containers/common/pkg/signal"
|
||||
ociv1 "github.com/opencontainers/image-spec/specs-go/v1"
|
||||
"github.com/pkg/errors"
|
||||
)
|
||||
|
||||
// ImageConfig is a wrapper around the OCIv1 Image Configuration struct exported
|
||||
|
@ -44,7 +43,7 @@ func ImageConfigFromChanges(changes []string) (*ImageConfig, error) { // nolint:
|
|||
if len(split) != 2 {
|
||||
split = strings.SplitN(change, "=", 2)
|
||||
if len(split) != 2 {
|
||||
return nil, errors.Errorf("invalid change %q - must be formatted as KEY VALUE", change)
|
||||
return nil, fmt.Errorf("invalid change %q - must be formatted as KEY VALUE", change)
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -54,7 +53,7 @@ func ImageConfigFromChanges(changes []string) (*ImageConfig, error) { // nolint:
|
|||
case "USER":
|
||||
// Assume literal contents are the user.
|
||||
if value == "" {
|
||||
return nil, errors.Errorf("invalid change %q - must provide a value to USER", change)
|
||||
return nil, fmt.Errorf("invalid change %q - must provide a value to USER", change)
|
||||
}
|
||||
config.User = value
|
||||
case "EXPOSE":
|
||||
|
@ -63,14 +62,14 @@ func ImageConfigFromChanges(changes []string) (*ImageConfig, error) { // nolint:
|
|||
// Protocol must be "tcp" or "udp"
|
||||
splitPort := strings.Split(value, "/")
|
||||
if len(splitPort) > 2 {
|
||||
return nil, errors.Errorf("invalid change %q - EXPOSE port must be formatted as PORT[/PROTO]", change)
|
||||
return nil, fmt.Errorf("invalid change %q - EXPOSE port must be formatted as PORT[/PROTO]", change)
|
||||
}
|
||||
portNum, err := strconv.Atoi(splitPort[0])
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "invalid change %q - EXPOSE port must be an integer", change)
|
||||
return nil, fmt.Errorf("invalid change %q - EXPOSE port must be an integer: %w", change, err)
|
||||
}
|
||||
if portNum > 65535 || portNum <= 0 {
|
||||
return nil, errors.Errorf("invalid change %q - EXPOSE port must be a valid port number", change)
|
||||
return nil, fmt.Errorf("invalid change %q - EXPOSE port must be a valid port number", change)
|
||||
}
|
||||
proto := "tcp"
|
||||
if len(splitPort) > 1 {
|
||||
|
@ -79,7 +78,7 @@ func ImageConfigFromChanges(changes []string) (*ImageConfig, error) { // nolint:
|
|||
case "tcp", "udp":
|
||||
proto = testProto
|
||||
default:
|
||||
return nil, errors.Errorf("invalid change %q - EXPOSE protocol must be TCP or UDP", change)
|
||||
return nil, fmt.Errorf("invalid change %q - EXPOSE protocol must be TCP or UDP", change)
|
||||
}
|
||||
}
|
||||
if config.ExposedPorts == nil {
|
||||
|
@ -101,7 +100,7 @@ func ImageConfigFromChanges(changes []string) (*ImageConfig, error) { // nolint:
|
|||
key = splitEnv[0]
|
||||
// We do need a key
|
||||
if key == "" {
|
||||
return nil, errors.Errorf("invalid change %q - ENV must have at least one argument", change)
|
||||
return nil, fmt.Errorf("invalid change %q - ENV must have at least one argument", change)
|
||||
}
|
||||
// Perfectly valid to not have a value
|
||||
if len(splitEnv) == 2 {
|
||||
|
@ -163,11 +162,11 @@ func ImageConfigFromChanges(changes []string) (*ImageConfig, error) { // nolint:
|
|||
testUnmarshal = strings.Split(value, " ")
|
||||
}
|
||||
if len(testUnmarshal) == 0 {
|
||||
return nil, errors.Errorf("invalid change %q - must provide at least one argument to VOLUME", change)
|
||||
return nil, fmt.Errorf("invalid change %q - must provide at least one argument to VOLUME", change)
|
||||
}
|
||||
for _, vol := range testUnmarshal {
|
||||
if vol == "" {
|
||||
return nil, errors.Errorf("invalid change %q - VOLUME paths must not be empty", change)
|
||||
return nil, fmt.Errorf("invalid change %q - VOLUME paths must not be empty", change)
|
||||
}
|
||||
if config.Volumes == nil {
|
||||
config.Volumes = make(map[string]struct{})
|
||||
|
@ -181,7 +180,7 @@ func ImageConfigFromChanges(changes []string) (*ImageConfig, error) { // nolint:
|
|||
// WORKDIR c results in /A/b/c
|
||||
// Just need to check it's not empty...
|
||||
if value == "" {
|
||||
return nil, errors.Errorf("invalid change %q - must provide a non-empty WORKDIR", change)
|
||||
return nil, fmt.Errorf("invalid change %q - must provide a non-empty WORKDIR", change)
|
||||
}
|
||||
config.WorkingDir = filepath.Join(config.WorkingDir, value)
|
||||
case "LABEL":
|
||||
|
@ -198,7 +197,7 @@ func ImageConfigFromChanges(changes []string) (*ImageConfig, error) { // nolint:
|
|||
splitLabel := strings.SplitN(value, "=", 2)
|
||||
// Unlike ENV, LABEL must have a value
|
||||
if len(splitLabel) != 2 {
|
||||
return nil, errors.Errorf("invalid change %q - LABEL must be formatted key=value", change)
|
||||
return nil, fmt.Errorf("invalid change %q - LABEL must be formatted key=value", change)
|
||||
}
|
||||
key = splitLabel[0]
|
||||
val = splitLabel[1]
|
||||
|
@ -211,7 +210,7 @@ func ImageConfigFromChanges(changes []string) (*ImageConfig, error) { // nolint:
|
|||
}
|
||||
// Check key after we strip quotations
|
||||
if key == "" {
|
||||
return nil, errors.Errorf("invalid change %q - LABEL must have a non-empty key", change)
|
||||
return nil, fmt.Errorf("invalid change %q - LABEL must have a non-empty key", change)
|
||||
}
|
||||
if config.Labels == nil {
|
||||
config.Labels = make(map[string]string)
|
||||
|
@ -221,17 +220,17 @@ func ImageConfigFromChanges(changes []string) (*ImageConfig, error) { // nolint:
|
|||
// Check the provided signal for validity.
|
||||
killSignal, err := signal.ParseSignal(value)
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "invalid change %q - KILLSIGNAL must be given a valid signal", change)
|
||||
return nil, fmt.Errorf("invalid change %q - KILLSIGNAL must be given a valid signal: %w", change, err)
|
||||
}
|
||||
config.StopSignal = fmt.Sprintf("%d", killSignal)
|
||||
case "ONBUILD":
|
||||
// Onbuild always appends.
|
||||
if value == "" {
|
||||
return nil, errors.Errorf("invalid change %q - ONBUILD must be given an argument", change)
|
||||
return nil, fmt.Errorf("invalid change %q - ONBUILD must be given an argument", change)
|
||||
}
|
||||
config.OnBuild = append(config.OnBuild, value)
|
||||
default:
|
||||
return nil, errors.Errorf("invalid change %q - invalid instruction %s", change, outerKey)
|
||||
return nil, fmt.Errorf("invalid change %q - invalid instruction %s", change, outerKey)
|
||||
}
|
||||
}
|
||||
|
||||
|
|
|
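For context, `ImageConfigFromChanges` turns Dockerfile-style `--change` instructions into an OCI image configuration, and each branch above validates one instruction type. A hypothetical call in the same package (the change strings are examples, not taken from this diff):

```go
// exampleChanges shows the kind of input the parser above accepts.
func exampleChanges() (*ImageConfig, error) {
	changes := []string{
		"ENV PATH=/usr/local/bin",          // ENV may omit the value, never the key
		"EXPOSE 8080/tcp",                  // PORT[/PROTO]; proto must be tcp or udp
		"WORKDIR /srv",                     // joined onto any previously set WORKDIR
		"LABEL maintainer=dev@example.com", // LABEL always requires key=value
	}
	return ImageConfigFromChanges(changes)
}
```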
@ -2,6 +2,7 @@ package libimage
|
|||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"fmt"
|
||||
"net/url"
|
||||
"os"
|
||||
|
@ -10,7 +11,6 @@ import (
|
|||
storageTransport "github.com/containers/image/v5/storage"
|
||||
tarballTransport "github.com/containers/image/v5/tarball"
|
||||
v1 "github.com/opencontainers/image-spec/specs-go/v1"
|
||||
"github.com/pkg/errors"
|
||||
"github.com/sirupsen/logrus"
|
||||
)
|
||||
|
||||
|
@ -117,7 +117,7 @@ func (r *Runtime) Import(ctx context.Context, path string, options *ImportOption
|
|||
if options.Tag != "" {
|
||||
image, _, err := r.LookupImage(name, nil)
|
||||
if err != nil {
|
||||
return "", errors.Wrap(err, "looking up imported image")
|
||||
return "", fmt.Errorf("looking up imported image: %w", err)
|
||||
}
|
||||
if err := image.Tag(options.Tag); err != nil {
|
||||
return "", err
|
||||
|
|
|
@ -2,6 +2,7 @@ package libimage
|
|||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"fmt"
|
||||
"time"
|
||||
|
||||
|
@ -13,7 +14,6 @@ import (
|
|||
"github.com/containers/image/v5/types"
|
||||
"github.com/containers/storage"
|
||||
"github.com/opencontainers/go-digest"
|
||||
"github.com/pkg/errors"
|
||||
)
|
||||
|
||||
// NOTE: the abstractions and APIs here are a first step to further merge
|
||||
|
@ -145,7 +145,7 @@ func (m *ManifestList) LookupInstance(ctx context.Context, architecture, os, var
|
|||
}
|
||||
}
|
||||
|
||||
return nil, errors.Wrapf(storage.ErrImageUnknown, "could not find image instance %s of manifest list %s in local containers storage", instanceDigest, m.ID())
|
||||
return nil, fmt.Errorf("could not find image instance %s of manifest list %s in local containers storage: %w", instanceDigest, m.ID(), storage.ErrImageUnknown)
|
||||
}
|
||||
|
||||
// Saves the specified manifest list and reloads it from storage with the new ID.
|
||||
|
@ -169,6 +169,21 @@ func (m *ManifestList) saveAndReload() error {
|
|||
return nil
|
||||
}
|
||||
|
||||
// Reload the image and list instances from storage
|
||||
func (m *ManifestList) reload() error {
|
||||
listID := m.ID()
|
||||
if err := m.image.reload(); err != nil {
|
||||
return err
|
||||
}
|
||||
image, list, err := m.image.runtime.lookupManifestList(listID)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
m.image = image
|
||||
m.list = list
|
||||
return nil
|
||||
}
|
||||
|
||||
// getManifestList is a helper to obtain a manifest list
|
||||
func (i *Image) getManifestList() (manifests.List, error) {
|
||||
_, list, err := manifests.LoadFromImage(i.runtime.store, i.ID())
|
||||
|
@ -253,7 +268,17 @@ func (m *ManifestList) Add(ctx context.Context, name string, options *ManifestLi
|
|||
Password: options.Password,
|
||||
}
|
||||
}
|
||||
|
||||
locker, err := manifests.LockerForImage(m.image.runtime.store, m.ID())
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
locker.Lock()
|
||||
defer locker.Unlock()
|
||||
// Make sure to reload the image from the containers storage to fetch
|
||||
// the latest data (e.g., new or delete digests).
|
||||
if err := m.reload(); err != nil {
|
||||
return "", err
|
||||
}
|
||||
newDigest, err := m.list.Add(ctx, systemContext, ref, options.All)
|
||||
if err != nil {
|
||||
return "", err
|
||||
|
|
|
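Beyond the error rewrites, `ManifestList.Add` now serializes concurrent writers: it takes the per-image lock from `manifests.LockerForImage`, reloads the list from containers-storage, and only then appends the new instance, so parallel `manifest add` operations no longer clobber each other's changes. The shape of the pattern, simplified, with `store`, `listID` and `mutate` as placeholders for this illustration:

```go
// Sketch of the lock, reload, mutate sequence introduced above.
locker, err := manifests.LockerForImage(store, listID)
if err != nil {
	return err
}
locker.Lock()
defer locker.Unlock()

// Re-read the list under the lock so the change builds on the latest stored state.
if err := m.reload(); err != nil {
	return err
}
return mutate() // e.g. m.list.Add(...) followed by saving the list back
```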
@ -3,7 +3,9 @@ package manifests
|
|||
import (
|
||||
"context"
|
||||
"encoding/json"
|
||||
"errors"
|
||||
stderrors "errors"
|
||||
"fmt"
|
||||
"io"
|
||||
|
||||
"github.com/containers/common/pkg/manifests"
|
||||
|
@ -21,7 +23,6 @@ import (
|
|||
"github.com/containers/storage/pkg/lockfile"
|
||||
digest "github.com/opencontainers/go-digest"
|
||||
v1 "github.com/opencontainers/image-spec/specs-go/v1"
|
||||
"github.com/pkg/errors"
|
||||
"github.com/sirupsen/logrus"
|
||||
)
|
||||
|
||||
|
@ -83,11 +84,11 @@ func Create() List {
|
|||
func LoadFromImage(store storage.Store, image string) (string, List, error) {
|
||||
img, err := store.Image(image)
|
||||
if err != nil {
|
||||
return "", nil, errors.Wrapf(err, "error locating image %q for loading manifest list", image)
|
||||
return "", nil, fmt.Errorf("error locating image %q for loading manifest list: %w", image, err)
|
||||
}
|
||||
manifestBytes, err := store.ImageBigData(img.ID, storage.ImageDigestManifestBigDataNamePrefix)
|
||||
if err != nil {
|
||||
return "", nil, errors.Wrapf(err, "error locating image %q for loading manifest list", image)
|
||||
return "", nil, fmt.Errorf("error locating image %q for loading manifest list: %w", image, err)
|
||||
}
|
||||
manifestList, err := manifests.FromBlob(manifestBytes)
|
||||
if err != nil {
|
||||
|
@ -99,10 +100,10 @@ func LoadFromImage(store storage.Store, image string) (string, List, error) {
|
|||
}
|
||||
instancesBytes, err := store.ImageBigData(img.ID, instancesData)
|
||||
if err != nil {
|
||||
return "", nil, errors.Wrapf(err, "error locating image %q for loading instance list", image)
|
||||
return "", nil, fmt.Errorf("error locating image %q for loading instance list: %w", image, err)
|
||||
}
|
||||
if err := json.Unmarshal(instancesBytes, &list.instances); err != nil {
|
||||
return "", nil, errors.Wrapf(err, "error decoding instance list for image %q", image)
|
||||
return "", nil, fmt.Errorf("error decoding instance list for image %q: %w", image, err)
|
||||
}
|
||||
list.instances[""] = img.ID
|
||||
return img.ID, list, err
|
||||
|
@ -122,7 +123,7 @@ func (l *list) SaveToImage(store storage.Store, imageID string, names []string,
|
|||
return "", err
|
||||
}
|
||||
img, err := store.CreateImage(imageID, names, "", "", &storage.ImageOptions{})
|
||||
if err == nil || errors.Cause(err) == storage.ErrDuplicateID {
|
||||
if err == nil || errors.Is(err, storage.ErrDuplicateID) {
|
||||
created := (err == nil)
|
||||
if created {
|
||||
imageID = img.ID
|
||||
|
@ -135,7 +136,7 @@ func (l *list) SaveToImage(store storage.Store, imageID string, names []string,
|
|||
logrus.Errorf("Deleting image %q after failing to save manifest for it", img.ID)
|
||||
}
|
||||
}
|
||||
return "", errors.Wrapf(err, "saving manifest list to image %q", imageID)
|
||||
return "", fmt.Errorf("saving manifest list to image %q: %w", imageID, err)
|
||||
}
|
||||
err = store.SetImageBigData(imageID, instancesData, instancesBytes, nil)
|
||||
if err != nil {
|
||||
|
@ -144,22 +145,22 @@ func (l *list) SaveToImage(store storage.Store, imageID string, names []string,
|
|||
logrus.Errorf("Deleting image %q after failing to save instance locations for it", img.ID)
|
||||
}
|
||||
}
|
||||
return "", errors.Wrapf(err, "saving instance list to image %q", imageID)
|
||||
return "", fmt.Errorf("saving instance list to image %q: %w", imageID, err)
|
||||
}
|
||||
return imageID, nil
|
||||
}
|
||||
return "", errors.Wrapf(err, "error creating image to hold manifest list")
|
||||
return "", fmt.Errorf("error creating image to hold manifest list: %w", err)
|
||||
}
|
||||
|
||||
// Reference returns an image reference for the composite image being built
|
||||
// in the list, or an error if the list has never been saved to a local image.
|
||||
func (l *list) Reference(store storage.Store, multiple cp.ImageListSelection, instances []digest.Digest) (types.ImageReference, error) {
|
||||
if l.instances[""] == "" {
|
||||
return nil, errors.Wrap(ErrListImageUnknown, "error building reference to list")
|
||||
return nil, fmt.Errorf("error building reference to list: %w", ErrListImageUnknown)
|
||||
}
|
||||
s, err := is.Transport.ParseStoreReference(store, l.instances[""])
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "error creating ImageReference from image %q", l.instances[""])
|
||||
return nil, fmt.Errorf("error creating ImageReference from image %q: %w", l.instances[""], err)
|
||||
}
|
||||
references := make([]types.ImageReference, 0, len(l.instances))
|
||||
whichInstances := make([]digest.Digest, 0, len(l.instances))
|
||||
|
@ -183,7 +184,7 @@ func (l *list) Reference(store storage.Store, multiple cp.ImageListSelection, in
|
|||
imageName := l.instances[instance]
|
||||
ref, err := alltransports.ParseImageName(imageName)
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "error creating ImageReference from image %q", imageName)
|
||||
return nil, fmt.Errorf("error creating ImageReference from image %q: %w", imageName, err)
|
||||
}
|
||||
references = append(references, ref)
|
||||
}
|
||||
|
@ -195,7 +196,7 @@ func (l *list) Push(ctx context.Context, dest types.ImageReference, options Push
|
|||
// Load the system signing policy.
|
||||
pushPolicy, err := signature.DefaultPolicy(options.SystemContext)
|
||||
if err != nil {
|
||||
return nil, "", errors.Wrapf(err, "error obtaining default signature policy")
|
||||
return nil, "", fmt.Errorf("error obtaining default signature policy: %w", err)
|
||||
}
|
||||
|
||||
// Override the settings for local storage to make sure that we can always read the source "image".
|
||||
|
@ -203,7 +204,7 @@ func (l *list) Push(ctx context.Context, dest types.ImageReference, options Push
|
|||
|
||||
policyContext, err := signature.NewPolicyContext(pushPolicy)
|
||||
if err != nil {
|
||||
return nil, "", errors.Wrapf(err, "error creating new signature policy context")
|
||||
return nil, "", fmt.Errorf("error creating new signature policy context: %w", err)
|
||||
}
|
||||
defer func() {
|
||||
if err2 := policyContext.Destroy(); err2 != nil {
|
||||
|
@ -266,7 +267,7 @@ func (l *list) Push(ctx context.Context, dest types.ImageReference, options Push
|
|||
func (l *list) Add(ctx context.Context, sys *types.SystemContext, ref types.ImageReference, all bool) (digest.Digest, error) {
|
||||
src, err := ref.NewImageSource(ctx, sys)
|
||||
if err != nil {
|
||||
return "", errors.Wrapf(err, "error setting up to read manifest and configuration from %q", transports.ImageName(ref))
|
||||
return "", fmt.Errorf("error setting up to read manifest and configuration from %q: %w", transports.ImageName(ref), err)
|
||||
}
|
||||
defer src.Close()
|
||||
|
||||
|
@ -281,13 +282,13 @@ func (l *list) Add(ctx context.Context, sys *types.SystemContext, ref types.Imag
|
|||
|
||||
primaryManifestBytes, primaryManifestType, err := src.GetManifest(ctx, nil)
|
||||
if err != nil {
|
||||
return "", errors.Wrapf(err, "error reading manifest from %q", transports.ImageName(ref))
|
||||
return "", fmt.Errorf("error reading manifest from %q: %w", transports.ImageName(ref), err)
|
||||
}
|
||||
|
||||
if manifest.MIMETypeIsMultiImage(primaryManifestType) {
|
||||
lists, err := manifests.FromBlob(primaryManifestBytes)
|
||||
if err != nil {
|
||||
return "", errors.Wrapf(err, "error parsing manifest list in %q", transports.ImageName(ref))
|
||||
return "", fmt.Errorf("error parsing manifest list in %q: %w", transports.ImageName(ref), err)
|
||||
}
|
||||
if all {
|
||||
for i, instance := range lists.OCIv1().Manifests {
|
||||
|
@ -311,11 +312,11 @@ func (l *list) Add(ctx context.Context, sys *types.SystemContext, ref types.Imag
|
|||
} else {
|
||||
list, err := manifest.ListFromBlob(primaryManifestBytes, primaryManifestType)
|
||||
if err != nil {
|
||||
return "", errors.Wrapf(err, "error parsing manifest list in %q", transports.ImageName(ref))
|
||||
return "", fmt.Errorf("error parsing manifest list in %q: %w", transports.ImageName(ref), err)
|
||||
}
|
||||
instanceDigest, err := list.ChooseInstance(sys)
|
||||
if err != nil {
|
||||
return "", errors.Wrapf(err, "error selecting image from manifest list in %q", transports.ImageName(ref))
|
||||
return "", fmt.Errorf("error selecting image from manifest list in %q: %w", transports.ImageName(ref), err)
|
||||
}
|
||||
added := false
|
||||
for i, instance := range lists.OCIv1().Manifests {
|
||||
|
@ -357,11 +358,11 @@ func (l *list) Add(ctx context.Context, sys *types.SystemContext, ref types.Imag
|
|||
if instanceInfo.OS == "" || instanceInfo.Architecture == "" {
|
||||
img, err := image.FromUnparsedImage(ctx, sys, image.UnparsedInstance(src, instanceInfo.instanceDigest))
|
||||
if err != nil {
|
||||
return "", errors.Wrapf(err, "error reading configuration blob from %q", transports.ImageName(ref))
|
||||
return "", fmt.Errorf("error reading configuration blob from %q: %w", transports.ImageName(ref), err)
|
||||
}
|
||||
config, err := img.OCIConfig(ctx)
|
||||
if err != nil {
|
||||
return "", errors.Wrapf(err, "error reading info about config blob from %q", transports.ImageName(ref))
|
||||
return "", fmt.Errorf("error reading info about config blob from %q: %w", transports.ImageName(ref), err)
|
||||
}
|
||||
if instanceInfo.OS == "" {
|
||||
instanceInfo.OS = config.OS
|
||||
|
@ -375,12 +376,12 @@ func (l *list) Add(ctx context.Context, sys *types.SystemContext, ref types.Imag
|
|||
}
|
||||
manifestBytes, manifestType, err := src.GetManifest(ctx, instanceInfo.instanceDigest)
|
||||
if err != nil {
|
||||
return "", errors.Wrapf(err, "error reading manifest from %q, instance %q", transports.ImageName(ref), instanceInfo.instanceDigest)
|
||||
return "", fmt.Errorf("error reading manifest from %q, instance %q: %w", transports.ImageName(ref), instanceInfo.instanceDigest, err)
|
||||
}
|
||||
if instanceInfo.instanceDigest == nil {
|
||||
manifestDigest, err = manifest.Digest(manifestBytes)
|
||||
if err != nil {
|
||||
return "", errors.Wrapf(err, "error computing digest of manifest from %q", transports.ImageName(ref))
|
||||
return "", fmt.Errorf("error computing digest of manifest from %q: %w", transports.ImageName(ref), err)
|
||||
}
|
||||
instanceInfo.instanceDigest = &manifestDigest
|
||||
instanceInfo.Size = int64(len(manifestBytes))
|
||||
|
@ -389,7 +390,7 @@ func (l *list) Add(ctx context.Context, sys *types.SystemContext, ref types.Imag
|
|||
}
|
||||
err = l.List.AddInstance(*instanceInfo.instanceDigest, instanceInfo.Size, manifestType, instanceInfo.OS, instanceInfo.Architecture, instanceInfo.OSVersion, instanceInfo.OSFeatures, instanceInfo.Variant, instanceInfo.Features, instanceInfo.Annotations)
|
||||
if err != nil {
|
||||
return "", errors.Wrapf(err, "error adding instance with digest %q", *instanceInfo.instanceDigest)
|
||||
return "", fmt.Errorf("error adding instance with digest %q: %w", *instanceInfo.instanceDigest, err)
|
||||
}
|
||||
if _, ok := l.instances[*instanceInfo.instanceDigest]; !ok {
|
||||
l.instances[*instanceInfo.instanceDigest] = transports.ImageName(ref)
|
||||
|
@ -416,11 +417,11 @@ func (l *list) Remove(instanceDigest digest.Digest) error {
|
|||
func LockerForImage(store storage.Store, image string) (lockfile.Locker, error) {
|
||||
img, err := store.Image(image)
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "locating image %q for locating lock", image)
|
||||
return nil, fmt.Errorf("locating image %q for locating lock: %w", image, err)
|
||||
}
|
||||
d := digest.NewDigestFromEncoded(digest.Canonical, img.ID)
|
||||
if err := d.Validate(); err != nil {
|
||||
return nil, errors.Wrapf(err, "coercing image ID for %q into a digest", image)
|
||||
return nil, fmt.Errorf("coercing image ID for %q into a digest: %w", image, err)
|
||||
}
|
||||
return store.GetDigestLock(d)
|
||||
}
|
||||
|
|
|
@ -1,10 +1,10 @@
|
|||
package libimage
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"strings"
|
||||
|
||||
"github.com/containers/image/v5/docker/reference"
|
||||
"github.com/pkg/errors"
|
||||
"github.com/sirupsen/logrus"
|
||||
)
|
||||
|
||||
|
@ -18,12 +18,12 @@ func NormalizeName(name string) (reference.Named, error) {
|
|||
// NOTE: this code is in symmetry with containers/image/pkg/shortnames.
|
||||
ref, err := reference.Parse(name)
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "error normalizing name %q", name)
|
||||
return nil, fmt.Errorf("error normalizing name %q: %w", name, err)
|
||||
}
|
||||
|
||||
named, ok := ref.(reference.Named)
|
||||
if !ok {
|
||||
return nil, errors.Errorf("%q is not a named reference", name)
|
||||
return nil, fmt.Errorf("%q is not a named reference", name)
|
||||
}
|
||||
|
||||
// Enforce "localhost" if needed.
|
||||
|
|
|
@ -2,6 +2,7 @@ package libimage
|
|||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"runtime"
|
||||
|
@ -23,7 +24,6 @@ import (
|
|||
"github.com/containers/storage"
|
||||
digest "github.com/opencontainers/go-digest"
|
||||
ociSpec "github.com/opencontainers/image-spec/specs-go/v1"
|
||||
"github.com/pkg/errors"
|
||||
"github.com/sirupsen/logrus"
|
||||
)
|
||||
|
||||
|
@ -74,7 +74,7 @@ func (r *Runtime) Pull(ctx context.Context, name string, pullPolicy config.PullP
|
|||
// In fact, we need to since they are not parseable.
|
||||
if strings.HasPrefix(name, "sha256:") || (len(name) == 64 && !strings.ContainsAny(name, "/.:@")) {
|
||||
if pullPolicy == config.PullPolicyAlways {
|
||||
return nil, errors.Errorf("pull policy is always but image has been referred to by ID (%s)", name)
|
||||
return nil, fmt.Errorf("pull policy is always but image has been referred to by ID (%s)", name)
|
||||
}
|
||||
local, _, err := r.LookupImage(name, nil)
|
||||
if err != nil {
|
||||
|
@ -113,7 +113,7 @@ func (r *Runtime) Pull(ctx context.Context, name string, pullPolicy config.PullP
|
|||
}
|
||||
|
||||
if options.AllTags && ref.Transport().Name() != registryTransport.Transport.Name() {
|
||||
return nil, errors.Errorf("pulling all tags is not supported for %s transport", ref.Transport().Name())
|
||||
return nil, fmt.Errorf("pulling all tags is not supported for %s transport", ref.Transport().Name())
|
||||
}
|
||||
|
||||
if r.eventChannel != nil {
|
||||
|
@ -163,7 +163,7 @@ func (r *Runtime) Pull(ctx context.Context, name string, pullPolicy config.PullP
|
|||
for _, name := range pulledImages {
|
||||
image, _, err := r.LookupImage(name, nil)
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "error locating pulled image %q name in containers storage", name)
|
||||
return nil, fmt.Errorf("error locating pulled image %q name in containers storage: %w", name, err)
|
||||
}
|
||||
|
||||
// Note that we can ignore the 2nd return value here. Some
|
||||
|
@ -258,7 +258,7 @@ func (r *Runtime) copyFromDefault(ctx context.Context, ref types.ImageReference,
|
|||
storageName = ref.StringWithinTransport()
|
||||
named := ref.DockerReference()
|
||||
if named == nil {
|
||||
return nil, errors.Errorf("could not get an image name for storage reference %q", ref)
|
||||
return nil, fmt.Errorf("could not get an image name for storage reference %q", ref)
|
||||
}
|
||||
imageName = named.String()
|
||||
|
||||
|
@ -276,7 +276,7 @@ func (r *Runtime) copyFromDefault(ctx context.Context, ref types.ImageReference,
|
|||
// Create a storage reference.
|
||||
destRef, err := storageTransport.Transport.ParseStoreReference(r.store, storageName)
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "parsing %q", storageName)
|
||||
return nil, fmt.Errorf("parsing %q: %w", storageName, err)
|
||||
}
|
||||
|
||||
_, err = c.copy(ctx, ref, destRef)
|
||||
|
@ -318,7 +318,7 @@ func (r *Runtime) storageReferencesReferencesFromArchiveReader(ctx context.Conte
|
|||
for _, destName := range destNames {
|
||||
destRef, err := storageTransport.Transport.ParseStoreReference(r.store, destName)
|
||||
if err != nil {
|
||||
return nil, nil, errors.Wrapf(err, "error parsing dest reference name %#v", destName)
|
||||
return nil, nil, fmt.Errorf("error parsing dest reference name %#v: %w", destName, err)
|
||||
}
|
||||
references = append(references, destRef)
|
||||
}
|
||||
|
@ -393,13 +393,13 @@ func (r *Runtime) copyFromRegistry(ctx context.Context, ref types.ImageReference
|
|||
for _, tag := range tags {
|
||||
select { // Let's be gentle with Podman remote.
|
||||
case <-ctx.Done():
|
||||
return nil, errors.Errorf("pulling cancelled")
|
||||
return nil, fmt.Errorf("pulling cancelled")
|
||||
default:
|
||||
// We can continue.
|
||||
}
|
||||
tagged, err := reference.WithTag(named, tag)
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "error creating tagged reference (name %s, tag %s)", named.String(), tag)
|
||||
return nil, fmt.Errorf("error creating tagged reference (name %s, tag %s): %w", named.String(), tag, err)
|
||||
}
|
||||
pulled, err := r.copySingleImageFromRegistry(ctx, tagged.String(), pullPolicy, options)
|
||||
if err != nil {
|
||||
|
@ -423,30 +423,30 @@ func (r *Runtime) imagesIDsForManifest(manifestBytes []byte, sys *types.SystemCo
|
|||
if manifest.MIMETypeIsMultiImage(manifestType) {
|
||||
list, err := manifest.ListFromBlob(manifestBytes, manifestType)
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "parsing manifest list")
|
||||
return nil, fmt.Errorf("parsing manifest list: %w", err)
|
||||
}
|
||||
d, err := list.ChooseInstance(sys)
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "choosing instance from manifest list")
|
||||
return nil, fmt.Errorf("choosing instance from manifest list: %w", err)
|
||||
}
|
||||
imageDigest = d
|
||||
} else {
|
||||
d, err := manifest.Digest(manifestBytes)
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "digesting manifest")
|
||||
return nil, fmt.Errorf("digesting manifest")
|
||||
}
|
||||
imageDigest = d
|
||||
}
|
||||
images, err := r.store.ImagesByDigest(imageDigest)
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "listing images by manifest digest")
|
||||
return nil, fmt.Errorf("listing images by manifest digest: %w", err)
|
||||
}
|
||||
results := make([]string, 0, len(images))
|
||||
for _, image := range images {
|
||||
results = append(results, image.ID)
|
||||
}
|
||||
if len(results) == 0 {
|
||||
return nil, errors.Wrapf(storage.ErrImageUnknown, "identifying new image by manifest digest")
|
||||
return nil, fmt.Errorf("identifying new image by manifest digest: %w", storage.ErrImageUnknown)
|
||||
}
|
||||
return results, nil
|
||||
}
|
||||
|
@ -483,7 +483,7 @@ func (r *Runtime) copySingleImageFromRegistry(ctx context.Context, imageName str
|
|||
lookupImageOptions.OS = options.OS
|
||||
}
|
||||
localImage, resolvedImageName, err = r.LookupImage(imageName, lookupImageOptions)
|
||||
if err != nil && errors.Cause(err) != storage.ErrImageUnknown {
|
||||
if err != nil && !errors.Is(err, storage.ErrImageUnknown) {
|
||||
logrus.Errorf("Looking up %s in local storage: %v", imageName, err)
|
||||
}
|
||||
|
||||
|
@ -515,7 +515,7 @@ func (r *Runtime) copySingleImageFromRegistry(ctx context.Context, imageName str
|
|||
return []string{resolvedImageName}, nil
|
||||
}
|
||||
logrus.Debugf("Pull policy %q but no local image has been found for %s", pullPolicy, imageName)
|
||||
return nil, errors.Wrap(storage.ErrImageUnknown, imageName)
|
||||
return nil, fmt.Errorf("%s: %w", imageName, storage.ErrImageUnknown)
|
||||
}
|
||||
|
||||
if pullPolicy == config.PullPolicyMissing && localImage != nil {
|
||||
|
@ -526,7 +526,7 @@ func (r *Runtime) copySingleImageFromRegistry(ctx context.Context, imageName str
|
|||
if localImage != nil && strings.HasPrefix(localImage.ID(), imageName) {
|
||||
switch pullPolicy {
|
||||
case config.PullPolicyAlways:
|
||||
return nil, errors.Errorf("pull policy is always but image has been referred to by ID (%s)", imageName)
|
||||
return nil, fmt.Errorf("pull policy is always but image has been referred to by ID (%s)", imageName)
|
||||
default:
|
||||
return []string{resolvedImageName}, nil
|
||||
}
|
||||
|
@ -648,7 +648,7 @@ func (r *Runtime) copySingleImageFromRegistry(ctx context.Context, imageName str
|
|||
}
|
||||
|
||||
if len(pullErrors) == 0 {
|
||||
return nil, errors.Errorf("internal error: no image pulled (pull policy %s)", pullPolicy)
|
||||
return nil, fmt.Errorf("internal error: no image pulled (pull policy %s)", pullPolicy)
|
||||
}
|
||||
|
||||
return nil, resolved.FormatPullErrors(pullErrors)
|
||||
|
|
|
@ -2,6 +2,7 @@ package libimage
|
|||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"fmt"
|
||||
"os"
|
||||
"strings"
|
||||
|
@ -15,7 +16,6 @@ import (
|
|||
"github.com/containers/storage"
|
||||
deepcopy "github.com/jinzhu/copier"
|
||||
jsoniter "github.com/json-iterator/go"
|
||||
"github.com/pkg/errors"
|
||||
"github.com/sirupsen/logrus"
|
||||
)
|
||||
|
||||
|
@ -160,7 +160,7 @@ func (r *Runtime) storageToImage(storageImage *storage.Image, ref types.ImageRef
|
|||
// storage. Note that it may return false if an image corrupted.
|
||||
func (r *Runtime) Exists(name string) (bool, error) {
|
||||
image, _, err := r.LookupImage(name, nil)
|
||||
if err != nil && errors.Cause(err) != storage.ErrImageUnknown {
|
||||
if err != nil && !errors.Is(err, storage.ErrImageUnknown) {
|
||||
return false, err
|
||||
}
|
||||
if image == nil {
|
||||
|
@ -227,7 +227,7 @@ func (r *Runtime) LookupImage(name string, options *LookupImageOptions) (*Image,
|
|||
storageRef, err := alltransports.ParseImageName(name)
|
||||
if err == nil {
|
||||
if storageRef.Transport().Name() != storageTransport.Transport.Name() {
|
||||
return nil, "", errors.Errorf("unsupported transport %q for looking up local images", storageRef.Transport().Name())
|
||||
return nil, "", fmt.Errorf("unsupported transport %q for looking up local images", storageRef.Transport().Name())
|
||||
}
|
||||
img, err := storageTransport.Transport.GetStoreImage(r.store, storageRef)
|
||||
if err != nil {
|
||||
|
@ -266,7 +266,7 @@ func (r *Runtime) LookupImage(name string, options *LookupImageOptions) (*Image,
|
|||
if img != nil {
|
||||
return img, originalName, nil
|
||||
}
|
||||
return nil, "", errors.Wrap(storage.ErrImageUnknown, originalName)
|
||||
return nil, "", fmt.Errorf("%s: %w", originalName, storage.ErrImageUnknown)
|
||||
}
|
||||
|
||||
// Unless specified, set the platform specified in the system context
|
||||
|
@ -288,7 +288,7 @@ func (r *Runtime) LookupImage(name string, options *LookupImageOptions) (*Image,
|
|||
// "localhost/" prefixed images into account as well.
|
||||
candidates, err := shortnames.ResolveLocally(&r.systemContext, name)
|
||||
if err != nil {
|
||||
return nil, "", errors.Wrap(storage.ErrImageUnknown, name)
|
||||
return nil, "", fmt.Errorf("%s: %w", name, storage.ErrImageUnknown)
|
||||
}
|
||||
// Backwards compat: normalize to docker.io as some users may very well
|
||||
// rely on that.
|
||||
|
@ -324,7 +324,7 @@ func (r *Runtime) LookupImage(name string, options *LookupImageOptions) (*Image,
|
|||
func (r *Runtime) lookupImageInLocalStorage(name, candidate string, options *LookupImageOptions) (*Image, error) {
|
||||
logrus.Debugf("Trying %q ...", candidate)
|
||||
img, err := r.store.Image(candidate)
|
||||
if err != nil && errors.Cause(err) != storage.ErrImageUnknown {
|
||||
if err != nil && !errors.Is(err, storage.ErrImageUnknown) {
|
||||
return nil, err
|
||||
}
|
||||
if img == nil {
|
||||
|
@ -342,7 +342,7 @@ func (r *Runtime) lookupImageInLocalStorage(name, candidate string, options *Loo
|
|||
// find a matching instance in the local containers storage.
|
||||
isManifestList, err := image.IsManifestList(context.Background())
|
||||
if err != nil {
|
||||
if errors.Cause(err) == os.ErrNotExist {
|
||||
if errors.Is(err, os.ErrNotExist) {
|
||||
// We must be tolerant toward corrupted images.
|
||||
// See containers/podman commit fd9dd7065d44.
|
||||
logrus.Warnf("Failed to determine if an image is a manifest list: %v, ignoring the error", err)
|
||||
|
@ -356,7 +356,7 @@ func (r *Runtime) lookupImageInLocalStorage(name, candidate string, options *Loo
|
|||
}
|
||||
// return ErrNotAManifestList if lookupManifest is set otherwise try resolving image.
|
||||
if options.lookupManifest {
|
||||
return nil, errors.Wrapf(ErrNotAManifestList, candidate)
|
||||
return nil, fmt.Errorf("%s: %w", candidate, ErrNotAManifestList)
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -372,7 +372,7 @@ func (r *Runtime) lookupImageInLocalStorage(name, candidate string, options *Loo
|
|||
logrus.Debug("No matching instance was found: returning manifest list instead")
|
||||
return image, nil
|
||||
}
|
||||
return nil, errors.Wrap(storage.ErrImageUnknown, err.Error())
|
||||
return nil, fmt.Errorf("%v: %w", err, storage.ErrImageUnknown)
|
||||
}
|
||||
ref, err = storageTransport.Transport.ParseStoreReference(r.store, "@"+instance.ID())
|
||||
if err != nil {
|
||||
|
@ -434,7 +434,7 @@ func (r *Runtime) lookupImageInDigestsAndRepoTags(name string, options *LookupIm
|
|||
}
|
||||
named, isNamed := ref.(reference.Named)
|
||||
if !isNamed {
|
||||
return nil, "", errors.Wrap(storage.ErrImageUnknown, name)
|
||||
return nil, "", fmt.Errorf("%s: %w", name, storage.ErrImageUnknown)
|
||||
}
|
||||
|
||||
digested, isDigested := named.(reference.Digested)
|
||||
|
@ -454,11 +454,11 @@ func (r *Runtime) lookupImageInDigestsAndRepoTags(name string, options *LookupIm
|
|||
|
||||
}
|
||||
}
|
||||
return nil, "", errors.Wrap(storage.ErrImageUnknown, name)
|
||||
return nil, "", fmt.Errorf("%s: %w", name, storage.ErrImageUnknown)
|
||||
}
|
||||
|
||||
if !shortnames.IsShortName(name) {
|
||||
return nil, "", errors.Wrap(storage.ErrImageUnknown, name)
|
||||
return nil, "", fmt.Errorf("%s: %w", name, storage.ErrImageUnknown)
|
||||
}
|
||||
|
||||
named = reference.TagNameOnly(named) // Make sure to add ":latest" if needed
|
||||
|
@ -486,7 +486,7 @@ func (r *Runtime) lookupImageInDigestsAndRepoTags(name string, options *LookupIm
|
|||
}
|
||||
}
|
||||
|
||||
return nil, "", errors.Wrap(storage.ErrImageUnknown, name)
|
||||
return nil, "", fmt.Errorf("%s: %w", name, storage.ErrImageUnknown)
|
||||
}
|
||||
|
||||
// ResolveName resolves the specified name. If the name resolves to a local
|
||||
|
@ -499,7 +499,7 @@ func (r *Runtime) ResolveName(name string) (string, error) {
|
|||
return "", nil
|
||||
}
|
||||
image, resolvedName, err := r.LookupImage(name, nil)
|
||||
if err != nil && errors.Cause(err) != storage.ErrImageUnknown {
|
||||
if err != nil && !errors.Is(err, storage.ErrImageUnknown) {
|
||||
return "", err
|
||||
}
|
||||
|
||||
|
@ -713,7 +713,7 @@ func (r *Runtime) RemoveImages(ctx context.Context, names []string, options *Rem
|
|||
for _, id := range toDelete {
|
||||
del, exists := deleteMap[id]
|
||||
if !exists {
|
||||
appendError(errors.Errorf("internal error: ID %s not in found in image-deletion map", id))
|
||||
appendError(fmt.Errorf("internal error: ID %s not in found in image-deletion map", id))
|
||||
continue
|
||||
}
|
||||
if len(del.referencedBy) == 0 {
|
||||
|
|
|
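Lookup failures are now reported as `fmt.Errorf("%s: %w", name, storage.ErrImageUnknown)` rather than `errors.Wrap(storage.ErrImageUnknown, name)`. For callers the contract is unchanged: the sentinel stays at the bottom of the chain, so an "is the image present?" check remains a single `errors.Is`. A sketch of the caller side (the image name is invented):

```go
// Distinguishing "not found" from a real lookup failure.
img, _, err := rt.LookupImage("registry.example.com/app:latest", nil)
switch {
case err == nil:
	fmt.Println("found", img.ID())
case errors.Is(err, storage.ErrImageUnknown):
	fmt.Println("image not present in local storage") // the wrapped sentinel still matches
default:
	return err // some other storage problem
}
```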
@ -2,6 +2,8 @@ package libimage
|
|||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"fmt"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
|
@ -13,7 +15,6 @@ import (
|
|||
ociTransport "github.com/containers/image/v5/oci/layout"
|
||||
"github.com/containers/image/v5/types"
|
||||
ociv1 "github.com/opencontainers/image-spec/specs-go/v1"
|
||||
"github.com/pkg/errors"
|
||||
"github.com/sirupsen/logrus"
|
||||
)
|
||||
|
||||
|
@ -47,10 +48,10 @@ func (r *Runtime) Save(ctx context.Context, names []string, format, path string,
|
|||
// All formats support saving 1.
|
||||
default:
|
||||
if format != "docker-archive" {
|
||||
return errors.Errorf("unsupported format %q for saving multiple images (only docker-archive)", format)
|
||||
return fmt.Errorf("unsupported format %q for saving multiple images (only docker-archive)", format)
|
||||
}
|
||||
if len(options.AdditionalTags) > 0 {
|
||||
return errors.Errorf("cannot save multiple images with multiple tags")
|
||||
return fmt.Errorf("cannot save multiple images with multiple tags")
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -58,7 +59,7 @@ func (r *Runtime) Save(ctx context.Context, names []string, format, path string,
|
|||
switch format {
|
||||
case "oci-archive", "oci-dir", "docker-dir":
|
||||
if len(names) > 1 {
|
||||
return errors.Errorf("%q does not support saving multiple images (%v)", format, names)
|
||||
return fmt.Errorf("%q does not support saving multiple images (%v)", format, names)
|
||||
}
|
||||
return r.saveSingleImage(ctx, names[0], format, path, options)
|
||||
|
||||
|
@ -67,7 +68,7 @@ func (r *Runtime) Save(ctx context.Context, names []string, format, path string,
|
|||
return r.saveDockerArchive(ctx, names, path, options)
|
||||
}
|
||||
|
||||
return errors.Errorf("unsupported format %q for saving images", format)
|
||||
return fmt.Errorf("unsupported format %q for saving images", format)
|
||||
}
|
||||
|
||||
// saveSingleImage saves the specified image name to the specified path.
|
||||
|
@ -109,7 +110,7 @@ func (r *Runtime) saveSingleImage(ctx context.Context, name, format, path string
|
|||
options.ManifestMIMEType = manifest.DockerV2Schema2MediaType
|
||||
|
||||
default:
|
||||
return errors.Errorf("unsupported format %q for saving images", format)
|
||||
return fmt.Errorf("unsupported format %q for saving images", format)
|
||||
}
|
||||
|
||||
if err != nil {
|
||||
|
@ -143,7 +144,7 @@ func (r *Runtime) saveDockerArchive(ctx context.Context, names []string, path st
|
|||
if err == nil {
|
||||
tagged, withTag := named.(reference.NamedTagged)
|
||||
if !withTag {
|
||||
return errors.Errorf("invalid additional tag %q: normalized to untagged %q", tag, named.String())
|
||||
return fmt.Errorf("invalid additional tag %q: normalized to untagged %q", tag, named.String())
|
||||
}
|
||||
additionalTags = append(additionalTags, tagged)
|
||||
}
|
||||
|
@ -195,7 +196,7 @@ func (r *Runtime) saveDockerArchive(ctx context.Context, names []string, path st
|
|||
for _, id := range orderedIDs {
|
||||
local, exists := localImages[id]
|
||||
if !exists {
|
||||
return errors.Errorf("internal error: saveDockerArchive: ID %s not found in local map", id)
|
||||
return fmt.Errorf("internal error: saveDockerArchive: ID %s not found in local map", id)
|
||||
}
|
||||
|
||||
copyOpts := options.CopyOptions
|
||||
|
|
|
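The checks above limit multi-image saves to the `docker-archive` format; `oci-archive`, `oci-dir` and `docker-dir` accept exactly one image. A hypothetical call site (image names and the output path are made up for illustration):

```go
// Saving two images into a single tarball; only "docker-archive" allows this.
names := []string{"docker.io/library/alpine:latest", "docker.io/library/busybox:latest"}
if err := rt.Save(ctx, names, "docker-archive", "/tmp/images.tar", &libimage.SaveOptions{}); err != nil {
	return err
}
```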
@ -13,7 +13,6 @@ import (
|
|||
"github.com/containers/image/v5/transports/alltransports"
|
||||
"github.com/containers/image/v5/types"
|
||||
"github.com/hashicorp/go-multierror"
|
||||
"github.com/pkg/errors"
|
||||
"github.com/sirupsen/logrus"
|
||||
"golang.org/x/sync/semaphore"
|
||||
)
|
||||
|
@ -84,11 +83,11 @@ func ParseSearchFilter(filter []string) (*SearchFilter, error) {
|
|||
switch arr[0] {
|
||||
case define.SearchFilterStars:
|
||||
if len(arr) < 2 {
|
||||
return nil, errors.Errorf("invalid filter %q, should be stars=<value>", filter)
|
||||
return nil, fmt.Errorf("invalid filter %q, should be stars=<value>", filter)
|
||||
}
|
||||
stars, err := strconv.Atoi(arr[1])
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "incorrect value type for stars filter")
|
||||
return nil, fmt.Errorf("incorrect value type for stars filter: %w", err)
|
||||
}
|
||||
sFilter.Stars = stars
|
||||
case define.SearchFilterAutomated:
|
||||
|
@ -104,7 +103,7 @@ func ParseSearchFilter(filter []string) (*SearchFilter, error) {
|
|||
sFilter.IsOfficial = types.OptionalBoolTrue
|
||||
}
|
||||
default:
|
||||
return nil, errors.Errorf("invalid filter type %q", f)
|
||||
return nil, fmt.Errorf("invalid filter type %q", f)
|
||||
}
|
||||
}
|
||||
return sFilter, nil
|
||||
|
@ -273,16 +272,16 @@ func searchRepositoryTags(ctx context.Context, sys *types.SystemContext, registr
|
|||
dockerPrefix := "docker://"
|
||||
imageRef, err := alltransports.ParseImageName(fmt.Sprintf("%s/%s", registry, term))
|
||||
if err == nil && imageRef.Transport().Name() != registryTransport.Transport.Name() {
|
||||
return nil, errors.Errorf("reference %q must be a docker reference", term)
|
||||
return nil, fmt.Errorf("reference %q must be a docker reference", term)
|
||||
} else if err != nil {
|
||||
imageRef, err = alltransports.ParseImageName(fmt.Sprintf("%s%s", dockerPrefix, fmt.Sprintf("%s/%s", registry, term)))
|
||||
if err != nil {
|
||||
return nil, errors.Errorf("reference %q must be a docker reference", term)
|
||||
return nil, fmt.Errorf("reference %q must be a docker reference", term)
|
||||
}
|
||||
}
|
||||
tags, err := registryTransport.GetRepositoryTags(ctx, sys, imageRef)
|
||||
if err != nil {
|
||||
return nil, errors.Errorf("error getting repository tags: %v", err)
|
||||
return nil, fmt.Errorf("error getting repository tags: %v", err)
|
||||
}
|
||||
limit := searchMaxQueries
|
||||
if len(tags) < limit {
|
||||
|
|
|
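`ParseSearchFilter` accepts the `stars=<n>` filter plus the boolean automated/official filters named by the `define.SearchFilter*` constants; anything else is rejected with the "invalid filter type" error above. A short usage sketch (the key spellings follow those constants, the values are invented):

```go
// Turning CLI-style filter strings into a SearchFilter.
filter, err := libimage.ParseSearchFilter([]string{"stars=10", "is-official=true"})
if err != nil {
	return err
}
fmt.Printf("minimum stars: %d\n", filter.Stars)
```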
@ -5,6 +5,8 @@ package cni
|
|||
|
||||
import (
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"net"
|
||||
"os"
|
||||
|
@ -18,7 +20,6 @@ import (
|
|||
"github.com/containers/common/libnetwork/types"
|
||||
"github.com/containers/common/libnetwork/util"
|
||||
pkgutil "github.com/containers/common/pkg/util"
|
||||
"github.com/pkg/errors"
|
||||
"github.com/sirupsen/logrus"
|
||||
"golang.org/x/sys/unix"
|
||||
)
|
||||
|
@ -35,7 +36,7 @@ func createNetworkFromCNIConfigList(conf *libcni.NetworkConfigList, confPath str
|
|||
cniJSON := make(map[string]interface{})
|
||||
err := json.Unmarshal(conf.Bytes, &cniJSON)
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "failed to unmarshal network config %s", conf.Name)
|
||||
return nil, fmt.Errorf("failed to unmarshal network config %s: %w", conf.Name, err)
|
||||
}
|
||||
if args, ok := cniJSON["args"]; ok {
|
||||
if key, ok := args.(map[string]interface{}); ok {
|
||||
|
@ -59,7 +60,7 @@ func createNetworkFromCNIConfigList(conf *libcni.NetworkConfigList, confPath str
|
|||
var bridge hostLocalBridge
|
||||
err := json.Unmarshal(firstPlugin.Bytes, &bridge)
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "failed to unmarshal the bridge plugin config in %s", confPath)
|
||||
return nil, fmt.Errorf("failed to unmarshal the bridge plugin config in %s: %w", confPath, err)
|
||||
}
|
||||
network.NetworkInterface = bridge.BrName
|
||||
|
||||
|
@ -70,10 +71,10 @@ func createNetworkFromCNIConfigList(conf *libcni.NetworkConfigList, confPath str
|
|||
|
||||
// set network options
|
||||
if bridge.MTU != 0 {
|
||||
network.Options["mtu"] = strconv.Itoa(bridge.MTU)
|
||||
network.Options[types.MTUOption] = strconv.Itoa(bridge.MTU)
|
||||
}
|
||||
if bridge.Vlan != 0 {
|
||||
network.Options["vlan"] = strconv.Itoa(bridge.Vlan)
|
||||
network.Options[types.VLANOption] = strconv.Itoa(bridge.Vlan)
|
||||
}
|
||||
|
||||
err = convertIPAMConfToNetwork(&network, &bridge.IPAM, confPath)
|
||||
|
@ -85,17 +86,17 @@ func createNetworkFromCNIConfigList(conf *libcni.NetworkConfigList, confPath str
|
|||
var vlan VLANConfig
|
||||
err := json.Unmarshal(firstPlugin.Bytes, &vlan)
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "failed to unmarshal the macvlan plugin config in %s", confPath)
|
||||
return nil, fmt.Errorf("failed to unmarshal the macvlan plugin config in %s: %w", confPath, err)
|
||||
}
|
||||
network.NetworkInterface = vlan.Master
|
||||
|
||||
// set network options
|
||||
if vlan.MTU != 0 {
|
||||
network.Options["mtu"] = strconv.Itoa(vlan.MTU)
|
||||
network.Options[types.MTUOption] = strconv.Itoa(vlan.MTU)
|
||||
}
|
||||
|
||||
if vlan.Mode != "" {
|
||||
network.Options["mode"] = vlan.Mode
|
||||
network.Options[types.ModeOption] = vlan.Mode
|
||||
}
|
||||
|
||||
err = convertIPAMConfToNetwork(&network, &vlan.IPAM, confPath)
|
||||
|
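String option keys such as `"mtu"`, `"vlan"` and `"mode"` are replaced by the exported constants in `libnetwork/types` (`types.MTUOption`, `types.VLANOption`, `types.ModeOption`, plus the new `types.IsolateOption`), so the CNI config generator and the option parser further down cannot drift apart on a key's spelling. A hypothetical network definition using the same constants:

```go
// Network options keyed by the typed constants (values invented).
newNet := types.Network{
	Driver: types.BridgeNetworkDriver,
	Options: map[string]string{
		types.MTUOption:     "1400",
		types.IsolateOption: "true", // consumed below via strconv.ParseBool
	},
}
```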
@ -110,18 +111,31 @@ func createNetworkFromCNIConfigList(conf *libcni.NetworkConfigList, confPath str
|
|||
}
|
||||
|
||||
// check if the dnsname plugin is configured
|
||||
network.DNSEnabled = findPluginByName(conf.Plugins, "dnsname")
|
||||
network.DNSEnabled = findPluginByName(conf.Plugins, "dnsname") != nil
|
||||
|
||||
// now get isolation mode from firewall plugin
|
||||
firewall := findPluginByName(conf.Plugins, "firewall")
|
||||
if firewall != nil {
|
||||
var firewallConf firewallConfig
|
||||
err := json.Unmarshal(firewall.Bytes, &firewallConf)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to unmarshal the firewall plugin config in %s: %w", confPath, err)
|
||||
}
|
||||
if firewallConf.IngressPolicy == ingressPolicySameBridge {
|
||||
network.Options[types.IsolateOption] = "true"
|
||||
}
|
||||
}
|
||||
|
||||
return &network, nil
|
||||
}
|
||||
|
||||
func findPluginByName(plugins []*libcni.NetworkConfig, name string) bool {
|
||||
for _, plugin := range plugins {
|
||||
if plugin.Network.Type == name {
|
||||
return true
|
||||
func findPluginByName(plugins []*libcni.NetworkConfig, name string) *libcni.NetworkConfig {
|
||||
for i := range plugins {
|
||||
if plugins[i].Network.Type == name {
|
||||
return plugins[i]
|
||||
}
|
||||
}
|
||||
return false
|
||||
return nil
|
||||
}
|
||||
|
||||
// convertIPAMConfToNetwork converts a CNI IPAMConfig to libpod network subnets.
|
||||
|
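`findPluginByName` now returns the matching `*libcni.NetworkConfig` instead of a bare bool, so a single lookup both answers "is the dnsname plugin present?" and hands back the firewall plugin's raw JSON for the ingress-policy check. Condensed, the caller pattern introduced above is:

```go
// dnsname: presence alone is enough.
network.DNSEnabled = findPluginByName(conf.Plugins, "dnsname") != nil

// firewall: the returned config is decoded to read its ingressPolicy.
if firewall := findPluginByName(conf.Plugins, "firewall"); firewall != nil {
	var firewallConf firewallConfig
	if err := json.Unmarshal(firewall.Bytes, &firewallConf); err != nil {
		return nil, fmt.Errorf("failed to unmarshal the firewall plugin config in %s: %w", confPath, err)
	}
	if firewallConf.IngressPolicy == ingressPolicySameBridge {
		network.Options[types.IsolateOption] = "true"
	}
}
```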
@ -151,7 +165,7 @@ func convertIPAMConfToNetwork(network *types.Network, ipam *ipamConfig, confPath
|
|||
if ipam.Gateway != "" {
|
||||
gateway = net.ParseIP(ipam.Gateway)
|
||||
if gateway == nil {
|
||||
return errors.Errorf("failed to parse gateway ip %s", ipam.Gateway)
|
||||
return fmt.Errorf("failed to parse gateway ip %s", ipam.Gateway)
|
||||
}
|
||||
// convert to 4 byte if ipv4
|
||||
util.NormalizeIP(&gateway)
|
||||
|
@ -159,7 +173,7 @@ func convertIPAMConfToNetwork(network *types.Network, ipam *ipamConfig, confPath
|
|||
// only add a gateway address if the network is not internal
|
||||
gateway, err = util.FirstIPInSubnet(sub)
|
||||
if err != nil {
|
||||
return errors.Errorf("failed to get first ip in subnet %s", sub.String())
|
||||
return fmt.Errorf("failed to get first ip in subnet %s", sub.String())
|
||||
}
|
||||
}
|
||||
s.Gateway = gateway
|
||||
|
@ -169,13 +183,13 @@ func convertIPAMConfToNetwork(network *types.Network, ipam *ipamConfig, confPath
|
|||
if ipam.RangeStart != "" {
|
||||
rangeStart = net.ParseIP(ipam.RangeStart)
|
||||
if rangeStart == nil {
|
||||
return errors.Errorf("failed to parse range start ip %s", ipam.RangeStart)
|
||||
return fmt.Errorf("failed to parse range start ip %s", ipam.RangeStart)
|
||||
}
|
||||
}
|
||||
if ipam.RangeEnd != "" {
|
||||
rangeEnd = net.ParseIP(ipam.RangeEnd)
|
||||
if rangeEnd == nil {
|
||||
return errors.Errorf("failed to parse range end ip %s", ipam.RangeEnd)
|
||||
return fmt.Errorf("failed to parse range end ip %s", ipam.RangeEnd)
|
||||
}
|
||||
}
|
||||
if rangeStart != nil || rangeEnd != nil {
|
||||
|
@ -267,7 +281,7 @@ func (n *cniNetwork) createCNIConfigListFromNetwork(network *types.Network, writ
|
|||
case types.NoneIPAMDriver:
|
||||
// do nothing
|
||||
default:
|
||||
return nil, "", errors.Errorf("unsupported ipam driver %q", ipamDriver)
|
||||
return nil, "", fmt.Errorf("unsupported ipam driver %q", ipamDriver)
|
||||
}
|
||||
|
||||
opts, err := parseOptions(network.Options, network.Driver)
|
||||
|
@ -291,7 +305,7 @@ func (n *cniNetwork) createCNIConfigListFromNetwork(network *types.Network, writ
|
|||
switch network.Driver {
|
||||
case types.BridgeNetworkDriver:
|
||||
bridge := newHostLocalBridge(network.NetworkInterface, isGateway, ipMasq, opts.mtu, opts.vlan, ipamConf)
|
||||
plugins = append(plugins, bridge, newPortMapPlugin(), newFirewallPlugin(), newTuningPlugin())
|
||||
plugins = append(plugins, bridge, newPortMapPlugin(), newFirewallPlugin(opts.isolate), newTuningPlugin())
|
||||
// if we find the dnsname plugin we add configuration for it
|
||||
if hasDNSNamePlugin(n.cniPluginDirs) && network.DNSEnabled {
|
||||
// Note: in the future we might like to allow for dynamic domain names
|
||||
|
@ -305,7 +319,7 @@ func (n *cniNetwork) createCNIConfigListFromNetwork(network *types.Network, writ
|
|||
plugins = append(plugins, newVLANPlugin(types.IPVLANNetworkDriver, network.NetworkInterface, opts.vlanPluginMode, opts.mtu, ipamConf))
|
||||
|
||||
default:
|
||||
return nil, "", errors.Errorf("driver %q is not supported by cni", network.Driver)
|
||||
return nil, "", fmt.Errorf("driver %q is not supported by cni", network.Driver)
|
||||
}
|
||||
ncList["plugins"] = plugins
|
||||
b, err := json.MarshalIndent(ncList, "", " ")
|
||||
|
@ -344,7 +358,7 @@ func convertSpecgenPortsToCNIPorts(ports []types.PortMapping) ([]cniPortMapEntry
|
|||
|
||||
for _, protocol := range protocols {
|
||||
if !pkgutil.StringInSlice(protocol, []string{"tcp", "udp", "sctp"}) {
|
||||
return nil, errors.Errorf("unknown port protocol %s", protocol)
|
||||
return nil, fmt.Errorf("unknown port protocol %s", protocol)
|
||||
}
|
||||
cniPort := cniPortMapEntry{
|
||||
HostPort: int(port.HostPort),
|
||||
|
@ -382,6 +396,7 @@ type options struct {
|
|||
vlan int
|
||||
mtu int
|
||||
vlanPluginMode string
|
||||
isolate bool
|
||||
}
|
||||
|
||||
func parseOptions(networkOptions map[string]string, networkDriver string) (*options, error) {
|
||||
|
@ -389,35 +404,44 @@ func parseOptions(networkOptions map[string]string, networkDriver string) (*opti
|
|||
var err error
|
||||
for k, v := range networkOptions {
|
||||
switch k {
|
||||
case "mtu":
|
||||
case types.MTUOption:
|
||||
opt.mtu, err = internalutil.ParseMTU(v)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
case "vlan":
|
||||
case types.VLANOption:
|
||||
opt.vlan, err = internalutil.ParseVlan(v)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
case "mode":
|
||||
case types.ModeOption:
|
||||
switch networkDriver {
|
||||
case types.MacVLANNetworkDriver:
|
||||
if !pkgutil.StringInSlice(v, types.ValidMacVLANModes) {
|
||||
return nil, errors.Errorf("unknown macvlan mode %q", v)
|
||||
return nil, fmt.Errorf("unknown macvlan mode %q", v)
|
||||
}
|
||||
case types.IPVLANNetworkDriver:
|
||||
if !pkgutil.StringInSlice(v, types.ValidIPVLANModes) {
|
||||
return nil, errors.Errorf("unknown ipvlan mode %q", v)
|
||||
return nil, fmt.Errorf("unknown ipvlan mode %q", v)
|
||||
}
|
||||
default:
|
||||
return nil, errors.Errorf("cannot set option \"mode\" with driver %q", networkDriver)
|
||||
return nil, fmt.Errorf("cannot set option \"mode\" with driver %q", networkDriver)
|
||||
}
|
||||
opt.vlanPluginMode = v
|
||||
|
||||
case types.IsolateOption:
|
||||
if networkDriver != types.BridgeNetworkDriver {
|
||||
return nil, errors.New("isolate option is only supported with the bridge driver")
|
||||
}
|
||||
opt.isolate, err = strconv.ParseBool(v)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to parse isolate option: %w", err)
|
||||
}
|
||||
|
||||
default:
|
||||
return nil, errors.Errorf("unsupported network option %s", k)
|
||||
return nil, fmt.Errorf("unsupported network option %s", k)
|
||||
}
|
||||
}
|
||||
return opt, nil
|
||||
|
|
|
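The recurring change across these hunks is the migration off github.com/pkg/errors: plain errors.Errorf calls become fmt.Errorf, and wrapped causes switch to the %w verb so callers can still match sentinel errors with errors.Is. A minimal sketch of the pattern, not the vendored code; errInvalidArg stands in for a sentinel such as types.ErrInvalidArg, and the mode names are arbitrary examples:

package main

import (
	"errors"
	"fmt"
)

// errInvalidArg stands in for a sentinel error such as types.ErrInvalidArg.
var errInvalidArg = errors.New("invalid argument")

func validateMode(driver, mode string) error {
	if driver != "macvlan" {
		// Wrapping with %w replaces errors.Wrapf(errInvalidArg, ...): the sentinel
		// stays in the chain and remains detectable with errors.Is / errors.As.
		return fmt.Errorf("cannot set option \"mode\" with driver %q: %w", driver, errInvalidArg)
	}
	if mode != "bridge" && mode != "private" {
		// Plain formatted error: fmt.Errorf is a drop-in for errors.Errorf.
		return fmt.Errorf("unknown macvlan mode %q", mode)
	}
	return nil
}

func main() {
	err := validateMode("ipvlan", "l2")
	fmt.Println(errors.Is(err, errInvalidArg)) // true
}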
@@ -26,6 +26,9 @@ const (

// podmanOptionsKey key used to store the podman network options in a cni config
podmanOptionsKey = "podman_options"

// ingressPolicySameBridge is used to only allow connection on the same bridge network
ingressPolicySameBridge = "same-bridge"
)

// cniPortMapEntry struct is used by the portmap plugin

@@ -95,8 +98,9 @@ type VLANConfig struct {

// firewallConfig describes the firewall plugin
type firewallConfig struct {
PluginType string `json:"type"`
Backend string `json:"backend"`
PluginType string `json:"type"`
Backend string `json:"backend"`
IngressPolicy string `json:"ingressPolicy,omitempty"`
}

// tuningConfig describes the tuning plugin

@@ -222,10 +226,14 @@ func newPortMapPlugin() portMapConfig {
}

// newFirewallPlugin creates a generic firewall plugin
func newFirewallPlugin() firewallConfig {
return firewallConfig{
func newFirewallPlugin(isolate bool) firewallConfig {
fw := firewallConfig{
PluginType: "firewall",
}
if isolate {
fw.IngressPolicy = ingressPolicySameBridge
}
return fw
}

// newTuningPlugin creates a generic tuning section

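With newFirewallPlugin now taking the isolate flag, an isolated bridge network gets a firewall plugin entry whose ingressPolicy is "same-bridge". A small standalone sketch of the JSON that the struct above would produce; the surrounding plugin list and backend selection are omitted:

package main

import (
	"encoding/json"
	"fmt"
)

// firewallConfig mirrors the struct shown in the hunk above.
type firewallConfig struct {
	PluginType    string `json:"type"`
	Backend       string `json:"backend"`
	IngressPolicy string `json:"ingressPolicy,omitempty"`
}

func main() {
	// Equivalent of newFirewallPlugin(true): isolate adds the same-bridge policy.
	fw := firewallConfig{PluginType: "firewall", IngressPolicy: "same-bridge"}
	b, err := json.Marshal(fw)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(b))
	// {"type":"firewall","backend":"","ingressPolicy":"same-bridge"}
	// Without isolate, ingressPolicy is dropped entirely thanks to omitempty.
}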
@@ -4,13 +4,14 @@
package cni

import (
"errors"
"fmt"
"net"
"os"

internalutil "github.com/containers/common/libnetwork/internal/util"
"github.com/containers/common/libnetwork/types"
pkgutil "github.com/containers/common/pkg/util"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
)

@@ -43,7 +44,7 @@ func (n *cniNetwork) networkCreate(newNetwork *types.Network, defaultNet bool) (
// FIXME: Should we use a different type for network create without the ID field?
// the caller is not allowed to set a specific ID
if newNetwork.ID != "" {
return nil, errors.Wrap(types.ErrInvalidArg, "ID can not be set for network create")
return nil, fmt.Errorf("ID can not be set for network create: %w", types.ErrInvalidArg)
}

err := internalutil.CommonNetworkCreate(n, newNetwork)

@@ -83,7 +84,7 @@ func (n *cniNetwork) networkCreate(newNetwork *types.Network, defaultNet bool) (
return nil, err
}
default:
return nil, errors.Wrapf(types.ErrInvalidArg, "unsupported driver %s", newNetwork.Driver)
return nil, fmt.Errorf("unsupported driver %s: %w", newNetwork.Driver, types.ErrInvalidArg)
}

err = internalutil.ValidateSubnets(newNetwork, !newNetwork.Internal, usedNetworks)

@@ -127,7 +128,7 @@ func (n *cniNetwork) NetworkRemove(nameOrID string) error {

// Removing the default network is not allowed.
if network.libpodNet.Name == n.defaultNetwork {
return errors.Errorf("default network %s cannot be removed", n.defaultNetwork)
return fmt.Errorf("default network %s cannot be removed", n.defaultNetwork)
}

// Remove the bridge network interface on the host.

@@ -193,7 +194,7 @@ func createIPMACVLAN(network *types.Network) error {
return err
}
if !pkgutil.StringInSlice(network.NetworkInterface, interfaceNames) {
return errors.Errorf("parent interface %s does not exist", network.NetworkInterface)
return fmt.Errorf("parent interface %s does not exist", network.NetworkInterface)
}
}

@@ -224,10 +225,10 @@ func validateIPAMDriver(n *types.Network) error {
case "", types.HostLocalIPAMDriver:
case types.DHCPIPAMDriver, types.NoneIPAMDriver:
if len(n.Subnets) > 0 {
return errors.Errorf("%s ipam driver is set but subnets are given", ipamDriver)
return fmt.Errorf("%s ipam driver is set but subnets are given", ipamDriver)
}
default:
return errors.Errorf("unsupported ipam driver %q", ipamDriver)
return fmt.Errorf("unsupported ipam driver %q", ipamDriver)
}
return nil
}

@@ -7,6 +7,8 @@ import (
"context"
"crypto/sha256"
"encoding/hex"
"errors"
"fmt"
"os"
"path/filepath"
"strings"

@@ -16,7 +18,6 @@ import (
"github.com/containers/common/libnetwork/types"
"github.com/containers/common/pkg/config"
"github.com/containers/storage/pkg/lockfile"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
)

@@ -94,7 +95,7 @@ func NewCNINetworkInterface(conf *InitConfig) (types.ContainerNetwork, error) {
}
defaultNet, err := types.ParseCIDR(defaultSubnet)
if err != nil {
return nil, errors.Wrap(err, "failed to parse default subnet")
return nil, fmt.Errorf("failed to parse default subnet: %w", err)
}

defaultSubnetPools := conf.DefaultsubnetPools

@@ -201,7 +202,7 @@ func (n *cniNetwork) loadNetworks() error {
if networks[n.defaultNetwork] == nil {
networkInfo, err := n.createDefaultNetwork()
if err != nil {
return errors.Wrapf(err, "failed to create default network %s", n.defaultNetwork)
return fmt.Errorf("failed to create default network %s: %w", n.defaultNetwork, err)
}
networks[n.defaultNetwork] = networkInfo
}

@@ -243,7 +244,7 @@ func (n *cniNetwork) getNetwork(nameOrID string) (*network, error) {

if strings.HasPrefix(val.libpodNet.ID, nameOrID) {
if net != nil {
return nil, errors.Errorf("more than one result for network ID %s", nameOrID)
return nil, fmt.Errorf("more than one result for network ID %s", nameOrID)
}
net = val
}

@@ -251,7 +252,7 @@ func (n *cniNetwork) getNetwork(nameOrID string) (*network, error) {
if net != nil {
return net, nil
}
return nil, errors.Wrapf(types.ErrNoSuchNetwork, "unable to find network with name or ID %s", nameOrID)
return nil, fmt.Errorf("unable to find network with name or ID %s: %w", nameOrID, types.ErrNoSuchNetwork)
}

// getNetworkIDFromName creates a network ID from the name. It is just the

|
|||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"net"
|
||||
"os"
|
||||
"strings"
|
||||
|
@ -15,7 +16,6 @@ import (
|
|||
"github.com/containers/common/libnetwork/internal/util"
|
||||
"github.com/containers/common/libnetwork/types"
|
||||
"github.com/hashicorp/go-multierror"
|
||||
"github.com/pkg/errors"
|
||||
"github.com/sirupsen/logrus"
|
||||
)
|
||||
|
||||
|
@ -36,7 +36,7 @@ func (n *cniNetwork) Setup(namespacePath string, options types.SetupOptions) (ma
|
|||
|
||||
err = setupLoopback(namespacePath)
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "failed to set the loopback adapter up")
|
||||
return nil, fmt.Errorf("failed to set the loopback adapter up: %w", err)
|
||||
}
|
||||
|
||||
var retErr error
|
||||
|
@ -108,7 +108,7 @@ func CNIResultToStatus(res cnitypes.Result) (types.StatusBlock, error) {
|
|||
for _, nameserver := range cniResult.DNS.Nameservers {
|
||||
ip := net.ParseIP(nameserver)
|
||||
if ip == nil {
|
||||
return result, errors.Errorf("failed to parse cni nameserver ip %s", nameserver)
|
||||
return result, fmt.Errorf("failed to parse cni nameserver ip %s", nameserver)
|
||||
}
|
||||
nameservers = append(nameservers, ip)
|
||||
}
|
||||
|
@ -133,7 +133,7 @@ func CNIResultToStatus(res cnitypes.Result) (types.StatusBlock, error) {
|
|||
continue
|
||||
}
|
||||
if len(cniResult.Interfaces) <= *ip.Interface {
|
||||
return result, errors.Errorf("invalid cni result, interface index %d out of range", *ip.Interface)
|
||||
return result, fmt.Errorf("invalid cni result, interface index %d out of range", *ip.Interface)
|
||||
}
|
||||
|
||||
// when we have a ip for this interface add it to the subnets
|
||||
|
@ -236,7 +236,7 @@ func (n *cniNetwork) teardown(namespacePath string, options types.TeardownOption
|
|||
logrus.Warnf("Failed to load cached network config: %v, falling back to loading network %s from disk", err, name)
|
||||
network := n.networks[name]
|
||||
if network == nil {
|
||||
multiErr = multierror.Append(multiErr, errors.Wrapf(types.ErrNoSuchNetwork, "network %s", name))
|
||||
multiErr = multierror.Append(multiErr, fmt.Errorf("network %s: %w", name, types.ErrNoSuchNetwork))
|
||||
continue
|
||||
}
|
||||
cniConfList = network.cniNet
|
||||
|
@ -258,7 +258,7 @@ func getCachedNetworkConfig(cniConf *libcni.CNIConfig, name string, rt *libcni.R
|
|||
if err != nil {
|
||||
return nil, nil, err
|
||||
} else if confBytes == nil {
|
||||
return nil, nil, errors.Errorf("network %s not found in CNI cache", name)
|
||||
return nil, nil, fmt.Errorf("network %s not found in CNI cache", name)
|
||||
}
|
||||
|
||||
cniConfList, err = libcni.ConfListFromBytes(confBytes)
|
||||
|
|
|
@@ -1,23 +1,23 @@
package util

import (
"fmt"
"net"

"github.com/containers/common/libnetwork/types"
"github.com/containers/common/libnetwork/util"
"github.com/containers/common/pkg/config"
pkgutil "github.com/containers/common/pkg/util"
"github.com/pkg/errors"
)

func CreateBridge(n NetUtil, network *types.Network, usedNetworks []*net.IPNet, subnetPools []config.SubnetPool) error {
if network.NetworkInterface != "" {
bridges := GetBridgeInterfaceNames(n)
if pkgutil.StringInSlice(network.NetworkInterface, bridges) {
return errors.Errorf("bridge name %s already in use", network.NetworkInterface)
return fmt.Errorf("bridge name %s already in use", network.NetworkInterface)
}
if !types.NameRegex.MatchString(network.NetworkInterface) {
return errors.Wrapf(types.RegexError, "bridge name %s invalid", network.NetworkInterface)
return fmt.Errorf("bridge name %s invalid: %w", network.NetworkInterface, types.RegexError)
}
} else {
var err error

@@ -1,8 +1,9 @@
package util

import (
"fmt"

"github.com/containers/common/libnetwork/types"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
)

@@ -22,10 +23,10 @@ func CommonNetworkCreate(n NetUtil, network *types.Network) error {
// validate the name when given
if network.Name != "" {
if !types.NameRegex.MatchString(network.Name) {
return errors.Wrapf(types.RegexError, "network name %s invalid", network.Name)
return fmt.Errorf("network name %s invalid: %w", network.Name, types.RegexError)
}
if _, err := n.Network(network.Name); err == nil {
return errors.Wrapf(types.ErrNetworkExists, "network name %s already used", network.Name)
return fmt.Errorf("network name %s already used: %w", network.Name, types.ErrNetworkExists)
}
} else {
name, err = GetFreeDeviceName(n)

@@ -2,9 +2,9 @@ package util

import (
"crypto/rand"
"errors"
"fmt"
"net"

"github.com/pkg/errors"
)

func incByte(subnet *net.IPNet, idx int, shift uint) error {

@@ -31,7 +31,7 @@ func NextSubnet(subnet *net.IPNet) (*net.IPNet, error) {
}
ones, bits := newSubnet.Mask.Size()
if ones == 0 {
return nil, errors.Errorf("%s has only one subnet", subnet.String())
return nil, fmt.Errorf("%s has only one subnet", subnet.String())
}
zeroes := uint(bits - ones)
shift := zeroes % 8

@@ -1,9 +1,8 @@
package util

import (
"fmt"
"strconv"

"github.com/pkg/errors"
)

// ParseMTU parses the mtu option

@@ -16,7 +15,7 @@ func ParseMTU(mtu string) (int, error) {
return 0, err
}
if m < 0 {
return 0, errors.Errorf("mtu %d is less than zero", m)
return 0, fmt.Errorf("mtu %d is less than zero", m)
}
return m, nil
}

@@ -31,7 +30,7 @@ func ParseVlan(vlan string) (int, error) {
return 0, err
}
if v < 0 || v > 4094 {
return 0, errors.Errorf("vlan ID %d must be between 0 and 4094", v)
return 0, fmt.Errorf("vlan ID %d must be between 0 and 4094", v)
}
return v, nil
}

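ParseMTU and ParseVlan keep their validation logic; only the error constructor changes. A simplified stand-in for the VLAN check, using the same 0 to 4094 bound as the hunk above:

package main

import (
	"fmt"
	"strconv"
)

// parseVlan is a simplified stand-in for the ParseVlan helper above.
func parseVlan(vlan string) (int, error) {
	v, err := strconv.Atoi(vlan)
	if err != nil {
		return 0, err
	}
	if v < 0 || v > 4094 {
		return 0, fmt.Errorf("vlan ID %d must be between 0 and 4094", v)
	}
	return v, nil
}

func main() {
	fmt.Println(parseVlan("100"))  // 100 <nil>
	fmt.Println(parseVlan("5000")) // 0 vlan ID 5000 must be between 0 and 4094
}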
@@ -1,11 +1,12 @@
package util

import (
"errors"
"fmt"
"net"

"github.com/containers/common/libnetwork/types"
"github.com/containers/common/libnetwork/util"
"github.com/pkg/errors"
)

// ValidateSubnet will validate a given Subnet. It checks if the

@@ -25,18 +26,18 @@ func ValidateSubnet(s *types.Subnet, addGateway bool, usedNetworks []*net.IPNet)
// the network address and not a random ip in the subnet.
_, n, err := net.ParseCIDR(s.Subnet.String())
if err != nil {
return errors.Wrap(err, "subnet invalid")
return fmt.Errorf("subnet invalid: %w", err)
}

// check that the new subnet does not conflict with existing ones
if NetworkIntersectsWithNetworks(n, usedNetworks) {
return errors.Errorf("subnet %s is already used on the host or by another config", n.String())
return fmt.Errorf("subnet %s is already used on the host or by another config", n.String())
}

s.Subnet = types.IPNet{IPNet: *n}
if s.Gateway != nil {
if !s.Subnet.Contains(s.Gateway) {
return errors.Errorf("gateway %s not in subnet %s", s.Gateway, &s.Subnet)
return fmt.Errorf("gateway %s not in subnet %s", s.Gateway, &s.Subnet)
}
util.NormalizeIP(&s.Gateway)
} else if addGateway {

@@ -50,13 +51,13 @@ func ValidateSubnet(s *types.Subnet, addGateway bool, usedNetworks []*net.IPNet)
if s.LeaseRange != nil {
if s.LeaseRange.StartIP != nil {
if !s.Subnet.Contains(s.LeaseRange.StartIP) {
return errors.Errorf("lease range start ip %s not in subnet %s", s.LeaseRange.StartIP, &s.Subnet)
return fmt.Errorf("lease range start ip %s not in subnet %s", s.LeaseRange.StartIP, &s.Subnet)
}
util.NormalizeIP(&s.LeaseRange.StartIP)
}
if s.LeaseRange.EndIP != nil {
if !s.Subnet.Contains(s.LeaseRange.EndIP) {
return errors.Errorf("lease range end ip %s not in subnet %s", s.LeaseRange.EndIP, &s.Subnet)
return fmt.Errorf("lease range end ip %s not in subnet %s", s.LeaseRange.EndIP, &s.Subnet)
}
util.NormalizeIP(&s.LeaseRange.EndIP)
}

@@ -107,7 +108,7 @@ func ValidateSetupOptions(n NetUtil, namespacePath string, options types.SetupOp
// validatePerNetworkOpts checks that all given static ips are in a subnet on this network
func validatePerNetworkOpts(network *types.Network, netOpts *types.PerNetworkOptions) error {
if netOpts.InterfaceName == "" {
return errors.Errorf("interface name on network %s is empty", network.Name)
return fmt.Errorf("interface name on network %s is empty", network.Name)
}
if network.IPAMOptions[types.Driver] == types.HostLocalIPAMDriver {
outer:

@@ -117,7 +118,7 @@ func validatePerNetworkOpts(network *types.Network, netOpts *types.PerNetworkOpt
continue outer
}
}
return errors.Errorf("requested static ip %s not in any subnet on network %s", ip.String(), network.Name)
return fmt.Errorf("requested static ip %s not in any subnet on network %s", ip.String(), network.Name)
}
}
return nil

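The comment about keeping "the network address and not a random ip in the subnet" refers to net.ParseCIDR's second return value: re-parsing the subnet string masks off the host bits. A short illustration with an arbitrary example address:

package main

import (
	"fmt"
	"net"
)

func main() {
	// ParseCIDR returns both the literal IP and the masked network; ValidateSubnet
	// keeps only the network, so a stored subnet of 10.89.0.5/24 becomes 10.89.0.0/24.
	ip, n, err := net.ParseCIDR("10.89.0.5/24")
	if err != nil {
		panic(err)
	}
	fmt.Println(ip, n) // 10.89.0.5 10.89.0.0/24
}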
@@ -5,16 +5,18 @@ package netavark

import (
"encoding/json"
"errors"
"fmt"
"net"
"os"
"path/filepath"
"strconv"
"time"

internalutil "github.com/containers/common/libnetwork/internal/util"
"github.com/containers/common/libnetwork/types"
"github.com/containers/common/pkg/util"
"github.com/containers/storage/pkg/stringid"
"github.com/pkg/errors"
)

// NetworkCreate will take a partial filled Network and fill the

@@ -44,7 +46,7 @@ func (n *netavarkNetwork) networkCreate(newNetwork *types.Network, defaultNet bo
// FIXME: Should we use a different type for network create without the ID field?
// the caller is not allowed to set a specific ID
if newNetwork.ID != "" {
return nil, errors.Wrap(types.ErrInvalidArg, "ID can not be set for network create")
return nil, fmt.Errorf("ID can not be set for network create: %w", types.ErrInvalidArg)
}

// generate random network ID

@@ -95,20 +97,27 @@ func (n *netavarkNetwork) networkCreate(newNetwork *types.Network, defaultNet bo
// validate the given options, we do not need them but just check to make sure they are valid
for key, value := range newNetwork.Options {
switch key {
case "mtu":
case types.MTUOption:
_, err = internalutil.ParseMTU(value)
if err != nil {
return nil, err
}

case "vlan":
case types.VLANOption:
_, err = internalutil.ParseVlan(value)
if err != nil {
return nil, err
}

case types.IsolateOption:
val, err := strconv.ParseBool(value)
if err != nil {
return nil, err
}
// rust only support "true" or "false" while go can parse 1 and 0 as well so we need to change it
newNetwork.Options[types.IsolateOption] = strconv.FormatBool(val)
default:
return nil, errors.Errorf("unsupported bridge network option %s", key)
return nil, fmt.Errorf("unsupported bridge network option %s", key)
}
}
case types.MacVLANNetworkDriver:

@@ -117,7 +126,7 @@ func (n *netavarkNetwork) networkCreate(newNetwork *types.Network, defaultNet bo
return nil, err
}
default:
return nil, errors.Wrapf(types.ErrInvalidArg, "unsupported driver %s", newNetwork.Driver)
return nil, fmt.Errorf("unsupported driver %s: %w", newNetwork.Driver, types.ErrInvalidArg)
}

// when we do not have ipam we must disable dns

@@ -157,7 +166,7 @@ func createMacvlan(network *types.Network) error {
return err
}
if !util.StringInSlice(network.NetworkInterface, interfaceNames) {
return errors.Errorf("parent interface %s does not exist", network.NetworkInterface)
return fmt.Errorf("parent interface %s does not exist", network.NetworkInterface)
}
}

@@ -165,29 +174,29 @@ func createMacvlan(network *types.Network) error {
switch network.IPAMOptions[types.Driver] {
case "":
if len(network.Subnets) == 0 {
return errors.Errorf("macvlan driver needs at least one subnet specified, DHCP is not yet supported with netavark")
return fmt.Errorf("macvlan driver needs at least one subnet specified, DHCP is not yet supported with netavark")
}
network.IPAMOptions[types.Driver] = types.HostLocalIPAMDriver
case types.HostLocalIPAMDriver:
if len(network.Subnets) == 0 {
return errors.Errorf("macvlan driver needs at least one subnet specified, when the host-local ipam driver is set")
return fmt.Errorf("macvlan driver needs at least one subnet specified, when the host-local ipam driver is set")
}
}

// validate the given options, we do not need them but just check to make sure they are valid
for key, value := range network.Options {
switch key {
case "mode":
case types.ModeOption:
if !util.StringInSlice(value, types.ValidMacVLANModes) {
return errors.Errorf("unknown macvlan mode %q", value)
return fmt.Errorf("unknown macvlan mode %q", value)
}
case "mtu":
case types.MTUOption:
_, err := internalutil.ParseMTU(value)
if err != nil {
return err
}
default:
return errors.Errorf("unsupported macvlan network option %s", key)
return fmt.Errorf("unsupported macvlan network option %s", key)
}
}
return nil

@@ -210,7 +219,7 @@ func (n *netavarkNetwork) NetworkRemove(nameOrID string) error {

// Removing the default network is not allowed.
if network.Name == n.defaultNetwork {
return errors.Errorf("default network %s cannot be removed", n.defaultNetwork)
return fmt.Errorf("default network %s cannot be removed", n.defaultNetwork)
}

file := filepath.Join(n.networkConfigDir, network.Name+".json")

@@ -274,7 +283,7 @@ func validateIPAMDriver(n *types.Network) error {
case types.DHCPIPAMDriver:
return errors.New("dhcp ipam driver is not yet supported with netavark")
default:
return errors.Errorf("unsupported ipam driver %q", ipamDriver)
return fmt.Errorf("unsupported ipam driver %q", ipamDriver)
}
return nil
}

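The isolate normalization in the netavark path exists because strconv.ParseBool accepts inputs such as "1" or "T" that the Rust side does not; the option is rewritten to a canonical "true"/"false" before the config is stored. A quick illustration:

package main

import (
	"fmt"
	"strconv"
)

func main() {
	// Values a user might pass for the isolate option.
	for _, v := range []string{"1", "T", "true", "0"} {
		b, err := strconv.ParseBool(v)
		if err != nil {
			fmt.Println(v, "->", err)
			continue
		}
		// The canonical form stored in the network options for netavark.
		fmt.Println(v, "->", strconv.FormatBool(b))
	}
	// 1 -> true, T -> true, true -> true, 0 -> false
}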
@@ -119,6 +119,9 @@ func (n *netavarkNetwork) execNetavark(args []string, stdin, result interface{})
if logrus.IsLevelEnabled(logrus.DebugLevel) {
cmd.Env = append(cmd.Env, "RUST_BACKTRACE=1")
}
if n.dnsBindPort != 0 {
cmd.Env = append(cmd.Env, "NETAVARK_DNS_PORT="+strconv.Itoa(int(n.dnsBindPort)))
}

err = cmd.Start()
if err != nil {

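The dnsBindPort plumbing surfaces as an extra environment variable on the netavark child process, as the hunk above shows. A minimal sketch of the same mechanism; the binary path, arguments, and port below are placeholders, not the real command that execNetavark builds:

package main

import (
	"os"
	"os/exec"
	"strconv"
)

func main() {
	dnsBindPort := uint16(1153) // placeholder port

	// execNetavark-style setup: inherit the environment and add the DNS port
	// so netavark can pass it on for aardvark-dns.
	cmd := exec.Command("/usr/libexec/podman/netavark", "--help") // placeholder invocation
	cmd.Env = append(os.Environ(), "NETAVARK_DNS_PORT="+strconv.Itoa(int(dnsBindPort)))
	cmd.Stdout = os.Stdout
	cmd.Stderr = os.Stderr
	_ = cmd.Run() // error ignored in this sketch; the binary may not exist locally
}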
@@ -10,7 +10,6 @@ import (

"github.com/containers/common/libnetwork/types"
"github.com/containers/common/libnetwork/util"
"github.com/pkg/errors"
"go.etcd.io/bbolt"
)

@@ -180,7 +179,7 @@ func getFreeIPFromBucket(bucket *bbolt.Bucket, subnet *types.Subnet) (net.IP, er
lastIP, err := util.LastIPInSubnet(&subnet.Subnet.IPNet)
// this error should never happen but lets check anyways to prevent panics
if err != nil {
return nil, errors.Wrap(err, "failed to get lastIP")
return nil, fmt.Errorf("failed to get lastIP: %w", err)
}
// ipv4 uses the last ip in a subnet for broadcast so we cannot use it
if util.IsIPv4(lastIP) {

@@ -5,6 +5,8 @@ package netavark

import (
"encoding/json"
"errors"
"fmt"
"io/ioutil"
"os"
"path/filepath"

@@ -16,7 +18,6 @@ import (
"github.com/containers/common/pkg/config"
"github.com/containers/storage/pkg/lockfile"
"github.com/containers/storage/pkg/unshare"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
)

@@ -43,6 +44,9 @@ type netavarkNetwork struct {
// defaultsubnetPools contains the subnets which must be used to allocate a free subnet by network create
defaultsubnetPools []config.SubnetPool

// dnsBindPort is set the the port to pass to netavark for aardvark
dnsBindPort uint16

// ipamDBPath is the path to the ip allocation bolt db
ipamDBPath string

@@ -80,6 +84,9 @@ type InitConfig struct {
// DefaultsubnetPools contains the subnets which must be used to allocate a free subnet by network create
DefaultsubnetPools []config.SubnetPool

// DNSBindPort is set the the port to pass to netavark for aardvark
DNSBindPort uint16

// Syslog describes whenever the netavark debbug output should be log to the syslog as well.
// This will use logrus to do so, make sure logrus is set up to log to the syslog.
Syslog bool

@@ -105,7 +112,7 @@ func NewNetworkInterface(conf *InitConfig) (types.ContainerNetwork, error) {
}
defaultNet, err := types.ParseCIDR(defaultSubnet)
if err != nil {
return nil, errors.Wrap(err, "failed to parse default subnet")
return nil, fmt.Errorf("failed to parse default subnet: %w", err)
}

if err := os.MkdirAll(conf.NetworkConfigDir, 0o755); err != nil {

@@ -131,6 +138,7 @@ func NewNetworkInterface(conf *InitConfig) (types.ContainerNetwork, error) {
defaultNetwork: defaultNetworkName,
defaultSubnet: defaultNet,
defaultsubnetPools: defaultSubnetPools,
dnsBindPort: conf.DNSBindPort,
lock: lock,
syslog: conf.Syslog,
}

@@ -221,7 +229,7 @@ func (n *netavarkNetwork) loadNetworks() error {
if networks[n.defaultNetwork] == nil {
networkInfo, err := n.createDefaultNetwork()
if err != nil {
return errors.Wrapf(err, "failed to create default network %s", n.defaultNetwork)
return fmt.Errorf("failed to create default network %s: %w", n.defaultNetwork, err)
}
networks[n.defaultNetwork] = networkInfo
}

@@ -242,7 +250,7 @@ func parseNetwork(network *types.Network) error {
}

if len(network.ID) != 64 {
return errors.Errorf("invalid network ID %q", network.ID)
return fmt.Errorf("invalid network ID %q", network.ID)
}

// add gateway when not internal or dns enabled

@@ -284,7 +292,7 @@ func (n *netavarkNetwork) getNetwork(nameOrID string) (*types.Network, error) {

if strings.HasPrefix(val.ID, nameOrID) {
if net != nil {
return nil, errors.Errorf("more than one result for network ID %s", nameOrID)
return nil, fmt.Errorf("more than one result for network ID %s", nameOrID)
}
net = val
}

@@ -292,7 +300,7 @@ func (n *netavarkNetwork) getNetwork(nameOrID string) (*types.Network, error) {
if net != nil {
return net, nil
}
return nil, errors.Wrapf(types.ErrNoSuchNetwork, "unable to find network with name or ID %s", nameOrID)
return nil, fmt.Errorf("unable to find network with name or ID %s: %w", nameOrID, types.ErrNoSuchNetwork)
}

// Implement the NetUtil interface for easy code sharing with other network interfaces.

@@ -10,7 +10,6 @@ import (

"github.com/containers/common/libnetwork/internal/util"
"github.com/containers/common/libnetwork/types"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
)

@@ -42,7 +41,7 @@ func (n *netavarkNetwork) Setup(namespacePath string, options types.SetupOptions

netavarkOpts, err := n.convertNetOpts(options.NetworkOptions)
if err != nil {
return nil, errors.Wrap(err, "failed to convert net opts")
return nil, fmt.Errorf("failed to convert net opts: %w", err)
}

// Warn users if one or more networks have dns enabled

@@ -103,7 +102,7 @@ func (n *netavarkNetwork) Teardown(namespacePath string, options types.TeardownO

netavarkOpts, err := n.convertNetOpts(options.NetworkOptions)
if err != nil {
return errors.Wrap(err, "failed to convert net opts")
return fmt.Errorf("failed to convert net opts: %w", err)
}

retErr := n.execNetavark([]string{"teardown", namespacePath}, netavarkOpts, nil)

Some files were not shown because too many files have changed in this diff.