mirror of https://github.com/containers/podman.git

vendor: update common and buildah

Vendor the following dependencies:
- https://github.com/containers/common/pull/2375
- https://github.com/containers/buildah/pull/6074

Closes: https://github.com/containers/podman/issues/25634
Signed-off-by: Giuseppe Scrivano <gscrivan@redhat.com>

parent 94e77af09d
commit 260035d069
go.mod: 14 changes

@@ -13,8 +13,8 @@ require (
 github.com/checkpoint-restore/checkpointctl v1.3.0
 github.com/checkpoint-restore/go-criu/v7 v7.2.0
 github.com/containernetworking/plugins v1.6.2
-github.com/containers/buildah v1.39.2
-github.com/containers/common v0.62.3-0.20250320113334-33bf9345b5ef
+github.com/containers/buildah v1.39.1-0.20250321123219-bc4d7eb70fe3
+github.com/containers/common v0.62.3-0.20250320215058-fa53559b5062
 github.com/containers/conmon v2.0.20+incompatible
 github.com/containers/gvisor-tap-vsock v0.8.4
 github.com/containers/image/v5 v5.34.3-0.20250311194052-d84dbab374e7
@@ -53,7 +53,7 @@ require (
 github.com/moby/sys/user v0.3.0
 github.com/moby/term v0.5.2
 github.com/nxadm/tail v1.4.11
-github.com/onsi/ginkgo/v2 v2.23.1
+github.com/onsi/ginkgo/v2 v2.23.2
 github.com/onsi/gomega v1.36.2
 github.com/opencontainers/cgroups v0.0.1
 github.com/opencontainers/go-digest v1.0.0
@@ -61,7 +61,7 @@ require (
 github.com/opencontainers/runtime-spec v1.2.1
 github.com/opencontainers/runtime-tools v0.9.1-0.20241108202711-f7e3563b0271
 github.com/opencontainers/selinux v1.11.1
-github.com/openshift/imagebuilder v1.2.15
+github.com/openshift/imagebuilder v1.2.16-0.20250220150830-7ebfb09d364e
 github.com/rootless-containers/rootlesskit/v2 v2.3.2
 github.com/shirou/gopsutil/v4 v4.25.2
 github.com/sirupsen/logrus v1.9.3
@@ -106,7 +106,7 @@ require (
 github.com/containerd/typeurl/v2 v2.2.3 // indirect
 github.com/containernetworking/cni v1.2.3 // indirect
 github.com/containers/libtrust v0.0.0-20230121012942-c1716e8a8d01 // indirect
-github.com/containers/luksy v0.0.0-20250106202729-a3a812db5b72 // indirect
+github.com/containers/luksy v0.0.0-20250217190002-40bd943d93b8 // indirect
 github.com/coreos/go-oidc/v3 v3.12.0 // indirect
 github.com/coreos/go-systemd v0.0.0-20190719114852-fd7a80b32e1f // indirect
 github.com/cyberphone/json-canonicalization v0.0.0-20241213102144-19d51d7fe467 // indirect
@@ -170,7 +170,7 @@ require (
 github.com/miekg/pkcs11 v1.1.1 // indirect
 github.com/mistifyio/go-zfs/v3 v3.0.1 // indirect
 github.com/mitchellh/mapstructure v1.5.0 // indirect
-github.com/moby/buildkit v0.19.0 // indirect
+github.com/moby/buildkit v0.20.1 // indirect
 github.com/moby/docker-image-spec v1.3.1 // indirect
 github.com/moby/patternmatcher v0.6.0 // indirect
 github.com/moby/sys/mountinfo v0.7.2 // indirect
@@ -213,7 +213,7 @@ require (
 github.com/ugorji/go/codec v1.2.12 // indirect
 github.com/ulikunitz/xz v0.5.12 // indirect
 github.com/vbatts/tar-split v0.12.1 // indirect
-github.com/vishvananda/netns v0.0.4 // indirect
+github.com/vishvananda/netns v0.0.5 // indirect
 github.com/yusufpapurcu/wmi v1.2.4 // indirect
 go.mongodb.org/mongo-driver v1.14.0 // indirect
 go.opencensus.io v0.24.0 // indirect
go.sum: 27 changes

@@ -76,10 +76,10 @@ github.com/containernetworking/cni v1.2.3 h1:hhOcjNVUQTnzdRJ6alC5XF+wd9mfGIUaj8F
 github.com/containernetworking/cni v1.2.3/go.mod h1:DuLgF+aPd3DzcTQTtp/Nvl1Kim23oFKdm2okJzBQA5M=
 github.com/containernetworking/plugins v1.6.2 h1:pqP8Mq923TLyef5g97XfJ/xpDeVek4yF8A4mzy9Tc4U=
 github.com/containernetworking/plugins v1.6.2/go.mod h1:SP5UG3jDO9LtmfbBJdP+nl3A1atOtbj2MBOYsnaxy64=
-github.com/containers/buildah v1.39.2 h1:YaFMNnuTr7wKYKQDHkm7yyP9HhWVrNB4DA+DjYUS9k4=
-github.com/containers/buildah v1.39.2/go.mod h1:Vb4sDbEq06qQqk29mcGw/1qit8dyukpfL4hwNQ5t+z8=
-github.com/containers/common v0.62.3-0.20250320113334-33bf9345b5ef h1:OpMI1mIyrKWPGAYlJY9VVjsCXigtk+3ckYE1SN7JxYA=
-github.com/containers/common v0.62.3-0.20250320113334-33bf9345b5ef/go.mod h1:vGmB+0zxqbh5Gd0kOKDlC6sIeoMqso3l4ZKg6+G3YFc=
+github.com/containers/buildah v1.39.1-0.20250321123219-bc4d7eb70fe3 h1:F5qpz8HsQ/nxhArveDEgskbyOjFuSsEahevt4JHAePQ=
+github.com/containers/buildah v1.39.1-0.20250321123219-bc4d7eb70fe3/go.mod h1:kCk5Le5CiMazPfGhF8yg43LQa1YLKqBZNnI4PTq+W/U=
+github.com/containers/common v0.62.3-0.20250320215058-fa53559b5062 h1:aIOZMBptfl13GutH4jt7Sa3K3pIJO80I9Kjz7Pe5o7M=
+github.com/containers/common v0.62.3-0.20250320215058-fa53559b5062/go.mod h1:IW8fUkTIwJkeclyROeASOV5FvFBpHjtQj/XBXffhuBk=
 github.com/containers/conmon v2.0.20+incompatible h1:YbCVSFSCqFjjVwHTPINGdMX1F6JXHGTUje2ZYobNrkg=
 github.com/containers/conmon v2.0.20+incompatible/go.mod h1:hgwZ2mtuDrppv78a/cOBNiCm6O0UMWGx1mu7P00nu5I=
 github.com/containers/gvisor-tap-vsock v0.8.4 h1:z7MqcldnXYGaU6uTaKVl7RFxTmbhNsd2UL0CyM3fdBs=
@@ -90,8 +90,8 @@ github.com/containers/libhvee v0.10.0 h1:7VLv8keWZpHuGmWvyY4c1mVH5V1JYb1G78VC+8A
 github.com/containers/libhvee v0.10.0/go.mod h1:at0h8lRcK5jCKfQgU/e6Io0Mw12F36zRLjXVOXRoDTM=
 github.com/containers/libtrust v0.0.0-20230121012942-c1716e8a8d01 h1:Qzk5C6cYglewc+UyGf6lc8Mj2UaPTHy/iF2De0/77CA=
 github.com/containers/libtrust v0.0.0-20230121012942-c1716e8a8d01/go.mod h1:9rfv8iPl1ZP7aqh9YA68wnZv2NUDbXdcdPHVz0pFbPY=
-github.com/containers/luksy v0.0.0-20250106202729-a3a812db5b72 h1:hdBIFaml6hO+Bal8CdQSQPTF305gwsJfubs4NoOV53A=
-github.com/containers/luksy v0.0.0-20250106202729-a3a812db5b72/go.mod h1:UpMgEjd9XelIA/iK+qD3hWIrZY/M3eaepn+gm5U8OYE=
+github.com/containers/luksy v0.0.0-20250217190002-40bd943d93b8 h1:hAkmJxAYcNxgv7EsFY9sf1uIYhilYOQqjJ9UzCmYvzY=
+github.com/containers/luksy v0.0.0-20250217190002-40bd943d93b8/go.mod h1:m5Y0Lh2ROHGCbOhGeUDfoOLuUDDtxszrbZc2IsQOGAM=
 github.com/containers/ocicrypt v1.2.1 h1:0qIOTT9DoYwcKmxSt8QJt+VzMY18onl9jUXsxpVhSmM=
 github.com/containers/ocicrypt v1.2.1/go.mod h1:aD0AAqfMp0MtwqWgHM1bUwe1anx0VazI108CRrSKINQ=
 github.com/containers/psgo v1.9.0 h1:eJ74jzSaCHnWt26OlKZROSyUyRcGDf+gYBdXnxrMW4g=
@@ -357,8 +357,8 @@ github.com/mistifyio/go-zfs/v3 v3.0.1 h1:YaoXgBePoMA12+S1u/ddkv+QqxcfiZK4prI6HPn
 github.com/mistifyio/go-zfs/v3 v3.0.1/go.mod h1:CzVgeB0RvF2EGzQnytKVvVSDwmKJXxkOTUGbNrTja/k=
 github.com/mitchellh/mapstructure v1.5.0 h1:jeMsZIYE/09sWLaz43PL7Gy6RuMjD2eJVyuac5Z2hdY=
 github.com/mitchellh/mapstructure v1.5.0/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo=
-github.com/moby/buildkit v0.19.0 h1:w9G1p7sArvCGNkpWstAqJfRQTXBKukMyMK1bsah1HNo=
-github.com/moby/buildkit v0.19.0/go.mod h1:WiHBFTgWV8eB1AmPxIWsAlKjUACAwm3X/14xOV4VWew=
+github.com/moby/buildkit v0.20.1 h1:sT0ZXhhNo5rVbMcYfgttma3TdUHfO5JjFA0UAL8p9fY=
+github.com/moby/buildkit v0.20.1/go.mod h1:Rq9nB/fJImdk6QeM0niKtOHJqwKeYMrK847hTTDVuA4=
 github.com/moby/docker-image-spec v1.3.1 h1:jMKff3w6PgbfSa69GfNg+zN/XLhfXJGnEx3Nl2EsFP0=
 github.com/moby/docker-image-spec v1.3.1/go.mod h1:eKmb5VW8vQEh/BAr2yvVNvuiJuY6UIocYsFu/DxxRpo=
 github.com/moby/patternmatcher v0.6.0 h1:GmP9lR19aU5GqSSFko+5pRqHi+Ohk1O69aFiKkVGiPk=
@@ -388,8 +388,8 @@ github.com/nxadm/tail v1.4.11 h1:8feyoE3OzPrcshW5/MJ4sGESc5cqmGkGCWlco4l0bqY=
 github.com/nxadm/tail v1.4.11/go.mod h1:OTaG3NK980DZzxbRq6lEuzgU+mug70nY11sMd4JXXHc=
 github.com/oklog/ulid v1.3.1 h1:EGfNDEx6MqHz8B3uNV6QAib1UR2Lm97sHi3ocA6ESJ4=
 github.com/oklog/ulid v1.3.1/go.mod h1:CirwcVhetQ6Lv90oh/F+FBtV6XMibvdAFo93nm5qn4U=
-github.com/onsi/ginkgo/v2 v2.23.1 h1:Ox0cOPv/t8RzKJUfDo9ZKtRvBOJY369sFJnl00CjqwY=
-github.com/onsi/ginkgo/v2 v2.23.1/go.mod h1:zXTP6xIp3U8aVuXN8ENK9IXRaTjFnpVB9mGmaSRvxnM=
+github.com/onsi/ginkgo/v2 v2.23.2 h1:LYLd7Wz401p0N7xR8y7WL6D2QZwKpbirDg0EVIvzvMM=
+github.com/onsi/ginkgo/v2 v2.23.2/go.mod h1:zXTP6xIp3U8aVuXN8ENK9IXRaTjFnpVB9mGmaSRvxnM=
 github.com/onsi/gomega v1.36.2 h1:koNYke6TVk6ZmnyHrCXba/T/MoLBXFjeC1PtvYgw0A8=
 github.com/onsi/gomega v1.36.2/go.mod h1:DdwyADRjrc825LhMEkD76cHR5+pUnjhUN8GlHlRPHzY=
 github.com/opencontainers/cgroups v0.0.1 h1:MXjMkkFpKv6kpuirUa4USFBas573sSAY082B4CiHEVA=
@@ -406,8 +406,8 @@ github.com/opencontainers/runtime-tools v0.9.1-0.20241108202711-f7e3563b0271 h1:
 github.com/opencontainers/runtime-tools v0.9.1-0.20241108202711-f7e3563b0271/go.mod h1:oIH6VwKkaDOO+SIYZpdwrC/0wKYqrfO6E1sG1j3UVws=
 github.com/opencontainers/selinux v1.11.1 h1:nHFvthhM0qY8/m+vfhJylliSshm8G1jJ2jDMcgULaH8=
 github.com/opencontainers/selinux v1.11.1/go.mod h1:E5dMC3VPuVvVHDYmi78qvhJp8+M586T4DlDRYpFkyec=
-github.com/openshift/imagebuilder v1.2.15 h1:MNn1OztEE/l8pSEDPYAQ71Ys6rpXA2P00UFhdY9p/yk=
-github.com/openshift/imagebuilder v1.2.15/go.mod h1:cK6MLyBl1IHmIYGLY/2SLOG6p0PtEDUOC7khxsFYUXE=
+github.com/openshift/imagebuilder v1.2.16-0.20250220150830-7ebfb09d364e h1:yKNaeGlH4+h06lkADFa5rAIG7ifxOiV04kLRCL0rct8=
+github.com/openshift/imagebuilder v1.2.16-0.20250220150830-7ebfb09d364e/go.mod h1:cK6MLyBl1IHmIYGLY/2SLOG6p0PtEDUOC7khxsFYUXE=
 github.com/opentracing/opentracing-go v1.2.0 h1:uEJPy/1a5RIPAJ0Ov+OIO8OxWu77jEv+1B0VhjKrZUs=
 github.com/opentracing/opentracing-go v1.2.0/go.mod h1:GxEUsuufX4nBwe+T+Wl9TAgYrxe9dPLANfrWvHYVTgc=
 github.com/ostreedev/ostree-go v0.0.0-20210805093236-719684c64e4f h1:/UDgs8FGMqwnHagNDPGOlts35QkhAZ8by3DR7nMih7M=
@@ -526,8 +526,9 @@ github.com/vbauerster/mpb/v8 v8.9.3 h1:PnMeF+sMvYv9u23l6DO6Q3+Mdj408mjLRXIzmUmU2
 github.com/vbauerster/mpb/v8 v8.9.3/go.mod h1:hxS8Hz4C6ijnppDSIX6LjG8FYJSoPo9iIOcE53Zik0c=
 github.com/vishvananda/netlink v1.3.1-0.20250221194427-0af32151e72b h1:hYWtmuzlR0jpWu+ljWfPMi7oNiZ9x/D3GbBqgZTOhyI=
 github.com/vishvananda/netlink v1.3.1-0.20250221194427-0af32151e72b/go.mod h1:i6NetklAujEcC6fK0JPjT8qSwWyO0HLn4UKG+hGqeJs=
-github.com/vishvananda/netns v0.0.4 h1:Oeaw1EM2JMxD51g9uhtC0D7erkIjgmj8+JZc26m1YX8=
 github.com/vishvananda/netns v0.0.4/go.mod h1:SpkAiCQRtJ6TvvxPnOSyH3BMl6unz3xZlaprSwhNNJM=
+github.com/vishvananda/netns v0.0.5 h1:DfiHV+j8bA32MFM7bfEunvT8IAqQ/NzSJHtcmW5zdEY=
+github.com/vishvananda/netns v0.0.5/go.mod h1:SpkAiCQRtJ6TvvxPnOSyH3BMl6unz3xZlaprSwhNNJM=
 github.com/xeipuuv/gojsonpointer v0.0.0-20190905194746-02993c407bfb h1:zGWFAtiMcyryUHoUjUJX0/lt1H2+i2Ka2n+D3DImSNo=
 github.com/xeipuuv/gojsonpointer v0.0.0-20190905194746-02993c407bfb/go.mod h1:N2zxlSyiKSe5eX1tZViRH5QA0qijqEDrYZiPEAiq3wU=
 github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415 h1:EzJWgHovont7NscjpAxXsDA8S8BMYve8Y5+7cuRE7R0=
@@ -97,7 +97,7 @@ func DevicesFromPath(g *generate.Generator, devicePath string) error {
 
 func BlockAccessToKernelFilesystems(privileged, pidModeIsHost bool, mask, unmask []string, g *generate.Generator) {
     if !privileged {
-        for _, mp := range config.DefaultMaskedPaths {
+        for _, mp := range config.DefaultMaskedPaths() {
             // check that the path to mask is not in the list of paths to unmask
             if shouldMask(mp, unmask) {
                 g.AddLinuxMaskedPaths(mp)

@@ -375,7 +375,7 @@ var _ = Describe("Podman run", func() {
         session := podmanTest.Podman([]string{"run", "-d", "--name=maskCtr", ALPINE, "sleep", "200"})
         session.WaitWithDefaultTimeout()
         Expect(session).Should(ExitCleanly())
-        for _, mask := range config.DefaultMaskedPaths {
+        for _, mask := range config.DefaultMaskedPaths() {
             if st, err := os.Stat(mask); err == nil {
                 if st.IsDir() {
                     session = podmanTest.Podman([]string{"exec", "maskCtr", "ls", mask})
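Both hunks above track the same API change in containers/common: DefaultMaskedPaths is now an accessor function rather than a package-level slice, so call sites only gain parentheses. A minimal standalone sketch of the new usage, assuming nothing beyond what the hunks show:

package main

import (
	"fmt"

	"github.com/containers/common/pkg/config"
)

func main() {
	// DefaultMaskedPaths is now called as a function; ranging over its
	// result replaces ranging over the old package-level variable of the
	// same name.
	for _, mp := range config.DefaultMaskedPaths() {
		fmt.Println(mp)
	}
}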
@@ -6,7 +6,7 @@ env:
     #### Global variables used for all tasks
     ####
     # Name of the ultimate destination branch for this CI run, PR or post-merge.
-    DEST_BRANCH: "release-1.39"
+    DEST_BRANCH: "main"
     GOPATH: "/var/tmp/go"
     GOSRC: "${GOPATH}/src/github.com/containers/buildah"
     GOCACHE: "/tmp/go-build"
@@ -33,7 +33,7 @@ env:
     DEBIAN_NAME: "debian-13"
 
     # Image identifiers
-    IMAGE_SUFFIX: "c20250107t132430z-f41f40d13"
+    IMAGE_SUFFIX: "c20250131t121915z-f41f40d13"
     FEDORA_CACHE_IMAGE_NAME: "fedora-${IMAGE_SUFFIX}"
     PRIOR_FEDORA_CACHE_IMAGE_NAME: "prior-fedora-${IMAGE_SUFFIX}"
     DEBIAN_CACHE_IMAGE_NAME: "debian-${IMAGE_SUFFIX}"
@@ -50,7 +50,7 @@ env:
 gcp_credentials: ENCRYPTED[ae0bf7370f0b6e446bc61d0865a2c55d3e166b3fab9466eb0393e38e1c66a31ca4c71ddc7e0139d47d075c36dd6d3fd7]
 
 # Default timeout for each task
-timeout_in: 120m
+timeout_in: 30m
 
 # Default VM to use unless set or modified by task
 gce_instance: &standardvm
@@ -95,12 +95,12 @@ smoke_task:
 
     gce_instance:
         memory: "12G"
-        cpu: 4
+        cpu: 8
 
     # Don't bother running on branches (including cron), or for tags.
     skip: $CIRRUS_PR == ''
 
-    timeout_in: 30m
+    timeout_in: 10m
 
     setup_script: '${SCRIPT_BASE}/setup.sh |& ${_TIMESTAMP}'
    build_script: '${SCRIPT_BASE}/build.sh |& ${_TIMESTAMP}'
@@ -122,7 +122,7 @@ vendor_task:
 
     # Runs within Cirrus's "community cluster"
     container:
-        image: docker.io/library/golang:1.22
+        image: docker.io/library/golang:1.23
         cpu: 1
         memory: 1
 
@@ -154,6 +154,8 @@ cross_build_task:
 
 unit_task:
     name: 'Unit tests w/ $STORAGE_DRIVER'
+    gce_instance:
+        cpu: 4
     alias: unit
     skip: &not_build_docs >-
         $CIRRUS_CHANGE_TITLE =~ '.*CI:DOCS.*' ||
@@ -162,8 +164,6 @@ unit_task:
         - smoke
         - vendor
 
-    timeout_in: 90m
-
     matrix:
         - env:
             STORAGE_DRIVER: 'vfs'
@@ -181,10 +181,9 @@ conformance_task:
     depends_on: *smoke_vendor
 
     gce_instance:
+        cpu: 4
         image_name: "${DEBIAN_CACHE_IMAGE_NAME}"
 
-    timeout_in: 65m
-
     matrix:
         - env:
             STORAGE_DRIVER: 'vfs'
@@ -208,7 +207,7 @@ integration_task:
             DISTRO_NV: "${FEDORA_NAME}"
             IMAGE_NAME: "${FEDORA_CACHE_IMAGE_NAME}"
             STORAGE_DRIVER: 'vfs'
-        # Disabled until we update to f40/41 as f39 does not have go 1.22
+        # Disabled until we update to f41/42 as f40 does not have go 1.22
         # - env:
         #     DISTRO_NV: "${PRIOR_FEDORA_NAME}"
         #     IMAGE_NAME: "${PRIOR_FEDORA_CACHE_IMAGE_NAME}"
@@ -222,7 +221,7 @@ integration_task:
             DISTRO_NV: "${FEDORA_NAME}"
             IMAGE_NAME: "${FEDORA_CACHE_IMAGE_NAME}"
             STORAGE_DRIVER: 'overlay'
-        # Disabled until we update to f40/41 as f39 does not have go 1.22
+        # Disabled until we update to f41/42 as f40 does not have go 1.22
         # - env:
         #     DISTRO_NV: "${PRIOR_FEDORA_NAME}"
         #     IMAGE_NAME: "${PRIOR_FEDORA_CACHE_IMAGE_NAME}"
@@ -234,7 +233,8 @@ integration_task:
 
     gce_instance:
         image_name: "$IMAGE_NAME"
-        cpu: 4
+        cpu: 8
+        memory: "8G"
 
     # Separate scripts for separate outputs, makes debugging easier.
     setup_script: '${SCRIPT_BASE}/setup.sh |& ${_TIMESTAMP}'
@@ -282,7 +282,8 @@ integration_rootless_task:
 
     gce_instance:
         image_name: "$IMAGE_NAME"
-        cpu: 4
+        cpu: 8
+        memory: "8G"
 
     # Separate scripts for separate outputs, makes debugging easier.
     setup_script: '${SCRIPT_BASE}/setup.sh |& ${_TIMESTAMP}'
@@ -302,7 +303,8 @@ in_podman_task:
     depends_on: *smoke_vendor
 
     gce_instance:
-        cpu: 4
+        cpu: 8
+        memory: "8G"
 
     env:
         # This is key, cause the scripts to re-execute themselves inside a container.
@@ -5,6 +5,27 @@
 downstream_package_name: buildah
 upstream_tag_template: v{version}
 
+# These files get synced from upstream to downstream (Fedora / CentOS Stream) on every
+# propose-downstream job. This is done so tests maintained upstream can be run
+# downstream in Zuul CI and Bodhi.
+# Ref: https://packit.dev/docs/configuration#files_to_sync
+files_to_sync:
+  - src: rpm/gating.yaml
+    dest: gating.yaml
+    delete: true
+  - src: plans/
+    dest: plans/
+    delete: true
+    mkpath: true
+  - src: tests/tmt/
+    dest: tests/tmt/
+    delete: true
+    mkpath: true
+  - src: .fmf/
+    dest: .fmf/
+    delete: true
+  - .packit.yaml
+
 packages:
   buildah-fedora:
     pkg_tool: fedpkg
@@ -25,9 +46,13 @@ jobs:
     notifications: &copr_build_failure_notification
       failure_comment:
         message: "Ephemeral COPR build failed. @containers/packit-build please check."
-    targets:
-      - fedora-all-x86_64
-      - fedora-all-aarch64
+    targets: &fedora_copr_targets
+      # f40 ships go 1.22 and we require go 1.23 now. This should be revert to fedora-all
+      # once either f40 is rebased to go 1.23 or f42 is released and f40 EOL.
+      - fedora-latest-stable-x86_64
+      - fedora-latest-stable-aarch64
+      - fedora-development-x86_64
+      - fedora-development-aarch64
     enable_net: true
 
   - job: copr_build
@@ -47,7 +72,7 @@ jobs:
     trigger: pull_request
     packages: [buildah-centos]
     notifications: *copr_build_failure_notification
-    targets:
+    targets: &centos_copr_targets
      - centos-stream-9-x86_64
      - centos-stream-9-aarch64
      - centos-stream-10-x86_64
@@ -66,6 +91,33 @@ jobs:
      project: podman-next
     enable_net: true
 
+  # Tests on Fedora for main branch PRs
+  - job: tests
+    trigger: pull_request
+    packages: [buildah-fedora]
+    targets: &fedora_copr_test_targets
+      # See the other comment above, this should be reverted to fedora-all when possible.
+      - fedora-latest-stable-x86_64
+      - fedora-development-x86_64
+    tf_extra_params:
+      environments:
+        - artifacts:
+          - type: repository-file
+            id: https://copr.fedorainfracloud.org/coprs/rhcontainerbot/podman-next/repo/fedora-$releasever/rhcontainerbot-podman-next-fedora-$releasever.repo
+
+  # Tests on CentOS Stream for main branch PRs
+  - job: tests
+    trigger: pull_request
+    packages: [buildah-centos]
+    targets: &centos_copr_test_targets
+      - centos-stream-9-x86_64
+      - centos-stream-10-x86_64
+    tf_extra_params:
+      environments:
+        - artifacts:
+          - type: repository-file
+            id: https://copr.fedorainfracloud.org/coprs/rhcontainerbot/podman-next/repo/centos-stream-$releasever/rhcontainerbot-podman-next-centos-stream-$releasever.repo
+
   # Sync to Fedora
   - job: propose_downstream
     trigger: release
@@ -2,15 +2,6 @@
 
 # Changelog
 
-## v1.39.2 (2025-03-03)
-
-    [release-1.39] Bump c/image to v5.34.1, c/common v0.62.1
-
-## v1.39.1 (2025-02-25)
-
-    chroot createPlatformContainer: use MS_REMOUNT
-    chore(deps): update module github.com/go-jose/go-jose/v4 to v4.0.5 [security]
-
 ## v1.39.0 (2025-01-31)
 
     Bump c/storage v1.57.1, c/image 5.34.0, c/common v0.62.0
@@ -1,3 +1,3 @@
 ## The Buildah Project Community Code of Conduct
 
-The Buildah Project follows the [Containers Community Code of Conduct](https://github.com/containers/common/blob/main/CODE-OF-CONDUCT.md).
+The Buildah Project, as part of Podman Container Tools, follows the [CNCF Code of Conduct](https://github.com/cncf/foundation/blob/main/code-of-conduct.md).
@@ -22,9 +22,10 @@ STRIP ?= strip
 GO := go
 GO_LDFLAGS := $(shell if $(GO) version|grep -q gccgo; then echo "-gccgoflags"; else echo "-ldflags"; fi)
 GO_GCFLAGS := $(shell if $(GO) version|grep -q gccgo; then echo "-gccgoflags"; else echo "-gcflags"; fi)
+NPROCS := $(shell nproc)
 export GO_BUILD=$(GO) build
-export GO_TEST=$(GO) test
-RACEFLAGS := $(shell $(GO_TEST) -race ./pkg/dummy > /dev/null 2>&1 && echo -race)
+export GO_TEST=$(GO) test -parallel=$(NPROCS)
+RACEFLAGS ?= $(shell $(GO_TEST) -race ./pkg/dummy > /dev/null 2>&1 && echo -race)
 
 COMMIT_NO ?= $(shell git rev-parse HEAD 2> /dev/null || true)
 GIT_COMMIT ?= $(if $(shell git status --porcelain --untracked-files=no),${COMMIT_NO}-dirty,${COMMIT_NO})
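A note on the new -parallel=$(NPROCS) flag: go test's -parallel setting only raises the cap for tests that opt in by calling t.Parallel(); tests that do not opt in still run one at a time within their package. A minimal sketch of an opted-in test (package and test names are invented for illustration):

package example_test

import "testing"

// Runs concurrently with other parallel tests in the same package, up to
// the -parallel=$(NPROCS) limit now set by GO_TEST in the Makefile above.
func TestSomethingParallel(t *testing.T) {
	t.Parallel()
	// ... assertions ...
}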
@@ -12,6 +12,7 @@ import (
     "os"
     "path"
     "path/filepath"
+    "slices"
     "strconv"
     "strings"
     "sync"
@@ -94,6 +95,8 @@ type AddAndCopyOptions struct {
     // RetryDelay is how long to wait before retrying attempts to retrieve
     // remote contents.
     RetryDelay time.Duration
+    // Parents preserve parent directories of source content
+    Parents bool
 }
 
 // gitURLFragmentSuffix matches fragments to use as Git reference and build
@@ -263,6 +266,25 @@ func globbedToGlobbable(glob string) string {
     return result
 }
 
+// getParentsPrefixToRemoveAndParentsToSkip gets from the pattern the prefix before the "pivot point",
+// the location in the source path marked by the path component named "."
+// (i.e. where "/./" occurs in the path). And list of parents to skip.
+// In case "/./" is not present is returned "/".
+func getParentsPrefixToRemoveAndParentsToSkip(pattern string, contextDir string) (string, []string) {
+    prefix, _, found := strings.Cut(strings.TrimPrefix(pattern, contextDir), "/./")
+    if !found {
+        return string(filepath.Separator), []string{}
+    }
+    prefix = strings.TrimPrefix(filepath.Clean(string(filepath.Separator)+prefix), string(filepath.Separator))
+    out := []string{}
+    parentPath := prefix
+    for parentPath != "/" && parentPath != "." {
+        out = append(out, parentPath)
+        parentPath = filepath.Dir(parentPath)
+    }
+    return prefix, out
+}
+
 // Add copies the contents of the specified sources into the container's root
 // filesystem, optionally extracting contents of local files that look like
 // non-empty archives.
@@ -476,7 +498,6 @@ func (b *Builder) Add(destination string, extract bool, options AddAndCopyOption
     if err := copier.Mkdir(mountPoint, extractDirectory, mkdirOptions); err != nil {
         return fmt.Errorf("ensuring target directory exists: %w", err)
     }
-
     // Copy each source in turn.
     for _, src := range sources {
         var multiErr *multierror.Error
@@ -587,7 +608,6 @@ func (b *Builder) Add(destination string, extract bool, options AddAndCopyOption
         if localSourceStat == nil {
             continue
         }
-
         // Iterate through every item that matched the glob.
         itemsCopied := 0
         for _, globbed := range localSourceStat.Globbed {
@@ -640,6 +660,25 @@ func (b *Builder) Add(destination string, extract bool, options AddAndCopyOption
                 return false, false, nil
             })
         }
+
+        if options.Parents {
+            parentsPrefixToRemove, parentsToSkip := getParentsPrefixToRemoveAndParentsToSkip(src, options.ContextDir)
+            writer = newTarFilterer(writer, func(hdr *tar.Header) (bool, bool, io.Reader) {
+                if slices.Contains(parentsToSkip, hdr.Name) && hdr.Typeflag == tar.TypeDir {
+                    return true, false, nil
+                }
+                hdr.Name = strings.TrimPrefix(hdr.Name, parentsPrefixToRemove)
+                hdr.Name = strings.TrimPrefix(hdr.Name, "/")
+                if hdr.Typeflag == tar.TypeLink {
+                    hdr.Linkname = strings.TrimPrefix(hdr.Linkname, parentsPrefixToRemove)
+                    hdr.Linkname = strings.TrimPrefix(hdr.Linkname, "/")
+                }
+                if hdr.Name == "" {
+                    return true, false, nil
+                }
+                return false, false, nil
+            })
+        }
         writer = newTarFilterer(writer, func(_ *tar.Header) (bool, bool, io.Reader) {
             itemsCopied++
             return false, false, nil
@@ -656,6 +695,7 @@ func (b *Builder) Add(destination string, extract bool, options AddAndCopyOption
             StripSetuidBit: options.StripSetuidBit,
             StripSetgidBit: options.StripSetgidBit,
             StripStickyBit: options.StripStickyBit,
+            Parents:        options.Parents,
         }
         getErr = copier.Get(contextDir, contextDir, getOptions, []string{globbedToGlobbable(globbed)}, writer)
         closeErr = writer.Close()
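A worked example may help with the pivot-point computation added above. The helper is unexported, so this standalone sketch re-derives the same values for a hypothetical pattern and context directory; the paths are invented purely for illustration.

package main

import (
	"fmt"
	"path/filepath"
	"strings"
)

// Re-derivation of getParentsPrefixToRemoveAndParentsToSkip from the hunk
// above, applied to a hypothetical source pattern.
func main() {
	pattern, contextDir := "/ctx/a/b/./c/d.txt", "/ctx"

	prefix, _, found := strings.Cut(strings.TrimPrefix(pattern, contextDir), "/./")
	if !found {
		// Without a "/./" pivot the helper returns "/" and no parents.
		fmt.Println(`prefix "/", no parents to skip`)
		return
	}
	prefix = strings.TrimPrefix(filepath.Clean("/"+prefix), "/")

	var parents []string
	for p := prefix; p != "/" && p != "."; p = filepath.Dir(p) {
		parents = append(parents, p)
	}

	// Prints: prefix "a/b", parents to skip ["a/b" "a"].
	// The tar filterer in the Add hunk would trim "a/b" from a header named
	// "a/b/c/d.txt", leaving "c/d.txt", and skip the bare "a" and "a/b"
	// directory entries.
	fmt.Printf("prefix %q, parents to skip %q\n", prefix, parents)
}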
@@ -1,11 +1,3 @@
-- Changelog for v1.39.2 (2025-03-03)
-  * [release-1.39] Bump c/image to v5.34.1, c/common v0.62.1
-
-- Changelog for v1.39.1 (2025-02-25)
-
-  * chroot createPlatformContainer: use MS_REMOUNT
-  * chore(deps): update module github.com/go-jose/go-jose/v4 to v4.0.5 [security]
-
 - Changelog for v1.39.0 (2025-01-31)
   * Bump c/storage v1.57.1, c/image 5.34.0, c/common v0.62.0
   * Update module github.com/containers/storage to v1.57.0
@@ -6,11 +6,11 @@ import (
     "fmt"
     "maps"
     "os"
-    "runtime"
     "slices"
     "strings"
     "time"
 
+    "github.com/containerd/platforms"
     "github.com/containers/buildah/define"
     "github.com/containers/buildah/docker"
     internalUtil "github.com/containers/buildah/internal/util"
@@ -137,18 +137,19 @@ func (b *Builder) fixupConfig(sys *types.SystemContext) {
     if b.OCIv1.Created == nil || b.OCIv1.Created.IsZero() {
         b.OCIv1.Created = &now
     }
+    currentPlatformSpecification := platforms.DefaultSpec()
     if b.OS() == "" {
         if sys != nil && sys.OSChoice != "" {
             b.SetOS(sys.OSChoice)
         } else {
-            b.SetOS(runtime.GOOS)
+            b.SetOS(currentPlatformSpecification.OS)
         }
     }
     if b.Architecture() == "" {
         if sys != nil && sys.ArchitectureChoice != "" {
             b.SetArchitecture(sys.ArchitectureChoice)
         } else {
-            b.SetArchitecture(runtime.GOARCH)
+            b.SetArchitecture(currentPlatformSpecification.Architecture)
         }
         // in case the arch string we started with was shorthand for a known arch+variant pair, normalize it
         ps := internalUtil.NormalizePlatform(ociv1.Platform{OS: b.OS(), Architecture: b.Architecture(), Variant: b.Variant()})
@@ -158,6 +159,8 @@ func (b *Builder) fixupConfig(sys *types.SystemContext) {
     if b.Variant() == "" {
         if sys != nil && sys.VariantChoice != "" {
             b.SetVariant(sys.VariantChoice)
+        } else {
+            b.SetVariant(currentPlatformSpecification.Variant)
         }
         // in case the arch string we started with was shorthand for a known arch+variant pair, normalize it
         ps := internalUtil.NormalizePlatform(ociv1.Platform{OS: b.OS(), Architecture: b.Architecture(), Variant: b.Variant()})
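The fixupConfig change above swaps runtime.GOOS/GOARCH for containerd's platform detection, which can also supply a Variant for architectures that need one. A minimal sketch of the call the new fallback relies on; the printed output is only an example of what a 32-bit ARM host might report:

package main

import (
	"fmt"

	"github.com/containerd/platforms"
)

func main() {
	// DefaultSpec describes the platform of the running process. Unlike
	// runtime.GOOS/GOARCH alone it also carries a Variant (for example "v7"
	// on 32-bit ARM), which is what the new SetVariant fallback consumes.
	p := platforms.DefaultSpec()
	fmt.Println(p.OS, p.Architecture, p.Variant) // e.g. "linux arm v7"
}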
@@ -13,6 +13,7 @@ import (
     "os/user"
     "path"
     "path/filepath"
+    "slices"
     "strconv"
     "strings"
     "sync"
@@ -350,6 +351,7 @@ type GetOptions struct {
     ChmodDirs *os.FileMode // set permissions on directories. no effect on archives being extracted
     ChownFiles *idtools.IDPair // set ownership of files. no effect on archives being extracted
     ChmodFiles *os.FileMode // set permissions on files. no effect on archives being extracted
+    Parents bool // maintain the sources parent directory in the destination
     StripSetuidBit bool // strip the setuid bit off of items being copied. no effect on archives being extracted
     StripSetgidBit bool // strip the setgid bit off of items being copied. no effect on archives being extracted
     StripStickyBit bool // strip the sticky bit off of items being copied. no effect on archives being extracted
@@ -1182,6 +1184,49 @@ func errorIsPermission(err error) bool {
     return errors.Is(err, os.ErrPermission) || strings.Contains(err.Error(), "permission denied")
 }
 
+func getParents(path string, stopPath string) []string {
+    out := []string{}
+    for path != "/" && path != "." && path != stopPath {
+        path = filepath.Dir(path)
+        if path == stopPath {
+            continue
+        }
+        out = append(out, path)
+    }
+    slices.Reverse(out)
+    return out
+}
+
+func checkLinks(item string, req request, info os.FileInfo) (string, os.FileInfo, error) {
+    // chase links. if we hit a dead end, we should just fail
+    oldItem := item
+    followedLinks := 0
+    const maxFollowedLinks = 16
+    for !req.GetOptions.NoDerefSymlinks && info.Mode()&os.ModeType == os.ModeSymlink && followedLinks < maxFollowedLinks {
+        path, err := os.Readlink(item)
+        if err != nil {
+            continue
+        }
+        if filepath.IsAbs(path) || looksLikeAbs(path) {
+            path = filepath.Join(req.Root, path)
+        } else {
+            path = filepath.Join(filepath.Dir(item), path)
+        }
+        item = path
+        if _, err = convertToRelSubdirectory(req.Root, item); err != nil {
+            return "", nil, fmt.Errorf("copier: get: computing path of %q(%q) relative to %q: %w", oldItem, item, req.Root, err)
+        }
+        if info, err = os.Lstat(item); err != nil {
+            return "", nil, fmt.Errorf("copier: get: lstat %q(%q): %w", oldItem, item, err)
+        }
+        followedLinks++
+    }
+    if followedLinks >= maxFollowedLinks {
+        return "", nil, fmt.Errorf("copier: get: resolving symlink %q(%q): %w", oldItem, item, syscall.ELOOP)
+    }
+    return item, info, nil
+}
+
 func copierHandlerGet(bulkWriter io.Writer, req request, pm *fileutils.PatternMatcher, idMappings *idtools.IDMappings) (*response, func() error, error) {
     statRequest := req
     statRequest.Request = requestStat
@@ -1196,15 +1241,25 @@ func copierHandlerGet(bulkWriter io.Writer, req request, pm *fileutils.PatternMa
         return errorResponse("copier: get: expected at least one glob pattern, got 0")
     }
     // build a queue of items by globbing
-    var queue []string
+    type queueItem struct {
+        glob    string
+        parents []string
+    }
+    var queue []queueItem
     globMatchedCount := 0
     for _, glob := range req.Globs {
         globMatched, err := extendedGlob(glob)
         if err != nil {
             return errorResponse("copier: get: glob %q: %v", glob, err)
         }
-        globMatchedCount += len(globMatched)
-        queue = append(queue, globMatched...)
+        for _, path := range globMatched {
+            var parents []string
+            if req.GetOptions.Parents {
+                parents = getParents(path, req.Directory)
+            }
+            globMatchedCount++
+            queue = append(queue, queueItem{glob: path, parents: parents})
+        }
     }
     // no matches -> error
     if len(queue) == 0 {
@@ -1219,7 +1274,9 @@ func copierHandlerGet(bulkWriter io.Writer, req request, pm *fileutils.PatternMa
     defer tw.Close()
     hardlinkChecker := new(hardlinkChecker)
     itemsCopied := 0
-    for i, item := range queue {
+    addedParents := map[string]struct{}{}
+    for i, qItem := range queue {
+        item := qItem.glob
         // if we're not discarding the names of individual directories, keep track of this one
         relNamePrefix := ""
         if req.GetOptions.KeepDirectoryNames {
@@ -1230,31 +1287,53 @@ func copierHandlerGet(bulkWriter io.Writer, req request, pm *fileutils.PatternMa
         if err != nil {
             return fmt.Errorf("copier: get: lstat %q: %w", item, err)
         }
-        // chase links. if we hit a dead end, we should just fail
-        followedLinks := 0
-        const maxFollowedLinks = 16
-        for !req.GetOptions.NoDerefSymlinks && info.Mode()&os.ModeType == os.ModeSymlink && followedLinks < maxFollowedLinks {
-            path, err := os.Readlink(item)
-            if err != nil {
-                continue
-            }
-            if filepath.IsAbs(path) || looksLikeAbs(path) {
-                path = filepath.Join(req.Root, path)
-            } else {
-                path = filepath.Join(filepath.Dir(item), path)
-            }
-            item = path
-            if _, err = convertToRelSubdirectory(req.Root, item); err != nil {
-                return fmt.Errorf("copier: get: computing path of %q(%q) relative to %q: %w", queue[i], item, req.Root, err)
-            }
-            if info, err = os.Lstat(item); err != nil {
-                return fmt.Errorf("copier: get: lstat %q(%q): %w", queue[i], item, err)
-            }
-            followedLinks++
-        }
-        if followedLinks >= maxFollowedLinks {
-            return fmt.Errorf("copier: get: resolving symlink %q(%q): %w", queue[i], item, syscall.ELOOP)
-        }
+        if req.GetOptions.Parents && info.Mode().IsDir() {
+            if !slices.Contains(qItem.parents, item) {
+                qItem.parents = append(qItem.parents, item)
+            }
+        }
+        // Copy parents in to tarball first if exists
+        for _, parent := range qItem.parents {
+            oldParent := parent
+            parentInfo, err := os.Lstat(parent)
+            if err != nil {
+                return fmt.Errorf("copier: get: lstat %q: %w", parent, err)
+            }
+            parent, parentInfo, err = checkLinks(parent, req, parentInfo)
+            if err != nil {
+                return err
+            }
+            parentName, err := convertToRelSubdirectory(req.Directory, oldParent)
+            if err != nil {
+                return fmt.Errorf("copier: get: error computing path of %q relative to %q: %w", parent, req.Directory, err)
+            }
+            if parentName == "" || parentName == "." {
+                // skip the "." entry
+                continue
+            }
+            if _, ok := addedParents[parentName]; ok {
+                continue
+            }
+            addedParents[parentName] = struct{}{}
+            if err := copierHandlerGetOne(parentInfo, "", parentName, parent, req.GetOptions, tw, hardlinkChecker, idMappings); err != nil {
+                if req.GetOptions.IgnoreUnreadable && errorIsPermission(err) {
+                    continue
+                } else if errors.Is(err, os.ErrNotExist) {
+                    logrus.Warningf("copier: file disappeared while reading: %q", parent)
+                    return nil
+                }
+                return fmt.Errorf("copier: get: %q: %w", queue[i].glob, err)
+            }
+            itemsCopied++
+        }
+        item, info, err = checkLinks(item, req, info)
+        if err != nil {
+            return err
+        }
 
         // evaluate excludes relative to the root directory
         if info.Mode().IsDir() {
             // we don't expand any of the contents that are archives
@@ -1354,6 +1433,12 @@ func copierHandlerGet(bulkWriter io.Writer, req request, pm *fileutils.PatternMa
                     ok = filepath.SkipDir
                 }
             }
+            if req.GetOptions.Parents {
+                rel, err = convertToRelSubdirectory(req.Directory, path)
+                if err != nil {
+                    return fmt.Errorf("copier: get: error computing path of %q relative to %q: %w", path, req.Root, err)
+                }
+            }
             // add the item to the outgoing tar stream
             if err := copierHandlerGetOne(info, symlinkTarget, rel, path, options, tw, hardlinkChecker, idMappings); err != nil {
                 if req.GetOptions.IgnoreUnreadable && errorIsPermission(err) {
@@ -1368,7 +1453,7 @@ func copierHandlerGet(bulkWriter io.Writer, req request, pm *fileutils.PatternMa
             }
             // walk the directory tree, checking/adding items individually
             if err := filepath.WalkDir(item, walkfn); err != nil {
-                return fmt.Errorf("copier: get: %q(%q): %w", queue[i], item, err)
+                return fmt.Errorf("copier: get: %q(%q): %w", queue[i].glob, item, err)
             }
             itemsCopied++
         } else {
@@ -1379,15 +1464,24 @@ func copierHandlerGet(bulkWriter io.Writer, req request, pm *fileutils.PatternMa
             if skip {
                 continue
             }
-            // add the item to the outgoing tar stream. in
-            // cases where this was a symlink that we
-            // dereferenced, be sure to use the name of the
-            // link.
-            if err := copierHandlerGetOne(info, "", filepath.Base(queue[i]), item, req.GetOptions, tw, hardlinkChecker, idMappings); err != nil {
+            name := filepath.Base(queue[i].glob)
+            if req.GetOptions.Parents {
+                name, err = convertToRelSubdirectory(req.Directory, queue[i].glob)
+                if err != nil {
+                    return fmt.Errorf("copier: get: error computing path of %q relative to %q: %w", item, req.Root, err)
+                }
+                if name == "" || name == "." {
+                    // skip the "." entry
+                    continue
+                }
+            }
+
+            if err := copierHandlerGetOne(info, "", name, item, req.GetOptions, tw, hardlinkChecker, idMappings); err != nil {
                 if req.GetOptions.IgnoreUnreadable && errorIsPermission(err) {
                     continue
                 }
-                return fmt.Errorf("copier: get: %q: %w", queue[i], err)
+                return fmt.Errorf("copier: get: %q: %w", queue[i].glob, err)
             }
             itemsCopied++
         }
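The getParents helper added above feeds the parent-directory pass in copierHandlerGet. A short standalone sketch with hypothetical paths shows what it produces; the real function is unexported, so its body is repeated here verbatim for illustration only.

package main

import (
	"fmt"
	"path/filepath"
	"slices"
)

// Same body as the getParents hunk above.
func getParents(path string, stopPath string) []string {
	out := []string{}
	for path != "/" && path != "." && path != stopPath {
		path = filepath.Dir(path)
		if path == stopPath {
			continue
		}
		out = append(out, path)
	}
	slices.Reverse(out)
	return out
}

func main() {
	// Hypothetical request: the glob matched /ctx/a/b/c.txt and the request
	// directory (stopPath) is /ctx. The ancestors between the two, ordered
	// outermost first, are what copierHandlerGet now archives before the
	// item itself when GetOptions.Parents is set.
	fmt.Println(getParents("/ctx/a/b/c.txt", "/ctx")) // [/ctx/a /ctx/a/b]
}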
@@ -29,7 +29,7 @@ const (
     // identify working containers.
     Package = "buildah"
     // Version for the Package. Also used by .packit.sh for Packit builds.
-    Version = "1.39.2"
+    Version = "1.40.0-dev"
 
     // DefaultRuntime if containers.conf fails.
     DefaultRuntime = "runc"
@ -28,6 +28,7 @@ import (
|
||||||
"github.com/containers/common/pkg/config"
|
"github.com/containers/common/pkg/config"
|
||||||
"github.com/containers/image/v5/docker"
|
"github.com/containers/image/v5/docker"
|
||||||
"github.com/containers/image/v5/docker/reference"
|
"github.com/containers/image/v5/docker/reference"
|
||||||
|
"github.com/containers/image/v5/image"
|
||||||
"github.com/containers/image/v5/manifest"
|
"github.com/containers/image/v5/manifest"
|
||||||
"github.com/containers/image/v5/pkg/shortnames"
|
"github.com/containers/image/v5/pkg/shortnames"
|
||||||
istorage "github.com/containers/image/v5/storage"
|
istorage "github.com/containers/image/v5/storage"
|
||||||
|
@ -92,12 +93,7 @@ func BuildDockerfiles(ctx context.Context, store storage.Store, options define.B
|
||||||
}
|
}
|
||||||
logger.SetLevel(logrus.GetLevel())
|
logger.SetLevel(logrus.GetLevel())
|
||||||
|
|
||||||
var dockerfiles []io.ReadCloser
|
var dockerfiles []io.Reader
|
||||||
defer func(dockerfiles ...io.ReadCloser) {
|
|
||||||
for _, d := range dockerfiles {
|
|
||||||
d.Close()
|
|
||||||
}
|
|
||||||
}(dockerfiles...)
|
|
||||||
|
|
||||||
for _, tag := range append([]string{options.Output}, options.AdditionalTags...) {
|
for _, tag := range append([]string{options.Output}, options.AdditionalTags...) {
|
||||||
if tag == "" {
|
if tag == "" {
|
||||||
|
@ -109,7 +105,7 @@ func BuildDockerfiles(ctx context.Context, store storage.Store, options define.B
|
||||||
}
|
}
|
||||||
|
|
||||||
for _, dfile := range paths {
|
for _, dfile := range paths {
|
||||||
var data io.ReadCloser
|
var data io.Reader
|
||||||
|
|
||||||
if strings.HasPrefix(dfile, "http://") || strings.HasPrefix(dfile, "https://") {
|
if strings.HasPrefix(dfile, "http://") || strings.HasPrefix(dfile, "https://") {
|
||||||
logger.Debugf("reading remote Dockerfile %q", dfile)
|
logger.Debugf("reading remote Dockerfile %q", dfile)
|
||||||
|
@ -117,8 +113,8 @@ func BuildDockerfiles(ctx context.Context, store storage.Store, options define.B
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return "", nil, err
|
return "", nil, err
|
||||||
}
|
}
|
||||||
|
defer resp.Body.Close()
|
||||||
if resp.ContentLength == 0 {
|
if resp.ContentLength == 0 {
|
||||||
resp.Body.Close()
|
|
||||||
return "", nil, fmt.Errorf("no contents in %q", dfile)
|
return "", nil, fmt.Errorf("no contents in %q", dfile)
|
||||||
}
|
}
|
||||||
data = resp.Body
|
data = resp.Body
|
||||||
|
@ -145,13 +141,12 @@ func BuildDockerfiles(ctx context.Context, store storage.Store, options define.B
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return "", nil, fmt.Errorf("reading build instructions: %w", err)
|
return "", nil, fmt.Errorf("reading build instructions: %w", err)
|
||||||
}
|
}
|
||||||
|
defer contents.Close()
|
||||||
dinfo, err = contents.Stat()
|
dinfo, err = contents.Stat()
|
||||||
if err != nil {
|
if err != nil {
|
||||||
contents.Close()
|
|
||||||
return "", nil, fmt.Errorf("reading info about %q: %w", dfile, err)
|
return "", nil, fmt.Errorf("reading info about %q: %w", dfile, err)
|
||||||
}
|
}
|
||||||
if dinfo.Mode().IsRegular() && dinfo.Size() == 0 {
|
if dinfo.Mode().IsRegular() && dinfo.Size() == 0 {
|
||||||
contents.Close()
|
|
||||||
return "", nil, fmt.Errorf("no contents in %q", dfile)
|
return "", nil, fmt.Errorf("no contents in %q", dfile)
|
||||||
}
|
}
|
||||||
data = contents
|
data = contents
|
||||||
|
@ -163,7 +158,7 @@ func BuildDockerfiles(ctx context.Context, store storage.Store, options define.B
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return "", nil, err
|
return "", nil, err
|
||||||
}
|
}
|
||||||
data = io.NopCloser(pData)
|
data = pData
|
||||||
}
|
}
|
||||||
|
|
||||||
dockerfiles = append(dockerfiles, data)
|
dockerfiles = append(dockerfiles, data)
|
||||||
|
@ -369,7 +364,7 @@ func BuildDockerfiles(ctx context.Context, store storage.Store, options define.B
|
||||||
return "", nil, err
|
return "", nil, err
|
||||||
}
|
}
|
||||||
defer imgSource.Close()
|
defer imgSource.Close()
|
||||||
manifestBytes, _, err := imgSource.GetManifest(ctx, nil)
|
manifestBytes, _, err := image.UnparsedInstance(imgSource, nil).Manifest(ctx)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return "", nil, err
|
return "", nil, err
|
||||||
}
|
}
|
||||||
|
@@ -430,6 +425,7 @@ func buildDockerfilesOnce(ctx context.Context, store storage.Store, logger *logr
 builtinArgDefaults["TARGETVARIANT"] = defaultPlatform.Variant
 builtinArgDefaults["TARGETARCH"] = defaultPlatform.Architecture
 builtinArgDefaults["TARGETPLATFORM"] = defaultPlatform.OS + "/" + defaultPlatform.Architecture
+builtinArgDefaults["TARGETPLATFORM"] = defaultPlatform.OS + "/" + defaultPlatform.Architecture
 if defaultPlatform.Variant != "" {
 builtinArgDefaults["TARGETPLATFORM"] += "/" + defaultPlatform.Variant
 }
@@ -453,6 +449,7 @@ func buildDockerfilesOnce(ctx context.Context, store storage.Store, logger *logr
 for k, v := range builtinArgDefaults {
 b.BuiltinArgDefaults[k] = v
 }

 defaultContainerConfig, err := config.Default()
 if err != nil {
 return "", nil, fmt.Errorf("failed to get container config: %w", err)
@@ -567,7 +564,7 @@ func platformsForBaseImages(ctx context.Context, logger *logrus.Logger, dockerfi
 logrus.Debugf("preparing to read image manifest for %q: %v", baseImage, err)
 continue
 }
-candidateBytes, candidateType, err := src.GetManifest(ctx, nil)
+candidateBytes, candidateType, err := image.UnparsedInstance(src, nil).Manifest(ctx)
 _ = src.Close()
 if err != nil {
 logrus.Debugf("reading image manifest for %q: %v", baseImage, err)
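The GetManifest calls above are replaced with image.UnparsedInstance(...).Manifest(ctx). As a hedged illustration only (the image name, helper function and main below are invented and not part of this commit), this is roughly how a caller reads a manifest through the containers/image UnparsedInstance wrapper, which lets the library resolve and, for digested references, verify the manifest instead of returning raw bytes unchecked:

    // Sketch only: assumes the containers/image/v5 module; values are placeholders.
    package main

    import (
        "context"
        "fmt"

        "github.com/containers/image/v5/image"
        "github.com/containers/image/v5/transports/alltransports"
        "github.com/containers/image/v5/types"
    )

    func manifestFor(ctx context.Context, sys *types.SystemContext, name string) ([]byte, string, error) {
        ref, err := alltransports.ParseImageName(name)
        if err != nil {
            return nil, "", err
        }
        src, err := ref.NewImageSource(ctx, sys)
        if err != nil {
            return nil, "", err
        }
        defer src.Close()
        // Wrap the source instead of calling src.GetManifest(ctx, nil) directly.
        return image.UnparsedInstance(src, nil).Manifest(ctx)
    }

    func main() {
        _, mimeType, err := manifestFor(context.Background(), nil, "docker://registry.example.com/app:latest")
        fmt.Println(mimeType, err)
    }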
@@ -368,9 +368,6 @@ func (s *StageExecutor) Copy(excludes []string, copies ...imagebuilder.Copy) err
 if cp.Link {
 return errors.New("COPY --link is not supported")
 }
-if cp.Parents {
-return errors.New("COPY --parents is not supported")
-}
 if len(cp.Excludes) > 0 {
 excludes = append(slices.Clone(excludes), cp.Excludes...)
 }
@@ -427,7 +424,13 @@ func (s *StageExecutor) performCopy(excludes []string, copies ...imagebuilder.Co
 data = strings.TrimPrefix(data, "\n")
 // add breakline when heredoc ends for docker compat
 data = data + "\n"
-tmpFile, err := os.Create(filepath.Join(parse.GetTempDir(), path.Base(filepath.ToSlash(file.Name))))
+// Create seperate subdir for this file.
+tmpDir, err := os.MkdirTemp(parse.GetTempDir(), "buildah-heredoc")
+if err != nil {
+return fmt.Errorf("unable to create tmp dir for heredoc run %q: %w", parse.GetTempDir(), err)
+}
+defer os.RemoveAll(tmpDir)
+tmpFile, err := os.Create(filepath.Join(tmpDir, path.Base(filepath.ToSlash(file.Name))))
 if err != nil {
 return fmt.Errorf("unable to create tmp file for COPY instruction at %q: %w", parse.GetTempDir(), err)
 }
@@ -442,7 +445,7 @@ func (s *StageExecutor) performCopy(excludes []string, copies ...imagebuilder.Co
 tmpFile.Close()
 return fmt.Errorf("unable to write contents of heredoc file at %q: %w", tmpFile.Name(), err)
 }
-copySources = append(copySources, filepath.Base(tmpFile.Name()))
+copySources = append(copySources, filepath.Join(filepath.Base(tmpDir), filepath.Base(tmpFile.Name())))
 tmpFile.Close()
 }
 contextDir = parse.GetTempDir()
@@ -554,7 +557,17 @@ func (s *StageExecutor) performCopy(excludes []string, copies ...imagebuilder.Co
 return fmt.Errorf("source can't be a URL for COPY")
 }
 } else {
-sources = append(sources, filepath.Join(contextDir, src))
+// filepath.Join clean path so /./ is removed
+if _, suffix, found := strings.Cut(src, "/./"); found && copy.Parents {
+fullPath := filepath.Join(contextDir, src)
+suffix = filepath.Clean(suffix)
+prefix := strings.TrimSuffix(fullPath, suffix)
+prefix = filepath.Clean(prefix)
+src = prefix + "/./" + suffix
+} else {
+src = filepath.Join(contextDir, src)
+}
+sources = append(sources, src)
 }
 }
 options := buildah.AddAndCopyOptions{
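The new branch above pivots a COPY --parents source on the "/./" marker so that the directory structure after the pivot is preserved under the destination. A minimal stand-alone sketch of that string handling, with made-up paths (not taken from the commit):

    // Standalone sketch of the "/./" pivot computation used for COPY --parents.
    package main

    import (
        "fmt"
        "path/filepath"
        "strings"
    )

    func pivotSource(contextDir, src string) string {
        if _, suffix, found := strings.Cut(src, "/./"); found {
            full := filepath.Join(contextDir, src) // Join cleans the path, dropping "/./"
            suffix = filepath.Clean(suffix)
            prefix := filepath.Clean(strings.TrimSuffix(full, suffix))
            return prefix + "/./" + suffix // keep the pivot marker for the copier
        }
        return filepath.Join(contextDir, src)
    }

    func main() {
        // e.g. COPY --parents src/./deep/file.txt /dst keeps deep/file.txt under /dst
        fmt.Println(pivotSource("/build/context", "src/./deep/file.txt"))
    }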
@@ -575,6 +588,7 @@ func (s *StageExecutor) performCopy(excludes []string, copies ...imagebuilder.Co
 InsecureSkipTLSVerify: s.executor.systemContext.DockerInsecureSkipTLSVerify,
 MaxRetries: s.executor.maxPullPushRetries,
 RetryDelay: s.executor.retryPullPushDelay,
+Parents: copy.Parents,
 }
 if len(copy.Files) > 0 {
 // If we are copying heredoc files, we need to temporary place
@@ -1937,17 +1951,20 @@ func (s *StageExecutor) getCreatedBy(node *parser.Node, addedContentSummary stri
 if len(node.Original) > 4 {
 shArg = node.Original[4:]
 }
-if buildArgs != "" {
-return "|" + strconv.Itoa(len(strings.Split(buildArgs, " "))) + " " + buildArgs + " /bin/sh -c " + shArg + appendCheckSum, nil
-}
-result := "/bin/sh -c " + shArg
+heredoc := ""
+result := ""
 if len(node.Heredocs) > 0 {
 for _, doc := range node.Heredocs {
 heredocContent := strings.TrimSpace(doc.Content)
-result = result + "\n" + heredocContent
+heredoc = heredoc + "\n" + heredocContent
 }
 }
-return result + appendCheckSum, nil
+if buildArgs != "" {
+result = result + "|" + strconv.Itoa(len(strings.Split(buildArgs, " "))) + " " + buildArgs + " "
+}
+result = result + "/bin/sh -c " + shArg + heredoc + appendCheckSum
+return result, nil
 case "ADD", "COPY":
 destination := node
 for destination.Next != nil {
@@ -39,7 +39,8 @@ func importBuilderDataFromImage(ctx context.Context, store storage.Store, system
 defer src.Close()

 imageDigest := ""
-manifestBytes, manifestType, err := src.GetManifest(ctx, nil)
+unparsedTop := image.UnparsedInstance(src, nil)
+manifestBytes, manifestType, err := unparsedTop.Manifest(ctx)
 if err != nil {
 return nil, fmt.Errorf("loading image manifest for %q: %w", transports.ImageName(ref), err)
 }
@@ -48,6 +49,7 @@ func importBuilderDataFromImage(ctx context.Context, store storage.Store, system
 }

 var instanceDigest *digest.Digest
+unparsedInstance := unparsedTop // for instanceDigest
 if manifest.MIMETypeIsMultiImage(manifestType) {
 list, err := manifest.ListFromBlob(manifestBytes, manifestType)
 if err != nil {
@@ -58,9 +60,10 @@ func importBuilderDataFromImage(ctx context.Context, store storage.Store, system
 return nil, fmt.Errorf("finding an appropriate image in manifest list %q: %w", transports.ImageName(ref), err)
 }
 instanceDigest = &instance
+unparsedInstance = image.UnparsedInstance(src, instanceDigest)
 }

-image, err := image.FromUnparsedImage(ctx, systemContext, image.UnparsedInstance(src, instanceDigest))
+image, err := image.FromUnparsedImage(ctx, systemContext, unparsedInstance)
 if err != nil {
 return nil, fmt.Errorf("instantiating image for %q instance %q: %w", transports.ImageName(ref), instanceDigest, err)
 }
@@ -21,9 +21,9 @@ import (
 "github.com/containers/buildah/pkg/overlay"
 "github.com/containers/luksy"
 "github.com/containers/storage/pkg/idtools"
+"github.com/containers/storage/pkg/ioutils"
 "github.com/containers/storage/pkg/mount"
 "github.com/containers/storage/pkg/system"
-"github.com/docker/docker/pkg/ioutils"
 "github.com/docker/go-units"
 digest "github.com/opencontainers/go-digest"
 v1 "github.com/opencontainers/image-spec/specs-go/v1"
@@ -557,14 +557,19 @@ func GetCacheMount(sys *types.SystemContext, args []string, store storage.Store,
 return newMount, "", "", "", nil, fmt.Errorf("unable to create build cache directory: %w", err)
 }

+ownerInfo := fmt.Sprintf(":%d:%d", uid, gid)
 if id != "" {
-// Don't let the user control where we place the directory.
-dirID := digest.FromString(id).Encoded()[:16]
+// Don't let the user try to inject pathname components by directly using
+// the ID when constructing the cache directory location; distinguish
+// between caches by ID and ownership
+dirID := digest.FromString(id + ownerInfo).Encoded()[:16]
 thisCacheRoot = filepath.Join(cacheParent, dirID)
 buildahLockFilesDir = filepath.Join(cacheParent, BuildahCacheLockfileDir, dirID)
 } else {
-// Don't let the user control where we place the directory.
-dirID := digest.FromString(newMount.Destination).Encoded()[:16]
+// Don't let the user try to inject pathname components by directly using
+// the target path when constructing the cache directory location;
+// distinguish between caches by mount target location and ownership
+dirID := digest.FromString(newMount.Destination + ownerInfo).Encoded()[:16]
 thisCacheRoot = filepath.Join(cacheParent, dirID)
 buildahLockFilesDir = filepath.Join(cacheParent, BuildahCacheLockfileDir, dirID)
 }
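The cache directory name above is now a digest of the cache ID (or mount target) plus a ":uid:gid" suffix, so an ID cannot smuggle path components into the location and caches owned by different users stay separate. A small sketch of the derivation, with invented values (not from the commit):

    // Sketch of the per-cache directory naming; cacheParent, id, uid and gid are examples.
    package main

    import (
        "fmt"
        "path/filepath"

        digest "github.com/opencontainers/go-digest"
    )

    func cacheDir(cacheParent, id string, uid, gid int) string {
        ownerInfo := fmt.Sprintf(":%d:%d", uid, gid)
        // Hashing id+owner means "../escape"-style IDs can't steer the location,
        // and root and rootless users end up with distinct cache directories.
        dirID := digest.FromString(id + ownerInfo).Encoded()[:16]
        return filepath.Join(cacheParent, dirID)
    }

    func main() {
        fmt.Println(cacheDir("/var/tmp/buildah-cache", "gomodcache", 1000, 1000))
    }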
@@ -195,7 +195,8 @@ func newBuilder(ctx context.Context, store storage.Store, options BuilderOptions
 return nil, fmt.Errorf("instantiating image for %q: %w", transports.ImageName(ref), err)
 }
 defer srcSrc.Close()
-manifestBytes, manifestType, err := srcSrc.GetManifest(ctx, nil)
+unparsedTop := image.UnparsedInstance(srcSrc, nil)
+manifestBytes, manifestType, err := unparsedTop.Manifest(ctx)
 if err != nil {
 return nil, fmt.Errorf("loading image manifest for %q: %w", transports.ImageName(ref), err)
 }
@@ -203,6 +204,7 @@ func newBuilder(ctx context.Context, store storage.Store, options BuilderOptions
 imageDigest = manifestDigest.String()
 }
 var instanceDigest *digest.Digest
+unparsedInstance := unparsedTop // for instanceDigest
 if manifest.MIMETypeIsMultiImage(manifestType) {
 list, err := manifest.ListFromBlob(manifestBytes, manifestType)
 if err != nil {
@@ -213,8 +215,9 @@ func newBuilder(ctx context.Context, store storage.Store, options BuilderOptions
 return nil, fmt.Errorf("finding an appropriate image in manifest list %q: %w", transports.ImageName(ref), err)
 }
 instanceDigest = &instance
+unparsedInstance = image.UnparsedInstance(srcSrc, instanceDigest)
 }
-src, err = image.FromUnparsedImage(ctx, systemContext, image.UnparsedInstance(srcSrc, instanceDigest))
+src, err = image.FromUnparsedImage(ctx, systemContext, unparsedInstance)
 if err != nil {
 return nil, fmt.Errorf("instantiating image for %q instance %q: %w", transports.ImageName(ref), instanceDigest, err)
 }
@@ -47,6 +47,7 @@ import (
 "github.com/containers/storage/pkg/lockfile"
 "github.com/containers/storage/pkg/mount"
 "github.com/containers/storage/pkg/reexec"
+"github.com/containers/storage/pkg/regexp"
 "github.com/containers/storage/pkg/unshare"
 "github.com/opencontainers/go-digest"
 "github.com/opencontainers/runtime-spec/specs-go"
@@ -57,6 +58,10 @@ import (
 "golang.org/x/term"
 )

+const maxHostnameLen = 64
+
+var validHostnames = regexp.Delayed("[A-Za-z0-9][A-Za-z0-9.-]+")
+
 func (b *Builder) createResolvConf(rdir string, chownOpts *idtools.IDPair) (string, error) {
 cfile := filepath.Join(rdir, "resolv.conf")
 f, err := os.Create(cfile)
@@ -2092,3 +2097,21 @@ func relabel(path, mountLabel string, shared bool) error {
 }
 return nil
 }
+
+// mapContainerNameToHostname returns the passed-in string with characters that
+// don't match validHostnames (defined above) stripped out.
+func mapContainerNameToHostname(containerName string) string {
+match := validHostnames.FindStringIndex(containerName)
+if match == nil {
+return ""
+}
+trimmed := containerName[match[0]:]
+match[1] -= match[0]
+match[0] = 0
+for match[1] != len(trimmed) && match[1] < match[0]+maxHostnameLen {
+trimmed = trimmed[:match[1]] + trimmed[match[1]+1:]
+match = validHostnames.FindStringIndex(trimmed)
+match[1] = min(match[1], maxHostnameLen)
+}
+return trimmed[:match[1]]
+}
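mapContainerNameToHostname trims a container name down to something usable as a hostname: characters outside validHostnames are dropped and the result is capped at maxHostnameLen. The stand-alone snippet below only approximates that behaviour with stdlib regexp; it is not the exact trimming loop above, and the sample name is invented:

    // Rough illustration only: strip characters not valid in a hostname, cap at 64.
    package main

    import (
        "fmt"
        "regexp"
    )

    var invalid = regexp.MustCompile(`[^A-Za-z0-9.-]+`)

    func approxHostname(containerName string) string {
        h := invalid.ReplaceAllString(containerName, "")
        if len(h) > 64 {
            h = h[:64]
        }
        return h
    }

    func main() {
        // e.g. a container named "my_app/web 1" would map to "myappweb1" (hypothetical)
        fmt.Println(approxHostname("my_app/web 1"))
    }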
@@ -586,7 +586,17 @@ func (b *Builder) configureNamespaces(g *generate.Generator, options *RunOptions
 } else if b.Hostname() != "" {
 g.SetHostname(b.Hostname())
 } else {
-g.SetHostname(stringid.TruncateID(b.ContainerID))
+hostname := stringid.TruncateID(b.ContainerID)
+defConfig, err := config.Default()
+if err != nil {
+return false, "", fmt.Errorf("failed to get container config: %w", err)
+}
+if defConfig.Containers.ContainerNameAsHostName {
+if mapped := mapContainerNameToHostname(b.Container); mapped != "" {
+hostname = mapped
+}
+}
+g.SetHostname(hostname)
 }
 } else {
 g.SetHostname("")
@@ -991,7 +991,17 @@ func (b *Builder) configureNamespaces(g *generate.Generator, options *RunOptions
 } else if b.Hostname() != "" {
 g.SetHostname(b.Hostname())
 } else {
-g.SetHostname(stringid.TruncateID(b.ContainerID))
+hostname := stringid.TruncateID(b.ContainerID)
+defConfig, err := config.Default()
+if err != nil {
+return false, "", fmt.Errorf("failed to get container config: %w", err)
+}
+if defConfig.Containers.ContainerNameAsHostName {
+if mapped := mapContainerNameToHostname(b.Container); mapped != "" {
+hostname = mapped
+}
+}
+g.SetHostname(hostname)
 }
 } else {
 g.SetHostname("")
@@ -1223,9 +1233,17 @@ func setupMaskedPaths(g *generate.Generator, opts *define.CommonBuildOptions) {
 if slices.Contains(opts.Unmasks, "all") {
 return
 }
-for _, mp := range append(config.DefaultMaskedPaths, opts.Masks...) {
-if slices.Contains(opts.Unmasks, mp) {
-continue
+nextMaskedPath:
+for _, mp := range append(config.DefaultMaskedPaths(), opts.Masks...) {
+for _, unmask := range opts.Unmasks {
+match, err := filepath.Match(unmask, mp)
+if err != nil {
+logrus.Warnf("Invalid unmask pattern %q: %v", unmask, err)
+continue
+}
+if match {
+continue nextMaskedPath
+}
 }
 g.AddLinuxMaskedPaths(mp)
 }
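Unmask entries are now treated as filepath.Match globs against each default masked path rather than exact strings, which is what makes per-CPU entries like the thermal_throttle paths unmaskable with a single pattern. A quick stdlib illustration:

    // Brief illustration of the glob matching now used for --unmask values.
    package main

    import (
        "fmt"
        "path/filepath"
    )

    func main() {
        unmask := "/sys/devices/system/cpu/cpu*/thermal_throttle"
        masked := "/sys/devices/system/cpu/cpu0/thermal_throttle"
        ok, err := filepath.Match(unmask, masked)
        fmt.Println(ok, err) // true <nil>: this masked path would be skipped
    }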
@@ -39,6 +39,13 @@ func CommonNetworkCreate(n NetUtil, network *types.Network) error {
 network.NetworkInterface = name
 }
 }
+
+// Validate interface name if specified
+if network.NetworkInterface != "" {
+if err := ValidateInterfaceName(network.NetworkInterface); err != nil {
+return fmt.Errorf("network interface name %s invalid: %w", network.NetworkInterface, err)
+}
+}
 return nil
 }
@@ -4,6 +4,8 @@ import (
 "errors"
 "fmt"
 "net"
+"strings"
+"unicode"

 "github.com/containers/common/libnetwork/types"
 "github.com/containers/common/libnetwork/util"
@@ -159,3 +161,23 @@ func validatePerNetworkOpts(network *types.Network, netOpts *types.PerNetworkOpt
 }
 return nil
 }
+
+// ValidateInterfaceName validates the interface name based on the following rules:
+// 1. The name must be less than MaxInterfaceNameLength characters
+// 2. The name must not be "." or ".."
+// 3. The name must not contain / or : or any whitespace characters
+// ref to https://github.com/torvalds/linux/blob/81e4f8d68c66da301bb881862735bd74c6241a19/include/uapi/linux/if.h#L33C18-L33C20
+func ValidateInterfaceName(ifName string) error {
+if len(ifName) > types.MaxInterfaceNameLength {
+return fmt.Errorf("interface name is too long: interface names must be %d characters or less: %w", types.MaxInterfaceNameLength, types.ErrInvalidArg)
+}
+if ifName == "." || ifName == ".." {
+return fmt.Errorf("interface name is . or ..: %w", types.ErrInvalidArg)
+}
+if strings.ContainsFunc(ifName, func(r rune) bool {
+return r == '/' || r == ':' || unicode.IsSpace(r)
+}) {
+return fmt.Errorf("interface name contains / or : or whitespace characters: %w", types.ErrInvalidArg)
+}
+return nil
+}
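ValidateInterfaceName lives in an internal package of containers/common, so it cannot be imported from outside the module; the sketch below simply restates the same three rules with the standard library, using invented example names:

    // Standalone sketch of the same interface-name rules (max 15 chars, not "."/"..",
    // no '/', ':' or whitespace); the real helper is internal to containers/common.
    package main

    import (
        "errors"
        "fmt"
        "strings"
        "unicode"
    )

    func validateIfName(name string) error {
        if len(name) > 15 {
            return errors.New("interface name is too long")
        }
        if name == "." || name == ".." {
            return errors.New("interface name is . or ..")
        }
        if strings.ContainsFunc(name, func(r rune) bool {
            return r == '/' || r == ':' || unicode.IsSpace(r)
        }) {
            return errors.New("interface name contains / or : or whitespace")
        }
        return nil
    }

    func main() {
        for _, n := range []string{"podman1", "eth0:1", "a-very-long-interface-name"} {
            fmt.Println(n, validateIfName(n))
        }
    }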
@@ -30,4 +30,7 @@ var (
 // NotHexRegex is a regular expression to check if a string is
 // a hexadecimal string.
 NotHexRegex = regexp.Delayed(`[^0-9a-fA-F]`)
+
+// MaxInterfaceNameLength is the maximum length of a network interface name
+MaxInterfaceNameLength = 15
 )
@@ -8,6 +8,7 @@ import (
 "path/filepath"
 "runtime"
 "strings"
+"sync"

 "github.com/containers/common/internal/attributedstring"
 nettypes "github.com/containers/common/libnetwork/types"
@@ -36,8 +37,8 @@ const (
 defaultInitName = "catatonit"
 )

-var (
-DefaultMaskedPaths = []string{
+func getMaskedPaths() ([]string, error) {
+maskedPaths := []string{
 "/proc/acpi",
 "/proc/kcore",
 "/proc/keys",
@@ -49,8 +50,34 @@ var (
 "/sys/devices/virtual/powercap",
 "/sys/firmware",
 "/sys/fs/selinux",
+"/proc/interrupts",
+}
+maskedPathsToGlob := []string{
+"/sys/devices/system/cpu/cpu*/thermal_throttle",
 }

+for _, p := range maskedPathsToGlob {
+matches, err := filepath.Glob(p)
+if err != nil {
+return nil, err
+}
+maskedPaths = append(maskedPaths, matches...)
+}
+return maskedPaths, nil
+}
+
+var DefaultMaskedPaths = sync.OnceValue(func() []string {
+maskedPaths, err := getMaskedPaths()
+// this should never happen, the only error possible
+// is ErrBadPattern and the patterns that were added must be valid
+if err != nil {
+panic(err)
+}
+
+return maskedPaths
+})
+
+var (
 DefaultReadOnlyPaths = []string{
 "/proc/asound",
 "/proc/bus",
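DefaultMaskedPaths changes from a plain slice to a sync.OnceValue-backed function, so the glob expansion runs once and only when first needed; callers such as setupMaskedPaths above now invoke config.DefaultMaskedPaths(). A minimal sketch of the pattern (the path list is shortened, not the real one):

    // Minimal sketch of the sync.OnceValue pattern (Go 1.21+): the computation
    // runs once, on first call, and the result is cached.
    package main

    import (
        "fmt"
        "path/filepath"
        "sync"
    )

    var maskedPaths = sync.OnceValue(func() []string {
        paths := []string{"/proc/acpi", "/proc/kcore"}
        matches, err := filepath.Glob("/sys/devices/system/cpu/cpu*/thermal_throttle")
        if err != nil {
            panic(err) // only ErrBadPattern is possible; the pattern above is valid
        }
        return append(paths, matches...)
    })

    func main() {
        fmt.Println(len(maskedPaths()), "masked paths") // callers now call it as a function
    }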
@ -1,163 +0,0 @@
|
||||||
package atomicwriter
|
|
||||||
|
|
||||||
import (
|
|
||||||
"io"
|
|
||||||
"os"
|
|
||||||
"path/filepath"
|
|
||||||
)
|
|
||||||
|
|
||||||
// New returns a WriteCloser so that writing to it writes to a
|
|
||||||
// temporary file and closing it atomically changes the temporary file to
|
|
||||||
// destination path. Writing and closing concurrently is not allowed.
|
|
||||||
// NOTE: umask is not considered for the file's permissions.
|
|
||||||
func New(filename string, perm os.FileMode) (io.WriteCloser, error) {
|
|
||||||
f, err := os.CreateTemp(filepath.Dir(filename), ".tmp-"+filepath.Base(filename))
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
|
|
||||||
abspath, err := filepath.Abs(filename)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
return &atomicFileWriter{
|
|
||||||
f: f,
|
|
||||||
fn: abspath,
|
|
||||||
perm: perm,
|
|
||||||
}, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// WriteFile atomically writes data to a file named by filename and with the specified permission bits.
|
|
||||||
// NOTE: umask is not considered for the file's permissions.
|
|
||||||
func WriteFile(filename string, data []byte, perm os.FileMode) error {
|
|
||||||
f, err := New(filename, perm)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
n, err := f.Write(data)
|
|
||||||
if err == nil && n < len(data) {
|
|
||||||
err = io.ErrShortWrite
|
|
||||||
f.(*atomicFileWriter).writeErr = err
|
|
||||||
}
|
|
||||||
if err1 := f.Close(); err == nil {
|
|
||||||
err = err1
|
|
||||||
}
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
type atomicFileWriter struct {
|
|
||||||
f *os.File
|
|
||||||
fn string
|
|
||||||
writeErr error
|
|
||||||
perm os.FileMode
|
|
||||||
}
|
|
||||||
|
|
||||||
func (w *atomicFileWriter) Write(dt []byte) (int, error) {
|
|
||||||
n, err := w.f.Write(dt)
|
|
||||||
if err != nil {
|
|
||||||
w.writeErr = err
|
|
||||||
}
|
|
||||||
return n, err
|
|
||||||
}
|
|
||||||
|
|
||||||
func (w *atomicFileWriter) Close() (retErr error) {
|
|
||||||
defer func() {
|
|
||||||
if retErr != nil || w.writeErr != nil {
|
|
||||||
os.Remove(w.f.Name())
|
|
||||||
}
|
|
||||||
}()
|
|
||||||
if err := w.f.Sync(); err != nil {
|
|
||||||
w.f.Close()
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
if err := w.f.Close(); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
if err := os.Chmod(w.f.Name(), w.perm); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
if w.writeErr == nil {
|
|
||||||
return os.Rename(w.f.Name(), w.fn)
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// WriteSet is used to atomically write a set
|
|
||||||
// of files and ensure they are visible at the same time.
|
|
||||||
// Must be committed to a new directory.
|
|
||||||
type WriteSet struct {
|
|
||||||
root string
|
|
||||||
}
|
|
||||||
|
|
||||||
// NewWriteSet creates a new atomic write set to
|
|
||||||
// atomically create a set of files. The given directory
|
|
||||||
// is used as the base directory for storing files before
|
|
||||||
// commit. If no temporary directory is given the system
|
|
||||||
// default is used.
|
|
||||||
func NewWriteSet(tmpDir string) (*WriteSet, error) {
|
|
||||||
td, err := os.MkdirTemp(tmpDir, "write-set-")
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
|
|
||||||
return &WriteSet{
|
|
||||||
root: td,
|
|
||||||
}, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// WriteFile writes a file to the set, guaranteeing the file
|
|
||||||
// has been synced.
|
|
||||||
func (ws *WriteSet) WriteFile(filename string, data []byte, perm os.FileMode) error {
|
|
||||||
f, err := ws.FileWriter(filename, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, perm)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
n, err := f.Write(data)
|
|
||||||
if err == nil && n < len(data) {
|
|
||||||
err = io.ErrShortWrite
|
|
||||||
}
|
|
||||||
if err1 := f.Close(); err == nil {
|
|
||||||
err = err1
|
|
||||||
}
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
type syncFileCloser struct {
|
|
||||||
*os.File
|
|
||||||
}
|
|
||||||
|
|
||||||
func (w syncFileCloser) Close() error {
|
|
||||||
err := w.File.Sync()
|
|
||||||
if err1 := w.File.Close(); err == nil {
|
|
||||||
err = err1
|
|
||||||
}
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
// FileWriter opens a file writer inside the set. The file
|
|
||||||
// should be synced and closed before calling commit.
|
|
||||||
func (ws *WriteSet) FileWriter(name string, flag int, perm os.FileMode) (io.WriteCloser, error) {
|
|
||||||
f, err := os.OpenFile(filepath.Join(ws.root, name), flag, perm)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
return syncFileCloser{f}, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// Cancel cancels the set and removes all temporary data
|
|
||||||
// created in the set.
|
|
||||||
func (ws *WriteSet) Cancel() error {
|
|
||||||
return os.RemoveAll(ws.root)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Commit moves all created files to the target directory. The
|
|
||||||
// target directory must not exist and the parent of the target
|
|
||||||
// directory must exist.
|
|
||||||
func (ws *WriteSet) Commit(target string) error {
|
|
||||||
return os.Rename(ws.root, target)
|
|
||||||
}
|
|
||||||
|
|
||||||
// String returns the location the set is writing to.
|
|
||||||
func (ws *WriteSet) String() string {
|
|
||||||
return ws.root
|
|
||||||
}
|
|
|
@ -1,44 +0,0 @@
|
||||||
package ioutils
|
|
||||||
|
|
||||||
import (
|
|
||||||
"io"
|
|
||||||
"os"
|
|
||||||
|
|
||||||
"github.com/docker/docker/pkg/atomicwriter"
|
|
||||||
)
|
|
||||||
|
|
||||||
// NewAtomicFileWriter returns WriteCloser so that writing to it writes to a
|
|
||||||
// temporary file and closing it atomically changes the temporary file to
|
|
||||||
// destination path. Writing and closing concurrently is not allowed.
|
|
||||||
// NOTE: umask is not considered for the file's permissions.
|
|
||||||
//
|
|
||||||
// Deprecated: use [atomicwriter.New] instead.
|
|
||||||
func NewAtomicFileWriter(filename string, perm os.FileMode) (io.WriteCloser, error) {
|
|
||||||
return atomicwriter.New(filename, perm)
|
|
||||||
}
|
|
||||||
|
|
||||||
// AtomicWriteFile atomically writes data to a file named by filename and with the specified permission bits.
|
|
||||||
// NOTE: umask is not considered for the file's permissions.
|
|
||||||
//
|
|
||||||
// Deprecated: use [atomicwriter.WriteFile] instead.
|
|
||||||
func AtomicWriteFile(filename string, data []byte, perm os.FileMode) error {
|
|
||||||
return atomicwriter.WriteFile(filename, data, perm)
|
|
||||||
}
|
|
||||||
|
|
||||||
// AtomicWriteSet is used to atomically write a set
|
|
||||||
// of files and ensure they are visible at the same time.
|
|
||||||
// Must be committed to a new directory.
|
|
||||||
//
|
|
||||||
// Deprecated: use [atomicwriter.WriteSet] instead.
|
|
||||||
type AtomicWriteSet = atomicwriter.WriteSet
|
|
||||||
|
|
||||||
// NewAtomicWriteSet creates a new atomic write set to
|
|
||||||
// atomically create a set of files. The given directory
|
|
||||||
// is used as the base directory for storing files before
|
|
||||||
// commit. If no temporary directory is given the system
|
|
||||||
// default is used.
|
|
||||||
//
|
|
||||||
// Deprecated: use [atomicwriter.NewWriteSet] instead.
|
|
||||||
func NewAtomicWriteSet(tmpDir string) (*atomicwriter.WriteSet, error) {
|
|
||||||
return atomicwriter.NewWriteSet(tmpDir)
|
|
||||||
}
|
|
|
@ -1,118 +0,0 @@
|
||||||
package ioutils // import "github.com/docker/docker/pkg/ioutils"
|
|
||||||
|
|
||||||
import (
|
|
||||||
"context"
|
|
||||||
"io"
|
|
||||||
"runtime/debug"
|
|
||||||
"sync/atomic"
|
|
||||||
|
|
||||||
"github.com/containerd/log"
|
|
||||||
)
|
|
||||||
|
|
||||||
// readCloserWrapper wraps an io.Reader, and implements an io.ReadCloser
|
|
||||||
// It calls the given callback function when closed. It should be constructed
|
|
||||||
// with NewReadCloserWrapper
|
|
||||||
type readCloserWrapper struct {
|
|
||||||
io.Reader
|
|
||||||
closer func() error
|
|
||||||
closed atomic.Bool
|
|
||||||
}
|
|
||||||
|
|
||||||
// Close calls back the passed closer function
|
|
||||||
func (r *readCloserWrapper) Close() error {
|
|
||||||
if !r.closed.CompareAndSwap(false, true) {
|
|
||||||
subsequentCloseWarn("ReadCloserWrapper")
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
return r.closer()
|
|
||||||
}
|
|
||||||
|
|
||||||
// NewReadCloserWrapper wraps an io.Reader, and implements an io.ReadCloser.
|
|
||||||
// It calls the given callback function when closed.
|
|
||||||
func NewReadCloserWrapper(r io.Reader, closer func() error) io.ReadCloser {
|
|
||||||
return &readCloserWrapper{
|
|
||||||
Reader: r,
|
|
||||||
closer: closer,
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// cancelReadCloser wraps an io.ReadCloser with a context for cancelling read
|
|
||||||
// operations.
|
|
||||||
type cancelReadCloser struct {
|
|
||||||
cancel func()
|
|
||||||
pR *io.PipeReader // Stream to read from
|
|
||||||
pW *io.PipeWriter
|
|
||||||
closed atomic.Bool
|
|
||||||
}
|
|
||||||
|
|
||||||
// NewCancelReadCloser creates a wrapper that closes the ReadCloser when the
|
|
||||||
// context is cancelled. The returned io.ReadCloser must be closed when it is
|
|
||||||
// no longer needed.
|
|
||||||
func NewCancelReadCloser(ctx context.Context, in io.ReadCloser) io.ReadCloser {
|
|
||||||
pR, pW := io.Pipe()
|
|
||||||
|
|
||||||
// Create a context used to signal when the pipe is closed
|
|
||||||
doneCtx, cancel := context.WithCancel(context.Background())
|
|
||||||
|
|
||||||
p := &cancelReadCloser{
|
|
||||||
cancel: cancel,
|
|
||||||
pR: pR,
|
|
||||||
pW: pW,
|
|
||||||
}
|
|
||||||
|
|
||||||
go func() {
|
|
||||||
_, err := io.Copy(pW, in)
|
|
||||||
select {
|
|
||||||
case <-ctx.Done():
|
|
||||||
// If the context was closed, p.closeWithError
|
|
||||||
// was already called. Calling it again would
|
|
||||||
// change the error that Read returns.
|
|
||||||
default:
|
|
||||||
p.closeWithError(err)
|
|
||||||
}
|
|
||||||
in.Close()
|
|
||||||
}()
|
|
||||||
go func() {
|
|
||||||
for {
|
|
||||||
select {
|
|
||||||
case <-ctx.Done():
|
|
||||||
p.closeWithError(ctx.Err())
|
|
||||||
case <-doneCtx.Done():
|
|
||||||
return
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}()
|
|
||||||
|
|
||||||
return p
|
|
||||||
}
|
|
||||||
|
|
||||||
// Read wraps the Read method of the pipe that provides data from the wrapped
|
|
||||||
// ReadCloser.
|
|
||||||
func (p *cancelReadCloser) Read(buf []byte) (n int, err error) {
|
|
||||||
return p.pR.Read(buf)
|
|
||||||
}
|
|
||||||
|
|
||||||
// closeWithError closes the wrapper and its underlying reader. It will
|
|
||||||
// cause future calls to Read to return err.
|
|
||||||
func (p *cancelReadCloser) closeWithError(err error) {
|
|
||||||
p.pW.CloseWithError(err)
|
|
||||||
p.cancel()
|
|
||||||
}
|
|
||||||
|
|
||||||
// Close closes the wrapper its underlying reader. It will cause
|
|
||||||
// future calls to Read to return io.EOF.
|
|
||||||
func (p *cancelReadCloser) Close() error {
|
|
||||||
if !p.closed.CompareAndSwap(false, true) {
|
|
||||||
subsequentCloseWarn("cancelReadCloser")
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
p.closeWithError(io.EOF)
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func subsequentCloseWarn(name string) {
|
|
||||||
log.G(context.TODO()).Error("subsequent attempt to close " + name)
|
|
||||||
if log.GetLevel() >= log.DebugLevel {
|
|
||||||
log.G(context.TODO()).Errorf("stack trace: %s", string(debug.Stack()))
|
|
||||||
}
|
|
||||||
}
|
|
|
@ -1,96 +0,0 @@
|
||||||
package ioutils // import "github.com/docker/docker/pkg/ioutils"
|
|
||||||
|
|
||||||
import (
|
|
||||||
"io"
|
|
||||||
"sync"
|
|
||||||
)
|
|
||||||
|
|
||||||
// WriteFlusher wraps the Write and Flush operation ensuring that every write
|
|
||||||
// is a flush. In addition, the Close method can be called to intercept
|
|
||||||
// Read/Write calls if the targets lifecycle has already ended.
|
|
||||||
type WriteFlusher struct {
|
|
||||||
w io.Writer
|
|
||||||
flusher flusher
|
|
||||||
flushed chan struct{}
|
|
||||||
flushedOnce sync.Once
|
|
||||||
closed chan struct{}
|
|
||||||
closeLock sync.Mutex
|
|
||||||
}
|
|
||||||
|
|
||||||
type flusher interface {
|
|
||||||
Flush()
|
|
||||||
}
|
|
||||||
|
|
||||||
func (wf *WriteFlusher) Write(b []byte) (n int, err error) {
|
|
||||||
select {
|
|
||||||
case <-wf.closed:
|
|
||||||
return 0, io.EOF
|
|
||||||
default:
|
|
||||||
}
|
|
||||||
|
|
||||||
n, err = wf.w.Write(b)
|
|
||||||
wf.Flush() // every write is a flush.
|
|
||||||
return n, err
|
|
||||||
}
|
|
||||||
|
|
||||||
// Flush the stream immediately.
|
|
||||||
func (wf *WriteFlusher) Flush() {
|
|
||||||
select {
|
|
||||||
case <-wf.closed:
|
|
||||||
return
|
|
||||||
default:
|
|
||||||
}
|
|
||||||
|
|
||||||
wf.flushedOnce.Do(func() {
|
|
||||||
close(wf.flushed)
|
|
||||||
})
|
|
||||||
wf.flusher.Flush()
|
|
||||||
}
|
|
||||||
|
|
||||||
// Flushed returns the state of flushed.
|
|
||||||
// If it's flushed, return true, or else it return false.
|
|
||||||
func (wf *WriteFlusher) Flushed() bool {
|
|
||||||
// BUG(stevvooe): Remove this method. Its use is inherently racy. Seems to
|
|
||||||
// be used to detect whether or a response code has been issued or not.
|
|
||||||
// Another hook should be used instead.
|
|
||||||
var flushed bool
|
|
||||||
select {
|
|
||||||
case <-wf.flushed:
|
|
||||||
flushed = true
|
|
||||||
default:
|
|
||||||
}
|
|
||||||
return flushed
|
|
||||||
}
|
|
||||||
|
|
||||||
// Close closes the write flusher, disallowing any further writes to the
|
|
||||||
// target. After the flusher is closed, all calls to write or flush will
|
|
||||||
// result in an error.
|
|
||||||
func (wf *WriteFlusher) Close() error {
|
|
||||||
wf.closeLock.Lock()
|
|
||||||
defer wf.closeLock.Unlock()
|
|
||||||
|
|
||||||
select {
|
|
||||||
case <-wf.closed:
|
|
||||||
return io.EOF
|
|
||||||
default:
|
|
||||||
close(wf.closed)
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// nopFlusher represents a type which flush operation is nop.
|
|
||||||
type nopFlusher struct{}
|
|
||||||
|
|
||||||
// Flush is a nop operation.
|
|
||||||
func (f *nopFlusher) Flush() {}
|
|
||||||
|
|
||||||
// NewWriteFlusher returns a new WriteFlusher.
|
|
||||||
func NewWriteFlusher(w io.Writer) *WriteFlusher {
|
|
||||||
var fl flusher
|
|
||||||
if f, ok := w.(flusher); ok {
|
|
||||||
fl = f
|
|
||||||
} else {
|
|
||||||
fl = &nopFlusher{}
|
|
||||||
}
|
|
||||||
return &WriteFlusher{w: w, flusher: fl, closed: make(chan struct{}), flushed: make(chan struct{})}
|
|
||||||
}
|
|
|
@ -1,28 +0,0 @@
|
||||||
package ioutils // import "github.com/docker/docker/pkg/ioutils"
|
|
||||||
|
|
||||||
import (
|
|
||||||
"io"
|
|
||||||
"sync/atomic"
|
|
||||||
)
|
|
||||||
|
|
||||||
type writeCloserWrapper struct {
|
|
||||||
io.Writer
|
|
||||||
closer func() error
|
|
||||||
closed atomic.Bool
|
|
||||||
}
|
|
||||||
|
|
||||||
func (r *writeCloserWrapper) Close() error {
|
|
||||||
if !r.closed.CompareAndSwap(false, true) {
|
|
||||||
subsequentCloseWarn("WriteCloserWrapper")
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
return r.closer()
|
|
||||||
}
|
|
||||||
|
|
||||||
// NewWriteCloserWrapper returns a new io.WriteCloser.
|
|
||||||
func NewWriteCloserWrapper(r io.Writer, closer func() error) io.WriteCloser {
|
|
||||||
return &writeCloserWrapper{
|
|
||||||
Writer: r,
|
|
||||||
closer: closer,
|
|
||||||
}
|
|
||||||
}
|
|
|
@@ -1,3 +1,15 @@
+## 2.23.2
+
+🎉🎉🎉
+
+At long last, some long-standing performance gaps between `ginkgo` and `go test` have been resolved!
+
+Ginkgo operates by running `go test -c` to generate test binaries, and then running those binaries. It turns out that the compilation step of `go test -c` is slower than `go test`'s compilation step because `go test` strips out debug symbols (`ldflags=-w`) whereas `go test -c` does not.
+
+Ginkgo now passes the appropriate `ldflags` to `go test -c` when running specs to strip out symbols. This is only done when it is safe to do so and symbols are preferred when profiling is enabled and when `ginkgo build` is called explicitly.
+
+This, coupled, with the [instructions for disabling XProtect on MacOS](https://onsi.github.io/ginkgo/#if-you-are-running-on-macos) yields a much better performance experience with Ginkgo.
+
 ## 2.23.1

 ## 🚨 For users on MacOS 🚨
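To make the changelog entry concrete: when profiling is off, no custom ldflags are set, and the compiled binary does not need to be preserved, the CLI now effectively compiles with symbols stripped. A rough, hedged illustration of the equivalent command (the package path is an example, and this is not the exact invocation Ginkgo constructs internally):

    // Rough illustration of the stripped-symbol compile described above.
    package main

    import (
        "os"
        "os/exec"
    )

    func main() {
        // Roughly equivalent to: go test -c -ldflags "-w -s" ./mypackage
        cmd := exec.Command("go", "test", "-c", "-ldflags", "-w -s", "./mypackage")
        cmd.Stdout, cmd.Stderr = os.Stdout, os.Stderr
        _ = cmd.Run()
    }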
@@ -44,7 +44,7 @@ func buildSpecs(args []string, cliConfig types.CLIConfig, goFlagsConfig types.Go
 internal.VerifyCLIAndFrameworkVersion(suites)

 opc := internal.NewOrderedParallelCompiler(cliConfig.ComputedNumCompilers())
-opc.StartCompiling(suites, goFlagsConfig)
+opc.StartCompiling(suites, goFlagsConfig, true)

 for {
 suiteIdx, suite := opc.Next()
@@ -26,7 +26,7 @@ func (c Command) Run(args []string, additionalArgs []string) {
 }
 for _, arg := range args {
 if strings.HasPrefix(arg, "-") {
-AbortWith("Malformed arguments - make sure all flags appear {{bold}}after{{/}} the Ginkgo subcommand and {{bold}}before{{/}} your list of packages.\n{{gray}}e.g. 'ginkgo run -p my_package' is valid `ginkgo -p run my_package` is not.{{/}}")
+AbortWith(types.GinkgoErrors.FlagAfterPositionalParameter().Error())
 }
 }
 c.Command(args, additionalArgs)
@@ -11,7 +11,7 @@ import (
 "github.com/onsi/ginkgo/v2/types"
 )

-func CompileSuite(suite TestSuite, goFlagsConfig types.GoFlagsConfig) TestSuite {
+func CompileSuite(suite TestSuite, goFlagsConfig types.GoFlagsConfig, preserveSymbols bool) TestSuite {
 if suite.PathToCompiledTest != "" {
 return suite
 }
@@ -46,7 +46,7 @@ func CompileSuite(suite TestSuite, goFlagsConfig types.GoFlagsConfig) TestSuite
 suite.CompilationError = fmt.Errorf("Failed to get relative path from package to the current working directory:\n%s", err.Error())
 return suite
 }
-args, err := types.GenerateGoTestCompileArgs(goFlagsConfig, "./", pathToInvocationPath)
+args, err := types.GenerateGoTestCompileArgs(goFlagsConfig, "./", pathToInvocationPath, preserveSymbols)
 if err != nil {
 suite.State = TestSuiteStateFailedToCompile
 suite.CompilationError = fmt.Errorf("Failed to generate go test compile flags:\n%s", err.Error())
@@ -120,7 +120,7 @@ func NewOrderedParallelCompiler(numCompilers int) *OrderedParallelCompiler {
 }
 }

-func (opc *OrderedParallelCompiler) StartCompiling(suites TestSuites, goFlagsConfig types.GoFlagsConfig) {
+func (opc *OrderedParallelCompiler) StartCompiling(suites TestSuites, goFlagsConfig types.GoFlagsConfig, preserveSymbols bool) {
 opc.stopped = false
 opc.idx = 0
 opc.numSuites = len(suites)
@@ -135,7 +135,7 @@ func (opc *OrderedParallelCompiler) StartCompiling(suites TestSuites, goFlagsCon
 stopped := opc.stopped
 opc.mutex.Unlock()
 if !stopped {
-suite = CompileSuite(suite, goFlagsConfig)
+suite = CompileSuite(suite, goFlagsConfig, preserveSymbols)
 }
 c <- suite
 }
@@ -107,7 +107,7 @@ OUTER_LOOP:
 }

 opc := internal.NewOrderedParallelCompiler(r.cliConfig.ComputedNumCompilers())
-opc.StartCompiling(suites, r.goFlagsConfig)
+opc.StartCompiling(suites, r.goFlagsConfig, false)

 SUITE_LOOP:
 for {
@@ -153,7 +153,7 @@ func (w *SpecWatcher) WatchSpecs(args []string, additionalArgs []string) {
 }

 func (w *SpecWatcher) compileAndRun(suite internal.TestSuite, additionalArgs []string) internal.TestSuite {
-suite = internal.CompileSuite(suite, w.goFlagsConfig)
+suite = internal.CompileSuite(suite, w.goFlagsConfig, false)
 if suite.State.Is(internal.TestSuiteStateFailedToCompile) {
 fmt.Println(suite.CompilationError.Error())
 return suite
@@ -231,6 +231,10 @@ func (g GoFlagsConfig) BinaryMustBePreserved() bool {
 return g.BlockProfile != "" || g.CPUProfile != "" || g.MemProfile != "" || g.MutexProfile != ""
 }

+func (g GoFlagsConfig) NeedsSymbols() bool {
+return g.BinaryMustBePreserved()
+}
+
 // Configuration that were deprecated in 2.0
 type deprecatedConfig struct {
 DebugParallel bool
@@ -640,7 +644,7 @@ func VetAndInitializeCLIAndGoConfig(cliConfig CLIConfig, goFlagsConfig GoFlagsCo
 }

 // GenerateGoTestCompileArgs is used by the Ginkgo CLI to generate command line arguments to pass to the go test -c command when compiling the test
-func GenerateGoTestCompileArgs(goFlagsConfig GoFlagsConfig, packageToBuild string, pathToInvocationPath string) ([]string, error) {
+func GenerateGoTestCompileArgs(goFlagsConfig GoFlagsConfig, packageToBuild string, pathToInvocationPath string, preserveSymbols bool) ([]string, error) {
 // if the user has set the CoverProfile run-time flag make sure to set the build-time cover flag to make sure
 // the built test binary can generate a coverprofile
 if goFlagsConfig.CoverProfile != "" {
@@ -663,6 +667,10 @@ func GenerateGoTestCompileArgs(goFlagsConfig GoFlagsConfig, packageToBuild strin
 goFlagsConfig.CoverPkg = strings.Join(adjustedCoverPkgs, ",")
 }

+if !goFlagsConfig.NeedsSymbols() && goFlagsConfig.LDFlags == "" && !preserveSymbols {
+goFlagsConfig.LDFlags = "-w -s"
+}
+
 args := []string{"test", "-c", packageToBuild}
 goArgs, err := GenerateFlagArgs(
 GoBuildFlags,
@@ -636,6 +636,13 @@ func (g ginkgoErrors) ExpectFilenameNotPath(flag string, path string) error {
 }
 }

+func (g ginkgoErrors) FlagAfterPositionalParameter() error {
+return GinkgoError{
+Heading: "Malformed arguments - detected a flag after the package liste",
+Message: "Make sure all flags appear {{bold}}after{{/}} the Ginkgo subcommand and {{bold}}before{{/}} your list of packages (or './...').\n{{gray}}e.g. 'ginkgo run -p my_package' is valid but `ginkgo -p run my_package` is not.\n{{gray}}e.g. 'ginkgo -p -vet ./...' is valid but 'ginkgo -p ./... -vet' is not{{/}}",
+}
+}
+
 /* Stack-Trace parsing errors */

 func (g ginkgoErrors) FailedToParseStackTrace(message string) error {
@@ -1,3 +1,3 @@
 package types

-const VERSION = "2.23.1"
+const VERSION = "2.23.2"
@@ -3,6 +3,7 @@ package imagebuilder
 import (
 "bytes"
 "fmt"
+"io"
 "io/ioutil"
 "log"
 "os"
@@ -716,14 +717,14 @@ var builtinAllowedBuildArgs = map[string]bool{
 "no_proxy": true,
 }

-// ParseIgnore returns a list of the excludes in the specified path
-// path should be a file with the .dockerignore format
+// ParseIgnoreReader returns a list of the excludes in the provided file
+// which uses the .dockerignore format
 // extracted from fsouza/go-dockerclient and modified to drop comments and
 // empty lines.
-func ParseIgnore(path string) ([]string, error) {
+func ParseIgnoreReader(r io.Reader) ([]string, error) {
 var excludes []string

-ignores, err := ioutil.ReadFile(path)
+ignores, err := io.ReadAll(r)
 if err != nil {
 return excludes, err
 }
@@ -739,6 +740,18 @@ func ParseIgnore(path string) ([]string, error) {
 return excludes, nil
 }

+// ParseIgnore returns a list returned by having ParseIgnoreReader() read the
+// specified path
+func ParseIgnore(path string) ([]string, error) {
+var excludes []string
+
+ignores, err := ioutil.ReadFile(path)
+if err != nil {
+return excludes, err
+}
+return ParseIgnoreReader(bytes.NewReader(ignores))
+}
+
 // ParseDockerIgnore returns a list of the excludes in the .containerignore or .dockerignore file.
 func ParseDockerignore(root string) ([]string, error) {
 excludes, err := ParseIgnore(filepath.Join(root, ".containerignore"))
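ParseIgnoreReader splits the old ParseIgnore into a reader-based core, so exclude lists can come from any io.Reader rather than only a file on disk. A hypothetical use (the ignore content is a made-up example):

    // Hypothetical use of the new ParseIgnoreReader entry point.
    package main

    import (
        "fmt"
        "strings"

        "github.com/openshift/imagebuilder"
    )

    func main() {
        ignore := "# comment\nnode_modules\n*.log\n\n"
        excludes, err := imagebuilder.ParseIgnoreReader(strings.NewReader(ignore))
        if err != nil {
            panic(err)
        }
        fmt.Println(excludes) // comments and empty lines are dropped
    }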
@@ -41,18 +41,11 @@ var builtinArgDefaults = map[string]string{
 "TARGETPLATFORM": localspec.OS + "/" + localspec.Architecture,
 "TARGETOS": localspec.OS,
 "TARGETARCH": localspec.Architecture,
-"TARGETVARIANT": localspec.Variant,
+"TARGETVARIANT": "",
 "BUILDPLATFORM": localspec.OS + "/" + localspec.Architecture,
 "BUILDOS": localspec.OS,
 "BUILDARCH": localspec.Architecture,
-"BUILDVARIANT": localspec.Variant,
+"BUILDVARIANT": "",
-}
-
-func init() {
-if localspec.Variant != "" {
-builtinArgDefaults["TARGETPLATFORM"] = builtinArgDefaults["TARGETPLATFORM"] + "/" + localspec.Variant
-builtinArgDefaults["BUILDPLATFORM"] = builtinArgDefaults["BUILDPLATFORM"] + "/" + localspec.Variant
-}
 }

 // ENV foo bar
@@ -12,7 +12,7 @@
 #

 %global golang_version 1.19
-%{!?version: %global version 1.2.15}
+%{!?version: %global version 1.2.16-dev}
 %{!?release: %global release 1}
 %global package_name imagebuilder
 %global product_name Container Image Builder
@@ -1,2 +1,26 @@
+linters:
+  enable:
+    - errcheck
+    - errorlint
+    - gocritic
+    - gosec
+    - gosimple
+    - govet
+    - gci
+    - misspell
+    - nonamedreturns
+    - staticcheck
+    - unconvert
+    - unparam
+    - unused
+    - whitespace
+
+linters-settings:
+  gci:
+    sections:
+      - standard
+      - default
+      - prefix(github.com/vishvananda)
+
 run:
   timeout: 5m

@@ -0,0 +1,9 @@
+---
+extends: default
+
+rules:
+  document-start: disable
+  line-length: disable
+  truthy:
+    ignore: |
+      .github/workflows/*.yml
@@ -26,19 +26,19 @@ const bindMountPath = "/run/netns" /* Bind mount path for named netns */
 // Setns sets namespace using golang.org/x/sys/unix.Setns.
 //
 // Deprecated: Use golang.org/x/sys/unix.Setns instead.
-func Setns(ns NsHandle, nstype int) (err error) {
+func Setns(ns NsHandle, nstype int) error {
 return unix.Setns(int(ns), nstype)
 }

 // Set sets the current network namespace to the namespace represented
 // by NsHandle.
-func Set(ns NsHandle) (err error) {
+func Set(ns NsHandle) error {
 return unix.Setns(int(ns), unix.CLONE_NEWNET)
 }

 // New creates a new network namespace, sets it as current and returns
 // a handle to it.
-func New() (ns NsHandle, err error) {
+func New() (NsHandle, error) {
 if err := unix.Unshare(unix.CLONE_NEWNET); err != nil {
 return -1, err
 }
@@ -49,7 +49,7 @@ func New() (ns NsHandle, err error) {
 // and returns a handle to it
 func NewNamed(name string) (NsHandle, error) {
 if _, err := os.Stat(bindMountPath); os.IsNotExist(err) {
-err = os.MkdirAll(bindMountPath, 0755)
+err = os.MkdirAll(bindMountPath, 0o755)
 if err != nil {
 return None(), err
 }
@@ -62,7 +62,7 @@ func NewNamed(name string) (NsHandle, error) {

 namedPath := path.Join(bindMountPath, name)

-f, err := os.OpenFile(namedPath, os.O_CREATE|os.O_EXCL, 0444)
+f, err := os.OpenFile(namedPath, os.O_CREATE|os.O_EXCL, 0o444)
 if err != nil {
 newNs.Close()
 return None(), err
|
||||||
id += "*"
|
id += "*"
|
||||||
|
|
||||||
var pidFile string
|
var pidFile string
|
||||||
if cgroupVer == 1 {
|
switch cgroupVer {
|
||||||
|
case 1:
|
||||||
pidFile = "tasks"
|
pidFile = "tasks"
|
||||||
} else if cgroupVer == 2 {
|
case 2:
|
||||||
pidFile = "cgroup.procs"
|
pidFile = "cgroup.procs"
|
||||||
} else {
|
default:
|
||||||
return -1, fmt.Errorf("Invalid cgroup version '%d'", cgroupVer)
|
return -1, fmt.Errorf("Invalid cgroup version '%d'", cgroupVer)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -247,6 +248,10 @@ func getPidForContainer(id string) (int, error) {
|
||||||
filepath.Join(cgroupRoot, "kubepods.slice", "*.slice", "*", "docker-"+id+".scope", pidFile),
|
filepath.Join(cgroupRoot, "kubepods.slice", "*.slice", "*", "docker-"+id+".scope", pidFile),
|
||||||
// Same as above but for Guaranteed QoS
|
// Same as above but for Guaranteed QoS
|
||||||
filepath.Join(cgroupRoot, "kubepods.slice", "*", "docker-"+id+".scope", pidFile),
|
filepath.Join(cgroupRoot, "kubepods.slice", "*", "docker-"+id+".scope", pidFile),
|
||||||
|
// Support for nerdctl
|
||||||
|
filepath.Join(cgroupRoot, "system.slice", "nerdctl-"+id+".scope", pidFile),
|
||||||
|
// Support for finch
|
||||||
|
filepath.Join(cgroupRoot, "..", "systemd", "finch", id, pidFile),
|
||||||
}
|
}
|
||||||
|
|
||||||
var filename string
|
var filename string
|
||||||
|
@ -276,7 +281,7 @@ func getPidForContainer(id string) (int, error) {
|
||||||
|
|
||||||
pid, err = strconv.Atoi(result[0])
|
pid, err = strconv.Atoi(result[0])
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return pid, fmt.Errorf("Invalid pid '%s': %s", result[0], err)
|
return pid, fmt.Errorf("Invalid pid '%s': %w", result[0], err)
|
||||||
}
|
}
|
||||||
|
|
||||||
return pid, nil
|
return pid, nil
|
||||||
|
|
|
@ -3,27 +3,23 @@
|
||||||
|
|
||||||
package netns
|
package netns
|
||||||
|
|
||||||
import (
|
import "errors"
|
||||||
"errors"
|
|
||||||
)
|
|
||||||
|
|
||||||
var (
|
var ErrNotImplemented = errors.New("not implemented")
|
||||||
ErrNotImplemented = errors.New("not implemented")
|
|
||||||
)
|
|
||||||
|
|
||||||
// Setns sets namespace using golang.org/x/sys/unix.Setns on Linux. It
|
// Setns sets namespace using golang.org/x/sys/unix.Setns on Linux. It
|
||||||
// is not implemented on other platforms.
|
// is not implemented on other platforms.
|
||||||
//
|
//
|
||||||
// Deprecated: Use golang.org/x/sys/unix.Setns instead.
|
// Deprecated: Use golang.org/x/sys/unix.Setns instead.
|
||||||
func Setns(ns NsHandle, nstype int) (err error) {
|
func Setns(ns NsHandle, nstype int) error {
|
||||||
return ErrNotImplemented
|
return ErrNotImplemented
|
||||||
}
|
}
|
||||||
|
|
||||||
func Set(ns NsHandle) (err error) {
|
func Set(ns NsHandle) error {
|
||||||
return ErrNotImplemented
|
return ErrNotImplemented
|
||||||
}
|
}
|
||||||
|
|
||||||
func New() (ns NsHandle, err error) {
|
func New() (NsHandle, error) {
|
||||||
return -1, ErrNotImplemented
|
return -1, ErrNotImplemented
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -51,7 +47,7 @@ func GetFromPid(pid int) (NsHandle, error) {
|
||||||
return -1, ErrNotImplemented
|
return -1, ErrNotImplemented
|
||||||
}
|
}
|
||||||
|
|
||||||
func GetFromThread(pid, tid int) (NsHandle, error) {
|
func GetFromThread(pid int, tid int) (NsHandle, error) {
|
||||||
return -1, ErrNotImplemented
|
return -1, ErrNotImplemented
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
|
@ -147,8 +147,8 @@ github.com/containernetworking/cni/pkg/version
|
||||||
# github.com/containernetworking/plugins v1.6.2
|
# github.com/containernetworking/plugins v1.6.2
|
||||||
## explicit; go 1.23
|
## explicit; go 1.23
|
||||||
github.com/containernetworking/plugins/pkg/ns
|
github.com/containernetworking/plugins/pkg/ns
|
||||||
# github.com/containers/buildah v1.39.2
|
# github.com/containers/buildah v1.39.1-0.20250321123219-bc4d7eb70fe3
|
||||||
## explicit; go 1.22.8
|
## explicit; go 1.23.0
|
||||||
github.com/containers/buildah
|
github.com/containers/buildah
|
||||||
github.com/containers/buildah/bind
|
github.com/containers/buildah/bind
|
||||||
github.com/containers/buildah/chroot
|
github.com/containers/buildah/chroot
|
||||||
|
@ -179,7 +179,7 @@ github.com/containers/buildah/pkg/sshagent
|
||||||
github.com/containers/buildah/pkg/util
|
github.com/containers/buildah/pkg/util
|
||||||
github.com/containers/buildah/pkg/volumes
|
github.com/containers/buildah/pkg/volumes
|
||||||
github.com/containers/buildah/util
|
github.com/containers/buildah/util
|
||||||
# github.com/containers/common v0.62.3-0.20250320113334-33bf9345b5ef
|
# github.com/containers/common v0.62.3-0.20250320215058-fa53559b5062
|
||||||
## explicit; go 1.23.0
|
## explicit; go 1.23.0
|
||||||
github.com/containers/common/internal
|
github.com/containers/common/internal
|
||||||
github.com/containers/common/internal/attributedstring
|
github.com/containers/common/internal/attributedstring
|
||||||
|
@ -333,7 +333,7 @@ github.com/containers/libhvee/pkg/wmiext
|
||||||
# github.com/containers/libtrust v0.0.0-20230121012942-c1716e8a8d01
|
# github.com/containers/libtrust v0.0.0-20230121012942-c1716e8a8d01
|
||||||
## explicit
|
## explicit
|
||||||
github.com/containers/libtrust
|
github.com/containers/libtrust
|
||||||
# github.com/containers/luksy v0.0.0-20250106202729-a3a812db5b72
|
# github.com/containers/luksy v0.0.0-20250217190002-40bd943d93b8
|
||||||
## explicit; go 1.20
|
## explicit; go 1.20
|
||||||
github.com/containers/luksy
|
github.com/containers/luksy
|
||||||
# github.com/containers/ocicrypt v1.2.1
|
# github.com/containers/ocicrypt v1.2.1
|
||||||
|
@ -499,10 +499,8 @@ github.com/docker/docker/errdefs
|
||||||
github.com/docker/docker/internal/lazyregexp
|
github.com/docker/docker/internal/lazyregexp
|
||||||
github.com/docker/docker/internal/multierror
|
github.com/docker/docker/internal/multierror
|
||||||
github.com/docker/docker/pkg/archive
|
github.com/docker/docker/pkg/archive
|
||||||
github.com/docker/docker/pkg/atomicwriter
|
|
||||||
github.com/docker/docker/pkg/homedir
|
github.com/docker/docker/pkg/homedir
|
||||||
github.com/docker/docker/pkg/idtools
|
github.com/docker/docker/pkg/idtools
|
||||||
github.com/docker/docker/pkg/ioutils
|
|
||||||
github.com/docker/docker/pkg/jsonmessage
|
github.com/docker/docker/pkg/jsonmessage
|
||||||
github.com/docker/docker/pkg/meminfo
|
github.com/docker/docker/pkg/meminfo
|
||||||
github.com/docker/docker/pkg/namesgenerator
|
github.com/docker/docker/pkg/namesgenerator
|
||||||
|
@ -810,7 +808,7 @@ github.com/mistifyio/go-zfs/v3
|
||||||
# github.com/mitchellh/mapstructure v1.5.0
|
# github.com/mitchellh/mapstructure v1.5.0
|
||||||
## explicit; go 1.14
|
## explicit; go 1.14
|
||||||
github.com/mitchellh/mapstructure
|
github.com/mitchellh/mapstructure
|
||||||
# github.com/moby/buildkit v0.19.0
|
# github.com/moby/buildkit v0.20.1
|
||||||
## explicit; go 1.22.0
|
## explicit; go 1.22.0
|
||||||
github.com/moby/buildkit/frontend/dockerfile/command
|
github.com/moby/buildkit/frontend/dockerfile/command
|
||||||
github.com/moby/buildkit/frontend/dockerfile/parser
|
github.com/moby/buildkit/frontend/dockerfile/parser
|
||||||
|
@ -860,7 +858,7 @@ github.com/nxadm/tail/winfile
|
||||||
# github.com/oklog/ulid v1.3.1
|
# github.com/oklog/ulid v1.3.1
|
||||||
## explicit
|
## explicit
|
||||||
github.com/oklog/ulid
|
github.com/oklog/ulid
|
||||||
# github.com/onsi/ginkgo/v2 v2.23.1
|
# github.com/onsi/ginkgo/v2 v2.23.2
|
||||||
## explicit; go 1.23.0
|
## explicit; go 1.23.0
|
||||||
github.com/onsi/ginkgo/v2
|
github.com/onsi/ginkgo/v2
|
||||||
github.com/onsi/ginkgo/v2/config
|
github.com/onsi/ginkgo/v2/config
|
||||||
|
@ -933,7 +931,7 @@ github.com/opencontainers/runtime-tools/validate/capabilities
|
||||||
github.com/opencontainers/selinux/go-selinux
|
github.com/opencontainers/selinux/go-selinux
|
||||||
github.com/opencontainers/selinux/go-selinux/label
|
github.com/opencontainers/selinux/go-selinux/label
|
||||||
github.com/opencontainers/selinux/pkg/pwalkdir
|
github.com/opencontainers/selinux/pkg/pwalkdir
|
||||||
# github.com/openshift/imagebuilder v1.2.15
|
# github.com/openshift/imagebuilder v1.2.16-0.20250220150830-7ebfb09d364e
|
||||||
## explicit; go 1.21.0
|
## explicit; go 1.21.0
|
||||||
github.com/openshift/imagebuilder
|
github.com/openshift/imagebuilder
|
||||||
github.com/openshift/imagebuilder/dockerfile/command
|
github.com/openshift/imagebuilder/dockerfile/command
|
||||||
|
@ -1127,7 +1125,7 @@ github.com/vbauerster/mpb/v8/internal
|
||||||
## explicit; go 1.12
|
## explicit; go 1.12
|
||||||
github.com/vishvananda/netlink
|
github.com/vishvananda/netlink
|
||||||
github.com/vishvananda/netlink/nl
|
github.com/vishvananda/netlink/nl
|
||||||
# github.com/vishvananda/netns v0.0.4
|
# github.com/vishvananda/netns v0.0.5
|
||||||
## explicit; go 1.17
|
## explicit; go 1.17
|
||||||
github.com/vishvananda/netns
|
github.com/vishvananda/netns
|
||||||
# github.com/yusufpapurcu/wmi v1.2.4
|
# github.com/yusufpapurcu/wmi v1.2.4
|
||||||
|
|