vendor: update buildah to latest

Includes a fix for CVE-2024-9407

Signed-off-by: Paul Holzinger <pholzing@redhat.com>
This commit is contained in:
Paul Holzinger 2024-10-02 11:29:28 +02:00
parent dde1c3d98c
commit 83a0299309
No known key found for this signature in database
GPG Key ID: EB145DD938A3CAF2
106 changed files with 1414 additions and 1115 deletions

11
go.mod
View File

@ -12,7 +12,7 @@ require (
github.com/checkpoint-restore/checkpointctl v1.2.1
github.com/checkpoint-restore/go-criu/v7 v7.2.0
github.com/containernetworking/plugins v1.5.1
github.com/containers/buildah v1.37.0
github.com/containers/buildah v1.37.1-0.20241002152719-c68e17b4ffed
github.com/containers/common v0.60.1-0.20241001171026-c3edf18f3339
github.com/containers/conmon v2.0.20+incompatible
github.com/containers/gvisor-tap-vsock v0.7.5
@ -98,15 +98,14 @@ require (
github.com/chenzhuoyu/iasm v0.9.1 // indirect
github.com/chzyer/readline v1.5.1 // indirect
github.com/containerd/cgroups/v3 v3.0.3 // indirect
github.com/containerd/containerd v1.7.20 // indirect
github.com/containerd/errdefs v0.1.0 // indirect
github.com/containerd/log v0.1.0 // indirect
github.com/containerd/platforms v0.2.1 // indirect
github.com/containerd/stargz-snapshotter/estargz v0.15.1 // indirect
github.com/containerd/typeurl/v2 v2.1.1 // indirect
github.com/containerd/typeurl/v2 v2.2.0 // indirect
github.com/containernetworking/cni v1.2.3 // indirect
github.com/containers/libtrust v0.0.0-20230121012942-c1716e8a8d01 // indirect
github.com/containers/luksy v0.0.0-20240618143119-a8846e21c08c // indirect
github.com/containers/luksy v0.0.0-20240812184316-2e7307c02f06 // indirect
github.com/coreos/go-oidc/v3 v3.11.0 // indirect
github.com/coreos/go-systemd v0.0.0-20190719114852-fd7a80b32e1f // indirect
github.com/cyberphone/json-canonicalization v0.0.0-20231217050601-ba74d44ecf5f // indirect
@ -117,7 +116,7 @@ require (
github.com/docker/docker-credential-helpers v0.8.2 // indirect
github.com/felixge/httpsnoop v1.0.4 // indirect
github.com/fsnotify/fsnotify v1.7.0 // indirect
github.com/fsouza/go-dockerclient v1.11.2 // indirect
github.com/fsouza/go-dockerclient v1.12.0 // indirect
github.com/gabriel-vasile/mimetype v1.4.3 // indirect
github.com/gin-contrib/sse v0.1.0 // indirect
github.com/gin-gonic/gin v1.9.1 // indirect
@ -169,7 +168,7 @@ require (
github.com/miekg/pkcs11 v1.1.1 // indirect
github.com/mistifyio/go-zfs/v3 v3.0.1 // indirect
github.com/mitchellh/mapstructure v1.5.0 // indirect
github.com/moby/buildkit v0.15.1 // indirect
github.com/moby/buildkit v0.16.0 // indirect
github.com/moby/docker-image-spec v1.3.1 // indirect
github.com/moby/patternmatcher v0.6.0 // indirect
github.com/moby/sys/mountinfo v0.7.2 // indirect

26
go.sum
View File

@ -63,8 +63,6 @@ github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDk
github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc=
github.com/containerd/cgroups/v3 v3.0.3 h1:S5ByHZ/h9PMe5IOQoN7E+nMc2UcLEM/V48DGDJ9kip0=
github.com/containerd/cgroups/v3 v3.0.3/go.mod h1:8HBe7V3aWGLFPd/k03swSIsGjZhHI2WzJmticMgVuz0=
github.com/containerd/containerd v1.7.20 h1:Sl6jQYk3TRavaU83h66QMbI2Nqg9Jm6qzwX57Vsn1SQ=
github.com/containerd/containerd v1.7.20/go.mod h1:52GsS5CwquuqPuLncsXwG0t2CiUce+KsNHJZQJvAgR0=
github.com/containerd/errdefs v0.1.0 h1:m0wCRBiu1WJT/Fr+iOoQHMQS/eP5myQ8lCv4Dz5ZURM=
github.com/containerd/errdefs v0.1.0/go.mod h1:YgWiiHtLmSeBrvpw+UfPijzbLaB77mEG1WwJTDETIV0=
github.com/containerd/log v0.1.0 h1:TCJt7ioM2cr/tfR8GPbGf9/VRAX8D2B4PjzCpfX540I=
@ -73,14 +71,14 @@ github.com/containerd/platforms v0.2.1 h1:zvwtM3rz2YHPQsF2CHYM8+KtB5dvhISiXh5ZpS
github.com/containerd/platforms v0.2.1/go.mod h1:XHCb+2/hzowdiut9rkudds9bE5yJ7npe7dG/wG+uFPw=
github.com/containerd/stargz-snapshotter/estargz v0.15.1 h1:eXJjw9RbkLFgioVaTG+G/ZW/0kEe2oEKCdS/ZxIyoCU=
github.com/containerd/stargz-snapshotter/estargz v0.15.1/go.mod h1:gr2RNwukQ/S9Nv33Lt6UC7xEx58C+LHRdoqbEKjz1Kk=
github.com/containerd/typeurl/v2 v2.1.1 h1:3Q4Pt7i8nYwy2KmQWIw2+1hTvwTE/6w9FqcttATPO/4=
github.com/containerd/typeurl/v2 v2.1.1/go.mod h1:IDp2JFvbwZ31H8dQbEIY7sDl2L3o3HZj1hsSQlywkQ0=
github.com/containerd/typeurl/v2 v2.2.0 h1:6NBDbQzr7I5LHgp34xAXYF5DOTQDn05X58lsPEmzLso=
github.com/containerd/typeurl/v2 v2.2.0/go.mod h1:8XOOxnyatxSWuG8OfsZXVnAF4iZfedjS/8UHSPJnX4g=
github.com/containernetworking/cni v1.2.3 h1:hhOcjNVUQTnzdRJ6alC5XF+wd9mfGIUaj8FuJbEslXM=
github.com/containernetworking/cni v1.2.3/go.mod h1:DuLgF+aPd3DzcTQTtp/Nvl1Kim23oFKdm2okJzBQA5M=
github.com/containernetworking/plugins v1.5.1 h1:T5ji+LPYjjgW0QM+KyrigZbLsZ8jaX+E5J/EcKOE4gQ=
github.com/containernetworking/plugins v1.5.1/go.mod h1:MIQfgMayGuHYs0XdNudf31cLLAC+i242hNm6KuDGqCM=
github.com/containers/buildah v1.37.0 h1:jvHwu1vIwIqnHyOSg9eef9Apdpry+5oWLrm43gdf8Rk=
github.com/containers/buildah v1.37.0/go.mod h1:MKd79tkluMf6vtH06SedhBQK5OB7E0pFVIuiTTw3dJk=
github.com/containers/buildah v1.37.1-0.20241002152719-c68e17b4ffed h1:qiE4J6RukT5+a2wV+Xeimu0c4Xx6DZrdt8JiP3c9CY8=
github.com/containers/buildah v1.37.1-0.20241002152719-c68e17b4ffed/go.mod h1:ytj7qYHUdP/p+2lAXVaFSHDyYFJZ3y1ikpFERypXbCI=
github.com/containers/common v0.60.1-0.20241001171026-c3edf18f3339 h1:VjK9wBKZTbmZqZ0qW2QlbW81xOu8YxXecek5MUSLGKc=
github.com/containers/common v0.60.1-0.20241001171026-c3edf18f3339/go.mod h1:vuBEtzP83Fa7mgk0BJdHF2BDfFRfNayeYyVHRJw8hSM=
github.com/containers/conmon v2.0.20+incompatible h1:YbCVSFSCqFjjVwHTPINGdMX1F6JXHGTUje2ZYobNrkg=
@ -93,8 +91,8 @@ github.com/containers/libhvee v0.7.1 h1:dWGF5GLq9DZvXo3P8aDp3cNieL5eCaSell4UmeA/
github.com/containers/libhvee v0.7.1/go.mod h1:fRKB3AyIqHMvq6xaeYhTpckM2cdoq0oecolyoiuLP7M=
github.com/containers/libtrust v0.0.0-20230121012942-c1716e8a8d01 h1:Qzk5C6cYglewc+UyGf6lc8Mj2UaPTHy/iF2De0/77CA=
github.com/containers/libtrust v0.0.0-20230121012942-c1716e8a8d01/go.mod h1:9rfv8iPl1ZP7aqh9YA68wnZv2NUDbXdcdPHVz0pFbPY=
github.com/containers/luksy v0.0.0-20240618143119-a8846e21c08c h1:gJDiBJYc8JFD46IJmr8SqGOcueGSRGnuhW6wgXiAjr0=
github.com/containers/luksy v0.0.0-20240618143119-a8846e21c08c/go.mod h1:Ufusu7xAtl0LSTry0JS6dSxbxR/XJQSEqlhLqTkCaH8=
github.com/containers/luksy v0.0.0-20240812184316-2e7307c02f06 h1:XDof6h9ujycjzF89x7hCBRpCSvFs9plbPHa7c2EtZrk=
github.com/containers/luksy v0.0.0-20240812184316-2e7307c02f06/go.mod h1:jaWkqhbHvO+7rFQ86KcXlNmkM9UZsyWm8alsmaYcMzw=
github.com/containers/ocicrypt v1.2.0 h1:X14EgRK3xNFvJEfI5O4Qn4T3E25ANudSOZz/sirVuPM=
github.com/containers/ocicrypt v1.2.0/go.mod h1:ZNviigQajtdlxIZGibvblVuIFBKIuUI2M0QM12SD31U=
github.com/containers/psgo v1.9.0 h1:eJ74jzSaCHnWt26OlKZROSyUyRcGDf+gYBdXnxrMW4g=
@ -163,8 +161,8 @@ github.com/felixge/httpsnoop v1.0.4/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSw
github.com/fsnotify/fsnotify v1.6.0/go.mod h1:sl3t1tCWJFWoRz9R8WJCbQihKKwmorjAbSClcnxKAGw=
github.com/fsnotify/fsnotify v1.7.0 h1:8JEhPFa5W2WU7YfeZzPNqzMP6Lwt7L2715Ggo0nosvA=
github.com/fsnotify/fsnotify v1.7.0/go.mod h1:40Bi/Hjc2AVfZrqy+aj+yEI+/bRxZnMJyTJwOpGvigM=
github.com/fsouza/go-dockerclient v1.11.2 h1:Wos4OMUwIjOW2rt8Z10TZSJHxgQH0KcYyf3O86dqFII=
github.com/fsouza/go-dockerclient v1.11.2/go.mod h1:HZN6ky2Mg5mfZO/WZBFDe6XCricqTnDJntfXHZTYnQQ=
github.com/fsouza/go-dockerclient v1.12.0 h1:S2f2crEUbBNCFiF06kR/GvioEB8EMsb3Td/bpawD+aU=
github.com/fsouza/go-dockerclient v1.12.0/go.mod h1:YWUtjg8japrqD/80L98nTtCoxQFp5B5wrSsnyeB5lFo=
github.com/gabriel-vasile/mimetype v1.4.3 h1:in2uUcidCuFcDKtdcBxlR0rJ1+fsokWf+uqxgUFjbI0=
github.com/gabriel-vasile/mimetype v1.4.3/go.mod h1:d8uq/6HKRL6CGdk+aubisF/M5GcPfT7nKyLpA0lbSSk=
github.com/gin-contrib/sse v0.1.0 h1:Y/yl/+YNO8GZSjAhjMsSuLt29uWRFHdHYUb5lYOV9qE=
@ -238,7 +236,6 @@ github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:W
github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0=
github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8=
github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI=
github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk=
github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek=
github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps=
github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M=
@ -247,7 +244,6 @@ github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMyw
github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.3/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI=
@ -359,8 +355,8 @@ github.com/mistifyio/go-zfs/v3 v3.0.1 h1:YaoXgBePoMA12+S1u/ddkv+QqxcfiZK4prI6HPn
github.com/mistifyio/go-zfs/v3 v3.0.1/go.mod h1:CzVgeB0RvF2EGzQnytKVvVSDwmKJXxkOTUGbNrTja/k=
github.com/mitchellh/mapstructure v1.5.0 h1:jeMsZIYE/09sWLaz43PL7Gy6RuMjD2eJVyuac5Z2hdY=
github.com/mitchellh/mapstructure v1.5.0/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo=
github.com/moby/buildkit v0.15.1 h1:J6wrew7hphKqlq1wuu6yaUb/1Ra7gEzDAovylGztAKM=
github.com/moby/buildkit v0.15.1/go.mod h1:Yis8ZMUJTHX9XhH9zVyK2igqSHV3sxi3UN0uztZocZk=
github.com/moby/buildkit v0.16.0 h1:wOVBj1o5YNVad/txPQNXUXdelm7Hs/i0PUFjzbK0VKE=
github.com/moby/buildkit v0.16.0/go.mod h1:Xqx/5GlrqE1yIRORk0NSCVDFpQAU1WjlT6KHYZdisIQ=
github.com/moby/docker-image-spec v1.3.1 h1:jMKff3w6PgbfSa69GfNg+zN/XLhfXJGnEx3Nl2EsFP0=
github.com/moby/docker-image-spec v1.3.1/go.mod h1:eKmb5VW8vQEh/BAr2yvVNvuiJuY6UIocYsFu/DxxRpo=
github.com/moby/patternmatcher v0.6.0 h1:GmP9lR19aU5GqSSFko+5pRqHi+Ohk1O69aFiKkVGiPk=
@ -715,8 +711,6 @@ google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2
google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c=
google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw=
google.golang.org/protobuf v1.27.1/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc=
google.golang.org/protobuf v1.34.2 h1:6xV6lTsCfpGD21XK49h7MhtcApnLqkfYgPcdHftf6hg=
google.golang.org/protobuf v1.34.2/go.mod h1:qYOHts0dSfpeUzUFpOMr/WGzszTmLH+DiWniOlNbLDw=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=

View File

@ -1,191 +0,0 @@
Apache License
Version 2.0, January 2004
https://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
Copyright The containerd Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
https://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.

View File

@ -1,16 +0,0 @@
Docker
Copyright 2012-2015 Docker, Inc.
This product includes software developed at Docker, Inc. (https://www.docker.com).
The following is courtesy of our legal counsel:
Use and transfer of Docker may be subject to certain restrictions by the
United States and other governments.
It is your responsibility to ensure that your use and/or transfer does not
violate applicable laws.
For more information, please see https://www.bis.doc.gov
See also https://www.apache.org/dev/crypto.html and/or seek legal counsel.

View File

@ -1,176 +0,0 @@
/*
Copyright The containerd Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package platforms
import (
"github.com/containerd/platforms"
specs "github.com/opencontainers/image-spec/specs-go/v1"
)
// Platform is a type alias for convenience, so there is no need to import image-spec package everywhere.
//
// Deprecated: use [specs.Platform].
type Platform = specs.Platform

// DefaultSpec returns the current platform's default platform specification.
//
// Deprecated: use [platforms.DefaultSpec].
func DefaultSpec() specs.Platform {
	return platforms.DefaultSpec()
}

// Default returns the default matcher for the platform.
//
// Deprecated: use [platforms.Default].
func Default() platforms.MatchComparer {
	return platforms.Default()
}

// DefaultString returns the default string specifier for the platform.
//
// Deprecated: use [platforms.DefaultString].
func DefaultString() string {
	return platforms.Format(platforms.DefaultSpec()) // For 1.7 continue using the old format without os-version included.
}

// DefaultStrict returns strict form of Default.
//
// Deprecated: use [platforms.DefaultStrict].
func DefaultStrict() MatchComparer {
	return platforms.DefaultStrict()
}

// MatchComparer is able to match and compare platforms to
// filter and sort platforms.
//
// Deprecated: use [platforms.MatchComparer].
type MatchComparer = platforms.MatchComparer

// Matcher matches platforms specifications, provided by an image or runtime.
//
// Deprecated: use [platforms.Matcher].
type Matcher = platforms.Matcher

// NewMatcher returns a simple matcher based on the provided platform
// specification. The returned matcher only looks for equality based on os,
// architecture and variant.
//
// One may implement their own matcher if this doesn't provide the required
// functionality.
//
// Applications should opt to use `Match` over directly parsing specifiers.
//
// Deprecated: use [platforms.NewMatcher].
func NewMatcher(platform specs.Platform) platforms.Matcher {
	return platforms.NewMatcher(platform)
}

// Parse parses the platform specifier syntax into a platform declaration.
//
// Platform specifiers are in the format `<os>|<arch>|<os>/<arch>[/<variant>]`.
// The minimum required information for a platform specifier is the operating
// system or architecture. If there is only a single string (no slashes), the
// value will be matched against the known set of operating systems, then fall
// back to the known set of architectures. The missing component will be
// inferred based on the local environment.
//
// Deprecated: use [platforms.Parse].
func Parse(specifier string) (specs.Platform, error) {
	return platforms.Parse(specifier)
}

// MustParse is like Parse but panics if the specifier cannot be parsed.
// Simplifies initialization of global variables.
//
// Deprecated: use [platforms.MustParse].
func MustParse(specifier string) specs.Platform {
	return platforms.MustParse(specifier)
}

// Format returns a string specifier from the provided platform specification.
//
// Deprecated: use [platforms.Format].
func Format(platform specs.Platform) string {
	return platforms.Format(platform)
}

// Normalize validates and translate the platform to the canonical value.
//
// For example, if "Aarch64" is encountered, we change it to "arm64" or if
// "x86_64" is encountered, it becomes "amd64".
//
// Deprecated: use [platforms.Normalize].
func Normalize(platform specs.Platform) specs.Platform {
	return platforms.Normalize(platform)
}
// Only returns a match comparer for a single platform
// using default resolution logic for the platform.
//
// For arm/v8, will also match arm/v7, arm/v6 and arm/v5
// For arm/v7, will also match arm/v6 and arm/v5
// For arm/v6, will also match arm/v5
// For amd64, will also match 386
//
// Deprecated: use [platforms.Only].
func Only(platform specs.Platform) platforms.MatchComparer {
	return platforms.Only(platform)
}

// OnlyStrict returns a match comparer for a single platform.
//
// Unlike Only, OnlyStrict does not match sub platforms.
// So, "arm/vN" will not match "arm/vM" where M < N,
// and "amd64" will not also match "386".
//
// OnlyStrict matches non-canonical forms.
// So, "arm64" matches "arm64/v8". (presumably the canonical variant form — confirm against platforms.OnlyStrict)
//
// Deprecated: use [platforms.OnlyStrict].
func OnlyStrict(platform specs.Platform) platforms.MatchComparer {
	return platforms.OnlyStrict(platform)
}

// Ordered returns a platform MatchComparer which matches any of the platforms
// but orders them in order they are provided.
//
// Deprecated: use [platforms.Ordered].
func Ordered(platform ...specs.Platform) platforms.MatchComparer {
	return platforms.Ordered(platform...)
}

// Any returns a platform MatchComparer which matches any of the platforms
// with no preference for ordering.
//
// Deprecated: use [platforms.Any].
func Any(platform ...specs.Platform) platforms.MatchComparer {
	return platforms.Any(platform...)
}

// All is a platform MatchComparer which matches all platforms
// with preference for ordering.
//
// Deprecated: use [platforms.All].
var All = platforms.All

// GetWindowsOsVersion returns the version of Windows of the local system,
// it returns an empty string on other platforms.
//
// Deprecated: this function is deprecated, and removed in github.com/containerd/platforms
func GetWindowsOsVersion() string {
	return getWindowsOsVersion()
}

View File

@ -1,23 +0,0 @@
//go:build !windows
/*
Copyright The containerd Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package platforms
// getWindowsOsVersion is the non-Windows stub backing GetWindowsOsVersion;
// there is no Windows version to report here, so it always returns "".
func getWindowsOsVersion() string {
	return ""
}

View File

@ -1,49 +0,0 @@
/*
Copyright The containerd Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package platforms
import (
"fmt"
"strconv"
"strings"
"github.com/Microsoft/hcsshim/osversion"
"golang.org/x/sys/windows"
)
// getWindowsOsVersion returns the local Windows version in "major.minor.build"
// form, as reported by windows.RtlGetNtVersionNumbers.
func getWindowsOsVersion() string {
	major, minor, build := windows.RtlGetNtVersionNumbers()
	return fmt.Sprintf("%d.%d.%d", major, minor, build)
}
// GetOsVersion parses a "major.minor.build[...]" version prefix into an
// osversion.OSVersion. A prefix with fewer than three dot-separated parts
// yields the zero OSVersion; a non-numeric part parses as zero.
//
// Deprecated: this function is deprecated, and removed in github.com/containerd/platforms
func GetOsVersion(osVersionPrefix string) osversion.OSVersion {
	fields := strings.Split(osVersionPrefix, ".")
	if len(fields) < 3 {
		return osversion.OSVersion{}
	}
	// Parse errors are deliberately ignored: a non-numeric component
	// simply becomes zero, preserving the historical behavior.
	var nums [3]int
	for i := range nums {
		nums[i], _ = strconv.Atoi(fields[i])
	}
	return osversion.OSVersion{
		MajorVersion: uint8(nums[0]),
		MinorVersion: uint8(nums[1]),
		Build:        uint16(nums[2]),
	}
}

View File

@ -2,7 +2,7 @@
[![PkgGoDev](https://pkg.go.dev/badge/github.com/containerd/typeurl)](https://pkg.go.dev/github.com/containerd/typeurl)
[![Build Status](https://github.com/containerd/typeurl/workflows/CI/badge.svg)](https://github.com/containerd/typeurl/actions?query=workflow%3ACI)
[![codecov](https://codecov.io/gh/containerd/typeurl/branch/master/graph/badge.svg)](https://codecov.io/gh/containerd/typeurl)
[![codecov](https://codecov.io/gh/containerd/typeurl/branch/main/graph/badge.svg)](https://codecov.io/gh/containerd/typeurl)
[![Go Report Card](https://goreportcard.com/badge/github.com/containerd/typeurl)](https://goreportcard.com/report/github.com/containerd/typeurl)
A Go package for managing the registration, marshaling, and unmarshaling of encoded types.
@ -13,8 +13,8 @@ This package helps when types are sent over a ttrpc/GRPC API and marshaled as a
**typeurl** is a containerd sub-project, licensed under the [Apache 2.0 license](./LICENSE).
As a containerd sub-project, you will find the:
* [Project governance](https://github.com/containerd/project/blob/master/GOVERNANCE.md),
* [Maintainers](https://github.com/containerd/project/blob/master/MAINTAINERS),
* and [Contributing guidelines](https://github.com/containerd/project/blob/master/CONTRIBUTING.md)
* [Project governance](https://github.com/containerd/project/blob/main/GOVERNANCE.md),
* [Maintainers](https://github.com/containerd/project/blob/main/MAINTAINERS),
* and [Contributing guidelines](https://github.com/containerd/project/blob/main/CONTRIBUTING.md)
information in our [`containerd/project`](https://github.com/containerd/project) repository.

View File

@ -27,6 +27,7 @@ import (
gogoproto "github.com/gogo/protobuf/proto"
"google.golang.org/protobuf/proto"
"google.golang.org/protobuf/reflect/protoregistry"
"google.golang.org/protobuf/types/known/anypb"
)
var (
@ -122,6 +123,9 @@ func TypeURL(v interface{}) (string, error) {
// Is returns true if the type of the Any is the same as v.
func Is(any Any, v interface{}) bool {
if any == nil {
return false
}
// call to check that v is a pointer
tryDereference(v)
url, err := TypeURL(v)
@ -193,6 +197,31 @@ func UnmarshalToByTypeURL(typeURL string, value []byte, out interface{}) error {
return err
}
// MarshalProto converts typeurl.Any to google.golang.org/protobuf/types/known/anypb.Any.
// A nil input yields nil; an input that is already an *anypb.Any is returned
// as-is; any other implementation is copied field-by-field into a fresh value.
func MarshalProto(from Any) *anypb.Any {
	switch v := from.(type) {
	case nil:
		return nil
	case *anypb.Any:
		return v
	default:
		return &anypb.Any{
			TypeUrl: from.GetTypeUrl(),
			Value:   from.GetValue(),
		}
	}
}
// MarshalAnyToProto converts an arbitrary interface to google.golang.org/protobuf/types/known/anypb.Any.
// It first wraps the value via MarshalAny, then converts the resulting
// typeurl.Any with MarshalProto; the error is non-nil only if MarshalAny fails.
func MarshalAnyToProto(from interface{}) (*anypb.Any, error) {
	anyType, err := MarshalAny(from)
	if err != nil {
		return nil, err
	}
	return MarshalProto(anyType), nil
}
func unmarshal(typeURL string, value []byte, v interface{}) (interface{}, error) {
t, err := getTypeByUrl(typeURL)
if err != nil {

View File

@ -27,12 +27,12 @@ env:
####
# GCE project where images live
IMAGE_PROJECT: "libpod-218412"
FEDORA_NAME: "fedora-39"
PRIOR_FEDORA_NAME: "fedora-38"
FEDORA_NAME: "fedora-40"
PRIOR_FEDORA_NAME: "fedora-39"
DEBIAN_NAME: "debian-13"
# Image identifiers
IMAGE_SUFFIX: "c20240708t152000z-f40f39d13"
IMAGE_SUFFIX: "c20240826t190000z-f40f39d13"
FEDORA_CACHE_IMAGE_NAME: "fedora-${IMAGE_SUFFIX}"
PRIOR_FEDORA_CACHE_IMAGE_NAME: "prior-fedora-${IMAGE_SUFFIX}"
DEBIAN_CACHE_IMAGE_NAME: "debian-${IMAGE_SUFFIX}"
@ -208,10 +208,11 @@ integration_task:
DISTRO_NV: "${FEDORA_NAME}"
IMAGE_NAME: "${FEDORA_CACHE_IMAGE_NAME}"
STORAGE_DRIVER: 'vfs'
- env:
DISTRO_NV: "${PRIOR_FEDORA_NAME}"
IMAGE_NAME: "${PRIOR_FEDORA_CACHE_IMAGE_NAME}"
STORAGE_DRIVER: 'vfs'
# Disabled until we update to f40/41 as f39 does not have go 1.22
# - env:
# DISTRO_NV: "${PRIOR_FEDORA_NAME}"
# IMAGE_NAME: "${PRIOR_FEDORA_CACHE_IMAGE_NAME}"
# STORAGE_DRIVER: 'vfs'
- env:
DISTRO_NV: "${DEBIAN_NAME}"
IMAGE_NAME: "${DEBIAN_CACHE_IMAGE_NAME}"
@ -221,10 +222,11 @@ integration_task:
DISTRO_NV: "${FEDORA_NAME}"
IMAGE_NAME: "${FEDORA_CACHE_IMAGE_NAME}"
STORAGE_DRIVER: 'overlay'
- env:
DISTRO_NV: "${PRIOR_FEDORA_NAME}"
IMAGE_NAME: "${PRIOR_FEDORA_CACHE_IMAGE_NAME}"
STORAGE_DRIVER: 'overlay'
# Disabled until we update to f40/41 as f39 does not have go 1.22
# - env:
# DISTRO_NV: "${PRIOR_FEDORA_NAME}"
# IMAGE_NAME: "${PRIOR_FEDORA_CACHE_IMAGE_NAME}"
# STORAGE_DRIVER: 'overlay'
- env:
DISTRO_NV: "${DEBIAN_NAME}"
IMAGE_NAME: "${DEBIAN_CACHE_IMAGE_NAME}"
@ -265,11 +267,12 @@ integration_rootless_task:
IMAGE_NAME: "${FEDORA_CACHE_IMAGE_NAME}"
STORAGE_DRIVER: 'overlay'
PRIV_NAME: rootless
- env:
DISTRO_NV: "${PRIOR_FEDORA_NAME}"
IMAGE_NAME: "${PRIOR_FEDORA_CACHE_IMAGE_NAME}"
STORAGE_DRIVER: 'overlay'
PRIV_NAME: rootless
# Disabled until we update to f40/41 as f39 does not have go 1.22
# - env:
# DISTRO_NV: "${PRIOR_FEDORA_NAME}"
# IMAGE_NAME: "${PRIOR_FEDORA_CACHE_IMAGE_NAME}"
# STORAGE_DRIVER: 'overlay'
# PRIV_NAME: rootless
- env:
DISTRO_NV: "${DEBIAN_NAME}"
IMAGE_NAME: "${DEBIAN_CACHE_IMAGE_NAME}"

View File

@ -8,6 +8,9 @@ run:
concurrency: 4
linters:
enable:
- gofmt
- gofumpt
- revive
- unconvert
- unparam
- whitespace

View File

@ -14,6 +14,8 @@ packages:
specfile_path: rpm/buildah.spec
buildah-rhel:
specfile_path: rpm/buildah.spec
buildah-eln:
specfile_path: rpm/buildah.spec
srpm_build_deps:
- make
@ -26,8 +28,21 @@ jobs:
failure_comment:
message: "Ephemeral COPR build failed. @containers/packit-build please check."
targets:
fedora-all-x86_64: {}
fedora-all-aarch64: {}
- fedora-development-x86_64
- fedora-development-aarch64
- fedora-latest-x86_64
- fedora-latest-aarch64
- fedora-latest-stable-x86_64
- fedora-latest-stable-aarch64
- fedora-40-x86_64
- fedora-40-aarch64
enable_net: true
- job: copr_build
trigger: pull_request
packages: [buildah-eln]
notifications: *copr_build_failure_notification
targets:
fedora-eln-x86_64:
additional_repos:
- "https://kojipkgs.fedoraproject.org/repos/eln-build/latest/x86_64/"
@ -47,14 +62,15 @@ jobs:
- centos-stream-10-aarch64
enable_net: true
- job: copr_build
trigger: pull_request
packages: [buildah-rhel]
notifications: *copr_build_failure_notification
targets:
- epel-9-x86_64
- epel-9-aarch64
enable_net: true
# Disabled until there is go 1.22 in epel-9
# - job: copr_build
# trigger: pull_request
# packages: [buildah-rhel]
# notifications: *copr_build_failure_notification
# targets:
# - epel-9-x86_64
# - epel-9-aarch64
# enable_net: true
# Run on commit to main branch
- job: copr_build
@ -73,7 +89,7 @@ jobs:
trigger: release
packages: [buildah-fedora]
update_release: false
dist_git_branches:
dist_git_branches: &fedora_targets
- fedora-all
# Sync to CentOS Stream
@ -84,12 +100,13 @@ jobs:
dist_git_branches:
- c10s
# Fedora Koji build
- job: koji_build
trigger: commit
dist_git_branches:
- fedora-all
- job: bodhi_update
trigger: commit
dist_git_branches:
- fedora-branched # rawhide updates are created automatically
sidetag_group: podman-releases
# Dependents are not rpm dependencies, but the package whose bodhi update
# should include this package.
# Ref: https://packit.dev/docs/fedora-releases-guide/releasing-multiple-packages
dependents:
- podman
dist_git_branches: *fedora_targets

View File

@ -41,7 +41,7 @@ LIBSECCOMP_COMMIT := release-2.3
EXTRA_LDFLAGS ?=
BUILDAH_LDFLAGS := $(GO_LDFLAGS) '-X main.GitCommit=$(GIT_COMMIT) -X main.buildInfo=$(SOURCE_DATE_EPOCH) -X main.cniVersion=$(CNI_COMMIT) $(EXTRA_LDFLAGS)'
SOURCES=*.go imagebuildah/*.go bind/*.go chroot/*.go copier/*.go define/*.go docker/*.go internal/config/*.go internal/mkcw/*.go internal/mkcw/types/*.go internal/parse/*.go internal/sbom/*.go internal/source/*.go internal/tmpdir/*.go internal/*.go internal/util/*.go internal/volumes/*.go manifests/*.go pkg/chrootuser/*.go pkg/cli/*.go pkg/completion/*.go pkg/formats/*.go pkg/jail/*.go pkg/overlay/*.go pkg/parse/*.go pkg/rusage/*.go pkg/sshagent/*.go pkg/umask/*.go pkg/util/*.go pkg/volumes/*.go util/*.go
SOURCES=*.go imagebuildah/*.go bind/*.go chroot/*.go copier/*.go define/*.go docker/*.go internal/config/*.go internal/mkcw/*.go internal/mkcw/types/*.go internal/parse/*.go internal/sbom/*.go internal/source/*.go internal/tmpdir/*.go internal/*.go internal/util/*.go internal/volumes/*.go manifests/*.go pkg/binfmt/*.go pkg/blobcache/*.go pkg/chrootuser/*.go pkg/cli/*.go pkg/completion/*.go pkg/formats/*.go pkg/jail/*.go pkg/overlay/*.go pkg/parse/*.go pkg/rusage/*.go pkg/sshagent/*.go pkg/umask/*.go pkg/util/*.go pkg/volumes/*.go util/*.go
LINTFLAGS ?=
@ -133,6 +133,7 @@ validate: install.tools
./tests/validate/whitespace.sh
./hack/xref-helpmsgs-manpages
./tests/validate/pr-should-include-tests
./hack/makefile_sources
.PHONY: install.tools
install.tools:
@ -184,7 +185,6 @@ test-conformance:
.PHONY: test-integration
test-integration: install.tools
./tests/tools/build/ginkgo $(BUILDFLAGS) -v tests/e2e/.
cd tests; ./test_runner.sh
tests/testreport/testreport: tests/testreport/testreport.go
@ -199,7 +199,11 @@ test-unit: tests/testreport/testreport
$(GO_TEST) -v -tags "$(STORAGETAGS) $(SECURITYTAGS)" -cover $(RACEFLAGS) ./cmd/buildah -args --root $$tmp/root --runroot $$tmp/runroot --storage-driver vfs --signature-policy $(shell pwd)/tests/policy.json --registries-conf $(shell pwd)/tests/registries.conf
vendor-in-container:
podman run --privileged --rm --env HOME=/root -v `pwd`:/src -w /src docker.io/library/golang:1.21 make vendor
if test -d `go env GOCACHE` && test -w `go env GOCACHE` ; then \
podman run --privileged --rm --env HOME=/root -v `go env GOCACHE`:/root/.cache/go-build --env GOCACHE=/root/.cache/go-build -v `pwd`:/src -w /src docker.io/library/golang:1.21 make vendor ; \
else \
podman run --privileged --rm --env HOME=/root -v `pwd`:/src -w /src docker.io/library/golang:1.21 make vendor ; \
fi
.PHONY: vendor
vendor:

View File

@ -2,6 +2,8 @@ package buildah
import (
"archive/tar"
"context"
"crypto/tls"
"errors"
"fmt"
"io"
@ -18,9 +20,15 @@ import (
"github.com/containers/buildah/copier"
"github.com/containers/buildah/define"
"github.com/containers/buildah/internal/tmpdir"
"github.com/containers/buildah/pkg/chrootuser"
"github.com/containers/common/pkg/retry"
"github.com/containers/image/v5/pkg/tlsclientconfig"
"github.com/containers/image/v5/types"
"github.com/containers/storage/pkg/fileutils"
"github.com/containers/storage/pkg/idtools"
"github.com/containers/storage/pkg/regexp"
"github.com/docker/go-connections/tlsconfig"
"github.com/hashicorp/go-multierror"
digest "github.com/opencontainers/go-digest"
"github.com/opencontainers/runtime-spec/specs-go"
@ -29,7 +37,7 @@ import (
// AddAndCopyOptions holds options for add and copy commands.
type AddAndCopyOptions struct {
//Chmod sets the access permissions of the destination content.
// Chmod sets the access permissions of the destination content.
Chmod string
// Chown is a spec for the user who should be given ownership over the
// newly-added content, potentially overriding permissions which would
@ -72,20 +80,61 @@ type AddAndCopyOptions struct {
// Clear the sticky bit on items being copied. Has no effect on
// archives being extracted, where the bit is always preserved.
StripStickyBit bool
// If not "", a directory containing a CA certificate (ending with
// ".crt"), a client certificate (ending with ".cert") and a client
// certificate key (ending with ".key") used when downloading sources
// from locations protected with TLS.
CertPath string
// Allow downloading sources from HTTPS where TLS verification fails.
InsecureSkipTLSVerify types.OptionalBool
// MaxRetries is the maximum number of attempts we'll make to retrieve
// contents from a remote location.
MaxRetries int
// RetryDelay is how long to wait before retrying attempts to retrieve
// remote contents.
RetryDelay time.Duration
}
// sourceIsRemote returns true if "source" is a remote location.
// gitURLFragmentSuffix matches fragments to use as Git reference and build
// context from the Git repository e.g.
//
// github.com/containers/buildah.git
// github.com/containers/buildah.git#main
// github.com/containers/buildah.git#v1.35.0
var gitURLFragmentSuffix = regexp.Delayed(`\.git(?:#.+)?$`)
// sourceIsGit returns true if "source" is a git location.
func sourceIsGit(source string) bool {
	// Only http(s) URLs can be git sources here; anything else is local.
	if !isURL(source) {
		return false
	}
	// A ".git" suffix (optionally followed by a #fragment) marks a git repo.
	return gitURLFragmentSuffix.MatchString(source)
}
// isURL reports whether the location begins with an HTTP or HTTPS scheme.
func isURL(url string) bool {
	for _, scheme := range []string{"http://", "https://"} {
		if strings.HasPrefix(url, scheme) {
			return true
		}
	}
	return false
}
// sourceIsRemote returns true if "source" is a remote location
// and *not* a git repo. Certain github urls such as raw.github.* are allowed.
func sourceIsRemote(source string) bool {
	// NOTE(review): the two return statements below look like merge/diff
	// residue — the first (prefix check only) makes the second (prefix check
	// plus git-suffix exclusion) unreachable. The second line matches the doc
	// comment's intent; confirm which version is meant and delete the other.
	return strings.HasPrefix(source, "http://") || strings.HasPrefix(source, "https://")
	return isURL(source) && !gitURLFragmentSuffix.MatchString(source)
}
// getURL writes a tar archive containing the named content
func getURL(src string, chown *idtools.IDPair, mountpoint, renameTarget string, writer io.Writer, chmod *os.FileMode, srcDigest digest.Digest) error {
func getURL(src string, chown *idtools.IDPair, mountpoint, renameTarget string, writer io.Writer, chmod *os.FileMode, srcDigest digest.Digest, certPath string, insecureSkipTLSVerify types.OptionalBool) error {
url, err := url.Parse(src)
if err != nil {
return err
}
response, err := http.Get(src)
tlsClientConfig := &tls.Config{
CipherSuites: tlsconfig.DefaultServerAcceptedCiphers,
}
if err := tlsclientconfig.SetupCertificates(certPath, tlsClientConfig); err != nil {
return err
}
tlsClientConfig.InsecureSkipVerify = insecureSkipTLSVerify == types.OptionalBoolTrue
tr := &http.Transport{TLSClientConfig: tlsClientConfig}
httpClient := &http.Client{Transport: tr}
response, err := httpClient.Get(src)
if err != nil {
return err
}
@ -147,7 +196,7 @@ func getURL(src string, chown *idtools.IDPair, mountpoint, renameTarget string,
uid = chown.UID
gid = chown.GID
}
var mode int64 = 0600
var mode int64 = 0o600
if chmod != nil {
mode = int64(*chmod)
}
@ -201,6 +250,18 @@ func includeDirectoryAnyway(path string, pm *fileutils.PatternMatcher) bool {
return false
}
// globbedToGlobbable takes a pathname which might include the '[', *, or ?
// characters, and converts it into a glob pattern that matches itself by
// marking the '[' characters as _not_ the beginning of match ranges and
// escaping the * and ? characters.
func globbedToGlobbable(glob string) string {
	// "[" is wrapped in a character class so it matches only a literal "[";
	// "?" and "*" are backslash-escaped to strip their wildcard meaning.
	// A single Replacer pass is equivalent to the sequential replacements,
	// since none of the replacement outputs contain another pattern.
	escaper := strings.NewReplacer("[", "[[]", "?", `\?`, "*", `\*`)
	return escaper.Replace(glob)
}
// Add copies the contents of the specified sources into the container's root
// filesystem, optionally extracting contents of local files that look like
// non-empty archives.
@ -233,18 +294,31 @@ func (b *Builder) Add(destination string, extract bool, options AddAndCopyOption
}
// Figure out what sorts of sources we have.
var localSources, remoteSources []string
var localSources, remoteSources, gitSources []string
for i, src := range sources {
if src == "" {
return errors.New("empty source location")
}
if sourceIsRemote(src) {
remoteSources = append(remoteSources, src)
continue
}
if sourceIsGit(src) {
gitSources = append(gitSources, src)
continue
}
if !filepath.IsAbs(src) && options.ContextDir == "" {
sources[i] = filepath.Join(currentDir, src)
}
localSources = append(localSources, sources[i])
}
// Treat git sources as a subset of remote sources
// differentiating only in how we fetch the two later on.
if len(gitSources) > 0 {
remoteSources = append(remoteSources, gitSources...)
}
// Check how many items our local source specs matched. Each spec
// should have matched at least one item, otherwise we consider it an
// error.
@ -276,7 +350,7 @@ func (b *Builder) Add(destination string, extract bool, options AddAndCopyOption
}
numLocalSourceItems += len(localSourceStat.Globbed)
}
if numLocalSourceItems+len(remoteSources) == 0 {
if numLocalSourceItems+len(remoteSources)+len(gitSources) == 0 {
return fmt.Errorf("no sources %v found: %w", sources, syscall.ENOENT)
}
@ -333,6 +407,9 @@ func (b *Builder) Add(destination string, extract bool, options AddAndCopyOption
destCanBeFile = true
}
}
if len(gitSources) > 0 {
destMustBeDirectory = true
}
}
// We care if the destination either doesn't exist, or exists and is a
@ -404,7 +481,7 @@ func (b *Builder) Add(destination string, extract bool, options AddAndCopyOption
var multiErr *multierror.Error
var getErr, closeErr, renameErr, putErr error
var wg sync.WaitGroup
if sourceIsRemote(src) {
if sourceIsRemote(src) || sourceIsGit(src) {
pipeReader, pipeWriter := io.Pipe()
var srcDigest digest.Digest
if options.Checksum != "" {
@ -413,12 +490,43 @@ func (b *Builder) Add(destination string, extract bool, options AddAndCopyOption
return fmt.Errorf("invalid checksum flag: %w", err)
}
}
wg.Add(1)
go func() {
getErr = getURL(src, chownFiles, mountPoint, renameTarget, pipeWriter, chmodDirsFiles, srcDigest)
pipeWriter.Close()
wg.Done()
}()
if sourceIsGit(src) {
go func() {
var cloneDir string
cloneDir, _, getErr = define.TempDirForURL(tmpdir.GetTempDir(), "", src)
getOptions := copier.GetOptions{
UIDMap: srcUIDMap,
GIDMap: srcGIDMap,
Excludes: options.Excludes,
ExpandArchives: extract,
ChownDirs: chownDirs,
ChmodDirs: chmodDirsFiles,
ChownFiles: chownFiles,
ChmodFiles: chmodDirsFiles,
StripSetuidBit: options.StripSetuidBit,
StripSetgidBit: options.StripSetgidBit,
StripStickyBit: options.StripStickyBit,
}
writer := io.WriteCloser(pipeWriter)
getErr = copier.Get(cloneDir, cloneDir, getOptions, []string{"."}, writer)
pipeWriter.Close()
wg.Done()
}()
} else {
go func() {
getErr = retry.IfNecessary(context.TODO(), func() error {
return getURL(src, chownFiles, mountPoint, renameTarget, pipeWriter, chmodDirsFiles, srcDigest, options.CertPath, options.InsecureSkipTLSVerify)
}, &retry.Options{
MaxRetry: options.MaxRetries,
Delay: options.RetryDelay,
})
pipeWriter.Close()
wg.Done()
}()
}
wg.Add(1)
go func() {
b.ContentDigester.Start("")
@ -480,27 +588,27 @@ func (b *Builder) Add(destination string, extract bool, options AddAndCopyOption
// Iterate through every item that matched the glob.
itemsCopied := 0
for _, glob := range localSourceStat.Globbed {
rel := glob
if filepath.IsAbs(glob) {
if rel, err = filepath.Rel(contextDir, glob); err != nil {
return fmt.Errorf("computing path of %q relative to %q: %w", glob, contextDir, err)
for _, globbed := range localSourceStat.Globbed {
rel := globbed
if filepath.IsAbs(globbed) {
if rel, err = filepath.Rel(contextDir, globbed); err != nil {
return fmt.Errorf("computing path of %q relative to %q: %w", globbed, contextDir, err)
}
}
if strings.HasPrefix(rel, ".."+string(os.PathSeparator)) {
return fmt.Errorf("possible escaping context directory error: %q is outside of %q", glob, contextDir)
return fmt.Errorf("possible escaping context directory error: %q is outside of %q", globbed, contextDir)
}
// Check for dockerignore-style exclusion of this item.
if rel != "." {
excluded, err := pm.Matches(filepath.ToSlash(rel)) // nolint:staticcheck
if err != nil {
return fmt.Errorf("checking if %q(%q) is excluded: %w", glob, rel, err)
return fmt.Errorf("checking if %q(%q) is excluded: %w", globbed, rel, err)
}
if excluded {
// non-directories that are excluded are excluded, no question, but
// directories can only be skipped if we don't have to allow for the
// possibility of finding things to include under them
globInfo := localSourceStat.Results[glob]
globInfo := localSourceStat.Results[globbed]
if !globInfo.IsDir || !includeDirectoryAnyway(rel, pm) {
continue
}
@ -517,7 +625,7 @@ func (b *Builder) Add(destination string, extract bool, options AddAndCopyOption
// due to potentially not having anything in the tarstream that we passed.
itemsCopied++
}
st := localSourceStat.Results[glob]
st := localSourceStat.Results[globbed]
pipeReader, pipeWriter := io.Pipe()
wg.Add(1)
go func() {
@ -530,7 +638,7 @@ func (b *Builder) Add(destination string, extract bool, options AddAndCopyOption
return false, false, nil
})
}
writer = newTarFilterer(writer, func(hdr *tar.Header) (bool, bool, io.Reader) {
writer = newTarFilterer(writer, func(_ *tar.Header) (bool, bool, io.Reader) {
itemsCopied++
return false, false, nil
})
@ -547,7 +655,7 @@ func (b *Builder) Add(destination string, extract bool, options AddAndCopyOption
StripSetgidBit: options.StripSetgidBit,
StripStickyBit: options.StripStickyBit,
}
getErr = copier.Get(contextDir, contextDir, getOptions, []string{glob}, writer)
getErr = copier.Get(contextDir, contextDir, getOptions, []string{globbedToGlobbable(globbed)}, writer)
closeErr = writer.Close()
if renameTarget != "" && renamedItems > 1 {
renameErr = fmt.Errorf("internal error: renamed %d items when we expected to only rename 1", renamedItems)
@ -640,7 +748,6 @@ func (b *Builder) userForRun(mountPoint string, userspec string) (specs.User, st
} else {
u.AdditionalGids = groups
}
}
return u, homeDir, err
}

View File

@ -1,5 +1,4 @@
//go:build !linux
// +build !linux
package buildah

View File

@ -1,5 +1,4 @@
//go:build linux
// +build linux
package bind
@ -49,7 +48,7 @@ func SetupIntermediateMountNamespace(spec *specs.Spec, bundlePath string) (unmou
if err != nil {
return nil, fmt.Errorf("checking permissions on %q: %w", bundlePath, err)
}
if err = os.Chmod(bundlePath, info.Mode()|0111); err != nil {
if err = os.Chmod(bundlePath, info.Mode()|0o111); err != nil {
return nil, fmt.Errorf("loosening permissions on %q: %w", bundlePath, err)
}
@ -116,7 +115,7 @@ func SetupIntermediateMountNamespace(spec *specs.Spec, bundlePath string) (unmou
// other unprivileged users outside of containers, shouldn't be able to
// access.
mnt := filepath.Join(bundlePath, "mnt")
if err = idtools.MkdirAndChown(mnt, 0100, idtools.IDPair{UID: int(rootUID), GID: int(rootGID)}); err != nil {
if err = idtools.MkdirAndChown(mnt, 0o100, idtools.IDPair{UID: int(rootUID), GID: int(rootGID)}); err != nil {
return unmountAll, fmt.Errorf("creating %q owned by the container's root user: %w", mnt, err)
}
@ -129,7 +128,7 @@ func SetupIntermediateMountNamespace(spec *specs.Spec, bundlePath string) (unmou
// Create a bind mount for the root filesystem and add it to the list.
rootfs := filepath.Join(mnt, "rootfs")
if err = os.Mkdir(rootfs, 0000); err != nil {
if err = os.Mkdir(rootfs, 0o000); err != nil {
return unmountAll, fmt.Errorf("creating directory %q: %w", rootfs, err)
}
if err = unix.Mount(rootPath, rootfs, "", unix.MS_BIND|unix.MS_REC|unix.MS_PRIVATE, ""); err != nil {
@ -160,13 +159,13 @@ func SetupIntermediateMountNamespace(spec *specs.Spec, bundlePath string) (unmou
if info.IsDir() {
// If the source is a directory, make one to use as the
// mount target.
if err = os.Mkdir(stage, 0000); err != nil {
if err = os.Mkdir(stage, 0o000); err != nil {
return unmountAll, fmt.Errorf("creating directory %q: %w", stage, err)
}
} else {
// If the source is not a directory, create an empty
// file to use as the mount target.
file, err := os.OpenFile(stage, os.O_WRONLY|os.O_CREATE, 0000)
file, err := os.OpenFile(stage, os.O_WRONLY|os.O_CREATE, 0o000)
if err != nil {
return unmountAll, fmt.Errorf("creating file %q: %w", stage, err)
}

View File

@ -1,4 +1,4 @@
// +build !linux
//go:build !linux
package bind

View File

@ -191,6 +191,13 @@ type Builder struct {
// CDIConfigDir is the location of CDI configuration files, if the files in
// the default configuration locations shouldn't be used.
CDIConfigDir string
// PrependedLinkedLayers and AppendedLinkedLayers are combinations of
// history entries and locations of either directory trees (if
// directories, per os.Stat()) or uncompressed layer blobs which should
// be added to the image at commit-time. The order of these relative
// to PrependedEmptyLayers and AppendedEmptyLayers in the committed
// image is not guaranteed.
PrependedLinkedLayers, AppendedLinkedLayers []LinkedLayer
}
// BuilderInfo are used as objects to display container information
@ -370,6 +377,11 @@ type BuilderOptions struct {
// CDIConfigDir is the location of CDI configuration files, if the files in
// the default configuration locations shouldn't be used.
CDIConfigDir string
// CompatScratchConfig controls whether a "scratch" image is created
// with a truly empty configuration, as would have happened in the past
// (when set to true), or with a minimal initial configuration which
// has a working directory set in it.
CompatScratchConfig types.OptionalBool
}
// ImportOptions are used to initialize a Builder from an existing container
@ -563,7 +575,7 @@ func (b *Builder) Save() error {
if err != nil {
return err
}
if err = ioutils.AtomicWriteFile(filepath.Join(cdir, stateFile), buildstate, 0600); err != nil {
if err = ioutils.AtomicWriteFile(filepath.Join(cdir, stateFile), buildstate, 0o600); err != nil {
return fmt.Errorf("saving builder state to %q: %w", filepath.Join(cdir, stateFile), err)
}
return nil

View File

@ -1,5 +1,4 @@
//go:build freebsd && cgo
// +build freebsd,cgo
package chroot

View File

@ -1,5 +1,4 @@
//go:build linux
// +build linux
package chroot
@ -16,7 +15,7 @@ import (
// this instead of posix_openpt is that it avoids cgo.
func getPtyDescriptors() (int, int, error) {
// Create a pseudo-terminal -- open a copy of the master side.
controlFd, err := unix.Open("/dev/ptmx", os.O_RDWR, 0600)
controlFd, err := unix.Open("/dev/ptmx", os.O_RDWR, 0o600)
if err != nil {
return -1, -1, fmt.Errorf("opening PTY master using /dev/ptmx: %v", err)
}
@ -37,7 +36,7 @@ func getPtyDescriptors() (int, int, error) {
return -1, -1, fmt.Errorf("getting PTY number: %v", err)
}
ptyName := fmt.Sprintf("/dev/pts/%d", ptyN)
fd, err := unix.Open(ptyName, unix.O_RDWR|unix.O_NOCTTY, 0620)
fd, err := unix.Open(ptyName, unix.O_RDWR|unix.O_NOCTTY, 0o620)
if err != nil {
return -1, -1, fmt.Errorf("opening PTY %q: %v", ptyName, err)
}

View File

@ -1,6 +1,4 @@
//go:build !linux && !(freebsd && cgo)
// +build !linux
// +build !freebsd !cgo
package chroot

View File

@ -1,5 +1,4 @@
//go:build linux || freebsd
// +build linux freebsd
package chroot
@ -74,7 +73,7 @@ func RunUsingChroot(spec *specs.Spec, bundlePath, homeDir string, stdin io.Reade
if err != nil {
return err
}
if err = ioutils.AtomicWriteFile(filepath.Join(bundlePath, "config.json"), specbytes, 0600); err != nil {
if err = ioutils.AtomicWriteFile(filepath.Join(bundlePath, "config.json"), specbytes, 0o600); err != nil {
return fmt.Errorf("storing runtime configuration: %w", err)
}
logrus.Debugf("config = %v", string(specbytes))
@ -266,7 +265,7 @@ func runUsingChrootMain() {
logrus.Warnf("error %s ownership of container PTY %sto %d/%d: %v", op, from, rootUID, rootGID, err)
}
// Set permissions on the PTY.
if err = ctty.Chmod(0620); err != nil {
if err = ctty.Chmod(0o620); err != nil {
logrus.Errorf("error setting permissions of container PTY: %v", err)
os.Exit(1)
}
@ -526,7 +525,6 @@ func runUsingChroot(spec *specs.Spec, bundlePath string, ctty *os.File, stdin io
cmd.ExtraFiles = append([]*os.File{preader}, cmd.ExtraFiles...)
if err := setPlatformUnshareOptions(spec, cmd); err != nil {
return 1, fmt.Errorf("setting platform unshare options: %w", err)
}
interrupted := make(chan os.Signal, 100)
cmd.Hook = func(int) error {

View File

@ -1,5 +1,4 @@
//go:build freebsd
// +build freebsd
package chroot
@ -191,12 +190,12 @@ func setupChrootBindMounts(spec *specs.Spec, bundlePath string) (undoBinds func(
// XXX: This was copied from the linux version which supports bind mounting files.
// Leaving it here since I plan to add this to FreeBSD's nullfs.
if m.Type != "nullfs" || srcinfo.IsDir() {
if err = os.MkdirAll(target, 0111); err != nil {
if err = os.MkdirAll(target, 0o111); err != nil {
return undoBinds, fmt.Errorf("creating mountpoint %q in mount namespace: %w", target, err)
}
removes = append(removes, target)
} else {
if err = os.MkdirAll(filepath.Dir(target), 0111); err != nil {
if err = os.MkdirAll(filepath.Dir(target), 0o111); err != nil {
return undoBinds, fmt.Errorf("ensuring parent of mountpoint %q (%q) is present in mount namespace: %w", target, filepath.Dir(target), err)
}
// Don't do this until we can support file mounts in nullfs
@ -219,7 +218,7 @@ func setupChrootBindMounts(spec *specs.Spec, bundlePath string) (undoBinds func(
save := saveDir(spec, target)
if err := fileutils.Exists(save); err != nil {
if errors.Is(err, fs.ErrNotExist) {
err = os.MkdirAll(save, 0111)
err = os.MkdirAll(save, 0o111)
}
if err != nil {
return undoBinds, fmt.Errorf("creating file mount save directory %q: %w", save, err)

View File

@ -1,5 +1,4 @@
//go:build linux
// +build linux
package chroot
@ -16,10 +15,10 @@ import (
"github.com/containers/buildah/copier"
"github.com/containers/storage/pkg/mount"
"github.com/containers/storage/pkg/unshare"
"github.com/moby/sys/capability"
"github.com/opencontainers/runc/libcontainer/apparmor"
"github.com/opencontainers/runtime-spec/specs-go"
"github.com/sirupsen/logrus"
"github.com/syndtr/gocapability/capability"
"golang.org/x/sys/unix"
)
@ -179,39 +178,39 @@ func setCapabilities(spec *specs.Spec, keepCaps ...string) error {
capMap := map[capability.CapType][]string{
capability.BOUNDING: spec.Process.Capabilities.Bounding,
capability.EFFECTIVE: spec.Process.Capabilities.Effective,
capability.INHERITABLE: []string{},
capability.INHERITABLE: {},
capability.PERMITTED: spec.Process.Capabilities.Permitted,
capability.AMBIENT: spec.Process.Capabilities.Ambient,
capability.AMBIENT: {},
}
knownCaps := capability.List()
knownCaps := capability.ListKnown()
noCap := capability.Cap(-1)
for capType, capList := range capMap {
for _, capToSet := range capList {
cap := noCap
for _, capSpec := range capList {
capToSet := noCap
for _, c := range knownCaps {
if strings.EqualFold("CAP_"+c.String(), capToSet) {
cap = c
if strings.EqualFold("CAP_"+c.String(), capSpec) {
capToSet = c
break
}
}
if cap == noCap {
return fmt.Errorf("mapping capability %q to a number", capToSet)
if capToSet == noCap {
return fmt.Errorf("mapping capability %q to a number", capSpec)
}
caps.Set(capType, cap)
caps.Set(capType, capToSet)
}
for _, capToSet := range keepCaps {
cap := noCap
for _, capSpec := range keepCaps {
capToSet := noCap
for _, c := range knownCaps {
if strings.EqualFold("CAP_"+c.String(), capToSet) {
cap = c
if strings.EqualFold("CAP_"+c.String(), capSpec) {
capToSet = c
break
}
}
if cap == noCap {
return fmt.Errorf("mapping capability %q to a number", capToSet)
if capToSet == noCap {
return fmt.Errorf("mapping capability %q to a number", capSpec)
}
if currentCaps.Get(capType, cap) {
caps.Set(capType, cap)
if currentCaps.Get(capType, capToSet) {
caps.Set(capType, capToSet)
}
}
}
@ -225,7 +224,7 @@ func makeRlimit(limit specs.POSIXRlimit) unix.Rlimit {
return unix.Rlimit{Cur: limit.Soft, Max: limit.Hard}
}
func createPlatformContainer(options runUsingChrootExecSubprocOptions) error {
func createPlatformContainer(_ runUsingChrootExecSubprocOptions) error {
return errors.New("unsupported createPlatformContainer")
}
@ -302,7 +301,7 @@ func setupChrootBindMounts(spec *specs.Spec, bundlePath string) (undoBinds func(
subDev := filepath.Join(spec.Root.Path, "/dev")
if err := unix.Mount("/dev", subDev, "bind", devFlags, ""); err != nil {
if errors.Is(err, os.ErrNotExist) {
err = os.Mkdir(subDev, 0755)
err = os.Mkdir(subDev, 0o755)
if err == nil {
err = unix.Mount("/dev", subDev, "bind", devFlags, "")
}
@ -326,7 +325,7 @@ func setupChrootBindMounts(spec *specs.Spec, bundlePath string) (undoBinds func(
subProc := filepath.Join(spec.Root.Path, "/proc")
if err := unix.Mount("/proc", subProc, "bind", procFlags, ""); err != nil {
if errors.Is(err, os.ErrNotExist) {
err = os.Mkdir(subProc, 0755)
err = os.Mkdir(subProc, 0o755)
if err == nil {
err = unix.Mount("/proc", subProc, "bind", procFlags, "")
}
@ -341,7 +340,7 @@ func setupChrootBindMounts(spec *specs.Spec, bundlePath string) (undoBinds func(
subSys := filepath.Join(spec.Root.Path, "/sys")
if err := unix.Mount("/sys", subSys, "bind", sysFlags, ""); err != nil {
if errors.Is(err, os.ErrNotExist) {
err = os.Mkdir(subSys, 0755)
err = os.Mkdir(subSys, 0o755)
if err == nil {
err = unix.Mount("/sys", subSys, "bind", sysFlags, "")
}
@ -364,9 +363,9 @@ func setupChrootBindMounts(spec *specs.Spec, bundlePath string) (undoBinds func(
if err := unix.Mount(m.Mountpoint, subSys, "bind", sysFlags, ""); err != nil {
msg := fmt.Sprintf("could not bind mount %q, skipping: %v", m.Mountpoint, err)
if strings.HasPrefix(m.Mountpoint, "/sys") {
logrus.Infof(msg)
logrus.Info(msg)
} else {
logrus.Warningf(msg)
logrus.Warning(msg)
}
continue
}
@ -433,15 +432,15 @@ func setupChrootBindMounts(spec *specs.Spec, bundlePath string) (undoBinds func(
// The target isn't there yet, so create it. If the source is a directory,
// we need a directory, otherwise we need a non-directory (i.e., a file).
if srcinfo.IsDir() {
if err = os.MkdirAll(target, 0755); err != nil {
if err = os.MkdirAll(target, 0o755); err != nil {
return undoBinds, fmt.Errorf("creating mountpoint %q in mount namespace: %w", target, err)
}
} else {
if err = os.MkdirAll(filepath.Dir(target), 0755); err != nil {
if err = os.MkdirAll(filepath.Dir(target), 0o755); err != nil {
return undoBinds, fmt.Errorf("ensuring parent of mountpoint %q (%q) is present in mount namespace: %w", target, filepath.Dir(target), err)
}
var file *os.File
if file, err = os.OpenFile(target, os.O_WRONLY|os.O_CREATE, 0755); err != nil {
if file, err = os.OpenFile(target, os.O_WRONLY|os.O_CREATE, 0o755); err != nil {
return undoBinds, fmt.Errorf("creating mountpoint %q in mount namespace: %w", target, err)
}
file.Close()
@ -594,7 +593,7 @@ func setupChrootBindMounts(spec *specs.Spec, bundlePath string) (undoBinds func(
// Create an empty directory for to use for masking directories.
roEmptyDir := filepath.Join(bundlePath, "empty")
if len(spec.Linux.MaskedPaths) > 0 {
if err := os.Mkdir(roEmptyDir, 0700); err != nil {
if err := os.Mkdir(roEmptyDir, 0o700); err != nil {
return undoBinds, fmt.Errorf("creating empty directory %q: %w", roEmptyDir, err)
}
}

View File

@ -1,5 +1,4 @@
//go:build linux && seccomp
// +build linux,seccomp
package chroot

View File

@ -1,5 +1,4 @@
//go:build freebsd && seccomp
// +build freebsd,seccomp
package chroot

View File

@ -1,5 +1,4 @@
//go:build (!linux && !freebsd) || !seccomp
// +build !linux,!freebsd !seccomp
package chroot

View File

@ -1,5 +1,4 @@
//go:build linux
// +build linux
package chroot

View File

@ -1,5 +1,4 @@
//go:build !linux && !freebsd
// +build !linux,!freebsd
package chroot

View File

@ -1,5 +1,4 @@
//go:build !linux && !freebsd
// +build !linux,!freebsd
package chroot

View File

@ -24,6 +24,7 @@ import (
"github.com/containers/storage/pkg/archive"
"github.com/containers/storage/pkg/stringid"
digest "github.com/opencontainers/go-digest"
v1 "github.com/opencontainers/image-spec/specs-go/v1"
"github.com/sirupsen/logrus"
"golang.org/x/exp/maps"
)
@ -120,10 +121,11 @@ type CommitOptions struct {
// OverrideConfig is applied.
OverrideChanges []string
// ExtraImageContent is a map which describes additional content to add
// to the committed image. The map's keys are filesystem paths in the
// image and the corresponding values are the paths of files whose
// contents will be used in their place. The contents will be owned by
// 0:0 and have mode 0644. Currently only accepts regular files.
// to the new layer in the committed image. The map's keys are
// filesystem paths in the image and the corresponding values are the
// paths of files whose contents will be used in their place. The
// contents will be owned by 0:0 and have mode 0o644. Currently only
// accepts regular files.
ExtraImageContent map[string]string
// SBOMScanOptions encapsulates options which control whether or not we
// run scanners on the rootfs that we're about to commit, and how.
@ -132,17 +134,32 @@ type CommitOptions struct {
// the image in Docker format. Newer BuildKit-based builds don't set
// this field.
CompatSetParent types.OptionalBool
// PrependedLinkedLayers and AppendedLinkedLayers are combinations of
// history entries and locations of either directory trees (if
// directories, per os.Stat()) or uncompressed layer blobs which should
// be added to the image at commit-time. The order of these relative
// to PrependedEmptyLayers and AppendedEmptyLayers, and relative to the
// corresponding members in the Builder object, in the committed image
// is not guaranteed.
PrependedLinkedLayers, AppendedLinkedLayers []LinkedLayer
}
var (
// storageAllowedPolicyScopes overrides the policy for local storage
// to ensure that we can read images from it.
storageAllowedPolicyScopes = signature.PolicyTransportScopes{
"": []signature.PolicyRequirement{
signature.NewPRInsecureAcceptAnything(),
},
}
)
// LinkedLayer combines a history entry with the location of either a directory
// tree (if it's a directory, per os.Stat()) or an uncompressed layer blob
// which should be added to the image at commit-time. The BlobPath and
// History.EmptyLayer fields should be considered mutually-exclusive.
type LinkedLayer struct {
History v1.History // history entry to add
BlobPath string // corresponding uncompressed blob file (layer as a tar archive), or directory tree to archive
}
// storageAllowedPolicyScopes overrides the policy for local storage
// to ensure that we can read images from it.
var storageAllowedPolicyScopes = signature.PolicyTransportScopes{
"": []signature.PolicyRequirement{
signature.NewPRInsecureAcceptAnything(),
},
}
// checkRegistrySourcesAllows checks the $BUILD_REGISTRY_SOURCES environment
// variable, if it's set. The contents are expected to be a JSON-encoded
@ -348,6 +365,7 @@ func (b *Builder) Commit(ctx context.Context, dest types.ImageReference, options
if options.ExtraImageContent == nil {
options.ExtraImageContent = make(map[string]string, len(extraImageContent))
}
// merge in the scanner-generated content
for k, v := range extraImageContent {
if _, set := options.ExtraImageContent[k]; !set {
options.ExtraImageContent[k] = v
@ -438,7 +456,7 @@ func (b *Builder) Commit(ctx context.Context, dest types.ImageReference, options
logrus.Debugf("removing %v from assigned names to image %q", nameToRemove, img.ID)
}
if options.IIDFile != "" {
if err = os.WriteFile(options.IIDFile, []byte("sha256:"+img.ID), 0644); err != nil {
if err = os.WriteFile(options.IIDFile, []byte("sha256:"+img.ID), 0o644); err != nil {
return imgID, nil, "", err
}
}
@ -487,7 +505,6 @@ func (b *Builder) Commit(ctx context.Context, dest types.ImageReference, options
return imgID, nil, "", err
}
logrus.Debugf("added imgID %s to manifestID %s", imgID, manifestID)
}
return imgID, ref, manifestDigest, nil
}

View File

@ -2,6 +2,7 @@ package buildah
import (
"context"
"errors"
"io"
"path/filepath"
"time"
@ -11,6 +12,7 @@ import (
cp "github.com/containers/image/v5/copy"
"github.com/containers/image/v5/docker"
"github.com/containers/image/v5/signature"
is "github.com/containers/image/v5/storage"
"github.com/containers/image/v5/types"
encconfig "github.com/containers/ocicrypt/config"
"github.com/containers/storage"
@ -67,22 +69,31 @@ func getSystemContext(store storage.Store, defaults *types.SystemContext, signat
return sc
}
func retryCopyImage(ctx context.Context, policyContext *signature.PolicyContext, dest, src, registry types.ImageReference, copyOptions *cp.Options, maxRetries int, retryDelay time.Duration) ([]byte, error) {
func retryCopyImage(ctx context.Context, policyContext *signature.PolicyContext, maybeWrappedDest, maybeWrappedSrc, directDest types.ImageReference, copyOptions *cp.Options, maxRetries int, retryDelay time.Duration) ([]byte, error) {
return retryCopyImageWithOptions(ctx, policyContext, maybeWrappedDest, maybeWrappedSrc, directDest, copyOptions, maxRetries, retryDelay, true)
}
// retryCopyImageWithOptions copies an image from maybeWrappedSrc to
// maybeWrappedDest, retrying on errors that look transient.  directDest is
// the unwrapped destination reference, consulted only when deciding whether
// an error is worth retrying: when retryOnLayerUnknown is set and the
// destination is local storage, storage.ErrLayerUnknown is treated as
// retryable; for non-registry destinations nothing else is retried; for
// registries the decision is delegated to the retry package.
func retryCopyImageWithOptions(ctx context.Context, policyContext *signature.PolicyContext, maybeWrappedDest, maybeWrappedSrc, directDest types.ImageReference, copyOptions *cp.Options, maxRetries int, retryDelay time.Duration, retryOnLayerUnknown bool) ([]byte, error) {
	var (
		manifestBytes []byte
		err           error
	)
	err = retry.IfNecessary(ctx, func() error {
		manifestBytes, err = cp.Image(ctx, policyContext, maybeWrappedDest, maybeWrappedSrc, copyOptions)
		return err
	}, &retry.RetryOptions{MaxRetry: maxRetries, Delay: retryDelay, IsErrorRetryable: func(err error) bool {
		if retryOnLayerUnknown && directDest.Transport().Name() == is.Transport.Name() && errors.Is(err, storage.ErrLayerUnknown) {
			// we were trying to reuse a layer that belonged to an
			// image that was deleted at just the right (worst
			// possible) time? yeah, try again
			return true
		}
		if directDest.Transport().Name() != docker.Transport.Name() {
			// if we're not talking to a registry, then nah
			return false
		}
		// hand it off to the default should-this-be-retried logic
		return retry.IsErrorRetryable(err)
	}})
	return manifestBytes, err
}

View File

@ -61,7 +61,7 @@ func unmarshalConvertedConfig(ctx context.Context, dest interface{}, img types.I
return nil
}
func (b *Builder) initConfig(ctx context.Context, img types.Image, sys *types.SystemContext) error {
func (b *Builder) initConfig(ctx context.Context, sys *types.SystemContext, img types.Image, options *BuilderOptions) error {
if img != nil { // A pre-existing image, as opposed to a "FROM scratch" new one.
rawManifest, manifestMIMEType, err := img.Manifest(ctx)
if err != nil {
@ -101,6 +101,21 @@ func (b *Builder) initConfig(ctx context.Context, img types.Image, sys *types.Sy
}
}
}
} else {
if options == nil || options.CompatScratchConfig != types.OptionalBoolTrue {
b.Docker = docker.V2Image{
V1Image: docker.V1Image{
Config: &docker.Config{
WorkingDir: "/",
},
},
}
b.OCIv1 = ociv1.Image{
Config: ociv1.ImageConfig{
WorkingDir: "/",
},
}
}
}
b.setupLogger()
@ -753,3 +768,62 @@ func (b *Builder) AddAppendedEmptyLayer(created *time.Time, createdBy, author, c
func (b *Builder) ClearAppendedEmptyLayers() {
b.AppendedEmptyLayers = nil
}
// AddPrependedLinkedLayer queues a history entry — and, unless blobPath is
// empty, a layer — to be placed in the committed image after any history
// inherited from the base image, but ahead of the history item describing
// the new layer being committed.
// blobPath may name either an uncompressed layer archive or a directory
// whose contents will be archived into one; leaving it empty behaves much
// like calling AddPrependedEmptyLayer().
func (b *Builder) AddPrependedLinkedLayer(created *time.Time, createdBy, author, comment, blobPath string) {
	var when *time.Time
	if created != nil {
		stamp := *created // copy, so later changes by the caller don't affect us
		when = &stamp
	}
	entry := LinkedLayer{
		BlobPath: blobPath,
		History: ociv1.History{
			Created:    when,
			CreatedBy:  createdBy,
			Author:     author,
			Comment:    comment,
			EmptyLayer: blobPath == "",
		},
	}
	b.PrependedLinkedLayers = append(b.PrependedLinkedLayers, entry)
}
// ClearPrependedLinkedLayers clears the list of history entries that we'll
// add to the committed image before the layer that we're adding (if we're
// adding it).
func (b *Builder) ClearPrependedLinkedLayers() {
	b.PrependedLinkedLayers = nil
}
// AddAppendedLinkedLayer queues a history entry — and, unless blobPath is
// empty, a layer — to be placed in the committed image after the history
// item describing the new layer being committed.
// blobPath may name either an uncompressed layer archive or a directory
// whose contents will be archived into one; leaving it empty behaves much
// like calling AddAppendedEmptyLayer().
func (b *Builder) AddAppendedLinkedLayer(created *time.Time, createdBy, author, comment, blobPath string) {
	var when *time.Time
	if created != nil {
		stamp := *created // copy, so later changes by the caller don't affect us
		when = &stamp
	}
	entry := LinkedLayer{
		BlobPath: blobPath,
		History: ociv1.History{
			Created:    when,
			CreatedBy:  createdBy,
			Author:     author,
			Comment:    comment,
			EmptyLayer: blobPath == "",
		},
	}
	b.AppendedLinkedLayers = append(b.AppendedLinkedLayers, entry)
}
// ClearAppendedLinkedLayers clears the list of linked layers that we'll add
// to the committed image after the layer that we're adding (if we're adding
// it).
func (b *Builder) ClearAppendedLinkedLayers() {
	b.AppendedLinkedLayers = nil
}

View File

@ -32,15 +32,85 @@ const (
copierCommand = "buildah-copier"
maxLoopsFollowed = 64
// See http://pubs.opengroup.org/onlinepubs/9699919799/utilities/pax.html#tag_20_92_13_06, from archive/tar
cISUID = 04000 // Set uid, from archive/tar
cISGID = 02000 // Set gid, from archive/tar
cISVTX = 01000 // Save text (sticky bit), from archive/tar
cISUID = 0o4000 // Set uid, from archive/tar
cISGID = 0o2000 // Set gid, from archive/tar
cISVTX = 0o1000 // Save text (sticky bit), from archive/tar
)
// init registers copierMain as the function to run when this binary is
// re-invoked under the copierCommand name (the containers/storage reexec
// convention), so the copier helper can run as a separate process.
func init() {
	reexec.Register(copierCommand, copierMain)
}
// extendedGlob calls filepath.Glob() on the passed-in patterns. If there is a
// "**" component in the pattern, filepath.Glob() will be called with the "**"
// replaced with all of the subdirectories under that point, and the results
// will be concatenated.
func extendedGlob(pattern string) (matches []string, err error) {
subdirs := func(dir string) []string {
var subdirectories []string
if err := filepath.WalkDir(dir, func(path string, d fs.DirEntry, err error) error {
if err != nil {
return nil
}
if d.IsDir() {
if rel, err := filepath.Rel(dir, path); err == nil {
subdirectories = append(subdirectories, rel)
}
}
return nil
}); err != nil {
subdirectories = []string{"."}
}
return subdirectories
}
expandPatterns := func(pattern string) []string {
components := []string{}
dir := pattern
file := ""
for dir != "" && dir != string(os.PathSeparator) {
dir, file = filepath.Split(dir)
components = append([]string{file}, components...)
dir = strings.TrimSuffix(dir, string(os.PathSeparator))
}
patterns := []string{string(os.PathSeparator)}
for i := range components {
var nextPatterns []string
if components[i] == "**" {
for _, parent := range patterns {
nextSubdirs := subdirs(parent)
for _, nextSubdir := range nextSubdirs {
nextPatterns = append(nextPatterns, filepath.Join(parent, nextSubdir))
}
}
} else {
for _, parent := range patterns {
nextPattern := filepath.Join(parent, components[i])
nextPatterns = append(nextPatterns, nextPattern)
}
}
patterns = []string{}
seen := map[string]struct{}{}
for _, nextPattern := range nextPatterns {
if _, seen := seen[nextPattern]; seen {
continue
}
patterns = append(patterns, nextPattern)
seen[nextPattern] = struct{}{}
}
}
return patterns
}
patterns := expandPatterns(pattern)
for _, pattern := range patterns {
theseMatches, err := filepath.Glob(pattern)
if err != nil {
return nil, err
}
matches = append(matches, theseMatches...)
}
return matches, nil
}
// isArchivePath returns true if the specified path can be read like a (possibly
// compressed) tarball.
func isArchivePath(path string) bool {
@ -196,24 +266,19 @@ type StatForItem struct {
}
// getResponse encodes a response for a single Get request.
type getResponse struct{}

// putResponse encodes a response for a single Put request.
type putResponse struct{}

// mkdirResponse encodes a response for a single Mkdir request.
type mkdirResponse struct{}

// removeResponse encodes a response for a single Remove request.
type removeResponse struct{}

// EvalOptions controls parts of Eval()'s behavior.
type EvalOptions struct{}
// Eval evaluates the directory's path, including any intermediate symbolic
// links.
@ -222,7 +287,7 @@ type EvalOptions struct {
// If the directory is specified as an absolute path, it should either be the
// root directory or a subdirectory of the root directory. Otherwise, the
// directory is treated as a path relative to the root directory.
func Eval(root string, directory string, options EvalOptions) (string, error) {
func Eval(root string, directory string, _ EvalOptions) (string, error) {
req := request{
Request: requestEval,
Root: root,
@ -1004,7 +1069,7 @@ func copierHandlerStat(req request, pm *fileutils.PatternMatcher) *response {
Glob: req.preservedGlobs[i],
}
// glob this pattern
globMatched, err := filepath.Glob(glob)
globMatched, err := extendedGlob(glob)
if err != nil {
s.Error = fmt.Sprintf("copier: stat: %q while matching glob pattern %q", err.Error(), glob)
}
@ -1132,7 +1197,7 @@ func copierHandlerGet(bulkWriter io.Writer, req request, pm *fileutils.PatternMa
var queue []string
globMatchedCount := 0
for _, glob := range req.Globs {
globMatched, err := filepath.Glob(glob)
globMatched, err := extendedGlob(glob)
if err != nil {
return errorResponse("copier: get: glob %q: %v", glob, err)
}
@ -1518,7 +1583,7 @@ func copierHandlerPut(bulkReader io.Reader, req request, idMappings *idtools.IDM
dirUID, dirGID = req.PutOptions.ChownDirs.UID, req.PutOptions.ChownDirs.GID
defaultDirUID, defaultDirGID = dirUID, dirGID
}
defaultDirMode := os.FileMode(0755)
defaultDirMode := os.FileMode(0o755)
if req.PutOptions.ChmodDirs != nil {
defaultDirMode = *req.PutOptions.ChmodDirs
}
@ -1559,7 +1624,7 @@ func copierHandlerPut(bulkReader io.Reader, req request, idMappings *idtools.IDM
for _, component := range strings.Split(rel, string(os.PathSeparator)) {
subdir = filepath.Join(subdir, component)
path := filepath.Join(req.Root, subdir)
if err := os.Mkdir(path, 0700); err == nil {
if err := os.Mkdir(path, 0o700); err == nil {
if err = lchown(path, defaultDirUID, defaultDirGID); err != nil {
return fmt.Errorf("copier: put: error setting owner of %q to %d:%d: %w", path, defaultDirUID, defaultDirGID, err)
}
@ -1593,7 +1658,7 @@ func copierHandlerPut(bulkReader io.Reader, req request, idMappings *idtools.IDM
return nil
}
createFile := func(path string, tr *tar.Reader) (int64, error) {
f, err := os.OpenFile(path, os.O_CREATE|os.O_WRONLY|os.O_TRUNC|os.O_EXCL, 0600)
f, err := os.OpenFile(path, os.O_CREATE|os.O_WRONLY|os.O_TRUNC|os.O_EXCL, 0o600)
if err != nil && errors.Is(err, os.ErrExist) {
if req.PutOptions.NoOverwriteDirNonDir {
if st, err2 := os.Lstat(path); err2 == nil && st.IsDir() {
@ -1611,13 +1676,13 @@ func copierHandlerPut(bulkReader io.Reader, req request, idMappings *idtools.IDM
return 0, fmt.Errorf("copier: put: error removing item to be overwritten %q: %w", path, err)
}
}
f, err = os.OpenFile(path, os.O_CREATE|os.O_WRONLY|os.O_TRUNC|os.O_EXCL, 0600)
f, err = os.OpenFile(path, os.O_CREATE|os.O_WRONLY|os.O_TRUNC|os.O_EXCL, 0o600)
}
if err != nil && os.IsPermission(err) {
if err = makeDirectoryWriteable(filepath.Dir(path)); err != nil {
return 0, err
}
f, err = os.OpenFile(path, os.O_CREATE|os.O_WRONLY|os.O_TRUNC|os.O_EXCL, 0600)
f, err = os.OpenFile(path, os.O_CREATE|os.O_WRONLY|os.O_TRUNC|os.O_EXCL, 0o600)
}
if err != nil {
return 0, fmt.Errorf("copier: put: error opening file %q for writing: %w", path, err)
@ -1781,14 +1846,14 @@ func copierHandlerPut(bulkReader io.Reader, req request, idMappings *idtools.IDM
ignoredItems[nameBeforeRenaming] = struct{}{}
goto nextHeader
}
if err = mknod(path, chrMode(0600), int(mkdev(devMajor, devMinor))); err != nil && errors.Is(err, os.ErrExist) {
if err = mknod(path, chrMode(0o600), int(mkdev(devMajor, devMinor))); err != nil && errors.Is(err, os.ErrExist) {
if req.PutOptions.NoOverwriteDirNonDir {
if st, err := os.Lstat(path); err == nil && st.IsDir() {
break
}
}
if err = os.RemoveAll(path); err == nil {
err = mknod(path, chrMode(0600), int(mkdev(devMajor, devMinor)))
err = mknod(path, chrMode(0o600), int(mkdev(devMajor, devMinor)))
}
}
case tar.TypeBlock:
@ -1796,26 +1861,26 @@ func copierHandlerPut(bulkReader io.Reader, req request, idMappings *idtools.IDM
ignoredItems[nameBeforeRenaming] = struct{}{}
goto nextHeader
}
if err = mknod(path, blkMode(0600), int(mkdev(devMajor, devMinor))); err != nil && errors.Is(err, os.ErrExist) {
if err = mknod(path, blkMode(0o600), int(mkdev(devMajor, devMinor))); err != nil && errors.Is(err, os.ErrExist) {
if req.PutOptions.NoOverwriteDirNonDir {
if st, err := os.Lstat(path); err == nil && st.IsDir() {
break
}
}
if err = os.RemoveAll(path); err == nil {
err = mknod(path, blkMode(0600), int(mkdev(devMajor, devMinor)))
err = mknod(path, blkMode(0o600), int(mkdev(devMajor, devMinor)))
}
}
case tar.TypeDir:
// FreeBSD can return EISDIR for "mkdir /":
// https://bugs.freebsd.org/bugzilla/show_bug.cgi?id=59739.
if err = os.Mkdir(path, 0700); err != nil && (errors.Is(err, os.ErrExist) || errors.Is(err, syscall.EISDIR)) {
if err = os.Mkdir(path, 0o700); err != nil && (errors.Is(err, os.ErrExist) || errors.Is(err, syscall.EISDIR)) {
if st, stErr := os.Lstat(path); stErr == nil && !st.IsDir() {
if req.PutOptions.NoOverwriteNonDirDir {
break
}
if err = os.Remove(path); err == nil {
err = os.Mkdir(path, 0700)
err = os.Mkdir(path, 0o700)
}
} else {
err = stErr
@ -1836,14 +1901,14 @@ func copierHandlerPut(bulkReader io.Reader, req request, idMappings *idtools.IDM
// the archive more than once for whatever reason
directoryModes[path] = mode
case tar.TypeFifo:
if err = mkfifo(path, 0600); err != nil && errors.Is(err, os.ErrExist) {
if err = mkfifo(path, 0o600); err != nil && errors.Is(err, os.ErrExist) {
if req.PutOptions.NoOverwriteDirNonDir {
if st, err := os.Lstat(path); err == nil && st.IsDir() {
break
}
}
if err = os.RemoveAll(path); err == nil {
err = mkfifo(path, 0600)
err = mkfifo(path, 0o600)
}
}
case tar.TypeXGlobalHeader:
@ -1930,7 +1995,7 @@ func copierHandlerMkdir(req request, idMappings *idtools.IDMappings) (*response,
if req.MkdirOptions.ChownNew != nil {
dirUID, dirGID = req.MkdirOptions.ChownNew.UID, req.MkdirOptions.ChownNew.GID
}
dirMode := os.FileMode(0755)
dirMode := os.FileMode(0o755)
if req.MkdirOptions.ChmodNew != nil {
dirMode = *req.MkdirOptions.ChmodNew
}
@ -1957,7 +2022,7 @@ func copierHandlerMkdir(req request, idMappings *idtools.IDMappings) (*response,
for _, component := range strings.Split(rel, string(os.PathSeparator)) {
subdir = filepath.Join(subdir, component)
path := filepath.Join(req.Root, subdir)
if err := os.Mkdir(path, 0700); err == nil {
if err := os.Mkdir(path, 0o700); err == nil {
if err = chown(path, dirUID, dirGID); err != nil {
return errorResponse("copier: mkdir: error setting owner of %q to %d:%d: %v", path, dirUID, dirGID, err)
}

View File

@ -1,5 +1,4 @@
//go:build darwin || (linux && mips) || (linux && mipsle) || (linux && mips64) || (linux && mips64le)
// +build darwin linux,mips linux,mipsle linux,mips64 linux,mips64le
package copier

View File

@ -1,5 +1,4 @@
//go:build (linux && !mips && !mipsle && !mips64 && !mips64le) || freebsd || netbsd
// +build linux,!mips,!mipsle,!mips64,!mips64le freebsd netbsd
package copier

View File

@ -1,5 +1,4 @@
//go:build !windows
// +build !windows
package copier
@ -25,6 +24,7 @@ func (h *hardlinkChecker) Check(fi os.FileInfo) string {
}
return ""
}
func (h *hardlinkChecker) Add(fi os.FileInfo, name string) {
if st, ok := fi.Sys().(*syscall.Stat_t); ok && fi.Mode().IsRegular() && st.Nlink > 1 {
h.hardlinks.Store(makeHardlinkDeviceAndInode(st), name)

View File

@ -1,5 +1,4 @@
//go:build !linux && !darwin
// +build !linux,!darwin
package copier
@ -7,11 +6,11 @@ import (
"os"
)
// hardlinkChecker is a no-op stand-in used on platforms where we don't
// track hard links.
type hardlinkChecker struct{}

// Check always reports that no previously-seen item shares this item's
// identity on this platform.
func (h *hardlinkChecker) Check(_ os.FileInfo) string {
	return ""
}

// Add is a no-op on this platform.
func (h *hardlinkChecker) Add(_ os.FileInfo, _ string) {
}

View File

@ -1,5 +1,4 @@
//go:build !windows && !freebsd
// +build !windows,!freebsd
package copier

View File

@ -1,5 +1,4 @@
//go:build freebsd
// +build freebsd
package copier

View File

@ -1,5 +1,4 @@
//go:build !windows
// +build !windows
package copier
@ -58,7 +57,7 @@ func lchown(path string, uid, gid int) error {
return os.Lchown(path, uid, gid)
}
func lutimes(isSymlink bool, path string, atime, mtime time.Time) error {
func lutimes(_ bool, path string, atime, mtime time.Time) error {
if atime.IsZero() || mtime.IsZero() {
now := time.Now()
if atime.IsZero() {

View File

@ -1,4 +1,4 @@
// +build windows
//go:build windows
package copier
@ -83,6 +83,6 @@ func sameDevice(a, b os.FileInfo) bool {
}
const (
	// testModeMask selects the permission bits compared by the tests on
	// this platform — presumably owner read/write only; TODO confirm
	// against the tests that use it.
	testModeMask           = int64(0o600)
	testIgnoreSymlinkDates = true
)

View File

@ -1,5 +1,4 @@
//go:build linux || netbsd || freebsd || darwin
// +build linux netbsd freebsd darwin
package copier
@ -10,15 +9,18 @@ import (
"strings"
"syscall"
"github.com/containers/storage/pkg/unshare"
"github.com/sirupsen/logrus"
"golang.org/x/sys/unix"
)
const (
	// xattrsSupported indicates that this platform supports extended
	// attributes.
	xattrsSupported = true
	// imaXattr is the name of the kernel IMA extended attribute.
	imaXattr = "security.ima"
)

var (
	// relevantAttributes lists the attributes that we preserve - we discard others
	relevantAttributes = []string{"security.capability", imaXattr, "user.*"}
	// initial buffer sizes used when listing and reading extended
	// attributes.
	initialXattrListSize  = 64 * 1024
	initialXattrValueSize = 64 * 1024
)
@ -93,7 +95,11 @@ func Lsetxattrs(path string, xattrs map[string]string) error {
for attribute, value := range xattrs {
if isRelevantXattr(attribute) {
if err := unix.Lsetxattr(path, attribute, []byte(value), 0); err != nil {
return fmt.Errorf("setting value of extended attribute %q on %q: %w", attribute, path, err)
if unshare.IsRootless() && attribute == imaXattr {
logrus.Warnf("Unable to set %q xattr on %q: %v", attribute, path, err)
} else {
return fmt.Errorf("setting value of extended attribute %q on %q: %w", attribute, path, err)
}
}
}
}

View File

@ -1,4 +1,4 @@
// +build !linux,!netbsd,!freebsd,!darwin
//go:build !linux && !netbsd && !freebsd && !darwin
package copier

View File

@ -19,12 +19,11 @@ type AdditionalBuildContext struct {
IsURL bool
// Value is the name of an image which may or may not have already been pulled.
IsImage bool
// Value holds a URL, an image name, or an absolute filesystem path.
// Value holds a URL (if IsURL), an image name (if IsImage), or an absolute filesystem path.
Value string
// Absolute filesystem path to downloaded and exported build context
// from external tar archive. This will be populated only if following
// buildcontext is created from IsURL and was downloaded before in any
// of the RUN step.
// Absolute filesystem path to a downloaded and exported build context
// from an external tar archive. This will be populated only if the
// build context was a URL and its contents have been downloaded.
DownloadedCache string
}
@ -374,4 +373,10 @@ type BuildOptions struct {
// base images or by a VOLUME instruction to be preserved during RUN
// instructions. Newer BuildKit-based docker build doesn't bother.
CompatVolumes types.OptionalBool
// CompatScratchConfig causes the image, if it does not have a base
// image, to begin with a truly empty default configuration instead of
// a minimal default configuration. Newer BuildKit-based docker build
// provides a minimal initial configuration with a working directory
// set in it.
CompatScratchConfig types.OptionalBool
}

View File

@ -1,5 +1,4 @@
//go:build freebsd
// +build freebsd
package define
@ -11,7 +10,5 @@ const (
TempDir = "/var/tmp"
)
// BindOptions are the default mount options for bind mounts on this platform.
var BindOptions = []string{}

View File

@ -1,5 +1,4 @@
//go:build linux
// +build linux
package define
@ -11,7 +10,5 @@ const (
TempDir = "/dev/shm"
)
// BindOptions are the default mount options for bind mounts on this platform.
var BindOptions = []string{"bind"}

View File

@ -1,5 +1,4 @@
//go:build darwin || windows || netbsd
// +build darwin windows netbsd
package define
@ -11,7 +10,5 @@ const (
TempDir = "/var/tmp"
)
// BindOptions are the default mount options for bind mounts on this platform.
var BindOptions = []string{""}

View File

@ -29,7 +29,7 @@ const (
// identify working containers.
Package = "buildah"
// Version for the Package. Also used by .packit.sh for Packit builds.
Version = "1.37.0"
Version = "1.38.0-dev"
// DefaultRuntime if containers.conf fails.
DefaultRuntime = "runc"
@ -254,9 +254,16 @@ func parseGitBuildContext(url string) (string, string, string) {
return gitBranchPart[0], gitSubdir, gitBranch
}
// isGitTag reports whether "ref" should be fetched from "remote" using a
// tag refspec.  It runs `git ls-remote --exit-code`, which exits non-zero
// when no remote ref matches "ref"; only the exit status is used.
// NOTE(review): the condition looks inverted relative to the name — this
// returns true exactly when ls-remote finds NO matching ref.  Confirm
// whether the intent is "ref isn't a branch/tag name ls-remote can resolve,
// so try fetching it via a tag refspec".
func isGitTag(remote, ref string) bool {
	if _, err := exec.Command("git", "ls-remote", "--exit-code", remote, ref).Output(); err != nil {
		return true
	}
	return false
}
func cloneToDirectory(url, dir string) ([]byte, string, error) {
var cmd *exec.Cmd
gitRepo, gitSubdir, gitBranch := parseGitBuildContext(url)
gitRepo, gitSubdir, gitRef := parseGitBuildContext(url)
// init repo
cmd = exec.Command("git", "init", dir)
combinedOutput, err := cmd.CombinedOutput()
@ -270,27 +277,23 @@ func cloneToDirectory(url, dir string) ([]byte, string, error) {
if err != nil {
return combinedOutput, gitSubdir, fmt.Errorf("failed while performing `git remote add`: %w", err)
}
// fetch required branch or commit and perform checkout
// Always default to `HEAD` if nothing specified
fetch := "HEAD"
if gitBranch != "" {
fetch = gitBranch
if gitRef != "" {
if ok := isGitTag(url, gitRef); ok {
gitRef += ":refs/tags/" + gitRef
}
}
logrus.Debugf("fetching repo %q and branch (or commit ID) %q to %q", gitRepo, fetch, dir)
cmd = exec.Command("git", "fetch", "--depth=1", "origin", "--", fetch)
logrus.Debugf("fetching repo %q and branch (or commit ID) %q to %q", gitRepo, gitRef, dir)
args := []string{"fetch", "-u", "--depth=1", "origin", "--", gitRef}
cmd = exec.Command("git", args...)
cmd.Dir = dir
combinedOutput, err = cmd.CombinedOutput()
if err != nil {
return combinedOutput, gitSubdir, fmt.Errorf("failed while performing `git fetch`: %w", err)
}
if fetch == "HEAD" {
// We fetched default branch therefore
// we don't have any valid `branch` or
// `commit` name hence checkout detached
// `FETCH_HEAD`
fetch = "FETCH_HEAD"
}
cmd = exec.Command("git", "checkout", fetch)
cmd = exec.Command("git", "checkout", "FETCH_HEAD")
cmd.Dir = dir
combinedOutput, err = cmd.CombinedOutput()
if err != nil {
@ -324,7 +327,7 @@ func downloadToDirectory(url, dir string) error {
}
dockerfile := filepath.Join(dir, "Dockerfile")
// Assume this is a Dockerfile
if err := ioutils.AtomicWriteFile(dockerfile, body, 0600); err != nil {
if err := ioutils.AtomicWriteFile(dockerfile, body, 0o600); err != nil {
return fmt.Errorf("failed to write %q to %q: %w", url, dockerfile, err)
}
}
@ -342,7 +345,7 @@ func stdinToDirectory(dir string) error {
if err := chrootarchive.Untar(reader, dir, nil); err != nil {
dockerfile := filepath.Join(dir, "Dockerfile")
// Assume this is a Dockerfile
if err := ioutils.AtomicWriteFile(dockerfile, b, 0600); err != nil {
if err := ioutils.AtomicWriteFile(dockerfile, b, 0o600); err != nil {
return fmt.Errorf("failed to write bytes to %q: %w", dockerfile, err)
}
}

View File

@ -1,4 +1,4 @@
// +build darwin linux
//go:build darwin || linux
package define

View File

@ -1,4 +1,4 @@
// +build !linux,!darwin
//go:build !linux && !darwin
package define

View File

@ -26,6 +26,7 @@ import (
"github.com/containers/image/v5/types"
"github.com/containers/storage"
"github.com/containers/storage/pkg/archive"
"github.com/containers/storage/pkg/chrootarchive"
"github.com/containers/storage/pkg/idtools"
"github.com/containers/storage/pkg/ioutils"
digest "github.com/opencontainers/go-digest"
@ -33,6 +34,7 @@ import (
v1 "github.com/opencontainers/image-spec/specs-go/v1"
"github.com/sirupsen/logrus"
"golang.org/x/exp/maps"
"golang.org/x/exp/slices"
)
const (
@ -80,7 +82,9 @@ type containerImageRef struct {
parent string
blobDirectory string
preEmptyLayers []v1.History
preLayers []commitLinkedLayerInfo
postEmptyLayers []v1.History
postLayers []commitLinkedLayerInfo
overrideChanges []string
overrideConfig *manifest.Schema2Config
extraImageContent map[string]string
@ -92,6 +96,13 @@ type blobLayerInfo struct {
Size int64
}
// commitLinkedLayerInfo pairs an API-supplied LinkedLayer with the
// placeholder layer ID we assign to it while assembling the image, along
// with the digest and size of its uncompressed contents — presumably filled
// in when the blob is processed; confirm at the point where these fields
// are set.
type commitLinkedLayerInfo struct {
	layerID            string // more like layer "ID"
	linkedLayer        LinkedLayer
	uncompressedDigest digest.Digest
	size               int64
}
type containerImageSource struct {
path string
ref *containerImageRef
@ -278,7 +289,6 @@ func (i *containerImageRef) extractRootfs(opts ExtractRootfsOptions) (io.ReadClo
err := copier.Get(mountPoint, mountPoint, copierOptions, []string{"."}, pipeWriter)
errChan <- err
pipeWriter.Close()
}()
return ioutils.NewReadCloserWrapper(pipeReader, func() error {
if err = pipeReader.Close(); err != nil {
@ -398,7 +408,7 @@ func (i *containerImageRef) createConfigsAndManifests() (v1.Image, v1.Manifest,
return oimage, omanifest, dimage, dmanifest, nil
}
func (i *containerImageRef) NewImageSource(ctx context.Context, sc *types.SystemContext) (src types.ImageSource, err error) {
func (i *containerImageRef) NewImageSource(_ context.Context, _ *types.SystemContext) (src types.ImageSource, err error) {
// Decide which type of manifest and configuration output we're going to provide.
manifestType := i.preferredManifestType
// If it's not a format we support, return an error.
@ -406,8 +416,17 @@ func (i *containerImageRef) NewImageSource(ctx context.Context, sc *types.System
return nil, fmt.Errorf("no supported manifest types (attempted to use %q, only know %q and %q)",
manifestType, v1.MediaTypeImageManifest, manifest.DockerV2Schema2MediaType)
}
// Start building the list of layers using the read-write layer.
// These maps will let us check if a layer ID is part of one group or another.
parentLayerIDs := make(map[string]bool)
apiLayerIDs := make(map[string]bool)
// Start building the list of layers with any prepended layers.
layers := []string{}
for _, preLayer := range i.preLayers {
layers = append(layers, preLayer.layerID)
apiLayerIDs[preLayer.layerID] = true
}
// Now look at the read-write layer, and prepare to work our way back
// through all of its parent layers.
layerID := i.layerID
layer, err := i.store.Layer(layerID)
if err != nil {
@ -417,7 +436,15 @@ func (i *containerImageRef) NewImageSource(ctx context.Context, sc *types.System
// or making a confidential workload, we're only producing one layer, so stop at
// the layer ID of the top layer, which we won't really be using anyway.
for layer != nil {
layers = append(append([]string{}, layerID), layers...)
if layerID == i.layerID {
// append the layer for this container to the list,
// whether it's first or after some prepended layers
layers = append(layers, layerID)
} else {
// prepend this parent layer to the list
layers = append(append([]string{}, layerID), layers...)
parentLayerIDs[layerID] = true
}
layerID = layer.Parent
if layerID == "" || i.confidentialWorkload.Convert || i.squash {
err = nil
@ -430,14 +457,24 @@ func (i *containerImageRef) NewImageSource(ctx context.Context, sc *types.System
}
layer = nil
// If we're slipping in a synthesized layer, we need to add a placeholder for it
// to the list.
// If we're slipping in a synthesized layer to hold some files, we need
// to add a placeholder for it to the list just after the read-write
// layer. Confidential workloads and squashed images will just inline
// the files, so we don't need to create a layer in those cases.
const synthesizedLayerID = "(synthesized layer)"
if len(i.extraImageContent) > 0 && !i.confidentialWorkload.Convert && !i.squash {
layers = append(layers, synthesizedLayerID)
}
// Now add any API-supplied layers we have to append.
for _, postLayer := range i.postLayers {
layers = append(layers, postLayer.layerID)
apiLayerIDs[postLayer.layerID] = true
}
logrus.Debugf("layer list: %q", layers)
// It's simpler from here on to keep track of these as a group.
apiLayers := append(slices.Clone(i.preLayers), slices.Clone(i.postLayers)...)
// Make a temporary directory to hold blobs.
path, err := os.MkdirTemp(tmpdir.GetTempDir(), define.Package)
if err != nil {
@ -469,21 +506,26 @@ func (i *containerImageRef) NewImageSource(ctx context.Context, sc *types.System
if i.confidentialWorkload.Convert || i.squash {
what = fmt.Sprintf("container %q", i.containerID)
}
if layerID == synthesizedLayerID {
what = synthesizedLayerID
}
if apiLayerIDs[layerID] {
what = layerID
}
// The default layer media type assumes no compression.
omediaType := v1.MediaTypeImageLayer
dmediaType := docker.V2S2MediaTypeUncompressedLayer
// Look up this layer.
var layerUncompressedDigest digest.Digest
var layerUncompressedSize int64
if layerID != synthesizedLayerID {
layer, err := i.store.Layer(layerID)
if err != nil {
return nil, fmt.Errorf("unable to locate layer %q: %w", layerID, err)
}
layerID = layer.ID
layerUncompressedDigest = layer.UncompressedDigest
layerUncompressedSize = layer.UncompressedSize
} else {
linkedLayerHasLayerID := func(l commitLinkedLayerInfo) bool { return l.layerID == layerID }
if apiLayerIDs[layerID] {
// API-provided prepended or appended layer
apiLayerIndex := slices.IndexFunc(apiLayers, linkedLayerHasLayerID)
layerUncompressedDigest = apiLayers[apiLayerIndex].uncompressedDigest
layerUncompressedSize = apiLayers[apiLayerIndex].size
} else if layerID == synthesizedLayerID {
// layer diff consisting of extra files to synthesize into a layer
diffFilename, digest, size, err := i.makeExtraImageContentDiff(true)
if err != nil {
return nil, fmt.Errorf("unable to generate layer for additional content: %w", err)
@ -492,10 +534,20 @@ func (i *containerImageRef) NewImageSource(ctx context.Context, sc *types.System
extraImageContentDiffDigest = digest
layerUncompressedDigest = digest
layerUncompressedSize = size
} else {
// "normal" layer
layer, err := i.store.Layer(layerID)
if err != nil {
return nil, fmt.Errorf("unable to locate layer %q: %w", layerID, err)
}
layerID = layer.ID
layerUncompressedDigest = layer.UncompressedDigest
layerUncompressedSize = layer.UncompressedSize
}
// If we already know the digest of the contents of parent
// layers, reuse their blobsums, diff IDs, and sizes.
if !i.confidentialWorkload.Convert && !i.squash && layerID != i.layerID && layerID != synthesizedLayerID && layerUncompressedDigest != "" {
// We already know the digest of the contents of parent layers,
// so if this is a parent layer, and we know its digest, reuse
// its blobsum, diff ID, and size.
if !i.confidentialWorkload.Convert && !i.squash && parentLayerIDs[layerID] && layerUncompressedDigest != "" {
layerBlobSum := layerUncompressedDigest
layerBlobSize := layerUncompressedSize
diffID := layerUncompressedDigest
@ -546,7 +598,20 @@ func (i *containerImageRef) NewImageSource(ctx context.Context, sc *types.System
return nil, err
}
} else {
if layerID != synthesizedLayerID {
if apiLayerIDs[layerID] {
// We're reading an API-supplied blob.
apiLayerIndex := slices.IndexFunc(apiLayers, linkedLayerHasLayerID)
f, err := os.Open(apiLayers[apiLayerIndex].linkedLayer.BlobPath)
if err != nil {
return nil, fmt.Errorf("opening layer blob for %s: %w", layerID, err)
}
rc = f
} else if layerID == synthesizedLayerID {
// Slip in additional content as an additional layer.
if rc, err = os.Open(extraImageContentDiff); err != nil {
return nil, err
}
} else {
// If we're up to the final layer, but we don't want to
// include a diff for it, we're done.
if i.emptyLayer && layerID == i.layerID {
@ -557,16 +622,11 @@ func (i *containerImageRef) NewImageSource(ctx context.Context, sc *types.System
if err != nil {
return nil, fmt.Errorf("extracting %s: %w", what, err)
}
} else {
// Slip in additional content as an additional layer.
if rc, err = os.Open(extraImageContentDiff); err != nil {
return nil, err
}
}
}
srcHasher := digest.Canonical.Digester()
// Set up to write the possibly-recompressed blob.
layerFile, err := os.OpenFile(filepath.Join(path, "layer"), os.O_CREATE|os.O_WRONLY, 0600)
layerFile, err := os.OpenFile(filepath.Join(path, "layer"), os.O_CREATE|os.O_WRONLY, 0o600)
if err != nil {
rc.Close()
return nil, fmt.Errorf("opening file for %s: %w", what, err)
@ -678,7 +738,7 @@ func (i *containerImageRef) NewImageSource(ctx context.Context, sc *types.System
}
// Build history notes in the image configurations.
appendHistory := func(history []v1.History) {
appendHistory := func(history []v1.History, empty bool) {
for i := range history {
var created *time.Time
if history[i].Created != nil {
@ -690,7 +750,7 @@ func (i *containerImageRef) NewImageSource(ctx context.Context, sc *types.System
CreatedBy: history[i].CreatedBy,
Author: history[i].Author,
Comment: history[i].Comment,
EmptyLayer: true,
EmptyLayer: empty,
}
oimage.History = append(oimage.History, onews)
if created == nil {
@ -701,7 +761,7 @@ func (i *containerImageRef) NewImageSource(ctx context.Context, sc *types.System
CreatedBy: history[i].CreatedBy,
Author: history[i].Author,
Comment: history[i].Comment,
EmptyLayer: true,
EmptyLayer: empty,
}
dimage.History = append(dimage.History, dnews)
}
@ -712,36 +772,38 @@ func (i *containerImageRef) NewImageSource(ctx context.Context, sc *types.System
// Keep track of how many entries the base image's history had
// before we started adding to it.
baseImageHistoryLen := len(oimage.History)
appendHistory(i.preEmptyLayers)
// Add history entries for prepended empty layers.
appendHistory(i.preEmptyLayers, true)
// Add history entries for prepended API-supplied layers.
for _, h := range i.preLayers {
appendHistory([]v1.History{h.linkedLayer.History}, h.linkedLayer.History.EmptyLayer)
}
// Add a history entry for this layer, empty or not.
created := time.Now().UTC()
if i.created != nil {
created = (*i.created).UTC()
}
comment := i.historyComment
// Add a comment indicating which base image was used, if it wasn't
// just an image ID.
if strings.Contains(i.parent, i.fromImageID) && i.fromImageName != i.fromImageID {
comment += "FROM " + i.fromImageName
}
onews := v1.History{
Created: &created,
CreatedBy: i.createdBy,
Author: oimage.Author,
EmptyLayer: i.emptyLayer,
Comment: i.historyComment,
}
oimage.History = append(oimage.History, onews)
oimage.History[baseImageHistoryLen].Comment = comment
dnews := docker.V2S2History{
Created: created,
CreatedBy: i.createdBy,
Author: dimage.Author,
EmptyLayer: i.emptyLayer,
Comment: i.historyComment,
}
dimage.History = append(dimage.History, dnews)
dimage.History[baseImageHistoryLen].Comment = comment
appendHistory(i.postEmptyLayers)
// Add a history entry for the extra image content if we added a layer for it.
// This diff was added to the list of layers before API-supplied layers that
// needed to be appended, and we need to keep the order of history entries for
// not-empty layers consistent with that.
if extraImageContentDiff != "" {
createdBy := fmt.Sprintf(`/bin/sh -c #(nop) ADD dir:%s in /",`, extraImageContentDiffDigest.Encoded())
onews := v1.History{
@ -755,6 +817,24 @@ func (i *containerImageRef) NewImageSource(ctx context.Context, sc *types.System
}
dimage.History = append(dimage.History, dnews)
}
// Add history entries for appended empty layers.
appendHistory(i.postEmptyLayers, true)
// Add history entries for appended API-supplied layers.
for _, h := range i.postLayers {
appendHistory([]v1.History{h.linkedLayer.History}, h.linkedLayer.History.EmptyLayer)
}
// Assemble a comment indicating which base image was used, if it wasn't
// just an image ID, and add it to the first history entry we added.
var fromComment string
if strings.Contains(i.parent, i.fromImageID) && i.fromImageName != "" && !strings.HasPrefix(i.fromImageID, i.fromImageName) {
if oimage.History[baseImageHistoryLen].Comment != "" {
fromComment = " "
}
fromComment += "FROM " + i.fromImageName
}
oimage.History[baseImageHistoryLen].Comment += fromComment
dimage.History[baseImageHistoryLen].Comment += fromComment
// Confidence check that we didn't just create a mismatch between non-empty layers in the
// history and the number of diffIDs. Only applicable if the base image (if there was
@ -841,7 +921,7 @@ func (i *containerImageRef) NewImageSource(ctx context.Context, sc *types.System
return src, nil
}
func (i *containerImageRef) NewImageDestination(ctx context.Context, sc *types.SystemContext) (types.ImageDestination, error) {
func (i *containerImageRef) NewImageDestination(_ context.Context, _ *types.SystemContext) (types.ImageDestination, error) {
return nil, errors.New("can't write to a container")
}
@ -885,15 +965,15 @@ func (i *containerImageSource) Reference() types.ImageReference {
return i.ref
}
func (i *containerImageSource) GetSignatures(ctx context.Context, instanceDigest *digest.Digest) ([][]byte, error) {
func (i *containerImageSource) GetSignatures(_ context.Context, _ *digest.Digest) ([][]byte, error) {
return nil, nil
}
func (i *containerImageSource) GetManifest(ctx context.Context, instanceDigest *digest.Digest) ([]byte, string, error) {
func (i *containerImageSource) GetManifest(_ context.Context, _ *digest.Digest) ([]byte, string, error) {
return i.manifest, i.manifestType, nil
}
func (i *containerImageSource) LayerInfosForCopy(ctx context.Context, instanceDigest *digest.Digest) ([]types.BlobInfo, error) {
func (i *containerImageSource) LayerInfosForCopy(_ context.Context, _ *digest.Digest) ([]types.BlobInfo, error) {
return nil, nil
}
@ -901,7 +981,7 @@ func (i *containerImageSource) HasThreadSafeGetBlob() bool {
return false
}
func (i *containerImageSource) GetBlob(ctx context.Context, blob types.BlobInfo, cache types.BlobInfoCache) (reader io.ReadCloser, size int64, err error) {
func (i *containerImageSource) GetBlob(_ context.Context, blob types.BlobInfo, _ types.BlobInfoCache) (reader io.ReadCloser, size int64, err error) {
if blob.Digest == i.configDigest {
logrus.Debugf("start reading config")
reader := bytes.NewReader(i.config)
@ -923,7 +1003,7 @@ func (i *containerImageSource) GetBlob(ctx context.Context, blob types.BlobInfo,
} else {
for _, blobDir := range []string{i.blobDirectory, i.path} {
var layerFile *os.File
layerFile, err = os.OpenFile(filepath.Join(blobDir, blob.Digest.String()), os.O_RDONLY, 0600)
layerFile, err = os.OpenFile(filepath.Join(blobDir, blob.Digest.String()), os.O_RDONLY, 0o600)
if err == nil {
st, err := layerFile.Stat()
if err != nil {
@ -1018,11 +1098,92 @@ func (i *containerImageRef) makeExtraImageContentDiff(includeFooter bool) (_ str
return diff.Name(), digester.Digest(), counter.Count, nil
}
// makeLinkedLayerInfos calculates the size and digest information for a layer
// we intend to add to the image that we're committing.
func (b *Builder) makeLinkedLayerInfos(layers []LinkedLayer, layerType string) ([]commitLinkedLayerInfo, error) {
if layers == nil {
return nil, nil
}
infos := make([]commitLinkedLayerInfo, 0, len(layers))
for i, layer := range layers {
// complain if EmptyLayer and "is the BlobPath empty" don't agree
if layer.History.EmptyLayer != (layer.BlobPath == "") {
return nil, fmt.Errorf("internal error: layer-is-empty = %v, but content path is %q", layer.History.EmptyLayer, layer.BlobPath)
}
// if there's no layer contents, we're done with this one
if layer.History.EmptyLayer {
continue
}
// check if it's a directory or a non-directory
st, err := os.Stat(layer.BlobPath)
if err != nil {
return nil, fmt.Errorf("checking if layer content %s is a directory: %w", layer.BlobPath, err)
}
info := commitLinkedLayerInfo{
layerID: fmt.Sprintf("(%s %d)", layerType, i+1),
linkedLayer: layer,
}
if err = func() error {
if st.IsDir() {
// if it's a directory, archive it and digest the archive while we're storing a copy somewhere
cdir, err := b.store.ContainerDirectory(b.ContainerID)
if err != nil {
return fmt.Errorf("determining directory for working container: %w", err)
}
f, err := os.CreateTemp(cdir, "")
if err != nil {
return fmt.Errorf("creating temporary file to hold blob for %q: %w", info.linkedLayer.BlobPath, err)
}
defer f.Close()
rc, err := chrootarchive.Tar(info.linkedLayer.BlobPath, nil, info.linkedLayer.BlobPath)
if err != nil {
return fmt.Errorf("generating a layer blob from %q: %w", info.linkedLayer.BlobPath, err)
}
digester := digest.Canonical.Digester()
sizeCounter := ioutils.NewWriteCounter(digester.Hash())
_, copyErr := io.Copy(f, io.TeeReader(rc, sizeCounter))
if err := rc.Close(); err != nil {
return fmt.Errorf("storing a copy of %q: %w", info.linkedLayer.BlobPath, err)
}
if copyErr != nil {
return fmt.Errorf("storing a copy of %q: %w", info.linkedLayer.BlobPath, copyErr)
}
info.uncompressedDigest = digester.Digest()
info.size = sizeCounter.Count
info.linkedLayer.BlobPath = f.Name()
} else {
// if it's not a directory, just digest it
f, err := os.Open(info.linkedLayer.BlobPath)
if err != nil {
return err
}
defer f.Close()
sizeCounter := ioutils.NewWriteCounter(io.Discard)
uncompressedDigest, err := digest.Canonical.FromReader(io.TeeReader(f, sizeCounter))
if err != nil {
return err
}
info.uncompressedDigest = uncompressedDigest
info.size = sizeCounter.Count
}
return nil
}(); err != nil {
return nil, err
}
infos = append(infos, info)
}
return infos, nil
}
// makeContainerImageRef creates a containers/image/v5/types.ImageReference
// which is mainly used for representing the working container as a source
// image that can be copied, which is how we commit container to create the
// image that can be copied, which is how we commit the container to create the
// image.
func (b *Builder) makeContainerImageRef(options CommitOptions) (*containerImageRef, error) {
if (len(options.PrependedLinkedLayers) > 0 || len(options.AppendedLinkedLayers) > 0) &&
(options.ConfidentialWorkloadOptions.Convert || options.Squash) {
return nil, errors.New("can't add prebuilt layers and produce an image with only one layer, at the same time")
}
var name reference.Named
container, err := b.store.Container(b.ContainerID)
if err != nil {
@ -1080,6 +1241,15 @@ func (b *Builder) makeContainerImageRef(options CommitOptions) (*containerImageR
}
}
preLayerInfos, err := b.makeLinkedLayerInfos(append(slices.Clone(b.PrependedLinkedLayers), slices.Clone(options.PrependedLinkedLayers)...), "prepended layer")
if err != nil {
return nil, err
}
postLayerInfos, err := b.makeLinkedLayerInfos(append(slices.Clone(options.AppendedLinkedLayers), slices.Clone(b.AppendedLinkedLayers)...), "appended layer")
if err != nil {
return nil, err
}
ref := &containerImageRef{
fromImageName: b.FromImage,
fromImageID: b.FromImageID,
@ -1104,13 +1274,29 @@ func (b *Builder) makeContainerImageRef(options CommitOptions) (*containerImageR
idMappingOptions: &b.IDMappingOptions,
parent: parent,
blobDirectory: options.BlobDirectory,
preEmptyLayers: b.PrependedEmptyLayers,
postEmptyLayers: b.AppendedEmptyLayers,
preEmptyLayers: slices.Clone(b.PrependedEmptyLayers),
preLayers: preLayerInfos,
postEmptyLayers: slices.Clone(b.AppendedEmptyLayers),
postLayers: postLayerInfos,
overrideChanges: options.OverrideChanges,
overrideConfig: options.OverrideConfig,
extraImageContent: maps.Clone(options.ExtraImageContent),
compatSetParent: options.CompatSetParent,
}
if ref.created != nil {
for i := range ref.preEmptyLayers {
ref.preEmptyLayers[i].Created = ref.created
}
for i := range ref.preLayers {
ref.preLayers[i].linkedLayer.History.Created = ref.created
}
for i := range ref.postEmptyLayers {
ref.postEmptyLayers[i].Created = ref.created
}
for i := range ref.postLayers {
ref.postLayers[i].linkedLayer.History.Created = ref.created
}
}
return ref, nil
}

View File

@ -16,7 +16,7 @@ import (
"strings"
"sync"
"github.com/containerd/containerd/platforms"
"github.com/containerd/platforms"
"github.com/containers/buildah"
"github.com/containers/buildah/define"
internalUtil "github.com/containers/buildah/internal/util"
@ -143,7 +143,7 @@ func BuildDockerfiles(ctx context.Context, store storage.Store, options define.B
}
contents, err = os.Open(dfile)
if err != nil {
return "", nil, err
return "", nil, fmt.Errorf("reading build instructions: %w", err)
}
dinfo, err = contents.Stat()
if err != nil {
@ -229,6 +229,17 @@ func BuildDockerfiles(ctx context.Context, store storage.Store, options define.B
if platform.OS == "" && platform.Arch != "" {
platform.OS = runtime.GOOS
}
if platform.OS == "" && platform.Arch == "" {
if targetPlatform, ok := options.Args["TARGETPLATFORM"]; ok {
targetPlatform, err := platforms.Parse(targetPlatform)
if err != nil {
return "", nil, fmt.Errorf("parsing TARGETPLATFORM value %q: %w", targetPlatform, err)
}
platform.OS = targetPlatform.OS
platform.Arch = targetPlatform.Architecture
platform.Variant = targetPlatform.Variant
}
}
platformSpec := internalUtil.NormalizePlatform(v1.Platform{
OS: platform.OS,
Architecture: platform.Arch,

View File

@ -162,6 +162,7 @@ type Executor struct {
cdiConfigDir string
compatSetParent types.OptionalBool
compatVolumes types.OptionalBool
compatScratchConfig types.OptionalBool
}
type imageTypeAndHistoryAndDiffIDs struct {
@ -220,9 +221,9 @@ func newExecutor(logger *logrus.Logger, logPrefix string, store storage.Store, o
if options.RusageLogFile == "" {
rusageLogFile = options.Out
} else {
rusageLogFile, err = os.OpenFile(options.RusageLogFile, os.O_TRUNC|os.O_CREATE|os.O_WRONLY, 0644)
rusageLogFile, err = os.OpenFile(options.RusageLogFile, os.O_TRUNC|os.O_CREATE|os.O_WRONLY, 0o644)
if err != nil {
return nil, err
return nil, fmt.Errorf("creating file to store rusage logs: %w", err)
}
}
}
@ -320,6 +321,7 @@ func newExecutor(logger *logrus.Logger, logPrefix string, store storage.Store, o
cdiConfigDir: options.CDIConfigDir,
compatSetParent: options.CompatSetParent,
compatVolumes: options.CompatVolumes,
compatScratchConfig: options.CompatScratchConfig,
}
if exec.err == nil {
exec.err = os.Stderr
@ -1050,7 +1052,7 @@ func (b *Executor) Build(ctx context.Context, stages imagebuilder.Stages) (image
}
logrus.Debugf("printing final image id %q", imageID)
if b.iidfile != "" {
if err = os.WriteFile(b.iidfile, []byte("sha256:"+imageID), 0644); err != nil {
if err = os.WriteFile(b.iidfile, []byte("sha256:"+imageID), 0o644); err != nil {
return imageID, ref, fmt.Errorf("failed to write image ID to file %q: %w", b.iidfile, err)
}
} else {

View File

@ -67,9 +67,9 @@ type StageExecutor struct {
name string
builder *buildah.Builder
preserved int
volumes imagebuilder.VolumeSet
volumeCache map[string]string
volumeCacheInfo map[string]os.FileInfo
volumes imagebuilder.VolumeSet // list of directories which are volumes
volumeCache map[string]string // mapping from volume directories to cache archives (used by vfs method)
volumeCacheInfo map[string]os.FileInfo // mapping from volume directories to perms/datestamps to reset after restoring
mountPoint string
output string
containerIDs []string
@ -92,7 +92,7 @@ type StageExecutor struct {
// writeable while the RUN instruction is being handled, even if any changes
// made within the directory are ultimately discarded.
func (s *StageExecutor) Preserve(path string) error {
logrus.Debugf("PRESERVE %q in %q", path, s.builder.ContainerID)
logrus.Debugf("PRESERVE %q in %q (already preserving %v)", path, s.builder.ContainerID, s.volumes)
// Try and resolve the symlink (if one exists)
// Set archivedPath and path based on whether a symlink is found or not
@ -111,71 +111,61 @@ func (s *StageExecutor) Preserve(path string) error {
return fmt.Errorf("evaluating path %q: %w", path, err)
}
// Whether or not we're caching and restoring the contents of this
// directory, we need to ensure it exists now.
const createdDirPerms = os.FileMode(0o755)
if s.executor.compatVolumes != types.OptionalBoolTrue {
logrus.Debugf("ensuring volume path %q exists", path)
st, err := os.Stat(archivedPath)
if errors.Is(err, os.ErrNotExist) {
// Yup, we do have to create it. That means it's not in any
// cached copy of the path that covers it, so we have to
// invalidate such cached copy.
logrus.Debugf("have to create volume %q", path)
createdDirPerms := createdDirPerms
if err := copier.Mkdir(s.mountPoint, archivedPath, copier.MkdirOptions{ChmodNew: &createdDirPerms}); err != nil {
return fmt.Errorf("ensuring volume path exists: %w", err)
}
logrus.Debugf("not doing volume save-and-restore of %q in %q", path, s.builder.ContainerID)
return nil
if err := s.volumeCacheInvalidate(path); err != nil {
return fmt.Errorf("ensuring volume path %q is preserved: %w", filepath.Join(s.mountPoint, path), err)
}
if st, err = os.Stat(archivedPath); err != nil {
return fmt.Errorf("checking on just-created volume path: %w", err)
}
}
if err != nil {
return fmt.Errorf("reading info cache for volume at %q: %w", path, err)
}
if s.volumes.Covers(path) {
// This path is a subdirectory of a volume path that we're
// already preserving, so there's nothing new to be done except
// ensure that it exists.
st, err := os.Stat(archivedPath)
if errors.Is(err, os.ErrNotExist) {
// We do have to create it. That means it's not in any
// cached copy of the path that covers it, so we have
// to invalidate such cached copy.
logrus.Debugf("have to create volume %q", path)
createdDirPerms := createdDirPerms
if err := copier.Mkdir(s.mountPoint, filepath.Join(s.mountPoint, path), copier.MkdirOptions{ChmodNew: &createdDirPerms}); err != nil {
return fmt.Errorf("ensuring volume path exists: %w", err)
}
if err := s.volumeCacheInvalidate(path); err != nil {
return fmt.Errorf("ensuring volume path %q is preserved: %w", filepath.Join(s.mountPoint, path), err)
}
if st, err = os.Stat(archivedPath); err != nil {
return fmt.Errorf("checking on just-created volume path: %w", err)
}
}
// already preserving, so there's nothing new to be done now
// that we've ensured that it exists.
s.volumeCacheInfo[path] = st
return nil
}
// Figure out where the cache for this volume would be stored.
s.preserved++
cacheDir, err := s.executor.store.ContainerDirectory(s.builder.ContainerID)
if err != nil {
return fmt.Errorf("unable to locate temporary directory for container")
}
cacheFile := filepath.Join(cacheDir, fmt.Sprintf("volume%d.tar", s.preserved))
// Save info about the top level of the location that we'll be archiving.
st, err := os.Stat(archivedPath)
if errors.Is(err, os.ErrNotExist) {
logrus.Debugf("have to create volume %q", path)
createdDirPerms := os.FileMode(0o755)
if err = copier.Mkdir(s.mountPoint, archivedPath, copier.MkdirOptions{ChmodNew: &createdDirPerms}); err != nil {
return fmt.Errorf("ensuring volume path exists: %w", err)
}
st, err = os.Stat(archivedPath)
}
if err != nil {
logrus.Debugf("error reading info about %q: %v", archivedPath, err)
return err
}
s.volumeCacheInfo[path] = st
// Add the new volume path to the ones that we're tracking.
if !s.volumes.Add(path) {
// This path is not a subdirectory of a volume path that we're
// already preserving, so adding it to the list should have
// worked.
return fmt.Errorf("adding %q to the volume cache", path)
}
s.volumeCacheInfo[path] = st
// If we're not doing save/restore, we're done, since volumeCache
// should be empty.
if s.executor.compatVolumes != types.OptionalBoolTrue {
logrus.Debugf("not doing volume save-and-restore of %q in %q", path, s.builder.ContainerID)
return nil
}
// Decide where the cache for this volume will be stored.
s.preserved++
cacheDir, err := s.executor.store.ContainerDirectory(s.builder.ContainerID)
if err != nil {
return fmt.Errorf("unable to locate temporary directory for container")
}
cacheFile := filepath.Join(cacheDir, fmt.Sprintf("volume%d.tar", s.preserved))
s.volumeCache[path] = cacheFile
// Now prune cache files for volumes that are newly supplanted by this one.
@ -206,7 +196,7 @@ func (s *StageExecutor) Preserve(path string) error {
if errors.Is(err, os.ErrNotExist) {
continue
}
return err
return fmt.Errorf("removing cache of %q: %w", archivedPath, err)
}
delete(s.volumeCache, cachedPath)
}
@ -256,16 +246,12 @@ func (s *StageExecutor) volumeCacheSaveVFS() (mounts []specs.Mount, err error) {
continue
}
if !errors.Is(err, os.ErrNotExist) {
return nil, err
}
createdDirPerms := os.FileMode(0755)
if err := copier.Mkdir(s.mountPoint, archivedPath, copier.MkdirOptions{ChmodNew: &createdDirPerms}); err != nil {
return nil, fmt.Errorf("ensuring volume path exists: %w", err)
return nil, fmt.Errorf("checking for presence of a cached copy of %q at %q: %w", cachedPath, cacheFile, err)
}
logrus.Debugf("caching contents of volume %q in %q", archivedPath, cacheFile)
cache, err := os.Create(cacheFile)
if err != nil {
return nil, err
return nil, fmt.Errorf("creating cache for volume %q: %w", archivedPath, err)
}
defer cache.Close()
rc, err := chrootarchive.Tar(archivedPath, nil, s.mountPoint)
@ -298,16 +284,12 @@ func (s *StageExecutor) volumeCacheRestoreVFS() (err error) {
logrus.Debugf("restoring contents of volume %q from %q", archivedPath, cacheFile)
cache, err := os.Open(cacheFile)
if err != nil {
return err
return fmt.Errorf("restoring contents of volume %q: %w", archivedPath, err)
}
defer cache.Close()
if err := copier.Remove(s.mountPoint, archivedPath, copier.RemoveOptions{All: true}); err != nil {
return err
}
createdDirPerms := os.FileMode(0o755)
if err := copier.Mkdir(s.mountPoint, archivedPath, copier.MkdirOptions{ChmodNew: &createdDirPerms}); err != nil {
return err
}
err = chrootarchive.Untar(cache, archivedPath, nil)
if err != nil {
return fmt.Errorf("extracting archive at %q: %w", archivedPath, err)
@ -334,13 +316,11 @@ func (s *StageExecutor) volumeCacheRestoreVFS() (err error) {
}
// Save the contents of each of the executor's list of volumes for which we
// don't already have a cache file.
// don't already have a cache file. For overlay, we "save" and "restore" by
// using it as a lower for an overlay mount in the same location, and then
// discarding the upper.
func (s *StageExecutor) volumeCacheSaveOverlay() (mounts []specs.Mount, err error) {
for cachedPath := range s.volumeCache {
err = copier.Mkdir(s.mountPoint, filepath.Join(s.mountPoint, cachedPath), copier.MkdirOptions{})
if err != nil {
return nil, fmt.Errorf("ensuring volume exists: %w", err)
}
volumePath := filepath.Join(s.mountPoint, cachedPath)
mount := specs.Mount{
Source: volumePath,
@ -455,7 +435,7 @@ func (s *StageExecutor) performCopy(excludes []string, copies ...imagebuilder.Co
if err != nil {
return fmt.Errorf("unable to create tmp file for COPY instruction at %q: %w", parse.GetTempDir(), err)
}
err = tmpFile.Chmod(0644) // 644 is consistent with buildkit
err = tmpFile.Chmod(0o644) // 644 is consistent with buildkit
if err != nil {
tmpFile.Close()
return fmt.Errorf("unable to chmod tmp file created for COPY instruction at %q: %w", tmpFile.Name(), err)
@ -592,6 +572,13 @@ func (s *StageExecutor) performCopy(excludes []string, copies ...imagebuilder.Co
IDMappingOptions: idMappingOptions,
StripSetuidBit: stripSetuid,
StripSetgidBit: stripSetgid,
// The values for these next two fields are ultimately
// based on command line flags with names that sound
// much more generic.
CertPath: s.executor.systemContext.DockerCertPath,
InsecureSkipTLSVerify: s.executor.systemContext.DockerInsecureSkipTLSVerify,
MaxRetries: s.executor.maxPullPushRetries,
RetryDelay: s.executor.retryPullPushDelay,
}
if len(copy.Files) > 0 {
// If we are copying heredoc files, we need to temporary place
@ -616,10 +603,8 @@ func (s *StageExecutor) performCopy(excludes []string, copies ...imagebuilder.Co
return nil
}
// Returns a map of StageName/ImageName:internal.StageMountDetails for RunOpts if any --mount with from is provided
// Stage can automatically cleanup this mounts when a stage is removed
// check if RUN contains `--mount` with `from`. If yes pre-mount images or stages from executor for Run.
// stages mounted here will we used be Run().
// Returns a map of StageName/ImageName:internal.StageMountDetails for the
// items in the passed-in mounts list which include a "from=" value.
func (s *StageExecutor) runStageMountPoints(mountList []string) (map[string]internal.StageMountDetails, error) {
stageMountPoints := make(map[string]internal.StageMountDetails)
for _, flag := range mountList {
@ -642,9 +627,11 @@ func (s *StageExecutor) runStageMountPoints(mountList []string) (map[string]inte
if fromErr != nil {
return nil, fmt.Errorf("unable to resolve argument %q: %w", val, fromErr)
}
// If additional buildContext contains this
// give priority to that and break if additional
// is not an external image.
// If the value corresponds to an additional build context,
// the mount source is either either the rootfs of the image,
// the filesystem path, or a temporary directory populated
// with the contents of the URL, all in preference to any
// stage which might have the value as its name.
if additionalBuildContext, ok := s.executor.additionalBuildContexts[from]; ok {
if additionalBuildContext.IsImage {
mountPoint, err := s.getImageRootfs(s.ctx, additionalBuildContext.Value)
@ -657,39 +644,38 @@ func (s *StageExecutor) runStageMountPoints(mountList []string) (map[string]inte
// `from` to refer from stageMountPoints map later.
stageMountPoints[from] = internal.StageMountDetails{IsStage: false, DidExecute: true, MountPoint: mountPoint}
break
} else {
// Most likely this points to path on filesystem
// or external tar archive, Treat it as a stage
// nothing is different for this. So process and
// point mountPoint to path on host and it will
// be automatically handled correctly by since
// GetBindMount will honor IsStage:false while
// processing stageMountPoints.
mountPoint := additionalBuildContext.Value
if additionalBuildContext.IsURL {
// Check if following buildContext was already
// downloaded before in any other RUN step. If not
// download it and populate DownloadCache field for
// future RUN steps.
if additionalBuildContext.DownloadedCache == "" {
// additional context contains a tar file
// so download and explode tar to buildah
// temp and point context to that.
path, subdir, err := define.TempDirForURL(tmpdir.GetTempDir(), internal.BuildahExternalArtifactsDir, additionalBuildContext.Value)
if err != nil {
return nil, fmt.Errorf("unable to download context from external source %q: %w", additionalBuildContext.Value, err)
}
// point context dir to the extracted path
mountPoint = filepath.Join(path, subdir)
// populate cache for next RUN step
additionalBuildContext.DownloadedCache = mountPoint
} else {
mountPoint = additionalBuildContext.DownloadedCache
}
}
stageMountPoints[from] = internal.StageMountDetails{IsStage: true, DidExecute: true, MountPoint: mountPoint}
break
}
// Most likely this points to path on filesystem
// or external tar archive, Treat it as a stage
// nothing is different for this. So process and
// point mountPoint to path on host and it will
// be automatically handled correctly by since
// GetBindMount will honor IsStage:false while
// processing stageMountPoints.
mountPoint := additionalBuildContext.Value
if additionalBuildContext.IsURL {
// Check if following buildContext was already
// downloaded before in any other RUN step. If not
// download it and populate DownloadCache field for
// future RUN steps.
if additionalBuildContext.DownloadedCache == "" {
// additional context contains a tar file
// so download and explode tar to buildah
// temp and point context to that.
path, subdir, err := define.TempDirForURL(tmpdir.GetTempDir(), internal.BuildahExternalArtifactsDir, additionalBuildContext.Value)
if err != nil {
return nil, fmt.Errorf("unable to download context from external source %q: %w", additionalBuildContext.Value, err)
}
// point context dir to the extracted path
mountPoint = filepath.Join(path, subdir)
// populate cache for next RUN step
additionalBuildContext.DownloadedCache = mountPoint
} else {
mountPoint = additionalBuildContext.DownloadedCache
}
}
stageMountPoints[from] = internal.StageMountDetails{IsStage: true, DidExecute: true, MountPoint: mountPoint}
break
}
// If the source's name corresponds to the
// result of an earlier stage, wait for that
@ -697,10 +683,13 @@ func (s *StageExecutor) runStageMountPoints(mountList []string) (map[string]inte
if isStage, err := s.executor.waitForStage(s.ctx, from, s.stages[:s.index]); isStage && err != nil {
return nil, err
}
// If the source's name is a stage, return a
// pointer to its rootfs.
if otherStage, ok := s.executor.stages[from]; ok && otherStage.index < s.index {
stageMountPoints[from] = internal.StageMountDetails{IsStage: true, DidExecute: otherStage.didExecute, MountPoint: otherStage.mountPoint}
break
} else {
// Treat the source's name as the name of an image.
mountPoint, err := s.getImageRootfs(s.ctx, from)
if err != nil {
return nil, fmt.Errorf("%s from=%s: no stage or image found with that name", flag, from)
@ -728,7 +717,7 @@ func (s *StageExecutor) createNeededHeredocMountsForRun(files []imagebuilder.Fil
f.Close()
return nil, err
}
err = f.Chmod(0755)
err = f.Chmod(0o755)
if err != nil {
f.Close()
return nil, err
@ -898,20 +887,20 @@ func (s *StageExecutor) UnrecognizedInstruction(step *imagebuilder.Step) error {
errStr := fmt.Sprintf("Build error: Unknown instruction: %q ", strings.ToUpper(step.Command))
err := fmt.Sprintf(errStr+"%#v", step)
if s.executor.ignoreUnrecognizedInstructions {
logrus.Debugf(err)
logrus.Debug(err)
return nil
}
switch logrus.GetLevel() {
case logrus.ErrorLevel:
s.executor.logger.Errorf(errStr)
s.executor.logger.Error(errStr)
case logrus.DebugLevel:
logrus.Debugf(err)
logrus.Debug(err)
default:
s.executor.logger.Errorf("+(UNHANDLED LOGLEVEL) %#v", step)
}
return fmt.Errorf(err)
return errors.New(err)
}
// prepare creates a working container based on the specified image, or if one
@ -998,6 +987,7 @@ func (s *StageExecutor) prepare(ctx context.Context, from string, initializeIBCo
MountLabel: s.executor.mountLabel,
PreserveBaseImageAnns: preserveBaseImageAnnotations,
CDIConfigDir: s.executor.cdiConfigDir,
CompatScratchConfig: s.executor.compatScratchConfig,
}
builder, err = buildah.NewBuilder(ctx, s.executor.store, builderOptions)
@ -1079,6 +1069,7 @@ func (s *StageExecutor) prepare(ctx context.Context, from string, initializeIBCo
s.mountPoint = mountPoint
s.builder = builder
// Now that the rootfs is mounted, set up handling of volumes from the base image.
s.volumes = make([]string, 0, len(s.volumes))
s.volumeCache = make(map[string]string)
s.volumeCacheInfo = make(map[string]os.FileInfo)
for _, v := range builder.Volumes() {
@ -1217,7 +1208,7 @@ func (s *StageExecutor) Execute(ctx context.Context, base string) (imgID string,
if output != "" {
commitMessage = fmt.Sprintf("%s %s", commitMessage, output)
}
logrus.Debugf(commitMessage)
logrus.Debug(commitMessage)
if !s.executor.quiet {
s.log(commitMessage)
}
@ -1369,14 +1360,13 @@ func (s *StageExecutor) Execute(ctx context.Context, base string) (imgID string,
// since this additional context
// is not an image.
break
} else {
// replace with image set in build context
from = additionalBuildContext.Value
if _, err := s.getImageRootfs(ctx, from); err != nil {
return "", nil, false, fmt.Errorf("%s --from=%s: no stage or image found with that name", command, from)
}
break
}
// replace with image set in build context
from = additionalBuildContext.Value
if _, err := s.getImageRootfs(ctx, from); err != nil {
return "", nil, false, fmt.Errorf("%s --from=%s: no stage or image found with that name", command, from)
}
break
}
// If the source's name corresponds to the
@ -1425,30 +1415,29 @@ func (s *StageExecutor) Execute(ctx context.Context, base string) (imgID string,
}
s.builder.AddPrependedEmptyLayer(&timestamp, s.getCreatedBy(node, addedContentSummary), "", "")
continue
} else {
// This is the last instruction for this stage,
// so we should commit this container to create
// an image, but only if it's the last stage,
// or if it's used as the basis for a later
// stage.
if lastStage || imageIsUsedLater {
logCommit(s.output, i)
imgID, ref, err = s.commit(ctx, s.getCreatedBy(node, addedContentSummary), false, s.output, s.executor.squash, lastStage && lastInstruction)
if err != nil {
return "", nil, false, fmt.Errorf("committing container for step %+v: %w", *step, err)
}
logImageID(imgID)
// Generate build output if needed.
if canGenerateBuildOutput {
if err := s.generateBuildOutput(buildOutputOption); err != nil {
return "", nil, false, err
}
}
} else {
imgID = ""
}
break
}
// This is the last instruction for this stage,
// so we should commit this container to create
// an image, but only if it's the last stage,
// or if it's used as the basis for a later
// stage.
if lastStage || imageIsUsedLater {
logCommit(s.output, i)
imgID, ref, err = s.commit(ctx, s.getCreatedBy(node, addedContentSummary), false, s.output, s.executor.squash, lastStage && lastInstruction)
if err != nil {
return "", nil, false, fmt.Errorf("committing container for step %+v: %w", *step, err)
}
logImageID(imgID)
// Generate build output if needed.
if canGenerateBuildOutput {
if err := s.generateBuildOutput(buildOutputOption); err != nil {
return "", nil, false, err
}
}
} else {
imgID = ""
}
break
}
// We're in a multi-layered build.
@ -2125,7 +2114,7 @@ func (s *StageExecutor) pullCache(ctx context.Context, cacheKey string) (referen
if err != nil {
logrus.Debugf("failed pulling cache from source %s: %v", src, err)
continue // failed pulling this one try next
//return "", fmt.Errorf("failed while pulling cache from %q: %w", src, err)
// return "", fmt.Errorf("failed while pulling cache from %q: %w", src, err)
}
logrus.Debugf("successfully pulled cache from repo %s: %s", src, id)
return src.DockerReference(), id, nil

View File

@ -109,7 +109,7 @@ func importBuilderDataFromImage(ctx context.Context, store storage.Store, system
CommonBuildOpts: &CommonBuildOptions{},
}
if err := builder.initConfig(ctx, image, systemContext); err != nil {
if err := builder.initConfig(ctx, systemContext, image, nil); err != nil {
return nil, fmt.Errorf("preparing image configuration: %w", err)
}

View File

@ -252,7 +252,7 @@ In Ubuntu 22.10 (Karmic) or Debian 12 (Bookworm) you can use these commands:
```
sudo apt-get -y -qq update
sudo apt-get -y install bats btrfs-progs git go-md2man golang libapparmor-dev libglib2.0-dev libgpgme11-dev libseccomp-dev libselinux1-dev make skopeo
sudo apt-get -y install bats btrfs-progs git go-md2man golang libapparmor-dev libglib2.0-dev libgpgme11-dev libseccomp-dev libselinux1-dev make skopeo libbtrfs-dev
```
Then to install Buildah follow the steps in this example:

View File

@ -17,26 +17,26 @@ import (
// from a Dockerfile. Try anything more than that and it'll return an error.
type configOnlyExecutor struct{}
func (g *configOnlyExecutor) Preserve(path string) error {
func (g *configOnlyExecutor) Preserve(_ string) error {
return errors.New("ADD/COPY/RUN not supported as changes")
}
func (g *configOnlyExecutor) EnsureContainerPath(path string) error {
func (g *configOnlyExecutor) EnsureContainerPath(_ string) error {
return nil
}
func (g *configOnlyExecutor) EnsureContainerPathAs(path, user string, mode *os.FileMode) error {
func (g *configOnlyExecutor) EnsureContainerPathAs(_, _ string, _ *os.FileMode) error {
return nil
}
func (g *configOnlyExecutor) Copy(excludes []string, copies ...imagebuilder.Copy) error {
func (g *configOnlyExecutor) Copy(_ []string, copies ...imagebuilder.Copy) error {
if len(copies) == 0 {
return nil
}
return errors.New("ADD/COPY not supported as changes")
}
func (g *configOnlyExecutor) Run(run imagebuilder.Run, config dockerclient.Config) error {
func (g *configOnlyExecutor) Run(_ imagebuilder.Run, _ dockerclient.Config) error {
return errors.New("RUN not supported as changes")
}

View File

@ -319,7 +319,7 @@ func Archive(rootfsPath string, ociConfig *v1.Image, options ArchiveOptions) (io
imageSize := slop(options.ImageSize, options.Slop)
if imageSize == 0 {
var sourceSize int64
if err := filepath.WalkDir(rootfsPath, func(path string, d fs.DirEntry, err error) error {
if err := filepath.WalkDir(rootfsPath, func(_ string, d fs.DirEntry, err error) error {
if err != nil && !errors.Is(err, os.ErrNotExist) && !errors.Is(err, os.ErrPermission) {
return err
}

View File

@ -1,18 +1,18 @@
package internal
const (
// Temp directory which stores external artifacts which are download for a build.
// Example: tar files from external sources.
// BuildahExternalArtifactsDir is the pattern passed to os.MkdirTemp()
// to generate a temporary directory which will be used to hold
// external items which are downloaded for a build, typically a tarball
// being used as an additional build context.
BuildahExternalArtifactsDir = "buildah-external-artifacts"
)
// Types is internal packages are suspected to change with releases avoid using these outside of buildah
// StageMountDetails holds the Stage/Image mountpoint returned by StageExecutor
// StageExecutor has ability to mount stages/images in current context and
// automatically clean them up.
type StageMountDetails struct {
DidExecute bool // tells if the stage which is being mounted was freshly executed or was part of older cache
IsStage bool // tells if mountpoint returned from stage executor is stage or image
MountPoint string // mountpoint of stage/image
IsStage bool // true if the mountpoint is a temporary directory or a stage's rootfs, false if it's an image
MountPoint string // mountpoint of the stage or image's root directory
}

View File

@ -72,7 +72,7 @@ func ExportFromReader(input io.Reader, opts define.BuildOutputOption) error {
noLChown = true
}
err = os.MkdirAll(opts.Path, 0700)
err = os.MkdirAll(opts.Path, 0o700)
if err != nil {
return fmt.Errorf("failed while creating the destination path %q: %w", opts.Path, err)
}

View File

@ -2,6 +2,7 @@ package volumes
import (
"context"
"errors"
"fmt"
"os"
"path"
@ -9,8 +10,6 @@ import (
"strconv"
"strings"
"errors"
"github.com/containers/buildah/copier"
"github.com/containers/buildah/define"
"github.com/containers/buildah/internal"
@ -105,6 +104,12 @@ func GetBindMount(ctx *types.SystemContext, args []string, contextDir string, st
if !hasArgValue {
return newMount, "", fmt.Errorf("%v: %w", argName, errBadOptionArg)
}
switch argValue {
default:
return newMount, "", fmt.Errorf("%v: %q: %w", argName, argValue, errBadMntOption)
case "shared", "rshared", "private", "rprivate", "slave", "rslave":
// this should be the relevant parts of the same list of options we accepted above
}
newMount.Options = append(newMount.Options, argValue)
case "src", "source":
if !hasArgValue {
@ -224,7 +229,7 @@ func GetBindMount(ctx *types.SystemContext, args []string, contextDir string, st
// GetCacheMount parses a single cache mount entry from the --mount flag.
//
// If this function succeeds and returns a non-nil *lockfile.LockFile, the caller must unlock it (when??).
func GetCacheMount(args []string, store storage.Store, imageMountLabel string, additionalMountPoints map[string]internal.StageMountDetails, workDir string) (specs.Mount, *lockfile.LockFile, error) {
func GetCacheMount(args []string, _ storage.Store, _ string, additionalMountPoints map[string]internal.StageMountDetails, workDir string) (specs.Mount, *lockfile.LockFile, error) {
var err error
var mode uint64
var buildahLockFilesDir string
@ -240,7 +245,7 @@ func GetCacheMount(args []string, store storage.Store, imageMountLabel string, a
}
// if id is set a new subdirectory with `id` will be created under /host-temp/buildah-build-cache/id
id := ""
// buildkit parity: cache directory defaults to 755
// buildkit parity: cache directory defaults to 0o755
mode = 0o755
// buildkit parity: cache directory defaults to uid 0 if not specified
uid := 0
@ -277,6 +282,12 @@ func GetCacheMount(args []string, store storage.Store, imageMountLabel string, a
if !hasArgValue {
return newMount, nil, fmt.Errorf("%v: %w", argName, errBadOptionArg)
}
switch argValue {
default:
return newMount, nil, fmt.Errorf("%v: %q: %w", argName, argValue, errBadMntOption)
case "shared", "rshared", "private", "rprivate", "slave", "rslave":
// this should be the relevant parts of the same list of options we accepted above
}
newMount.Options = append(newMount.Options, argValue)
case "id":
if !hasArgValue {
@ -346,8 +357,9 @@ func GetCacheMount(args []string, store storage.Store, imageMountLabel string, a
}
if fromStage != "" {
// do not create cache on host
// instead use read-only mounted stage as cache
// do not create and use a cache directory on the host,
// instead use the location in the mounted stage or
// temporary directory as the cache
mountPoint := ""
if additionalMountPoints != nil {
if val, ok := additionalMountPoints[fromStage]; ok {
@ -356,24 +368,24 @@ func GetCacheMount(args []string, store storage.Store, imageMountLabel string, a
}
}
}
// Cache does not supports using image so if not stage found
// return with error
// Cache does not support using an image so if there's no such
// stage or temporary directory, return an error
if mountPoint == "" {
return newMount, nil, fmt.Errorf("no stage found with name %s", fromStage)
}
// path should be /contextDir/specified path
newMount.Source = filepath.Join(mountPoint, filepath.Clean(string(filepath.Separator)+newMount.Source))
} else {
// we need to create cache on host if no image is being used
// we need to create the cache directory on the host if no image is being used
// since type is cache and cache can be reused by consecutive builds
// since type is cache and a cache can be reused by consecutive builds
// create a common cache directory, which persists on hosts within temp lifecycle
// add subdirectory if specified
// cache parent directory: creates separate cache parent for each user.
cacheParent := CacheParent()
// create cache on host if not present
err = os.MkdirAll(cacheParent, os.FileMode(0755))
err = os.MkdirAll(cacheParent, os.FileMode(0o755))
if err != nil {
return newMount, nil, fmt.Errorf("unable to create build cache directory: %w", err)
}
@ -389,7 +401,7 @@ func GetCacheMount(args []string, store storage.Store, imageMountLabel string, a
UID: uid,
GID: gid,
}
// buildkit parity: change uid and gid if specified otheriwise keep `0`
// buildkit parity: change uid and gid if specified, otherwise keep `0`
err = idtools.MkdirAllAndChownNew(newMount.Source, os.FileMode(mode), idPair)
if err != nil {
return newMount, nil, fmt.Errorf("unable to change uid,gid of cache directory: %w", err)
@ -397,7 +409,7 @@ func GetCacheMount(args []string, store storage.Store, imageMountLabel string, a
// create a subdirectory inside `cacheParent` just to store lockfiles
buildahLockFilesDir = filepath.Join(cacheParent, buildahLockFilesDir)
err = os.MkdirAll(buildahLockFilesDir, os.FileMode(0700))
err = os.MkdirAll(buildahLockFilesDir, os.FileMode(0o700))
if err != nil {
return newMount, nil, fmt.Errorf("unable to create build cache lockfiles directory: %w", err)
}

View File

@ -243,7 +243,6 @@ func newBuilder(ctx context.Context, store storage.Store, options BuilderOptions
suffixDigitsModulo := 100
for {
var flags map[string]interface{}
// check if we have predefined ProcessLabel and MountLabel
// this could be true if this is another stage in a build
@ -335,7 +334,7 @@ func newBuilder(ctx context.Context, store storage.Store, options BuilderOptions
}
}
if err := builder.initConfig(ctx, src, systemContext); err != nil {
if err := builder.initConfig(ctx, systemContext, src, &options); err != nil {
return nil, fmt.Errorf("preparing image configuration: %w", err)
}

View File

@ -0,0 +1,83 @@
//go:build linux
package binfmt
import (
"bufio"
"errors"
"fmt"
"os"
"path/filepath"
"strings"
"syscall"
"github.com/containers/storage/pkg/unshare"
"github.com/sirupsen/logrus"
"golang.org/x/sys/unix"
)
// MaybeRegister registers binfmt.d emulators via Register() when we appear
// to own our own mount namespace: either we're running rootless, or the
// "container" environment variable indicates that we're inside a container.
// In all other cases it does nothing and returns nil.
func MaybeRegister(configurationSearchDirectories []string) error {
	rootless := unshare.IsRootless()
	inContainer := os.Getenv("container") != ""
	if !rootless && !inContainer {
		// not our mount namespace to play with; leave registration alone
		return nil
	}
	return Register(configurationSearchDirectories)
}
// Register() registers binfmt.d emulators described by configuration files in
// the passed-in slice of directories, or in the union of /etc/binfmt.d,
// /run/binfmt.d, and /usr/lib/binfmt.d if the slice has no items. If any
// emulators are configured, it will attempt to mount a binfmt_misc filesystem
// in the current mount namespace first, ignoring only EPERM and EACCES errors.
func Register(configurationSearchDirectories []string) error {
	if len(configurationSearchDirectories) == 0 {
		configurationSearchDirectories = []string{"/etc/binfmt.d", "/run/binfmt.d", "/usr/lib/binfmt.d"}
	}
	// mounted tracks whether we've already mounted binfmt_misc; shared
	// across all configuration files so we mount at most once.
	mounted := false
	for _, searchDir := range configurationSearchDirectories {
		globs, err := filepath.Glob(filepath.Join(searchDir, "*.conf"))
		if err != nil {
			return fmt.Errorf("looking for binfmt.d configuration in %q: %w", searchDir, err)
		}
		for _, conf := range globs {
			bail, err := registerFromConfFile(conf, &mounted)
			if err != nil || bail {
				// bail with a nil error means we lacked permission
				// to mount binfmt_misc and are giving up quietly
				return err
			}
		}
	}
	return nil
}

// registerFromConfFile registers every emulator described by the binfmt.d
// configuration file at conf, mounting binfmt_misc first if *mounted is
// still false and updating it on success.  A true "bail" return means we
// got EPERM/EACCES trying to mount and the caller should stop quietly.
// Using a helper keeps the deferred Close scoped to one file at a time and
// guarantees the descriptor is released on every error path.
func registerFromConfFile(conf string, mounted *bool) (bail bool, retErr error) {
	f, err := os.Open(conf)
	if err != nil {
		return false, fmt.Errorf("reading binfmt.d configuration: %w", err)
	}
	defer func() {
		// close on every path out; surface the Close error only if
		// nothing else already went wrong
		if err := f.Close(); err != nil && retErr == nil {
			retErr = fmt.Errorf("reading binfmt.d configuration: %w", err)
		}
	}()
	scanner := bufio.NewScanner(f)
	for scanner.Scan() {
		line := strings.TrimSpace(scanner.Text())
		// skip blank lines and ';'- or '#'-style comments
		if len(line) == 0 || line[0] == ';' || line[0] == '#' {
			continue
		}
		if !*mounted {
			if err := unix.Mount("none", "/proc/sys/fs/binfmt_misc", "binfmt_misc", 0, ""); err != nil {
				if errors.Is(err, syscall.EPERM) || errors.Is(err, syscall.EACCES) {
					// well, we tried. no need to make a stink about it
					return true, nil
				}
				return false, fmt.Errorf("mounting binfmt_misc: %w", err)
			}
			*mounted = true
		}
		reg, err := os.Create("/proc/sys/fs/binfmt_misc/register")
		if err != nil {
			return false, fmt.Errorf("registering(open): %w", err)
		}
		if _, err = fmt.Fprintf(reg, "%s\n", line); err != nil {
			reg.Close()
			return false, fmt.Errorf("registering(write): %w", err)
		}
		logrus.Tracef("registered binfmt %q", line)
		if err = reg.Close(); err != nil {
			return false, fmt.Errorf("registering(close): %w", err)
		}
	}
	// a Scanner that stops early due to a read error reports it here;
	// without this check a truncated read looks like success
	if err := scanner.Err(); err != nil {
		return false, fmt.Errorf("reading binfmt.d configuration: %w", err)
	}
	return false, nil
}

View File

@ -0,0 +1,15 @@
//go:build !linux
package binfmt
import "syscall"
// MaybeRegister() is a no-op on platforms without binfmt_misc support; it
// ignores its argument and returns no error.
func MaybeRegister(_ []string) error {
	return nil
}
// Register() is unsupported on platforms without binfmt_misc; it ignores its
// argument and always returns ENOSYS.
func Register(_ []string) error {
	return syscall.ENOSYS
}

View File

@ -8,11 +8,9 @@ import (
"strings"
)
var (
// ErrNoSuchUser indicates that the user provided by the caller does not
// exist in /etc/passws
ErrNoSuchUser = errors.New("user does not exist in /etc/passwd")
)
// ErrNoSuchUser indicates that the user provided by the caller does not
// exist in /etc/passws
var ErrNoSuchUser = errors.New("user does not exist in /etc/passwd")
// GetUser will return the uid, gid of the user specified in the userspec
// it will use the /etc/passwd and /etc/group files inside of the rootdir

View File

@ -1,5 +1,4 @@
//go:build !linux && !freebsd
// +build !linux,!freebsd
package chrootuser

View File

@ -1,5 +1,4 @@
//go:build linux || freebsd
// +build linux freebsd
package chrootuser
@ -76,9 +75,7 @@ func openChrootedFile(rootdir, filename string) (*exec.Cmd, io.ReadCloser, error
return cmd, stdout, nil
}
var (
lookupUser, lookupGroup sync.Mutex
)
var lookupUser, lookupGroup sync.Mutex
type lookupPasswdEntry struct {
name string

View File

@ -60,7 +60,6 @@ func GenBuildOptions(c *cobra.Command, inputArgs []string, iopts BuildOptions) (
if c.Flag("dns-search").Changed {
return options, nil, nil, errors.New("the --dns-search option cannot be used with --network=none")
}
}
if c.Flag("tag").Changed {
tags = iopts.Tag

View File

@ -270,7 +270,7 @@ always: pull base and SBOM scanner images even if the named images are present
missing: pull base and SBOM scanner images if the named images are not present in store.
never: only use images present in store if available.
newer: only pull base and SBOM scanner images when newer images exist on the registry than those in the store.`)
fs.Lookup("pull").NoOptDefVal = "missing" //treat a --pull with no argument like --pull=missing
fs.Lookup("pull").NoOptDefVal = "missing" // treat a --pull with no argument like --pull=missing
fs.BoolVar(&flags.PullAlways, "pull-always", false, "pull the image even if the named image is present in store")
if err := fs.MarkHidden("pull-always"); err != nil {
panic(fmt.Sprintf("error marking the pull-always flag as hidden: %v", err))
@ -516,7 +516,7 @@ func VerifyFlagsArgsOrder(args []string) error {
}
// AliasFlags is a function to handle backwards compatibility with old flags
func AliasFlags(f *pflag.FlagSet, name string) pflag.NormalizedName {
func AliasFlags(_ *pflag.FlagSet, name string) pflag.NormalizedName {
switch name {
case "net":
name = "network"
@ -550,10 +550,8 @@ func LookupEnvVarReferences(specs, environ []string) []string {
for _, spec := range specs {
if key, _, ok := strings.Cut(spec, "="); ok {
result = append(result, spec)
} else if key == "*" {
result = append(result, environ...)
} else {
prefix := key + "="
if strings.HasSuffix(key, "*") {

View File

@ -10,7 +10,7 @@ import (
// AutocompleteNamespaceFlag - Autocomplete the userns flag.
// -> host, private, container, ns:[path], [path]
func AutocompleteNamespaceFlag(cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) {
func AutocompleteNamespaceFlag(_ *cobra.Command, _ []string, toComplete string) ([]string, cobra.ShellCompDirective) {
var completions []string
// If we don't filter on "toComplete", zsh and fish will not do file completion
// even if the prefix typed by the user does not match the returned completions

View File

@ -1,6 +1,7 @@
package overlay
import (
"errors"
"fmt"
"os"
"os/exec"
@ -8,8 +9,6 @@ import (
"strings"
"syscall"
"errors"
"github.com/containers/storage/pkg/idtools"
"github.com/containers/storage/pkg/system"
"github.com/containers/storage/pkg/unshare"
@ -54,7 +53,7 @@ type Options struct {
// TempDir generates an overlay Temp directory in the container content
func TempDir(containerDir string, rootUID, rootGID int) (string, error) {
contentDir := filepath.Join(containerDir, "overlay")
if err := idtools.MkdirAllAs(contentDir, 0700, rootUID, rootGID); err != nil {
if err := idtools.MkdirAllAs(contentDir, 0o700, rootUID, rootGID); err != nil {
return "", fmt.Errorf("failed to create the overlay %s directory: %w", contentDir, err)
}
@ -69,7 +68,7 @@ func TempDir(containerDir string, rootUID, rootGID int) (string, error) {
// GenerateStructure generates an overlay directory structure for container content
func GenerateStructure(containerDir, containerID, name string, rootUID, rootGID int) (string, error) {
contentDir := filepath.Join(containerDir, "overlay-containers", containerID, name)
if err := idtools.MkdirAllAs(contentDir, 0700, rootUID, rootGID); err != nil {
if err := idtools.MkdirAllAs(contentDir, 0o700, rootUID, rootGID); err != nil {
return "", fmt.Errorf("failed to create the overlay %s directory: %w", contentDir, err)
}
@ -80,14 +79,14 @@ func GenerateStructure(containerDir, containerID, name string, rootUID, rootGID
func generateOverlayStructure(containerDir string, rootUID, rootGID int) (string, error) {
upperDir := filepath.Join(containerDir, "upper")
workDir := filepath.Join(containerDir, "work")
if err := idtools.MkdirAllAs(upperDir, 0700, rootUID, rootGID); err != nil {
if err := idtools.MkdirAllAs(upperDir, 0o700, rootUID, rootGID); err != nil {
return "", fmt.Errorf("failed to create the overlay %s directory: %w", upperDir, err)
}
if err := idtools.MkdirAllAs(workDir, 0700, rootUID, rootGID); err != nil {
if err := idtools.MkdirAllAs(workDir, 0o700, rootUID, rootGID); err != nil {
return "", fmt.Errorf("failed to create the overlay %s directory: %w", workDir, err)
}
mergeDir := filepath.Join(containerDir, "merge")
if err := idtools.MkdirAllAs(mergeDir, 0700, rootUID, rootGID); err != nil {
if err := idtools.MkdirAllAs(mergeDir, 0o700, rootUID, rootGID); err != nil {
return "", fmt.Errorf("failed to create the overlay %s directory: %w", mergeDir, err)
}

View File

@ -27,7 +27,7 @@ func MountWithOptions(contentDir, source, dest string, opts *Options) (mount spe
if opts.ReadOnly {
// Read-only overlay mounts require two lower layer.
lowerTwo := filepath.Join(contentDir, "lower")
if err := os.Mkdir(lowerTwo, 0755); err != nil {
if err := os.Mkdir(lowerTwo, 0o755); err != nil {
return mount, err
}
overlayOptions = fmt.Sprintf("lowerdir=%s:%s,private", escapeColon(source), lowerTwo)

View File

@ -1,5 +1,4 @@
//go:build !freebsd && !linux
// +build !freebsd,!linux
package overlay

View File

@ -15,7 +15,7 @@ import (
"strings"
"unicode"
"github.com/containerd/containerd/platforms"
"github.com/containerd/platforms"
"github.com/containers/buildah/define"
mkcwtypes "github.com/containers/buildah/internal/mkcw/types"
internalParse "github.com/containers/buildah/internal/parse"
@ -250,7 +250,6 @@ func parseSecurityOpts(securityOpts []string, commonOpts *define.CommonBuildOpti
default:
return fmt.Errorf("invalid --security-opt 2: %q", opt)
}
}
if commonOpts.SeccompProfilePath == "" {
@ -328,7 +327,7 @@ func validateExtraHost(val string) error {
// validateIPAddress validates an Ip address.
// for dns, ip, and ip6 flags also
func validateIPAddress(val string) (string, error) {
var ip = net.ParseIP(strings.TrimSpace(val))
ip := net.ParseIP(strings.TrimSpace(val))
if ip != nil {
return ip.String(), nil
}
@ -636,7 +635,9 @@ func AuthConfig(creds string) (*types.DockerAuthConfig, error) {
username, password := parseCreds(creds)
if username == "" {
fmt.Print("Username: ")
fmt.Scanln(&username)
if _, err := fmt.Scanln(&username); err != nil {
return nil, fmt.Errorf("reading user name: %w", err)
}
}
if password == "" {
fmt.Print("Password: ")
@ -659,15 +660,19 @@ func GetBuildOutput(buildOutput string) (define.BuildOutputOption, error) {
if len(buildOutput) == 1 && buildOutput == "-" {
// Feature parity with buildkit, output tar to stdout
// Read more here: https://docs.docker.com/engine/reference/commandline/build/#custom-build-outputs
return define.BuildOutputOption{Path: "",
return define.BuildOutputOption{
Path: "",
IsDir: false,
IsStdout: true}, nil
IsStdout: true,
}, nil
}
if !strings.Contains(buildOutput, ",") {
// expect default --output <dirname>
return define.BuildOutputOption{Path: buildOutput,
return define.BuildOutputOption{
Path: buildOutput,
IsDir: true,
IsStdout: false}, nil
IsStdout: false,
}, nil
}
isDir := true
isStdout := false
@ -712,9 +717,11 @@ func GetBuildOutput(buildOutput string) (define.BuildOutputOption, error) {
if isDir {
return define.BuildOutputOption{}, fmt.Errorf("invalid build output option %q, type=local and dest=- is not supported", buildOutput)
}
return define.BuildOutputOption{Path: "",
return define.BuildOutputOption{
Path: "",
IsDir: false,
IsStdout: true}, nil
IsStdout: true,
}, nil
}
return define.BuildOutputOption{Path: path, IsDir: isDir, IsStdout: isStdout}, nil
@ -750,7 +757,7 @@ func GetConfidentialWorkloadOptions(arg string) (define.ConfidentialWorkloadOpti
if options.AttestationURL == option {
options.AttestationURL = strings.TrimPrefix(option, "attestation-url=")
}
case strings.HasPrefix(option, "passphrase="), strings.HasPrefix(option, "passphrase="):
case strings.HasPrefix(option, "passphrase="):
options.Convert = true
options.DiskEncryptionPassphrase = strings.TrimPrefix(option, "passphrase=")
case strings.HasPrefix(option, "workload_id="), strings.HasPrefix(option, "workload-id="):
@ -801,7 +808,7 @@ func SBOMScanOptions(c *cobra.Command) (*define.SBOMScanOptions, error) {
}
// SBOMScanOptionsFromFlagSet parses scan settings from the cli
func SBOMScanOptionsFromFlagSet(flags *pflag.FlagSet, findFlagFunc func(name string) *pflag.Flag) (*define.SBOMScanOptions, error) {
func SBOMScanOptionsFromFlagSet(flags *pflag.FlagSet, _ func(name string) *pflag.Flag) (*define.SBOMScanOptions, error) {
preset, err := flags.GetString("sbom")
if err != nil {
return nil, fmt.Errorf("invalid value for --sbom: %w", err)
@ -866,7 +873,7 @@ func SBOMScanOptionsFromFlagSet(flags *pflag.FlagSet, findFlagFunc func(name str
}
// IDMappingOptions parses the build options related to user namespaces and ID mapping.
func IDMappingOptions(c *cobra.Command, isolation define.Isolation) (usernsOptions define.NamespaceOptions, idmapOptions *define.IDMappingOptions, err error) {
func IDMappingOptions(c *cobra.Command, _ define.Isolation) (usernsOptions define.NamespaceOptions, idmapOptions *define.IDMappingOptions, err error) {
return IDMappingOptionsFromFlagSet(c.Flags(), c.PersistentFlags(), c.Flag)
}
@ -1209,7 +1216,7 @@ func Device(device string) (string, string, string, error) {
// isValidDeviceMode checks if the mode for device is valid or not.
// isValid mode is a composition of r (read), w (write), and m (mknod).
func isValidDeviceMode(mode string) bool {
var legalDeviceMode = map[rune]struct{}{
legalDeviceMode := map[rune]struct{}{
'r': {},
'w': {},
'm': {},
@ -1285,7 +1292,6 @@ func Secrets(secrets []string) (map[string]define.Secret, error) {
SourceType: typ,
}
parsed[id] = newSecret
}
return parsed, nil
}

View File

@ -1,5 +1,4 @@
//go:build linux || darwin
// +build linux darwin
package parse

View File

@ -1,5 +1,4 @@
//go:build !linux && !darwin
// +build !linux,!darwin
package parse

View File

@ -1,5 +1,4 @@
//go:build !windows
// +build !windows
package rusage

View File

@ -1,5 +1,4 @@
//go:build windows
// +build windows
package rusage

View File

@ -64,7 +64,6 @@ func newAgentServerSocket(socketPath string) (*AgentServer, error) {
conn: &conn,
shutdown: make(chan bool, 1),
}, nil
}
// Serve starts the SSH agent on the host and returns the path of the socket where the agent is serving
@ -104,7 +103,7 @@ func (a *AgentServer) Serve(processLabel string) (string, error) {
go func() {
for {
//listener.Accept blocks
// listener.Accept blocks
c, err := listener.Accept()
if err != nil {
select {

View File

@ -1,5 +1,4 @@
//go:build linux || freebsd || darwin
// +build linux freebsd darwin
package util

View File

@ -3,8 +3,8 @@ package util
import (
"bytes"
"errors"
"time"
"os"
"time"
)
func ReadUptime() (time.Duration, error) {

View File

@ -1,5 +1,4 @@
//go:build !windows
// +build !windows
package util

View File

@ -6,5 +6,4 @@ import (
func ReadKernelVersion() (string, error) {
return "", errors.New("readKernelVersion not supported on windows")
}

View File

@ -62,7 +62,7 @@ type PullOptions struct {
// Pull copies the contents of the image from somewhere else to local storage. Returns the
// ID of the local image or an error.
func Pull(ctx context.Context, imageName string, options PullOptions) (imageID string, err error) {
func Pull(_ context.Context, imageName string, options PullOptions) (imageID string, err error) {
libimageOptions := &libimage.PullOptions{}
libimageOptions.SignaturePolicyPath = options.SignaturePolicyPath
libimageOptions.Writer = options.ReportWriter

View File

@ -222,9 +222,10 @@ type IDMaps struct {
// netResult type to hold network info for hosts/resolv.conf
type netResult struct {
entries etchosts.HostEntries
dnsServers []string
excludeIPs []net.IP
ipv6 bool
keepHostResolvers bool
entries etchosts.HostEntries
dnsServers []string
excludeIPs []net.IP
ipv6 bool
keepHostResolvers bool
preferredHostContainersInternalIP string
}

View File

@ -1,5 +1,4 @@
//go:build linux || freebsd
// +build linux freebsd
package buildah
@ -85,7 +84,8 @@ func (b *Builder) createResolvConf(rdir string, chownOpts *idtools.IDPair) (stri
// addResolvConf copies files from host and sets them up to bind mount into container
func (b *Builder) addResolvConfEntries(file string, networkNameServer []string,
spec *specs.Spec, keepHostServers, ipv6 bool) error {
spec *specs.Spec, keepHostServers, ipv6 bool,
) error {
defaultConfig, err := config.Default()
if err != nil {
return fmt.Errorf("failed to get config: %w", err)
@ -152,7 +152,7 @@ func (b *Builder) createHostsFile(rdir string, chownOpts *idtools.IDPair) (strin
return targetfile, nil
}
func (b *Builder) addHostsEntries(file, imageRoot string, entries etchosts.HostEntries, exculde []net.IP) error {
func (b *Builder) addHostsEntries(file, imageRoot string, entries etchosts.HostEntries, exclude []net.IP, preferIP string) error {
conf, err := config.Default()
if err != nil {
return err
@ -163,11 +163,15 @@ func (b *Builder) addHostsEntries(file, imageRoot string, entries etchosts.HostE
return err
}
return etchosts.New(&etchosts.Params{
BaseFile: base,
ExtraHosts: b.CommonBuildOpts.AddHost,
HostContainersInternalIP: etchosts.GetHostContainersInternalIPExcluding(conf, nil, nil, exculde),
TargetFile: file,
ContainerIPs: entries,
BaseFile: base,
ExtraHosts: b.CommonBuildOpts.AddHost,
HostContainersInternalIP: etchosts.GetHostContainersInternalIP(etchosts.HostContainersInternalOptions{
Conf: conf,
Exclude: exclude,
PreferIP: preferIP,
}),
TargetFile: file,
ContainerIPs: entries,
})
}
@ -180,7 +184,7 @@ func (b *Builder) generateHostname(rdir, hostname string, chownOpts *idtools.IDP
hostnameBuffer.Write([]byte(fmt.Sprintf("%s\n", hostname)))
cfile := filepath.Join(rdir, filepath.Base(hostnamePath))
if err = ioutils.AtomicWriteFile(cfile, hostnameBuffer.Bytes(), 0644); err != nil {
if err = ioutils.AtomicWriteFile(cfile, hostnameBuffer.Bytes(), 0o644); err != nil {
return "", fmt.Errorf("writing /etc/hostname into the container: %w", err)
}
@ -258,7 +262,7 @@ func runLookupPath(g *generate.Generator, command []string) []string {
// check if it's there,
if fi, err := os.Lstat(filepath.Join(spec.Root.Path, candidate)); fi != nil && err == nil {
// and if it's not a directory, and either a symlink or executable,
if !fi.IsDir() && ((fi.Mode()&os.ModeSymlink != 0) || (fi.Mode()&0111 != 0)) {
if !fi.IsDir() && ((fi.Mode()&os.ModeSymlink != 0) || (fi.Mode()&0o111 != 0)) {
// use that.
return append([]string{candidate}, command[1:]...)
}
@ -440,7 +444,8 @@ func waitForSync(pipeR *os.File) error {
}
func runUsingRuntime(options RunOptions, configureNetwork bool, moreCreateArgs []string, spec *specs.Spec, bundlePath, containerName string,
containerCreateW io.WriteCloser, containerStartR io.ReadCloser) (wstatus unix.WaitStatus, err error) {
containerCreateW io.WriteCloser, containerStartR io.ReadCloser,
) (wstatus unix.WaitStatus, err error) {
if options.Logger == nil {
options.Logger = logrus.StandardLogger()
}
@ -466,7 +471,7 @@ func runUsingRuntime(options RunOptions, configureNetwork bool, moreCreateArgs [
if err != nil {
return 1, fmt.Errorf("encoding configuration %#v as json: %w", spec, err)
}
if err = ioutils.AtomicWriteFile(filepath.Join(bundlePath, "config.json"), specbytes, 0600); err != nil {
if err = ioutils.AtomicWriteFile(filepath.Join(bundlePath, "config.json"), specbytes, 0o600); err != nil {
return 1, fmt.Errorf("storing runtime configuration: %w", err)
}
@ -1138,7 +1143,8 @@ func runUsingRuntimeMain() {
}
func (b *Builder) runUsingRuntimeSubproc(isolation define.Isolation, options RunOptions, configureNetwork bool, networkString string,
moreCreateArgs []string, spec *specs.Spec, rootPath, bundlePath, containerName, buildContainerName, hostsFile, resolvFile string) (err error) {
moreCreateArgs []string, spec *specs.Spec, rootPath, bundlePath, containerName, buildContainerName, hostsFile, resolvFile string,
) (err error) {
// Lock the caller to a single OS-level thread.
runtime.LockOSThread()
defer runtime.UnlockOSThread()
@ -1253,7 +1259,7 @@ func (b *Builder) runUsingRuntimeSubproc(isolation define.Isolation, options Run
// only add hosts if we manage the hosts file
if hostsFile != "" {
err = b.addHostsEntries(hostsFile, rootPath, netResult.entries, netResult.excludeIPs)
err = b.addHostsEntries(hostsFile, rootPath, netResult.entries, netResult.excludeIPs, netResult.preferredHostContainersInternalIP)
if err != nil {
return err
}
@ -1340,8 +1346,8 @@ func (b *Builder) setupMounts(mountPoint string, spec *specs.Spec, bundlePath st
}
// Get host UID and GID of the container process.
var uidMap = []specs.LinuxIDMapping{}
var gidMap = []specs.LinuxIDMapping{}
uidMap := []specs.LinuxIDMapping{}
gidMap := []specs.LinuxIDMapping{}
if spec.Linux != nil {
uidMap = spec.Linux.UIDMappings
gidMap = spec.Linux.GIDMappings
@ -1381,7 +1387,7 @@ func (b *Builder) setupMounts(mountPoint string, spec *specs.Spec, bundlePath st
}
// Get the list of explicitly-specified volume mounts.
var mountLabel = ""
mountLabel := ""
if spec.Linux != nil {
mountLabel = spec.Linux.MountLabel
}
@ -1442,7 +1448,7 @@ func runSetupBuiltinVolumes(mountLabel, mountPoint, containerDir string, builtin
return nil, err
}
logrus.Debugf("setting up built-in volume path at %q for %q", volumePath, volume)
if err = os.MkdirAll(volumePath, 0755); err != nil {
if err = os.MkdirAll(volumePath, 0o755); err != nil {
return nil, err
}
if err = relabel(volumePath, mountLabel, false); err != nil {
@ -1681,7 +1687,7 @@ func (b *Builder) getSecretMount(tokens []string, secrets map[string]define.Secr
var id, target string
var required bool
var uid, gid uint32
var mode uint32 = 0400
var mode uint32 = 0o400
for _, val := range tokens {
kv := strings.SplitN(val, "=", 2)
switch kv[0] {
@ -1775,10 +1781,10 @@ func (b *Builder) getSecretMount(tokens []string, secrets map[string]define.Secr
// Copy secrets to container working dir (or tmp dir if it's an env), since we need to chmod,
// chown and relabel it for the container user and we don't want to mess with the original file
if err := os.MkdirAll(filepath.Dir(ctrFileOnHost), 0755); err != nil {
if err := os.MkdirAll(filepath.Dir(ctrFileOnHost), 0o755); err != nil {
return nil, "", err
}
if err := os.WriteFile(ctrFileOnHost, data, 0644); err != nil {
if err := os.WriteFile(ctrFileOnHost, data, 0o644); err != nil {
return nil, "", err
}
@ -1924,7 +1930,7 @@ func (b *Builder) cleanupTempVolumes() {
for tempVolume, val := range b.TempVolumes {
if val {
if err := overlay.RemoveTemp(tempVolume); err != nil {
b.Logger.Errorf(err.Error())
b.Logger.Error(err.Error())
}
b.TempVolumes[tempVolume] = false
}
@ -1940,7 +1946,7 @@ func (b *Builder) cleanupRunMounts(context *imageTypes.SystemContext, mountpoint
}
}
//cleanup any mounted images for this run
// cleanup any mounted images for this run
for _, image := range artifacts.MountedImages {
if image != "" {
// if flow hits here some image was mounted for this run

View File

@ -1,5 +1,4 @@
//go:build freebsd
// +build freebsd
package buildah
@ -46,16 +45,14 @@ const (
PROC_REAP_RELEASE = 3
)
var (
// We dont want to remove destinations with /etc, /dev as
// rootfs already contains these files and unionfs will create
// a `whiteout` i.e `.wh` files on removal of overlapping
// files from these directories. everything other than these
// will be cleaned up
nonCleanablePrefixes = []string{
"/etc", "/dev",
}
)
// We dont want to remove destinations with /etc, /dev as
// rootfs already contains these files and unionfs will create
// a `whiteout` i.e `.wh` files on removal of overlapping
// files from these directories. everything other than these
// will be cleaned up
var nonCleanablePrefixes = []string{
"/etc", "/dev",
}
func procctl(idtype int, id int, cmd int, arg *byte) error {
_, _, e1 := unix.Syscall6(
@ -185,7 +182,7 @@ func (b *Builder) Run(command []string, options RunOptions) error {
uid, gid := spec.Process.User.UID, spec.Process.User.GID
idPair := &idtools.IDPair{UID: int(uid), GID: int(gid)}
mode := os.FileMode(0755)
mode := os.FileMode(0o755)
coptions := copier.MkdirOptions{
ChownNew: idPair,
ChmodNew: &mode,
@ -226,7 +223,7 @@ func (b *Builder) Run(command []string, options RunOptions) error {
})
}
}
err = b.addHostsEntries(hostsFile, mountPoint, entries, nil)
err = b.addHostsEntries(hostsFile, mountPoint, entries, nil, "")
if err != nil {
return err
}
@ -244,7 +241,7 @@ func (b *Builder) Run(command []string, options RunOptions) error {
// Only add entries here if we do not have to do setup network,
// if we do we have to do it much later after the network setup.
if !configureNetwork {
err = b.addResolvConfEntries(resolvFile, nil, nil, false, true)
err = b.addResolvConfEntries(resolvFile, nil, spec, false, true)
if err != nil {
return err
}
@ -536,7 +533,7 @@ func (b *Builder) configureNamespaces(g *generate.Generator, options *RunOptions
namespaceOptions.AddOrReplace(options.NamespaceOptions...)
networkPolicy := options.ConfigureNetwork
//Nothing was specified explicitly so network policy should be inherited from builder
// Nothing was specified explicitly so network policy should be inherited from builder
if networkPolicy == NetworkDefault {
networkPolicy = b.ConfigureNetwork

View File

@ -1,5 +1,4 @@
//go:build linux
// +build linux
package buildah
@ -10,6 +9,7 @@ import (
"os"
"path/filepath"
"strings"
"sync"
"syscall"
"github.com/containers/buildah/bind"
@ -19,6 +19,7 @@ import (
"github.com/containers/buildah/internal"
"github.com/containers/buildah/internal/tmpdir"
"github.com/containers/buildah/internal/volumes"
"github.com/containers/buildah/pkg/binfmt"
"github.com/containers/buildah/pkg/overlay"
"github.com/containers/buildah/pkg/parse"
butil "github.com/containers/buildah/pkg/util"
@ -59,6 +60,9 @@ var (
nonCleanablePrefixes = []string{
"/etc", "/dev", "/sys", "/proc",
}
// binfmtRegistered makes sure we only try to register binfmt_misc
// interpreters once, the first time we handle a RUN instruction.
binfmtRegistered sync.Once
)
func setChildProcess() error {
@ -161,6 +165,21 @@ func separateDevicesFromRuntimeSpec(g *generate.Generator) define.ContainerDevic
// Run runs the specified command in the container's root filesystem.
func (b *Builder) Run(command []string, options RunOptions) error {
if os.Getenv("container") != "" {
os, arch, variant, err := parse.Platform("")
if err != nil {
return fmt.Errorf("reading the current default platform")
}
platform := b.OCIv1.Platform
if os != platform.OS || arch != platform.Architecture || variant != platform.Variant {
binfmtRegistered.Do(func() {
if err := binfmt.Register(nil); err != nil {
logrus.Warnf("registering binfmt_misc interpreters: %v", err)
}
})
}
}
p, err := os.MkdirTemp(tmpdir.GetTempDir(), define.Package)
if err != nil {
return err
@ -346,7 +365,7 @@ func (b *Builder) Run(command []string, options RunOptions) error {
idPair := &idtools.IDPair{UID: int(uid), GID: int(gid)}
mode := os.FileMode(0755)
mode := os.FileMode(0o755)
coptions := copier.MkdirOptions{
ChownNew: idPair,
ChmodNew: &mode,
@ -396,7 +415,7 @@ func (b *Builder) Run(command []string, options RunOptions) error {
})
}
}
err = b.addHostsEntries(hostsFile, mountPoint, entries, nil)
err = b.addHostsEntries(hostsFile, mountPoint, entries, nil, "")
if err != nil {
return err
}
@ -432,7 +451,7 @@ func (b *Builder) Run(command []string, options RunOptions) error {
// Empty file, so no need to recreate if it exists
if _, ok := bindFiles["/run/.containerenv"]; !ok {
containerenvPath := filepath.Join(path, "/run/.containerenv")
if err = os.MkdirAll(filepath.Dir(containerenvPath), 0755); err != nil {
if err = os.MkdirAll(filepath.Dir(containerenvPath), 0o755); err != nil {
return err
}
@ -450,7 +469,7 @@ imageid=%q
rootless=%d
`, define.Version, b.Container, b.ContainerID, b.FromImage, b.FromImageID, rootless)
if err = ioutils.AtomicWriteFile(containerenvPath, []byte(containerenv), 0755); err != nil {
if err = ioutils.AtomicWriteFile(containerenvPath, []byte(containerenv), 0o755); err != nil {
return err
}
if err := relabel(containerenvPath, b.MountLabel, false); err != nil {
@ -655,7 +674,7 @@ func setupSlirp4netnsNetwork(config *config.Config, netns, cid string, options,
}
func setupPasta(config *config.Config, netns string, options, hostnames []string) (func(), *netResult, error) {
res, err := pasta.Setup2(&pasta.SetupOptions{
res, err := pasta.Setup(&pasta.SetupOptions{
Config: config,
Netns: netns,
ExtraOptions: options,
@ -669,12 +688,18 @@ func setupPasta(config *config.Config, netns string, options, hostnames []string
entries = etchosts.HostEntries{{IP: res.IPAddresses[0].String(), Names: hostnames}}
}
mappedIP := ""
if len(res.MapGuestAddrIPs) > 0 {
mappedIP = res.MapGuestAddrIPs[0]
}
result := &netResult{
entries: entries,
dnsServers: res.DNSForwardIPs,
excludeIPs: res.IPAddresses,
ipv6: res.IPv6,
keepHostResolvers: true,
entries: entries,
dnsServers: res.DNSForwardIPs,
excludeIPs: res.IPAddresses,
ipv6: res.IPv6,
keepHostResolvers: true,
preferredHostContainersInternalIP: mappedIP,
}
return nil, result, nil
@ -786,7 +811,7 @@ func runMakeStdioPipe(uid, gid int) ([][]int, error) {
return stdioPipe, nil
}
func setupNamespaces(logger *logrus.Logger, g *generate.Generator, namespaceOptions define.NamespaceOptions, idmapOptions define.IDMappingOptions, policy define.NetworkConfigurationPolicy) (configureNetwork bool, networkString string, configureUTS bool, err error) {
func setupNamespaces(_ *logrus.Logger, g *generate.Generator, namespaceOptions define.NamespaceOptions, idmapOptions define.IDMappingOptions, policy define.NetworkConfigurationPolicy) (configureNetwork bool, networkString string, configureUTS bool, err error) {
defaultContainerConfig, err := config.Default()
if err != nil {
return false, "", false, fmt.Errorf("failed to get container config: %w", err)
@ -916,7 +941,7 @@ func (b *Builder) configureNamespaces(g *generate.Generator, options *RunOptions
namespaceOptions.AddOrReplace(options.NamespaceOptions...)
networkPolicy := options.ConfigureNetwork
//Nothing was specified explicitly so network policy should be inherited from builder
// Nothing was specified explicitly so network policy should be inherited from builder
if networkPolicy == NetworkDefault {
networkPolicy = b.ConfigureNetwork
@ -1193,9 +1218,6 @@ func setupCapAdd(g *generate.Generator, caps ...string) error {
if err := g.AddProcessCapabilityPermitted(cap); err != nil {
return fmt.Errorf("adding %q to the permitted capability set: %w", cap, err)
}
if err := g.AddProcessCapabilityAmbient(cap); err != nil {
return fmt.Errorf("adding %q to the ambient capability set: %w", cap, err)
}
}
return nil
}
@ -1211,9 +1233,6 @@ func setupCapDrop(g *generate.Generator, caps ...string) error {
if err := g.DropProcessCapabilityPermitted(cap); err != nil {
return fmt.Errorf("removing %q from the permitted capability set: %w", cap, err)
}
if err := g.DropProcessCapabilityAmbient(cap); err != nil {
return fmt.Errorf("removing %q from the ambient capability set: %w", cap, err)
}
}
return nil
}
@ -1296,10 +1315,10 @@ func setupSpecialMountSpecChanges(spec *specs.Spec, shmSize string) ([]specs.Mou
if err != nil {
return nil, err
}
gid5Available = checkIdsGreaterThan5(gids)
gid5Available = checkIDsGreaterThan5(gids)
}
if gid5Available && len(spec.Linux.GIDMappings) > 0 {
gid5Available = checkIdsGreaterThan5(spec.Linux.GIDMappings)
gid5Available = checkIDsGreaterThan5(spec.Linux.GIDMappings)
}
if !gid5Available {
// If we have no GID mappings, the gid=5 default option would fail, so drop it.
@ -1370,7 +1389,7 @@ func setupSpecialMountSpecChanges(spec *specs.Spec, shmSize string) ([]specs.Mou
return mounts, nil
}
func checkIdsGreaterThan5(ids []specs.LinuxIDMapping) bool {
func checkIDsGreaterThan5(ids []specs.LinuxIDMapping) bool {
for _, r := range ids {
if r.ContainerID <= 5 && 5 < r.ContainerID+r.Size {
return true

View File

@ -1,5 +1,4 @@
//go:build darwin
// +build darwin
package buildah
@ -24,6 +23,7 @@ func runUsingRuntimeMain() {}
func (b *Builder) Run(command []string, options RunOptions) error {
return errors.New("function not supported on non-linux systems")
}
func DefaultNamespaceOptions() (NamespaceOptions, error) {
options := NamespaceOptions{
{Name: string(specs.CgroupNamespace), Host: false},

View File

@ -1,5 +1,4 @@
//go:build !linux && !darwin && !freebsd
// +build !linux,!darwin,!freebsd
package buildah
@ -19,6 +18,7 @@ func runUsingRuntimeMain() {}
func (b *Builder) Run(command []string, options RunOptions) error {
return errors.New("function not supported on non-linux systems")
}
func DefaultNamespaceOptions() (NamespaceOptions, error) {
return NamespaceOptions{}, errors.New("function not supported on non-linux systems")
}

View File

@ -1,5 +1,4 @@
//go:build seccomp && linux
// +build seccomp,linux
package buildah

View File

@ -1,4 +1,4 @@
// +build !seccomp !linux
//go:build !seccomp || !linux
package buildah

View File

@ -1,5 +1,4 @@
//go:build linux
// +build linux
package buildah

View File

@ -1,4 +1,4 @@
// +build !linux
//go:build !linux
package buildah

View File

@ -143,20 +143,19 @@ func ReserveSELinuxLabels(store storage.Store, id string) error {
for _, c := range containers {
if id == c.ID {
continue
} else {
b, err := OpenBuilder(store, c.ID)
if err != nil {
if errors.Is(err, os.ErrNotExist) {
// Ignore not exist errors since containers probably created by other tool
// TODO, we need to read other containers json data to reserve their SELinux labels
continue
}
return err
}
// Prevent different containers from using same MCS label
if err := label.ReserveLabel(b.ProcessLabel); err != nil {
return fmt.Errorf("reserving SELinux label %q: %w", b.ProcessLabel, err)
}
b, err := OpenBuilder(store, c.ID)
if err != nil {
if errors.Is(err, os.ErrNotExist) {
// Ignore not exist errors since containers probably created by other tool
// TODO, we need to read other containers json data to reserve their SELinux labels
continue
}
return err
}
// Prevent different containers from using same MCS label
if err := label.ReserveLabel(b.ProcessLabel); err != nil {
return fmt.Errorf("reserving SELinux label %q: %w", b.ProcessLabel, err)
}
}
}

View File

@ -35,14 +35,12 @@ const (
DefaultTransport = "docker://"
)
var (
// RegistryDefaultPathPrefix contains a per-registry listing of default prefixes
// to prepend to image names that only contain a single path component.
RegistryDefaultPathPrefix = map[string]string{
"index.docker.io": "library",
"docker.io": "library",
}
)
// RegistryDefaultPathPrefix contains a per-registry listing of default prefixes
// to prepend to image names that only contain a single path component.
var RegistryDefaultPathPrefix = map[string]string{
"index.docker.io": "library",
"docker.io": "library",
}
// StringInSlice is deprecated, use golang.org/x/exp/slices.Contains
func StringInSlice(s string, slice []string) bool {
@ -138,10 +136,10 @@ func ExpandNames(names []string, systemContext *types.SystemContext, store stora
return expanded, nil
}
// FindImage locates the locally-stored image which corresponds to a given name.
// Please note that the `firstRegistry` argument has been deprecated and has no
// FindImage locates the locally-stored image which corresponds to a given
// name. Please note that the second argument has been deprecated and has no
// effect anymore.
func FindImage(store storage.Store, firstRegistry string, systemContext *types.SystemContext, image string) (types.ImageReference, *storage.Image, error) {
func FindImage(store storage.Store, _ string, systemContext *types.SystemContext, image string) (types.ImageReference, *storage.Image, error) {
runtime, err := libimage.RuntimeFromStore(store, &libimage.RuntimeOptions{SystemContext: systemContext})
if err != nil {
return nil, nil, err
@ -190,9 +188,8 @@ func ResolveNameToReferences(
}
// AddImageNames adds the specified names to the specified image. Please note
// that the `firstRegistry` argument has been deprecated and has no effect
// anymore.
func AddImageNames(store storage.Store, firstRegistry string, systemContext *types.SystemContext, image *storage.Image, addNames []string) error {
// that the second argument has been deprecated and has no effect anymore.
func AddImageNames(store storage.Store, _ string, systemContext *types.SystemContext, image *storage.Image, addNames []string) error {
runtime, err := libimage.RuntimeFromStore(store, &libimage.RuntimeOptions{SystemContext: systemContext})
if err != nil {
return err

View File

@ -1,5 +1,4 @@
//go:build linux || darwin || freebsd || netbsd
// +build linux darwin freebsd netbsd
package util

View File

@ -1,4 +1,4 @@
// +build !linux
//go:build !linux
package util

View File

@ -1,5 +1,4 @@
//go:build !linux && !darwin
// +build !linux,!darwin
package util

Some files were not shown because too many files have changed in this diff Show More