Compare commits


412 Commits

Author SHA1 Message Date
openshift-merge-bot[bot] b03bc242a7
Merge pull request #6294 from containers/renovate/github.com-containers-common-0.x
fix(deps): update module github.com/containers/common to v0.64.0
2025-07-17 17:11:10 +00:00
openshift-merge-bot[bot] 1c0fb347c3
Merge pull request #6292 from containers/renovate/github.com-spf13-pflag-1.x
fix(deps): update module github.com/spf13/pflag to v1.0.7
2025-07-17 14:30:13 +00:00
renovate[bot] e729f60d50
fix(deps): update module github.com/containers/common to v0.64.0
Signed-off-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com>
2025-07-17 05:14:17 +00:00
renovate[bot] 8b5354ee8c
fix(deps): update module github.com/spf13/pflag to v1.0.7
Signed-off-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com>
2025-07-16 22:12:08 +00:00
openshift-merge-bot[bot] b3eab30b05
Merge pull request #6290 from containers/renovate/github.com-containers-image-v5-5.x
fix(deps): update module github.com/containers/image/v5 to v5.36.0
2025-07-16 14:05:02 +00:00
renovate[bot] 9c525fd526
fix(deps): update module github.com/containers/image/v5 to v5.36.0
Signed-off-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com>
2025-07-15 23:26:18 +00:00
openshift-merge-bot[bot] db61e10b3f
Merge pull request #6287 from containers/renovate/github.com-containers-storage-1.x
fix(deps): update module github.com/containers/storage to v1.59.0
2025-07-14 21:07:21 +00:00
renovate[bot] b8c6fcf211
fix(deps): update module github.com/containers/storage to v1.59.0
Signed-off-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com>
2025-07-14 18:59:21 +00:00
openshift-merge-bot[bot] 8403fd604b
Merge pull request #6269 from flouthoc/cache-canidates
stage_executor: check platform of cache candidates
2025-07-11 17:45:22 +00:00
flouthoc 3502889676
stage_executor: check platform of cache candidates
When building images for a `manifest` list using `--platform`, the same image
is used for multiple platforms if the base is `scratch`; this PR adds a
check to always verify the `platform` of the `cache` against the `target`.

Closes: https://github.com/containers/podman/issues/18723

Signed-off-by: flouthoc <flouthoc.git@gmail.com>
2025-07-11 09:44:07 -07:00
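For illustration, a minimal sketch (not the actual stage_executor code) of the kind of check the commit above describes, with hypothetical helper and parameter names: before reusing a cached image, its platform fields are compared against the platform being built for.

```go
// Hypothetical helper: reject a cache candidate whose platform does not
// match the platform currently being built for.
func cacheMatchesTarget(cacheOS, cacheArch, cacheVariant, targetOS, targetArch, targetVariant string) bool {
	if cacheOS != targetOS || cacheArch != targetArch {
		return false
	}
	// Treating an empty variant as a wildcard is an assumption here,
	// not necessarily what stage_executor does.
	return cacheVariant == "" || targetVariant == "" || cacheVariant == targetVariant
}
```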
openshift-merge-bot[bot] 252cc24fd5
Merge pull request #6240 from 2004joshua/link
feat: ADD/COPY --link support for buildah
2025-07-11 15:27:00 +00:00
openshift-merge-bot[bot] 869997e379
Merge pull request #6278 from containers/renovate/golang.org-x-crypto-0.x
fix(deps): update module golang.org/x/crypto to v0.40.0
2025-07-11 15:07:26 +00:00
openshift-merge-bot[bot] aa807d5863
Merge pull request #6252 from pstoeckle/main
chore(typos): fix typos
2025-07-11 09:20:15 +00:00
renovate[bot] 96588db2e6
fix(deps): update module golang.org/x/crypto to v0.40.0
Signed-off-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com>
2025-07-10 18:07:11 +00:00
openshift-merge-bot[bot] 1c0e79c974
Merge pull request #6277 from containers/renovate/golang.org-x-term-0.x
fix(deps): update module golang.org/x/term to v0.33.0
2025-07-10 18:05:47 +00:00
openshift-merge-bot[bot] a2108716b5
Merge pull request #6272 from containers/renovate/github.com-docker-docker-28.x
fix(deps): update module github.com/docker/docker to v28.3.2+incompatible
2025-07-10 17:38:18 +00:00
renovate[bot] d9443fe42f
fix(deps): update module golang.org/x/term to v0.33.0
Signed-off-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com>
2025-07-10 16:53:00 +00:00
openshift-merge-bot[bot] cc938ce53f
Merge pull request #6273 from containers/renovate/golang.org-x-sync-0.x
fix(deps): update module golang.org/x/sync to v0.16.0
2025-07-10 16:11:04 +00:00
renovate[bot] d6e5cbaf74
fix(deps): update module golang.org/x/sync to v0.16.0
Signed-off-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com>
2025-07-10 02:45:42 +00:00
renovate[bot] c694e3c7a2
fix(deps): update module github.com/docker/docker to v28.3.2+incompatible
Signed-off-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com>
2025-07-09 23:15:31 +00:00
Joshua Arrevillaga eea4838d88 ADD/COPY --link support added
What type of PR is this?
/kind feature

What this PR does / why we need it:
It implements --link for COPY and ADD instructions and enables the creation of
cacheable layers that can be reused independently across builds.

Follows BuildKit's `--link` specification.

How to verify it
bats tests/bud.bats

Which issue(s) this PR fixes:
Fixes #4325

Does this PR introduce a user-facing change?
Yes, gives extra functionality to Containerfiles

Signed-off-by: Joshua Arrevillaga <2004jarrevillaga@gmail.com>
2025-07-09 17:14:28 -04:00
openshift-merge-bot[bot] 1b5cdd50fa
Merge pull request #6259 from containers/renovate/github.com-docker-docker-28.x
fix(deps): update module github.com/docker/docker to v28.3.1+incompatible
2025-07-09 18:43:29 +00:00
openshift-merge-bot[bot] d23f64120a
Merge pull request #6264 from flouthoc/passwd-test
buildah: move `passwd` command to tests
2025-07-09 17:51:38 +00:00
Lokesh Mandvekar 063ee76258
RPM/TMT: account for passwd binary moving to tests
Signed-off-by: Lokesh Mandvekar <lsm5@redhat.com>
Co-authored-by: flouthoc <flouthoc.git@gmail.com>
Signed-off-by: flouthoc <flouthoc.git@gmail.com>
2025-07-09 06:31:09 -07:00
flouthoc af210ea877
buildah: move passwd command to tests
https://github.com/containers/buildah/issues/6182

Signed-off-by: flouthoc <flouthoc.git@gmail.com>
2025-07-09 06:31:04 -07:00
openshift-merge-bot[bot] f67acf9a69
Merge pull request #6267 from rahilarious/main
Remove BUILDTAG btrfs_noversion as no longer effective
2025-07-09 09:21:08 +00:00
openshift-merge-bot[bot] 68c0bbacdf
Merge pull request #6271 from nalind/cgroups
Update "bud with --cpu-shares" test, and rename it
2025-07-09 04:42:40 +00:00
openshift-merge-bot[bot] 1b4cb34e0f
Merge pull request #6256 from containers/renovate/github.com-moby-buildkit-0.x
fix(deps): update module github.com/moby/buildkit to v0.23.2
2025-07-08 23:24:28 +00:00
Nalin Dahyabhai 16c0bdad5f Update "bud with --cpu-shares" test, and rename it
Update "the bud with --cpu-shares" test to expect the a cgroupsv2 value
computed using either the older formula or the newer one introduced in
github.com/opencontainers/cgroups v0.0.3, and give it a unique name so
that it can be selected more easily with bats's "--filter" flag.

Signed-off-by: Nalin Dahyabhai <nalin@redhat.com>
2025-07-08 16:38:40 -04:00
openshift-merge-bot[bot] 564f525213
Merge pull request #6255 from containers/renovate/github.com-containers-luksy-digest
fix(deps): update github.com/containers/luksy digest to bc60f96
2025-07-08 18:06:09 +00:00
Rahil Bhimjiani 7a482acebc
Remove BUILDTAG btrfs_noversion as no longer effective
https://github.com/containers/storage/pull/2308

Signed-off-by: Rahil Bhimjiani <me@rahil.rocks>
2025-07-07 19:30:09 +05:30
renovate[bot] c0fdc9a056
fix(deps): update module github.com/docker/docker to v28.3.1+incompatible
Signed-off-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com>
2025-07-03 02:37:47 +00:00
renovate[bot] 513775f3f3
fix(deps): update module github.com/moby/buildkit to v0.23.2
Signed-off-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com>
2025-07-01 20:44:06 +00:00
renovate[bot] 34488b5497
fix(deps): update github.com/containers/luksy digest to bc60f96
Signed-off-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com>
2025-07-01 05:10:26 +00:00
openshift-merge-bot[bot] 5408a8b3e9
Merge pull request #6251 from Luap99/vendor
vendor: update c/{common,image,storage} to main
2025-06-30 16:57:15 +00:00
Patrick Stoeckle 9f809a88ff
chore(typos): fix typos
Signed-off-by: Patrick Stoeckle <patrick.stoeckle@siemens.com>
2025-06-30 15:45:47 +02:00
Paul Holzinger d95b2dad17
vendor: update c/{common,image,storage} to main
Pull in the rekor removal from c/image which reduces the dependencies.

Signed-off-by: Paul Holzinger <pholzing@redhat.com>
2025-06-30 13:40:22 +02:00
openshift-merge-bot[bot] 4ceee32d77
Merge pull request #6250 from containers/renovate/go-github.com-go-viper-mapstructure-v2-vulnerability
chore(deps): update module github.com/go-viper/mapstructure/v2 to v2.3.0 [security]
2025-06-27 21:36:35 +00:00
renovate[bot] b47791a931
chore(deps): update module github.com/go-viper/mapstructure/v2 to v2.3.0 [security]
Signed-off-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com>
2025-06-27 17:10:01 +00:00
openshift-merge-bot[bot] 7a243f955e
Merge pull request #6249 from containers/renovate/go.etcd.io-bbolt-1.x
fix(deps): update module go.etcd.io/bbolt to v1.4.2
2025-06-27 17:08:38 +00:00
renovate[bot] 46e8141739
fix(deps): update module go.etcd.io/bbolt to v1.4.2
Signed-off-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com>
2025-06-27 15:27:30 +00:00
openshift-merge-bot[bot] f28c074787
Merge pull request #6247 from nalind/source-date-epoch-build-arg
Accept SOURCE_DATE_EPOCH as a build-arg
2025-06-26 19:30:16 +00:00
openshift-merge-bot[bot] e6d26dd363
Merge pull request #6248 from actionmancan/update-neil-smith-github-username
Update Neil Smith's GitHub username in MAINTAINERS.md
2025-06-26 18:18:34 +00:00
G A Neil Smith f172a9d1de Update Neil Smith's GitHub username in MAINTAINERS.md
- Changed GitHub username from 'Neil-Smith' to 'actionmancan'
- Fixed GitHub URL from https://github.com/Neil-Smith to https://github.com/actionmancan
- Corrected column alignment by removing extra spaces
- Maintains Neil Smith's role as Community Manager

Does this PR introduce a user-facing change?

```release-note
None
```

Signed-off-by: G A Neil Smith <nesmith@redhat.com>
2025-06-26 13:14:41 -04:00
openshift-merge-bot[bot] 9f6205610c
Merge pull request #6242 from nalind/common-formats
Use containers/common's formats package instead of our own
2025-06-25 19:53:42 +00:00
openshift-merge-bot[bot] f828ea2f75
Merge pull request #6243 from cevich/add_release_test
Add conditional release-checking system test
2025-06-25 19:26:02 +00:00
Nalin Dahyabhai 149bf968f5 Accept SOURCE_DATE_EPOCH as a build-arg
When SOURCE_DATE_EPOCH is passed in as a build-arg, treat it as we would
if it was passed in via the environment or its own CLI flag.

Signed-off-by: Nalin Dahyabhai <nalin@redhat.com>
2025-06-25 15:19:32 -04:00
openshift-merge-bot[bot] b9c485c123
Merge pull request #6239 from nalind/oci-created-annotation
build, commit: set the OCI ...created annotation on OCI images
2025-06-25 19:17:49 +00:00
openshift-merge-bot[bot] 06abe5fc77
Merge pull request #6244 from containers/renovate/github.com-docker-docker-28.x
fix(deps): update module github.com/docker/docker to v28.3.0+incompatible
2025-06-25 14:18:35 +00:00
renovate[bot] 5bb8d5ed43
fix(deps): update module github.com/docker/docker to v28.3.0+incompatible
Signed-off-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com>
2025-06-25 02:29:26 +00:00
Chris Evich 0832c3e081
Add conditional release-checking system test
Unfortunately, on a number of occasions Buildah has been released
officially with a `-dev` suffix in the version number.  Assist in
catching this mistake at release time by adding a simple conditional
test.  Note that it must be explicitly enabled by a magic environment
variable before executing the system tests.

Also (thanks to @lsm5) update the TMT test to trigger the new
condition for future Fedora releases.

Signed-off-by: Chris Evich <cevich@redhat.com>
2025-06-24 10:57:46 -04:00
Nalin Dahyabhai 52bbc61e1f info,inspect: use the "formats" package to get some builtins
Use the "formats" package to format `info` and `inspect` output, so that
template users will be able to use whatever functions are provided with
`images` and `containers` output, including "json", "lower", and "upper".

Signed-off-by: Nalin Dahyabhai <nalin@redhat.com>
2025-06-23 16:17:04 -04:00
Nalin Dahyabhai 7944b4b2b0 Use containers/common's formats package instead of our own
Use the containers/common "formats" package, which started off as a copy
of our own and is still mostly the same, instead of our own.

Signed-off-by: Nalin Dahyabhai <nalin@redhat.com>
2025-06-23 15:53:55 -04:00
Nalin Dahyabhai 5968d82047 build, commit: set the OCI ...created annotation on OCI images
When building or committing an image in OCI format, default to setting
the org.opencontainers.image.created annotation to the value used in the
image's config blob for the image's creation date. The behavior can be
controlled using the new --created-annotation flag.

Add --annotation and --unsetannotation flags to `buildah commit` which
mimic the same flags for `buildah build`.

Signed-off-by: Nalin Dahyabhai <nalin@redhat.com>
2025-06-23 15:05:02 -04:00
openshift-merge-bot[bot] e6375b3c28
Merge pull request #6236 from nalind/omit-layer-history-entry
Add CommitOptions.OmitLayerHistoryEntry, for skipping the new bits
2025-06-22 16:16:08 +00:00
openshift-merge-bot[bot] 9bfdf2a0b5
Merge pull request #6233 from nalind/mount-targets
run,commit: clean up parents of mount targets, too
2025-06-22 16:13:24 +00:00
Nalin Dahyabhai df36bb835a commit: exclude parents of mount targets, too
When RUN requires us to create the target for a mountpoint, make note of
it and any parent directories that needed to be created, and filter them
out when generating a layer diff or --output data.

The exceptions will be directories that the conformance tests confirm
that BuildKit also leaves behind, though for compatibility with the
classic builder, we have to make that conditional.

Signed-off-by: Nalin Dahyabhai <nalin@redhat.com>
2025-06-20 11:19:27 -04:00
Nalin Dahyabhai a3bea818b8 run: clean up parents of mount targets, too
When RUN requires us to create the target for a mountpoint, make note of
it and any parent directories that needed to be created, and clear them
all out if they look basically the same after the command finishes.

Signed-off-by: Nalin Dahyabhai <nalin@redhat.com>
2025-06-20 11:19:27 -04:00
Nalin Dahyabhai 7d5f1e1ca9 tarFilterer: always flush after writing
Always call the nested TarWriter's Flush() method after writing file
contents.

Signed-off-by: Nalin Dahyabhai <nalin@redhat.com>
2025-06-20 11:19:27 -04:00
Nalin Dahyabhai fce45b77d0 Builder: drop the TempVolumes field
The TempVolumes field is used to track data that's specific to a Run()
call, and there can be more than one of those going at a time.  Merge
its data into the runMountArtifacts struct, which already keeps a list
of locations that we have to clean up in exactly the same way.

Signed-off-by: Nalin Dahyabhai <nalin@redhat.com>
2025-06-20 11:19:27 -04:00
openshift-merge-bot[bot] dc1a1f5b3b
Merge pull request #6238 from containers/renovate/github.com-moby-buildkit-0.x
Update module github.com/moby/buildkit to v0.23.1
2025-06-20 15:18:37 +00:00
openshift-merge-bot[bot] c2a39c2cfa
Merge pull request #6237 from containers/renovate/github.com-opencontainers-cgroups-0.x
Update module github.com/opencontainers/cgroups to v0.0.3
2025-06-20 15:15:51 +00:00
renovate[bot] fe4c1b7342
Update module github.com/moby/buildkit to v0.23.1
Signed-off-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com>
2025-06-20 11:58:54 +00:00
renovate[bot] 128153867b
Update module github.com/opencontainers/cgroups to v0.0.3
Signed-off-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com>
2025-06-19 03:32:15 +00:00
Nalin Dahyabhai 5b1d3e666f Add CommitOptions.OmitLayerHistoryEntry, for skipping the new bits
Add an OmitLayerHistoryEntry field to CommitOptions, which more or less
causes us to reproduce our base image, except with PrependedEmptyLayers,
AppendedEmptyLayers, PrependedLinkedLayers, AppendedLinkedLayers, and
config changes still added in.

Signed-off-by: Nalin Dahyabhai <nalin@redhat.com>
2025-06-18 10:48:54 -04:00
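A minimal sketch of how the new field might be used through buildah's Go API, assuming the CommitOptions surface named above; the setup of the builder, destination reference, and store is elided.

```go
import (
	"context"

	"github.com/containers/buildah"
	"github.com/containers/image/v5/types"
)

// commitWithoutHistoryEntry commits the working container while asking
// buildah to skip adding the usual new layer-history entry.
func commitWithoutHistoryEntry(ctx context.Context, builder *buildah.Builder, dest types.ImageReference) (string, error) {
	opts := buildah.CommitOptions{
		OmitLayerHistoryEntry: true, // field introduced by this change
	}
	imageID, _, _, err := builder.Commit(ctx, dest, opts)
	return imageID, err
}
```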
openshift-merge-bot[bot] 0b3a01c386
Merge pull request #6235 from containers/renovate/github.com-fsouza-go-dockerclient-1.x
Update module github.com/fsouza/go-dockerclient to v1.12.1
2025-06-18 12:49:33 +00:00
openshift-merge-bot[bot] 18deda6ee2
Merge pull request #6229 from containers/renovate/github.com-moby-buildkit-0.x
fix(deps): update module github.com/moby/buildkit to v0.23.0
2025-06-18 12:35:27 +00:00
renovate[bot] 5995db7af7
Update module github.com/fsouza/go-dockerclient to v1.12.1
Signed-off-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com>
2025-06-18 04:08:59 +00:00
openshift-merge-bot[bot] d944211073
Merge pull request #6231 from nalind/bats-tmpdir
commit-with-extra-files test: use $TEST_SCRATCH_DIR
2025-06-18 04:07:49 +00:00
openshift-merge-bot[bot] 3ffed0fd6b
Merge pull request #6228 from nalind/ci-rootless-supplemental-groups
CI: ensure rootless groups aren't duplicates
2025-06-18 01:15:20 +00:00
openshift-merge-bot[bot] 8b31e72fa1
Merge pull request #6232 from nalind/mirror-frontend
conformance: use mirrored frontend and base images
2025-06-17 21:35:03 +00:00
openshift-merge-bot[bot] b0541b60a2
Merge pull request #6230 from nalind/mounted-once-test
"root fs only mounted once" test: accept root with only the rw option
2025-06-17 21:13:03 +00:00
Nalin Dahyabhai c12808594b conformance: use mirrored frontend and base images
In conformance tests, use more mirrored frontend and base images.

Signed-off-by: Nalin Dahyabhai <nalin@redhat.com>
2025-06-17 15:30:56 -04:00
Nalin Dahyabhai ee5fd19c8f commit-with-extra-files test: use $TEST_SCRATCH_DIR
Use $TEST_SCRATCH_DIR instead of $BATS_TMPDIR for temporary files that
we create during this test.

Signed-off-by: Nalin Dahyabhai <nalin@redhat.com>
2025-06-17 15:02:16 -04:00
renovate[bot] c578786e55
fix(deps): update module github.com/moby/buildkit to v0.23.0
Signed-off-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com>
2025-06-17 17:41:27 +00:00
Nalin Dahyabhai 3e11fea02e "root fs only mounted once" test: accept root with only the rw option
When checking /proc/self/mountinfo for a root filesystem, also match "/"
mounted with "rw" as its only mount option, as an alternative to being
mounted with "rw" and other options at the same time.

Signed-off-by: Nalin Dahyabhai <nalin@redhat.com>
2025-06-17 12:55:55 -04:00
Nalin Dahyabhai f8f398bb3c Run with --device /dev/fuse and not just -v /dev/fuse:/dev/fuse
When passing /dev/fuse to a container, use --device instead of a plain
volume mount.

Signed-off-by: Nalin Dahyabhai <nalin@redhat.com>
2025-06-17 09:22:47 -04:00
Nalin Dahyabhai a9a9a43962 CI: pass $BUILDAH_RUNTIME through to in-container test runs
Pass BUILDAH_RUNTIME through to tests that we run inside of containers,
and discard the CI_DESIRED_RUNTIME variable, which sort of did the same
thing.  We still set OCI to the name of the runtime because tests
consult it directly.

Signed-off-by: Nalin Dahyabhai <nalin@redhat.com>
2025-06-17 09:22:14 -04:00
Nalin Dahyabhai b97fd7d686 CI: ensure rootless groups aren't duplicates
When adding an unprivileged user to run tests as, ensure that the
primary and supplemental groups are not repeated, and that at least one
supplemental group is outside of the user's subgid range.  When skipping
a test because the invoking user doesn't have any supplemental groups,
log the user's IDs.

Signed-off-by: Nalin Dahyabhai <nalin@redhat.com>
2025-06-17 09:18:47 -04:00
openshift-merge-bot[bot] 6a367e69a3
Merge pull request #6226 from ricardobranco777/runc
bud,run: runc does not support keep-groups
2025-06-17 12:08:30 +00:00
openshift-merge-bot[bot] 228f0cd1c5
Merge pull request #6227 from nalind/ci-rootless-supplemental-group
CI: give the rootless test user some supplemental groups
2025-06-16 15:44:06 +00:00
openshift-merge-bot[bot] 37706208b4
Merge pull request #6198 from flouthoc/inherit-annotation
build: add support for `--inherit-annotations`
2025-06-16 15:35:41 +00:00
flouthoc 47da2f29b2
build: add support for --inherit-annotations
Allows users to specify whether or not they want to inherit annotations from
the base image.

Closes: https://github.com/containers/buildah/issues/6153

Signed-off-by: flouthoc <flouthoc.git@gmail.com>
2025-06-13 12:28:12 -07:00
Nalin Dahyabhai 9ecc98e584 CI: give the rootless test user some supplemental groups
Exercise preservation of supplemental groups in the tests that use
`buildah build` and `buildah from` with `--group-add keep-groups`.

Signed-off-by: Nalin Dahyabhai <nalin@redhat.com>
2025-06-13 10:32:33 -04:00
openshift-merge-bot[bot] 1b39f59dd3
Merge pull request #5452 from aaronlehmann/support-zstd-commit
Support zstd compression in image commit
2025-06-12 20:43:05 +00:00
Ricardo Branco ab8fc6deeb
bud,run: runc does not support keep-groups
Signed-off-by: Ricardo Branco <rbranco@suse.de>
2025-06-12 15:19:57 +02:00
Aaron Lehmann 2eb666c22d Fix lint issue in TestCommitCompression
Signed-off-by: Aaron Lehmann <alehmann@netflix.com>
2025-06-11 22:53:13 +00:00
Nalin Dahyabhai 873e5458c6 Add a unit test for compression types in OCI images
Add a unit test that commits OCI layouts with various types of
compression specified, and verifies that the layers end up written with
the desired compression and media type descriptors.

Signed-off-by: Nalin Dahyabhai <nalin@redhat.com>
2025-06-11 22:42:59 +00:00
Aaron Lehmann aa84d9c347 Support zstd compression in image commit
Without this change, specifying `Compression: imagebuildah.Zstd` in
`imagebuildah`'s `BuildOptions` fails, so it is not possible to push
cache to a registry with zstd compression.

Note this is only applicable to OCI manifests.

Signed-off-by: Aaron Lehmann <alehmann@netflix.com>
2025-06-11 22:42:59 +00:00
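A minimal sketch using the names quoted in the commit message above (`imagebuildah`'s `BuildOptions` and `imagebuildah.Zstd`); the surrounding build call and other option fields are omitted, and the wrapper function is hypothetical.

```go
import "github.com/containers/buildah/imagebuildah"

// zstdBuildOptions sketches requesting zstd-compressed layers, which this
// change makes usable when committing in OCI format.
func zstdBuildOptions() imagebuildah.BuildOptions {
	return imagebuildah.BuildOptions{
		Compression: imagebuildah.Zstd,
	}
}
```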
openshift-merge-bot[bot] 22201dbaa0
Merge pull request #6219 from containers/renovate/go.etcd.io-bbolt-1.x
fix(deps): update module go.etcd.io/bbolt to v1.4.1
2025-06-11 17:38:29 +00:00
openshift-merge-bot[bot] bf6d1ccbb8
Merge pull request #6214 from nalind/source-date-epoch-no-identity-label
commit,build: --source-date-epoch/--timestamp omit identity label
2025-06-11 17:24:36 +00:00
renovate[bot] 1aa6c1e35a
fix(deps): update module go.etcd.io/bbolt to v1.4.1
Signed-off-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com>
2025-06-11 14:42:22 +00:00
openshift-merge-bot[bot] 2d8c3078ad
Merge pull request #6216 from Luap99/sqlite-dynamic-link
dynamically link sqlite
2025-06-11 13:28:45 +00:00
Paul Holzinger 03b980a3ef
rpm: build rpm with libsqlite3 tag
So we dynamically link to sqlite in fedora instead of vendoring a static
copy.

Signed-off-by: Paul Holzinger <pholzing@redhat.com>
2025-06-11 10:31:54 +02:00
Paul Holzinger e5b876571b
Makefile: use libsqlite3 build when possible
Dynamically link sqlite3 when it is installed; the main motivation is that
it reduces the podman binary size. I see about 3.2 MB of savings.

But dynamically linking also means that if there are vulnerabilities, only
the sqlite3 distro package needs updating and we don't have to make a
new podman release with the vendored update.

Signed-off-by: Paul Holzinger <pholzing@redhat.com>
2025-06-11 10:31:53 +02:00
Nalin Dahyabhai 962ab4bb1e commit,build: --source-date-epoch/--timestamp omit identity label
When using either --source-date-epoch or --timestamp, default to not
adding a label with our version number in it, since it can change
between builds.

Signed-off-by: Nalin Dahyabhai <nalin@redhat.com>
2025-06-10 16:18:50 -04:00
openshift-merge-bot[bot] 5777aa3694
Merge pull request #6215 from nalind/countme
docs: add --setopt "*.countme=false" to dnf examples
2025-06-10 19:58:02 +00:00
Nalin Dahyabhai b167073d54 docs: add --setopt "*.countme=false" to dnf examples
* Consistently use --releasever instead of --release in dnf examples
* Remove trailing whitespace
* Use --use-host-config --setopt "*.countme=false" when running dnf with
  an empty --installroot
* Use Fedora 42 instead of Fedora 30 in examples
* Block quote console examples in tutorials

Signed-off-by: Nalin Dahyabhai <nalin@redhat.com>
2025-06-10 14:16:36 -04:00
openshift-merge-bot[bot] f46d15d721
Merge pull request #6217 from nalind/sbom-user
Builder.sbomScan(): don't break non-root scanners
2025-06-10 18:15:44 +00:00
Nalin Dahyabhai 9f35e8a2ac Builder.sbomScan(): don't break non-root scanners
Set up permissions on the scanner output directory so that scanners
whose images specify that they be run as non-root users can still write
to it.  The most recent syft image exposed our bug.

Signed-off-by: Nalin Dahyabhai <nalin@redhat.com>
2025-06-10 13:22:20 -04:00
openshift-merge-bot[bot] d14b4f8dc7
Merge pull request #6211 from nalind/source-date-epoch-static-hostname
build: --source-date-epoch/--timestamp use static hostname/cid
2025-06-10 13:55:54 +00:00
openshift-merge-bot[bot] 714f5ba2e5
Merge pull request #6195 from flouthoc/unsetanno
build,config: add support for `--unsetannotation`
2025-06-09 14:19:35 +00:00
Nalin Dahyabhai 2d0152e99d build: --source-date-epoch/--timestamp use static hostname/cid
When using either --source-date-epoch or --timestamp, make sure that
handling of RUN instructions uses a defined hostname if possible, and
commits using a reference to a static container name.

Signed-off-by: Nalin Dahyabhai <nalin@redhat.com>
2025-06-06 15:33:01 -04:00
openshift-merge-bot[bot] 12e41eca79
Merge pull request #6210 from containers/renovate/golang.org-x-crypto-0.x
fix(deps): update module golang.org/x/crypto to v0.39.0
2025-06-06 19:22:48 +00:00
renovate[bot] 87d9db82c6
fix(deps): update module golang.org/x/crypto to v0.39.0
Signed-off-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com>
2025-06-05 21:55:11 +00:00
openshift-merge-bot[bot] f05fe6e568
Merge pull request #6208 from nalind/build-source-date-epoch
build: add --source-date-epoch and --rewrite-timestamp flags
2025-06-05 21:54:10 +00:00
openshift-merge-bot[bot] da45fc8b82
Merge pull request #6209 from containers/renovate/golang.org-x-sync-0.x
fix(deps): update module golang.org/x/sync to v0.15.0
2025-06-05 17:35:29 +00:00
renovate[bot] 5842593d28
fix(deps): update module golang.org/x/sync to v0.15.0
Signed-off-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com>
2025-06-05 16:11:23 +00:00
Nalin Dahyabhai 865daceebb build: add --source-date-epoch and --rewrite-timestamp flags
Use $SOURCE_DATE_EPOCH as the default for the --source-date-epoch flag
to the "build" CLI.

When a source-date-epoch is set, we'll use it when writing new history
entries, force timestamps in data written for --output to the specified
timestamp, and populate a "SOURCE_DATE_EPOCH" ARG that we treat as
always being set, and which we don't complain about being left unused.
By default, this will not affect timestamps in newly-added layers.

Add a --rewrite-timestamp flag, which "clamps" timestamps in newly-added
layers to not be later than the --source-date-epoch value if the
--source-date-epoch flag is set, but has no effect otherwise.

Signed-off-by: Nalin Dahyabhai <nalin@redhat.com>
2025-06-05 10:22:56 -04:00
flouthoc 83acf720d9
build,config: add support for --unsetannotation
Just like `--unsetlabel` add support for `--unsetannotation`.

Closes: https://github.com/containers/buildah/issues/6183

Signed-off-by: flouthoc <flouthoc.git@gmail.com>
2025-06-04 12:58:43 -07:00
openshift-merge-bot[bot] b8d8cc375f
Merge pull request #6189 from nalind/commit-source-date-epoch
commit: add --source-date-epoch and --rewrite-timestamp flags
2025-06-04 19:30:37 +00:00
Nalin Dahyabhai 6c82e7eac0 commit: add --source-date-epoch and --rewrite-timestamp flags
Add a --source-date-epoch flag, defaulting to $SOURCE_DATE_EPOCH if set,
which sets the created-on date and the timestamp for the new history
entries, but does not default to modifying the timestamps on contents in
new layers.

Add a --rewrite-timestamp flag, which "clamps" timestamps in the new
layers to not be later than the --source-date-epoch value if both
the --rewrite-timestamp and --source-date-epoch flags were set.

Signed-off-by: Nalin Dahyabhai <nalin@redhat.com>
2025-06-04 13:02:26 -04:00
openshift-merge-bot[bot] 2d32c9af2f
Merge pull request #6169 from Honny1/sort-glob-output
Ensure extendedGlob returns paths in lexical order
2025-06-04 17:02:03 +00:00
openshift-merge-bot[bot] bd021becfb
Merge pull request #6203 from containers/renovate/github.com-openshift-imagebuilder-1.x
fix(deps): update module github.com/openshift/imagebuilder to v1.2.16
2025-06-03 20:08:20 +00:00
renovate[bot] a4a30cf5e8
fix(deps): update module github.com/openshift/imagebuilder to v1.2.16
Signed-off-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com>
2025-06-03 18:33:08 +00:00
openshift-merge-bot[bot] 9ece6d4ce8
Merge pull request #6200 from Luap99/vendor
vendor latest c/{common,image,storage}
2025-06-03 10:23:05 +00:00
Paul Holzinger 5b26b79221
vendor latest c/{common,image,storage}
Just to make sure tests are still passing, I mostly care about
https://github.com/containers/common/pull/2445.

Signed-off-by: Paul Holzinger <pholzing@redhat.com>
2025-06-03 11:28:06 +02:00
openshift-merge-bot[bot] 7be35e17d3
Merge pull request #6190 from nalind/platforms
Tweak our handling of variant values, again
2025-06-02 15:46:18 +00:00
openshift-merge-bot[bot] 5da34800de
Merge pull request #6192 from mtrmac/ostree
Don't BuildRequires: ostree-devel
2025-06-02 14:23:27 +00:00
Nalin Dahyabhai 9c2b43cccb Tweak our handling of variant values, again
Ensure that when not invoked with --platform, we use the default
platform, and that when invoked with --platform, we use the platform
information from the base image that we ended up using for the build,
with no accidental mixing of the two.

Signed-off-by: Nalin Dahyabhai <nalin@redhat.com>
2025-06-02 09:33:25 -04:00
openshift-merge-bot[bot] a5f229f155
Merge pull request #6188 from flouthoc/host-gateway
parse, validateExtraHost: honor `host-gateway` in format
2025-06-01 10:47:38 +00:00
Miloslav Trmač db336cef4d Don't BuildRequires: ostree-devel
We are not opting into the ostree backend, and it doesn't
build: https://github.com/containers/image/pull/2821 .
So, stop referencing the dependency.

Should not change behavior.

Signed-off-by: Miloslav Trmač <mitr@redhat.com>
2025-05-31 01:38:27 +02:00
openshift-merge-bot[bot] 7a56587b5d
Merge pull request #6191 from Luap99/nix-rm
remove static nix build
2025-05-30 17:51:37 +00:00
flouthoc 56f3171ab0
parse, validateExtraHost: honor Hostgateway in format
The `--add-host` flag should support `host-gateway` when the argument is in
the form of `buildah build --add-host=proxyhost:host-gateway .`

This is consistent with podman.

Closes: https://github.com/containers/podman/issues/26034

Signed-off-by: flouthoc <flouthoc.git@gmail.com>
2025-05-30 09:56:35 -07:00
Paul Holzinger 56cfd2fdfc
remove static nix build
It is no longer working and no maintainer is using it.

Fixes: #6086

Signed-off-by: Paul Holzinger <pholzing@redhat.com>
2025-05-30 18:53:28 +02:00
openshift-merge-bot[bot] 5cc3e7d776
Merge pull request #6187 from JayKayy/fix-maintainers-link
fix link to Maintainers file
2025-05-30 14:25:49 +00:00
Jan Rodák 2717599f93
Ensure extendedGlob returns paths in lexical order
The `filepath.Glob` function does not provide deterministic output. In order to achieve a reproducible build, files must be copied in a deterministic manner, and `filepath.Glob` did not guarantee this. Other functions, such as `filepath.Walk` and `os.ReadDir`, return deterministic output, so files are now copied to the image in the same order each time.

Fixes: https://issues.redhat.com/browse/RUN-2661

Signed-off-by: Jan Rodák <hony.com@seznam.cz>
2025-05-30 10:32:11 +02:00
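A minimal sketch of the ordering fix described above, using only the standard library; buildah's actual extendedGlob helper differs, but the idea is the same: sort whatever matches are found before copying them.

```go
import (
	"path/filepath"
	"sort"
)

// globSorted returns glob matches in lexical order so that files are
// copied to the image in the same order on every build.
func globSorted(pattern string) ([]string, error) {
	matches, err := filepath.Glob(pattern)
	if err != nil {
		return nil, err
	}
	sort.Strings(matches)
	return matches, nil
}
```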
John Kwiatkoski 2158a0c331
Merge branch 'main' into fix-maintainers-link 2025-05-29 21:32:27 -04:00
openshift-merge-bot[bot] 5a02e74b5d
Merge pull request #6132 from nalind/relabel-binds
run: handle relabeling bind mounts ourselves
2025-05-29 23:50:22 +00:00
Nalin Dahyabhai 910933b33f CI: run integration tests on Fedora with both crun and runc
Run integration tests (both as root and rootless) with both crun and
runc on Fedora, to help ensure that we can use either.

Signed-off-by: Nalin Dahyabhai <nalin@redhat.com>
2025-05-29 14:17:43 -04:00
Nalin Dahyabhai b122c325fa buildah-build(1): clarify that --cgroup-parent affects RUN instructions
Tweak the wording that describes the effects of --cgroup-parent to be
clear that it only affects handling of RUN instructions.

Signed-off-by: Nalin Dahyabhai <nalin@redhat.com>
2025-05-29 14:17:43 -04:00
Nalin Dahyabhai 8178c3a5d3 runUsingRuntime: use named constants for runtime states
Use the named constants for the status values that runtimes can report
to us when we run them with the "state" command.

Signed-off-by: Nalin Dahyabhai <nalin@redhat.com>
2025-05-29 14:17:43 -04:00
Nalin Dahyabhai 4ea64c3871 Add a dummy "runtime" that just dumps its config file
Add a dummy "runtime" that just dumps its runtime config, either the
entirety of it, or a section of it corresponding to each command line
argument.  Tests can use it to ensure that we set the right thing in the
configuration without also depending on the runtime to do as its asked,
which isn't always something we have control over.

Signed-off-by: Nalin Dahyabhai <nalin@redhat.com>
2025-05-29 14:17:37 -04:00
Nalin Dahyabhai d53d837e0e run: handle relabeling bind mounts ourselves
Handle requested relabeling of bind mounts (i.e., the "z" and "Z" flags)
directly, instead of letting the runtime handle the relabeling.

Signed-off-by: Nalin Dahyabhai <nalin@redhat.com>
2025-05-29 09:27:09 -04:00
openshift-merge-bot[bot] 328e613826
Merge pull request #6186 from containers/renovate/github.com-docker-docker-28.x
fix(deps): update module github.com/docker/docker to v28.2.0+incompatible
2025-05-29 13:22:03 +00:00
John Kwiatkoski 913e4d2d35 fix link to Maintainers file
Signed-off-by: John Kwiatkoski <jkwiatkoski@protonmail.com>
2025-05-28 22:34:33 -04:00
Nalin Dahyabhai c0c4148fc5 Update to avoid deprecated types
Replace our use of the
github.com/docker/docker/api/types.BuildCachePruneOptions with the
github.com/docker/docker/api/types/build.CachePruneOptions type, which
the former is now an alias for.

Signed-off-by: Nalin Dahyabhai <nalin@redhat.com>
2025-05-28 17:55:13 -04:00
renovate[bot] 285dcc9140
fix(deps): update module github.com/docker/docker to v28.2.0+incompatible
Signed-off-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com>
2025-05-28 21:31:30 +00:00
openshift-merge-bot[bot] 199468a453
Merge pull request #6181 from lsm5/openssf-passing
[CI:DOCS] README.md: add openssf passing badge
2025-05-28 19:24:13 +00:00
openshift-merge-bot[bot] 279e47d3ca
Merge pull request #6124 from TomSweeneyRedHat/dev/tsweeney/cncfstart
[CI:DOCS] Add CNCF roadmap, touchup other CNCF files
2025-05-28 19:21:27 +00:00
openshift-merge-bot[bot] bdc01479ad
Merge pull request #6164 from lsm5/disable-osh-diff-scan
[skip-ci] Packit: Disable osh_diff_scan
2025-05-28 19:18:44 +00:00
openshift-merge-bot[bot] 9986534eea
Merge pull request #6178 from nalind/add-timestamp
add: add a new --timestamp flag
2025-05-28 19:16:00 +00:00
openshift-merge-bot[bot] 5168237d2b
Merge pull request #6179 from nalind/copier-ensure
copier: add Ensure and ConditionalRemove
2025-05-28 19:07:41 +00:00
openshift-merge-bot[bot] b178f3b7ef
Merge pull request #6177 from nalind/build-outputs
build: allow --output to be specified multiple times
2025-05-28 19:02:01 +00:00
openshift-merge-bot[bot] 0637485b62
Merge pull request #6176 from nalind/parse-get-build-output-cut
pkg/parse.GetBuildOutput(): use strings.Cut()
2025-05-28 18:58:37 +00:00
openshift-merge-bot[bot] 232cb99b76
Merge pull request #6175 from nalind/man-lists
[CI:DOCS] update a couple of lists in the build man page
2025-05-28 18:55:49 +00:00
openshift-merge-bot[bot] b535d02d0e
Merge pull request #6174 from nalind/42
Use Fedora 42 instead of 41 in that one conformance test
2025-05-23 15:16:39 +00:00
openshift-merge-bot[bot] 8b940c7062
Merge pull request #6170 from lsm5/packit-post-f40
[skip-ci] Packit: set fedora-all after F40 EOL
2025-05-22 14:59:36 +00:00
Lokesh Mandvekar 1f6302a6ef
[skip-ci] Packit: cleanup redundant targets and unused anchors
Signed-off-by: Lokesh Mandvekar <lsm5@fedoraproject.org>
2025-05-22 09:02:23 -04:00
Lokesh Mandvekar a99ad87afe
[skip-ci] Packit: set fedora-all after F40 EOL
As the title says.

Signed-off-by: Lokesh Mandvekar <lsm5@fedoraproject.org>
2025-05-22 09:02:22 -04:00
Nalin Dahyabhai 26f4f3a025 Use Fedora 42 instead of 41 in that one conformance test
Signed-off-by: Nalin Dahyabhai <nalin@redhat.com>
2025-05-21 14:35:12 -04:00
openshift-merge-bot[bot] cc66b6e9d8
Merge pull request #6171 from flouthoc/select-recent-cache
imagebuildah: select most recent layer for cache in case on conflict
2025-05-21 17:55:04 +00:00
Lokesh Mandvekar eee1f2102a
[CI:DOCS] README.md: add openssf passing badge
Signed-off-by: Lokesh Mandvekar <lsm5@fedoraproject.org>
2025-05-21 10:50:49 -04:00
openshift-merge-bot[bot] ad17dac13a
Merge pull request #6180 from containers/renovate/github.com-moby-buildkit-0.x
fix(deps): update module github.com/moby/buildkit to v0.22.0
2025-05-21 14:36:21 +00:00
renovate[bot] 48a4a446e3
fix(deps): update module github.com/moby/buildkit to v0.22.0
Signed-off-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com>
2025-05-21 14:04:19 +00:00
Nalin Dahyabhai dda8e65e84 copier: add Ensure and ConditionalRemove
Add copier.Ensure() and copier.ConditionalRemove(), for controlling the
permissions and datestamps we set on multiple items we create with one
call (along with any parents), and selectively removing multiple items
with one call.

Signed-off-by: Nalin Dahyabhai <nalin@redhat.com>
2025-05-21 09:54:21 -04:00
Nalin Dahyabhai d568dda6c0 [CI:DOCS] update a couple of lists in the build man page
The buildah-build man page enumerates the default set of masked and
read-only paths, but that list is hardwired into one of our
dependencies, and we didn't update this man page when it changed.

Signed-off-by: Nalin Dahyabhai <nalin@redhat.com>
2025-05-21 09:53:28 -04:00
Nalin Dahyabhai 3a85df003e build: allow --output to be specified multiple times
Allow --output to be specified multiple times for `buildah build`.
That's of limited usefulness right now, but as exporters get added, it
won't be, and it's better to provide the new multiple-values API field
sooner rather than later.

Signed-off-by: Nalin Dahyabhai <nalin@redhat.com>
2025-05-21 09:53:04 -04:00
Nalin Dahyabhai b9a65a9d86 add: add a new --timestamp flag
Add a --timestamp flag to the "add" and "copy" CLIs, along with a
corresponding field in AddAndCopyOptions.

When a timestamp is set, we'll force the timestamp on data copied in to
be the specified value while reading it, so that the content will have
the specified datestamp in the rootfs and when the image is committed.

Signed-off-by: Nalin Dahyabhai <nalin@redhat.com>
2025-05-21 09:51:51 -04:00
Nalin Dahyabhai a57e7f4b24 tests/helpers.bash: add some helpers for parsing images
Add some helper functions for parsing the information from dir: and oci:
locations that we'd just have to do over and over with minor variations,
to try to save some time and a refactor down the road.

Signed-off-by: Nalin Dahyabhai <nalin@redhat.com>
2025-05-21 09:51:51 -04:00
Nalin Dahyabhai 5cfbf72410 pkg/parse.GetBuildOutput(): use strings.Cut()
Use strings.Cut() to make this a little easier to follow.

Signed-off-by: Nalin Dahyabhai <nalin@redhat.com>
2025-05-21 09:44:46 -04:00
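A small illustration of the strings.Cut pattern referred to above, applied to a hypothetical key=value pair from a --output specification; the real parsing lives in pkg/parse.GetBuildOutput.

```go
package main

import (
	"fmt"
	"strings"
)

func main() {
	option := "dest=/tmp/build-output" // hypothetical example input
	key, value, found := strings.Cut(option, "=")
	if !found {
		fmt.Println("no value given for", option)
		return
	}
	fmt.Printf("key=%q value=%q\n", key, value)
}
```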
Lokesh Mandvekar ca3271a15c
[skip-ci] Packit: Disable osh_diff_scan
No golang support yet in osh diff scan.
Ref: https://github.com/openscanhub/known-false-positives/pull/30#issuecomment-2858698495

Signed-off-by: Lokesh Mandvekar <lsm5@fedoraproject.org>
2025-05-21 09:19:42 -04:00
openshift-merge-bot[bot] e7b6bd0089
Merge pull request #6173 from nalind/set-has
internal/util.SetHas(): handle maps of [generic]generic
2025-05-20 23:59:25 +00:00
Nalin Dahyabhai ee0f750ea7 internal/util.SetHas(): handle maps of [generic]generic
Make SetHas() a generic function for checking if a map holds a value of
whatever kind for a key of some comparable kind.

Signed-off-by: Nalin Dahyabhai <nalin@redhat.com>
2025-05-20 17:53:33 -04:00
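A minimal sketch of the generic form described above; the exact implementation in internal/util may differ.

```go
// SetHas reports whether a map (used as a set) has an entry for key,
// for any comparable key type and any value type.
func SetHas[K comparable, V any](m map[K]V, key K) bool {
	_, ok := m[key]
	return ok
}
```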
openshift-merge-bot[bot] 5a60789759
Merge pull request #6172 from lsm5/ignore-centos-eln
[skip-ci] Packit: Ignore ELN and CentOS Stream jobs
2025-05-20 21:33:33 +00:00
Aaron Lehmann cf4635e86b
Refactor NewImageSource to add a manifest type abstraction (#5743)
* Refactor NewImageSource to add a manifest type abstraction

Currently, NewImageSource creates a Docker schema2 manifest and an OCI
manifest at the same time. This precludes functionality that isn't
supported by both manifest types, for example zstd compression.
Refactoring this to create only the desired manifest type solves this
and also cleans up the code by separating manifest-type-specific code
into distinct implementations of a "manifest builder".

See discussion in https://github.com/containers/buildah/pull/5452.

Signed-off-by: Aaron Lehmann <alehmann@netflix.com>

* Review feedback

Signed-off-by: Aaron Lehmann <alehmann@netflix.com>

* Review feedback, round 2

Signed-off-by: Aaron Lehmann <alehmann@netflix.com>

---------

Signed-off-by: Aaron Lehmann <alehmann@netflix.com>
Co-authored-by: flouthoc <flouthoc@users.noreply.github.com>
2025-05-20 13:08:17 -07:00
Lokesh Mandvekar 964ec24c13
[skip-ci] Packit: Ignore ELN and CentOS Stream jobs
Ignore these jobs until go 1.23.3+ is available in their buildroots

Signed-off-by: Lokesh Mandvekar <lsm5@fedoraproject.org>
2025-05-20 15:48:28 -04:00
flouthoc 8f885843db
imagebuildah: select most recent layer for cache
If multiple layers are found as cache candidates, select the layer
which was created most recently.

Closes: https://github.com/containers/buildah/issues/6152

Signed-off-by: flouthoc <flouthoc.git@gmail.com>
2025-05-20 10:34:21 -07:00
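A sketch of the selection rule described above, with hypothetical types and names; among several equally valid cache candidates, the most recently created one wins.

```go
import "time"

type cacheCandidate struct {
	id      string
	created time.Time
}

// mostRecent assumes cands is non-empty and returns the candidate with
// the latest creation time.
func mostRecent(cands []cacheCandidate) cacheCandidate {
	best := cands[0]
	for _, c := range cands[1:] {
		if c.created.After(best.created) {
			best = c
		}
	}
	return best
}
```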
tomsweeneyredhat 7fbca08c02 [CI:DOCS] Add CNCF roadmap, touchup other CNCF files
Added a ROADMAP.md file per CNCF requirements.

Updated the GOVERNANCE.md file to point to and use the local
MAINTAINERS.md file instead of the one in Podman.  Also added
links to the OWNERS file.

Updated the MAINTAINERS.md file to bump Aditya Rajan to a
Maintainer from a reviewer. Having him as a reviewer was
a cut/paste error that was not caught when the file was
first generated from the Podman file.

Signed-off-by: tomsweeneyredhat <tsweeney@redhat.com>
2025-05-20 13:21:45 -04:00
openshift-merge-bot[bot] a1dad28e32
Merge pull request #6156 from containers/renovate/golang.org-x-crypto-0.x
fix(deps): update module golang.org/x/crypto to v0.38.0
2025-05-14 14:53:50 +00:00
renovate[bot] 168eef4734
fix(deps): update module golang.org/x/crypto to v0.38.0
Signed-off-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com>
2025-05-14 13:33:24 +00:00
Sascha Brawer ada0ac0b89
Fix typo in comment (#6167)
Signed-off-by: Sascha Brawer <sascha@brawer.ch>
2025-05-13 19:04:04 -07:00
openshift-merge-bot[bot] cd2bb6169f
Merge pull request #6161 from rhatdan/security
Support label_users in buildah
2025-05-06 14:20:15 +00:00
Daniel J Walsh 081e9b308d
Support label_users in buildah
Fixes: https://github.com/containers/buildah/issues/6160

label_users tells buildah and podman to maintain the user and role
from the SELinux label, the default is to change the user and role to
system_u:system_r.

With this change we end up with an unconfined_u user running the
container as unconfined_u:unconfined_r.

I don't believe our CI/CD system can test this, because it runs with
system_u:system_r by default.

Signed-off-by: Daniel J Walsh <dwalsh@redhat.com>
2025-05-06 07:13:27 -04:00
openshift-merge-bot[bot] 646fb21ef2
Merge pull request #6154 from containers/renovate/golang.org-x-sync-0.x
fix(deps): update module golang.org/x/sync to v0.14.0
2025-05-05 15:41:52 +00:00
renovate[bot] 27c2c3e4ba
fix(deps): update module golang.org/x/sync to v0.14.0
Signed-off-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com>
2025-05-05 14:48:34 +00:00
openshift-merge-bot[bot] 7ba98de415
Merge pull request #6151 from containers/renovate/github.com-containers-luksy-digest
fix(deps): update github.com/containers/luksy digest to 4bb4c3f
2025-05-02 13:44:03 +00:00
openshift-merge-bot[bot] a79f540723
Merge pull request #6146 from nalind/serve-leak
test/serve: fix a descriptor leak, add preliminary directory support
2025-05-02 09:38:09 +00:00
renovate[bot] ffcbec7f79
fix(deps): update github.com/containers/luksy digest to 4bb4c3f
Signed-off-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com>
2025-05-01 03:02:39 +00:00
Nalin Dahyabhai cbe5823958 test/serve: fix a descriptor leak, add preliminary directory support
Fix a descriptor leak in the helper, and add some minimal support for
clients that might want to scrape HTML of a directory to walk the
filesystem.

Signed-off-by: Nalin Dahyabhai <nalin@redhat.com>
2025-04-30 11:50:42 -04:00
openshift-merge-bot[bot] 36c3f13d85
Merge pull request #6150 from containers/renovate/github.com-opencontainers-cgroups-0.x
fix(deps): update module github.com/opencontainers/cgroups to v0.0.2
2025-04-30 15:47:51 +00:00
renovate[bot] f97596adf3
fix(deps): update module github.com/opencontainers/cgroups to v0.0.2
Signed-off-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com>
2025-04-30 14:45:32 +00:00
openshift-merge-bot[bot] 8e43db65ca
Merge pull request #6147 from containers/renovate/github.com-opencontainers-runc-1.x
fix(deps): update module github.com/opencontainers/runc to v1.3.0
2025-04-30 14:44:23 +00:00
openshift-merge-bot[bot] f91b3f70fe
Merge pull request #6149 from containers/renovate/github.com-moby-buildkit-0.x
fix(deps): update module github.com/moby/buildkit to v0.21.1
2025-04-30 14:41:37 +00:00
openshift-merge-bot[bot] 5731583e94
Merge pull request #6141 from hanwen-flow/image-fixes
Filter image only when necessary
2025-04-30 13:02:18 +00:00
renovate[bot] fd8e85e833
fix(deps): update module github.com/moby/buildkit to v0.21.1
Signed-off-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com>
2025-04-29 23:28:31 +00:00
Nalin Dahyabhai 9ac03e6bf9 Update to avoid deprecated types
Replace our use of the
github.com/opencontainers/runc/libcontainer/devices.Permissions type
with the github.com/opencontainers/cgroups/devices/config.Permissions
type, which the former is now an alias for.

Signed-off-by: Nalin Dahyabhai <nalin@redhat.com>
2025-04-29 14:17:29 -04:00
renovate[bot] 7ce7072f25
fix(deps): update module github.com/opencontainers/runc to v1.3.0
Signed-off-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com>
2025-04-29 17:49:49 +00:00
Han-Wen Nienhuys a59af89cd8 Only filter if containerImageRef.created != nil
This option is set from CommitOptions.HistoryTimestamp, which
corresponds to the buildah option '--timestamp', and therefore is off
by default.

If this option is not given, we can save ourselves one layer of
copying (tar filtering is a tar.Reader/tar.Writer connected with io.Pipe()).

Signed-off-by: Han-Wen Nienhuys <hanwen@engflow.com>
2025-04-29 08:21:28 +02:00
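A sketch of the optimization described above, with hypothetical names: the tar-rewriting pipe is only set up when a timestamp override was actually requested; otherwise the layer stream is passed through untouched.

```go
import (
	"archive/tar"
	"io"
	"time"
)

// maybeFilterLayer rewrites entry timestamps only when created is set;
// when it is nil (no --timestamp given), the extra copy is skipped.
func maybeFilterLayer(layer io.ReadCloser, created *time.Time) io.ReadCloser {
	if created == nil {
		return layer
	}
	pr, pw := io.Pipe()
	go func() {
		tr := tar.NewReader(layer)
		tw := tar.NewWriter(pw)
		for {
			hdr, err := tr.Next()
			if err == io.EOF {
				break
			}
			if err != nil {
				pw.CloseWithError(err)
				return
			}
			hdr.ModTime = *created // clamp entry times to the requested value
			if err := tw.WriteHeader(hdr); err != nil {
				pw.CloseWithError(err)
				return
			}
			if _, err := io.Copy(tw, tr); err != nil {
				pw.CloseWithError(err)
				return
			}
		}
		pw.CloseWithError(tw.Close())
	}()
	return pr
}
```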
Han-Wen Nienhuys 2dae3b4656 Drop superfluous cast
Signed-off-by: Han-Wen Nienhuys <hanwen@engflow.com>
2025-04-29 08:21:28 +02:00
Han-Wen Nienhuys 15278aa27d Remove UID/GID scrubbing.
As of
e024854ba3,
Uname/Gname fields are not populated on Unix. On Windows, the golang
tar package leaves the Uname/Gname fields empty.

Signed-off-by: Han-Wen Nienhuys <hanwen@engflow.com>
2025-04-29 08:21:28 +02:00
openshift-merge-bot[bot] e886f271ee
Merge pull request #6144 from containers/renovate/github.com-seccomp-libseccomp-golang-0.x
fix(deps): update module github.com/seccomp/libseccomp-golang to v0.11.0
2025-04-24 20:06:56 +00:00
renovate[bot] ef7ad851a6
fix(deps): update module github.com/seccomp/libseccomp-golang to v0.11.0
Signed-off-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com>
2025-04-23 22:55:14 +00:00
openshift-merge-bot[bot] 420e4bfd0a
Merge pull request #6142 from containers/renovate/major-ci-vm-image
chore(deps): update dependency containers/automation_images to v20250422
2025-04-23 15:48:04 +00:00
Paul Holzinger e4cf6cbb90
cirrus: turn prior fedora testing back on
Signed-off-by: Paul Holzinger <pholzing@redhat.com>
2025-04-23 16:07:43 +02:00
renovate[bot] 5de286ca8e
chore(deps): update dependency containers/automation_images to v20250422
Signed-off-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com>
Signed-off-by: Paul Holzinger <pholzing@redhat.com>
2025-04-23 16:05:19 +02:00
openshift-merge-bot[bot] d6370d08de
Merge pull request #6138 from containers/renovate/github.com-docker-docker-28.x
fix(deps): update module github.com/docker/docker to v28.1.1+incompatible
2025-04-21 19:40:56 +00:00
renovate[bot] 97cf88f504
fix(deps): update module github.com/docker/docker to v28.1.1+incompatible
Signed-off-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com>
2025-04-21 19:03:21 +00:00
openshift-merge-bot[bot] 614e42b808
Merge pull request #6136 from TomSweeneyRedHat/dev/tsweeney/v1.40.0
Bump c/storage v1.58.0, c/image v5.35.0, c/common v0.63.0, Buildah v1.40.0, then to dev
2025-04-21 15:10:17 +00:00
Tom Sweeney 58b1f29836
Merge branch 'main' into dev/tsweeney/v1.40.0 2025-04-17 16:59:01 -04:00
tomsweeneyredhat 36adabb2d6 Bump to Buildah v1.41.0-dev
Bump the main Buildah branch to the next dev version,
Buildah v1.41.0-dev

Signed-off-by: tomsweeneyredhat <tsweeney@redhat.com>
2025-04-17 15:21:28 -04:00
tomsweeneyredhat ff62e3e8a5 Bump Buildah to v1.40.0
Bump Buildah to v1.40.0 in preparation
for Podman v5.5.0

Signed-off-by: tomsweeneyredhat <tsweeney@redhat.com>
2025-04-17 15:21:28 -04:00
tomsweeneyredhat d8495826cc Bump c/storage to v1.58.0, c/image v5.35.0, c/common v0.63.0
Bumps:
c/storage to v1.58.0
c/image to v5.35.0
c/common to v0.63.0

In preparation for Buildah 1.40.0 and Podman v5.5.0

Signed-off-by: tomsweeneyredhat <tsweeney@redhat.com>
2025-04-17 15:21:20 -04:00
openshift-merge-bot[bot] 1db3b5465b
Merge pull request #6135 from containers/renovate/github.com-containers-common-0.x
fix(deps): update module github.com/containers/common to v0.63.0
2025-04-17 17:37:55 +00:00
Nalin Dahyabhai 365ad53dfc CI vendor_task: pin to go 1.23.3 for now
Pin to 1.23.3 specifically instead of 1.23, for now.

Signed-off-by: Nalin Dahyabhai <nalin@redhat.com>
2025-04-17 12:41:11 -04:00
renovate[bot] 522bc8ba58
fix(deps): update module github.com/containers/common to v0.63.0
Signed-off-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com>
2025-04-17 15:36:04 +00:00
openshift-merge-bot[bot] a0945beb1e
Merge pull request #6134 from containers/renovate/github.com-docker-docker-28.x
fix(deps): update module github.com/docker/docker to v28.1.0+incompatible
2025-04-17 15:29:22 +00:00
renovate[bot] a1df886a81
fix(deps): update module github.com/docker/docker to v28.1.0+incompatible
Signed-off-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com>
2025-04-17 14:26:17 +00:00
openshift-merge-bot[bot] 01d92cd614
Merge pull request #6129 from nalind/chroot-troubleshoot
chroot: use symbolic names when complaining about mount() errors
2025-04-17 13:51:54 +00:00
openshift-merge-bot[bot] 3a4249802e
Merge pull request #6099 from ninja-quokka/update_issue_template
Update Buildah issue template to new version and support podman build
2025-04-17 13:49:12 +00:00
openshift-merge-bot[bot] 01b8522d7a
Merge pull request #6127 from nalind/doc-src
[CI:DOCS] Document rw/src for --mount in buildah-run(1)
2025-04-17 13:45:52 +00:00
openshift-merge-bot[bot] a9d6f2d7f8
Merge pull request #6131 from containers/renovate/github.com-containers-storage-1.x
fix(deps): update module github.com/containers/storage to v1.58.0
2025-04-16 17:57:35 +00:00
openshift-merge-bot[bot] 018e1915d2
Merge pull request #6130 from Luap99/cirrus-rootless
cirrus: make Total Success wait for rootless integration
2025-04-16 17:54:53 +00:00
renovate[bot] aedcd3d19e
fix(deps): update module github.com/containers/storage to v1.58.0
Signed-off-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com>
2025-04-16 16:19:48 +00:00
openshift-merge-bot[bot] 352eda35bc
Merge pull request #6128 from nalind/completion
cli: hide the `completion` command instead of disabling it outright
2025-04-16 16:18:31 +00:00
Paul Holzinger 5789a60072
cirrus: make Total Success wait for rootless integration
As discovered by Matt[1], without this we could merge PRs even if they
have failing rootless integration tests.

[1] https://github.com/containers/buildah/pull/6120#issuecomment-2797581476

Signed-off-by: Paul Holzinger <pholzing@redhat.com>
2025-04-16 18:09:30 +02:00
Nalin Dahyabhai 0da7ea1cc4 chroot: use symbolic names when complaining about mount() errors
Translate flags passed to mount() and read back using statfs() from hex
to named constants, to make troubleshooting a bit easier.

Signed-off-by: Nalin Dahyabhai <nalin@redhat.com>
2025-04-15 17:53:43 -04:00
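A sketch of the flag-to-name translation described above, assuming a small hand-made table and golang.org/x/sys/unix constants; the real code in chroot covers the full set of flags it reports.

```go
import (
	"sort"
	"strings"

	"golang.org/x/sys/unix"
)

var mountFlagNames = map[uintptr]string{
	unix.MS_RDONLY: "MS_RDONLY",
	unix.MS_NOSUID: "MS_NOSUID",
	unix.MS_NODEV:  "MS_NODEV",
	unix.MS_NOEXEC: "MS_NOEXEC",
}

// describeMountFlags turns a flag bitmask into a readable "A|B|C" string
// for error messages, instead of printing raw hex.
func describeMountFlags(flags uintptr) string {
	var names []string
	for bit, name := range mountFlagNames {
		if flags&bit != 0 {
			names = append(names, name)
		}
	}
	sort.Strings(names) // map iteration order is random
	return strings.Join(names, "|")
}
```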
Nalin Dahyabhai 08cc023eef cli: hide the `completion` command instead of disabling it outright
Hide the cobra built-in `completion` command instead of disabling it
outright, since eventually we want to switch to using it instead of the
hand-written scripts we're currently using.

Signed-off-by: Nalin Dahyabhai <nalin@redhat.com>
2025-04-15 17:28:28 -04:00
Nalin Dahyabhai 26ad103469 Document rw and src options for --mount flag in buildah-run(1)
Per discussion item 6084.

Signed-off-by: Nalin Dahyabhai <nalin@redhat.com>
2025-04-15 17:22:01 -04:00
openshift-merge-bot[bot] dca9197ba1
Merge pull request #6125 from containers/renovate/github.com-moby-buildkit-0.x
fix(deps): update module github.com/moby/buildkit to v0.21.0
2025-04-15 19:35:33 +00:00
renovate[bot] b85a67e5ce
fix(deps): update module github.com/moby/buildkit to v0.21.0
Signed-off-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com>
2025-04-15 17:24:09 +00:00
openshift-merge-bot[bot] 9a82bcc989
Merge pull request #6103 from flouthoc/inherit-labels
build: add support for `--inherit-labels`
2025-04-15 17:15:12 +00:00
Aditya R a235033662
build: add support for inherit-labels
Allows users to specify whether or not they want to inherit labels from
the base image.

Signed-off-by: flouthoc <flouthoc.git@gmail.com>
2025-04-14 14:01:00 -07:00
openshift-merge-bot[bot] ebbfb3ab13
Merge pull request #6122 from containers/renovate/golangci-golangci-lint-2.x
chore(deps): update dependency golangci/golangci-lint to v2.1.0
2025-04-14 14:08:47 +00:00
renovate[bot] d2e334df97
chore(deps): update dependency golangci/golangci-lint to v2.1.0
Signed-off-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com>
2025-04-12 18:13:55 +00:00
openshift-merge-bot[bot] 0db475592c
Merge pull request #6120 from Luap99/github
.github: check_cirrus_cron work around github bug
2025-04-11 17:30:22 +00:00
Paul Holzinger e6a7608219
.github: check_cirrus_cron work around github bug
So I wondered why our email workflow only reported things for podman...

It seems that `secrets: inherit` is broken and no longer working; I see all
jobs on all repos failing with:

Error when evaluating 'secrets'. .github/workflows/check_cirrus_cron.yml (Line: 19, Col: 11): Secret SECRET_CIRRUS_API_KEY is required, but not provided while calling.

This makes no sense to me. I double-checked the names, nothing changed
on our side, and it is consistent for all projects. Interestingly, this
same thing passed on March 10 and 11 (on all repos) but failed before
and after that.

Per [1] we are not alone; anyway, let's try to get this working again even
if it means more duplication.

[1] actions/runner#2709

Signed-off-by: Paul Holzinger <pholzing@redhat.com>
2025-04-11 18:59:18 +02:00
openshift-merge-bot[bot] 025e3e572f
Merge pull request #6110 from flouthoc/expand-mount-source
stage_executor,getCreatedBy: expand buildArgs before invoking `generatePathChecksum`
2025-04-10 17:34:59 +00:00
flouthoc bc05858e50
stage_executor,getCreatedBy: expand buildArgs for sources correctly
The patch in PR https://github.com/containers/buildah/pull/5691 added a
function to calculate and write the checksum of the `source` in `--mount`
instructions to the history, but it did not expand build args if they are
present in the `source` path.

This PR corrects the above issue and also adds a new test to make sure we
don't break this in the future.

Closes: https://github.com/containers/podman/issues/25425

Signed-off-by: flouthoc <flouthoc.git@gmail.com>
2025-04-10 10:02:56 -07:00
openshift-merge-bot[bot] 2de361a69a
Merge pull request #6108 from mheon/add_governance_and_owners
Add a link to project governance and MAINTAINERS file
2025-04-10 15:11:16 +00:00
Matt Heon 53cd313e10 Add a link to project governance and MAINTAINERS file
The MAINTAINERS file is based on the current OWNERS file. I added
our core maintainers as they are expected to have commit bits on
all project repositories.

The existing MAINTAINERS file, which appears out of date, was
removed in favor of the new one (which has all CNCF mandated
information).

Signed-off-by: Matt Heon <mheon@redhat.com>
2025-04-10 10:39:49 -04:00
openshift-merge-bot[bot] 5367a714f2
Merge pull request #6118 from kolyshkin/modernize
Modernize
2025-04-10 10:04:57 +00:00
openshift-merge-bot[bot] 47b3b1e783
Merge pull request #6100 from containers/renovate/github.com-containers-storage-digest
fix(deps): update github.com/containers/storage digest to b1d1b45
2025-04-09 21:57:59 +00:00
renovate[bot] d753211f60
fix(deps): update github.com/containers/storage digest to b1d1b45
Signed-off-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com>
2025-04-09 20:15:23 +00:00
Kir Kolyshkin 608e4cc24f generateHostname: simplify
No functionality change, just code simplification.

Covered by existing test (e.g. "run should also override /etc/hostname"
in tests/run.bats).

NO NEW TESTS NEEDED

Signed-off-by: Kir Kolyshkin <kolyshkin@gmail.com>
2025-04-08 15:26:09 -07:00
Kir Kolyshkin f261d6641c Use maps.Copy
Signed-off-by: Kir Kolyshkin <kolyshkin@gmail.com>
2025-04-08 15:26:09 -07:00
Kir Kolyshkin 76e58eea14 Use slices.Concat
Signed-off-by: Kir Kolyshkin <kolyshkin@gmail.com>
2025-04-08 15:26:09 -07:00
Kir Kolyshkin 0835cb4760 Use slices.Clone
Signed-off-by: Kir Kolyshkin <kolyshkin@gmail.com>
2025-04-08 15:26:09 -07:00
Kir Kolyshkin 84a3905f61 Use slices.Contains
Signed-off-by: Kir Kolyshkin <kolyshkin@gmail.com>
2025-04-08 15:26:01 -07:00
Kir Kolyshkin 17777cf8ac Use for range over integers
Available since Go 1.22 (see https://tip.golang.org/ref/spec#For_range).

Signed-off-by: Kir Kolyshkin <kolyshkin@gmail.com>
2025-04-08 10:19:51 -07:00
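For illustration (not part of the commit), the Go 1.22 form replaces the classic counter loop:

	// before
	for i := 0; i < n; i++ {
		fmt.Println(i)
	}
	// after (Go 1.22+)
	for i := range n {
		fmt.Println(i)
	}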
Kir Kolyshkin c46bbf46af tests/testreport: don't copy os.Environ
A slice returned by os.Environ is already a brand-new copy; there is no
need to copy it again.

Signed-off-by: Kir Kolyshkin <kolyshkin@gmail.com>
2025-04-08 10:19:51 -07:00
Kir Kolyshkin 1ede7ddce7 Use any instead of interface{}
Brought to you by

	gofmt -r 'interface{} -> any' -w .

Signed-off-by: Kir Kolyshkin <kolyshkin@gmail.com>
2025-04-08 10:19:51 -07:00
openshift-merge-bot[bot] bb240a6e40
Merge pull request #6109 from kolyshkin/golangci-v2
Golangci v2
2025-04-08 15:36:10 +00:00
openshift-merge-bot[bot] f42c70ea3a
Merge pull request #6117 from containers/renovate/github.com-containernetworking-cni-1.x
fix(deps): update module github.com/containernetworking/cni to v1.3.0
2025-04-07 21:59:58 +00:00
Kir Kolyshkin d9978eb2fa ci: add golangci-lint run with --tests=false
This helps find code which is unused except by its own tests. For
example, see PR 6101.

Signed-off-by: Kir Kolyshkin <kolyshkin@gmail.com>
2025-04-07 13:02:39 -07:00
Kir Kolyshkin e8dba98314 ci: add nolintlint, fix found issues
Signed-off-by: Kir Kolyshkin <kolyshkin@gmail.com>
2025-04-07 13:02:39 -07:00
Kir Kolyshkin 2162b3c93a copier: rm nolint:unparam annotation
This function always returns nil as its first return value, which makes
the unparam linter sad.

Rather than adding //nolint:unparam, let's just return nil at the actual
return sites.

Signed-off-by: Kir Kolyshkin <kolyshkin@gmail.com>
2025-04-07 13:02:34 -07:00
Kir Kolyshkin 757175d2e6 .golangci.yml: add unused linter
Signed-off-by: Kir Kolyshkin <kolyshkin@gmail.com>
2025-04-07 13:01:56 -07:00
Kir Kolyshkin 4249d94d76 chroot: fix unused warnings
When running golangci-lint run --tests=false, it complains:

> chroot/seccomp.go:15:7: const `seccompAvailable` is unused (unused)
> const seccompAvailable = true
>       ^
> chroot/seccomp.go:182:6: func `setupSeccomp` is unused (unused)
> func setupSeccomp(spec *specs.Spec, seccompProfilePath string) error {
>      ^

Fix this.

Signed-off-by: Kir Kolyshkin <kolyshkin@gmail.com>
2025-04-07 13:01:56 -07:00
Kir Kolyshkin e548eaf146 copier: fix unused warnings
Found when running golangci-lint with --tests=false, which results in:

> copier/syscall_unix.go:89:2: const `testModeMask` is unused (unused)
> 	testModeMask           = int64(os.ModePerm)
> 	^
> copier/syscall_unix.go:90:2: const `testIgnoreSymlinkDates` is unused (unused)
> 	testIgnoreSymlinkDates = false
> 	^

Signed-off-by: Kir Kolyshkin <kolyshkin@gmail.com>
2025-04-07 13:01:56 -07:00
Kir Kolyshkin 8b3c045638 tests/conformance: fix unused warning
There is some code in tests/conformance which is only used by tests.
Move it to *_test.go files.

Found by golangci-lint run --tests=false, which shows this warning:

> tests/conformance/selinux.go:9:6: func `selinuxMountFlag` is unused (unused)
> func selinuxMountFlag() string {
>      ^

Signed-off-by: Kir Kolyshkin <kolyshkin@gmail.com>
2025-04-07 13:01:56 -07:00
Kir Kolyshkin e8a5d77b09 ci: switch to golangci-lint v2
Also, add a way to update golangci-lint locally, if an old version is
installed.

Signed-off-by: Kir Kolyshkin <kolyshkin@gmail.com>
2025-04-07 13:01:56 -07:00
Kir Kolyshkin 195c6b0c9e internal/mkcw: disable ST1003 warnings
Disable warnings like this one:

> internal/mkcw/workload.go:34:2: ST1003: should not use ALL_CAPS in Go names; use CamelCase instead (staticcheck)
> 	SEV_NO_ES = types.SEV_NO_ES //revive:disable-line:var-naming
> 	^


Signed-off-by: Kir Kolyshkin <kolyshkin@gmail.com>
2025-04-07 13:01:56 -07:00
Kir Kolyshkin b13670be84 tests/conformance: do not double import (fix ST1019)
This fixes the following warning:

> tests/conformance/conformance_test.go:37:2: ST1019: package "github.com/containers/image/v5/storage" is being imported more than once (staticcheck)
> 	is "github.com/containers/image/v5/storage"
> 	^
> tests/conformance/conformance_test.go:38:2: ST1019(related information): other import of "github.com/containers/image/v5/storage" (staticcheck)
> 	istorage "github.com/containers/image/v5/storage"
> 	^

Fixes: 3a61cc099 ("Add OverrideChanges and OverrideConfig to CommitOptions")
Signed-off-by: Kir Kolyshkin <kolyshkin@gmail.com>
2025-04-07 13:01:56 -07:00
Kir Kolyshkin 794715f9ae cmd/buildah: don't double import (fix ST1019)
This fixes the following warning:

> cmd/buildah/manifest.go:29:2: ST1019: package "github.com/opencontainers/image-spec/specs-go/v1" is being imported more than once (staticcheck)
> 	imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1"
> 	^
> cmd/buildah/manifest.go:30:2: ST1019(related information): other import of "github.com/opencontainers/image-spec/specs-go/v1" (staticcheck)
> 	v1 "github.com/opencontainers/image-spec/specs-go/v1"
> 	^

Fixes: aca884a89 ("`buildah manifest`: add artifact-related options")
Signed-off-by: Kir Kolyshkin <kolyshkin@gmail.com>
2025-04-07 13:01:56 -07:00
Kir Kolyshkin b343757b11 Do not capitalize error strings
This fixes "ST1005: error strings should not be capitalized
(staticcheck)" warnings.

Signed-off-by: Kir Kolyshkin <kolyshkin@gmail.com>
2025-04-07 13:01:56 -07:00
Kir Kolyshkin 444edbee0c cmd/buildah: do not capitalize error strings
This fixes "ST1005: error strings should not be capitalized (staticcheck)" warnings

Signed-off-by: Kir Kolyshkin <kolyshkin@gmail.com>
2025-04-07 13:01:56 -07:00
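As a hedged illustration of the ST1005 fix (the message text is made up, not from the patch):

	// before: flagged by staticcheck
	return errors.New("Something went wrong")
	// after
	return errors.New("something went wrong")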
Kir Kolyshkin 6cf417995b tests/conformance: fix QF1012 warnings
These:

> tests/conformance/conformance_test.go:1361:16: QF1012: Use fmt.Fprintf(...) instead of Write([]byte(fmt.Sprintf(...))) (staticcheck)
> 		if _, err := tw.Write([]byte(fmt.Sprintf("Field\tDocker\t%s\n", notDocker))); err != nil {
> 		             ^
> tests/conformance/conformance_test.go:1393:16: QF1012: Use fmt.Fprintf(...) instead of Write([]byte(fmt.Sprintf(...))) (staticcheck)
> 		if _, err := tw.Write([]byte(fmt.Sprintf("File:attr\tDocker\t%s\n", notDocker))); err != nil {
> 		             ^

Signed-off-by: Kir Kolyshkin <kolyshkin@gmail.com>
2025-04-07 13:01:56 -07:00
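For reference, the suggested form of the call looks roughly like this (a sketch; the real test may handle the error differently):

	if _, err := fmt.Fprintf(tw, "Field\tDocker\t%s\n", notDocker); err != nil {
		t.Fatal(err)
	}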
Kir Kolyshkin f6e63ef10c tests/serve: fix QF1012 warning
This one:

> tests/serve/serve.go:56:16: QF1012: Use fmt.Fprintf(...) instead of WriteString(fmt.Sprintf(...)) (staticcheck)
> 		if _, err := f.WriteString(fmt.Sprintf("%d", os.Getpid())); err != nil {
> 		             ^

Instead of following the (decent) recommendation, use os.WriteFile
and replace printf(%d) with strconv.Itoa.

Signed-off-by: Kir Kolyshkin <kolyshkin@gmail.com>
2025-04-07 13:01:56 -07:00
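A minimal sketch of the replacement described above (the file name and mode here are assumptions, not the actual patch):

	if err := os.WriteFile(pidFile, []byte(strconv.Itoa(os.Getpid())), 0o644); err != nil {
		return err
	}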
Kir Kolyshkin f9aed9704e Use strings.ReplaceAll to fix QF1004 warnings
These:

> cmd/buildah/images.go:192:10: QF1004: could use strings.ReplaceAll instead (staticcheck)
> 		return strings.Replace(opts.format, `\t`, "\t", -1)
> 		       ^
> pkg/formats/formats.go:97:16: QF1004: could use strings.ReplaceAll instead (staticcheck)
> 		t.Template = strings.Replace(strings.TrimSpace(t.Template[5:]), " ", "\t", -1)
> 		             ^
> tests/testreport/testreport.go:328:13: QF1004: could use strings.ReplaceAll instead (staticcheck)
> 		sysctl := strings.Replace(path, "/", ".", -1)
> 		          ^

Signed-off-by: Kir Kolyshkin <kolyshkin@gmail.com>
2025-04-07 13:01:56 -07:00
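The suggested rewrite is simply (sketch, using the first of the flagged call sites):

	return strings.ReplaceAll(opts.format, `\t`, "\t")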
Kir Kolyshkin 52001ac4de Use switch to fix QF1003 warnings
These:

> copier/copier.go:1664:2: QF1003: could use tagged switch on hdr.Typeflag (staticcheck)
> 	if hdr.Typeflag == tar.TypeReg {
> 	^
> pkg/parse/parse.go:708:4: QF1003: could use tagged switch on arr[1] (staticcheck)
> 			if arr[1] == "local" {
> 			^

Signed-off-by: Kir Kolyshkin <kolyshkin@gmail.com>
2025-04-07 13:01:56 -07:00
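An illustration of the tagged-switch form QF1003 asks for (a sketch, not the exact patch):

	// before
	if hdr.Typeflag == tar.TypeReg {
		// handle regular files
	}
	// after
	switch hdr.Typeflag {
	case tar.TypeReg:
		// handle regular files
	}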
Kir Kolyshkin 7b15646547 Apply De Morgan's law to fix QF1001 warnings
This fixes the following warnings:

> chroot/run_test.go:319:39: QF1001: could apply De Morgan's law (staticcheck)
> 				if limit == unix.RLIM_INFINITY && !(rlim == nil || (rlim.Soft == unix.RLIM_INFINITY && rlim.Hard == unix.RLIM_INFINITY)) {
> 				                                  ^
> copier/copier.go:1012:92: QF1001: could apply De Morgan's law (staticcheck)
> 			if target, err := os.Readlink(filepath.Join(workingPath, components[0])); err == nil && !(len(components) == 1 && !evaluateFinalComponent) {
> 			                                                                                        ^
> run_linux.go:459:118: QF1001: could apply De Morgan's law (staticcheck)
> 	if !slices.Contains(volumes, resolvconf.DefaultResolvConf) && options.ConfigureNetwork != define.NetworkDisabled && !(len(b.CommonBuildOpts.DNSServers) == 1 && strings.ToLower(b.CommonBuildOpts.DNSServers[0]) == "none") {
> 	                                                                                                                    ^

Signed-off-by: Kir Kolyshkin <kolyshkin@gmail.com>
2025-04-07 13:01:56 -07:00
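De Morgan's law rewrites a negated disjunction as a conjunction of negations, e.g. (illustrative only, not the flagged code):

	// before
	ok := !(x == 0 || y == 0)
	// after (De Morgan)
	ok := x != 0 && y != 0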
Kir Kolyshkin 35a5b3aaef Fix QF1007 staticcheck warnings
These:

> add.go:457:2: QF1007: could merge conditional assignment into variable declaration (staticcheck)
> 	newDestDirFound := false
> 	^
> cmd/buildah/umount.go:33:2: QF1007: could merge conditional assignment into variable declaration (staticcheck)
> 	umountAll := false
> 	^

Signed-off-by: Kir Kolyshkin <kolyshkin@gmail.com>
2025-04-07 13:01:56 -07:00
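QF1007 suggests collapsing the declare-then-conditionally-assign pattern, e.g. (generic names, not the actual code):

	// before
	found := false
	if cond {
		found = true
	}
	// after
	found := cond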
Kir Kolyshkin cc2c6d361c imagebuildah: fix revive warning
> imagebuildah/stage_executor.go:714:13: superfluous-else: if block ends with a break statement, so drop this else and outdent its block (move short variable declaration to its own line if necessary) (revive)
> 					} else {
> 						// Treat the source's name as the name of an image.
> 						mountPoint, err := s.getImageRootfs(s.ctx, from)
> 						if err != nil {
> 							return nil, fmt.Errorf("%s from=%s: no stage or image found with that name", flag, from)
> 						}
> 						stageMountPoints[from] = internal.StageMountDetails{
> 							IsImage:    true,
> 							DidExecute: true,
> 							MountPoint: mountPoint,
> 						}
> 						break
> 					}
>

(The alternative is to keep "else" and remove "break", but there are
other break statements above it, so for style consistency it's better to
keep using break.)

Signed-off-by: Kir Kolyshkin <kolyshkin@gmail.com>
2025-04-07 13:01:56 -07:00
Kir Kolyshkin dcabec56ec Rename max variable
This fixes the following warnings:

> run_linux.go:1065:3: redefines-builtin-id: redefinition of the built-in function max (revive)
> 		max := define.RLimitDefaultValue
> 		^
> run_linux.go:1069:5: redefines-builtin-id: redefinition of the built-in function max (revive)
> 				max = rlimit.Max
> 				^
> run_linux.go:1077:3: redefines-builtin-id: redefinition of the built-in function max (revive)
> 		max := define.RLimitDefaultValue
> 		^

Signed-off-by: Kir Kolyshkin <kolyshkin@gmail.com>
2025-04-07 13:01:56 -07:00
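The fix is just a rename so the identifier no longer shadows the builtin, e.g. (the new name here is illustrative):

	// before: shadows the built-in max
	max := define.RLimitDefaultValue
	// after
	maxLimit := define.RLimitDefaultValue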
Kir Kolyshkin ea1baab2c3 tests/tools: install lint from binary, use renovate
This way is recommended by golangci-lint developers, plus we'll save
some build time.

In addition, add GOLANGCI_LINT_VERSION to the top-level Makefile,
so it can be updated by renovate.

Signed-off-by: Kir Kolyshkin <kolyshkin@gmail.com>
2025-04-07 13:01:56 -07:00
openshift-merge-bot[bot] 1051965337
Merge pull request #5971 from flouthoc/reset-context-platform
stage_executor: reset platform in systemcontext for every stage.
2025-04-07 18:41:45 +00:00
openshift-merge-bot[bot] b25cf06bd3
Merge pull request #6115 from containers/renovate/golang.org-x-crypto-0.x
fix(deps): update module golang.org/x/crypto to v0.37.0
2025-04-07 17:26:35 +00:00
renovate[bot] 76be539d63
fix(deps): update module github.com/containernetworking/cni to v1.3.0
Signed-off-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com>
2025-04-07 16:41:19 +00:00
Lewis 17bb74308c
Update Buildah issue template to new version and support podman build
Signed-off-by: Lewis Denny <lewis@redhat.com>
2025-04-07 09:51:57 +10:00
renovate[bot] b459a0720d
fix(deps): update module golang.org/x/crypto to v0.37.0
Signed-off-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com>
2025-04-06 17:32:12 +00:00
flouthoc 022ff233fc
stage_executor: reset platform in systemcontext for stages
Every stage now has its own copy of systemcontext.

When processing each stage, the platform spec in the systemcontext must
be correctly reset.

Closes: https://github.com/containers/buildah/issues/5968

Signed-off-by: flouthoc <flouthoc.git@gmail.com>
2025-04-03 09:19:05 -07:00
openshift-merge-bot[bot] 7509c34542
Merge pull request #6102 from containers/renovate/github.com-opencontainers-runtime-tools-digest
fix(deps): update github.com/opencontainers/runtime-tools digest to 260e151
2025-04-02 20:45:52 +00:00
openshift-merge-bot[bot] 72fddb21db
Merge pull request #6101 from kolyshkin/unused
cmd/buildah: rm unused code
2025-04-02 18:44:47 +00:00
openshift-merge-bot[bot] 32d78c69be
Merge pull request #6092 from flouthoc/git-error-cde
build: return `ExecErrorCodeGeneric` when git operation fails instead of relaying error code directly from git
2025-04-01 18:08:30 +00:00
renovate[bot] e758787602
fix(deps): update github.com/opencontainers/runtime-tools digest to 260e151
Signed-off-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com>
2025-04-01 07:28:54 +00:00
Kir Kolyshkin 03da194c75 cmd/buildah: rm unused containerOutputUsingTemplate
Its last user was removed by commit 726e24d5e ("make --format
columnizing consistent with buildah images") in 2019, so since that time
it has not been used by anything but its own unit tests.

Remove it.

Signed-off-by: Kir Kolyshkin <kolyshkin@gmail.com>
2025-03-31 23:01:38 -07:00
Kir Kolyshkin 5e6c645e63 cmd/buildah: rm unused getDateAndDigestAndSize
The last user of getDateAndDigestAndSize was removed by commit dcd2a92e5
("use new containers/common/libimage package") in 2021, so since then it
has not been used anywhere except its own unit test.

Remove it.

Signed-off-by: Kir Kolyshkin <kolyshkin@gmail.com>
2025-03-31 23:00:49 -07:00
flouthoc a4bc357820
build: return ExecErrorCodeGeneric when git operation fails
Only propagate the error message from git and let buildah return error code
`125`.

Reason: Buildah should return a predictable error code from the set of
error codes defined in exec_codes.go at https://github.com/containers/buildah/blob/main/pkg/cli/exec_codes.go#L6;
anything other than the predefined error codes introduces inconsistency, making testing difficult in CI and podman.

Users should expect buildah to return ExecErrorCodeGeneric with the error message from the underlying `git`
commands kept intact.

Signed-off-by: flouthoc <flouthoc.git@gmail.com>
2025-03-28 07:06:44 -07:00
openshift-merge-bot[bot] 898fbb2d25
Merge pull request #6083 from danegsta/danegsta/windowsExtendedGlob
Allow extendedGlob to work with Windows paths
2025-03-26 20:40:00 +00:00
openshift-merge-bot[bot] e4d3dc08b1
Merge pull request #6087 from flouthoc/add-report-err
add: report error while creating dir for URL source.
2025-03-26 18:00:37 +00:00
openshift-merge-bot[bot] 6e77a5cf84
Merge pull request #6085 from containers/renovate/github.com-docker-docker-28.x
fix(deps): update module github.com/docker/docker to v28.0.3+incompatible
2025-03-26 17:08:15 +00:00
flouthoc 0c377816a9
add: report error while creating dir for URL source.
Correctly report back the error when attempting to create `Tmpdir` for a
given URL source.

Also remove the superfluous `isGitTag` from define/types.go, since `git
fetch` correctly resolves by the provided reference.

Closes: https://github.com/containers/podman/issues/25679

Signed-off-by: flouthoc <flouthoc.git@gmail.com>
2025-03-26 08:10:59 -07:00
openshift-merge-bot[bot] f15866c9f7
Merge pull request #6088 from nalind/image-mode
createPlatformContainer: drop MS_REMOUNT|MS_BIND
2025-03-26 14:42:06 +00:00
Nalin Dahyabhai c1fcbb2d96 createPlatformContainer: drop MS_REMOUNT|MS_BIND
When attempting to change the mount propagation of the old root
directory tree before unmounting it, it's enough that we pass the
requested propagation flags.

In particular, MS_REC is the only flag that is supposed to be allowed to
be specified along with a mount propagation flag, but in practice it was
only triggering an error some of the time, and CI wasn't one of those
times.

The added test mounts the root filesystem as an overlay and then runs
buildah as a rootless user on top of that, which is more comparable to a
root-on-composefs configuration, which manages to trigger the error.

Signed-off-by: Nalin Dahyabhai <nalin@redhat.com>
2025-03-25 17:47:46 -04:00
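A minimal sketch of changing propagation on an existing mount with only the propagation flags, as described above (the path and the exact flag choice are illustrative, not the code from the patch):

	// using golang.org/x/sys/unix
	if err := unix.Mount("", "/old-root", "", unix.MS_PRIVATE|unix.MS_REC, ""); err != nil {
		return err
	}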
openshift-merge-bot[bot] 243d8976d8
Merge pull request #6023 from aeijdenberg/usehistorytimestampinociarchive
feat: use HistoryTimestamp, if set, for oci-archive entries
2025-03-25 15:28:22 +00:00
openshift-merge-bot[bot] a6826938b8
Merge pull request #6082 from containers/renovate/github.com-moby-buildkit-0.x
fix(deps): update module github.com/moby/buildkit to v0.20.2
2025-03-25 14:35:50 +00:00
renovate[bot] 28dec564af
fix(deps): update module github.com/docker/docker to v28.0.3+incompatible
Signed-off-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com>
2025-03-25 14:34:25 +00:00
openshift-merge-bot[bot] 3828ac91b6
Merge pull request #6017 from containers/renovate/github.com-openshift-imagebuilder-digest
fix(deps): update github.com/openshift/imagebuilder digest to e87e4e1
2025-03-25 14:33:10 +00:00
Adam Eijdenberg 42e63dc0b9 fix: bats won't fail on ! without cleverness
Signed-off-by: Adam Eijdenberg <adam@continusec.com>
2025-03-25 10:23:44 +00:00
Adam Eijdenberg 9b41f3c9b5 feat: use HistoryTimestamp, if set, for oci-archive entries
Signed-off-by: Adam Eijdenberg <adam@continusec.com>
2025-03-25 07:14:04 +00:00
David Negstad 74cfd3cfc3 Allow extendedGlob to work with Windows paths
Signed-off-by: David Negstad <David.Negstad@microsoft.com>
2025-03-24 18:15:16 -07:00
openshift-merge-bot[bot] 915769a07b
Merge pull request #5998 from containers/renovate/github.com-docker-docker-28.x
fix(deps): update module github.com/docker/docker to v28.0.2+incompatible
2025-03-24 20:43:19 +00:00
renovate[bot] 42b5630f56
fix(deps): update module github.com/moby/buildkit to v0.20.2
Signed-off-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com>
2025-03-24 20:42:14 +00:00
renovate[bot] 72562d8145
fix(deps): update github.com/openshift/imagebuilder digest to e87e4e1
Signed-off-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com>
2025-03-24 20:41:59 +00:00
openshift-merge-bot[bot] 06b397f4c2
Merge pull request #6081 from containers/renovate/major-ci-vm-image
chore(deps): update dependency containers/automation_images to v20250324
2025-03-24 18:28:34 +00:00
renovate[bot] 1edce9c5e0
fix(deps): update module github.com/docker/docker to v28.0.2+incompatible
Signed-off-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com>
2025-03-24 17:22:43 +00:00
openshift-merge-bot[bot] bf439b70fb
Merge pull request #6078 from containers/renovate/tags.cncf.io-container-device-interface-1.x
fix(deps): update module tags.cncf.io/container-device-interface to v1.0.1
2025-03-24 17:08:48 +00:00
renovate[bot] 3466f149a5
fix(deps): update module tags.cncf.io/container-device-interface to v1.0.1
Signed-off-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com>
2025-03-24 15:38:52 +00:00
renovate[bot] d38eceee19
chore(deps): update dependency containers/automation_images to v20250324
Signed-off-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com>
2025-03-24 15:30:45 +00:00
openshift-merge-bot[bot] 6d9381d082
Merge pull request #6079 from Luap99/selinux
vendor: update github.com/opencontainers/selinux to v1.12.0
2025-03-24 15:30:01 +00:00
Paul Holzinger cbf7e361e6
vendor: update github.com/opencontainers/selinux to v1.12.0
Signed-off-by: Paul Holzinger <pholzing@redhat.com>
2025-03-24 13:36:19 +01:00
Paul Holzinger a1715cc863
replace deprecated selinux/label calls
These functions were removed in github.com/opencontainers/selinux
v1.12.0.

Signed-off-by: Paul Holzinger <pholzing@redhat.com>
2025-03-24 13:32:00 +01:00
openshift-merge-bot[bot] b478b9f6d8
Merge pull request #6076 from Honny1/fix-arm64-1
Fix built-in args on ARM64
2025-03-21 19:08:47 +00:00
openshift-merge-bot[bot] a7ea2aeb2d
Merge pull request #6039 from flouthoc/vendor-common
vendor: bump c/common to `dbeb17e40c80`
2025-03-21 19:06:01 +00:00
flouthoc 2029431a2d
vendor: bump c/common to dbeb17e40c80
Signed-off-by: flouthoc <flouthoc.git@gmail.com>
2025-03-21 10:27:52 -07:00
Jan Rodák 38df169fcb
Use builtin arg defaults from imagebuilder
Signed-off-by: Jan Rodák <hony.com@seznam.cz>
2025-03-21 15:06:10 +01:00
openshift-merge-bot[bot] bc4d7eb70f
Merge pull request #6074 from giuseppe/mask-thermal-paths
Mask thermal paths
2025-03-21 12:32:19 +00:00
Giuseppe Scrivano 2031709a69
linux: accept unmask paths as glob values
That is the same configuration used already by Podman.

Signed-off-by: Giuseppe Scrivano <gscrivan@redhat.com>
2025-03-21 08:54:32 +01:00
Giuseppe Scrivano 4dcc397ac5
vendor: update containers/common
Closes: https://github.com/containers/buildah/issues/6073

Signed-off-by: Giuseppe Scrivano <gscrivan@redhat.com>
2025-03-21 08:50:11 +01:00
openshift-merge-bot[bot] 3e3baeeb2c
Merge pull request #6008 from Honny1/copy-parents
Add --parents option for COPY in Containerfiles
2025-03-20 10:48:59 +00:00
Jan Rodák 7ca9f3464b
Add --parents option for COPY in Dockerfiles
It also includes an implementation of the --parents flag for the buildah copy command.

Fixes: https://issues.redhat.com/browse/RUN-2193
Fixes: https://github.com/containers/buildah/issues/5557

Signed-off-by: Jan Rodák <hony.com@seznam.cz>
2025-03-18 21:58:27 +01:00
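A hedged Containerfile example of the new flag (paths are made up; see the buildah documentation for the exact semantics of how parent directories are preserved):

	COPY --parents ./src/app/*.go ./src/lib/*.go /workspace/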
openshift-merge-bot[bot] e8d8be71cd
Merge pull request #6070 from containers/renovate/github.com-opencontainers-runc-1.x
fix(deps): update module github.com/opencontainers/runc to v1.2.6
2025-03-18 15:50:21 +00:00
renovate[bot] e2062cb0f3
fix(deps): update module github.com/opencontainers/runc to v1.2.6
Signed-off-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com>
2025-03-18 02:20:03 +00:00
openshift-merge-bot[bot] 981bea61b7
Merge pull request #6045 from containers/renovate/tags.cncf.io-container-device-interface-1.x
fix(deps): update module tags.cncf.io/container-device-interface to v1
2025-03-17 15:01:29 +00:00
Nalin Dahyabhai 2adb9e288e update go.sum from the previous commit
Signed-off-by: Nalin Dahyabhai <nalin@redhat.com>
2025-03-13 15:13:36 -04:00
renovate[bot] 218afc9211
fix(deps): update module tags.cncf.io/container-device-interface to v1
Signed-off-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com>
2025-03-13 18:48:21 +00:00
openshift-merge-bot[bot] 78d2c1f03e
Merge pull request #6064 from containers/renovate/go-golang.org-x-net-vulnerability
chore(deps): update module golang.org/x/net to v0.36.0 [security]
2025-03-13 18:37:24 +00:00
renovate[bot] ab5c5e519f
chore(deps): update module golang.org/x/net to v0.36.0 [security]
Signed-off-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com>
2025-03-13 15:41:10 +00:00
openshift-merge-bot[bot] bfe568d354
Merge pull request #6060 from Luap99/go-1.23
Update to Go 1.23 and bump golang.org/x/crypto v0.36.0
2025-03-13 15:39:51 +00:00
Paul Holzinger 8e9e02c103
packit: remove f40 from copr builds
Go 1.22 is too old; once F40 is updated to Go 1.23 we can revert this.

Signed-off-by: Paul Holzinger <pholzing@redhat.com>
2025-03-13 14:36:42 +01:00
Paul Holzinger 8670aebc68
cirrus: update to go 1.23 image
Signed-off-by: Paul Holzinger <pholzing@redhat.com>
2025-03-13 14:32:48 +01:00
Paul Holzinger 12f89d1314
vendor bump to golang.org/x/crypto v0.36.0
This also bumps us to go 1.23 as minimum supported version.

Fixes: CVE-2025-22869
Fixes: https://issues.redhat.com/browse/RHEL-82771
Fixes: https://issues.redhat.com/browse/RHEL-81310

Signed-off-by: Paul Holzinger <pholzing@redhat.com>
2025-03-13 14:32:48 +01:00
Paul Holzinger 9d1ecbb7f5
cirrus: update PRIOR_FEDORA comment
Oh well, so much for paying attention to buildah CI. Nobody seems to
have noticed that F40 is not tested. Anyway, now that we bumped to Go 1.23
we can no longer build on it until Go 1.23 ships on F40, which might
still take a few weeks.

Signed-off-by: Paul Holzinger <pholzing@redhat.com>
2025-03-13 14:32:48 +01:00
openshift-merge-bot[bot] 1f44a9760c
Merge pull request #6047 from aeijdenberg/altsimpler
fix: dockerfiles... always evaluates to an empty list
2025-03-12 20:06:04 +00:00
openshift-merge-bot[bot] 0850612f5d
Merge pull request #6063 from Luap99/remove-cirrus-rerun
github: remove cirrus rerun action
2025-03-12 19:39:50 +00:00
openshift-merge-bot[bot] 443eace8be
Merge pull request #6062 from containers/renovate/github.com-containers-common-0.x
fix(deps): update module github.com/containers/common to v0.62.2
2025-03-12 19:33:00 +00:00
Paul Holzinger 1192c71b63
github: remove cirrus rerun action
It is broken and we have removed the original in podman, so remove it
here as well, since it will no longer work.

Fixes #6035

Signed-off-by: Paul Holzinger <pholzing@redhat.com>
2025-03-12 18:24:52 +01:00
renovate[bot] 21f8dd3731
fix(deps): update module github.com/containers/common to v0.62.2
Signed-off-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com>
2025-03-12 15:30:38 +00:00
openshift-merge-bot[bot] 45b31a92ee
Merge pull request #5999 from hdub-tech/secret-examples
buildah-build.1.md: secret examples
2025-03-11 17:41:54 +00:00
openshift-merge-bot[bot] dea716529e
Merge pull request #6057 from containers/renovate/github.com-containers-image-v5-5.x
fix(deps): update module github.com/containers/image/v5 to v5.34.2
2025-03-11 14:10:30 +00:00
renovate[bot] 225a6fcd2b
fix(deps): update module github.com/containers/image/v5 to v5.34.2
Signed-off-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com>
2025-03-11 02:30:20 +00:00
Adam Eijdenberg 2bc7ac11a2 fix: close files properly when BuildDockerfiles exits
The previous code called defer with the argument "dockerfiles...", which
is always empty at the time of evaluation.

Even though dockerfiles is appended to later in this method, it has no
entries when it is evaluated as an argument to the deferred function, so
no files were ever closed as a result.

Minor refactor so that all referenced files and HTTP requests are now
closed at end of function.

Signed-off-by: Adam Eijdenberg <adam@continusec.com>
2025-03-10 03:35:18 +00:00
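The pitfall is that a deferred call's arguments are evaluated when the defer statement runs, not when the function returns. A minimal sketch of the pattern (closeAll is a hypothetical helper, not from the patch):

	var files []*os.File
	defer closeAll(files...) // files... is evaluated here, while the slice is still empty
	f, err := os.Open(path)
	if err != nil {
		return err
	}
	files = append(files, f) // appended later; the deferred closeAll never sees f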
openshift-merge-bot[bot] 8fd8abcf38
Merge pull request #6041 from flouthoc/heredoc-buildarg
stage_executor: history should include heredoc summary correctly
2025-03-07 21:25:22 +00:00
openshift-merge-bot[bot] ac000a6f23
Merge pull request #6042 from containers/renovate/github.com-containers-storage-1.x
fix(deps): update module github.com/containers/storage to v1.57.2
2025-03-07 19:13:22 +00:00
openshift-merge-bot[bot] 81eaf4fc3a
Merge pull request #6014 from mtrmac/enforce-digests
Use UnparsedInstance.Manifest instead of ImageSource.GetManifest
2025-03-07 18:34:53 +00:00
renovate[bot] 1c9cc54aa1
fix(deps): update module github.com/containers/storage to v1.57.2
Signed-off-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com>
2025-03-07 16:28:16 +00:00
openshift-merge-bot[bot] 1922841243
Merge pull request #6021 from containers/renovate/github.com-containers-common-0.x
fix(deps): update module github.com/containers/common to v0.62.1
2025-03-07 16:14:39 +00:00
flouthoc 4dd300b1a7
stage_executor: history should include heredoc summary correctly
getCreatedBy ignores the heredoc summary when build args are specified;
this PR makes sure the behaviour is correct.

The test is also modified to make sure buildah correctly busts the cache
if heredoc content is changed.

Closes: https://github.com/containers/podman/issues/25469

Signed-off-by: flouthoc <flouthoc.git@gmail.com>
2025-03-07 07:56:52 -08:00
renovate[bot] 03c54f8647
fix(deps): update module github.com/containers/common to v0.62.1
Signed-off-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com>
2025-03-06 21:48:30 +00:00
openshift-merge-bot[bot] 7776f50395
Merge pull request #6036 from Luap99/cron-rerun-disable
github: disable cron rerun action
2025-03-06 21:13:46 +00:00
openshift-merge-bot[bot] b2d07d9710
Merge pull request #6016 from containers/renovate/github.com-containers-luksy-digest
fix(deps): update github.com/containers/luksy digest to 40bd943
2025-03-06 20:29:54 +00:00
Paul Holzinger df4b04f959
github: disable cron rerun action
Something is very wrong with the rerun script here; it needs more
investigation. For now, let's disable it, as it doesn't work correctly.

see #6035

Signed-off-by: Paul Holzinger <pholzing@redhat.com>
2025-03-06 19:43:37 +01:00
openshift-merge-bot[bot] 0bab4329ee
Merge pull request #6032 from containers/renovate/github.com-moby-buildkit-0.x
fix(deps): update module github.com/moby/buildkit to v0.20.1
2025-03-05 18:39:07 +00:00
renovate[bot] c5d6b9e7b9
fix(deps): update module github.com/moby/buildkit to v0.20.1
Signed-off-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com>
2025-03-05 17:20:35 +00:00
openshift-merge-bot[bot] 524a4c63f1
Merge pull request #6031 from nalind/fix-import
internal/mkcw.Archive(): use github.com/containers/storage/pkg/ioutils
2025-03-05 17:19:23 +00:00
Nalin Dahyabhai 264f557dbe internal/mkcw.Archive(): use github.com/containers/storage/pkg/ioutils
Use the AtomicWriteFile() from github.com/containers/storage/pkg/ioutils
instead of the one from github.com/docker/docker/pkg/ioutils.

Signed-off-by: Nalin Dahyabhai <nalin@redhat.com>
2025-03-05 10:54:36 -05:00
openshift-merge-bot[bot] ae1e1d55dc
Merge pull request #5885 from lsm5/tmt-system-tests
[skip-ci] TMT: system tests
2025-03-04 15:54:44 +00:00
Lokesh Mandvekar c54e43a9fb
[skip-ci] TMT: system tests
This commit adds TMT jobs for system tests triggered via Packit for
all active Fedora and CentOS Stream releases on x86_64.

TODO: enable aarch64 tests.

Official Fedora and CentOS Stream spec and gating test configs can be
synced from upstream by Packit, effectively upstreaming almost all
maintenance.

Signed-off-by: Lokesh Mandvekar <lsm5@fedoraproject.org>
2025-03-04 16:22:48 +05:30
H Dub 1547d4f787
buildah-build.1.md: secret examples
Signed-off-by: H Dub <14808878+hdub-tech@users.noreply.github.com>
Helped-by: tomsweeneyredhat <tsweeney@redhat.com>
2025-03-03 21:39:52 -06:00
renovate[bot] a1054580ec
fix(deps): update github.com/containers/luksy digest to 40bd943
Signed-off-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com>
2025-03-03 19:16:55 +00:00
openshift-merge-bot[bot] 2517aa202f
Merge pull request #6020 from containers/renovate/github.com-opencontainers-image-spec-1.x
fix(deps): update module github.com/opencontainers/image-spec to v1.1.1
2025-03-03 18:53:02 +00:00
openshift-merge-bot[bot] 23b7736113
Merge pull request #6015 from containers/renovate/github.com-containers-image-v5-5.x
fix(deps): update module github.com/containers/image/v5 to v5.34.1
2025-03-03 18:39:16 +00:00
renovate[bot] 68330c1699
fix(deps): update module github.com/opencontainers/image-spec to v1.1.1
Signed-off-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com>
2025-03-03 16:15:17 +00:00
renovate[bot] 1f8f75eefa
fix(deps): update module github.com/containers/image/v5 to v5.34.1
Signed-off-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com>
2025-02-28 22:34:02 +00:00
Miloslav Trmač cc4cca08d4 Use UnparsedInstance.Manifest instead of ImageSource.GetManifest
... to validate that the manifests match expected digests, if any.

In some cases, using an UnparsedInstance can also avoid redundant I/O.

Do this everywhere, even where we read local storage which is
mostly trusted, because it is cheap enough and being consistent
makes it less likely for the code to be copied into other
contexts where the sources are not trusted.

Signed-off-by: Miloslav Trmač <mitr@redhat.com>
2025-02-28 20:16:07 +01:00
openshift-merge-bot[bot] c2e6d012d4
Merge pull request #6013 from containers/renovate/github.com-opencontainers-runtime-spec-1.x
fix(deps): update module github.com/opencontainers/runtime-spec to v1.2.1
2025-02-28 18:00:37 +00:00
renovate[bot] e10af36639
fix(deps): update module github.com/opencontainers/runtime-spec to v1.2.1
Signed-off-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com>
2025-02-28 16:26:01 +00:00
openshift-merge-bot[bot] 5a57a86a38
Merge pull request #6012 from nalind/origin-raw
tests/conformance/testdata/Dockerfile.add: update some URLs
2025-02-28 15:36:24 +00:00
Nalin Dahyabhai d5b30b000a tests/conformance/testdata/Dockerfile.add: update some URLs
The origin repository renamed its "master" branch to "main", so we need
to update our references to items in that branch.

Signed-off-by: Nalin Dahyabhai <nalin@redhat.com>
2025-02-27 14:09:15 -05:00
openshift-merge-bot[bot] 4c3081b63e
Merge pull request #5990 from Honny1/fix-arm64
Fix Conformance tests on ARM64
2025-02-27 16:00:04 +00:00
openshift-merge-bot[bot] bb6e3d45de
Merge pull request #6005 from containers/renovate/go-github.com-go-jose-go-jose-v4-vulnerability
chore(deps): update module github.com/go-jose/go-jose/v4 to v4.0.5 [security]
2025-02-25 16:42:17 +00:00
Jan Rodák 177b845bd6
Vendor imagebuilder
The new imagebuilder contains a fix that sets empty TARGETVARIANT and BUILDVARIANT. Docker does not set these values for ARM64.
This fixes the TestConformance/multistage-builtin-args and TestConformance/builtins tests on ARM64.

Signed-off-by: Jan Rodák <hony.com@seznam.cz>
2025-02-25 17:01:21 +01:00
Jan Rodák 1f142eb581
Fix source of OS, architecture and variant
This fixes retrieval of the CPU variant on ARM64 and fixes the
TestConformance/setuid-file-in-archive test on ARM64.

Signed-off-by: Jan Rodák <hony.com@seznam.cz>
2025-02-25 17:01:21 +01:00
renovate[bot] 2b96c386e0
chore(deps): update module github.com/go-jose/go-jose/v4 to v4.0.5 [security]
Signed-off-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com>
2025-02-25 15:24:34 +00:00
openshift-merge-bot[bot] f61d2ae9d3
Merge pull request #5995 from flouthoc/parallel-conformance
conformance: make `TestCommit` and `TestConformance` parallel
2025-02-25 14:59:53 +00:00
openshift-merge-bot[bot] 9743a49322
Merge pull request #6004 from containers/renovate/tags.cncf.io-container-device-interface-0.x
fix(deps): update module tags.cncf.io/container-device-interface to v0.8.1
2025-02-24 23:23:31 +00:00
renovate[bot] ee83bf608e
fix(deps): update module tags.cncf.io/container-device-interface to v0.8.1
Signed-off-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com>
2025-02-24 16:41:09 +00:00
openshift-merge-bot[bot] 778213a7c6
Merge pull request #5997 from containers/renovate/github.com-moby-buildkit-0.x
fix(deps): update module github.com/moby/buildkit to v0.20.0
2025-02-20 15:41:36 +00:00
renovate[bot] 7ed99c6ae7
fix(deps): update module github.com/moby/buildkit to v0.20.0
Signed-off-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com>
2025-02-19 22:59:11 +00:00
openshift-merge-bot[bot] 5b8423e1bc
Merge pull request #5992 from nalind/remount-pivot
chroot createPlatformContainer: use MS_REMOUNT
2025-02-19 19:09:36 +00:00
Nalin Dahyabhai 699c84d40b chroot createPlatformContainer: use MS_REMOUNT
When setting mount propagation on the root mount before unmounting it,
use MS_REMOUNT|MS_BIND, since we know it's already a bind mount, and we actually
want to affect the extant bind mount instead of creating another right
over it. Otherwise, we might as well have not bothered.

Signed-off-by: Nalin Dahyabhai <nalin@redhat.com>
2025-02-19 13:29:20 -05:00
openshift-merge-bot[bot] 318f35e75c
Merge pull request #5994 from Luap99/cirrus-timeout
cirrus: reduce task timeout
2025-02-19 18:00:53 +00:00
flouthoc fca3ca46bf
conformance: make TestCommit and TestConformance parallel
Two tests `TestCommit` and `TestConformance` can be made parallel given
that we are already using multiple cores for conformance tests.

Signed-off-by: flouthoc <flouthoc.git@gmail.com>
2025-02-19 08:36:40 -08:00
openshift-merge-bot[bot] 5868f44b52
Merge pull request #5943 from nalind/container_name_as_hostname
Support the containers.conf container_name_as_hostname option
2025-02-19 14:07:03 +00:00
openshift-merge-bot[bot] 3d14858281
Merge pull request #5959 from flouthoc/integrate-experiment
Use tmpfs for integration tests
2025-02-19 14:04:18 +00:00
Paul Holzinger 36467356d6
cirrus: reduce task timeout
With all the recent speed-ups here the timeout is way too high; all tasks
should generally complete in under 30 minutes, and the smoke test in under
10 minutes as it does not do much.

In particular, I noticed at least two separate rootless integration tests
time out after 120 minutes [1,2]; obviously the tests do not take that long,
and they are hanging somewhere instead. With a lower timeout we do not waste
so much time when this happens.

[1] https://cirrus-ci.com/task/4733420225429504
[2] https://cirrus-ci.com/task/5597909967699968

Signed-off-by: Paul Holzinger <pholzing@redhat.com>
2025-02-19 14:06:19 +01:00
flouthoc c87fd8e191
mkcw: mkcw_check_image use bats run_with_log
Add `run_with_log` to mkcw tests.

Add `sleep 1` during cleanup between attempting `luksClose`
and unmounting the filesystem mounted on the device /dev/mapper/"$uuid".
Without this we somehow end up in a state where the mount is still being
used by the kernel: `lsof /dev/mapper/"$uuid"` shows nothing, but
`dmsetup info -c $uuid` shows the device is still in use. Adding
`sleep 1` in between somehow fixes this.

This problem with `cryptsetup` is also fairly common; for reference, one
thread I found is https://lore.kernel.org/all/508950BA.1030309@dennis.birkholz.biz/T/

Signed-off-by: flouthoc <flouthoc.git@gmail.com>
2025-02-18 14:50:04 -08:00
flouthoc d7d7878622
test: use /tmp as TMPDIR
use /tmp as TMPDIR so tests use tmpfs

Signed-off-by: flouthoc <flouthoc.git@gmail.com>
2025-02-18 14:50:01 -08:00
flouthoc efb28dcf7b
heredoc: create temp subdirs for each build
Signed-off-by: flouthoc <flouthoc.git@gmail.com>
2025-02-18 14:49:57 -08:00
flouthoc c86f554a18
test: heredoc remove python dependency from test
Use regular `cat` to test the same functionality instead
of using python image specifically for this part of test.

Signed-off-by: flouthoc <flouthoc.git@gmail.com>
2025-02-18 14:49:52 -08:00
Nalin Dahyabhai 72e2bf4c69 Support the containers.conf container_name_as_hostname option
When containers.conf has the "container_name_as_hostname" option set,
use the container's name as the hostname, with characters that don't fit
`[A-Za-z0-9][A-Za-z0-9.-]+` stripped out.

Signed-off-by: Nalin Dahyabhai <nalin@redhat.com>
2025-02-18 17:43:07 -05:00
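A rough Go sketch of stripping disallowed characters (the exact pattern and code in the commit may differ):

	re := regexp.MustCompile(`[^A-Za-z0-9.-]`)
	hostname := re.ReplaceAllString(containerName, "")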
openshift-merge-bot[bot] 5dc3c23a3e
Merge pull request #5989 from containers/renovate/github.com-opencontainers-runc-1.x
fix(deps): update module github.com/opencontainers/runc to v1.2.5
2025-02-18 22:37:03 +00:00
renovate[bot] a45e659216
fix(deps): update module github.com/opencontainers/runc to v1.2.5
Signed-off-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com>
2025-02-18 21:23:49 +00:00
Tom Sweeney 7fea494a9c
Merge pull request #5991 from containers/renovate/github.com-spf13-cobra-1.x
fix(deps): update module github.com/spf13/cobra to v1.9.0
2025-02-17 16:54:47 -05:00
renovate[bot] 6e5f6d8fd2
fix(deps): update module github.com/spf13/cobra to v1.9.0
Signed-off-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com>
2025-02-15 19:06:27 +00:00
openshift-merge-bot[bot] 6cc75b8e0f
Merge pull request #5986 from flouthoc/bump-smoke-cores
.cirrus: use more cores for smoke task
2025-02-11 21:13:02 +00:00
flouthoc b5e7a84d2e
.cirrus: use more cores for smoke
Signed-off-by: flouthoc <flouthoc.git@gmail.com>
2025-02-11 10:42:24 -08:00
openshift-merge-bot[bot] d5a4770e8b
Merge pull request #5982 from mheon/cncf_code_of_conduct
Switch to the CNCF Code of Conduct
2025-02-11 16:02:37 +00:00
openshift-merge-bot[bot] 57b0afcfb3
Merge pull request #5981 from containers/renovate/golang.org-x-crypto-0.x
fix(deps): update module golang.org/x/crypto to v0.33.0
2025-02-11 15:29:22 +00:00
Matt Heon bd62d5afc7 Switch to the CNCF Code of Conduct
As part of the CNCF Sandbox, we are replacing our existing COC
with the standard CNCF version.

Signed-off-by: Matt Heon <mheon@redhat.com>
2025-02-10 14:40:48 -05:00
openshift-merge-bot[bot] f89450213c
Merge pull request #5978 from nalind/cache-ownership
Distinguish --mount=type=cache locations by ownership, too
2025-02-10 19:31:25 +00:00
openshift-merge-bot[bot] a35f5fa04d
Merge pull request #5979 from flouthoc/bump-ci-resources
.cirrus: bump ci resources
2025-02-10 18:58:23 +00:00
flouthoc 03cd7821c9
.cirrus: bump ci resources
Timeplot of various tests after the resource bump

>> 28:49
| type        | user     | fs      |      d13 |      f40 |      f41 |
| ----------- | -------- | ------- | -------- | -------- | -------- |
| Unit        | root     | vfs     |          |    08:22 |          |
| Unit        | root     | overlay |          |    08:18 |          |
| Conformance | root     | vfs     |    20:52 |          |          |
| Conformance | root     | overlay |    11:55 |          |          |
| Integration | root     | vfs     |    19:15 |    17:36 |    17:18 |
| Integration | root     | overlay |    15:14 |          |    20:52 |
| Integration | rootless | overlay |    17:27 |          |    17:06 |

Timeplot of various tests before the test bump from PR #5978

>> 01:03:34
| type        | user     | fs      |      d13 |      f40 |      f41 |
| ----------- | -------- | ------- | -------- | -------- | -------- |
| Unit        | root     | vfs     |          |    08:31 |          |
| Unit        | root     | overlay |          |    08:15 |          |
| Conformance | root     | vfs     |    24:30 |          |          |
| Conformance | root     | overlay |    14:27 |          |          |
| Integration | root     | vfs     |    25:40 |    28:08 |    26:19 |
| Integration | root     | overlay |    23:15 |          |    24:10 |
| Integration | rootless | overlay |    27:01 |          |    28:46 |

Signed-off-by: flouthoc <flouthoc.git@gmail.com>
2025-02-10 06:54:41 -08:00
renovate[bot] ecd200a89c
fix(deps): update module golang.org/x/crypto to v0.33.0
Signed-off-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com>
2025-02-08 01:49:41 +00:00
Nalin Dahyabhai 24826435f8 Distinguish --mount=type=cache locations by ownership, too
Normally, we select and distinguish --mount=type=cache directories that
we create by either the "id" or "target" value used when mounting them,
but we should also be distinguishing them by the "uid" and "gid" flags,
or lack thereof.

Signed-off-by: Nalin Dahyabhai <nalin@redhat.com>
2025-02-07 13:38:56 -05:00
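A hedged Containerfile example of cache mounts that differ only by ownership, which this change keeps separate (ids and paths are made up):

	RUN --mount=type=cache,id=gocache,target=/root/.cache/go-build,uid=1000,gid=1000 go build ./...
	RUN --mount=type=cache,id=gocache,target=/root/.cache/go-build go build ./...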
openshift-merge-bot[bot] a3701cb97d
Merge pull request #5972 from containers/renovate/golang.org-x-term-0.x
fix(deps): update module golang.org/x/term to v0.29.0
2025-02-06 14:53:30 +00:00
renovate[bot] 4b234a19c1
fix(deps): update module golang.org/x/term to v0.29.0
Signed-off-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com>
2025-02-06 00:03:05 +00:00
openshift-merge-bot[bot] d5ed8b4788
Merge pull request #5954 from flouthoc/parallel-unit-test
CI: parallize unit tests
2025-02-05 21:28:43 +00:00
flouthoc 147a3ca916
.cirrus: run -race only on non-PR branch
Signed-off-by: flouthoc <flouthoc.git@gmail.com>
2025-02-05 06:57:14 -08:00
flouthoc de3c3baf09
unit: deparallize some tests
See issue: https://github.com/containers/buildah/issues/5967

Signed-off-by: flouthoc <flouthoc.git@gmail.com>
2025-02-04 11:07:43 -08:00
flouthoc 81479b200c
.cirrus: use multiple cpu for unit tests
Signed-off-by: flouthoc <flouthoc.git@gmail.com>
2025-02-04 11:07:36 -08:00
flouthoc 1879f3dc65
Makefile: use -parallel for go test
Signed-off-by: flouthoc <flouthoc.git@gmail.com>
2025-02-04 11:07:27 -08:00
flouthoc aadfc5cf30
unit_test: use Parallel test where possible
Add `t.Parallel()` to unit tests wherever it's possible without races.

Signed-off-by: flouthoc <flouthoc.git@gmail.com>
2025-02-04 11:07:15 -08:00
openshift-merge-bot[bot] ae5e123314
Merge pull request #5970 from containers/renovate/golang.org-x-sys-0.x
Update module golang.org/x/sys to v0.30.0
2025-02-04 17:53:15 +00:00
renovate[bot] 3f9bc73d59
Update module golang.org/x/sys to v0.30.0
Signed-off-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com>
2025-02-04 16:08:05 +00:00
openshift-merge-bot[bot] 30bf35f3f1
Merge pull request #5969 from containers/renovate/golang.org-x-sync-0.x
Update module golang.org/x/sync to v0.11.0
2025-02-04 16:05:52 +00:00
renovate[bot] 04febc5813
Update module golang.org/x/sync to v0.11.0
Signed-off-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com>
2025-02-04 14:39:20 +00:00
openshift-merge-bot[bot] 324df7da73
Merge pull request #5966 from containers/renovate/major-ci-vm-image
Update dependency containers/automation_images to v20250131
2025-02-03 21:24:54 +00:00
renovate[bot] a13fa87ea2
Update dependency containers/automation_images to v20250131
Signed-off-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com>
2025-02-03 17:03:29 +00:00
openshift-merge-bot[bot] 53872d88fa
Merge pull request #5962 from TomSweeneyRedHat/dev/tsweeney/1.39.0
Bump Buildah to v1.39.0, c/storage v1.57.1, c/image v5.34.0, c/common v0.62.0
2025-02-03 15:54:59 +00:00
tomsweeneyredhat 6c3d7546e3 Bump to Buildah v1.40.0-dev
Bumping back to a dev version.

Signed-off-by: tomsweeneyredhat <tsweeney@redhat.com>
2025-01-31 18:19:08 -05:00
5054 changed files with 62904 additions and 922763 deletions


@ -22,18 +22,20 @@ env:
IN_PODMAN: 'false'
# root or rootless
PRIV_NAME: root
# default "mention the $BUILDAH_RUNTIME in the task alias, with initial whitespace" value
RUNTIME_N: ""
####
#### Cache-image names to test with
####
# GCE project where images live
IMAGE_PROJECT: "libpod-218412"
FEDORA_NAME: "fedora-41"
PRIOR_FEDORA_NAME: "fedora-40"
FEDORA_NAME: "fedora-42"
PRIOR_FEDORA_NAME: "fedora-41"
DEBIAN_NAME: "debian-13"
# Image identifiers
IMAGE_SUFFIX: "c20250107t132430z-f41f40d13"
IMAGE_SUFFIX: "c20250422t130822z-f42f41d13"
FEDORA_CACHE_IMAGE_NAME: "fedora-${IMAGE_SUFFIX}"
PRIOR_FEDORA_CACHE_IMAGE_NAME: "prior-fedora-${IMAGE_SUFFIX}"
DEBIAN_CACHE_IMAGE_NAME: "debian-${IMAGE_SUFFIX}"
@ -50,7 +52,7 @@ env:
gcp_credentials: ENCRYPTED[ae0bf7370f0b6e446bc61d0865a2c55d3e166b3fab9466eb0393e38e1c66a31ca4c71ddc7e0139d47d075c36dd6d3fd7]
# Default timeout for each task
timeout_in: 120m
timeout_in: 30m
# Default VM to use unless set or modified by task
gce_instance: &standardvm
@ -95,12 +97,12 @@ smoke_task:
gce_instance:
memory: "12G"
cpu: 4
cpu: 8
# Don't bother running on branches (including cron), or for tags.
skip: $CIRRUS_PR == ''
timeout_in: 30m
timeout_in: 10m
setup_script: '${SCRIPT_BASE}/setup.sh |& ${_TIMESTAMP}'
build_script: '${SCRIPT_BASE}/build.sh |& ${_TIMESTAMP}'
@ -122,7 +124,7 @@ vendor_task:
# Runs within Cirrus's "community cluster"
container:
image: docker.io/library/golang:1.22
image: docker.io/library/golang:1.23.3
cpu: 1
memory: 1
@ -154,6 +156,8 @@ cross_build_task:
unit_task:
name: 'Unit tests w/ $STORAGE_DRIVER'
gce_instance:
cpu: 4
alias: unit
skip: &not_build_docs >-
$CIRRUS_CHANGE_TITLE =~ '.*CI:DOCS.*' ||
@ -162,8 +166,6 @@ unit_task:
- smoke
- vendor
timeout_in: 90m
matrix:
- env:
STORAGE_DRIVER: 'vfs'
@ -181,10 +183,9 @@ conformance_task:
depends_on: *smoke_vendor
gce_instance:
cpu: 4
image_name: "${DEBIAN_CACHE_IMAGE_NAME}"
timeout_in: 65m
matrix:
- env:
STORAGE_DRIVER: 'vfs'
@ -197,7 +198,7 @@ conformance_task:
integration_task:
name: "Integration $DISTRO_NV w/ $STORAGE_DRIVER"
name: "Integration $DISTRO_NV$RUNTIME_N w/ $STORAGE_DRIVER"
alias: integration
skip: *not_build_docs
depends_on: *smoke_vendor
@ -208,11 +209,26 @@ integration_task:
DISTRO_NV: "${FEDORA_NAME}"
IMAGE_NAME: "${FEDORA_CACHE_IMAGE_NAME}"
STORAGE_DRIVER: 'vfs'
# Disabled until we update to f40/41 as f39 does not have go 1.22
# - env:
# DISTRO_NV: "${PRIOR_FEDORA_NAME}"
# IMAGE_NAME: "${PRIOR_FEDORA_CACHE_IMAGE_NAME}"
# STORAGE_DRIVER: 'vfs'
BUILDAH_RUNTIME: crun
RUNTIME_N: " using crun"
- env:
DISTRO_NV: "${FEDORA_NAME}"
IMAGE_NAME: "${FEDORA_CACHE_IMAGE_NAME}"
STORAGE_DRIVER: 'vfs'
BUILDAH_RUNTIME: runc
RUNTIME_N: " using runc"
- env:
DISTRO_NV: "${PRIOR_FEDORA_NAME}"
IMAGE_NAME: "${PRIOR_FEDORA_CACHE_IMAGE_NAME}"
STORAGE_DRIVER: 'vfs'
BUILDAH_RUNTIME: crun
RUNTIME_N: " using crun"
- env:
DISTRO_NV: "${PRIOR_FEDORA_NAME}"
IMAGE_NAME: "${PRIOR_FEDORA_CACHE_IMAGE_NAME}"
STORAGE_DRIVER: 'vfs'
BUILDAH_RUNTIME: runc
RUNTIME_N: " using runc"
- env:
DISTRO_NV: "${DEBIAN_NAME}"
IMAGE_NAME: "${DEBIAN_CACHE_IMAGE_NAME}"
@ -222,11 +238,26 @@ integration_task:
DISTRO_NV: "${FEDORA_NAME}"
IMAGE_NAME: "${FEDORA_CACHE_IMAGE_NAME}"
STORAGE_DRIVER: 'overlay'
# Disabled until we update to f40/41 as f39 does not have go 1.22
# - env:
# DISTRO_NV: "${PRIOR_FEDORA_NAME}"
# IMAGE_NAME: "${PRIOR_FEDORA_CACHE_IMAGE_NAME}"
# STORAGE_DRIVER: 'overlay'
BUILDAH_RUNTIME: crun
RUNTIME_N: " using crun"
- env:
DISTRO_NV: "${FEDORA_NAME}"
IMAGE_NAME: "${FEDORA_CACHE_IMAGE_NAME}"
STORAGE_DRIVER: 'overlay'
BUILDAH_RUNTIME: runc
RUNTIME_N: " using runc"
- env:
DISTRO_NV: "${PRIOR_FEDORA_NAME}"
IMAGE_NAME: "${PRIOR_FEDORA_CACHE_IMAGE_NAME}"
STORAGE_DRIVER: 'overlay'
BUILDAH_RUNTIME: crun
RUNTIME_N: " using crun"
- env:
DISTRO_NV: "${PRIOR_FEDORA_NAME}"
IMAGE_NAME: "${PRIOR_FEDORA_CACHE_IMAGE_NAME}"
STORAGE_DRIVER: 'overlay'
BUILDAH_RUNTIME: runc
RUNTIME_N: " using runc"
- env:
DISTRO_NV: "${DEBIAN_NAME}"
IMAGE_NAME: "${DEBIAN_CACHE_IMAGE_NAME}"
@ -234,7 +265,8 @@ integration_task:
gce_instance:
image_name: "$IMAGE_NAME"
cpu: 4
cpu: 8
memory: "8G"
# Separate scripts for separate outputs, makes debugging easier.
setup_script: '${SCRIPT_BASE}/setup.sh |& ${_TIMESTAMP}'
@ -255,7 +287,7 @@ integration_task:
golang_version_script: '$GOSRC/$SCRIPT_BASE/logcollector.sh golang'
integration_rootless_task:
name: "Integration rootless $DISTRO_NV w/ $STORAGE_DRIVER"
name: "Integration rootless $DISTRO_NV$RUNTIME_N w/ $STORAGE_DRIVER"
alias: integration_rootless
skip: *not_build_docs
depends_on: *smoke_vendor
@ -268,12 +300,29 @@ integration_rootless_task:
IMAGE_NAME: "${FEDORA_CACHE_IMAGE_NAME}"
STORAGE_DRIVER: 'overlay'
PRIV_NAME: rootless
# Disabled until we update to f40/41 as f39 does not have go 1.22
# - env:
# DISTRO_NV: "${PRIOR_FEDORA_NAME}"
# IMAGE_NAME: "${PRIOR_FEDORA_CACHE_IMAGE_NAME}"
# STORAGE_DRIVER: 'overlay'
# PRIV_NAME: rootless
BUILDAH_RUNTIME: runc
RUNTIME_N: " using runc"
- env:
DISTRO_NV: "${FEDORA_NAME}"
IMAGE_NAME: "${FEDORA_CACHE_IMAGE_NAME}"
STORAGE_DRIVER: 'overlay'
PRIV_NAME: rootless
BUILDAH_RUNTIME: crun
RUNTIME_N: " using crun"
- env:
DISTRO_NV: "${PRIOR_FEDORA_NAME}"
IMAGE_NAME: "${PRIOR_FEDORA_CACHE_IMAGE_NAME}"
STORAGE_DRIVER: 'overlay'
PRIV_NAME: rootless
BUILDAH_RUNTIME: runc
RUNTIME_N: " using runc"
- env:
DISTRO_NV: "${PRIOR_FEDORA_NAME}"
IMAGE_NAME: "${PRIOR_FEDORA_CACHE_IMAGE_NAME}"
STORAGE_DRIVER: 'overlay'
PRIV_NAME: rootless
BUILDAH_RUNTIME: crun
RUNTIME_N: " using crun"
- env:
DISTRO_NV: "${DEBIAN_NAME}"
IMAGE_NAME: "${DEBIAN_CACHE_IMAGE_NAME}"
@ -282,7 +331,8 @@ integration_rootless_task:
gce_instance:
image_name: "$IMAGE_NAME"
cpu: 4
cpu: 8
memory: "8G"
# Separate scripts for separate outputs, makes debugging easier.
setup_script: '${SCRIPT_BASE}/setup.sh |& ${_TIMESTAMP}'
@ -302,7 +352,8 @@ in_podman_task:
depends_on: *smoke_vendor
gce_instance:
cpu: 4
cpu: 8
memory: "8G"
env:
# This is key, cause the scripts to re-execute themselves inside a container.
@ -339,6 +390,7 @@ success_task:
- vendor
- cross_build
- integration
- integration_rootless
- in_podman
container:

1
.fmf/version Normal file

@ -0,0 +1 @@
1


@ -1,71 +0,0 @@
<!--
If you are reporting a new issue, make sure that we do not have any duplicates
already open. You can ensure this by searching the issue list for this
repository. If there is a duplicate, please close your issue and add a comment
to the existing issue instead.
If you suspect your issue is a bug, please edit your issue description to
include the BUG REPORT INFORMATION shown below. If you fail to provide this
information within 7 days, we cannot debug your issue and will close it. We
will, however, reopen it if you later provide the information.
---------------------------------------------------
BUG REPORT INFORMATION
---------------------------------------------------
Use the commands below to provide key information from your environment:
You do NOT have to include this information if this is a FEATURE REQUEST
-->
**Description**
<!--
Briefly describe the problem you are having in a few paragraphs.
-->
**Steps to reproduce the issue:**
1.
2.
3.
**Describe the results you received:**
**Describe the results you expected:**
**Output of `rpm -q buildah` or `apt list buildah`:**
```
(paste your output here)
```
**Output of `buildah version`:**
```
(paste your output here)
```
**Output of `podman version` if reporting a `podman build` issue:**
```
(paste your output here)
```
**Output of `cat /etc/*release`:**
```
(paste your output here)
```
**Output of `uname -a`:**
```
(paste your output here)
```
**Output of `cat /etc/containers/storage.conf`:**
```
(paste your output here)
```


@ -0,0 +1,99 @@
---
name: Buildah Bug Report
description: File a Buildah bug report
labels: ["kind/bug", "triage-needed"]
body:
- type: markdown
attributes:
value: |
Thanks for taking the time to fill out this bug report!
**NOTE** A large number of issues reported against Buildah are often found to already be fixed in more current versions of the project.
Before reporting an issue, please verify the version you are running with `buildah version` and compare it to the latest released version under
[releases](https://github.com/containers/buildah/releases).
If they differ, please update your version of Buildah to the latest possible and retry your command before creating an issue.
Commands you might need to run to create the issue
$ `buildah version`
$ `buildah info`
$ `rpm -q buildah` or `apt list buildah`
- type: textarea
id: description
attributes:
label: Issue Description
description: Please explain your issue
value: "Describe your issue"
validations:
required: true
- type: textarea
id: reproducer
attributes:
label: Steps to reproduce the issue
description: Please explain the steps to reproduce the issue
value: "Steps to reproduce the issue\n1.\n2.\n3.\n"
validations:
required: true
- type: textarea
id: received_results
attributes:
label: Describe the results you received
description: Please explain the results you are noticing
value: "Describe the results you received"
validations:
required: true
- type: textarea
id: expected_results
attributes:
label: Describe the results you expected
description: Please explain the results you are expecting
value: "Describe the results you expected"
validations:
required: true
- type: textarea
id: buildah_version
attributes:
label: buildah version output
description: Please copy and paste `buildah version` output.
value: If you are unable to run `buildah version` for any reason, please provide the output of `rpm -q buildah` or `apt list buildah`.
render: yaml
validations:
required: true
- type: textarea
id: buildah_info
attributes:
label: buildah info output
description: Please copy and paste `buildah info` output.
value: If you are unable to run `buildah info` for any reason, please provide the operating system and its version and the architecture you are running.
render: yaml
validations:
required: true
- type: textarea
id: storage_conf
attributes:
label: Provide your storage.conf
description: "Please provide the relevant [storage.conf file](https://github.com/containers/storage/blob/main/docs/containers-storage.conf.5.md#files)"
render: toml
validations:
required: true
- type: dropdown
id: upstream_latest
attributes:
label: Upstream Latest Release
description: Have you tried running the [latest upstream release](https://github.com/containers/buildah/releases/latest)?
options:
- "Yes"
- "No"
validations:
required: true
- type: textarea
id: additional_environment
attributes:
label: Additional environment details
description: Please describe any additional environment details (AWS, VirtualBox, ...)
value: "Additional environment details"
- type: textarea
id: additional_info
attributes:
label: Additional information
description: Please explain the additional information you deem important
value: "Additional information like issue happens only occasionally or issue happens with a particular architecture or on a particular setting"

11
.github/ISSUE_TEMPLATE/config.yaml vendored Normal file
View File

@ -0,0 +1,11 @@
---
blank_issues_enabled: true
contact_links:
- name: Ask a question
url: https://github.com/containers/buildah/discussions/new
about: Ask a question about Buildah
- name: If your issue is a general Podman issue unrelated to `podman build`, please open an issue in the Podman repository. If the issue is with the `podman build` command, please report it here.
url: https://github.com/containers/podman/issues
about: Please report issues with Podman here.

View File

@ -0,0 +1,133 @@
---
name: Podman Build Bug Report
description: File a Podman build bug report
labels: ["kind/bug", "triage-needed"]
body:
- type: markdown
attributes:
value: |
Thanks for taking the time to fill out this bug report!
**NOTE** Many issues reported against Buildah turn out to have already been fixed in more current versions of the project.
Before reporting an issue, please verify the version you are running with `podman version` and compare it to the latest released version under
[releases](https://github.com/containers/podman/releases).
If they differ, please update your version of Podman to the latest possible and retry your command before creating an issue.
Commands you might need to run to create the issue
$ `podman version`
$ `podman info`
$ `rpm -q podman` or `apt list podman`
- type: textarea
id: description
attributes:
label: Issue Description
description: Please explain your issue
value: "Describe your issue"
validations:
required: true
- type: textarea
id: reproducer
attributes:
label: Steps to reproduce the issue
description: Please explain the steps to reproduce the issue
value: "Steps to reproduce the issue\n1.\n2.\n3.\n"
validations:
required: true
- type: textarea
id: received_results
attributes:
label: Describe the results you received
description: Please explain the results you are noticing
value: "Describe the results you received"
validations:
required: true
- type: textarea
id: expected_results
attributes:
label: Describe the results you expected
description: Please explain the results you are expecting
value: "Describe the results you expected"
validations:
required: true
- type: textarea
id: podman_version
attributes:
label: podman version output
description: Please copy and paste `podman version` output.
value: If you are unable to run `podman version` for any reason, please provide the output of `rpm -q podman` or `apt list podman`.
render: yaml
validations:
required: true
- type: textarea
id: podman_info
attributes:
label: podman info output
description: Please copy and paste `podman info` output.
value: If you are unable to run `podman info` for any reason, please provide the operating system and its version and the architecture you are running.
render: yaml
validations:
required: true
- type: textarea
id: storage_conf
attributes:
label: Provide your storage.conf
description: "Please provide the relevant [storage.conf file](https://github.com/containers/storage/blob/main/docs/containers-storage.conf.5.md#files)"
render: toml
validations:
required: true
- type: dropdown
id: podman_in_a_container
attributes:
label: Podman in a container
description: Please select Yes if you are running Podman in a container
options:
- "No"
- "Yes"
validations:
required: true
- type: dropdown
id: privileged_rootless
attributes:
label: Privileged Or Rootless
description: Are you running the containers as privileged or as a non-root user? Note that using `su` or `sudo` does not establish a proper login session required for running
Podman as a non-root user. Please refer to the [troubleshooting guide](https://github.com/containers/podman/blob/main/troubleshooting.md#solution-28) for alternatives.
options:
- Privileged
- Rootless
- type: dropdown
id: upstream_latest
attributes:
label: Upstream Latest Release
description: Have you tried running the [latest upstream release](https://github.com/containers/podman/releases/latest)?
options:
- "Yes"
- "No"
validations:
required: true
- type: dropdown
id: installation_source
attributes:
label: Installation Source
description: What installation type did you use?
multiple: false
options:
- Distribution package (DNF, apt, yay)
- Brew
- Official Podman Installer (Mac)
- Podman Desktop
default: 0
validations:
required: true
- type: textarea
id: additional_environment
attributes:
label: Additional environment details
description: Please describe any additional environment details (AWS, VirtualBox, ...)
value: "Additional environment details"
- type: textarea
id: additional_info
attributes:
label: Additional information
description: Please explain the additional information you deem important
value: "Additional information like issue happens only occasionally or issue happens with a particular architecture or on a particular setting"

View File

@ -17,4 +17,9 @@ jobs:
# Ref: https://docs.github.com/en/actions/using-workflows/reusing-workflows
call_cron_failures:
uses: containers/podman/.github/workflows/check_cirrus_cron.yml@main
secrets: inherit
secrets:
SECRET_CIRRUS_API_KEY: ${{secrets.SECRET_CIRRUS_API_KEY}}
ACTION_MAIL_SERVER: ${{secrets.ACTION_MAIL_SERVER}}
ACTION_MAIL_USERNAME: ${{secrets.ACTION_MAIL_USERNAME}}
ACTION_MAIL_PASSWORD: ${{secrets.ACTION_MAIL_PASSWORD}}
ACTION_MAIL_SENDER: ${{secrets.ACTION_MAIL_SENDER}}

View File

@ -1,19 +0,0 @@
---
# See also: https://github.com/containers/podman/blob/main/.github/workflows/rerun_cirrus_cron.yml
on:
# Note: This only applies to the default branch.
schedule:
# N/B: This should correspond to a period slightly after
# the last job finishes running. See job defs. at:
# https://cirrus-ci.com/settings/repository/6706677464432640
- cron: '01 01 * * 1-5'
# Debug: Allow triggering job manually in github-actions WebUI
workflow_dispatch: {}
jobs:
# Ref: https://docs.github.com/en/actions/using-workflows/reusing-workflows
call_cron_rerun:
uses: containers/podman/.github/workflows/rerun_cirrus_cron.yml@main
secrets: inherit

View File

@ -1,16 +1,29 @@
---
version: "2"
run:
build-tags:
- apparmor
- seccomp
- selinux
# Don't exceed number of threads available when running under CI
concurrency: 4
formatters:
enable:
- gofumpt
linters:
enable:
- gofmt
- gofumpt
- nolintlint
- revive
- unconvert
- unparam
- unused
- whitespace
exclusions:
presets:
- comments
- std-error-handling
settings:
staticcheck:
checks:
- all
- -QF1008 # https://staticcheck.dev/docs/checks/#QF1008 Omit embedded fields from selector expression.

View File

@ -5,6 +5,27 @@
downstream_package_name: buildah
upstream_tag_template: v{version}
# These files get synced from upstream to downstream (Fedora / CentOS Stream) on every
# propose-downstream job. This is done so tests maintained upstream can be run
# downstream in Zuul CI and Bodhi.
# Ref: https://packit.dev/docs/configuration#files_to_sync
files_to_sync:
- src: rpm/gating.yaml
dest: gating.yaml
delete: true
- src: plans/
dest: plans/
delete: true
mkpath: true
- src: tests/tmt/
dest: tests/tmt/
delete: true
mkpath: true
- src: .fmf/
dest: .fmf/
delete: true
- .packit.yaml
packages:
buildah-fedora:
pkg_tool: fedpkg
@ -25,13 +46,19 @@ jobs:
notifications: &copr_build_failure_notification
failure_comment:
message: "Ephemeral COPR build failed. @containers/packit-build please check."
targets:
# Fedora aliases documentation: https://packit.dev/docs/configuration#aliases
# python3-fedora-distro-aliases provides `resolve-fedora-aliases` command
targets: &fedora_copr_targets
- fedora-all-x86_64
- fedora-all-aarch64
enable_net: true
# Disable osh diff scan until Go support is available
# Ref: https://github.com/openscanhub/known-false-positives/pull/30#issuecomment-2858698495
osh_diff_scan_after_copr_build: false
# Ignore until golang is updated in distro buildroot to 1.23.3+
- job: copr_build
trigger: pull_request
trigger: ignore
packages: [buildah-eln]
notifications: *copr_build_failure_notification
targets:
@ -43,11 +70,12 @@ jobs:
- "https://kojipkgs.fedoraproject.org/repos/eln-build/latest/aarch64/"
enable_net: true
# Ignore until golang is updated in distro buildroot to 1.23.3+
- job: copr_build
trigger: pull_request
trigger: ignore
packages: [buildah-centos]
notifications: *copr_build_failure_notification
targets:
targets: &centos_copr_targets
- centos-stream-9-x86_64
- centos-stream-9-aarch64
- centos-stream-10-x86_64
@ -66,6 +94,32 @@ jobs:
project: podman-next
enable_net: true
# Tests on Fedora for main branch PRs
- job: tests
trigger: pull_request
packages: [buildah-fedora]
targets:
- fedora-all-x86_64
tf_extra_params:
environments:
- artifacts:
- type: repository-file
id: https://copr.fedorainfracloud.org/coprs/rhcontainerbot/podman-next/repo/fedora-$releasever/rhcontainerbot-podman-next-fedora-$releasever.repo
# Ignore until golang is updated in distro buildroot to 1.23.3+
# Tests on CentOS Stream for main branch PRs
- job: tests
trigger: ignore
packages: [buildah-centos]
targets:
- centos-stream-9-x86_64
- centos-stream-10-x86_64
tf_extra_params:
environments:
- artifacts:
- type: repository-file
id: https://copr.fedorainfracloud.org/coprs/rhcontainerbot/podman-next/repo/centos-stream-$releasever/rhcontainerbot-podman-next-centos-stream-$releasever.repo
# Sync to Fedora
- job: propose_downstream
trigger: release

View File

@ -2,6 +2,135 @@
# Changelog
## v1.40.0 (2025-04-17)
Bump c/storage to v1.58.0, c/image v5.35.0, c/common v0.63.0
fix(deps): update module github.com/docker/docker to v28.1.0+incompatible
fix(deps): update module github.com/containers/storage to v1.58.0
cirrus: make Total Success wait for rootless integration
chroot: use symbolic names when complaining about mount() errors
cli: hide the `completion` command instead of disabling it outright
Document rw and src options for --mount flag in buildah-run(1)
fix(deps): update module github.com/moby/buildkit to v0.21.0
build: add support for inherit-labels
chore(deps): update dependency golangci/golangci-lint to v2.1.0
.github: check_cirrus_cron work around github bug
stage_executor,getCreatedBy: expand buildArgs for sources correctly
Add a link to project governance and MAINTAINERS file
fix(deps): update github.com/containers/storage digest to b1d1b45
generateHostname: simplify
Use maps.Copy
Use slices.Concat
Use slices.Clone
Use slices.Contains
Use for range over integers
tests/testreport: don't copy os.Environ
Use any instead of interface{}
ci: add golangci-lint run with --tests=false
ci: add nolintlint, fix found issues
copier: rm nolint:unparam annotation
.golangci.yml: add unused linter
chroot: fix unused warnings
copier: fix unused warnings
tests/conformance: fix unused warning
ci: switch to golangci-lint v2
internal/mkcw: disable ST1003 warnings
tests/conformance: do not double import (fix ST1019)
cmd/buildah: don't double import (fix ST1019)
Do not capitalize error strings
cmd/buildah: do not capitalize error strings
tests/conformance: fix QF1012 warnings
tests/serve: fix QF1012 warning
Use strings.ReplaceAll to fix QF1004 warnings
Use switch to fix QF1003 warnings
Apply De Morgan's law to fix QF1001 warnings
Fix QF1007 staticcheck warnings
imagebuildah: fix revive warning
Rename max variable
tests/tools: install lint from binary, use renovate
fix(deps): update module github.com/containernetworking/cni to v1.3.0
Update Buildah issue template to new version and support podman build
fix(deps): update module golang.org/x/crypto to v0.37.0
stage_executor: reset platform in systemcontext for stages
fix(deps): update github.com/opencontainers/runtime-tools digest to 260e151
cmd/buildah: rm unused containerOutputUsingTemplate
cmd/buildah: rm unused getDateAndDigestAndSize
build: return ExecErrorCodeGeneric when git operation fails
add: report error while creating dir for URL source.
createPlatformContainer: drop MS_REMOUNT|MS_BIND
fix(deps): update module github.com/docker/docker to v28.0.3+incompatible
fix: bats won't fail on ! without cleverness
feat: use HistoryTimestamp, if set, for oci-archive entries
Allow extendedGlob to work with Windows paths
fix(deps): update module github.com/moby/buildkit to v0.20.2
fix(deps): update github.com/openshift/imagebuilder digest to e87e4e1
fix(deps): update module github.com/docker/docker to v28.0.2+incompatible
fix(deps): update module tags.cncf.io/container-device-interface to v1.0.1
chore(deps): update dependency containers/automation_images to v20250324
vendor: update github.com/opencontainers/selinux to v1.12.0
replace deprecated selinux/label calls
vendor: bump c/common to dbeb17e40c80
Use builtin arg defaults from imagebuilder
linux: accept unmask paths as glob values
vendor: update containers/common
Add --parents option for COPY in Dockerfiles
fix(deps): update module github.com/opencontainers/runc to v1.2.6
update go.sum from the previous commit
fix(deps): update module tags.cncf.io/container-device-interface to v1
chore(deps): update module golang.org/x/net to v0.36.0 [security]
packit: remove f40 from copr builds
cirrus: update to go 1.23 image
vendor bump to golang.org/x/crypto v0.36.0
cirrus: update PRIOR_FEDORA comment
github: remove cirrus rerun action
fix(deps): update module github.com/containers/common to v0.62.2
fix(deps): update module github.com/containers/image/v5 to v5.34.2
fix: close files properly when BuildDockerfiles exits
fix(deps): update module github.com/containers/storage to v1.57.2
stage_executor: history should include heredoc summary correctly
fix(deps): update module github.com/containers/common to v0.62.1
github: disable cron rerun action
fix(deps): update module github.com/moby/buildkit to v0.20.1
internal/mkcw.Archive(): use github.com/containers/storage/pkg/ioutils
[skip-ci] TMT: system tests
buildah-build.1.md: secret examples
fix(deps): update github.com/containers/luksy digest to 40bd943
fix(deps): update module github.com/opencontainers/image-spec to v1.1.1
fix(deps): update module github.com/containers/image/v5 to v5.34.1
Use UnparsedInstance.Manifest instead of ImageSource.GetManifest
fix(deps): update module github.com/opencontainers/runtime-spec to v1.2.1
tests/conformance/testdata/Dockerfile.add: update some URLs
Vendor imagebuilder
Fix source of OS, architecture and variant
chore(deps): update module github.com/go-jose/go-jose/v4 to v4.0.5 [security]
fix(deps): update module tags.cncf.io/container-device-interface to v0.8.1
fix(deps): update module github.com/moby/buildkit to v0.20.0
chroot createPlatformContainer: use MS_REMOUNT
conformance: make TestCommit and TestConformance parallel
cirrus: reduce task timeout
mkcw: mkcw_check_image use bats run_with_log
test: use /tmp as TMPDIR
heredoc: create temp subdirs for each build
test: heredoc remove python dependency from test
Support the containers.conf container_name_as_hostname option
fix(deps): update module github.com/opencontainers/runc to v1.2.5
fix(deps): update module github.com/spf13/cobra to v1.9.0
.cirrus: use more cores for smoke
Switch to the CNCF Code of Conduct
.cirrus: bump ci resources
fix(deps): update module golang.org/x/crypto to v0.33.0
Distinguish --mount=type=cache locations by ownership, too
fix(deps): update module golang.org/x/term to v0.29.0
.cirrus: run -race only on non-PR branch
unit: deparallize some tests
.cirrus: use multiple cpu for unit tests
Makefile: use -parallel for go test
unit_test: use Parallel test where possible
Update module golang.org/x/sys to v0.30.0
Update module golang.org/x/sync to v0.11.0
Update dependency containers/automation_images to v20250131
Bump to Buildah v1.40.0-dev
## v1.39.0 (2025-01-31)
Bump c/storage v1.57.1, c/image 5.34.0, c/common v0.62.0

View File

@ -1,3 +1,3 @@
## The Buildah Project Community Code of Conduct
The Buildah Project follows the [Containers Community Code of Conduct](https://github.com/containers/common/blob/main/CODE-OF-CONDUCT.md).
The Buildah Project, as part of Podman Container Tools, follows the [CNCF Code of Conduct](https://github.com/cncf/foundation/blob/main/code-of-conduct.md).

View File

@ -173,5 +173,5 @@ Normally, a maintainer will only be removed if they are considered to be
inactive for a long period of time or are viewed as disruptive to the community.
The current list of maintainers can be found in the
[MAINTAINERS](MAINTAINERS) file.
[MAINTAINERS](./MAINTAINERS.md) file.

12
GOVERNANCE.md Normal file
View File

@ -0,0 +1,12 @@
## The Buildah Project Community Governance
The Buildah project, as part of Podman Container Tools, follows the [Podman Project Governance](https://github.com/containers/podman/blob/main/GOVERNANCE.md)
except for the sections found in this document, which override the corresponding sections of Podman's Governance.
---
# Maintainers File
The definitive source of truth for maintainers of this repository is the local [MAINTAINERS.md](./MAINTAINERS.md) file. The [MAINTAINERS.md](https://github.com/containers/podman/blob/main/MAINTAINERS.md) file in the main Podman repository is used for project-spanning roles, including Core Maintainer and Community Manager. Some repositories in the project will also have a local [OWNERS](./OWNERS) file, which the CI system uses to map users to roles. Any change to the [OWNERS](./OWNERS) file must be accompanied by a corresponding change to the [MAINTAINERS.md](./MAINTAINERS.md) file to ensure that the file remains up to date. Most changes to [MAINTAINERS.md](./MAINTAINERS.md) will require a change to the repository's [OWNERS](./OWNERS) file (e.g., adding a Reviewer), but some will not (e.g., promoting a Maintainer to a Core Maintainer, which comes with no additional CI-related privileges).
Any Core Maintainers listed in Podman's [MAINTAINERS.md](https://github.com/containers/podman/blob/main/MAINTAINERS.md) file should also be added to the list of “approvers” in the local [OWNERS](./OWNERS) file and as a Core Maintainer in the list of “Maintainers” in the local [MAINTAINERS.md](./MAINTAINERS.md) file.

View File

@ -1,4 +0,0 @@
Dan Walsh <dwalsh@redhat.com> (@rhatdan)
Nalin Dahyabhai <nalin@redhat.com> (@nalind)
Tom Sweeney <tsweeney@redhat.com> (@tomsweeneyredhat)
Urvashi Mohnani <umohnani@redhat.com> (@umohnani8)

35
MAINTAINERS.md Normal file
View File

@ -0,0 +1,35 @@
# Buildah Maintainers
[GOVERNANCE.md](GOVERNANCE.md)
describes the project's governance and the Project Roles used below.
## Maintainers
| Maintainer | GitHub ID | Project Roles | Affiliation |
|-------------------|----------------------------------------------------------|----------------------------------|----------------------------------------------|
| Brent Baude | [baude](https://github.com/baude) | Core Maintainer | [Red Hat](https://github.com/RedHatOfficial) |
| Nalin Dahyabhai | [nalind](https://github.com/nalind) | Core Maintainer | [Red Hat](https://github.com/RedHatOfficial) |
| Matthew Heon | [mheon](https://github.com/mheon) | Core Maintainer | [Red Hat](https://github.com/RedHatOfficial) |
| Paul Holzinger | [Luap99](https://github.com/Luap99) | Core Maintainer | [Red Hat](https://github.com/RedHatOfficial) |
| Giuseppe Scrivano | [giuseppe](https://github.com/giuseppe) | Core Maintainer | [Red Hat](https://github.com/RedHatOfficial) |
| Miloslav Trmač | [mtrmac](https://github.com/mtrmac) | Core Maintainer | [Red Hat](https://github.com/RedHatOfficial) |
| Neil Smith | [actionmancan](https://github.com/actionmancan) | Community Manager | [Red Hat](https://github.com/RedHatOfficial) |
| Tom Sweeney | [TomSweeneyRedHat](https://github.com/TomSweeneyRedHat/) | Maintainer and Community Manager | [Red Hat](https://github.com/RedHatOfficial) |
| Lokesh Mandvekar | [lsm5](https://github.com/lsm5) | Maintainer | [Red Hat](https://github.com/RedHatOfficial) |
| Aditya Rajan | [flouthoc](https://github.com/flouthoc) | Maintainer | [Red Hat](https://github.com/RedHatOfficial) |
| Dan Walsh | [rhatdan](https://github.com/rhatdan) | Maintainer | [Red Hat](https://github.com/RedHatOfficial) |
| Ashley Cui | [ashley-cui](https://github.com/ashley-cui) | Reviewer | [Red Hat](https://github.com/RedHatOfficial) |
| Jan Rodák | [Honny1](https://github.com/Honny1) | Reviewer | [Red Hat](https://github.com/RedHatOfficial) |
| Valentin Rothberg | [vrothberg](https://github.com/vrothberg) | Reviewer | [Red Hat](https://github.com/RedHatOfficial) |
## Alumni
None at present
## Credits
The structure of this document was based on the equivalent one in the [CRI-O Project](https://github.com/cri-o/cri-o/blob/main/MAINTAINERS.md).
## Note
If there is a discrepancy between the [MAINTAINERS.md](https://github.com/containers/podman/blob/main/MAINTAINERS.md) file in the main Podman repository and this file regarding Core Maintainers or Community Managers, the file in the Podman Repository is considered the source of truth.

View File

@ -1,9 +1,9 @@
export GOPROXY=https://proxy.golang.org
APPARMORTAG := $(shell hack/apparmor_tag.sh)
STORAGETAGS := $(shell ./btrfs_tag.sh) $(shell ./btrfs_installed_tag.sh) $(shell ./hack/libsubid_tag.sh)
STORAGETAGS := $(shell ./btrfs_installed_tag.sh) $(shell ./hack/libsubid_tag.sh)
SECURITYTAGS ?= seccomp $(APPARMORTAG)
TAGS ?= $(SECURITYTAGS) $(STORAGETAGS) $(shell ./hack/systemd_tag.sh)
TAGS ?= $(SECURITYTAGS) $(STORAGETAGS) $(shell ./hack/systemd_tag.sh) $(shell ./hack/sqlite_tag.sh)
ifeq ($(shell uname -s),FreeBSD)
# FreeBSD needs CNI until netavark is supported
TAGS += cni
@ -22,9 +22,10 @@ STRIP ?= strip
GO := go
GO_LDFLAGS := $(shell if $(GO) version|grep -q gccgo; then echo "-gccgoflags"; else echo "-ldflags"; fi)
GO_GCFLAGS := $(shell if $(GO) version|grep -q gccgo; then echo "-gccgoflags"; else echo "-gcflags"; fi)
NPROCS := $(shell nproc)
export GO_BUILD=$(GO) build
export GO_TEST=$(GO) test
RACEFLAGS := $(shell $(GO_TEST) -race ./pkg/dummy > /dev/null 2>&1 && echo -race)
export GO_TEST=$(GO) test -parallel=$(NPROCS)
RACEFLAGS ?= $(shell $(GO_TEST) -race ./pkg/dummy > /dev/null 2>&1 && echo -race)
COMMIT_NO ?= $(shell git rev-parse HEAD 2> /dev/null || true)
GIT_COMMIT ?= $(if $(shell git status --porcelain --untracked-files=no),${COMMIT_NO}-dirty,${COMMIT_NO})
@ -51,27 +52,14 @@ ifeq ($(BUILDDEBUG), 1)
override GOGCFLAGS += -N -l
endif
# Managed by renovate.
export GOLANGCI_LINT_VERSION := 2.1.0
# make all BUILDDEBUG=1
# Note: Uses the -N -l go compiler options to disable compiler optimizations
# and inlining. Using these build options allows you to subsequently
# use source debugging tools like delve.
all: bin/buildah bin/imgtype bin/copy bin/inet bin/tutorial docs
# Update nix/nixpkgs.json its latest stable commit
.PHONY: nixpkgs
nixpkgs:
@nix run \
-f channel:nixos-20.09 nix-prefetch-git \
-c nix-prefetch-git \
--no-deepClone \
https://github.com/nixos/nixpkgs refs/heads/nixos-20.09 > nix/nixpkgs.json
# Build statically linked binary
.PHONY: static
static:
@nix build -f nix/
mkdir -p ./bin
cp -rfp ./result/bin/* ./bin/
all: bin/buildah bin/imgtype bin/copy bin/inet bin/tutorial bin/dumpspec bin/passwd docs
bin/buildah: $(SOURCES) internal/mkcw/embed/entrypoint_amd64.gz
$(GO_BUILD) $(BUILDAH_LDFLAGS) $(GO_GCFLAGS) "$(GOGCFLAGS)" -o $@ $(BUILDFLAGS) ./cmd/buildah
@ -103,6 +91,9 @@ bin/buildah.%: $(SOURCES)
mkdir -p ./bin
GOOS=$(word 2,$(subst ., ,$@)) GOARCH=$(word 3,$(subst ., ,$@)) $(GO_BUILD) $(BUILDAH_LDFLAGS) -o $@ -tags "containers_image_openpgp" ./cmd/buildah
bin/dumpspec: $(SOURCES)
$(GO_BUILD) $(BUILDAH_LDFLAGS) -o $@ $(BUILDFLAGS) ./tests/dumpspec
bin/imgtype: $(SOURCES)
$(GO_BUILD) $(BUILDAH_LDFLAGS) -o $@ $(BUILDFLAGS) ./tests/imgtype/imgtype.go
@ -115,9 +106,12 @@ bin/tutorial: $(SOURCES)
bin/inet: tests/inet/inet.go
$(GO_BUILD) $(BUILDAH_LDFLAGS) -o $@ $(BUILDFLAGS) ./tests/inet/inet.go
bin/passwd: tests/passwd/passwd.go
$(GO_BUILD) $(BUILDAH_LDFLAGS) -o $@ $(BUILDFLAGS) ./tests/passwd/passwd.go
.PHONY: clean
clean:
$(RM) -r bin tests/testreport/testreport
$(RM) -r bin tests/testreport/testreport tests/conformance/testdata/mount-targets/true
$(MAKE) -C docs clean
.PHONY: docs
@ -155,7 +149,7 @@ install.completions:
install -m 644 contrib/completions/bash/buildah $(DESTDIR)/$(BASHINSTALLDIR)/buildah
.PHONY: test-conformance
test-conformance:
test-conformance: tests/conformance/testdata/mount-targets/true
$(GO_TEST) -v -tags "$(STORAGETAGS) $(SECURITYTAGS)" -cover -timeout 60m ./tests/conformance
.PHONY: test-integration
@ -165,6 +159,9 @@ test-integration: install.tools
tests/testreport/testreport: tests/testreport/testreport.go
$(GO_BUILD) $(GO_LDFLAGS) "-linkmode external -extldflags -static" -tags "$(STORAGETAGS) $(SECURITYTAGS)" -o tests/testreport/testreport ./tests/testreport/testreport.go
tests/conformance/testdata/mount-targets/true: tests/conformance/testdata/mount-targets/true.go
$(GO_BUILD) $(GO_LDFLAGS) "-linkmode external -extldflags -static" -o tests/conformance/testdata/mount-targets/true tests/conformance/testdata/mount-targets/true.go
.PHONY: test-unit
test-unit: tests/testreport/testreport
$(GO_TEST) -v -tags "$(STORAGETAGS) $(SECURITYTAGS)" -cover $(RACEFLAGS) $(shell $(GO) list ./... | grep -v vendor | grep -v tests | grep -v cmd | grep -v chroot | grep -v copier) -timeout 45m
@ -175,8 +172,8 @@ test-unit: tests/testreport/testreport
vendor-in-container:
goversion=$(shell sed -e '/^go /!d' -e '/^go /s,.* ,,g' go.mod) ; \
if test -d `go env GOCACHE` && test -w `go env GOCACHE` ; then \
podman run --privileged --rm --env HOME=/root -v `go env GOCACHE`:/root/.cache/go-build --env GOCACHE=/root/.cache/go-build -v `pwd`:/src -w /src docker.io/library/golang:$$goversion make vendor ; \
if test -d `$(GO) env GOCACHE` && test -w `$(GO) env GOCACHE` ; then \
podman run --privileged --rm --env HOME=/root -v `$(GO) env GOCACHE`:/root/.cache/go-build --env GOCACHE=/root/.cache/go-build -v `pwd`:/src -w /src docker.io/library/golang:$$goversion make vendor ; \
else \
podman run --privileged --rm --env HOME=/root -v `pwd`:/src -w /src docker.io/library/golang:$$goversion make vendor ; \
fi
@ -186,11 +183,12 @@ vendor:
$(GO) mod tidy
$(GO) mod vendor
$(GO) mod verify
if test -n "$(strip $(shell go env GOTOOLCHAIN))"; then go mod edit -toolchain none ; fi
if test -n "$(strip $(shell $(GO) env GOTOOLCHAIN))"; then go mod edit -toolchain none ; fi
.PHONY: lint
lint: install.tools
./tests/tools/build/golangci-lint run $(LINTFLAGS)
./tests/tools/build/golangci-lint run --tests=false $(LINTFLAGS)
# CAUTION: This is not a replacement for RPMs provided by your distro.
# Only intended to build and test the latest unreleased changes.

16
OWNERS
View File

@ -1,15 +1,25 @@
approvers:
- TomSweeneyRedHat
- baude
- flouthoc
- giuseppe
- lsm5
- Luap99
- mheon
- mtrmac
- nalind
- rhatdan
- umohnani8
- TomSweeneyRedHat
reviewers:
- ashley-cui
- baude
- edsantiago
- flouthoc
- giuseppe
- Honny1
- lsm5
- Luap99
- mheon
- mtrmac
- nalind
- rhatdan
- TomSweeneyRedHat
- vrothberg

View File

@ -4,6 +4,7 @@
# [Buildah](https://www.youtube.com/embed/YVk5NgSiUw8) - a tool that facilitates building [Open Container Initiative (OCI)](https://www.opencontainers.org/) container images
[![Go Report Card](https://goreportcard.com/badge/github.com/containers/buildah)](https://goreportcard.com/report/github.com/containers/buildah)
[![OpenSSF Best Practices](https://www.bestpractices.dev/projects/10579/badge)](https://www.bestpractices.dev/projects/10579)
The Buildah package provides a command line tool that can be used to

48
ROADMAP.md Normal file
View File

@ -0,0 +1,48 @@
![buildah logo (light)](logos/buildah-logo_large.png#gh-light-mode-only)
![buildah logo (dark)](logos/buildah-logo_reverse_large.png#gh-dark-mode-only)
# Buildah Roadmap
The Buildah development team reviews feature requests from its various stakeholders for consideration
quarterly, along with the Podman development team. These features are then prioritized, and the top
features are assigned to one or more engineers.
## Future feature considerations
The following features are of general importance to Buildah. While these features have no timeline
associated with them yet, they will likely be on future quarterly milestones.
* Ongoing work around partial pull support (zstd:chunked)
* Improved support for the BuildKit API.
* Performance and stability improvements.
* Reductions to the size of the Buildah binary.
## Milestones and commitments by quarter
This section is a historical account of which features were prioritized by quarter. Results of the prioritization will be added at the start of each quarter (Jan, Apr, July, Oct).
### 2025 Q2 ####
#### Releases ####
- [ ] Buildah 1.40
#### Features ####
- [ ] Reduce binary size of Buildah
- [ ] Additional Containerfile command options
#### CNCF ####
- [ ] Add and adhere to Governance model
- [ ] Update Maintainers file
### 2025 Q1 ####
#### Releases ####
- [x] Buildah 1.39
#### Features ####
- [x] Artifact add --options
#### CNCF ####
- [x] Create Governance documentation
- [x] Create Maintainers file

221
add.go
View File

@ -12,6 +12,7 @@ import (
"os"
"path"
"path/filepath"
"slices"
"strconv"
"strings"
"sync"
@ -32,6 +33,7 @@ import (
"github.com/hashicorp/go-multierror"
"github.com/moby/sys/userns"
digest "github.com/opencontainers/go-digest"
v1 "github.com/opencontainers/image-spec/specs-go/v1"
"github.com/opencontainers/runtime-spec/specs-go"
"github.com/sirupsen/logrus"
)
@ -94,6 +96,22 @@ type AddAndCopyOptions struct {
// RetryDelay is how long to wait before retrying attempts to retrieve
// remote contents.
RetryDelay time.Duration
// Parents specifies that we should preserve either all of the parent
// directories of source locations, or the ones which follow "/./" in
// the source paths for source locations which include such a
// component.
Parents bool
// Timestamp is a timestamp to override on all content as it is being read.
Timestamp *time.Time
// Link, when set to true, creates an independent layer containing the copied content
// that sits on top of existing layers. This layer can be cached and reused
// separately, and is not affected by filesystem changes from previous instructions.
Link bool
// BuildMetadata is consulted only when Link is true. Contains metadata used by
// imagebuildah for cache evaluation of linked layers (inheritLabels, unsetAnnotations,
// inheritAnnotations, newAnnotations). This field is internally managed and should
// not be set by external API users.
BuildMetadata string
}
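
For orientation, here is a minimal usage sketch of the new `Link`, `Parents`, and `Timestamp` fields; it assumes a caller already holding a `*buildah.Builder`, and the context directory, destination, and source paths are made up for illustration (this is not code from the repository).

```
package example

import (
	"time"

	"github.com/containers/buildah"
)

// copyWithLink is a hypothetical sketch, not part of Buildah itself: it copies
// a source into the working container while preserving the path components
// after "/./" (Parents), staging the result as an independent, cacheable
// layer (Link), and forcing a single timestamp on the copied content.
func copyWithLink(b *buildah.Builder) error {
	now := time.Now().UTC()
	options := buildah.AddAndCopyOptions{
		ContextDir: "/work/context", // hypothetical build context
		Parents:    true,
		Link:       true,
		Timestamp:  &now,
	}
	// false selects COPY semantics (no archive extraction); the "/./" in the
	// source marks the pivot point honored by Parents.
	return b.Add("/app", false, options, "/work/context/src/./app")
}
```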
// gitURLFragmentSuffix matches fragments to use as Git reference and build
@ -120,7 +138,7 @@ func sourceIsRemote(source string) bool {
}
// getURL writes a tar archive containing the named content
func getURL(src string, chown *idtools.IDPair, mountpoint, renameTarget string, writer io.Writer, chmod *os.FileMode, srcDigest digest.Digest, certPath string, insecureSkipTLSVerify types.OptionalBool) error {
func getURL(src string, chown *idtools.IDPair, mountpoint, renameTarget string, writer io.Writer, chmod *os.FileMode, srcDigest digest.Digest, certPath string, insecureSkipTLSVerify types.OptionalBool, timestamp *time.Time) error {
url, err := url.Parse(src)
if err != nil {
return err
@ -151,15 +169,19 @@ func getURL(src string, chown *idtools.IDPair, mountpoint, renameTarget string,
name = path.Base(url.Path)
}
// If there's a date on the content, use it. If not, use the Unix epoch
// for compatibility.
// or a specified value for compatibility.
date := time.Unix(0, 0).UTC()
lastModified := response.Header.Get("Last-Modified")
if lastModified != "" {
d, err := time.Parse(time.RFC1123, lastModified)
if err != nil {
return fmt.Errorf("parsing last-modified time: %w", err)
if timestamp != nil {
date = timestamp.UTC()
} else {
lastModified := response.Header.Get("Last-Modified")
if lastModified != "" {
d, err := time.Parse(time.RFC1123, lastModified)
if err != nil {
return fmt.Errorf("parsing last-modified time %q: %w", lastModified, err)
}
date = d.UTC()
}
date = d
}
// Figure out the size of the content.
size := response.ContentLength
@ -263,6 +285,25 @@ func globbedToGlobbable(glob string) string {
return result
}
// getParentsPrefixToRemoveAndParentsToSkip extracts from the pattern the prefix before the "pivot point",
// the location in the source path marked by the path component named "."
// (i.e. where "/./" occurs in the path), along with the list of parent directories to skip.
// If "/./" is not present, "/" is returned as the prefix.
func getParentsPrefixToRemoveAndParentsToSkip(pattern string, contextDir string) (string, []string) {
prefix, _, found := strings.Cut(strings.TrimPrefix(pattern, contextDir), "/./")
if !found {
return string(filepath.Separator), []string{}
}
prefix = strings.TrimPrefix(filepath.Clean(string(filepath.Separator)+prefix), string(filepath.Separator))
out := []string{}
parentPath := prefix
for parentPath != "/" && parentPath != "." {
out = append(out, parentPath)
parentPath = filepath.Dir(parentPath)
}
return prefix, out
}
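
To make the pivot-point behavior concrete, here is a small self-contained illustration; the helper below re-implements the logic above purely for demonstration, and the paths are invented.

```
package main

import (
	"fmt"
	"path/filepath"
	"strings"
)

// pivotPrefix mirrors the logic above: everything between the context
// directory and "/./" is the prefix that gets stripped, and each of its
// ancestors is a directory entry to skip when filtering the tar stream.
func pivotPrefix(pattern, contextDir string) (string, []string) {
	prefix, _, found := strings.Cut(strings.TrimPrefix(pattern, contextDir), "/./")
	if !found {
		return string(filepath.Separator), nil
	}
	prefix = strings.TrimPrefix(filepath.Clean(string(filepath.Separator)+prefix), string(filepath.Separator))
	var parents []string
	for p := prefix; p != "/" && p != "."; p = filepath.Dir(p) {
		parents = append(parents, p)
	}
	return prefix, parents
}

func main() {
	prefix, skip := pivotPrefix("/ctx/dir1/dir2/./file.txt", "/ctx")
	fmt.Println(prefix) // dir1/dir2
	fmt.Println(skip)   // [dir1/dir2 dir1]
}
```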
// Add copies the contents of the specified sources into the container's root
// filesystem, optionally extracting contents of local files that look like
// non-empty archives.
@ -432,10 +473,7 @@ func (b *Builder) Add(destination string, extract bool, options AddAndCopyOption
}
// if the destination is a directory that doesn't yet exist, let's copy it.
newDestDirFound := false
if (len(destStats) == 1 || len(destStats[0].Globbed) == 0) && destMustBeDirectory && !destCanBeFile {
newDestDirFound = true
}
newDestDirFound := (len(destStats) == 1 || len(destStats[0].Globbed) == 0) && destMustBeDirectory && !destCanBeFile
if len(destStats) == 1 && len(destStats[0].Globbed) == 1 && destStats[0].Results[destStats[0].Globbed[0]].IsRegular {
if destMustBeDirectory {
@ -467,14 +505,73 @@ func (b *Builder) Add(destination string, extract bool, options AddAndCopyOption
}
destUIDMap, destGIDMap := convertRuntimeIDMaps(b.IDMappingOptions.UIDMap, b.IDMappingOptions.GIDMap)
// Create the target directory if it doesn't exist yet.
var putRoot, putDir, stagingDir string
var createdDirs []string
var latestTimestamp time.Time
mkdirOptions := copier.MkdirOptions{
UIDMap: destUIDMap,
GIDMap: destGIDMap,
ChownNew: chownDirs,
}
if err := copier.Mkdir(mountPoint, extractDirectory, mkdirOptions); err != nil {
return fmt.Errorf("ensuring target directory exists: %w", err)
// If --link is specified, we create a staging directory to hold the content
// that will then become an independent layer
if options.Link {
containerDir, err := b.store.ContainerDirectory(b.ContainerID)
if err != nil {
return fmt.Errorf("getting container directory for %q: %w", b.ContainerID, err)
}
stagingDir, err = os.MkdirTemp(containerDir, "link-stage-")
if err != nil {
return fmt.Errorf("creating staging directory for link %q: %w", b.ContainerID, err)
}
putRoot = stagingDir
cleanDest := filepath.Clean(destination)
if strings.Contains(cleanDest, "..") {
return fmt.Errorf("invalid destination path %q: contains path traversal", destination)
}
if renameTarget != "" {
putDir = filepath.Dir(filepath.Join(stagingDir, cleanDest))
} else {
putDir = filepath.Join(stagingDir, cleanDest)
}
putDirAbs, err := filepath.Abs(putDir)
if err != nil {
return fmt.Errorf("failed to resolve absolute path: %w", err)
}
stagingDirAbs, err := filepath.Abs(stagingDir)
if err != nil {
return fmt.Errorf("failed to resolve staging directory absolute path: %w", err)
}
if !strings.HasPrefix(putDirAbs, stagingDirAbs+string(os.PathSeparator)) && putDirAbs != stagingDirAbs {
return fmt.Errorf("destination path %q escapes staging directory", destination)
}
if err := copier.Mkdir(putRoot, putDirAbs, mkdirOptions); err != nil {
return fmt.Errorf("ensuring target directory exists: %w", err)
}
tempPath := putDir
for tempPath != stagingDir && tempPath != filepath.Dir(tempPath) {
if _, err := os.Stat(tempPath); err == nil {
createdDirs = append(createdDirs, tempPath)
}
tempPath = filepath.Dir(tempPath)
}
} else {
if err := copier.Mkdir(mountPoint, extractDirectory, mkdirOptions); err != nil {
return fmt.Errorf("ensuring target directory exists: %w", err)
}
putRoot = extractDirectory
putDir = extractDirectory
}
// Copy each source in turn.
@ -495,8 +592,13 @@ func (b *Builder) Add(destination string, extract bool, options AddAndCopyOption
wg.Add(1)
if sourceIsGit(src) {
go func() {
defer wg.Done()
defer pipeWriter.Close()
var cloneDir, subdir string
cloneDir, subdir, getErr = define.TempDirForURL(tmpdir.GetTempDir(), "", src)
if getErr != nil {
return
}
getOptions := copier.GetOptions{
UIDMap: srcUIDMap,
GIDMap: srcGIDMap,
@ -509,17 +611,16 @@ func (b *Builder) Add(destination string, extract bool, options AddAndCopyOption
StripSetuidBit: options.StripSetuidBit,
StripSetgidBit: options.StripSetgidBit,
StripStickyBit: options.StripStickyBit,
Timestamp: options.Timestamp,
}
writer := io.WriteCloser(pipeWriter)
repositoryDir := filepath.Join(cloneDir, subdir)
getErr = copier.Get(repositoryDir, repositoryDir, getOptions, []string{"."}, writer)
pipeWriter.Close()
wg.Done()
}()
} else {
go func() {
getErr = retry.IfNecessary(context.TODO(), func() error {
return getURL(src, chownFiles, mountPoint, renameTarget, pipeWriter, chmodDirsFiles, srcDigest, options.CertPath, options.InsecureSkipTLSVerify)
return getURL(src, chownFiles, mountPoint, renameTarget, pipeWriter, chmodDirsFiles, srcDigest, options.CertPath, options.InsecureSkipTLSVerify, options.Timestamp)
}, &retry.Options{
MaxRetry: options.MaxRetries,
Delay: options.RetryDelay,
@ -549,7 +650,7 @@ func (b *Builder) Add(destination string, extract bool, options AddAndCopyOption
ChmodFiles: nil,
IgnoreDevices: userns.RunningInUserNS(),
}
putErr = copier.Put(extractDirectory, extractDirectory, putOptions, io.TeeReader(pipeReader, hasher))
putErr = copier.Put(putRoot, putDir, putOptions, io.TeeReader(pipeReader, hasher))
}
hashCloser.Close()
pipeReader.Close()
@ -587,7 +688,6 @@ func (b *Builder) Add(destination string, extract bool, options AddAndCopyOption
if localSourceStat == nil {
continue
}
// Iterate through every item that matched the glob.
itemsCopied := 0
for _, globbed := range localSourceStat.Globbed {
@ -602,7 +702,7 @@ func (b *Builder) Add(destination string, extract bool, options AddAndCopyOption
}
// Check for dockerignore-style exclusion of this item.
if rel != "." {
excluded, err := pm.Matches(filepath.ToSlash(rel)) // nolint:staticcheck
excluded, err := pm.Matches(filepath.ToSlash(rel)) //nolint:staticcheck
if err != nil {
return fmt.Errorf("checking if %q(%q) is excluded: %w", globbed, rel, err)
}
@ -628,6 +728,9 @@ func (b *Builder) Add(destination string, extract bool, options AddAndCopyOption
itemsCopied++
}
st := localSourceStat.Results[globbed]
if options.Link && st.ModTime.After(latestTimestamp) {
latestTimestamp = st.ModTime
}
pipeReader, pipeWriter := io.Pipe()
wg.Add(1)
go func() {
@ -640,6 +743,25 @@ func (b *Builder) Add(destination string, extract bool, options AddAndCopyOption
return false, false, nil
})
}
if options.Parents {
parentsPrefixToRemove, parentsToSkip := getParentsPrefixToRemoveAndParentsToSkip(src, options.ContextDir)
writer = newTarFilterer(writer, func(hdr *tar.Header) (bool, bool, io.Reader) {
if slices.Contains(parentsToSkip, hdr.Name) && hdr.Typeflag == tar.TypeDir {
return true, false, nil
}
hdr.Name = strings.TrimPrefix(hdr.Name, parentsPrefixToRemove)
hdr.Name = strings.TrimPrefix(hdr.Name, "/")
if hdr.Typeflag == tar.TypeLink {
hdr.Linkname = strings.TrimPrefix(hdr.Linkname, parentsPrefixToRemove)
hdr.Linkname = strings.TrimPrefix(hdr.Linkname, "/")
}
if hdr.Name == "" {
return true, false, nil
}
return false, false, nil
})
}
writer = newTarFilterer(writer, func(_ *tar.Header) (bool, bool, io.Reader) {
itemsCopied++
return false, false, nil
@ -656,6 +778,8 @@ func (b *Builder) Add(destination string, extract bool, options AddAndCopyOption
StripSetuidBit: options.StripSetuidBit,
StripSetgidBit: options.StripSetgidBit,
StripStickyBit: options.StripStickyBit,
Parents: options.Parents,
Timestamp: options.Timestamp,
}
getErr = copier.Get(contextDir, contextDir, getOptions, []string{globbedToGlobbable(globbed)}, writer)
closeErr = writer.Close()
@ -690,12 +814,13 @@ func (b *Builder) Add(destination string, extract bool, options AddAndCopyOption
ChmodFiles: nil,
IgnoreDevices: userns.RunningInUserNS(),
}
putErr = copier.Put(extractDirectory, extractDirectory, putOptions, io.TeeReader(pipeReader, hasher))
putErr = copier.Put(putRoot, putDir, putOptions, io.TeeReader(pipeReader, hasher))
}
hashCloser.Close()
pipeReader.Close()
wg.Done()
}()
wg.Wait()
if getErr != nil {
getErr = fmt.Errorf("reading %q: %w", src, getErr)
@ -725,6 +850,58 @@ func (b *Builder) Add(destination string, extract bool, options AddAndCopyOption
return fmt.Errorf("no items matching glob %q copied (%d filtered out%s): %w", localSourceStat.Glob, len(localSourceStat.Globbed), excludesFile, syscall.ENOENT)
}
}
if options.Link {
if !latestTimestamp.IsZero() {
for _, dir := range createdDirs {
if err := os.Chtimes(dir, latestTimestamp, latestTimestamp); err != nil {
logrus.Warnf("failed to set timestamp on directory %q: %v", dir, err)
}
}
}
var created time.Time
if options.Timestamp != nil {
created = *options.Timestamp
} else if !latestTimestamp.IsZero() {
created = latestTimestamp
} else {
created = time.Unix(0, 0).UTC()
}
command := "ADD"
if !extract {
command = "COPY"
}
contentType, digest := b.ContentDigester.Digest()
summary := contentType
if digest != "" {
if summary != "" {
summary = summary + ":"
}
summary = summary + digest.Encoded()
logrus.Debugf("added content from --link %s", summary)
}
createdBy := "/bin/sh -c #(nop) " + command + " --link " + summary + " in " + destination + " " + options.BuildMetadata
history := v1.History{
Created: &created,
CreatedBy: createdBy,
Comment: b.HistoryComment(),
}
linkedLayer := LinkedLayer{
History: history,
BlobPath: stagingDir,
}
b.AppendedLinkedLayers = append(b.AppendedLinkedLayers, linkedLayer)
if err := b.Save(); err != nil {
return fmt.Errorf("saving builder state after queuing linked layer: %w", err)
}
}
return nil
}

View File

@ -1,7 +0,0 @@
#!/usr/bin/env bash
${CPP:-${CC:-cc} -E} ${CPPFLAGS} - > /dev/null 2> /dev/null << EOF
#include <btrfs/version.h>
EOF
if test $? -ne 0 ; then
echo btrfs_noversion
fi

View File

@ -180,6 +180,7 @@ type Builder struct {
// Format to use for a container image we eventually commit, when we do.
Format string
// TempVolumes are temporary mount points created during Run() calls.
// Deprecated: do not use.
TempVolumes map[string]bool
// ContentDigester counts the digest of all Add()ed content since it was
// last restarted.

View File

@ -40,6 +40,12 @@ func TestMain(m *testing.M) {
}
func TestOpenBuilderCommonBuildOpts(t *testing.T) {
// This test cannot be parallelized because it uses NewBuilder(),
// which eventually and indirectly accesses a global variable
// defined in `go-selinux`. This must be fixed in `go-selinux`,
// or the builder must add some kind of locking mechanism, i.e. if
// one goroutine is creating a Builder, the others must wait for it.
// Tracked here: https://github.com/containers/buildah/issues/5967
ctx := context.TODO()
store, err := storage.GetStore(types.StoreOptions{
RunRoot: t.TempDir(),

View File

@ -1,3 +1,131 @@
- Changelog for v1.40.0 (2025-04-17)
* Bump c/storage to v1.58.0, c/image v5.35.0, c/common v0.63.0
* fix(deps): update module github.com/docker/docker to v28.1.0+incompatible
* fix(deps): update module github.com/containers/storage to v1.58.0
* cirrus: make Total Success wait for rootless integration
* chroot: use symbolic names when complaining about mount() errors
* cli: hide the `completion` command instead of disabling it outright
* Document rw and src options for --mount flag in buildah-run(1)
* fix(deps): update module github.com/moby/buildkit to v0.21.0
* build: add support for inherit-labels
* chore(deps): update dependency golangci/golangci-lint to v2.1.0
* .github: check_cirrus_cron work around github bug
* stage_executor,getCreatedBy: expand buildArgs for sources correctly
* Add a link to project governance and MAINTAINERS file
* fix(deps): update github.com/containers/storage digest to b1d1b45
* generateHostname: simplify
* Use maps.Copy
* Use slices.Concat
* Use slices.Clone
* Use slices.Contains
* Use for range over integers
* tests/testreport: don't copy os.Environ
* Use any instead of interface{}
* ci: add golangci-lint run with --tests=false
* ci: add nolintlint, fix found issues
* copier: rm nolint:unparam annotation
* .golangci.yml: add unused linter
* chroot: fix unused warnings
* copier: fix unused warnings
* tests/conformance: fix unused warning
* ci: switch to golangci-lint v2
* internal/mkcw: disable ST1003 warnings
* tests/conformance: do not double import (fix ST1019)
* cmd/buildah: don't double import (fix ST1019)
* Do not capitalize error strings
* cmd/buildah: do not capitalize error strings
* tests/conformance: fix QF1012 warnings
* tests/serve: fix QF1012 warning
* Use strings.ReplaceAll to fix QF1004 warnings
* Use switch to fix QF1003 warnings
* Apply De Morgan's law to fix QF1001 warnings
* Fix QF1007 staticcheck warnings
* imagebuildah: fix revive warning
* Rename max variable
* tests/tools: install lint from binary, use renovate
* fix(deps): update module github.com/containernetworking/cni to v1.3.0
* Update Buildah issue template to new version and support podman build
* fix(deps): update module golang.org/x/crypto to v0.37.0
* stage_executor: reset platform in systemcontext for stages
* fix(deps): update github.com/opencontainers/runtime-tools digest to 260e151
* cmd/buildah: rm unused containerOutputUsingTemplate
* cmd/buildah: rm unused getDateAndDigestAndSize
* build: return ExecErrorCodeGeneric when git operation fails
* add: report error while creating dir for URL source.
* createPlatformContainer: drop MS_REMOUNT|MS_BIND
* fix(deps): update module github.com/docker/docker to v28.0.3+incompatible
* fix: bats won't fail on ! without cleverness
* feat: use HistoryTimestamp, if set, for oci-archive entries
* Allow extendedGlob to work with Windows paths
* fix(deps): update module github.com/moby/buildkit to v0.20.2
* fix(deps): update github.com/openshift/imagebuilder digest to e87e4e1
* fix(deps): update module github.com/docker/docker to v28.0.2+incompatible
* fix(deps): update module tags.cncf.io/container-device-interface to v1.0.1
* chore(deps): update dependency containers/automation_images to v20250324
* vendor: update github.com/opencontainers/selinux to v1.12.0
* replace deprecated selinux/label calls
* vendor: bump c/common to dbeb17e40c80
* Use builtin arg defaults from imagebuilder
* linux: accept unmask paths as glob values
* vendor: update containers/common
* Add --parents option for COPY in Dockerfiles
* fix(deps): update module github.com/opencontainers/runc to v1.2.6
* update go.sum from the previous commit
* fix(deps): update module tags.cncf.io/container-device-interface to v1
* chore(deps): update module golang.org/x/net to v0.36.0 [security]
* packit: remove f40 from copr builds
* cirrus: update to go 1.23 image
* vendor bump to golang.org/x/crypto v0.36.0
* cirrus: update PRIOR_FEDORA comment
* github: remove cirrus rerun action
* fix(deps): update module github.com/containers/common to v0.62.2
* fix(deps): update module github.com/containers/image/v5 to v5.34.2
* fix: close files properly when BuildDockerfiles exits
* fix(deps): update module github.com/containers/storage to v1.57.2
* stage_executor: history should include heredoc summary correctly
* fix(deps): update module github.com/containers/common to v0.62.1
* github: disable cron rerun action
* fix(deps): update module github.com/moby/buildkit to v0.20.1
* internal/mkcw.Archive(): use github.com/containers/storage/pkg/ioutils
* [skip-ci] TMT: system tests
* buildah-build.1.md: secret examples
* fix(deps): update github.com/containers/luksy digest to 40bd943
* fix(deps): update module github.com/opencontainers/image-spec to v1.1.1
* fix(deps): update module github.com/containers/image/v5 to v5.34.1
* Use UnparsedInstance.Manifest instead of ImageSource.GetManifest
* fix(deps): update module github.com/opencontainers/runtime-spec to v1.2.1
* tests/conformance/testdata/Dockerfile.add: update some URLs
* Vendor imagebuilder
* Fix source of OS, architecture and variant
* chore(deps): update module github.com/go-jose/go-jose/v4 to v4.0.5 [security]
* fix(deps): update module tags.cncf.io/container-device-interface to v0.8.1
* fix(deps): update module github.com/moby/buildkit to v0.20.0
* chroot createPlatformContainer: use MS_REMOUNT
* conformance: make TestCommit and TestConformance parallel
* cirrus: reduce task timeout
* mkcw: mkcw_check_image use bats run_with_log
* test: use /tmp as TMPDIR
* heredoc: create temp subdirs for each build
* test: heredoc remove python dependency from test
* Support the containers.conf container_name_as_hostname option
* fix(deps): update module github.com/opencontainers/runc to v1.2.5
* fix(deps): update module github.com/spf13/cobra to v1.9.0
* .cirrus: use more cores for smoke
* Switch to the CNCF Code of Conduct
* .cirrus: bump ci resources
* fix(deps): update module golang.org/x/crypto to v0.33.0
* Distinguish --mount=type=cache locations by ownership, too
* fix(deps): update module golang.org/x/term to v0.29.0
* .cirrus: run -race only on non-PR branch
* unit: deparallize some tests
* .cirrus: use multiple cpu for unit tests
* Makefile: use -parallel for go test
* unit_test: use Parallel test where possible
* Update module golang.org/x/sys to v0.30.0
* Update module golang.org/x/sync to v0.11.0
* Update dependency containers/automation_images to v20250131
* Bump to Buildah v1.40.0-dev
- Changelog for v1.39.0 (2025-01-31)
* Bump c/storage v1.57.1, c/image 5.34.0, c/common v0.62.0
* Update module github.com/containers/storage to v1.57.0

View File

@ -1,11 +0,0 @@
//go:build !linux && !(freebsd && cgo)
package chroot
import (
"errors"
)
func getPtyDescriptors() (int, int, error) {
return -1, -1, errors.New("getPtyDescriptors not supported on this platform")
}

View File

@ -18,6 +18,7 @@ import (
"syscall"
"github.com/containers/buildah/bind"
"github.com/containers/buildah/internal/pty"
"github.com/containers/buildah/util"
"github.com/containers/storage/pkg/ioutils"
"github.com/containers/storage/pkg/reexec"
@ -217,7 +218,7 @@ func runUsingChrootMain() {
var stderr io.Writer
fdDesc := make(map[int]string)
if options.Spec.Process.Terminal {
ptyMasterFd, ptyFd, err := getPtyDescriptors()
ptyMasterFd, ptyFd, err := pty.GetPtyDescriptors()
if err != nil {
logrus.Errorf("error opening PTY descriptors: %v", err)
os.Exit(1)

View File

@ -8,6 +8,7 @@ import (
"os"
"os/exec"
"path/filepath"
"slices"
"strings"
"syscall"
"time"
@ -42,8 +43,82 @@ var (
"RLIMIT_STACK": unix.RLIMIT_STACK,
}
rlimitsReverseMap = map[int]string{}
mountFlagMap = map[int]string{
unix.MS_ACTIVE: "MS_ACTIVE",
unix.MS_BIND: "MS_BIND",
unix.MS_BORN: "MS_BORN",
unix.MS_DIRSYNC: "MS_DIRSYNC",
unix.MS_KERNMOUNT: "MS_KERNMOUNT",
unix.MS_LAZYTIME: "MS_LAZYTIME",
unix.MS_MANDLOCK: "MS_MANDLOCK",
unix.MS_MOVE: "MS_MOVE",
unix.MS_NOATIME: "MS_NOATIME",
unix.MS_NODEV: "MS_NODEV",
unix.MS_NODIRATIME: "MS_NODIRATIME",
unix.MS_NOEXEC: "MS_NOEXEC",
unix.MS_NOREMOTELOCK: "MS_NOREMOTELOCK",
unix.MS_NOSEC: "MS_NOSEC",
unix.MS_NOSUID: "MS_NOSUID",
unix.MS_NOSYMFOLLOW: "MS_NOSYMFOLLOW",
unix.MS_NOUSER: "MS_NOUSER",
unix.MS_POSIXACL: "MS_POSIXACL",
unix.MS_PRIVATE: "MS_PRIVATE",
unix.MS_RDONLY: "MS_RDONLY",
unix.MS_REC: "MS_REC",
unix.MS_RELATIME: "MS_RELATIME",
unix.MS_REMOUNT: "MS_REMOUNT",
unix.MS_SHARED: "MS_SHARED",
unix.MS_SILENT: "MS_SILENT",
unix.MS_SLAVE: "MS_SLAVE",
unix.MS_STRICTATIME: "MS_STRICTATIME",
unix.MS_SUBMOUNT: "MS_SUBMOUNT",
unix.MS_SYNCHRONOUS: "MS_SYNCHRONOUS",
unix.MS_UNBINDABLE: "MS_UNBINDABLE",
}
statFlagMap = map[int]string{
unix.ST_MANDLOCK: "ST_MANDLOCK",
unix.ST_NOATIME: "ST_NOATIME",
unix.ST_NODEV: "ST_NODEV",
unix.ST_NODIRATIME: "ST_NODIRATIME",
unix.ST_NOEXEC: "ST_NOEXEC",
unix.ST_NOSUID: "ST_NOSUID",
unix.ST_RDONLY: "ST_RDONLY",
unix.ST_RELATIME: "ST_RELATIME",
unix.ST_SYNCHRONOUS: "ST_SYNCHRONOUS",
}
)
func mountFlagNames(flags uintptr) []string {
var names []string
for flag, name := range mountFlagMap {
if int(flags)&flag == flag {
names = append(names, name)
flags = flags &^ (uintptr(flag))
}
}
if flags != 0 { // got some unknown leftovers
names = append(names, fmt.Sprintf("%#x", flags))
}
slices.Sort(names)
return names
}
func statFlagNames(flags uintptr) []string {
var names []string
flags = flags & ^uintptr(0x20) // mask off ST_VALID
for flag, name := range statFlagMap {
if int(flags)&flag == flag {
names = append(names, name)
flags = flags &^ (uintptr(flag))
}
}
if flags != 0 { // got some unknown leftovers
names = append(names, fmt.Sprintf("%#x", flags))
}
slices.Sort(names)
return names
}
type runUsingChrootSubprocOptions struct {
Spec *specs.Spec
BundlePath string
@ -61,14 +136,14 @@ func setPlatformUnshareOptions(spec *specs.Spec, cmd *unshare.Cmd) error {
uidmap, gidmap := spec.Linux.UIDMappings, spec.Linux.GIDMappings
if len(uidmap) == 0 {
// No UID mappings are configured for the container. Borrow our parent's mappings.
uidmap = append([]specs.LinuxIDMapping{}, hostUidmap...)
uidmap = slices.Clone(hostUidmap)
for i := range uidmap {
uidmap[i].HostID = uidmap[i].ContainerID
}
}
if len(gidmap) == 0 {
// No GID mappings are configured for the container. Borrow our parent's mappings.
gidmap = append([]specs.LinuxIDMapping{}, hostGidmap...)
gidmap = slices.Clone(hostGidmap)
for i := range gidmap {
gidmap[i].HostID = gidmap[i].ContainerID
}
@ -263,7 +338,7 @@ func createPlatformContainer(options runUsingChrootExecSubprocOptions) error {
return fmt.Errorf("changing to host root directory: %w", err)
}
// make sure we only unmount things under this tree
if err := unix.Mount(".", ".", "bind", unix.MS_BIND|unix.MS_SLAVE|unix.MS_REC, ""); err != nil {
if err := unix.Mount(".", ".", "", unix.MS_SLAVE|unix.MS_REC, ""); err != nil {
return fmt.Errorf("tweaking mount flags on host root directory before unmounting from mount namespace: %w", err)
}
// detach this (unnamed?) old directory
@ -573,15 +648,15 @@ func setupChrootBindMounts(spec *specs.Spec, bundlePath string) (undoBinds func(
remountFlags |= uintptr(fs.Flags) & possibleImportantFlags
}
if err = unix.Mount(target, target, m.Type, remountFlags, ""); err != nil {
return undoBinds, fmt.Errorf("remounting %q in mount namespace with flags %#x instead of %#x: %w", target, requestFlags, effectiveImportantFlags, err)
return undoBinds, fmt.Errorf("remounting %q in mount namespace with flags %v instead of %v: %w", target, mountFlagNames(requestFlags), statFlagNames(effectiveImportantFlags), err)
}
// Check if the desired flags stuck.
if err = unix.Statfs(target, &fs); err != nil {
return undoBinds, fmt.Errorf("checking if directory %q was remounted with requested flags %#x instead of %#x: %w", target, requestFlags, effectiveImportantFlags, err)
return undoBinds, fmt.Errorf("checking if directory %q was remounted with requested flags %v instead of %v: %w", target, mountFlagNames(requestFlags), statFlagNames(effectiveImportantFlags), err)
}
newEffectiveImportantFlags := uintptr(fs.Flags) & importantFlags
if newEffectiveImportantFlags != expectedImportantFlags {
return undoBinds, fmt.Errorf("unable to remount %q with requested flags %#x instead of %#x, just got %#x back", target, requestFlags, effectiveImportantFlags, newEffectiveImportantFlags)
return undoBinds, fmt.Errorf("unable to remount %q with requested flags %v instead of %v, just got %v back", target, mountFlagNames(requestFlags), statFlagNames(effectiveImportantFlags), statFlagNames(newEffectiveImportantFlags))
}
}
}
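
For context on the error-message change above: the code verifies a remount by re-reading the mount flags with statfs(2) and comparing them against what was requested. Below is a minimal, self-contained sketch of that pattern; the path and the single flag checked are arbitrary examples, not taken from Buildah.

```
package main

import (
	"fmt"

	"golang.org/x/sys/unix"
)

func main() {
	// Read back the effective mount flags for a path, the same way
	// setupChrootBindMounts checks whether requested flags "stuck".
	var fs unix.Statfs_t
	if err := unix.Statfs("/proc", &fs); err != nil {
		fmt.Println("statfs:", err)
		return
	}
	// fs.Flags carries ST_* bits; ST_RDONLY set means the mount is read-only.
	if fs.Flags&unix.ST_RDONLY != 0 {
		fmt.Println("/proc is mounted read-only")
	} else {
		fmt.Println("/proc is mounted read-write")
	}
}
```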

32
chroot/run_linux_test.go Normal file
View File

@ -0,0 +1,32 @@
package chroot
import (
"slices"
"testing"
"github.com/stretchr/testify/assert"
)
func TestStatFlagNames(t *testing.T) {
var names []string
var flags int
for flag := range statFlagMap {
flags |= flag
names = append(names, statFlagMap[flag])
assert.Equal(t, []string{statFlagMap[flag]}, statFlagNames(uintptr(flag)))
}
slices.Sort(names)
assert.Equal(t, names, statFlagNames(uintptr(flags)))
}
func TestMountFlagNames(t *testing.T) {
var names []string
var flags int
for flag := range mountFlagMap {
flags |= flag
names = append(names, mountFlagMap[flag])
assert.Equal(t, []string{mountFlagMap[flag]}, mountFlagNames(uintptr(flag)))
}
slices.Sort(names)
assert.Equal(t, names, mountFlagNames(uintptr(flags)))
}

View File

@ -123,6 +123,7 @@ func testMinimal(t *testing.T, modify func(g *generate.Generator, rootDir, bundl
}
func TestNoop(t *testing.T) {
t.Parallel()
if unix.Getuid() != 0 {
t.Skip("tests need to be run as root")
}
@ -130,6 +131,7 @@ func TestNoop(t *testing.T) {
}
func TestMinimalSkeleton(t *testing.T) {
t.Parallel()
if unix.Getuid() != 0 {
t.Skip("tests need to be run as root")
}
@ -142,6 +144,7 @@ func TestMinimalSkeleton(t *testing.T) {
}
func TestProcessTerminal(t *testing.T) {
t.Parallel()
if unix.Getuid() != 0 {
t.Skip("tests need to be run as root")
}
@ -160,6 +163,7 @@ func TestProcessTerminal(t *testing.T) {
}
func TestProcessConsoleSize(t *testing.T) {
t.Parallel()
if unix.Getuid() != 0 {
t.Skip("tests need to be run as root")
}
@ -182,6 +186,7 @@ func TestProcessConsoleSize(t *testing.T) {
}
func TestProcessUser(t *testing.T) {
t.Parallel()
if unix.Getuid() != 0 {
t.Skip("tests need to be run as root")
}
@ -205,6 +210,7 @@ func TestProcessUser(t *testing.T) {
}
func TestProcessEnv(t *testing.T) {
t.Parallel()
if unix.Getuid() != 0 {
t.Skip("tests need to be run as root")
}
@ -215,10 +221,8 @@ func TestProcessEnv(t *testing.T) {
g.AddProcessEnv("PARENT_TEST_PID", strconv.Itoa(unix.Getpid()))
},
func(t *testing.T, report *types.TestReport) {
for _, ev := range report.Spec.Process.Env {
if ev == e {
return
}
if slices.Contains(report.Spec.Process.Env, e) {
return
}
t.Fatalf("expected environment variable %q", e)
},
@ -226,6 +230,7 @@ func TestProcessEnv(t *testing.T) {
}
func TestProcessCwd(t *testing.T) {
t.Parallel()
if unix.Getuid() != 0 {
t.Skip("tests need to be run as root")
}
@ -245,6 +250,7 @@ func TestProcessCwd(t *testing.T) {
}
func TestProcessCapabilities(t *testing.T) {
t.Parallel()
if unix.Getuid() != 0 {
t.Skip("tests need to be run as root")
}
@ -289,6 +295,7 @@ func TestProcessCapabilities(t *testing.T) {
}
func TestProcessRlimits(t *testing.T) {
t.Parallel()
if unix.Getuid() != 0 {
t.Skip("tests need to be run as root")
}
@ -307,7 +314,7 @@ func TestProcessRlimits(t *testing.T) {
rlim = &report.Spec.Process.Rlimits[i]
}
}
if limit == unix.RLIM_INFINITY && !(rlim == nil || (rlim.Soft == unix.RLIM_INFINITY && rlim.Hard == unix.RLIM_INFINITY)) {
if limit == unix.RLIM_INFINITY && rlim != nil && (rlim.Soft != unix.RLIM_INFINITY || rlim.Hard != unix.RLIM_INFINITY) {
t.Fatalf("wasn't supposed to set limit on number of open files: %#v", rlim)
}
if limit != unix.RLIM_INFINITY && rlim == nil {
@ -327,6 +334,7 @@ func TestProcessRlimits(t *testing.T) {
}
func TestProcessNoNewPrivileges(t *testing.T) {
t.Parallel()
if unix.Getuid() != 0 {
t.Skip("tests need to be run as root")
}
@ -348,6 +356,7 @@ func TestProcessNoNewPrivileges(t *testing.T) {
}
func TestProcessOOMScoreAdj(t *testing.T) {
t.Parallel()
if unix.Getuid() != 0 {
t.Skip("tests need to be run as root")
}
@ -370,6 +379,7 @@ func TestProcessOOMScoreAdj(t *testing.T) {
}
func TestHostname(t *testing.T) {
t.Parallel()
if unix.Getuid() != 0 {
t.Skip("tests need to be run as root")
}
@ -387,6 +397,7 @@ func TestHostname(t *testing.T) {
}
func TestMounts(t *testing.T) {
t.Parallel()
if unix.Getuid() != 0 {
t.Skip("tests need to be run as root")
}
@ -547,6 +558,7 @@ func TestMounts(t *testing.T) {
}
func TestLinuxIDMapping(t *testing.T) {
t.Parallel()
if unix.Getuid() != 0 {
t.Skip("tests need to be run as root")
}
@ -584,6 +596,7 @@ func TestLinuxIDMapping(t *testing.T) {
}
func TestLinuxIDMappingShift(t *testing.T) {
t.Parallel()
if unix.Getuid() != 0 {
t.Skip("tests need to be run as root")
}

View File

@ -4,16 +4,12 @@ package chroot
import (
"fmt"
"os"
"github.com/containers/common/pkg/seccomp"
specs "github.com/opencontainers/runtime-spec/specs-go"
libseccomp "github.com/seccomp/libseccomp-golang"
"github.com/sirupsen/logrus"
)
const seccompAvailable = true
// setSeccomp sets the seccomp filter for ourselves and any processes that we'll start.
func setSeccomp(spec *specs.Spec) error {
logrus.Debugf("setting seccomp configuration")
@ -178,27 +174,3 @@ func setSeccomp(spec *specs.Spec) error {
}
return nil
}
func setupSeccomp(spec *specs.Spec, seccompProfilePath string) error {
switch seccompProfilePath {
case "unconfined":
spec.Linux.Seccomp = nil
case "":
seccompConfig, err := seccomp.GetDefaultProfile(spec)
if err != nil {
return fmt.Errorf("loading default seccomp profile failed: %w", err)
}
spec.Linux.Seccomp = seccompConfig
default:
seccompProfile, err := os.ReadFile(seccompProfilePath)
if err != nil {
return fmt.Errorf("opening seccomp profile failed: %w", err)
}
seccompConfig, err := seccomp.LoadProfile(string(seccompProfile), spec)
if err != nil {
return fmt.Errorf("loading seccomp profile (%s) failed: %w", seccompProfilePath, err)
}
spec.Linux.Seccomp = seccompConfig
}
return nil
}

37
chroot/seccomp_test.go Normal file
View File

@ -0,0 +1,37 @@
//go:build linux && seccomp
package chroot
import (
"fmt"
"os"
"github.com/containers/common/pkg/seccomp"
specs "github.com/opencontainers/runtime-spec/specs-go"
)
const seccompAvailable = true
func setupSeccomp(spec *specs.Spec, seccompProfilePath string) error {
switch seccompProfilePath {
case "unconfined":
spec.Linux.Seccomp = nil
case "":
seccompConfig, err := seccomp.GetDefaultProfile(spec)
if err != nil {
return fmt.Errorf("loading default seccomp profile failed: %w", err)
}
spec.Linux.Seccomp = seccompConfig
default:
seccompProfile, err := os.ReadFile(seccompProfilePath)
if err != nil {
return fmt.Errorf("opening seccomp profile failed: %w", err)
}
seccompConfig, err := seccomp.LoadProfile(string(seccompProfile), spec)
if err != nil {
return fmt.Errorf("loading seccomp profile (%s) failed: %w", seccompProfilePath, err)
}
spec.Linux.Seccomp = seccompConfig
}
return nil
}
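To make the three cases above concrete, here is a hedged sketch of how setupSeccomp could be exercised. Since setupSeccomp is unexported, a test like this would have to live in the chroot package and carry the same build tags; the spec values are illustrative only and not taken from the PR.

//go:build linux && seccomp

package chroot

import (
    "testing"

    specs "github.com/opencontainers/runtime-spec/specs-go"
    "github.com/stretchr/testify/assert"
    "github.com/stretchr/testify/require"
)

func TestSetupSeccompSketch(t *testing.T) {
    // "unconfined" clears any filter already present in the spec.
    spec := &specs.Spec{Linux: &specs.Linux{Seccomp: &specs.LinuxSeccomp{}}}
    require.NoError(t, setupSeccomp(spec, "unconfined"))
    assert.Nil(t, spec.Linux.Seccomp)

    // "" loads the default profile from containers/common.
    spec = &specs.Spec{
        Linux:   &specs.Linux{},
        Process: &specs.Process{Capabilities: &specs.LinuxCapabilities{}},
    }
    require.NoError(t, setupSeccomp(spec, ""))
    assert.NotNil(t, spec.Linux.Seccomp)

    // Anything else is treated as a path to a JSON profile on disk,
    // so a nonexistent path should fail.
    assert.Error(t, setupSeccomp(spec, "/no/such/profile.json"))
}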

View File

@ -8,19 +8,9 @@ import (
"github.com/opencontainers/runtime-spec/specs-go"
)
const seccompAvailable = false
func setSeccomp(spec *specs.Spec) error {
if spec.Linux.Seccomp != nil {
return errors.New("configured a seccomp filter without seccomp support?")
}
return nil
}
func setupSeccomp(spec *specs.Spec, seccompProfilePath string) error {
if spec.Linux != nil {
// runtime-tools may have supplied us with a default filter
spec.Linux.Seccomp = nil
}
return nil
}

View File

@ -0,0 +1,17 @@
//go:build (!linux && !freebsd) || !seccomp
package chroot
import (
"github.com/opencontainers/runtime-spec/specs-go"
)
const seccompAvailable = false
func setupSeccomp(spec *specs.Spec, _ string) error {
if spec.Linux != nil {
// runtime-tools may have supplied us with a default filter
spec.Linux.Seccomp = nil
}
return nil
}

View File

@ -7,7 +7,6 @@ import (
"github.com/opencontainers/runtime-spec/specs-go"
selinux "github.com/opencontainers/selinux/go-selinux"
"github.com/opencontainers/selinux/go-selinux/label"
"github.com/sirupsen/logrus"
)
@ -15,7 +14,7 @@ import (
func setSelinuxLabel(spec *specs.Spec) error {
logrus.Debugf("setting selinux label")
if spec.Process.SelinuxLabel != "" && selinux.GetEnabled() {
if err := label.SetProcessLabel(spec.Process.SelinuxLabel); err != nil {
if err := selinux.SetExecLabel(spec.Process.SelinuxLabel); err != nil {
return fmt.Errorf("setting process label to %q: %w", spec.Process.SelinuxLabel, err)
}
}

View File

@ -5,6 +5,7 @@ import (
"fmt"
"os"
"path/filepath"
"strconv"
"strings"
"time"
@ -38,6 +39,9 @@ type addCopyResults struct {
retry int
retryDelay string
excludes []string
parents bool
timestamp string
link bool
}
func createCommand(addCopy string, desc string, short string, opts *addCopyResults) *cobra.Command {
@ -71,6 +75,7 @@ func applyFlagVars(flags *pflag.FlagSet, opts *addCopyResults) {
flags.StringVar(&opts.chown, "chown", "", "set the user and group ownership of the destination content")
flags.StringVar(&opts.chmod, "chmod", "", "set the access permissions of the destination content")
flags.StringVar(&opts.creds, "creds", "", "use `[username[:password]]` for accessing registries when pulling images")
flags.BoolVar(&opts.link, "link", false, "enable layer caching for this operation (creates an independent layer)")
if err := flags.MarkHidden("creds"); err != nil {
panic(fmt.Sprintf("error marking creds as hidden: %v", err))
}
@ -94,6 +99,7 @@ func applyFlagVars(flags *pflag.FlagSet, opts *addCopyResults) {
if err := flags.MarkHidden("signature-policy"); err != nil {
panic(fmt.Sprintf("error marking signature-policy as hidden: %v", err))
}
flags.StringVar(&opts.timestamp, "timestamp", "", "set timestamps on new content to `seconds` after the epoch")
}
func init() {
@ -116,6 +122,7 @@ func init() {
copyFlags := copyCommand.Flags()
applyFlagVars(copyFlags, &copyOpts)
copyFlags.BoolVar(&copyOpts.parents, "parents", false, "preserve leading directories in the paths of items being copied")
rootCmd.AddCommand(addCommand)
rootCmd.AddCommand(copyCommand)
@ -233,6 +240,16 @@ func addAndCopyCmd(c *cobra.Command, args []string, verb string, iopts addCopyRe
builder.ContentDigester.Restart()
var timestamp *time.Time
if iopts.timestamp != "" {
u, err := strconv.ParseInt(iopts.timestamp, 10, 64)
if err != nil {
return fmt.Errorf("parsing timestamp value %q: %w", iopts.timestamp, err)
}
t := time.Unix(u, 0).UTC()
timestamp = &t
}
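The --timestamp value parsed above is whole seconds after the Unix epoch, converted to a UTC time before being stored in AddAndCopyOptions. A standalone sketch of the same conversion (1672531200 is just an example value, not from the PR):

package main

import (
    "fmt"
    "strconv"
    "time"
)

func main() {
    arg := "1672531200" // e.g. what a user might pass to --timestamp
    u, err := strconv.ParseInt(arg, 10, 64)
    if err != nil {
        fmt.Println("parsing timestamp value:", err)
        return
    }
    t := time.Unix(u, 0).UTC()
    // 1672531200 seconds after the epoch is 2023-01-01T00:00:00Z
    fmt.Println(t.Format(time.RFC3339))
}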
options := buildah.AddAndCopyOptions{
Chmod: iopts.chmod,
Chown: iopts.chown,
@ -246,6 +263,9 @@ func addAndCopyCmd(c *cobra.Command, args []string, verb string, iopts addCopyRe
CertPath: systemContext.DockerCertPath,
InsecureSkipTLSVerify: systemContext.DockerInsecureSkipTLSVerify,
MaxRetries: iopts.retry,
Parents: iopts.parents,
Timestamp: timestamp,
Link: iopts.link,
}
if iopts.contextdir != "" {
var excludes []string

View File

@ -5,11 +5,13 @@ import (
"errors"
"fmt"
"os"
"strconv"
"strings"
"time"
"github.com/containers/buildah"
"github.com/containers/buildah/define"
"github.com/containers/buildah/internal"
"github.com/containers/buildah/pkg/cli"
"github.com/containers/buildah/pkg/parse"
"github.com/containers/buildah/util"
@ -39,6 +41,8 @@ type commitInputOptions struct {
manifest string
omitTimestamp bool
timestamp int64
sourceDateEpoch string
rewriteTimestamp bool
quiet bool
referenceTime string
rm bool
@ -62,6 +66,9 @@ type commitInputOptions struct {
encryptLayers []int
unsetenvs []string
addFile []string
unsetAnnotation []string
annotation []string
createdAnnotation bool
}
func init() {
@ -117,7 +124,14 @@ func commitListFlagSet(cmd *cobra.Command, opts *commitInputOptions) {
flags.StringVar(&opts.iidfile, "iidfile", "", "write the image ID to the file")
_ = cmd.RegisterFlagCompletionFunc("iidfile", completion.AutocompleteDefault)
flags.BoolVar(&opts.omitTimestamp, "omit-timestamp", false, "set created timestamp to epoch 0 to allow for deterministic builds")
flags.Int64Var(&opts.timestamp, "timestamp", 0, "set created timestamp to epoch seconds to allow for deterministic builds, defaults to current time")
sourceDateEpochUsageDefault := "current time"
if v := os.Getenv(internal.SourceDateEpochName); v != "" {
sourceDateEpochUsageDefault = fmt.Sprintf("%q", v)
}
flags.StringVar(&opts.sourceDateEpoch, "source-date-epoch", os.Getenv(internal.SourceDateEpochName), "set new timestamps in image info to `seconds` after the epoch, defaults to "+sourceDateEpochUsageDefault)
_ = cmd.RegisterFlagCompletionFunc("source-date-epoch", completion.AutocompleteNone)
flags.BoolVar(&opts.rewriteTimestamp, "rewrite-timestamp", false, "set timestamps in layers to no later than the value for --source-date-epoch")
flags.Int64Var(&opts.timestamp, "timestamp", 0, "set new timestamps in image info and layer to `seconds` after the epoch, defaults to current times")
_ = cmd.RegisterFlagCompletionFunc("timestamp", completion.AutocompleteNone)
flags.BoolVarP(&opts.quiet, "quiet", "q", false, "don't output progress information when writing images")
flags.StringVar(&opts.referenceTime, "reference-time", "", "set the timestamp on the image to match the named `file`")
@ -176,6 +190,11 @@ func commitListFlagSet(cmd *cobra.Command, opts *commitInputOptions) {
flags.StringSliceVar(&opts.unsetenvs, "unsetenv", nil, "unset env from final image")
_ = cmd.RegisterFlagCompletionFunc("unsetenv", completion.AutocompleteNone)
flags.StringSliceVar(&opts.unsetAnnotation, "unsetannotation", nil, "unset annotation when inheriting annotations from base image")
_ = cmd.RegisterFlagCompletionFunc("unsetannotation", completion.AutocompleteNone)
flags.StringArrayVar(&opts.annotation, "annotation", []string{}, "set metadata for an image (default [])")
_ = cmd.RegisterFlagCompletionFunc("annotation", completion.AutocompleteNone)
flags.BoolVar(&opts.createdAnnotation, "created-annotation", true, `set an "org.opencontainers.image.created" annotation in the image`)
}
func commitCmd(c *cobra.Command, args []string, iopts commitInputOptions) error {
@ -244,11 +263,6 @@ func commitCmd(c *cobra.Command, args []string, iopts commitInputOptions) error
}
}
// Add builder identity information.
if iopts.identityLabel {
builder.SetLabel(buildah.BuilderIdentityAnnotation, define.Version)
}
encConfig, encLayers, err := cli.EncryptConfig(iopts.encryptionKeys, iopts.encryptLayers)
if err != nil {
return fmt.Errorf("unable to obtain encryption config: %w", err)
@ -305,6 +319,9 @@ func commitCmd(c *cobra.Command, args []string, iopts commitInputOptions) error
OverrideChanges: iopts.changes,
OverrideConfig: overrideConfig,
ExtraImageContent: addFiles,
UnsetAnnotations: iopts.unsetAnnotation,
Annotations: iopts.annotation,
CreatedAnnotation: types.NewOptionalBool(iopts.createdAnnotation),
}
exclusiveFlags := 0
if c.Flag("reference-time").Changed {
@ -317,6 +334,16 @@ func commitCmd(c *cobra.Command, args []string, iopts commitInputOptions) error
timestamp := finfo.ModTime().UTC()
options.HistoryTimestamp = &timestamp
}
if iopts.sourceDateEpoch != "" {
exclusiveFlags++
sourceDateEpochVal, err := strconv.ParseInt(iopts.sourceDateEpoch, 10, 64)
if err != nil {
return fmt.Errorf("parsing source date epoch %q: %w", iopts.sourceDateEpoch, err)
}
sourceDateEpoch := time.Unix(sourceDateEpochVal, 0).UTC()
options.SourceDateEpoch = &sourceDateEpoch
}
options.RewriteTimestamp = iopts.rewriteTimestamp
if c.Flag("timestamp").Changed {
exclusiveFlags++
timestamp := time.Unix(iopts.timestamp, 0).UTC()
@ -327,6 +354,25 @@ func commitCmd(c *cobra.Command, args []string, iopts commitInputOptions) error
timestamp := time.Unix(0, 0).UTC()
options.HistoryTimestamp = &timestamp
}
if exclusiveFlags > 1 {
return errors.New("cannot use more then one timestamp option at at time")
}
// Add builder identity information.
var identityLabel types.OptionalBool
if c.Flag("identity-label").Changed {
identityLabel = types.NewOptionalBool(iopts.identityLabel)
}
switch identityLabel {
case types.OptionalBoolTrue:
builder.SetLabel(buildah.BuilderIdentityAnnotation, define.Version)
case types.OptionalBoolFalse:
// nothing - don't clear it if there's a value set in the base image
default:
if options.HistoryTimestamp == nil && options.SourceDateEpoch == nil {
builder.SetLabel(buildah.BuilderIdentityAnnotation, define.Version)
}
}
if iopts.cwOptions != "" {
confidentialWorkloadOptions, err := parse.GetConfidentialWorkloadOptions(iopts.cwOptions)
@ -352,10 +398,6 @@ func commitCmd(c *cobra.Command, args []string, iopts commitInputOptions) error
options.SBOMScanOptions = sbomOptions
}
if exclusiveFlags > 1 {
return errors.New("can not use more then one timestamp option at at time")
}
if !iopts.quiet {
options.ReportWriter = os.Stderr
}
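For context, --source-date-epoch follows the reproducible-builds convention: the flag defaults to the SOURCE_DATE_EPOCH environment variable (assuming that is what internal.SourceDateEpochName resolves to), the value is whole seconds after the epoch, and it counts toward the same "only one timestamp option at a time" check as --timestamp, --reference-time, and --omit-timestamp. A hedged sketch of a hypothetical helper with the same effective precedence:

package main

import (
    "fmt"
    "os"
    "strconv"
    "time"
)

// resolveSourceDateEpoch is a hypothetical helper: an explicit flag value
// wins, otherwise the SOURCE_DATE_EPOCH environment variable (assumed name)
// is used, otherwise nothing is pinned.
func resolveSourceDateEpoch(flagValue string) (*time.Time, error) {
    v := flagValue
    if v == "" {
        v = os.Getenv("SOURCE_DATE_EPOCH")
    }
    if v == "" {
        return nil, nil // leave timestamps alone
    }
    secs, err := strconv.ParseInt(v, 10, 64)
    if err != nil {
        return nil, fmt.Errorf("parsing source date epoch %q: %w", v, err)
    }
    t := time.Unix(secs, 0).UTC()
    return &t, nil
}

func main() {
    os.Setenv("SOURCE_DATE_EPOCH", "0")
    when, err := resolveSourceDateEpoch("")
    if err != nil {
        panic(err)
    }
    fmt.Println(when) // 1970-01-01 00:00:00 +0000 UTC
}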

View File

@ -5,12 +5,9 @@ import (
"errors"
"fmt"
"os"
"time"
"github.com/containers/buildah"
"github.com/containers/common/pkg/umask"
"github.com/containers/image/v5/image"
"github.com/containers/image/v5/manifest"
is "github.com/containers/image/v5/storage"
"github.com/containers/image/v5/types"
"github.com/containers/storage"
@ -154,46 +151,6 @@ func openImage(ctx context.Context, sc *types.SystemContext, store storage.Store
return builder, nil
}
func getDateAndDigestAndSize(ctx context.Context, sys *types.SystemContext, store storage.Store, storeImage storage.Image) (time.Time, string, int64, error) {
created := time.Time{}
is.Transport.SetStore(store)
storeRef, err := is.Transport.ParseStoreReference(store, storeImage.ID)
if err != nil {
return created, "", -1, err
}
img, err := storeRef.NewImageSource(ctx, nil)
if err != nil {
return created, "", -1, err
}
defer img.Close()
imgSize, sizeErr := store.ImageSize(storeImage.ID)
if sizeErr != nil {
imgSize = -1
}
manifestBytes, _, manifestErr := img.GetManifest(ctx, nil)
manifestDigest := ""
if manifestErr == nil && len(manifestBytes) > 0 {
mDigest, err := manifest.Digest(manifestBytes)
manifestErr = err
if manifestErr == nil {
manifestDigest = mDigest.String()
}
}
inspectable, inspectableErr := image.FromUnparsedImage(ctx, sys, image.UnparsedInstance(img, nil))
if inspectableErr == nil {
inspectInfo, inspectErr := inspectable.Inspect(ctx)
if inspectErr == nil && inspectInfo != nil && inspectInfo.Created != nil {
created = *inspectInfo.Created
}
}
if sizeErr != nil {
err = sizeErr
} else if manifestErr != nil {
err = manifestErr
}
return created, manifestDigest, imgSize, err
}
// getContext returns a context.TODO
func getContext() context.Context {
return context.TODO()

View File

@ -7,8 +7,6 @@ import (
"testing"
"github.com/containers/buildah"
"github.com/containers/buildah/define"
is "github.com/containers/image/v5/storage"
"github.com/containers/image/v5/types"
"github.com/containers/storage"
"github.com/sirupsen/logrus"
@ -77,31 +75,6 @@ func TestGetStore(t *testing.T) {
}
}
func TestGetSize(t *testing.T) {
// Make sure the tests are running as root
failTestIfNotRoot(t)
store, err := storage.GetStore(storeOptions)
if err != nil {
t.Fatal(err)
} else if store != nil {
is.Transport.SetStore(store)
}
// Pull an image so that we know we have at least one
pullTestImage(t)
images, err := store.Images()
if err != nil {
t.Fatalf("Error reading images: %v", err)
}
_, _, _, err = getDateAndDigestAndSize(getContext(), &testSystemContext, store, images[0])
if err != nil {
t.Error(err)
}
}
func failTestIfNotRoot(t *testing.T) {
u, err := user.Current()
if err != nil {
@ -110,30 +83,3 @@ func failTestIfNotRoot(t *testing.T) {
t.Fatal("tests will fail unless run as root")
}
}
func pullTestImage(t *testing.T) string {
store, err := storage.GetStore(storeOptions)
if err != nil {
t.Fatal(err)
}
commonOpts := &define.CommonBuildOptions{
LabelOpts: nil,
}
options := buildah.BuilderOptions{
FromImage: "busybox:latest",
SignaturePolicyPath: signaturePolicyPath,
CommonBuildOpts: commonOpts,
SystemContext: &testSystemContext,
}
b, err := buildah.NewBuilder(getContext(), store, options)
if err != nil {
t.Fatal(err)
}
id := b.FromImageID
err = b.Delete()
if err != nil {
t.Fatal(err)
}
return id
}

View File

@ -49,6 +49,7 @@ type configResults struct {
volume []string
workingDir string
unsetLabels []string
unsetAnnotations []string
}
func init() {
@ -102,6 +103,7 @@ func init() {
flags.StringSliceVarP(&opts.volume, "volume", "v", []string{}, "add default `volume` path to be created for containers based on image (default [])")
flags.StringVar(&opts.workingDir, "workingdir", "", "set working `directory` for containers based on image")
flags.StringSliceVar(&opts.unsetLabels, "unsetlabel", nil, "remove image configuration label")
flags.StringSliceVar(&opts.unsetAnnotations, "unsetannotation", nil, "remove image configuration annotation")
rootCmd.AddCommand(configCommand)
}
@ -154,7 +156,7 @@ func updateEntrypoint(builder *buildah.Builder, entrypoint string) {
builder.SetEntrypoint(entrypointSpec)
}
func conditionallyAddHistory(builder *buildah.Builder, c *cobra.Command, createdByFmt string, args ...interface{}) {
func conditionallyAddHistory(builder *buildah.Builder, c *cobra.Command, createdByFmt string, args ...any) {
history := buildahcli.DefaultHistory()
if c.Flag("add-history").Changed {
history, _ = c.Flags().GetBool("add-history")
@ -309,6 +311,10 @@ func updateConfig(builder *buildah.Builder, c *cobra.Command, iopts configResult
for _, key := range iopts.unsetLabels {
builder.UnsetLabel(key)
}
// unset annotation if any
for _, key := range iopts.unsetAnnotations {
builder.UnsetAnnotation(key)
}
if c.Flag("workingdir").Changed {
builder.SetWorkDir(iopts.workingDir)
conditionallyAddHistory(builder, c, "/bin/sh -c #(nop) WORKDIR %s", iopts.workingDir)

View File

@ -4,15 +4,12 @@ import (
"encoding/json"
"errors"
"fmt"
"os"
"regexp"
"strings"
"text/template"
"github.com/containers/buildah"
"github.com/containers/buildah/define"
"github.com/containers/buildah/pkg/formats"
"github.com/containers/buildah/util"
"github.com/containers/common/pkg/formats"
"github.com/containers/storage"
"github.com/spf13/cobra"
)
@ -253,35 +250,15 @@ func outputContainers(store storage.Store, opts containerOptions, params *contai
return nil
}
func containersToGeneric(templParams []containerOutputParams) (genericParams []interface{}) {
func containersToGeneric(templParams []containerOutputParams) (genericParams []any) {
if len(templParams) > 0 {
for _, v := range templParams {
genericParams = append(genericParams, interface{}(v))
genericParams = append(genericParams, any(v))
}
}
return genericParams
}
func containerOutputUsingTemplate(format string, params containerOutputParams) error {
if matched, err := regexp.MatchString("{{.*}}", format); err != nil {
return fmt.Errorf("validating format provided: %s: %w", format, err)
} else if !matched {
return fmt.Errorf("invalid format provided: %s", format)
}
tmpl, err := template.New("container").Parse(format)
if err != nil {
return fmt.Errorf("Template parsing error: %w", err)
}
err = tmpl.Execute(os.Stdout, params)
if err != nil {
return err
}
fmt.Println()
return nil
}
func containerOutputUsingFormatString(truncate bool, params containerOutputParams) {
if truncate {
fmt.Printf("%-12.12s %-8s %-12.12s %-32s %s\n", params.ContainerID, params.Builder, params.ImageID, util.TruncateString(params.ImageName, 32), params.ContainerName)

View File

@ -5,66 +5,9 @@ import (
"fmt"
"io"
"os"
"strings"
"testing"
)
func TestContainerTemplateOutputValidFormat(t *testing.T) {
params := containerOutputParams{
ContainerID: "e477836657bb",
Builder: " ",
ImageID: "f975c5035748",
ImageName: "test/image:latest",
ContainerName: "test-container",
}
formatString := "Container ID: {{.ContainerID}}"
expectedString := "Container ID: " + params.ContainerID
output, err := captureOutputWithError(func() error {
return containerOutputUsingTemplate(formatString, params)
})
if err != nil {
t.Error(err)
} else if strings.TrimSpace(output) != expectedString {
t.Errorf("Errorf with template output:\nExpected: %s\nReceived: %s\n", expectedString, output)
}
}
func TestContainerTemplateOutputInvalidFormat(t *testing.T) {
params := containerOutputParams{
ContainerID: "e477836657bb",
Builder: " ",
ImageID: "f975c5035748",
ImageName: "test/image:latest",
ContainerName: "test-container",
}
formatString := "ContainerID"
err := containerOutputUsingTemplate(formatString, params)
if err == nil || err.Error() != "invalid format provided: ContainerID" {
t.Fatalf("expected error invalid format")
}
}
func TestContainerTemplateOutputNonexistentField(t *testing.T) {
params := containerOutputParams{
ContainerID: "e477836657bb",
Builder: " ",
ImageID: "f975c5035748",
ImageName: "test/image:latest",
ContainerName: "test-container",
}
formatString := "{{.ID}}"
err := containerOutputUsingTemplate(formatString, params)
if err == nil || !strings.Contains(err.Error(), "can't evaluate field ID") {
t.Fatalf("expected error nonexistent field")
}
}
func TestContainerFormatStringOutput(t *testing.T) {
params := containerOutputParams{
ContainerID: "e477836657bb",
@ -110,25 +53,6 @@ func TestContainerHeaderOutput(t *testing.T) {
}
}
func captureOutputWithError(f func() error) (string, error) {
old := os.Stdout
r, w, err := os.Pipe()
if err != nil {
return "", err
}
os.Stdout = w
if err := f(); err != nil {
return "", err
}
w.Close()
os.Stdout = old
var buf bytes.Buffer
io.Copy(&buf, r) //nolint
return buf.String(), err
}
// Captures output so that it can be compared to expected values
func captureOutput(f func()) string {
old := os.Stdout

View File

@ -10,9 +10,9 @@ import (
"time"
buildahcli "github.com/containers/buildah/pkg/cli"
"github.com/containers/buildah/pkg/formats"
"github.com/containers/buildah/pkg/parse"
"github.com/containers/common/libimage"
"github.com/containers/common/pkg/formats"
"github.com/docker/go-units"
"github.com/spf13/cobra"
)
@ -189,7 +189,7 @@ func imagesCmd(c *cobra.Command, args []string, iopts *imageResults) error {
func outputHeader(opts imageOptions) string {
if opts.format != "" {
return strings.Replace(opts.format, `\t`, "\t", -1)
return strings.ReplaceAll(opts.format, `\t`, "\t")
}
if opts.quiet {
return formats.IDString
@ -327,10 +327,10 @@ func truncateID(id string, truncate bool) string {
return id
}
func imagesToGeneric(templParams []imageOutputParams) (genericParams []interface{}) {
func imagesToGeneric(templParams []imageOutputParams) (genericParams []any) {
if len(templParams) > 0 {
for _, v := range templParams {
genericParams = append(genericParams, interface{}(v))
genericParams = append(genericParams, any(v))
}
}
return genericParams

View File

@ -5,6 +5,7 @@ import (
)
func TestSizeFormatting(t *testing.T) {
t.Parallel()
size := formattedSize(0)
if size != "0 B" {
t.Errorf("Error formatting size: expected '%s' got '%s'", "0 B", size)
@ -22,6 +23,7 @@ func TestSizeFormatting(t *testing.T) {
}
func TestMatchWithTag(t *testing.T) {
t.Parallel()
isMatch := matchesReference("gcr.io/pause:latest", "pause:latest")
if !isMatch {
t.Error("expected match, got not match")
@ -34,6 +36,7 @@ func TestMatchWithTag(t *testing.T) {
}
func TestNoMatchesReferenceWithTag(t *testing.T) {
t.Parallel()
isMatch := matchesReference("gcr.io/pause:latest", "redis:latest")
if isMatch {
t.Error("expected no match, got match")
@ -46,6 +49,7 @@ func TestNoMatchesReferenceWithTag(t *testing.T) {
}
func TestMatchesReferenceWithoutTag(t *testing.T) {
t.Parallel()
isMatch := matchesReference("gcr.io/pause:latest", "pause")
if !isMatch {
t.Error("expected match, got not match")
@ -58,6 +62,7 @@ func TestMatchesReferenceWithoutTag(t *testing.T) {
}
func TestNoMatchesReferenceWithoutTag(t *testing.T) {
t.Parallel()
isMatch := matchesReference("gcr.io/pause:latest", "redis")
if isMatch {
t.Error("expected no match, got match")

View File

@ -6,10 +6,10 @@ import (
"os"
"regexp"
"runtime"
"text/template"
"github.com/containers/buildah"
"github.com/containers/buildah/define"
"github.com/containers/common/pkg/formats"
"github.com/spf13/cobra"
"golang.org/x/term"
)
@ -43,7 +43,7 @@ func init() {
}
func infoCmd(c *cobra.Command, iopts infoResults) error {
info := map[string]interface{}{}
info := map[string]any{}
store, err := getStore(c)
if err != nil {
@ -71,9 +71,9 @@ func infoCmd(c *cobra.Command, iopts infoResults) error {
} else if !matched {
return fmt.Errorf("invalid format provided: %s", format)
}
t, err := template.New("format").Parse(format)
t, err := formats.NewParse("info", format)
if err != nil {
return fmt.Errorf("Template parsing error: %w", err)
return fmt.Errorf("template parsing error: %w", err)
}
if err = t.Execute(os.Stdout, info); err != nil {
return err
@ -92,8 +92,8 @@ func infoCmd(c *cobra.Command, iopts infoResults) error {
}
// top-level "debug" info
func debugInfo() map[string]interface{} {
info := map[string]interface{}{}
func debugInfo() map[string]any {
info := map[string]any{}
info["compiler"] = runtime.Compiler
info["go version"] = runtime.Version()
info["buildah version"] = define.Version

View File

@ -6,11 +6,11 @@ import (
"fmt"
"os"
"regexp"
"text/template"
"github.com/containers/buildah"
buildahcli "github.com/containers/buildah/pkg/cli"
"github.com/containers/buildah/pkg/parse"
"github.com/containers/common/pkg/formats"
"github.com/spf13/cobra"
"golang.org/x/term"
)
@ -113,9 +113,9 @@ func inspectCmd(c *cobra.Command, args []string, iopts inspectResults) error {
} else if !matched {
return fmt.Errorf("invalid format provided: %s", format)
}
t, err := template.New("format").Parse(format)
t, err := formats.NewParse("inspect", format)
if err != nil {
return fmt.Errorf("Template parsing error: %w", err)
return fmt.Errorf("template parsing error: %w", err)
}
if err = t.Execute(os.Stdout, out); err != nil {
return err

View File

@ -85,8 +85,8 @@ func init() {
defaultContainerConfig.CheckCgroupsAndAdjustConfig()
cobra.OnInitialize(initConfig)
// Disable the implicit `completion` command in cobra.
rootCmd.CompletionOptions.DisableDefaultCmd = true
// Hide the implicit `completion` command in cobra.
rootCmd.CompletionOptions.HiddenDefaultCmd = true
// rootCmd.TraverseChildren = true
rootCmd.Version = fmt.Sprintf("%s (image-spec %s, runtime-spec %s)", define.Version, ispecs.Version, rspecs.Version)
rootCmd.PersistentFlags().BoolVar(&globalFlagResults.Debug, "debug", false, "print debugging information")

View File

@ -17,6 +17,7 @@ import (
"github.com/containers/common/libimage/manifests"
"github.com/containers/common/pkg/auth"
cp "github.com/containers/image/v5/copy"
"github.com/containers/image/v5/image"
"github.com/containers/image/v5/manifest"
"github.com/containers/image/v5/pkg/compression"
"github.com/containers/image/v5/transports"
@ -26,7 +27,6 @@ import (
"github.com/hashicorp/go-multierror"
digest "github.com/opencontainers/go-digest"
imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1"
v1 "github.com/opencontainers/image-spec/specs-go/v1"
"github.com/sirupsen/logrus"
"github.com/spf13/cobra"
)
@ -147,7 +147,7 @@ func init() {
flags.StringVar(&manifestAddOpts.artifactConfigType, "artifact-config-type", imgspecv1.DescriptorEmptyJSON.MediaType, "artifact config media type")
flags.StringVar(&manifestAddOpts.artifactConfigFile, "artifact-config", "", "artifact config file")
flags.StringVar(&manifestAddOpts.artifactLayerType, "artifact-layer-type", "", "artifact layer media type")
flags.BoolVar(&manifestAddOpts.artifactExcludeTitles, "artifact-exclude-titles", false, fmt.Sprintf(`refrain from setting %q annotations on "layers"`, v1.AnnotationTitle))
flags.BoolVar(&manifestAddOpts.artifactExcludeTitles, "artifact-exclude-titles", false, fmt.Sprintf(`refrain from setting %q annotations on "layers"`, imgspecv1.AnnotationTitle))
flags.StringVar(&manifestAddOpts.artifactSubject, "artifact-subject", "", "artifact subject reference")
flags.StringSliceVar(&manifestAddOpts.artifactAnnotations, "artifact-annotation", nil, "artifact annotation")
flags.StringVar(&manifestAddOpts.authfile, "authfile", auth.GetDefaultAuthFile(), "path of the authentication file. Use REGISTRY_AUTH_FILE environment variable to override")
@ -290,7 +290,7 @@ func init() {
func manifestExistsCmd(c *cobra.Command, args []string) error {
if len(args) == 0 {
return errors.New("At least a name must be specified for the list")
return errors.New("at least a name must be specified for the list")
}
name := args[0]
@ -321,7 +321,7 @@ func manifestExistsCmd(c *cobra.Command, args []string) error {
func manifestCreateCmd(c *cobra.Command, args []string, opts manifestCreateOpts) error {
if len(args) == 0 {
return errors.New("At least a name must be specified for the list")
return errors.New("at least a name must be specified for the list")
}
listImageSpec := args[0]
imageSpecs := args[1:]
@ -432,21 +432,21 @@ func manifestAddCmd(c *cobra.Command, args []string, opts manifestAddOpts) error
artifactSpec := []string{}
switch len(args) {
case 0, 1:
return errors.New("At least a list image and an image or artifact to add must be specified")
return errors.New("at least a list image and an image or artifact to add must be specified")
default:
listImageSpec = args[0]
if listImageSpec == "" {
return fmt.Errorf("Invalid image name %q", args[0])
return fmt.Errorf("invalid image name %q", args[0])
}
if opts.artifact {
artifactSpec = args[1:]
} else {
if len(args) > 2 {
return errors.New("Too many arguments: expected list and image add to list")
return errors.New("too many arguments: expected list and image add to list")
}
imageSpec = args[1]
if imageSpec == "" {
return fmt.Errorf("Invalid image name %q", args[1])
return fmt.Errorf("invalid image name %q", args[1])
}
}
}
@ -634,18 +634,18 @@ func manifestRemoveCmd(c *cobra.Command, args []string, _ manifestRemoveOpts) er
var instanceSpec string
switch len(args) {
case 0, 1:
return errors.New("At least a list image and one or more instance digests must be specified")
return errors.New("at least a list image and one or more instance digests must be specified")
case 2:
listImageSpec = args[0]
if listImageSpec == "" {
return fmt.Errorf(`Invalid image name "%s"`, args[0])
return fmt.Errorf(`invalid image name "%s"`, args[0])
}
instanceSpec = args[1]
if instanceSpec == "" {
return fmt.Errorf(`Invalid instance "%s"`, args[1])
return fmt.Errorf(`invalid instance "%s"`, args[1])
}
default:
return errors.New("At least two arguments are necessary: list and digest of instance to remove from list")
return errors.New("at least two arguments are necessary: list and digest of instance to remove from list")
}
store, err := getStore(c)
@ -676,23 +676,23 @@ func manifestRemoveCmd(c *cobra.Command, args []string, _ manifestRemoveOpts) er
if err != nil {
if instanceRef, err = alltransports.ParseImageName(util.DefaultTransport + instanceSpec); err != nil {
if instanceRef, _, err = util.FindImage(store, "", systemContext, instanceSpec); err != nil {
return fmt.Errorf(`Invalid instance "%s": %v`, instanceSpec, err)
return fmt.Errorf(`invalid instance "%s": %v`, instanceSpec, err)
}
}
}
ctx := getContext()
instanceImg, err := instanceRef.NewImageSource(ctx, systemContext)
if err != nil {
return fmt.Errorf("Reading image instance: %w", err)
return fmt.Errorf("reading image instance: %w", err)
}
defer instanceImg.Close()
manifestBytes, _, err := instanceImg.GetManifest(ctx, nil)
manifestBytes, _, err := image.UnparsedInstance(instanceImg, nil).Manifest(ctx)
if err != nil {
return fmt.Errorf("Reading image instance manifest: %w", err)
return fmt.Errorf("reading image instance manifest: %w", err)
}
d, err = manifest.Digest(manifestBytes)
if err != nil {
return fmt.Errorf("Digesting image instance manifest: %w", err)
return fmt.Errorf("digesting image instance manifest: %w", err)
}
}
instanceDigest = d
@ -751,29 +751,29 @@ func manifestAnnotateCmd(c *cobra.Command, args []string, opts manifestAnnotateO
}
switch len(args) {
case 0:
return errors.New("At least a list image must be specified")
return errors.New("at least a list image must be specified")
case 1:
listImageSpec = args[0]
if listImageSpec == "" {
return fmt.Errorf(`Invalid image name "%s"`, args[0])
return fmt.Errorf(`invalid image name "%s"`, args[0])
}
if !opts.index {
return errors.New(`Expected an instance digest, image name, or artifact name`)
return errors.New(`expected an instance digest, image name, or artifact name`)
}
case 2:
listImageSpec = args[0]
if listImageSpec == "" {
return fmt.Errorf(`Invalid image name "%s"`, args[0])
return fmt.Errorf(`invalid image name "%s"`, args[0])
}
if opts.index {
return fmt.Errorf(`Did not expect image or artifact name "%s" when modifying the entire index`, args[1])
return fmt.Errorf(`did not expect image or artifact name "%s" when modifying the entire index`, args[1])
}
instanceSpec = args[1]
if instanceSpec == "" {
return fmt.Errorf(`Invalid instance digest, image name, or artifact name "%s"`, instanceSpec)
return fmt.Errorf(`invalid instance digest, image name, or artifact name "%s"`, instanceSpec)
}
default:
return errors.New("Expected either a list name and --index or a list name and an image digest or image name or artifact name")
return errors.New("expected either a list name and --index or a list name and an image digest or image name or artifact name")
}
store, err := getStore(c)
@ -816,23 +816,23 @@ func manifestAnnotateCmd(c *cobra.Command, args []string, opts manifestAnnotateO
if instanceRef, err = alltransports.ParseImageName(util.DefaultTransport + instanceSpec); err != nil {
// check if the local image exists
if instanceRef, _, err = util.FindImage(store, "", systemContext, instanceSpec); err != nil {
return fmt.Errorf(`Invalid instance "%s": %v`, instanceSpec, err)
return fmt.Errorf(`invalid instance "%s": %v`, instanceSpec, err)
}
}
}
ctx := getContext()
instanceImg, err := instanceRef.NewImageSource(ctx, systemContext)
if err != nil {
return fmt.Errorf("Reading image instance: %w", err)
return fmt.Errorf("reading image instance: %w", err)
}
defer instanceImg.Close()
manifestBytes, _, err := instanceImg.GetManifest(ctx, nil)
manifestBytes, _, err := image.UnparsedInstance(instanceImg, nil).Manifest(ctx)
if err != nil {
return fmt.Errorf("Reading image instance manifest: %w", err)
return fmt.Errorf("reading image instance manifest: %w", err)
}
d, err = manifest.Digest(manifestBytes)
if err != nil {
return fmt.Errorf("Digesting image instance manifest: %w", err)
return fmt.Errorf("digesting image instance manifest: %w", err)
}
}
instance = d
@ -922,7 +922,7 @@ func manifestAnnotateCmd(c *cobra.Command, args []string, opts manifestAnnotateO
}
defer src.Close()
manifestBytes, manifestType, err := src.GetManifest(ctx, nil)
manifestBytes, manifestType, err := image.UnparsedInstance(src, nil).Manifest(ctx)
if err != nil {
logrus.Errorf("Error while trying to read artifact subject manifest: %v", err)
return err
@ -963,14 +963,14 @@ func manifestInspectCmd(c *cobra.Command, args []string, opts manifestInspectOpt
imageSpec := ""
switch len(args) {
case 0:
return errors.New("At least a source list ID must be specified")
return errors.New("at least a source list ID must be specified")
case 1:
imageSpec = args[0]
if imageSpec == "" {
return fmt.Errorf(`Invalid image name "%s"`, imageSpec)
return fmt.Errorf(`invalid image name "%s"`, imageSpec)
}
default:
return errors.New("Only one argument is necessary for inspect: an image name")
return errors.New("only one argument is necessary for inspect: an image name")
}
store, err := getStore(c)
@ -1063,7 +1063,7 @@ func manifestInspect(ctx context.Context, store storage.Store, systemContext *ty
}
defer src.Close()
manifestBytes, manifestType, err := src.GetManifest(ctx, nil)
manifestBytes, manifestType, err := image.UnparsedInstance(src, nil).Manifest(ctx)
if err != nil {
appendErr(fmt.Errorf("loading manifest %q: %w", transports.ImageName(ref), err))
continue
@ -1092,7 +1092,7 @@ func manifestPushCmd(c *cobra.Command, args []string, opts pushOptions) error {
destSpec := ""
switch len(args) {
case 0:
return errors.New("At least a source list ID must be specified")
return errors.New("at least a source list ID must be specified")
case 1:
listImageSpec = args[0]
destSpec = "docker://" + listImageSpec
@ -1100,7 +1100,7 @@ func manifestPushCmd(c *cobra.Command, args []string, opts pushOptions) error {
listImageSpec = args[0]
destSpec = args[1]
default:
return errors.New("Only two arguments are necessary to push: source and destination")
return errors.New("only two arguments are necessary to push: source and destination")
}
if listImageSpec == "" {
return fmt.Errorf(`invalid image name "%s"`, listImageSpec)

View File

@ -1,34 +0,0 @@
package main
import (
"fmt"
"github.com/spf13/cobra"
"golang.org/x/crypto/bcrypt"
)
var (
passwdDescription = `Generate a password hash using golang.org/x/crypto/bcrypt.`
passwdCommand = &cobra.Command{
Use: "passwd",
Short: "Generate a password hash",
Long: passwdDescription,
RunE: passwdCmd,
Example: `buildah passwd testpassword`,
Args: cobra.ExactArgs(1),
Hidden: true,
}
)
func passwdCmd(_ *cobra.Command, args []string) error {
passwd, err := bcrypt.GenerateFromPassword([]byte(args[0]), bcrypt.DefaultCost)
if err != nil {
return err
}
fmt.Println(string(passwd))
return nil
}
func init() {
rootCmd.AddCommand(passwdCommand)
}

View File

@ -135,7 +135,7 @@ func pushCmd(c *cobra.Command, args []string, iopts pushOptions) error {
return fmt.Errorf(`invalid image name "%s"`, args[0])
}
default:
return errors.New("Only two arguments are necessary to push: source and destination")
return errors.New("only two arguments are necessary to push: source and destination")
}
compress := define.Gzip

View File

@ -47,7 +47,7 @@ func renameCmd(c *cobra.Command, args []string) error {
}
if build, err := openBuilder(getContext(), store, newName); err == nil {
return fmt.Errorf("The container name %q is already in use by container %q", newName, build.ContainerID)
return fmt.Errorf("the container name %q is already in use by container %q", newName, build.ContainerID)
}
err = store.SetNames(builder.ContainerID, []string{newName})

View File

@ -30,10 +30,8 @@ func init() {
}
func umountCmd(c *cobra.Command, args []string) error {
umountAll := false
if c.Flag("all").Changed {
umountAll = true
}
umountAll := c.Flag("all").Changed
umountContainerErrStr := "error unmounting container"
if len(args) == 0 && !umountAll {
return errors.New("at least one container ID must be specified")

View File

@ -30,9 +30,10 @@ import (
)
const (
// BuilderIdentityAnnotation is the name of the annotation key containing
// the name and version of the producer of the image stored as an
// annotation on commit.
// BuilderIdentityAnnotation is the name of the label which will be set
// to contain the name and version of the producer of the image at
// commit-time. (N.B. yes, the constant's name includes "Annotation",
// but it's added as a label.)
BuilderIdentityAnnotation = "io.buildah.version"
)
@ -58,9 +59,20 @@ type CommitOptions struct {
// ReportWriter is an io.Writer which will be used to log the writing
// of the new image.
ReportWriter io.Writer
// HistoryTimestamp is the timestamp used when creating new items in the
// image's history. If unset, the current time will be used.
// HistoryTimestamp specifies a timestamp to use for the image's
// created-on date, the corresponding field in new history entries, and
// the timestamps to set on contents in new layer diffs. If left
// unset, the current time is used for the configuration and manifest,
// and timestamps of layer contents are used as-is.
HistoryTimestamp *time.Time
// SourceDateEpoch specifies a timestamp to use for the image's
// created-on date and the corresponding field in new history entries.
// If left unset, the current time is used for the configuration and
// manifest.
SourceDateEpoch *time.Time
// RewriteTimestamp, if set, forces timestamps in generated layers to
// not be later than the SourceDateEpoch, if it is set.
RewriteTimestamp bool
// github.com/containers/image/types SystemContext to hold credentials
// and other authentication/authorization information.
SystemContext *types.SystemContext
@ -82,9 +94,18 @@ type CommitOptions struct {
// EmptyLayer tells the builder to omit the diff for the working
// container.
EmptyLayer bool
// OmitLayerHistoryEntry tells the builder to omit the diff for the
// working container and to not add an entry in the commit history. By
// default, the rest of the image's history is preserved, subject to
// the OmitHistory setting. N.B.: setting this flag, without any
// PrependedEmptyLayers, AppendedEmptyLayers, PrependedLinkedLayers, or
// AppendedLinkedLayers will more or less produce a copy of the base
// image.
OmitLayerHistoryEntry bool
// OmitTimestamp forces epoch 0 as created timestamp to allow for
// deterministic, content-addressable builds.
// Deprecated use HistoryTimestamp instead.
// Deprecated: use HistoryTimestamp or SourceDateEpoch (possibly with
// RewriteTimestamp) instead.
OmitTimestamp bool
// SignBy is the fingerprint of a GPG key to use for signing the image.
SignBy string
@ -110,7 +131,8 @@ type CommitOptions struct {
// contents of a rootfs.
ConfidentialWorkloadOptions ConfidentialWorkloadOptions
// UnsetEnvs is a list of environments to not add to final image.
// Deprecated: use UnsetEnv() before committing instead.
// Deprecated: use UnsetEnv() before committing, or set OverrideChanges
// instead.
UnsetEnvs []string
// OverrideConfig is an optional Schema2Config which can override parts
// of the working container's configuration for the image that is being
@ -134,6 +156,11 @@ type CommitOptions struct {
// the image in Docker format. Newer BuildKit-based builds don't set
// this field.
CompatSetParent types.OptionalBool
// CompatLayerOmissions causes the "/dev", "/proc", and "/sys"
// directories to be omitted from the layer diff and related output, as
// the classic builder did. Newer BuildKit-based builds include them
// in the built image by default.
CompatLayerOmissions types.OptionalBool
// PrependedLinkedLayers and AppendedLinkedLayers are combinations of
// history entries and locations of either directory trees (if
// directories, per os.Stat()) or uncompressed layer blobs which should
@ -142,6 +169,15 @@ type CommitOptions struct {
// corresponding members in the Builder object, in the committed image
// is not guaranteed.
PrependedLinkedLayers, AppendedLinkedLayers []LinkedLayer
// UnsetAnnotations is a list of annotations (names only) to withhold
// from the image.
UnsetAnnotations []string
// Annotations is a list of annotations (in the form "key=value") to
// add to the image.
Annotations []string
// CreatedAnnotation controls whether or not an "org.opencontainers.image.created"
// annotation is present in the output image.
CreatedAnnotation types.OptionalBool
}
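Putting the new CommitOptions fields together, here is a hedged sketch of a reproducible commit from library code, assuming the caller already has a Builder, a destination reference, and a context (none of this is taken from the PR itself): SourceDateEpoch pins the created-on date and history entries, RewriteTimestamp additionally clamps file timestamps inside newly generated layer diffs, and CreatedAnnotation can be switched off to drop the created annotation entirely.

package example

import (
    "context"
    "time"

    "github.com/containers/buildah"
    "github.com/containers/image/v5/types"
    v1 "github.com/opencontainers/image-spec/specs-go/v1"
)

// commitReproducibly is a hypothetical helper: it commits the working
// container with all image timestamps pinned to the given epoch.
func commitReproducibly(ctx context.Context, b *buildah.Builder, dest types.ImageReference, epoch time.Time) (string, error) {
    options := buildah.CommitOptions{
        PreferredManifestType: v1.MediaTypeImageManifest,
        SourceDateEpoch:       &epoch, // pins the created-on date and history entries
        RewriteTimestamp:      true,   // clamps file timestamps in new layer diffs to the epoch
        CreatedAnnotation:     types.NewOptionalBool(false), // omit org.opencontainers.image.created
    }
    imgID, _, _, err := b.Commit(ctx, dest, options)
    return imgID, err
}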
// LinkedLayer combines a history entry with the location of either a directory
@ -274,8 +310,9 @@ func (b *Builder) addManifest(ctx context.Context, manifestName string, imageSpe
// if commit was successful and the image destination was local.
func (b *Builder) Commit(ctx context.Context, dest types.ImageReference, options CommitOptions) (string, reference.Canonical, digest.Digest, error) {
var (
imgID string
src types.ImageReference
imgID string
src types.ImageReference
destinationTimestamp *time.Time
)
// If we weren't given a name, build a destination reference using a
@ -288,11 +325,15 @@ func (b *Builder) Commit(ctx context.Context, dest types.ImageReference, options
// work twice.
if options.OmitTimestamp {
if options.HistoryTimestamp != nil {
return imgID, nil, "", fmt.Errorf("OmitTimestamp ahd HistoryTimestamp can not be used together")
return imgID, nil, "", fmt.Errorf("OmitTimestamp and HistoryTimestamp can not be used together")
}
timestamp := time.Unix(0, 0).UTC()
options.HistoryTimestamp = &timestamp
}
destinationTimestamp = options.HistoryTimestamp
if options.SourceDateEpoch != nil {
destinationTimestamp = options.SourceDateEpoch
}
nameToRemove := ""
if dest == nil {
nameToRemove = stringid.GenerateRandomID() + "-tmp"
@ -415,7 +456,7 @@ func (b *Builder) Commit(ctx context.Context, dest types.ImageReference, options
}
var manifestBytes []byte
if manifestBytes, err = retryCopyImage(ctx, policyContext, maybeCachedDest, maybeCachedSrc, dest, getCopyOptions(b.store, options.ReportWriter, nil, systemContext, "", false, options.SignBy, options.OciEncryptLayers, options.OciEncryptConfig, nil), options.MaxRetries, options.RetryDelay); err != nil {
if manifestBytes, err = retryCopyImage(ctx, policyContext, maybeCachedDest, maybeCachedSrc, dest, getCopyOptions(b.store, options.ReportWriter, nil, systemContext, "", false, options.SignBy, options.OciEncryptLayers, options.OciEncryptConfig, nil, destinationTimestamp), options.MaxRetries, options.RetryDelay); err != nil {
return imgID, nil, "", fmt.Errorf("copying layers and metadata for container %q: %w", b.ContainerID, err)
}
// If we've got more names to attach, and we know how to do that for

View File

@ -4,6 +4,7 @@ import (
"archive/tar"
"context"
"crypto/rand"
"encoding/json"
"fmt"
"io"
"os"
@ -11,16 +12,42 @@ import (
"testing"
"time"
"github.com/containers/image/v5/manifest"
ociLayout "github.com/containers/image/v5/oci/layout"
imageStorage "github.com/containers/image/v5/storage"
"github.com/containers/image/v5/transports"
"github.com/containers/image/v5/types"
"github.com/containers/storage"
"github.com/containers/storage/pkg/archive"
storageTypes "github.com/containers/storage/types"
digest "github.com/opencontainers/go-digest"
v1 "github.com/opencontainers/image-spec/specs-go/v1"
rspec "github.com/opencontainers/runtime-spec/specs-go"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
func makeFile(t *testing.T, base string, size int64) string {
t.Helper()
fn := filepath.Join(t.TempDir(), base)
f, err := os.Create(fn)
require.NoError(t, err)
defer f.Close()
if size == 0 {
size = 512
}
_, err = io.CopyN(f, rand.Reader, size)
require.NoErrorf(t, err, "writing payload file %d", base)
return f.Name()
}
func TestCommitLinkedLayers(t *testing.T) {
// This test cannot be parallelized: it uses NewBuilder(), which
// eventually and indirectly accesses a global variable defined in
// `go-selinux`. Either that must be fixed in `go-selinux`, or the
// builder must add some sort of locking so that while one goroutine
// is creating a Builder, the others wait for it.
// Tracked here: https://github.com/containers/buildah/issues/5967
ctx := context.TODO()
now := time.Now()
@ -28,6 +55,7 @@ func TestCommitLinkedLayers(t *testing.T) {
if graphDriverName == "" {
graphDriverName = "vfs"
}
t.Logf("using storage driver %q", graphDriverName)
store, err := storage.GetStore(storageTypes.StoreOptions{
RunRoot: t.TempDir(),
GraphRoot: t.TempDir(),
@ -38,17 +66,7 @@ func TestCommitLinkedLayers(t *testing.T) {
imageName := func(i int) string { return fmt.Sprintf("image%d", i) }
makeFile := func(base string, size int64) string {
t.Helper()
fn := filepath.Join(t.TempDir(), base)
f, err := os.Create(fn)
require.NoError(t, err)
defer f.Close()
if size == 0 {
size = 512
}
_, err = io.CopyN(f, rand.Reader, size)
require.NoErrorf(t, err, "writing payload file %d", base)
return f.Name()
return makeFile(t, base, size)
}
makeArchive := func(base string, size int64) string {
t.Helper()
@ -168,9 +186,9 @@ func TestCommitLinkedLayers(t *testing.T) {
}
b.AddAppendedLinkedLayer(nil, imageName(layerNumber+6), "", "", ninthArchiveFile)
ref, err = imageStorage.Transport.ParseStoreReference(store, imageName(layerNumber))
require.NoError(t, err, "parsing reference for to-be-committed image", imageName(layerNumber))
require.NoErrorf(t, err, "parsing reference for to-be-committed image %q", imageName(layerNumber))
_, _, _, err = b.Commit(ctx, ref, commitOptions)
require.NoError(t, err, "committing", imageName(layerNumber))
require.NoErrorf(t, err, "committing %q", imageName(layerNumber))
// Build one last image based on the previous one.
builderOptions.FromImage = imageName(layerNumber)
@ -236,3 +254,317 @@ func TestCommitLinkedLayers(t *testing.T) {
}()
}
}
func TestCommitCompression(t *testing.T) {
// This test cannot be parallelized: it uses NewBuilder(), which
// eventually and indirectly accesses a global variable defined in
// `go-selinux`. Either that must be fixed in `go-selinux`, or the
// builder must add some sort of locking so that while one goroutine
// is creating a Builder, the others wait for it.
// Tracked here: https://github.com/containers/buildah/issues/5967
ctx := context.TODO()
graphDriverName := os.Getenv("STORAGE_DRIVER")
if graphDriverName == "" {
graphDriverName = "vfs"
}
t.Logf("using storage driver %q", graphDriverName)
store, err := storage.GetStore(storageTypes.StoreOptions{
RunRoot: t.TempDir(),
GraphRoot: t.TempDir(),
GraphDriverName: graphDriverName,
})
require.NoError(t, err, "initializing storage")
t.Cleanup(func() { _, err := store.Shutdown(true); assert.NoError(t, err) })
builderOptions := BuilderOptions{
FromImage: "scratch",
NamespaceOptions: []NamespaceOption{{
Name: string(rspec.NetworkNamespace),
Host: true,
}},
SystemContext: &testSystemContext,
}
b, err := NewBuilder(ctx, store, builderOptions)
require.NoError(t, err, "creating builder")
payload := makeFile(t, "file0", 0)
b.SetCreatedBy("ADD file0 in /")
err = b.Add("/", false, AddAndCopyOptions{}, payload)
require.NoError(t, err, "adding", payload)
for _, compressor := range []struct {
compression archive.Compression
name string
expectError bool
layerMediaType string
}{
{archive.Uncompressed, "uncompressed", false, v1.MediaTypeImageLayer},
{archive.Gzip, "gzip", false, v1.MediaTypeImageLayerGzip},
{archive.Bzip2, "bz2", true, ""},
{archive.Xz, "xz", true, ""},
{archive.Zstd, "zstd", false, v1.MediaTypeImageLayerZstd},
} {
t.Run(compressor.name, func(t *testing.T) {
var ref types.ImageReference
commitOptions := CommitOptions{
PreferredManifestType: v1.MediaTypeImageManifest,
SystemContext: &testSystemContext,
Compression: compressor.compression,
}
imageName := compressor.name
ref, err := imageStorage.Transport.ParseStoreReference(store, imageName)
require.NoErrorf(t, err, "parsing reference for to-be-committed local image %q", imageName)
_, _, _, err = b.Commit(ctx, ref, commitOptions)
if compressor.expectError {
require.Errorf(t, err, "committing local image %q", imageName)
} else {
require.NoErrorf(t, err, "committing local image %q", imageName)
}
imageName = t.TempDir()
ref, err = ociLayout.Transport.ParseReference(imageName)
require.NoErrorf(t, err, "parsing reference for to-be-committed oci layout %q", imageName)
_, _, _, err = b.Commit(ctx, ref, commitOptions)
if compressor.expectError {
require.Errorf(t, err, "committing oci layout %q", imageName)
return
}
require.NoErrorf(t, err, "committing oci layout %q", imageName)
src, err := ref.NewImageSource(ctx, &testSystemContext)
require.NoErrorf(t, err, "reading oci layout %q", imageName)
defer src.Close()
manifestBytes, manifestType, err := src.GetManifest(ctx, nil)
require.NoErrorf(t, err, "reading manifest from oci layout %q", imageName)
require.Equalf(t, v1.MediaTypeImageManifest, manifestType, "manifest type from oci layout %q looked wrong", imageName)
parsedManifest, err := manifest.OCI1FromManifest(manifestBytes)
require.NoErrorf(t, err, "parsing manifest from oci layout %q", imageName)
require.Lenf(t, parsedManifest.Layers, 1, "expected exactly one layer in oci layout %q", imageName)
require.Equalf(t, compressor.layerMediaType, parsedManifest.Layers[0].MediaType, "expected the layer media type to reflect compression in oci layout %q", imageName)
blobReadCloser, _, err := src.GetBlob(ctx, types.BlobInfo{
Digest: parsedManifest.Layers[0].Digest,
MediaType: parsedManifest.Layers[0].MediaType,
}, nil)
require.NoErrorf(t, err, "reading the first layer from oci layout %q", imageName)
defer blobReadCloser.Close()
blob, err := io.ReadAll(blobReadCloser)
require.NoErrorf(t, err, "consuming the first layer from oci layout %q", imageName)
require.Equalf(t, compressor.compression, archive.DetectCompression(blob), "detected compression looks wrong for layer in oci layout %q", imageName)
})
}
}
func TestCommitEmpty(t *testing.T) {
// This test cannot be parallelized: it uses NewBuilder(), which
// eventually and indirectly accesses a global variable defined in
// `go-selinux`. Either that must be fixed in `go-selinux`, or the
// builder must add some sort of locking so that while one goroutine
// is creating a Builder, the others wait for it.
// Tracked here: https://github.com/containers/buildah/issues/5967
ctx := context.TODO()
graphDriverName := os.Getenv("STORAGE_DRIVER")
if graphDriverName == "" {
graphDriverName = "vfs"
}
t.Logf("using storage driver %q", graphDriverName)
store, err := storage.GetStore(storageTypes.StoreOptions{
RunRoot: t.TempDir(),
GraphRoot: t.TempDir(),
GraphDriverName: graphDriverName,
})
require.NoError(t, err, "initializing storage")
t.Cleanup(func() { _, err := store.Shutdown(true); assert.NoError(t, err) })
builderOptions := BuilderOptions{
FromImage: "scratch",
NamespaceOptions: []NamespaceOption{{
Name: string(rspec.NetworkNamespace),
Host: true,
}},
SystemContext: &testSystemContext,
}
b, err := NewBuilder(ctx, store, builderOptions)
require.NoError(t, err, "creating builder")
committedLayoutDir := t.TempDir()
committedRef, err := ociLayout.ParseReference(committedLayoutDir)
require.NoError(t, err, "parsing reference to where we're committing a basic image")
_, _, _, err = b.Commit(ctx, committedRef, CommitOptions{})
require.NoError(t, err, "committing with default settings")
committedImg, err := committedRef.NewImageSource(ctx, &testSystemContext)
require.NoError(t, err, "preparing to read committed image")
defer committedImg.Close()
committedManifestBytes, committedManifestType, err := committedImg.GetManifest(ctx, nil)
require.NoError(t, err, "reading manifest from committed image")
require.Equalf(t, v1.MediaTypeImageManifest, committedManifestType, "unexpected manifest type")
committedManifest, err := manifest.FromBlob(committedManifestBytes, committedManifestType)
require.NoError(t, err, "parsing manifest from committed image")
require.Equalf(t, 1, len(committedManifest.LayerInfos()), "expected one layer in manifest")
configReadCloser, _, err := committedImg.GetBlob(ctx, committedManifest.ConfigInfo(), nil)
require.NoError(t, err, "reading config blob from committed image")
defer configReadCloser.Close()
var committedImage v1.Image
err = json.NewDecoder(configReadCloser).Decode(&committedImage)
require.NoError(t, err, "parsing config blob from committed image")
require.Equalf(t, 1, len(committedImage.History), "expected one history entry")
require.Falsef(t, committedImage.History[0].EmptyLayer, "expected lone history entry to not be marked as an empty layer")
require.Equalf(t, 1, len(committedImage.RootFS.DiffIDs), "expected one rootfs layer")
t.Run("emptylayer", func(t *testing.T) {
options := CommitOptions{
EmptyLayer: true,
}
layoutDir := t.TempDir()
ref, err := ociLayout.ParseReference(layoutDir)
require.NoError(t, err, "parsing reference to image we're going to commit with EmptyLayer")
_, _, _, err = b.Commit(ctx, ref, options)
require.NoError(t, err, "committing with EmptyLayer = true")
img, err := ref.NewImageSource(ctx, &testSystemContext)
require.NoError(t, err, "preparing to read committed image")
defer img.Close()
manifestBytes, manifestType, err := img.GetManifest(ctx, nil)
require.NoError(t, err, "reading manifest from committed image")
require.Equalf(t, v1.MediaTypeImageManifest, manifestType, "unexpected manifest type")
parsedManifest, err := manifest.FromBlob(manifestBytes, manifestType)
require.NoError(t, err, "parsing manifest from committed image")
require.Zerof(t, len(parsedManifest.LayerInfos()), "expected no layers in manifest")
configReadCloser, _, err := img.GetBlob(ctx, parsedManifest.ConfigInfo(), nil)
require.NoError(t, err, "reading config blob from committed image")
defer configReadCloser.Close()
var image v1.Image
err = json.NewDecoder(configReadCloser).Decode(&image)
require.NoError(t, err, "parsing config blob from committed image")
require.Equalf(t, 1, len(image.History), "expected one history entry")
require.Truef(t, image.History[0].EmptyLayer, "expected lone history entry to be marked as an empty layer")
})
t.Run("omitlayerhistoryentry", func(t *testing.T) {
options := CommitOptions{
OmitLayerHistoryEntry: true,
}
layoutDir := t.TempDir()
ref, err := ociLayout.ParseReference(layoutDir)
require.NoError(t, err, "parsing reference to image we're going to commit with OmitLayerHistoryEntry")
_, _, _, err = b.Commit(ctx, ref, options)
require.NoError(t, err, "committing with OmitLayerHistoryEntry = true")
img, err := ref.NewImageSource(ctx, &testSystemContext)
require.NoError(t, err, "preparing to read committed image")
defer img.Close()
manifestBytes, manifestType, err := img.GetManifest(ctx, nil)
require.NoError(t, err, "reading manifest from committed image")
require.Equalf(t, v1.MediaTypeImageManifest, manifestType, "unexpected manifest type")
parsedManifest, err := manifest.FromBlob(manifestBytes, manifestType)
require.NoError(t, err, "parsing manifest from committed image")
require.Equalf(t, 0, len(parsedManifest.LayerInfos()), "expected no layers in manifest")
configReadCloser, _, err := img.GetBlob(ctx, parsedManifest.ConfigInfo(), nil)
require.NoError(t, err, "reading config blob from committed image")
defer configReadCloser.Close()
var image v1.Image
err = json.NewDecoder(configReadCloser).Decode(&image)
require.NoError(t, err, "parsing config blob from committed image")
require.Equalf(t, 0, len(image.History), "expected no history entries")
require.Equalf(t, 0, len(image.RootFS.DiffIDs), "expected no diff IDs")
})
builderOptions.FromImage = transports.ImageName(committedRef)
b, err = NewBuilder(ctx, store, builderOptions)
require.NoError(t, err, "creating builder from committed base image")
t.Run("derived-emptylayer", func(t *testing.T) {
options := CommitOptions{
EmptyLayer: true,
}
layoutDir := t.TempDir()
ref, err := ociLayout.ParseReference(layoutDir)
require.NoError(t, err, "parsing reference to image we're going to commit with EmptyLayer")
_, _, _, err = b.Commit(ctx, ref, options)
require.NoError(t, err, "committing with EmptyLayer = true")
img, err := ref.NewImageSource(ctx, &testSystemContext)
require.NoError(t, err, "preparing to read committed image")
defer img.Close()
manifestBytes, manifestType, err := img.GetManifest(ctx, nil)
require.NoError(t, err, "reading manifest from committed image")
require.Equalf(t, v1.MediaTypeImageManifest, manifestType, "unexpected manifest type")
parsedManifest, err := manifest.FromBlob(manifestBytes, manifestType)
require.NoError(t, err, "parsing manifest from committed image")
require.Equalf(t, len(committedManifest.LayerInfos()), len(parsedManifest.LayerInfos()), "expected no new layers in manifest")
configReadCloser, _, err := img.GetBlob(ctx, parsedManifest.ConfigInfo(), nil)
require.NoError(t, err, "reading config blob from committed image")
defer configReadCloser.Close()
var image v1.Image
err = json.NewDecoder(configReadCloser).Decode(&image)
require.NoError(t, err, "parsing config blob from committed image")
require.Equalf(t, len(committedImage.History)+1, len(image.History), "expected one new history entry")
require.Equalf(t, len(committedImage.RootFS.DiffIDs), len(image.RootFS.DiffIDs), "expected no new diff IDs")
require.Truef(t, image.History[1].EmptyLayer, "expected new history entry to be marked as an empty layer")
})
t.Run("derived-omitlayerhistoryentry", func(t *testing.T) {
options := CommitOptions{
OmitLayerHistoryEntry: true,
}
layoutDir := t.TempDir()
ref, err := ociLayout.ParseReference(layoutDir)
require.NoError(t, err, "parsing reference to image we're going to commit with OmitLayerHistoryEntry")
_, _, _, err = b.Commit(ctx, ref, options)
require.NoError(t, err, "committing with OmitLayerHistoryEntry = true")
img, err := ref.NewImageSource(ctx, &testSystemContext)
require.NoError(t, err, "preparing to read committed image")
defer img.Close()
manifestBytes, manifestType, err := img.GetManifest(ctx, nil)
require.NoError(t, err, "reading manifest from committed image")
require.Equalf(t, v1.MediaTypeImageManifest, manifestType, "unexpected manifest type")
parsedManifest, err := manifest.FromBlob(manifestBytes, manifestType)
require.NoError(t, err, "parsing manifest from committed image")
require.Equalf(t, len(committedManifest.LayerInfos()), len(parsedManifest.LayerInfos()), "expected no new layers in manifest")
configReadCloser, _, err := img.GetBlob(ctx, parsedManifest.ConfigInfo(), nil)
require.NoError(t, err, "reading config blob from committed image")
defer configReadCloser.Close()
var image v1.Image
err = json.NewDecoder(configReadCloser).Decode(&image)
require.NoError(t, err, "parsing config blob from committed image")
require.Equalf(t, len(committedImage.History), len(image.History), "expected no new history entry")
require.Equalf(t, len(committedImage.RootFS.DiffIDs), len(image.RootFS.DiffIDs), "expected no new diff IDs")
})
t.Run("derived-synthetic", func(t *testing.T) {
randomDir := t.TempDir()
randomFile, err := os.CreateTemp(randomDir, "file")
require.NoError(t, err, "creating a temporary file")
layerDigest := digest.Canonical.Digester()
_, err = io.CopyN(io.MultiWriter(layerDigest.Hash(), randomFile), rand.Reader, 512)
require.NoError(t, err, "writing a temporary file")
require.NoError(t, randomFile.Close(), "closing temporary file")
options := CommitOptions{
OmitLayerHistoryEntry: true,
AppendedLinkedLayers: []LinkedLayer{{
History: v1.History{
CreatedBy: "yolo",
}, // history entry to add
BlobPath: randomFile.Name(),
}},
}
layoutDir := t.TempDir()
ref, err := ociLayout.ParseReference(layoutDir)
require.NoErrorf(t, err, "parsing reference for to-be-committed image with externally-controlled changes")
_, _, _, err = b.Commit(ctx, ref, options)
require.NoError(t, err, "committing with OmitLayerHistoryEntry = true")
img, err := ref.NewImageSource(ctx, &testSystemContext)
require.NoError(t, err, "preparing to read committed image")
defer img.Close()
manifestBytes, manifestType, err := img.GetManifest(ctx, nil)
require.NoError(t, err, "reading manifest from committed image")
require.Equalf(t, v1.MediaTypeImageManifest, manifestType, "unexpected manifest type")
parsedManifest, err := manifest.FromBlob(manifestBytes, manifestType)
require.NoError(t, err, "parsing manifest from committed image")
require.Equalf(t, len(committedManifest.LayerInfos())+1, len(parsedManifest.LayerInfos()), "expected one new layer in manifest")
configReadCloser, _, err := img.GetBlob(ctx, parsedManifest.ConfigInfo(), nil)
require.NoError(t, err, "reading config blob from committed image")
defer configReadCloser.Close()
var image v1.Image
err = json.NewDecoder(configReadCloser).Decode(&image)
require.NoError(t, err, "decoding image config")
require.Equalf(t, len(committedImage.History)+1, len(image.History), "expected one new history entry")
require.Equalf(t, len(committedImage.RootFS.DiffIDs)+1, len(image.RootFS.DiffIDs), "expected one new diff ID")
require.Equalf(t, layerDigest.Digest(), image.RootFS.DiffIDs[len(image.RootFS.DiffIDs)-1], "expected new diff ID to match the randomly-generated layer")
})
}
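// Illustrative sketch (not part of the original source): how a caller might
// combine the CommitOptions fields exercised by the subtests above to commit
// an image whose only new layer is an externally-prepared blob. The function
// name, layoutDir, and blobPath are assumed placeholders.
func commitWithExternalLayer(ctx context.Context, b *Builder, layoutDir, blobPath string) error {
	ref, err := ociLayout.ParseReference(layoutDir)
	if err != nil {
		return err
	}
	options := CommitOptions{
		// don't add a layer, history entry, or diff ID for the working
		// container's own content (as the subtests above verify)
		OmitLayerHistoryEntry: true,
		// append the external blob, along with a history entry describing it
		AppendedLinkedLayers: []LinkedLayer{{
			History:  v1.History{CreatedBy: "external tooling"},
			BlobPath: blobPath,
		}},
	}
	_, _, _, err = b.Commit(ctx, ref, options)
	return err
}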

View File

@ -27,7 +27,7 @@ const (
DOCKER = define.DOCKER
)
func getCopyOptions(store storage.Store, reportWriter io.Writer, sourceSystemContext *types.SystemContext, destinationSystemContext *types.SystemContext, manifestType string, removeSignatures bool, addSigner string, ociEncryptLayers *[]int, ociEncryptConfig *encconfig.EncryptConfig, ociDecryptConfig *encconfig.DecryptConfig) *cp.Options {
func getCopyOptions(store storage.Store, reportWriter io.Writer, sourceSystemContext *types.SystemContext, destinationSystemContext *types.SystemContext, manifestType string, removeSignatures bool, addSigner string, ociEncryptLayers *[]int, ociEncryptConfig *encconfig.EncryptConfig, ociDecryptConfig *encconfig.DecryptConfig, destinationTimestamp *time.Time) *cp.Options {
sourceCtx := getSystemContext(store, nil, "")
if sourceSystemContext != nil {
*sourceCtx = *sourceSystemContext
@ -47,6 +47,7 @@ func getCopyOptions(store storage.Store, reportWriter io.Writer, sourceSystemCon
OciEncryptConfig: ociEncryptConfig,
OciDecryptConfig: ociDecryptConfig,
OciEncryptLayers: ociEncryptLayers,
DestinationTimestamp: destinationTimestamp,
}
}

View File

@ -41,6 +41,7 @@ func (ts *testRetryCopyImageWrappedStore) CreateImage(id string, names []string,
}
func TestRetryCopyImage(t *testing.T) {
t.Parallel()
ctx := context.TODO()
graphDriverName := os.Getenv("STORAGE_DRIVER")

View File

@ -6,11 +6,11 @@ import (
"fmt"
"maps"
"os"
"runtime"
"slices"
"strings"
"time"
"github.com/containerd/platforms"
"github.com/containers/buildah/define"
"github.com/containers/buildah/docker"
internalUtil "github.com/containers/buildah/internal/util"
@ -26,7 +26,7 @@ import (
// unmarshalConvertedConfig obtains the config blob of img valid for the wantedManifestMIMEType format
// (either as it exists, or converting the image if necessary), and unmarshals it into dest.
// NOTE: The MIME type is of the _manifest_, not of the _config_ that is returned.
func unmarshalConvertedConfig(ctx context.Context, dest interface{}, img types.Image, wantedManifestMIMEType string) error {
func unmarshalConvertedConfig(ctx context.Context, dest any, img types.Image, wantedManifestMIMEType string) error {
_, actualManifestMIMEType, err := img.Manifest(ctx)
if err != nil {
return fmt.Errorf("getting manifest MIME type for %q: %w", transports.ImageName(img.Reference()), err)
@ -96,9 +96,7 @@ func (b *Builder) initConfig(ctx context.Context, sys *types.SystemContext, img
if b.ImageAnnotations == nil {
b.ImageAnnotations = make(map[string]string, len(v1Manifest.Annotations))
}
for k, v := range v1Manifest.Annotations {
b.ImageAnnotations[k] = v
}
maps.Copy(b.ImageAnnotations, v1Manifest.Annotations)
}
}
} else {
@ -137,27 +135,21 @@ func (b *Builder) fixupConfig(sys *types.SystemContext) {
if b.OCIv1.Created == nil || b.OCIv1.Created.IsZero() {
b.OCIv1.Created = &now
}
currentPlatformSpecification := platforms.DefaultSpec()
if b.OS() == "" {
if sys != nil && sys.OSChoice != "" {
b.SetOS(sys.OSChoice)
} else {
b.SetOS(runtime.GOOS)
b.SetOS(currentPlatformSpecification.OS)
}
}
if b.Architecture() == "" {
if sys != nil && sys.ArchitectureChoice != "" {
b.SetArchitecture(sys.ArchitectureChoice)
} else {
b.SetArchitecture(runtime.GOARCH)
}
// in case the arch string we started with was shorthand for a known arch+variant pair, normalize it
ps := internalUtil.NormalizePlatform(ociv1.Platform{OS: b.OS(), Architecture: b.Architecture(), Variant: b.Variant()})
b.SetArchitecture(ps.Architecture)
b.SetVariant(ps.Variant)
}
if b.Variant() == "" {
if sys != nil && sys.VariantChoice != "" {
b.SetVariant(sys.VariantChoice)
} else {
b.SetArchitecture(currentPlatformSpecification.Architecture)
b.SetVariant(currentPlatformSpecification.Variant)
}
// in case the arch string we started with was shorthand for a known arch+variant pair, normalize it
ps := internalUtil.NormalizePlatform(ociv1.Platform{OS: b.OS(), Architecture: b.Architecture(), Variant: b.Variant()})

View File

@ -81,7 +81,7 @@ export CI_USE_REGISTRY_CACHE=true
# N/B: Don't include BUILDAH_ISOLATION, STORAGE_DRIVER, or CGROUP_MANAGER
# here because they will negatively affect execution of the rootless
# integration tests.
PASSTHROUGH_ENV_EXACT='DEST_BRANCH|DISTRO_NV|GOPATH|GOSRC|ROOTLESS_USER|SCRIPT_BASE|IN_PODMAN_IMAGE'
PASSTHROUGH_ENV_EXACT='BUILDAH_RUNTIME|DEST_BRANCH|DISTRO_NV|GOPATH|GOSRC|ROOTLESS_USER|SCRIPT_BASE|IN_PODMAN_IMAGE'
# List of envariable patterns which must match AT THE BEGINNING of the name.
PASSTHROUGH_ENV_ATSTART='CI|TEST'
@ -204,7 +204,7 @@ in_podman() {
-e "CGROUP_MANAGER=cgroupfs" \
-v "$HOME/auth:$HOME/auth:ro" \
-v /sys/fs/cgroup:/sys/fs/cgroup:rw \
-v /dev/fuse:/dev/fuse:rw \
--device /dev/fuse:rwm \
-v "$GOSRC:$GOSRC:z" \
--workdir "$GOSRC" \
"$@"
@ -296,11 +296,22 @@ setup_rootless() {
msg "************************************************************"
cd $GOSRC || exit 1
# Guarantee independence from specific values
rootless_uid=$[RANDOM+1000]
rootless_gid=$[RANDOM+1000]
msg "creating $rootless_uid:$rootless_gid $ROOTLESS_USER user"
rootless_uid=$((RANDOM+1000))
rootless_gid=$((RANDOM+1000))
rootless_supplemental_gid1=$((rootless_gid+1))
rootless_supplemental_gid2=$((rootless_supplemental_gid1+1))
rootless_supplemental_gid3=$((rootless_supplemental_gid2+1))
msg "creating $rootless_uid:$rootless_gid,$rootless_supplemental_gid1,$rootless_supplemental_gid2,$rootless_supplemental_gid3 $ROOTLESS_USER user"
groupadd -g $rootless_gid $ROOTLESS_USER
useradd -g $rootless_gid -u $rootless_uid --no-user-group --create-home $ROOTLESS_USER
groupadd -g $rootless_supplemental_gid1 ${ROOTLESS_USER}sg1
groupadd -g $rootless_supplemental_gid2 ${ROOTLESS_USER}sg2
groupadd -g $rootless_supplemental_gid3 ${ROOTLESS_USER}sg3
useradd -g $rootless_gid -G ${ROOTLESS_USER}sg1,${ROOTLESS_USER}sg2,${ROOTLESS_USER}sg3 -u $rootless_uid --no-user-group --create-home $ROOTLESS_USER
rootless_supplemental_gid4=$(awk 'BEGIN{FS=":"}/^rootlessuser:/{print $2+$3}' /etc/subgid)
groupadd -g $rootless_supplemental_gid4 ${ROOTLESS_USER}sg4
usermod -G ${ROOTLESS_USER}sg1,${ROOTLESS_USER}sg2,${ROOTLESS_USER}sg3,${ROOTLESS_USER}sg4 $ROOTLESS_USER
msg "running id for $ROOTLESS_USER"
id $ROOTLESS_USER
# We also set up rootless user for image-scp tests (running as root)
if [[ $PRIV_NAME = "rootless" ]]; then

View File

@ -67,7 +67,13 @@ else
showrun make validate
;;
unit)
showrun make test-unit
race=
if [[ -z "$CIRRUS_PR" ]]; then
# If not running on a PR then run unit tests
# with appropriate `-race` flags.
race="-race"
fi
showrun make test-unit RACEFLAGS=$race
;;
conformance)
# Typically it's undesirable to install packages at runtime.

View File

@ -66,6 +66,12 @@ func (d *dummyAttestationHandler) ServeHTTP(rw http.ResponseWriter, req *http.Re
}
func TestCWConvertImage(t *testing.T) {
// This test cannot be parallelized because it uses NewBuilder(),
// which eventually and indirectly accesses a global variable
// defined in `go-selinux`. This must be fixed in `go-selinux`,
// or the builder must add some sort of locking mechanism, i.e. if
// one goroutine is creating a Builder, the others must wait for it.
// Tracked here: https://github.com/containers/buildah/issues/5967
ctx := context.TODO()
for _, status := range []int{http.StatusOK, http.StatusInternalServerError} {
for _, ignoreChainRetrievalErrors := range []bool{false, true} {

View File

@ -13,6 +13,8 @@ import (
"os/user"
"path"
"path/filepath"
"slices"
"sort"
"strconv"
"strings"
"sync"
@ -47,6 +49,7 @@ func init() {
// "**" component in the pattern, filepath.Glob() will be called with the "**"
// replaced with all of the subdirectories under that point, and the results
// will be concatenated.
// The matched paths are returned in lexical order, which makes the output deterministic.
func extendedGlob(pattern string) (matches []string, err error) {
subdirs := func(dir string) []string {
var subdirectories []string
@ -69,12 +72,14 @@ func extendedGlob(pattern string) (matches []string, err error) {
components := []string{}
dir := pattern
file := ""
for dir != "" && dir != string(os.PathSeparator) {
for dir != filepath.VolumeName(dir) && dir != string(os.PathSeparator) {
dir, file = filepath.Split(dir)
components = append([]string{file}, components...)
if file != "" {
components = append([]string{file}, components...)
}
dir = strings.TrimSuffix(dir, string(os.PathSeparator))
}
patterns := []string{string(os.PathSeparator)}
patterns := []string{filepath.VolumeName(dir) + string(os.PathSeparator)}
for i := range components {
var nextPatterns []string
if components[i] == "**" {
@ -110,6 +115,7 @@ func extendedGlob(pattern string) (matches []string, err error) {
}
matches = append(matches, theseMatches...)
}
sort.Strings(matches)
return matches, nil
}
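// Illustrative in-package sketch (not part of the original source): since
// extendedGlob is unexported, a caller like this could only live in the copier
// package, e.g. in a test. It expands the "**" component against the
// subdirectories under root (assumed here to be an absolute path) and returns
// the concatenated matches, which the sort above guarantees are in lexical order.
func exampleExtendedGlob(root string) ([]string, error) {
	return extendedGlob(filepath.Join(root, "**", "*.conf"))
}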
@ -135,30 +141,34 @@ func isArchivePath(path string) bool {
type requestType string
const (
requestEval requestType = "EVAL"
requestStat requestType = "STAT"
requestGet requestType = "GET"
requestPut requestType = "PUT"
requestMkdir requestType = "MKDIR"
requestRemove requestType = "REMOVE"
requestQuit requestType = "QUIT"
requestEval requestType = "EVAL"
requestStat requestType = "STAT"
requestGet requestType = "GET"
requestPut requestType = "PUT"
requestMkdir requestType = "MKDIR"
requestRemove requestType = "REMOVE"
requestQuit requestType = "QUIT"
requestEnsure requestType = "ENSURE"
requestConditionalRemove requestType = "CONDRM"
)
// Request encodes a single request.
type request struct {
Request requestType
Root string // used by all requests
preservedRoot string
rootPrefix string // used to reconstruct paths being handed back to the caller
Directory string // used by all requests
preservedDirectory string
Globs []string `json:",omitempty"` // used by stat, get
preservedGlobs []string
StatOptions StatOptions `json:",omitempty"`
GetOptions GetOptions `json:",omitempty"`
PutOptions PutOptions `json:",omitempty"`
MkdirOptions MkdirOptions `json:",omitempty"`
RemoveOptions RemoveOptions `json:",omitempty"`
Request requestType
Root string // used by all requests
preservedRoot string
rootPrefix string // used to reconstruct paths being handed back to the caller
Directory string // used by all requests
preservedDirectory string
Globs []string `json:",omitempty"` // used by stat, get
preservedGlobs []string
StatOptions StatOptions `json:",omitempty"`
GetOptions GetOptions `json:",omitempty"`
PutOptions PutOptions `json:",omitempty"`
MkdirOptions MkdirOptions `json:",omitempty"`
RemoveOptions RemoveOptions `json:",omitempty"`
EnsureOptions EnsureOptions `json:",omitempty"`
ConditionalRemoveOptions ConditionalRemoveOptions `json:",omitempty"`
}
func (req *request) Excludes() []string {
@ -177,6 +187,10 @@ func (req *request) Excludes() []string {
return nil
case requestQuit:
return nil
case requestEnsure:
return nil
case requestConditionalRemove:
return nil
default:
panic(fmt.Sprintf("not an implemented request type: %q", req.Request))
}
@ -198,6 +212,10 @@ func (req *request) UIDMap() []idtools.IDMap {
return nil
case requestQuit:
return nil
case requestEnsure:
return req.EnsureOptions.UIDMap
case requestConditionalRemove:
return req.ConditionalRemoveOptions.UIDMap
default:
panic(fmt.Sprintf("not an implemented request type: %q", req.Request))
}
@ -219,6 +237,10 @@ func (req *request) GIDMap() []idtools.IDMap {
return nil
case requestQuit:
return nil
case requestEnsure:
return req.EnsureOptions.GIDMap
case requestConditionalRemove:
return req.ConditionalRemoveOptions.GIDMap
default:
panic(fmt.Sprintf("not an implemented request type: %q", req.Request))
}
@ -226,13 +248,15 @@ func (req *request) GIDMap() []idtools.IDMap {
// Response encodes a single response.
type response struct {
Error string `json:",omitempty"`
Stat statResponse `json:",omitempty"`
Eval evalResponse `json:",omitempty"`
Get getResponse `json:",omitempty"`
Put putResponse `json:",omitempty"`
Mkdir mkdirResponse `json:",omitempty"`
Remove removeResponse `json:",omitempty"`
Error string `json:",omitempty"`
Stat statResponse `json:",omitempty"`
Eval evalResponse `json:",omitempty"`
Get getResponse `json:",omitempty"`
Put putResponse `json:",omitempty"`
Mkdir mkdirResponse `json:",omitempty"`
Remove removeResponse `json:",omitempty"`
Ensure ensureResponse `json:",omitempty"`
ConditionalRemove conditionalRemoveResponse `json:",omitempty"`
}
// statResponse encodes a response for a single Stat request.
@ -279,6 +303,16 @@ type mkdirResponse struct{}
// removeResponse encodes a response for a single Remove request.
type removeResponse struct{}
// ensureResponse encodes a response to an Ensure request.
type ensureResponse struct {
Created []string // paths that were created because they weren't already present
}
// conditionalRemoveResponse encodes a response to a conditionalRemove request.
type conditionalRemoveResponse struct {
Removed []string // paths that were removed
}
// EvalOptions controls parts of Eval()'s behavior.
type EvalOptions struct{}
@ -328,7 +362,7 @@ func Stat(root string, directory string, options StatOptions, globs []string) ([
Request: requestStat,
Root: root,
Directory: directory,
Globs: append([]string{}, globs...),
Globs: slices.Clone(globs),
StatOptions: options,
}
resp, err := copier(nil, nil, req)
@ -350,6 +384,7 @@ type GetOptions struct {
ChmodDirs *os.FileMode // set permissions on directories. no effect on archives being extracted
ChownFiles *idtools.IDPair // set ownership of files. no effect on archives being extracted
ChmodFiles *os.FileMode // set permissions on files. no effect on archives being extracted
Parents bool // maintain the source's parent directories in the destination
StripSetuidBit bool // strip the setuid bit off of items being copied. no effect on archives being extracted
StripSetgidBit bool // strip the setgid bit off of items being copied. no effect on archives being extracted
StripStickyBit bool // strip the sticky bit off of items being copied. no effect on archives being extracted
@ -359,6 +394,7 @@ type GetOptions struct {
NoDerefSymlinks bool // don't follow symlinks when globs match them
IgnoreUnreadable bool // ignore errors reading items, instead of returning an error
NoCrossDevice bool // if a subdirectory is a mountpoint with a different device number, include it but skip its contents
Timestamp *time.Time // timestamp to force on all contents
}
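// Illustrative sketch (not part of the original source): using the new Parents
// and Timestamp fields when producing an archive. The out writer and the fixed
// timestamp are assumed placeholders; Get's trailing bulk-writer parameter
// (its name is truncated in the hunk header below) is assumed to be an
// io.Writer, which matches how the tests later call it with a pipe writer.
func exampleGetWithParents(root, directory string, out io.Writer) error {
	stamp := time.Unix(0, 0)
	options := GetOptions{
		Parents:   true,   // keep the matched items' parent directories in the archive
		Timestamp: &stamp, // force this mtime (and atime/ctime, where set) on all headers
	}
	return Get(root, directory, options, []string{"subdir-b/*"}, out)
}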
// Get produces an archive containing items that match the specified glob
@ -378,7 +414,7 @@ func Get(root string, directory string, options GetOptions, globs []string, bulk
Request: requestGet,
Root: root,
Directory: directory,
Globs: append([]string{}, globs...),
Globs: slices.Clone(globs),
StatOptions: StatOptions{
CheckForArchives: options.ExpandArchives,
},
@ -594,7 +630,7 @@ func copierWithoutSubprocess(bulkReader io.Reader, bulkWriter io.Writer, req req
req.preservedRoot = req.Root
req.rootPrefix = string(os.PathSeparator)
req.preservedDirectory = req.Directory
req.preservedGlobs = append([]string{}, req.Globs...)
req.preservedGlobs = slices.Clone(req.Globs)
if !filepath.IsAbs(req.Directory) {
req.Directory = filepath.Join(req.Root, cleanerReldirectory(req.Directory))
}
@ -697,9 +733,9 @@ func copierWithSubprocess(bulkReader io.Reader, bulkWriter io.Writer, req reques
bulkReaderRead = nil
bulkWriterWrite.Close()
bulkWriterWrite = nil
killAndReturn := func(err error, step string) (*response, error) { // nolint: unparam
killAndReturn := func(err error, step string) error {
if err2 := cmd.Process.Kill(); err2 != nil {
return nil, fmt.Errorf("killing subprocess: %v; %s: %w", err2, step, err)
return fmt.Errorf("killing subprocess: %v; %s: %w", err2, step, err)
}
if errors.Is(err, io.ErrClosedPipe) || errors.Is(err, syscall.EPIPE) {
err2 := cmd.Wait()
@ -707,22 +743,22 @@ func copierWithSubprocess(bulkReader io.Reader, bulkWriter io.Writer, req reques
err = fmt.Errorf("%s: %w", errorText, err)
}
if err2 != nil {
return nil, fmt.Errorf("waiting on subprocess: %v; %s: %w", err2, step, err)
return fmt.Errorf("waiting on subprocess: %v; %s: %w", err2, step, err)
}
}
return nil, fmt.Errorf("%v: %w", step, err)
return fmt.Errorf("%v: %w", step, err)
}
if err = encoder.Encode(req); err != nil {
return killAndReturn(err, "error encoding work request for copier subprocess")
return nil, killAndReturn(err, "error encoding work request for copier subprocess")
}
if err = decoder.Decode(&resp); err != nil {
if errors.Is(err, io.EOF) && errorBuffer.Len() > 0 {
return killAndReturn(errors.New(errorBuffer.String()), "error in copier subprocess")
return nil, killAndReturn(errors.New(errorBuffer.String()), "error in copier subprocess")
}
return killAndReturn(err, "error decoding response from copier subprocess")
return nil, killAndReturn(err, "error decoding response from copier subprocess")
}
if err = encoder.Encode(&request{Request: requestQuit}); err != nil {
return killAndReturn(err, "error encoding quit request for copier subprocess")
return nil, killAndReturn(err, "error encoding quit request for copier subprocess")
}
stdinWrite.Close()
stdinWrite = nil
@ -846,7 +882,7 @@ func copierMain() {
req.preservedRoot = req.Root
req.rootPrefix = string(os.PathSeparator)
req.preservedDirectory = req.Directory
req.preservedGlobs = append([]string{}, req.Globs...)
req.preservedGlobs = slices.Clone(req.Globs)
if chrooted {
// We'll need to adjust some things now that the root
// directory isn't what it was. Make the directory and
@ -948,6 +984,12 @@ func copierHandler(bulkReader io.Reader, bulkWriter io.Writer, req request) (*re
case requestRemove:
resp := copierHandlerRemove(req)
return resp, nil, nil
case requestEnsure:
resp := copierHandlerEnsure(req, idMappings)
return resp, nil, nil
case requestConditionalRemove:
resp := copierHandlerConditionalRemove(req, idMappings)
return resp, nil, nil
case requestQuit:
return nil, nil, nil
}
@ -971,7 +1013,7 @@ func pathIsExcluded(root, path string, pm *fileutils.PatternMatcher) (string, bo
// Matches uses filepath.FromSlash() to convert candidates before
// checking if they match the patterns it's been given, implying that
// it expects Unix-style paths.
matches, err := pm.Matches(filepath.ToSlash(rel)) // nolint:staticcheck
matches, err := pm.Matches(filepath.ToSlash(rel)) //nolint:staticcheck
if err != nil {
return rel, false, fmt.Errorf("copier: error checking if %q is excluded: %w", rel, err)
}
@ -1005,7 +1047,7 @@ func resolvePath(root, path string, evaluateFinalComponent bool, pm *fileutils.P
}
excluded = excluded || thisExcluded
if !excluded {
if target, err := os.Readlink(filepath.Join(workingPath, components[0])); err == nil && !(len(components) == 1 && !evaluateFinalComponent) {
if target, err := os.Readlink(filepath.Join(workingPath, components[0])); err == nil && (len(components) != 1 || evaluateFinalComponent) {
followed++
if followed > maxLoopsFollowed {
return "", &os.PathError{
@ -1048,7 +1090,7 @@ func resolvePath(root, path string, evaluateFinalComponent bool, pm *fileutils.P
}
func copierHandlerEval(req request) *response {
errorResponse := func(fmtspec string, args ...interface{}) *response {
errorResponse := func(fmtspec string, args ...any) *response {
return &response{Error: fmt.Sprintf(fmtspec, args...), Eval: evalResponse{}}
}
resolvedTarget, err := resolvePath(req.Root, req.Directory, true, nil)
@ -1059,7 +1101,7 @@ func copierHandlerEval(req request) *response {
}
func copierHandlerStat(req request, pm *fileutils.PatternMatcher) *response {
errorResponse := func(fmtspec string, args ...interface{}) *response {
errorResponse := func(fmtspec string, args ...any) *response {
return &response{Error: fmt.Sprintf(fmtspec, args...), Stat: statResponse{}}
}
if len(req.Globs) == 0 {
@ -1182,11 +1224,54 @@ func errorIsPermission(err error) bool {
return errors.Is(err, os.ErrPermission) || strings.Contains(err.Error(), "permission denied")
}
func getParents(path string, stopPath string) []string {
out := []string{}
for path != "/" && path != "." && path != stopPath {
path = filepath.Dir(path)
if path == stopPath {
continue
}
out = append(out, path)
}
slices.Reverse(out)
return out
}
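// For example (illustrative, not from the original source):
//
//	getParents("/a/b/c/file", "/a") == []string{"/a/b", "/a/b/c"}
//
// i.e. the ancestors of the item strictly between stopPath and the item
// itself, ordered from the top of the tree downward.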
func checkLinks(item string, req request, info os.FileInfo) (string, os.FileInfo, error) {
// chase links. if we hit a dead end, we should just fail
oldItem := item
followedLinks := 0
const maxFollowedLinks = 16
for !req.GetOptions.NoDerefSymlinks && info.Mode()&os.ModeType == os.ModeSymlink && followedLinks < maxFollowedLinks {
path, err := os.Readlink(item)
if err != nil {
return "", nil, fmt.Errorf("copier: get: readlink %q(%q): %w", oldItem, item, err)
}
if filepath.IsAbs(path) || looksLikeAbs(path) {
path = filepath.Join(req.Root, path)
} else {
path = filepath.Join(filepath.Dir(item), path)
}
item = path
if _, err = convertToRelSubdirectory(req.Root, item); err != nil {
return "", nil, fmt.Errorf("copier: get: computing path of %q(%q) relative to %q: %w", oldItem, item, req.Root, err)
}
if info, err = os.Lstat(item); err != nil {
return "", nil, fmt.Errorf("copier: get: lstat %q(%q): %w", oldItem, item, err)
}
followedLinks++
}
if followedLinks >= maxFollowedLinks {
return "", nil, fmt.Errorf("copier: get: resolving symlink %q(%q): %w", oldItem, item, syscall.ELOOP)
}
return item, info, nil
}
func copierHandlerGet(bulkWriter io.Writer, req request, pm *fileutils.PatternMatcher, idMappings *idtools.IDMappings) (*response, func() error, error) {
statRequest := req
statRequest.Request = requestStat
statResponse := copierHandlerStat(req, pm)
errorResponse := func(fmtspec string, args ...interface{}) (*response, func() error, error) {
errorResponse := func(fmtspec string, args ...any) (*response, func() error, error) {
return &response{Error: fmt.Sprintf(fmtspec, args...), Stat: statResponse.Stat, Get: getResponse{}}, nil, nil
}
if statResponse.Error != "" {
@ -1196,15 +1281,25 @@ func copierHandlerGet(bulkWriter io.Writer, req request, pm *fileutils.PatternMa
return errorResponse("copier: get: expected at least one glob pattern, got 0")
}
// build a queue of items by globbing
var queue []string
type queueItem struct {
glob string
parents []string
}
var queue []queueItem
globMatchedCount := 0
for _, glob := range req.Globs {
globMatched, err := extendedGlob(glob)
if err != nil {
return errorResponse("copier: get: glob %q: %v", glob, err)
}
globMatchedCount += len(globMatched)
queue = append(queue, globMatched...)
for _, path := range globMatched {
var parents []string
if req.GetOptions.Parents {
parents = getParents(path, req.Directory)
}
globMatchedCount++
queue = append(queue, queueItem{glob: path, parents: parents})
}
}
// no matches -> error
if len(queue) == 0 {
@ -1219,7 +1314,9 @@ func copierHandlerGet(bulkWriter io.Writer, req request, pm *fileutils.PatternMa
defer tw.Close()
hardlinkChecker := new(hardlinkChecker)
itemsCopied := 0
for i, item := range queue {
addedParents := map[string]struct{}{}
for i, qItem := range queue {
item := qItem.glob
// if we're not discarding the names of individual directories, keep track of this one
relNamePrefix := ""
if req.GetOptions.KeepDirectoryNames {
@ -1230,31 +1327,53 @@ func copierHandlerGet(bulkWriter io.Writer, req request, pm *fileutils.PatternMa
if err != nil {
return fmt.Errorf("copier: get: lstat %q: %w", item, err)
}
// chase links. if we hit a dead end, we should just fail
followedLinks := 0
const maxFollowedLinks = 16
for !req.GetOptions.NoDerefSymlinks && info.Mode()&os.ModeType == os.ModeSymlink && followedLinks < maxFollowedLinks {
path, err := os.Readlink(item)
if req.GetOptions.Parents && info.Mode().IsDir() {
if !slices.Contains(qItem.parents, item) {
qItem.parents = append(qItem.parents, item)
}
}
// Copy any parents into the tarball first, if they exist
for _, parent := range qItem.parents {
oldParent := parent
parentInfo, err := os.Lstat(parent)
if err != nil {
return fmt.Errorf("copier: get: lstat %q: %w", parent, err)
}
parent, parentInfo, err = checkLinks(parent, req, parentInfo)
if err != nil {
return err
}
parentName, err := convertToRelSubdirectory(req.Directory, oldParent)
if err != nil {
return fmt.Errorf("copier: get: error computing path of %q relative to %q: %w", parent, req.Directory, err)
}
if parentName == "" || parentName == "." {
// skip the "." entry
continue
}
if filepath.IsAbs(path) || looksLikeAbs(path) {
path = filepath.Join(req.Root, path)
} else {
path = filepath.Join(filepath.Dir(item), path)
if _, ok := addedParents[parentName]; ok {
continue
}
item = path
if _, err = convertToRelSubdirectory(req.Root, item); err != nil {
return fmt.Errorf("copier: get: computing path of %q(%q) relative to %q: %w", queue[i], item, req.Root, err)
addedParents[parentName] = struct{}{}
if err := copierHandlerGetOne(parentInfo, "", parentName, parent, req.GetOptions, tw, hardlinkChecker, idMappings); err != nil {
if req.GetOptions.IgnoreUnreadable && errorIsPermission(err) {
continue
} else if errors.Is(err, os.ErrNotExist) {
logrus.Warningf("copier: file disappeared while reading: %q", parent)
return nil
}
return fmt.Errorf("copier: get: %q: %w", queue[i].glob, err)
}
if info, err = os.Lstat(item); err != nil {
return fmt.Errorf("copier: get: lstat %q(%q): %w", queue[i], item, err)
}
followedLinks++
itemsCopied++
}
if followedLinks >= maxFollowedLinks {
return fmt.Errorf("copier: get: resolving symlink %q(%q): %w", queue[i], item, syscall.ELOOP)
item, info, err = checkLinks(item, req, info)
if err != nil {
return err
}
// evaluate excludes relative to the root directory
if info.Mode().IsDir() {
// we don't expand any of the contents that are archives
@ -1354,6 +1473,12 @@ func copierHandlerGet(bulkWriter io.Writer, req request, pm *fileutils.PatternMa
ok = filepath.SkipDir
}
}
if req.GetOptions.Parents {
rel, err = convertToRelSubdirectory(req.Directory, path)
if err != nil {
return fmt.Errorf("copier: get: error computing path of %q relative to %q: %w", path, req.Root, err)
}
}
// add the item to the outgoing tar stream
if err := copierHandlerGetOne(info, symlinkTarget, rel, path, options, tw, hardlinkChecker, idMappings); err != nil {
if req.GetOptions.IgnoreUnreadable && errorIsPermission(err) {
@ -1368,7 +1493,7 @@ func copierHandlerGet(bulkWriter io.Writer, req request, pm *fileutils.PatternMa
}
// walk the directory tree, checking/adding items individually
if err := filepath.WalkDir(item, walkfn); err != nil {
return fmt.Errorf("copier: get: %q(%q): %w", queue[i], item, err)
return fmt.Errorf("copier: get: %q(%q): %w", queue[i].glob, item, err)
}
itemsCopied++
} else {
@ -1379,15 +1504,24 @@ func copierHandlerGet(bulkWriter io.Writer, req request, pm *fileutils.PatternMa
if skip {
continue
}
// add the item to the outgoing tar stream. in
// cases where this was a symlink that we
// dereferenced, be sure to use the name of the
// link.
if err := copierHandlerGetOne(info, "", filepath.Base(queue[i]), item, req.GetOptions, tw, hardlinkChecker, idMappings); err != nil {
name := filepath.Base(queue[i].glob)
if req.GetOptions.Parents {
name, err = convertToRelSubdirectory(req.Directory, queue[i].glob)
if err != nil {
return fmt.Errorf("copier: get: error computing path of %q relative to %q: %w", item, req.Root, err)
}
if name == "" || name == "." {
// skip the "." entry
continue
}
}
if err := copierHandlerGetOne(info, "", name, item, req.GetOptions, tw, hardlinkChecker, idMappings); err != nil {
if req.GetOptions.IgnoreUnreadable && errorIsPermission(err) {
continue
}
return fmt.Errorf("copier: get: %q: %w", queue[i], err)
return fmt.Errorf("copier: get: %q: %w", queue[i].glob, err)
}
itemsCopied++
}
@ -1503,6 +1637,16 @@ func copierHandlerGetOne(srcfi os.FileInfo, symlinkTarget, name, contentPath str
if options.Rename != nil {
hdr.Name = handleRename(options.Rename, hdr.Name)
}
if options.Timestamp != nil {
timestamp := options.Timestamp.UTC()
hdr.ModTime = timestamp
if !hdr.AccessTime.IsZero() {
hdr.AccessTime = timestamp
}
if !hdr.ChangeTime.IsZero() {
hdr.ChangeTime = timestamp
}
}
if err = tw.WriteHeader(hdr); err != nil {
return fmt.Errorf("writing tar header from %q to pipe: %w", contentPath, err)
}
@ -1565,14 +1709,15 @@ func copierHandlerGetOne(srcfi os.FileInfo, symlinkTarget, name, contentPath str
return fmt.Errorf("getting fflags: %w", err)
}
var f *os.File
if hdr.Typeflag == tar.TypeReg {
switch hdr.Typeflag {
case tar.TypeReg:
// open the file first so that we don't write a header for it if we can't actually read it
f, err = os.Open(contentPath)
if err != nil {
return fmt.Errorf("opening file for adding its contents to archive: %w", err)
}
defer f.Close()
} else if hdr.Typeflag == tar.TypeDir {
case tar.TypeDir:
// open the directory file first to make sure we can access it.
f, err = os.Open(contentPath)
if err != nil {
@ -1580,6 +1725,16 @@ func copierHandlerGetOne(srcfi os.FileInfo, symlinkTarget, name, contentPath str
}
defer f.Close()
}
if options.Timestamp != nil {
timestamp := options.Timestamp.UTC()
hdr.ModTime = timestamp
if !hdr.AccessTime.IsZero() {
hdr.AccessTime = timestamp
}
if !hdr.ChangeTime.IsZero() {
hdr.ChangeTime = timestamp
}
}
// output the header
if err = tw.WriteHeader(hdr); err != nil {
return fmt.Errorf("writing header for %s (%s): %w", contentPath, hdr.Name, err)
@ -1599,7 +1754,7 @@ func copierHandlerGetOne(srcfi os.FileInfo, symlinkTarget, name, contentPath str
}
func copierHandlerPut(bulkReader io.Reader, req request, idMappings *idtools.IDMappings) (*response, func() error, error) {
errorResponse := func(fmtspec string, args ...interface{}) (*response, func() error, error) {
errorResponse := func(fmtspec string, args ...any) (*response, func() error, error) {
return &response{Error: fmt.Sprintf(fmtspec, args...), Put: putResponse{}}, nil, nil
}
dirUID, dirGID, defaultDirUID, defaultDirGID := 0, 0, 0, 0
@ -1653,7 +1808,7 @@ func copierHandlerPut(bulkReader io.Reader, req request, idMappings *idtools.IDM
return fmt.Errorf("copier: put: error setting owner of %q to %d:%d: %w", path, defaultDirUID, defaultDirGID, err)
}
// make a conditional note to set this directory's permissions
// later, but not if we already had an explictly-provided mode
// later, but not if we already had an explicitly-provided mode
if _, ok := directoryModes[path]; !ok {
directoryModes[path] = defaultDirMode
}
@ -1984,7 +2139,7 @@ func copierHandlerPut(bulkReader io.Reader, req request, idMappings *idtools.IDM
// set xattrs, including some that might have been reset by chown()
if !req.PutOptions.StripXattrs {
xattrs := mapWithPrefixedKeysWithoutKeyPrefix(hdr.PAXRecords, xattrPAXRecordNamespace)
if err = Lsetxattrs(path, xattrs); err != nil { // nolint:staticcheck
if err = Lsetxattrs(path, xattrs); err != nil {
if !req.PutOptions.IgnoreXattrErrors {
return fmt.Errorf("copier: put: error setting extended attributes on %q: %w", path, err)
}
@ -2013,7 +2168,7 @@ func copierHandlerPut(bulkReader io.Reader, req request, idMappings *idtools.IDM
}
func copierHandlerMkdir(req request, idMappings *idtools.IDMappings) (*response, func() error, error) {
errorResponse := func(fmtspec string, args ...interface{}) (*response, func() error, error) {
errorResponse := func(fmtspec string, args ...any) (*response, func() error, error) {
return &response{Error: fmt.Sprintf(fmtspec, args...), Mkdir: mkdirResponse{}}, nil, nil
}
dirUID, dirGID := 0, 0
@ -2067,7 +2222,7 @@ func copierHandlerMkdir(req request, idMappings *idtools.IDMappings) (*response,
}
func copierHandlerRemove(req request) *response {
errorResponse := func(fmtspec string, args ...interface{}) *response {
errorResponse := func(fmtspec string, args ...any) *response {
return &response{Error: fmt.Sprintf(fmtspec, args...), Remove: removeResponse{}}
}
resolvedTarget, err := resolvePath(req.Root, req.Directory, false, nil)
@ -2084,3 +2239,257 @@ func copierHandlerRemove(req request) *response {
}
return &response{Error: "", Remove: removeResponse{}}
}
// EnsurePath is a single item being passed to an Ensure() call.
type EnsurePath struct {
Path string // a pathname, relative to the Directory, possibly relative to the root
Typeflag byte // can be either TypeReg or TypeDir, everything else is currently ignored
ModTime *time.Time // mtime to set on newly-created items, default is to leave them be
Chmod *os.FileMode // mode, defaults to 000 for files and 700 for directories
Chown *idtools.IDPair // owner settings to set on newly-created items, defaults to 0:0
}
// EnsureOptions controls parts of Ensure()'s behavior.
type EnsureOptions struct {
UIDMap, GIDMap []idtools.IDMap // map from hostIDs to containerIDs in the chroot
Paths []EnsurePath
}
// Ensure ensures that the specified mount point targets exist under the root.
// If the root directory is not specified, the current root directory is used.
// If root is specified and the current OS supports it, and the calling process
// has the necessary privileges, the operation is performed in a chrooted
// context.
func Ensure(root, directory string, options EnsureOptions) ([]string, error) {
req := request{
Request: requestEnsure,
Root: root,
Directory: directory,
EnsureOptions: options,
}
resp, err := copier(nil, nil, req)
if err != nil {
return nil, err
}
if resp.Error != "" {
return nil, errors.New(resp.Error)
}
return resp.Ensure.Created, nil
}
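// Illustrative sketch (not part of the original source): ensuring that a
// directory and an empty file exist under a root before something is mounted
// onto them. The paths and mode are assumed placeholders; the returned slice
// lists only the entries that Ensure actually had to create.
func exampleEnsureMountpoints(root string) ([]string, error) {
	dirMode := os.FileMode(0o755)
	return Ensure(root, "", EnsureOptions{
		Paths: []EnsurePath{
			{Path: "/run/secrets", Typeflag: tar.TypeDir, Chmod: &dirMode},
			{Path: "/run/secrets/token", Typeflag: tar.TypeReg},
		},
	})
}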
func copierHandlerEnsure(req request, idMappings *idtools.IDMappings) *response {
errorResponse := func(fmtspec string, args ...any) *response {
return &response{Error: fmt.Sprintf(fmtspec, args...), Ensure: ensureResponse{}}
}
slices.SortFunc(req.EnsureOptions.Paths, func(a, b EnsurePath) int { return strings.Compare(a.Path, b.Path) })
var created []string
for _, item := range req.EnsureOptions.Paths {
uid, gid := 0, 0
if item.Chown != nil {
uid, gid = item.Chown.UID, item.Chown.GID
}
var mode os.FileMode
switch item.Typeflag {
case tar.TypeReg:
mode = 0o000
case tar.TypeDir:
mode = 0o700
default:
continue
}
if item.Chmod != nil {
mode = *item.Chmod
}
if idMappings != nil && !idMappings.Empty() {
containerDirPair := idtools.IDPair{UID: uid, GID: gid}
hostDirPair, err := idMappings.ToHost(containerDirPair)
if err != nil {
return errorResponse("copier: ensure: error mapping container filesystem owner %d:%d to host filesystem owners: %v", uid, gid, err)
}
uid, gid = hostDirPair.UID, hostDirPair.GID
}
directory, err := resolvePath(req.Root, req.Directory, true, nil)
if err != nil {
return errorResponse("copier: ensure: error resolving %q: %v", req.Directory, err)
}
rel, err := convertToRelSubdirectory(req.Root, directory)
if err != nil {
return errorResponse("copier: ensure: error computing path of %q relative to %q: %v", directory, req.Root, err)
}
subdir := ""
components := strings.Split(filepath.Join(rel, item.Path), string(os.PathSeparator))
components = slices.DeleteFunc(components, func(s string) bool { return s == "" || s == "." })
for i, component := range components {
parentPath := subdir
if parentPath == "" {
parentPath = "."
}
leaf := filepath.Join(subdir, component)
parentInfo, err := os.Stat(filepath.Join(req.Root, parentPath))
if err != nil {
return errorResponse("copier: ensure: checking datestamps on %q (%d: %v): %v", parentPath, i, components, err)
}
if i < len(components)-1 || item.Typeflag == tar.TypeDir {
err = os.Mkdir(filepath.Join(req.Root, leaf), mode)
subdir = leaf
} else if item.Typeflag == tar.TypeReg {
var f *os.File
if f, err = os.OpenFile(filepath.Join(req.Root, leaf), os.O_CREATE|os.O_EXCL|os.O_RDWR, mode); err == nil {
f.Close()
}
} else {
continue
}
if err == nil {
createdLeaf := leaf
if len(createdLeaf) > 1 {
createdLeaf = strings.TrimPrefix(createdLeaf, string(os.PathSeparator))
}
created = append(created, createdLeaf)
if err = chown(filepath.Join(req.Root, leaf), uid, gid); err != nil {
return errorResponse("copier: ensure: error setting owner of %q to %d:%d: %v", leaf, uid, gid, err)
}
if err = chmod(filepath.Join(req.Root, leaf), mode); err != nil {
return errorResponse("copier: ensure: error setting permissions on %q to 0%o: %v", leaf, mode)
}
if item.ModTime != nil {
if err := os.Chtimes(filepath.Join(req.Root, leaf), *item.ModTime, *item.ModTime); err != nil {
return errorResponse("copier: ensure: resetting datestamp on %q: %v", leaf, err)
}
}
} else {
// FreeBSD can return EISDIR for "mkdir /":
// https://bugs.freebsd.org/bugzilla/show_bug.cgi?id=59739.
if !errors.Is(err, os.ErrExist) && !errors.Is(err, syscall.EISDIR) {
return errorResponse("copier: ensure: error checking item %q: %v", leaf, err)
}
}
if err := os.Chtimes(filepath.Join(req.Root, parentPath), parentInfo.ModTime(), parentInfo.ModTime()); err != nil {
return errorResponse("copier: ensure: resetting datestamp on %q: %v", parentPath, err)
}
}
}
slices.Sort(created)
return &response{Error: "", Ensure: ensureResponse{Created: created}}
}
// ConditionalRemovePath is a single item being passed to a ConditionalRemove() call.
type ConditionalRemovePath struct {
Path string // a pathname, relative to the Directory, possibly relative to the root
ModTime *time.Time // mtime to expect this item to have, if it's a condition
Mode *os.FileMode // mode to expect this item to have, if it's a condition
Owner *idtools.IDPair // owner to expect this item to have, if it's a condition
}
// ConditionalRemoveOptions controls parts of ConditionalRemove()'s behavior.
type ConditionalRemoveOptions struct {
UIDMap, GIDMap []idtools.IDMap // map from hostIDs to containerIDs in the chroot
Paths []ConditionalRemovePath
}
// ConditionalRemove removes the set of named items if they're present and
// currently match the additional conditions, returning the list of items it
// removed. Directories will also only be removed if they have no contents,
// and will be left in place otherwise.
func ConditionalRemove(root, directory string, options ConditionalRemoveOptions) ([]string, error) {
req := request{
Request: requestConditionalRemove,
Root: root,
Directory: directory,
ConditionalRemoveOptions: options,
}
resp, err := copier(nil, nil, req)
if err != nil {
return nil, err
}
if resp.Error != "" {
return nil, errors.New(resp.Error)
}
return resp.ConditionalRemove.Removed, nil
}
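// Illustrative sketch (not part of the original source): pairing Ensure with
// ConditionalRemove so that entries created earlier are removed only if they
// still look untouched; non-empty directories are left in place. The expected
// modification time is an assumed placeholder condition.
func exampleRemoveIfUnchanged(root string, created []string, expectedMTime time.Time) ([]string, error) {
	paths := make([]ConditionalRemovePath, 0, len(created))
	for _, p := range created {
		paths = append(paths, ConditionalRemovePath{
			Path:    p,
			ModTime: &expectedMTime, // only remove if the mtime still matches
		})
	}
	return ConditionalRemove(root, "", ConditionalRemoveOptions{Paths: paths})
}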
func copierHandlerConditionalRemove(req request, idMappings *idtools.IDMappings) *response {
errorResponse := func(fmtspec string, args ...any) *response {
return &response{Error: fmt.Sprintf(fmtspec, args...), ConditionalRemove: conditionalRemoveResponse{}}
}
slices.SortFunc(req.ConditionalRemoveOptions.Paths, func(a, b ConditionalRemovePath) int { return strings.Compare(b.Path, a.Path) })
var removed []string
for _, item := range req.ConditionalRemoveOptions.Paths {
uid, gid := 0, 0
if item.Owner != nil {
uid, gid = item.Owner.UID, item.Owner.GID
}
if idMappings != nil && !idMappings.Empty() {
containerDirPair := idtools.IDPair{UID: uid, GID: gid}
hostDirPair, err := idMappings.ToHost(containerDirPair)
if err != nil {
return errorResponse("copier: conditionalRemove: error mapping container filesystem owner %d:%d to host filesystem owners: %v", uid, gid, err)
}
uid, gid = hostDirPair.UID, hostDirPair.GID
}
directory, err := resolvePath(req.Root, req.Directory, true, nil)
if err != nil {
return errorResponse("copier: conditionalRemove: error resolving %q: %v", req.Directory, err)
}
rel, err := convertToRelSubdirectory(req.Root, directory)
if err != nil {
return errorResponse("copier: conditionalRemove: error computing path of %q relative to %q: %v", directory, req.Root, err)
}
components := strings.Split(filepath.Join(rel, item.Path), string(os.PathSeparator))
components = slices.DeleteFunc(components, func(s string) bool { return s == "" || s == "." })
if len(components) == 0 {
continue
}
itemPath := filepath.Join(append([]string{req.Root}, components...)...)
itemInfo, err := os.Lstat(itemPath)
if err != nil {
if !errors.Is(err, os.ErrNotExist) {
return errorResponse("copier: conditionalRemove: checking on candidate %q: %v", itemPath, err)
}
// okay?
removed = append(removed, item.Path)
continue
}
parentPath := filepath.Dir(itemPath)
parentInfo, err := os.Stat(parentPath)
if err != nil {
return errorResponse("copier: conditionalRemove: checking on parent directory %q: %v", parentPath, err)
}
if item.Mode != nil && itemInfo.Mode().Perm()&fs.ModePerm != *item.Mode&fs.ModePerm {
// mismatch, modified? ignore
continue
}
if item.ModTime != nil && !item.ModTime.Equal(itemInfo.ModTime()) {
// mismatch, modified? ignore
continue
}
if item.Owner != nil {
ownerUID, ownerGID, err := owner(itemInfo)
if err != nil {
return errorResponse("copier: conditionalRemove: checking ownership of %q: %v", itemPath, err)
}
if uid != ownerUID || gid != ownerGID {
// mismatch, modified? ignore
continue
}
}
if err := os.Remove(itemPath); err != nil && !errors.Is(err, os.ErrNotExist) {
if !errors.Is(err, syscall.EEXIST) && !errors.Is(err, syscall.ENOTEMPTY) {
return errorResponse("copier: conditionalRemove: removing %q: %v", itemPath, err)
}
// okay? not removed, but it wasn't empty, so okay?
continue
}
removed = append(removed, item.Path)
if err := os.Chtimes(parentPath, parentInfo.ModTime(), parentInfo.ModTime()); err != nil {
return errorResponse("copier: conditionalRemove: resetting datestamp on %q: %v", parentPath, err)
}
}
slices.Sort(removed)
return &response{Error: "", ConditionalRemove: conditionalRemoveResponse{Removed: removed}}
}

View File

@ -13,6 +13,7 @@ import (
"path"
"path/filepath"
"reflect"
"slices"
"sort"
"strconv"
"strings"
@ -48,7 +49,7 @@ func makeContents(length int64) io.ReadCloser {
for count < length {
if _, err := buffered.Write([]byte{"0123456789abcdef"[count%16]}); err != nil {
buffered.Flush()
pipeWriter.CloseWithError(err) // nolint:errcheck
pipeWriter.CloseWithError(err)
return
}
count++
@ -111,7 +112,7 @@ func makeArchive(headers []tar.Header, contents map[string][]byte) io.ReadCloser
tw.Close()
buffered.Flush()
if err != nil {
pipeWriter.CloseWithError(err) // nolint:errcheck
pipeWriter.CloseWithError(err)
} else {
pipeWriter.Close()
}
@ -176,7 +177,8 @@ type enumeratedFile struct {
}
var (
testDate = time.Unix(1485449953, 0)
testDate = time.Unix(1485449953, 0)
secondTestDate = time.Unix(1485449953*2, 0)
uid = os.Getuid()
@ -888,6 +890,8 @@ func testGetMultiple(t *testing.T) {
keepDirectoryNames bool
renames map[string]string
noDerefSymlinks bool
parents bool
timestamp *time.Time
}
getTestArchives := []struct {
name string
@ -995,6 +999,16 @@ func testGetMultiple(t *testing.T) {
"subdir-f/hlink-b", // from subdir-e
},
},
{
name: "timestamped",
pattern: "file*",
items: []string{
"file-0",
"file-a",
"file-b",
},
timestamp: &secondTestDate,
},
{
name: "dot-with-wildcard-includes-and-excludes",
pattern: ".",
@ -1364,6 +1378,124 @@ func testGetMultiple(t *testing.T) {
"file-q", // from link-c -> subdir-c
},
},
{
name: "wildcard and parents",
pattern: "*",
parents: true,
items: []string{
"file-0",
"file-a",
"file-b",
"link-a",
"hlink-0",
"something-a",
"archive-a",
"non-archive-a",
"subdir-a",
"subdir-b",
"subdir-c",
"subdir-d",
"subdir-e",
"subdir-a/file-n",
"subdir-a/file-o",
"subdir-a/file-a",
"subdir-a/file-b",
"subdir-a/file-c",
"subdir-b/file-n",
"subdir-b/file-o",
"subdir-c/file-p",
"subdir-c/file-p",
"subdir-c/file-q",
"subdir-c/file-q",
"subdir-d/hlink-0",
"subdir-e/subdir-f",
"subdir-e/subdir-f/hlink-b",
},
},
{
name: "everything-with-wildcard-includes-and-excludes-parents",
pattern: "*",
parents: true,
exclude: []string{"**/*-a", "!**/*-c"},
items: []string{
"file-0",
"file-b",
"subdir-a",
"subdir-b",
"subdir-c",
"subdir-d",
"subdir-e",
"subdir-a/file-c",
"subdir-b/file-n",
"subdir-b/file-o",
"subdir-c/file-p",
"subdir-c/file-p",
"subdir-c/file-q",
"subdir-c/file-q",
"hlink-0",
"subdir-d/hlink-0",
"subdir-e/subdir-f",
"subdir-e/subdir-f/hlink-b",
},
},
{
name: "file-and-dir-wildcard-parents",
pattern: "*-a",
parents: true,
items: []string{
"file-a",
"link-a",
"something-a",
"archive-a",
"non-archive-a",
"subdir-a",
"subdir-a/file-n",
"subdir-a/file-o",
"subdir-a/file-a",
"subdir-a/file-b",
"subdir-a/file-c",
},
},
{
name: "root-wildcard-parents",
pattern: "/subdir-b/*",
parents: true,
items: []string{
"subdir-b",
"subdir-b/file-n",
"subdir-b/file-o",
},
},
{
name: "dotdot-wildcard-parents",
pattern: "../../subdir-b/*",
parents: true,
items: []string{
"subdir-b",
"subdir-b/file-n",
"subdir-b/file-o",
},
},
{
name: "dir-with-parents",
pattern: "subdir-e/subdir-f",
parents: true,
items: []string{
"subdir-e",
"subdir-e/subdir-f",
"subdir-e/subdir-f/hlink-b",
},
},
{
name: "hlink-with-parents",
pattern: "subdir-e/subdir-f/hlink-b",
parents: true,
items: []string{
"subdir-e",
"subdir-e/subdir-f",
"subdir-e/subdir-f/hlink-b",
},
},
},
},
}
@ -1399,6 +1531,8 @@ func testGetMultiple(t *testing.T) {
KeepDirectoryNames: testCase.keepDirectoryNames,
Rename: testCase.renames,
NoDerefSymlinks: testCase.noDerefSymlinks,
Parents: testCase.parents,
Timestamp: testCase.timestamp,
}
t.Run(fmt.Sprintf("topdir=%s,archive=%s,case=%s,pattern=%s", topdir, testArchive.name, testCase.name, testCase.pattern), func(t *testing.T) {
@ -1414,15 +1548,18 @@ func testGetMultiple(t *testing.T) {
var wg sync.WaitGroup
wg.Add(1)
go func() {
defer wg.Done()
getErr = Get(root, topdir, getOptions, []string{testCase.pattern}, pipeWriter)
pipeWriter.Close()
wg.Done()
}()
tr := tar.NewReader(pipeReader)
hdr, err := tr.Next()
actualContents := []string{}
for err == nil {
actualContents = append(actualContents, filepath.FromSlash(hdr.Name))
if testCase.timestamp != nil {
assert.Truef(t, testCase.timestamp.Equal(hdr.ModTime), "timestamp was supposed to be forced for %q", hdr.Name)
}
hdr, err = tr.Next()
}
pipeReader.Close()
@ -1633,7 +1770,7 @@ func testMkdir(t *testing.T) {
return nil
})
require.NoErrorf(t, err, "error walking directory to catalog post-Mkdir contents: %v", err)
expected := append([]string{}, beforeNames...)
expected := slices.Clone(beforeNames)
for _, expect := range testCase.expect {
expected = append(expected, filepath.FromSlash(expect))
}
@ -1904,3 +2041,318 @@ func TestExtendedGlob(t *testing.T) {
require.NoError(t, err, "globbing")
require.ElementsMatch(t, expected2, matched, "**/d/**/*.dat")
}
func testEnsure(t *testing.T) {
zero := time.Unix(0, 0)
worldReadable := os.FileMode(0o644)
testCases := []struct {
description string
subdir string
options EnsureOptions
expected []string
}{
{
description: "base",
subdir: "subdir",
options: EnsureOptions{
Paths: []EnsurePath{
{
Path: filepath.Join(string(os.PathSeparator), "a", "b", "a"),
Typeflag: tar.TypeReg,
Chmod: &worldReadable,
},
{
Path: filepath.Join("a", "b", "b"),
Typeflag: tar.TypeReg,
ModTime: &zero,
},
{
Path: filepath.Join(string(os.PathSeparator), "a", "b", "c"),
Typeflag: tar.TypeDir,
ModTime: &zero,
},
{
Path: filepath.Join("a", "b", "d"),
Typeflag: tar.TypeDir,
},
},
},
expected: []string{
"subdir",
"subdir/a",
"subdir/a/b",
"subdir/a/b/a",
"subdir/a/b/b",
"subdir/a/b/c",
"subdir/a/b/d",
},
},
{
description: "nosubdir",
options: EnsureOptions{
Paths: []EnsurePath{
{
Path: filepath.Join(string(os.PathSeparator), "a", "b", "c"),
Typeflag: tar.TypeDir,
ModTime: &zero,
},
{
Path: filepath.Join("a", "b", "d"),
Typeflag: tar.TypeDir,
},
},
},
expected: []string{
"a",
"a/b",
"a/b/c",
"a/b/d",
},
},
}
for i := range testCases {
t.Run(testCases[i].description, func(t *testing.T) {
testStarted := time.Now()
tmpdir := t.TempDir()
created, err := Ensure(tmpdir, testCases[i].subdir, testCases[i].options)
require.NoError(t, err, "unexpected error ensuring")
require.EqualValues(t, testCases[i].expected, created, "did not expect these")
for _, item := range testCases[i].options.Paths {
target := filepath.Join(tmpdir, testCases[i].subdir, item.Path)
st, err := os.Stat(target)
require.NoError(t, err, "we supposedly created %q", item.Path)
if item.Chmod != nil {
assert.Equalf(t, *item.Chmod, st.Mode().Perm(), "permissions look wrong on %q", item.Path)
}
if item.Chown != nil {
uid, gid, err := owner(st)
require.NoErrorf(t, err, "expected to be able to read uid:gid for %q", item.Path)
assert.Equalf(t, item.Chown.UID, uid, "user looks wrong on %q", item.Path)
assert.Equalf(t, item.Chown.GID, gid, "group looks wrong on %q", item.Path)
}
if item.ModTime != nil {
assert.Equalf(t, item.ModTime.Unix(), st.ModTime().Unix(), "datestamp looks wrong on %q", item.Path)
} else {
assert.Truef(t, !testStarted.After(st.ModTime()), "datestamp is too old on %q: %v < %v", item.Path, st.ModTime(), testStarted)
}
}
})
}
}
func TestEnsureNoChroot(t *testing.T) {
couldChroot := canChroot
canChroot = false
testEnsure(t)
canChroot = couldChroot
}
func testConditionalRemove(t *testing.T) {
mode, mismatchedMode := os.FileMode(0o751), os.FileMode(0o755)
now := time.Now()
then := time.Unix(now.Unix()/2, 0)
type create struct {
path string
typeFlag byte
mtime *time.Time
mode *os.FileMode
}
testCases := []struct {
description string
subdir string
create []create
remove ConditionalRemoveOptions
expectedRemoved []string
expectedRemain []string
}{
{
description: "withoutsubdir",
create: []create{
{path: "/a", typeFlag: tar.TypeDir},
{path: "b", typeFlag: tar.TypeReg},
{path: "c/d", typeFlag: tar.TypeReg},
{path: "c/e", typeFlag: tar.TypeReg},
},
remove: ConditionalRemoveOptions{
Paths: []ConditionalRemovePath{
{Path: "a"},
{Path: "b"},
{Path: "c"},
{Path: "c/e"},
},
},
expectedRemoved: []string{"a", "b", "c/e"},
expectedRemain: []string{"c/d", "c"},
},
{
description: "withsubdir",
subdir: "subdir",
create: []create{
{path: "/a", typeFlag: tar.TypeDir},
{path: "b", typeFlag: tar.TypeReg},
{path: "c/d", typeFlag: tar.TypeReg},
{path: "c/e", typeFlag: tar.TypeReg},
},
remove: ConditionalRemoveOptions{
Paths: []ConditionalRemovePath{
{Path: "a"},
{Path: "b"},
{Path: "c"},
{Path: "c/e"},
},
},
expectedRemoved: []string{"a", "b", "c/e"},
expectedRemain: []string{"c/d", "c"},
},
{
description: "unconditional",
create: []create{
{path: "/a", typeFlag: tar.TypeDir, mtime: &then, mode: &mode},
{path: "b", typeFlag: tar.TypeReg, mtime: &then, mode: &mode},
{path: "c/d", typeFlag: tar.TypeReg, mtime: &then, mode: &mode},
{path: "c/e", typeFlag: tar.TypeReg, mtime: &then, mode: &mode},
},
remove: ConditionalRemoveOptions{
Paths: []ConditionalRemovePath{
{Path: "a"},
{Path: "b"},
{Path: "c"},
{Path: "c/e"},
},
},
expectedRemoved: []string{"a", "b", "c/e"},
expectedRemain: []string{"c/d", "c"},
},
{
description: "conditions-not-met",
create: []create{
{path: "/a", typeFlag: tar.TypeDir, mtime: &then, mode: &mode},
{path: "b", typeFlag: tar.TypeReg, mtime: &then, mode: &mode},
{path: "c/d", typeFlag: tar.TypeReg, mtime: &then, mode: &mode},
{path: "c/e", typeFlag: tar.TypeReg, mtime: &then, mode: &mode},
},
remove: ConditionalRemoveOptions{
Paths: []ConditionalRemovePath{
{Path: "a", Mode: &mismatchedMode},
{Path: "b", Mode: &mismatchedMode},
{Path: "c", Mode: &mismatchedMode},
{Path: "c/e", Mode: &mismatchedMode},
{Path: "a", ModTime: &now},
{Path: "b", ModTime: &now},
{Path: "c", ModTime: &now},
{Path: "c/e", ModTime: &now},
},
},
expectedRemain: []string{"a", "b", "c/e", "c/d", "c"},
},
{
description: "conditions-met",
create: []create{
{path: "/a", typeFlag: tar.TypeDir, mtime: &then, mode: &mode},
{path: "b", typeFlag: tar.TypeReg, mtime: &then, mode: &mode},
{path: "c/d", typeFlag: tar.TypeReg, mtime: &then, mode: &mode},
{path: "c/e", typeFlag: tar.TypeReg, mtime: &then, mode: &mode},
},
remove: ConditionalRemoveOptions{
Paths: []ConditionalRemovePath{
{Path: "a", ModTime: &then, Mode: &mode},
{Path: "b", ModTime: &then, Mode: &mode},
{Path: "c"},
{Path: "c/d", ModTime: &then, Mode: &mode},
},
},
expectedRemoved: []string{"a", "b", "c/d"},
expectedRemain: []string{"c", "c/e"},
},
}
for i := range testCases {
t.Run(testCases[i].description, func(t *testing.T) {
tmpdir := t.TempDir()
var create EnsureOptions
for _, what := range testCases[i].create {
create.Paths = append(create.Paths, EnsurePath{
Path: what.path,
Typeflag: what.typeFlag,
ModTime: what.mtime,
Chmod: what.mode,
})
}
created, err := Ensure(tmpdir, testCases[i].subdir, create)
require.NoErrorf(t, err, "unexpected error creating %#v", create)
remove := testCases[i].remove
for _, what := range created {
remove.Paths = append(remove.Paths, ConditionalRemovePath{
Path: what,
})
}
removed, err := ConditionalRemove(tmpdir, testCases[i].subdir, testCases[i].remove)
require.NoError(t, err, "unexpected error removing")
expectedRemoved := slices.Clone(testCases[i].expectedRemoved)
slices.Sort(expectedRemoved)
require.EqualValues(t, expectedRemoved, removed, "did not expect these to be removed")
var remain []string
err = filepath.Walk(filepath.Join(tmpdir, testCases[i].subdir), func(path string, _ fs.FileInfo, err error) error {
if err != nil {
return err
}
rel, err := filepath.Rel(filepath.Join(tmpdir, testCases[i].subdir), path)
if err != nil {
return fmt.Errorf("computing path of %q relative to %q: %w", path, filepath.Join(tmpdir, testCases[i].subdir), err)
}
if rel == "." {
return nil
}
remain = append(remain, rel)
return nil
})
slices.Sort(remain)
expectedRemain := slices.Clone(testCases[i].expectedRemain)
slices.Sort(expectedRemain)
require.NoError(t, err, "unexpected error checking what's left")
require.EqualValues(t, expectedRemain, remain, "did not expect these to be left behind")
})
}
}
func TestConditionalRemoveNoChroot(t *testing.T) {
couldChroot := canChroot
canChroot = false
testConditionalRemove(t)
canChroot = couldChroot
}
func TestSortedExtendedGlob(t *testing.T) {
tmpdir := t.TempDir()
buf := []byte("buffer")
expect := []string{}
for _, name := range []string{"z", "y", "x", "a", "b", "c", "d", "e", "f"} {
require.NoError(t, os.WriteFile(filepath.Join(tmpdir, name), buf, 0o600))
expect = append(expect, filepath.Join(tmpdir, name))
}
sort.Strings(expect)
matched, err := extendedGlob(filepath.Join(tmpdir, "*"))
require.NoError(t, err, "globbing")
require.ElementsMatch(t, expect, matched, "sorted globbing")
}

View File

@ -3,9 +3,15 @@
package copier
import (
"os"
"testing"
)
const (
testModeMask = int64(os.ModePerm)
testIgnoreSymlinkDates = false
)
func TestPutChroot(t *testing.T) {
if uid != 0 {
t.Skip("chroot() requires root privileges, skipping")
@ -75,3 +81,23 @@ func TestRemoveChroot(t *testing.T) {
testRemove(t)
canChroot = couldChroot
}
func TestEnsureChroot(t *testing.T) {
if uid != 0 {
t.Skip("chroot() requires root privileges, skipping")
}
couldChroot := canChroot
canChroot = true
testEnsure(t)
canChroot = couldChroot
}
func TestConditionalRemoveChroot(t *testing.T) {
if uid != 0 {
t.Skip("chroot() requires root privileges, skipping")
}
couldChroot := canChroot
canChroot = true
testConditionalRemove(t)
canChroot = couldChroot
}

View File

@ -0,0 +1,8 @@
//go:build windows
package copier
const (
testModeMask = int64(0o600)
testIgnoreSymlinkDates = true
)

View File

@ -70,6 +70,13 @@ func lutimes(_ bool, path string, atime, mtime time.Time) error {
return unix.Lutimes(path, []unix.Timeval{unix.NsecToTimeval(atime.UnixNano()), unix.NsecToTimeval(mtime.UnixNano())})
}
func owner(info os.FileInfo) (int, int, error) {
if st, ok := info.Sys().(*syscall.Stat_t); ok {
return int(st.Uid), int(st.Gid), nil
}
return -1, -1, syscall.ENOSYS
}
// sameDevice returns true unless we're sure that they're not on the same device
func sameDevice(a, b os.FileInfo) bool {
aSys := a.Sys()
@ -84,8 +91,3 @@ func sameDevice(a, b os.FileInfo) bool {
}
return uA.Dev == uB.Dev
}
const (
testModeMask = int64(os.ModePerm)
testIgnoreSymlinkDates = false
)

View File

@ -77,12 +77,11 @@ func lutimes(isSymlink bool, path string, atime, mtime time.Time) error {
return windows.UtimesNano(path, []windows.Timespec{windows.NsecToTimespec(atime.UnixNano()), windows.NsecToTimespec(mtime.UnixNano())})
}
func owner(info os.FileInfo) (int, int, error) {
return -1, -1, syscall.ENOSYS
}
// sameDevice returns true since we can't be sure that they're not on the same device
func sameDevice(a, b os.FileInfo) bool {
return true
}
const (
testModeMask = int64(0o600)
testIgnoreSymlinkDates = true
)

View File

@ -1,3 +1,5 @@
//go:build linux || netbsd || freebsd || darwin
package copier
import (
@ -17,6 +19,7 @@ func init() {
}
func TestXattrs(t *testing.T) {
t.Parallel()
if !xattrsSupported {
t.Skipf("xattrs are not supported on this platform, skipping")
}

View File

@ -49,7 +49,8 @@ type CommonBuildOptions struct {
CPUSetMems string
// HTTPProxy determines whether *_proxy env vars from the build host are passed into the container.
HTTPProxy bool
// IdentityLabel if set ensures that default `io.buildah.version` label is not applied to build image.
// IdentityLabel if set controls whether or not a `io.buildah.version` label is added to the built image.
// Setting this to false does not clear the label if it would be inherited from the base image.
IdentityLabel types.OptionalBool
// Memory is the upper limit (in bytes) on how much memory running containers can use.
Memory int64
@ -167,9 +168,12 @@ type BuildOptions struct {
AdditionalBuildContexts map[string]*AdditionalBuildContext
// Name of the image to write to.
Output string
// BuildOutput specifies if any custom build output is selected for following build.
// It allows end user to export recently built rootfs into a directory or tar.
// See the documentation of 'buildah build --output' for the details of the format.
// BuildOutputs specifies if any custom build output is selected for
// following build. It allows the end user to export the image's
// rootfs to a directory or a tar archive. See the documentation of
// 'buildah build --output' for the details of the syntax.
BuildOutputs []string
// Deprecated: use BuildOutputs instead.
BuildOutput string
// ConfidentialWorkload controls whether or not, and if so, how, we produce an
// image that's meant to be run using krun as a VM instead of a conventional
@ -187,7 +191,7 @@ type BuildOptions struct {
// Log is a callback that will print a progress message. If no value
// is supplied, the message will be sent to Err (or os.Stderr, if Err
// is nil) by default.
Log func(format string, args ...interface{})
Log func(format string, args ...any)
// In is connected to stdin for RUN instructions.
In io.Reader
// Out is a place where non-error log messages are sent.
@ -236,6 +240,12 @@ type BuildOptions struct {
// ID mapping options to use if we're setting up our own user namespace
// when handling RUN instructions.
IDMappingOptions *IDMappingOptions
// InheritLabels controls whether or not built images will retain the labels
// which were set in their base images
InheritLabels types.OptionalBool
// InheritAnnotations controls whether or not built images will retain the annotations
// which were set in their base images
InheritAnnotations types.OptionalBool
// AddCapabilities is a list of capabilities to add to the default set when
// handling RUN instructions.
AddCapabilities []string
@ -289,9 +299,23 @@ type BuildOptions struct {
SignBy string
// Architecture specifies the target architecture of the image to be built.
Architecture string
// Timestamp sets the created timestamp to the specified time, allowing
// for deterministic, content-addressable builds.
// Timestamp specifies a timestamp to use for the image's created-on
// date, the corresponding field in new history entries, the timestamps
// to set on contents in new layer diffs, and the timestamps to set on
// contents written as specified in the BuildOutput field. If left
// unset, the current time is used for the configuration and manifest,
// and layer contents are recorded as-is.
Timestamp *time.Time
// SourceDateEpoch specifies a timestamp to use for the image's
// created-on date and the corresponding field in new history entries,
// and any content written as specified in the BuildOutput field. If
// left unset, the current time is used for the configuration and
// manifest, and layer and BuildOutput contents retain their original
// timestamps.
SourceDateEpoch *time.Time
// RewriteTimestamp, if set, forces timestamps in generated layers to
// not be later than the SourceDateEpoch, if it is also set.
RewriteTimestamp bool
// OS specifies the operating system of the image to be built.
OS string
// MaxPullPushRetries is the maximum number of attempts we'll make to pull or push any one
@ -334,6 +358,8 @@ type BuildOptions struct {
UnsetEnvs []string
// UnsetLabels is a list of labels to not add to final image from base image.
UnsetLabels []string
// UnsetAnnotations is a list of annotations to not add to final image from base image.
UnsetAnnotations []string
// Envs is a list of environment variables to set in the final image.
Envs []string
// OSFeatures specifies operating system features the image requires.
@ -383,6 +409,13 @@ type BuildOptions struct {
// provides a minimal initial configuration with a working directory
// set in it.
CompatScratchConfig types.OptionalBool
// CompatLayerOmissions causes the "/dev", "/proc", and "/sys"
// directories to be omitted from the image and related output. Newer
// BuildKit-based builds include them in the built image by default.
CompatLayerOmissions types.OptionalBool
// NoPivotRoot inhibits the usage of pivot_root when setting up the rootfs
NoPivotRoot bool
// CreatedAnnotation controls whether or not an "org.opencontainers.image.created"
// annotation is present in the output image.
CreatedAnnotation types.OptionalBool
}

View File

@ -7,6 +7,7 @@ import (
)
func TestPullPolicy(t *testing.T) {
t.Parallel()
for name, val := range PolicyMap {
assert.Equal(t, name, val.String())
}

View File

@ -29,7 +29,7 @@ const (
// identify working containers.
Package = "buildah"
// Version for the Package. Also used by .packit.sh for Packit builds.
Version = "1.39.0"
Version = "1.41.0-dev"
// DefaultRuntime if containers.conf fails.
DefaultRuntime = "runc"
@ -260,13 +260,6 @@ func parseGitBuildContext(url string) (string, string, string) {
return gitBranchPart[0], gitSubdir, gitBranch
}
func isGitTag(remote, ref string) bool {
if _, err := exec.Command("git", "ls-remote", "--exit-code", remote, ref).Output(); err != nil {
return true
}
return false
}
func cloneToDirectory(url, dir string) ([]byte, string, error) {
var cmd *exec.Cmd
gitRepo, gitSubdir, gitRef := parseGitBuildContext(url)
@ -274,20 +267,18 @@ func cloneToDirectory(url, dir string) ([]byte, string, error) {
cmd = exec.Command("git", "init", dir)
combinedOutput, err := cmd.CombinedOutput()
if err != nil {
return combinedOutput, gitSubdir, fmt.Errorf("failed while performing `git init`: %w", err)
// Return err.Error() instead of err as we want buildah to override error code with more predictable
// value.
return combinedOutput, gitSubdir, fmt.Errorf("failed while performing `git init`: %s", err.Error())
}
// add origin
cmd = exec.Command("git", "remote", "add", "origin", gitRepo)
cmd.Dir = dir
combinedOutput, err = cmd.CombinedOutput()
if err != nil {
return combinedOutput, gitSubdir, fmt.Errorf("failed while performing `git remote add`: %w", err)
}
if gitRef != "" {
if ok := isGitTag(url, gitRef); ok {
gitRef += ":refs/tags/" + gitRef
}
// Return err.Error() instead of err as we want buildah to override error code with more predictable
// value.
return combinedOutput, gitSubdir, fmt.Errorf("failed while performing `git remote add`: %s", err.Error())
}
logrus.Debugf("fetching repo %q and branch (or commit ID) %q to %q", gitRepo, gitRef, dir)
@ -296,14 +287,18 @@ func cloneToDirectory(url, dir string) ([]byte, string, error) {
cmd.Dir = dir
combinedOutput, err = cmd.CombinedOutput()
if err != nil {
return combinedOutput, gitSubdir, fmt.Errorf("failed while performing `git fetch`: %w", err)
// Return err.Error() instead of err as we want buildah to override error code with more predictable
// value.
return combinedOutput, gitSubdir, fmt.Errorf("failed while performing `git fetch`: %s", err.Error())
}
cmd = exec.Command("git", "checkout", "FETCH_HEAD")
cmd.Dir = dir
combinedOutput, err = cmd.CombinedOutput()
if err != nil {
return combinedOutput, gitSubdir, fmt.Errorf("failed while performing `git checkout`: %w", err)
// Return err.Error() instead of err as we want buildah to override error code with more predictable
// value.
return combinedOutput, gitSubdir, fmt.Errorf("failed while performing `git checkout`: %s", err.Error())
}
return combinedOutput, gitSubdir, nil
}

View File

@ -7,6 +7,7 @@ import (
)
func TestParseGitBuildContext(t *testing.T) {
t.Parallel()
// Tests with only repo
repo, subdir, branch := parseGitBuildContext("https://github.com/containers/repo.git")
assert.Equal(t, repo, "https://github.com/containers/repo.git")

View File

@ -19,14 +19,13 @@ function is_rootless() {
## as root. The `buildah unshare` command switches your user
## session to root within the user namespace.
if is_rootless; then
buildah unshare $0
exit
exec buildah unshare $0
fi
demoimg=myshdemo
quayuser=ipbabble
myname=WilliamHenry
distrorelease=30
distrorelease=42
pkgmgr=dnf # switch to yum if using yum
#Setting up some colors for helping read the demo output
@ -55,9 +54,9 @@ ls $scratchmnt
echo -e "${red}Note that the root of the scratch container is EMPTY!${reset}"
read -p "${cyan}Time to install some basic bash capabilities: coreutils and bash packages${reset}"
if [ "$pkgmgr" == "dnf" ]; then
$pkgmgr install --installroot $scratchmnt --release ${distrorelease} bash coreutils --setopt install_weak_deps=false -y
$pkgmgr install --installroot $scratchmnt --releasever ${distrorelease} bash coreutils --use-host-config --setopt "*.countme=false" --setopt install_weak_deps=false -y
elif [ "$pkgmgr" == "yum" ]; then
$pkgmgr install --installroot $scratchmnt --releasever ${distrorelease} bash coreutils -y
$pkgmgr install --installroot $scratchmnt --releasever ${distrorelease} bash coreutils --use-host-config --setopt "*.countme=false" -y
else
echo -e "${red}[Error] Unknown package manager ${pkgmgr}${reset}"
fi

View File

@ -61,7 +61,14 @@ type tarFilterer struct {
}
func (t *tarFilterer) Write(p []byte) (int, error) {
return t.pipeWriter.Write(p)
n, err := t.pipeWriter.Write(p)
if err != nil {
t.closedLock.Lock()
closed := t.closed
t.closedLock.Unlock()
err = fmt.Errorf("writing to tar filter pipe (closed=%v,err=%v): %w", closed, t.err, err)
}
return n, err
}
func (t *tarFilterer) Close() error {
@ -108,9 +115,8 @@ func newTarFilterer(writeCloser io.WriteCloser, filter func(hdr *tar.Header) (sk
skip, replaceContents, replacementContents = filter(hdr)
}
if !skip {
err = tarWriter.WriteHeader(hdr)
if err != nil {
err = fmt.Errorf("filtering tar header for %q: %w", hdr.Name, err)
if err = tarWriter.WriteHeader(hdr); err != nil {
err = fmt.Errorf("writing tar header for %q: %w", hdr.Name, err)
break
}
if hdr.Size != 0 {
@ -130,10 +136,14 @@ func newTarFilterer(writeCloser io.WriteCloser, filter func(hdr *tar.Header) (sk
break
}
}
if err = tarWriter.Flush(); err != nil {
err = fmt.Errorf("flushing tar item padding for %q: %w", hdr.Name, err)
break
}
}
hdr, err = tarReader.Next()
}
if err != io.EOF {
if !errors.Is(err, io.EOF) {
filterer.err = fmt.Errorf("reading tar archive: %w", err)
break
}
@ -146,7 +156,11 @@ func newTarFilterer(writeCloser io.WriteCloser, filter func(hdr *tar.Header) (sk
if err == nil {
err = err1
}
pipeReader.CloseWithError(err)
if err != nil {
pipeReader.CloseWithError(err)
} else {
pipeReader.Close()
}
filterer.wg.Done()
}()
return filterer

View File

@ -25,6 +25,7 @@ func (c *CompositeDigester) isOpen() bool {
}
func TestCompositeDigester(t *testing.T) {
t.Parallel()
tests := []struct {
name string
itemTypes []string
@ -186,6 +187,7 @@ func TestCompositeDigester(t *testing.T) {
}
func TestTarFilterer(t *testing.T) {
t.Parallel()
tests := []struct {
name string
input, output map[string]string

View File

@ -164,7 +164,7 @@ type V1Image struct {
// V2Image stores the image configuration
type V2Image struct {
V1Image
Parent ID `json:"parent,omitempty"` // nolint:govet
Parent ID `json:"parent,omitempty"`
RootFS *V2S2RootFS `json:"rootfs,omitempty"`
History []V2S2History `json:"history,omitempty"`
OSVersion string `json:"os.version,omitempty"`

View File

@ -65,6 +65,13 @@ can be used.
Path to an alternative .containerignore (.dockerignore) file. Requires \-\-contextdir be specified.
**--link**
Create an independent image layer for the added files instead of modifying the working
container's filesystem. If `buildah run` creates a file and `buildah add --link` adds a file
to the same path, the file from `buildah add --link` will be present in the committed image.
The --link layer is applied after all container filesystem changes at commit time.
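For illustration, a hypothetical invocation (the container name and paths are placeholders):
```console
# buildah add --link containerID ./assets.tar.gz /opt/assets/
```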
**--quiet**, **-q**
Refrain from printing a digest of the added content.
@ -83,6 +90,15 @@ from registries or retrieving content from HTTPS URLs.
Defaults to `2s`.
**--timestamp** *seconds*
Set the timestamp ("mtime") for added content to exactly this number of seconds
since the epoch (Unix time 0, i.e., 00:00:00 UTC on 1 January 1970) to allow
for deterministic builds.
The destination directory into which the content is being copied will most
likely reflect the time at which the content was added to it.
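For example, pinning added content to the Unix epoch (container name and paths are illustrative):
```console
# buildah add --timestamp 0 containerID ./VERSION /VERSION
```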
**--tls-verify** *bool-value*
Require verification of certificates when retrieving sources from HTTPS

View File

@ -34,6 +34,12 @@ Add a custom host-to-IP mapping (host:ip)
Add a line to /etc/hosts. The format is hostname:ip. The **--add-host** option can be set multiple times. Conflicts with the --no-hosts option.
Instead of an IP address, the special flag host-gateway can be given. This resolves to an IP address the container can use to connect to the host. The IP address chosen depends on your network setup, thus there's no guarantee that Buildah can determine the host-gateway address automatically, which will then cause Buildah to fail with an error message. You can overwrite this IP address using the host_containers_internal_ip option in containers.conf.
The host-gateway address is also used by Buildah to automatically add the host.containers.internal and host.docker.internal hostnames to /etc/hosts. You can prevent that by either giving the --no-hosts option, or by setting host_containers_internal_ip="none" in containers.conf. If no host-gateway address was configured manually and Buildah fails to determine the IP address automatically, Buildah will silently skip adding these internal hostnames to /etc/hosts. If Buildah is running in a virtual machine using podman machine (this includes Mac and Windows hosts), Buildah will silently skip adding the internal hostnames to /etc/hosts, unless an IP address was configured manually; the internal hostnames are resolved by the gvproxy DNS resolver instead.
Buildah will use the /etc/hosts file of the host as a basis by default, i.e. any hostname present in this file will also be present in the /etc/hosts file of the container. A different base file can be configured using the base_hosts_file config in containers.conf
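For example (hostname and address are illustrative):
```console
# buildah build --add-host db.example.com:192.168.10.5 -t imageName .
# buildah build --add-host gateway.local:host-gateway -t imageName .
```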
**--all-platforms**
Instead of building for a set of platforms specified using the **--platform** option, inspect the build's base images, and build for all of the platforms for which they are all available. Stages that use *scratch* as a starting point cannot be inspected, so at least one non-*scratch* stage must be present for detection to work usefully.
@ -189,7 +195,7 @@ The default certificates directory is _/etc/containers/certs.d_.
**--cgroup-parent**=""
Path to cgroups under which the cgroup for the container will be created. If the path is not absolute, the path is considered to be relative to the cgroups path of the init process. Cgroups will be created if they do not already exist.
Path to cgroups under which the cgroup for RUN instructions will be created. If the path is not absolute, the path is considered to be relative to the cgroups path of the init process. Cgroups will be created if they do not already exist.
**--cgroupns** *how*
@ -291,6 +297,16 @@ If you have four memory nodes on your system (0-3), use `--cpuset-mems=0,1`
then processes in your container will only use memory from the first
two memory nodes.
**--created-annotation**
Add an image *annotation* (see also **--annotation**) to the image metadata
setting "org.opencontainers.image.created" to the current time, or to the
datestamp specified to the **--source-date-epoch** or **--timestamp** flag,
if either was used. If *false*, no such annotation will be present in the
written image.
Note: this information is not present in Docker image formats, so it is discarded when writing images in Docker formats.
**--creds** *creds*
The [username[:password]] to use to authenticate with the registry if required.
@ -486,7 +502,9 @@ those.
**--identity-label** *bool-value*
Adds default identity label `io.buildah.version` if set. (default true).
Adds a label `io.buildah.version` with its value set to the version of buildah
which built the image (default true unless `--timestamp` or
`--source-date-epoch` is used).
**--ignorefile** *file*
@ -497,6 +515,16 @@ Path to an alternative .containerignore (.dockerignore) file.
Write the built image's ID to the file. When `--platform` is specified more
than once, attempting to use this option will trigger an error.
**--inherit-annotations** *bool-value*
Inherit the annotations from the base image or base stages. (default true).
Use cases which set this flag to *false* may need to do the same for the
**--created-annotation** flag.
**--inherit-labels** *bool-value*
Inherit the labels from the base image or base stages. (default true).
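For example, to build an image that carries neither the base image's labels nor its annotations (the image name is illustrative; see also the note above about **--created-annotation**):
```console
# buildah build --inherit-labels=false --inherit-annotations=false --created-annotation=false -t imageName .
```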
**--ipc** *how*
Sets the configuration for IPC namespaces when handling `RUN` instructions.
@ -705,24 +733,34 @@ Windows base images, so using this option is usually unnecessary.
**--output**, **-o**=""
Output destination (format: type=local,dest=path)
Additional output (format: type=local,dest=path)
The --output (or -o) option extends the default behavior of building a container image by allowing users to export the contents of the image as files on the local filesystem, which can be useful for generating local binaries, code generation, etc.
The --output (or -o) option supplements the default behavior of building a
container image by allowing users to export the image's contents as files on
the local filesystem, which can be useful for generating local binaries, code
generation, etc.
The value for --output is a comma-separated sequence of key=value pairs, defining the output type and options.
The value for --output is a comma-separated sequence of key=value pairs,
defining the output type and options.
Supported _keys_ are:
- **dest**: Destination path for exported output. Valid value is absolute or relative path, `-` means the standard output.
- **type**: Defines the type of output to be used. Valid values is documented below.
**dest**: Destination for exported output. Can be set to `-` to indicate standard output, or to an absolute or relative path.
**type**: Defines the type of output to be written. Must be one of the values listed below.
Valid _type_ values are:
- **local**: write the resulting build files to a directory on the client-side.
- **tar**: write the resulting files as a single tarball (.tar).
**local**: write the resulting build files to a directory on the client-side.
**tar**: write the resulting files as a single tarball (.tar).
If no type is specified, the value defaults to **local**.
Alternatively, instead of a comma-separated sequence, the value of **--output** can be just a destination (in the `**dest**` format) (e.g. `--output some-path`, `--output -`) where `--output some-path` is treated as if **type=local** and `--output -` is treated as if **type=tar**.
Alternatively, instead of a comma-separated sequence, the value of **--output**
can be just the destination (in the `**dest**` format) (e.g. `--output
some-path`, `--output -`), and the **type** will be inferred to be **tar** if
the output destination is `-`, and **local** otherwise.
Note: The **--tag** option can also be used to change the file image format to supported `containers-transports(5)`.
Timestamps on the output contents will be set to exactly match the value
specified using the **--timestamp** flag, or to exactly match the value
specified for the **--source-date-epoch** flag, if either are specified.
Note that the **--tag** option can also be used to write the image to any location described by `containers-transports(5)`.
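A few hypothetical invocations (destination paths are illustrative):
```console
# buildah build -o type=local,dest=./build-output -t imageName .
# buildah build --output type=tar,dest=rootfs.tar .
# buildah build --output - . > rootfs.tar
```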
**--pid** *how*
@ -792,6 +830,13 @@ Duration of delay between retry attempts in case of failure when performing push
Defaults to `2s`.
**--rewrite-timestamp**
When generating new layers for the image, ensure that no newly added content
bears a timestamp later than the value used by the **--source-date-epoch**
flag, if one was provided, by replacing any timestamps which are later than
that value, with that value.
**--rm** *bool-value*
Remove intermediate containers after a successful build (default true).
@ -910,7 +955,7 @@ in a safe way that will not end up stored in the final image, or be seen in othe
The value of the secret will be read from an environment variable or file named
by the "id" option, or named by the "src" option if it is specified, or from an
environment variable specified by the "env" option.
The secret will be mounted in the container at `/run/secrets/*id*` by default.
The secret will be mounted in the container at `/run/secrets/<id>` by default.
To later use the secret, use the --mount flag in a `RUN` instruction within a `Containerfile`:
@ -944,7 +989,7 @@ Security Options
"seccomp=profile.json : JSON configuration for a seccomp filter
"unmask=_ALL_ or _/path/1:/path/2_, or shell expanded paths (/proc/*): Paths to unmask separated by a colon. If set to **ALL**, it unmasks all the paths that are masked or made read-only by default.
The default masked paths are **/proc/acpi, /proc/kcore, /proc/keys, /proc/latency_stats, /proc/sched_debug, /proc/scsi, /proc/timer_list, /proc/timer_stats, /sys/firmware, and /sys/fs/selinux**, **/sys/devices/virtual/powercap**. The default paths that are read-only are **/proc/asound**, **/proc/bus**, **/proc/fs**, **/proc/irq**, **/proc/sys**, **/proc/sysrq-trigger**, **/sys/fs/cgroup**.
The default masked paths are **/proc/acpi, /proc/interrupts, /proc/kcore, /proc/keys, /proc/latency_stats, /proc/sched_debug, /proc/scsi, /proc/timer_list, /proc/timer_stats, /sys/devices/virtual/powercap, /sys/firmware**, and **/sys/fs/selinux**. The default paths that are read-only are **/proc/asound**, **/proc/bus**, **/proc/fs**, **/proc/irq**, **/proc/sys**, and **/proc/sysrq-trigger**.
**--shm-size**=""
@ -960,6 +1005,31 @@ Sign the built image using the GPG key that matches the specified fingerprint.
Skip stages in multi-stage builds which don't affect the target stage. (Default is `true`).
**--source-date-epoch** *seconds*
Set the "created" timestamp for the built image to this number of seconds since
the epoch (Unix time 0, i.e., 00:00:00 UTC on 1 January 1970) (default is to
use the value set in the `SOURCE_DATE_EPOCH` environment variable, or the
current time if it is not set).
The "created" timestamp is written into the image's configuration and manifest
when the image is committed, so running the same build two different times
will ordinarily produce images with different sha256 hashes, even if no other
changes were made to the Containerfile and build context.
When this flag is set, a `SOURCE_DATE_EPOCH` build arg will provide its value
for a stage in which it is declared.
When this flag is set, the image configuration's "created" timestamp is always
set to the time specified, which should allow for identical images to be built
at different times using the same set of inputs.
When this flag is set, output written as specified to the **--output** flag
will bear exactly the specified timestamp.
Conflicts with the similar **--timestamp** flag, which also sets its specified
time on the contents of new layers.
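A sketch of a reproducible build, assuming the build context is a git checkout whose last commit time is used as the epoch (the image name is illustrative):
```console
# export SOURCE_DATE_EPOCH=$(git log -1 --format=%ct)
# buildah build --source-date-epoch $SOURCE_DATE_EPOCH --rewrite-timestamp -t imageName .
```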
**--squash**
Squash all layers, including those from base image(s), into one single layer. (Default is false).
@ -1003,10 +1073,25 @@ Commands after the target stage will be skipped.
**--timestamp** *seconds*
Set the create timestamp to seconds since epoch to allow for deterministic builds (defaults to current time).
By default, the created timestamp is changed and written into the image manifest with every commit,
causing the image's sha256 hash to be different even if the sources are exactly the same otherwise.
When --timestamp is set, the created timestamp is always set to the time specified and therefore not changed, allowing the image's sha256 to remain the same. All files committed to the layers of the image will be created with the timestamp.
Set the "created" timestamp for the built image to this number of seconds since
the epoch (Unix time 0, i.e., 00:00:00 UTC on 1 January 1970) (defaults to
current time).
The "created" timestamp is written into the image's configuration and manifest
when the image is committed, so running the same build two different times
will ordinarily produce images with different sha256 hashes, even if no other
changes were made to the Containerfile and build context.
When --timestamp is set, the "created" timestamp is always set to the time
specified, which should allow for identical images to be built at different
times using the same set of inputs.
When --timestamp is set, all content in layers created as part of the build,
and output written as specified to the **--output** flag, will also bear this
same timestamp.
Conflicts with the similar **--source-date-epoch** flag, which by default does
not affect the timestamps of layer contents.
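For example, pinning the configuration, layer contents, and any **--output** contents to the Unix epoch (the image name is illustrative):
```console
# buildah build --timestamp 0 -t imageName .
```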
**--tls-verify** *bool-value*
@ -1035,6 +1120,10 @@ include:
"sigpending": maximum number of pending signals (ulimit -i)
"stack": maximum stack size (ulimit -s)
**--unsetannotation** *annotation*
Unset the image annotation, causing the annotation not to be inherited from the base image.
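For example (the annotation name is illustrative):
```console
# buildah build --unsetannotation org.opencontainers.image.base.name -t imageName .
```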
**--unsetenv** *env*
Unset environment variables from the final image.
@ -1313,6 +1402,16 @@ buildah build --os-feature win32k -t imageName .
buildah build --os-feature win32k- -t imageName .
buildah build --secret=id=mysecret .
buildah build --secret=id=mysecret,env=MYSECRET .
buildah build --secret=id=mysecret,src=MYSECRET,type=env .
buildah build --secret=id=mysecret,src=.mysecret,type=file .
buildah build --secret=id=mysecret,src=.mysecret .
### Building a multi-architecture image using the --manifest option (requires emulation software)
buildah build --arch arm --manifest myimage /tmp/mysrc

View File

@ -26,8 +26,15 @@ The image ID of the image that was created. On error, 1 is returned and errno i
Read the contents of the file `source` and add it to the committed image as a
file at `destination`. If `destination` is not specified, the path of `source`
will be used. The new file will be owned by UID 0, GID 0, have 0644
permissions, and be given a current timestamp unless the **--timestamp** option
is also specified. This option can be specified multiple times.
permissions, and be given the timestamp specified to the **--timestamp** option
if it is specified. This option can be specified multiple times.
**--annotation** *annotation[=value]*
Add an image *annotation* (e.g. annotation=*value*) to the image metadata. Can be used multiple times.
If *annotation* is named, but neither `=` nor a `value` is provided, then the *annotation* is set to an empty value.
Note: this information is not present in Docker image formats, so it is discarded when writing images in Docker formats.
**--authfile** *path*
@ -55,6 +62,16 @@ Read a JSON-encoded version of an image configuration object from the specified
file, and merge the values from it with the configuration of the image being
committed.
**--created-annotation**
Add an image *annotation* (see also **--annotation**) to the image metadata
setting "org.opencontainers.image.created" to the current time, or to the
datestamp specified to the **--source-date-epoch** or **--timestamp** flag,
if either was used. If *false*, no such annotation will be present in the
written image.
Note: this information is not present in Docker image formats, so it is discarded when writing images in Docker formats.
**--creds** *creds*
The [username[:password]] to use to authenticate with the registry if required.
@ -146,7 +163,9 @@ environment variable. `export BUILDAH_FORMAT=docker`
**--identity-label** *bool-value*
Adds default identity label `io.buildah.version` if set. (default true).
Adds a label `io.buildah.version` with its value set to the version of buildah
which committed the image (default true unless `--timestamp` or
`--source-date-epoch` is used).
**--iidfile** *ImageIDfile*
@ -196,6 +215,13 @@ the image is not present locally.
When writing the output image, suppress progress output.
**--rewrite-timestamp**
When generating the new layer for the image, ensure that no newly added content
bears a timestamp later than the value used by the **--source-date-epoch**
flag, if one was provided, by replacing any timestamps which are later than
that value, with that value.
**--rm**
Remove the working container and its contents after creating the image.
Default leaves the container and its content in place.
@ -295,21 +321,56 @@ Generate SBOMs using the specified scanner image.
Sign the new image using the GPG key that matches the specified fingerprint.
**--source-date-epoch** *seconds*
Set the "created" timestamp for the image to this number of seconds since the
epoch (Unix time 0, i.e., 00:00:00 UTC on 1 January 1970) to make it easier to
create deterministic builds (defaults to $SOURCE_DATE_EPOCH if set, otherwise
the current time will be used).
The "created" timestamp is written into the image's configuration and manifest
when the image is committed, so committing the same working container at two
different times will produce images with different sha256 hashes, even if no
other changes were made to the working container in between.
When --source-date-epoch is set, the "created" timestamp is always set to the time
specified, which should allow for identical images to be committed at different
times.
Conflicts with the similar **--timestamp** flag, which also sets its specified
time on layer contents.
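A hypothetical reproducible commit (the epoch value, container, and image names are illustrative):
```console
# buildah commit --source-date-epoch 1700000000 --rewrite-timestamp containerID imageName
```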
**--squash**
Squash all of the new image's layers (including those inherited from a base image) into a single new layer.
**--timestamp** *seconds*
Set the create timestamp to seconds since epoch to allow for deterministic builds (defaults to current time).
By default, the created timestamp is changed and written into the image manifest with every commit,
causing the image's sha256 hash to be different even if the sources are exactly the same otherwise.
When --timestamp is set, the created timestamp is always set to the time specified and therefore not changed, allowing the image's sha256 to remain the same. All files committed to the layers of the image will be created with the timestamp.
Set the "created" timestamp for the image to this number of seconds since the
epoch (Unix time 0, i.e., 00:00:00 UTC on 1 January 1970) to make it easier to
create deterministic builds (defaults to current time).
The "created" timestamp is written into the image's configuration and manifest
when the image is committed, so committing the same working container at two
different times will produce images with different sha256 hashes, even if no
other changes were made to the working container in between.
When --timestamp is set, the "created" timestamp is always set to the time
specified, which should allow for identical images to be committed at different
times. All content in the new layer added as part of the image will also bear
this timestamp.
Conflicts with the similar **--source-date-epoch** flag, which by default does
not affect the timestamps of layer contents.
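For example (container and image names are illustrative):
```console
# buildah commit --timestamp 0 containerID imageName
```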
**--tls-verify** *bool-value*
Require HTTPS and verification of certificates when talking to container registries (defaults to true). TLS verification cannot be used when talking to an insecure registry.
**--unsetannotation** *annotation*
Unset the image annotation, causing the annotation not to be inherited from the base image.
**--unsetenv** *env*
Unset environment variables from the final image.

View File

@ -231,6 +231,10 @@ Note: this setting is not present in the OCIv1 image format, so it is discarded
Set default *stop signal* for container. This signal will be sent when container is stopped, default is SIGINT.
**--unsetannotation** *annotation*
Unset the image annotation, causing the annotation not to be inherited from the base image.
**--unsetlabel** *label*
Unset the image label, causing the label not to be inherited from the base image.

View File

@ -65,6 +65,19 @@ is preserved.
Path to an alternative .containerignore (.dockerignore) file. Requires \-\-contextdir be specified.
**--link**
Create an independent image layer for the added files instead of modifying the working
container's filesystem. If `buildah run` creates a file and `buildah copy --link` adds a file
to the same path, the file from `buildah copy --link` will be present in the committed image.
The --link layer is applied after all container filesystem changes at commit time.
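For illustration (container name and paths are placeholders):
```console
# buildah copy --link containerID ./site/ /var/www/html/
```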
**--parents**
Preserve leading directories in the paths of items being copied, relative to either the
top of the build context, or to the "pivot point", a location in the source path marked
by a path component named "." (i.e., where "/./" occurs in the path).
**--quiet**, **-q**
Refrain from printing a digest of the copied content.
@ -81,6 +94,15 @@ Duration of delay between retry attempts in case of failure when performing pull
Defaults to `2s`.
**--timestamp** *seconds*
Set the timestamp ("mtime") for added content to exactly this number of seconds
since the epoch (Unix time 0, i.e., 00:00:00 UTC on 1 January 1970) to allow
for deterministic builds.
The destination directory into which the content is being copied will most
likely reflect the time at which the content was added to it.
**--tls-verify** *bool-value*
Require verification of certificates when pulling images referred to with the
@ -93,6 +115,8 @@ buildah copy containerID '/myapp/app.conf' '/myapp/app.conf'
buildah copy --exclude=**/*.md docs containerID 'docs' '/docs'
buildah copy --parents containerID './x/a.txt' './y/a.txt' '/parents'
buildah copy --chown myuser:mygroup containerID '/myapp/app.conf' '/myapp/app.conf'
buildah copy --chmod 660 containerID '/myapp/app.conf' '/myapp/app.conf'

View File

@ -136,6 +136,8 @@ Current supported mount TYPES are bind, cache, secret and tmpfs. Writes to `bind
· ro, read-only: (default true for `type=bind`, false for `type=tmpfs`, `type=cache`).
· rw, read-write: (default false for `type=bind`, true for `type=tmpfs`, `type=cache`).
Options specific to bind:
· bind-propagation: shared, slave, private, rshared, rslave, or rprivate(default). See also mount(2). <sup>[[1]](#Footnote1)</sup>
@ -144,6 +146,8 @@ Current supported mount TYPES are bind, cache, secret and tmpfs. Writes to `bind
· from: image name for the root of the source. Defaults to **--contextdir**, mandatory if **--contextdir** was not specified.
· src: location in the context directory (or image, if the **from** option is used) to mount instead of its top-level directory.
· z: Set shared SELinux label on mounted destination. Use if SELinux is enabled on host machine.
· Z: Set private SELinux label on mounted destination. Use if SELinux is enabled on host machine.
@ -166,7 +170,7 @@ Current supported mount TYPES are bind, cache, secret and tmpfs. Writes to `bind
· mode: File mode for new cache directory in octal. Default 0755.
· ro, readonly: read only cache if set.
· src: location in the cache (or image, if the **from** option is used) to mount instead of its top-level directory.
· uid: uid for cache directory.

View File

@ -40,23 +40,26 @@ buildah unshare rm -fr $HOME/.local/share/containers/storage /run/user/\`id -u\`
buildah unshare --mount containerID sh -c 'cat ${containerID}/etc/os-release'
If you want to use buildah with a mount command then you can create a script that looks something like:
buildah unshare --mount root=containerID sh -c 'cat ${root}/etc/os-release'
```
cat buildah-script.sh << _EOF
#!/bin/sh
If you want to use buildah with a 'mount' command then you can create a script that looks something like:
```console
cat > buildah-script.sh << _EOF
#!/bin/bash
ctr=$(buildah from scratch)
mnt=$(buildah mount $ctr)
dnf -y install --installroot=$mnt PACKAGES
dnf -y install --installroot=$mnt --use-host-config --setopt "*.countme=false" PACKAGES
dnf -y clean all --installroot=$mnt
buildah config --entrypoint="/bin/PACKAGE" --env "FOO=BAR" $ctr
buildah commit $ctr imagename
buildah unmount $ctr
_EOF
chmod +x buildah-script.sh
```
Then execute it with:
```
buildah unshare buildah-script.sh
```console
buildah unshare ./buildah-script.sh
```
## SEE ALSO

View File

@ -18,11 +18,15 @@ Note that installation instructions below assume you are running a Linux distro
First step is to install Buildah. Run as root because you will need to be root for installing the Buildah package:
```console
$ sudo -s
```
Then install buildah by running:
```console
# dnf -y install buildah
```
## Rootless User Configuration
@ -32,37 +36,53 @@ If you plan to run Buildah as a user without root privileges, i.e. a "rootless u
After installing Buildah we can see there are no images installed. The `buildah images` command will list all the images:
```console
# buildah images
```
We can also see that there are also no working containers by running:
```console
# buildah containers
```
When you build a working container from an existing image, Buildah defaults to appending '-working-container' to the image's name to construct a name for the container. The Buildah CLI conveniently returns the name of the new container. You can take advantage of this by assigning the returned value to a shell variable using standard shell assignment:
```console
# container=$(buildah from fedora)
```
It is not required to assign the container's name to a shell variable. Running `buildah from fedora` is sufficient. It just helps simplify commands later. To see the name of the container that we stored in the shell variable:
```console
# echo $container
```
What can we do with this new container? Let's try running bash:
```console
# buildah run $container bash
```
Notice we get a new shell prompt because we are running a bash shell inside of the container. It should be noted that `buildah run` is primarily intended for debugging and running commands as part of the build process. A more full-featured engine like Podman or a container runtime interface service like [CRI-O](https://github.com/kubernetes-sigs/cri-o) is more suited for starting containers in production.
Be sure to `exit` out of the container and let's try running something else:
```console
# buildah run $container java
```
Oops. Java is not installed. A message containing something like the following was returned.
```
runc create failed: unable to start container process: exec: "java": executable file not found in $PATH
```
Let's try installing it inside the container using:
```console
# buildah run $container -- dnf -y install java
```
The `--` syntax basically tells Buildah: there are no more `buildah run` command options after this point. The options after this point are for the command that's started inside the container. It is required if the command we specify includes command line options which are not meant for Buildah.
@ -74,81 +94,108 @@ One of the advantages of using `buildah` to build OCI compliant container images
Let's build a container and image from scratch. The special "image" name "scratch" tells Buildah to create an empty container. The container has a small amount of metadata about the container but no real Linux content.
```console
# newcontainer=$(buildah from scratch)
```
You can see this new empty container by running:
```console
# buildah containers
```
You should see output similar to the following:
```
CONTAINER ID BUILDER IMAGE ID IMAGE NAME CONTAINER NAME
82af3b9a9488 * 3d85fcda5754 docker.io/library/fedora:latest fedora-working-container
ac8fa6be0f0a * scratch working-container
```
Its container name is working-container by default and it's stored in the `$newcontainer` variable. Notice the image name (IMAGE NAME) is "scratch". This is a special value that indicates that the working container wasn't based on an image. When we run:
```console
# buildah images
```
We don't see the "scratch" image listed. There is no corresponding scratch image. A container based on "scratch" starts from nothing.
So does this container actually do anything? Let's see.
```console
# buildah run $newcontainer bash
```
Nope. This really is empty. The package installer `dnf` is not even inside this container. It's essentially an empty layer on top of the kernel. So what can be done with that? Thankfully there is a `buildah mount` command.
```console
# scratchmnt=$(buildah mount $newcontainer)
```
Note: If attempting to mount in rootless mode, the command fails. Mounting a container can only be done in a mount namespace that you own. Create and enter a user namespace and mount namespace by executing the `buildah unshare` command. See buildah-mount(1) man page for more information.
```console
$ export newcontainer
$ buildah unshare
# scratchmnt=$(buildah mount $newcontainer)
```
By echoing `$scratchmnt` we can see the path for the [overlay mount point](https://wiki.archlinux.org/index.php/Overlay_filesystem), which is used as the root file system for the container.
```console
# echo $scratchmnt
/var/lib/containers/storage/overlay/b78d0e11957d15b5d1fe776293bd40a36c28825fb6cf76f407b4d0a95b2a200d/merged
```
Notice that the overlay mount point is somewhere under `/var/lib/containers/storage` if you started out as root, and under your home directory's `.local/share/containers/storage` directory if you're in rootless mode. (See above on `containers/storage` or for more information see [containers/storage](https://github.com/containers/storage).)
Now that we have a new empty container we can install or remove software packages or simply copy content into that container. So let's install `bash` and `coreutils` so that we can run bash scripts. This could easily be `nginx` or other packages needed for your container.
**NOTE:** the version in the example below (40) relates to a Fedora version which is the Linux platform this example was run on. If you are running dnf on the host to populate the container, the version you specify must be valid for the host or dnf will throw an error. I.e. If you were to run this on a RHEL platform, you'd need to specify `--releasever 8.1` or similar instead of `--releasever 40`. If you want the container to be a particular Linux platform, change `scratch` in the first line of the example to the platform you want, i.e. `# newcontainer=$(buildah from fedora)`, and then you can specify an appropriate version number for that Linux platform.
**NOTE:** the version in the example below (42) relates to a Fedora version which is the Linux platform this example was run on. If you are running dnf on the host to populate the container, the version you specify must be valid for the host or dnf will throw an error. I.e. If you were to run this on a RHEL platform, you'd need to specify `--releasever 8.1` or similar instead of `--releasever 42`. If you want the container to be a particular Linux platform, change `scratch` in the first line of the example to the platform you want, i.e. `# newcontainer=$(buildah from fedora)`, and then you can specify an appropriate version number for that Linux platform.
# dnf install --installroot $scratchmnt --releasever 40 bash coreutils --setopt install_weak_deps=false -y
```console
# dnf install --installroot $scratchmnt --releasever 42 bash coreutils --use-host-config --setopt "*.countme=false" --setopt install_weak_deps=false -y
```
Let's try it out (showing the prompt in this example to demonstrate the difference):
```console
# buildah run $newcontainer sh
sh-5.1# cd /usr/bin
sh-5.1# ls
sh-5.1# exit
```
Notice we now have a `/usr/bin` directory in the newcontainer's root file system. Let's first copy a simple file from our host into the container. Create a file called runecho.sh which contains the following:
```console
#!/usr/bin/env bash
for i in `seq 0 9`;
do
echo "This is a new container from ipbabble [" $i "]"
done
```
Change the permissions on the file so that it can be run:
```console
# chmod +x runecho.sh
```
With `buildah` files can be copied into the new container. We can then use `buildah run` to run that command within the container by specifying the command. We can also configure the image we'll create from this container to run the command directly when we run it using [Podman](https://github.com/containers/podman) and its `podman run` command. In short the `buildah run` command is equivalent to the "RUN" command in a Dockerfile (it always needs to be told what to run), whereas `podman run` is equivalent to the `docker run` command (it can look at the image's configuration to see what to run). Now let's copy this new command into the container's `/usr/bin` directory, configure the command to be run when the image is run by `podman`, and create an image from the container's root file system and configuration settings:
```console
# To test with Podman, first install via:
# dnf -y install podman
# buildah copy $newcontainer ./runecho.sh /usr/bin/
# buildah config --cmd /usr/bin/runecho.sh $newcontainer
# buildah commit $newcontainer newimage
```
We've got a new image named "newimage". The container is still there because we didn't remove it.
Now run the command in the container with Buildah specifying the command to run in the container:
```console
# buildah run $newcontainer /usr/bin/runecho.sh
This is a new container from ipbabble [ 0 ]
This is a new container from ipbabble [ 1 ]
@ -160,9 +207,11 @@ Now run the command in the container with Buildah specifying the command to run
This is a new container from ipbabble [ 7 ]
This is a new container from ipbabble [ 8 ]
This is a new container from ipbabble [ 9 ]
```
Now use Podman to run the command in a new container based on our new image (no command required):
```console
# podman run --rm newimage
This is a new container from ipbabble [ 0 ]
This is a new container from ipbabble [ 1 ]
@ -174,51 +223,69 @@ Now use Podman to run the command in a new container based on our new image (no
This is a new container from ipbabble [ 7 ]
This is a new container from ipbabble [ 8 ]
This is a new container from ipbabble [ 9 ]
```
It works! Congratulations, you have built a new OCI container image from scratch that uses bash scripting.
Back to Buildah, let's add some more configuration information.
```console
# buildah config --created-by "ipbabble" $newcontainer
# buildah config --author "wgh at redhat.com @ipbabble" --label name=fedora40-bashecho $newcontainer
# buildah config --author "wgh at redhat.com @ipbabble" --label name=fedora42-bashecho $newcontainer
```
We can inspect the working container's metadata using the `inspect` command:
```console
# buildah inspect $newcontainer
```
We should probably unmount the working container's rootfs. We will need to commit the container again to create an image that includes the two configuration changes we just made:
```console
# buildah unmount $newcontainer
# buildah commit $newcontainer fedora-bashecho
# buildah images
```
And you can see there is a new image called `localhost/fedora-bashecho:latest`. You can inspect the new image using:
```console
# buildah inspect --type=image fedora-bashecho
```
Later when you want to create a new container or containers from this image, you simply need to do `buildah from fedora-bashecho`. This will create a new container based on this image for you.
Now that you have the new image you can remove the scratch container called working-container:
```console
# buildah rm $newcontainer
```
or
```console
# buildah rm working-container
```
## OCI images built using Buildah are portable
Let's test if this new OCI image is really portable to another container engine like Docker. First you should install Docker and start it. Notice that Docker requires a running daemon process in order to run any client commands. Buildah and Podman have no daemon requirement.
```console
# dnf -y install docker
# systemctl start docker
```
Let's copy that image from where containers/storage stores it to where the Docker daemon stores its images, so that we can run it using Docker. We can achieve this using `buildah push`. This copies the image to Docker's storage area which is located under `/var/lib/docker`. Docker's storage is managed by the Docker daemon. This needs to be explicitly stated by telling Buildah to push the image to the Docker daemon using `docker-daemon:`.
```console
# buildah push fedora-bashecho docker-daemon:fedora-bashecho:latest
```
Under the covers, the containers/image library calls into the containers/storage library to read the image's contents from where Buildah keeps them, and sends them to the local Docker daemon, which writes them to its own storage. This can take a little while, and you usually won't need to do it: if you're using `buildah`, you're probably not using Docker. This is just for demo purposes. Let's try it:
```console
# docker run --rm fedora-bashecho
This is a new container from ipbabble [ 0 ]
This is a new container from ipbabble [ 1 ]
@ -230,10 +297,13 @@ Under the covers, the containers/image library calls into the containers/storage
This is a new container from ipbabble [ 7 ]
This is a new container from ipbabble [ 8 ]
This is a new container from ipbabble [ 9 ]
```
OCI container images built with `buildah` are completely standard as expected. So now it might be time to run:
```console
# dnf -y remove docker
```
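As an aside, `buildah push` isn't limited to registries and the Docker daemon; it supports the other transports provided by the containers/image library as well. A minimal sketch that exports the image to an OCI layout directory on disk (the path `/tmp/fedora-bashecho` is just an illustration):
```console
# buildah push fedora-bashecho oci:/tmp/fedora-bashecho:latest
```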
## Using Containerfiles/Dockerfiles with Buildah
@ -241,6 +311,7 @@ What if you have been using Docker for a while and have some existing Dockerfile
Find one of your Dockerfiles or create a file called Dockerfile. Use the following example or some variation if you'd like:
```Dockerfile
# Base on the most recently released Fedora
FROM fedora:latest
MAINTAINER ipbabble email buildahboy@redhat.com # not a real email
@ -254,22 +325,31 @@ Find one of your Dockerfiles or create a file called Dockerfile. Use the followi
# Run the httpd
CMD ["/usr/sbin/httpd", "-DFOREGROUND"]
```
Now run `buildah build` with the name of the Dockerfile and the name to be given to the created image (e.g. fedora-httpd):
```console
# buildah build -f Dockerfile -t fedora-httpd .
```
or, because `buildah build` defaults to `Dockerfile` and using the current directory as the build context:
```console
# buildah build -t fedora-httpd
```
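If you plan to rebuild often, you can also pass `--layers` so that intermediate layers are cached and reused between builds; a minimal sketch:
```console
# buildah build --layers -t fedora-httpd .
```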
You will see all the steps of the Dockerfile executing. Afterwards `buildah images` will show you the new image. Now we can create a container from the image and test it with `podman run`:
```console
# podman run --rm -p 8123:80 fedora-httpd
```
While that container is running, in another shell run:
```console
# curl localhost:8123
```
You will see the standard Apache webpage.


@ -9,19 +9,27 @@ In the [first tutorial](https://github.com/containers/buildah/blob/main/docs/tut
First we must pull down a registry. As a shortcut we will save the container name that is returned from the `buildah from` command into a bash variable called `registryctr`. This is just like we did in Tutorial 1:
```console
# registryctr=$(buildah from registry)
```
It is worth pointing out that the `from` command can also use transports other than the default, implicitly assumed one, which first looks in local containers-storage (`containers-storage:`) and then in a container registry (`docker:`, Docker Hub by default). For example, if you already had a registry container image downloaded by a local Docker daemon, you could use the following:
```console
# registryctr=$(buildah from docker-daemon:registry:latest)
```
Then we need to start the registry. You should start the registry in a separate shell and leave it running there:
```console
# buildah run --net=host $registryctr /entrypoint.sh /etc/docker/registry/config.yml
```
If you would like to see more details as to what is going on inside the registry, especially if you are having problems with the registry, you can run the registry container in debug mode as follows:
```console
# buildah --log-level=debug run --net=host $registryctr /entrypoint.sh /etc/docker/registry/config.yml
```
You can use `--log-level=debug` on any Buildah command.
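For example, a minimal sketch applying it to a different subcommand:
```console
# buildah --log-level=debug images
```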
@ -29,10 +37,13 @@ The registry is running and is waiting for requests to process. Notice that this
Let's push our image to the private registry. By default, Buildah is set up to only make secure connections to a registry. Therefore we will need to turn the TLS verification off using the `--tls-verify` flag. We also need to tell Buildah that the registry is on this local host (i.e. localhost) and listening on port 5000. Similar to what you'd expect to do on multi-tenant Docker Hub, we will explicitly specify that the registry is to store the image under the `ipbabble` repository - so as not to clash with other users' similarly named images.
```console
# buildah push --tls-verify=false fedora-bashecho docker://localhost:5000/ipbabble/fedora-bashecho:latest
```
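If your registry required authentication, you would log in to it first with `buildah login`, as the OpenShift tutorial later in this document does; a minimal sketch against this local registry (which, as configured here, does not actually require credentials):
```console
# buildah login --tls-verify=false -u ipbabble localhost:5000
```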
[Skopeo](https://github.com/containers/skopeo) is a containers tool that was created to inspect images in registries without having to pull the image from the registry. It has grown to have many other uses. We will verify that the image has been stored by using Skopeo to inspect the image in the registry:
```console
# skopeo inspect --tls-verify=false docker://localhost:5000/ipbabble/fedora-bashecho:latest
{
"Name": "localhost:5000/ipbabble/fedora-bashecho",
@ -51,9 +62,11 @@ Let's push our image to the private registry. By default, Buildah is set up to o
"sha256:0cb7556c714767b8da6e0299cbeab765abaddede84769475c023785ae66d10ca"
]
}
```
We can verify that it is still portable to Docker by starting Docker again, as we did in the first tutorial. Then we can pull down the image and start the container using Docker:
```console
# systemctl start docker
# docker pull localhost:5000/ipbabble/fedora-bashecho
Using default tag: latest
@ -75,13 +88,17 @@ We can verify that it is still portable to Docker by starting Docker again, as w
This is a new container named ipbabble [ 8 ]
This is a new container named ipbabble [ 9 ]
# systemctl stop docker
```
Pushing to Docker Hub is just as easy. Of course, you must have an account with credentials. In this example I'm using a Docker Hub API key, which has the form "username:password" (the example password has been edited for privacy), created with my Docker Hub account. I pass it via the `--creds` flag, specify my local image name `fedora-bashecho` as the image source, and use the `docker` transport with no registry name or port so that the push goes to the default Docker Hub registry on its default port:
```console
# buildah push --creds=ipbabble:5bbb9990-6eeb-1234-af1a-aaa80066887c fedora-bashecho docker://ipbabble/fedora-bashecho:latest
```
And let's inspect that with Skopeo:
```console
# skopeo inspect --creds ipbabble:5bbb9990-6eeb-1234-af1a-aaa80066887c docker://ipbabble/fedora-bashecho:latest
{
"Name": "docker.io/ipbabble/fedora-bashecho",
@ -100,9 +117,11 @@ And let's inspect that with Skopeo:
"sha256:0cb7556c714767b8da6e0299cbeab765abaddede84769475c023785ae66d10ca"
]
}
```
We can use Buildah to pull down the image using the `buildah from` command. But before we do, let's clean up our local containers-storage so that we don't already have a copy of the fedora-bashecho image; otherwise Buildah will see that it already exists and not bother pulling it down.
```console
# buildah images
IMAGE ID IMAGE NAME CREATED AT SIZE
d4cd7d73ee42 docker.io/library/registry:latest Dec 1, 2017 22:15 31.74 MB
@ -113,19 +132,24 @@ We can use Buildah to pull down the image using the `buildah from` command. But
# buildah images
IMAGE ID IMAGE NAME CREATED AT SIZE
d4cd7d73ee42 docker.io/library/registry:latest Dec 1, 2017 22:15 31.74 MB
```
Okay, so we don't have a fedora-bashecho image anymore. Let's pull the image from Docker Hub:
```console
# buildah from ipbabble/fedora-bashecho
```
If you don't want to bother doing the remove image step (`rmi`), you can use the `--pull-always` flag to force the image to be pulled again, overwriting any corresponding local image.
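For example, a minimal sketch:
```console
# buildah from --pull-always ipbabble/fedora-bashecho
```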
Now check that the image is in the local containers-storage:
```console
# buildah images
IMAGE ID IMAGE NAME CREATED AT SIZE
d4cd7d73ee42 docker.io/library/registry:latest Dec 1, 2017 22:15 31.74 MB
864871ac1c45 docker.io/ipbabble/fedora-bashecho:latest Dec 5, 2017 21:38 315.4 MB
```
Success!


@ -15,19 +15,27 @@ The following assumes installation on Fedora.
Run as root because you will need to be root for installing the Buildah package:
```console
$ sudo -s
```
Then install Buildah by running:
```console
# dnf -y install buildah
```
After installing Buildah, check to see that there are no images installed. The `buildah images` command will list all the images:
```console
# buildah images
```
We can also see that there are no containers by running:
```console
# buildah containers
```
## Examples
@ -41,7 +49,7 @@ The first example was provided by Chris Collins (GitHub @clcollins), the idea is
First create two Dockerfiles:
```
```Dockerfile
$ cat << EOF > Dockerfile
FROM registry.fedoraproject.org/fedora:latest
RUN touch /foo
@ -56,7 +64,7 @@ EOF
Now to create the first container image and verify that ONBUILD has been set:
```
```console
# buildah build --format=docker -f Dockerfile -t onbuild-image .
# buildah inspect --format '{{.Docker.Config.OnBuild}}' onbuild-image
[RUN touch /bar]
@ -64,7 +72,7 @@ Now to create the first container image and verify that ONBUILD has been set:
Now create the second container image; the `/bar` file will be created within it:
```
```console
# buildah build --format=docker -f Dockerfile-2 -t result-image .
STEP 1: FROM onbuild-image
STEP 2: RUN touch /bar # Note /bar is created here based on the ONBUILD in the base image
@ -82,7 +90,7 @@ Instead of using a Dockerfile to create the onbuild-image, Buildah allows you to
First a Fedora container will be created with `buildah from`, then the `/foo` file will be added with `buildah run`. The `buildah config` command will configure ONBUILD to add `/bar` when a container image is created from the primary image, and finally the image will be saved with `buildah commit`.
```
```console
# buildah from --format=docker --name onbuild-container registry.fedoraproject.org/fedora:latest
# buildah run onbuild-container touch /foo
# buildah config --onbuild="RUN touch /bar" onbuild-container
@ -93,7 +101,7 @@ First a Fedora container will be created with `buildah from`, then the `/foo` fi
```
The onbuild-image has been created, so now create a container from it using the same commands as in the first example, with the second Dockerfile:
```
```console
# buildah build --format=docker -f Dockerfile-2 -t result-image .
STEP 1: FROM onbuild-image
STEP 2: RUN touch /bar # Note /bar is created here based on the ONBUILD in the base image
@ -106,7 +114,7 @@ $ container=$(buildah from result-image)
```
Or for bonus points, piece the secondary container image together with Buildah commands directly:
```
```console
# buildah from --format=docker --name result-container onbuild-image
result-container
# buildah run result-container touch /baz
@ -118,7 +126,7 @@ result-container
For this example the ONBUILD instructions in the primary container image will be used to copy a shell script and then run it in the secondary container image. For the script, we'll make use of the shell script from the [Introduction Tutorial](01-intro.md). First create a file in the local directory called `runecho.sh` containing the following:
```
```console
#!/usr/bin/env bash
for i in `seq 0 9`;
@ -128,13 +136,13 @@ done
```
Change the permissions on the file so that it can be run:
```
```console
$ chmod +x runecho.sh
```
Now create a second primary container image. This image has multiple ONBUILD instructions: the first copies the file into the image, and the second then runs it. We're going to do this example using only Buildah commands; they could easily be translated into a Dockerfile (a rough equivalent is sketched just below), or saved to a script directly.
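A minimal sketch of what the equivalent Dockerfile for this primary image might look like, inferred from the `buildah config --onbuild` calls that follow (it is not part of the tutorial's commands):
```Dockerfile
# Primary image: these ONBUILD instructions fire when a child image is built FROM it
FROM fedora:latest
ONBUILD COPY ./runecho.sh /usr/bin/runecho.sh
ONBUILD RUN /usr/bin/runecho.sh
```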
```
```console
# buildah from --format=docker --name onbuild-container-2 fedora:latest
onbuild-container-2
# buildah config --onbuild="COPY ./runecho.sh /usr/bin/runecho.sh" onbuild-container-2
@ -147,7 +155,7 @@ onbuild-container-2
Now the secondary container can be created from the second primary container image, onbuild-image-2. When it is created, the runecho.sh script will be copied into the container's /usr/bin directory and then run from there.
```
```console
# buildah from --format=docker --name result-container-2 onbuild-image-2
STEP 1: COPY ./runecho.sh /usr/bin/runecho.sh
STEP 2: RUN /usr/bin/runecho.sh
@ -164,7 +172,7 @@ result-container-2
```
As result-container-2 has a copy of the script stored in its /usr/bin, it can be run at any time.
```
```console
# buildah run result-container-2 /usr/bin/runecho.sh
This is a new container pull ipbabble [ 1 ]
This is a new container pull ipbabble [ 2 ]


@ -18,13 +18,13 @@ Make the registry URL available to the following steps.
*Note that you need to change this so it matches your OpenShift installation.*
````console
```console
$ export REGISTRY_URL=default-route-openshift-image-registry.apps.whatever.com
````
```
Login to OpenShift and its registry:
````console
```console
$ oc login -n image-build
Username: ...
Password: ...
@ -36,7 +36,7 @@ Using project "image-build".
$ oc whoami -t | buildah login --tls-verify=false -u $(id -u -n) --password-stdin $REGISTRY_URL
Login Succeeded!
````
```
### Make builder image
@ -47,7 +47,7 @@ The image starts a python web server. This allows us to interact with the contai
First create an ImageStream to hold the image:
````console
```console
$ oc create -f - <<EOF
apiVersion: image.openshift.io/v1
kind: ImageStream
@ -56,14 +56,14 @@ metadata:
EOF
imagestream.image.openshift.io/buildah created
````
```
Then create the image.
Note that no packages are updated - this should ensure that this tutorial is actually working.
If you are making anything for use in the real world, make sure to update it frequently for security fixes!
````console
```console
$ cat > Containerfile-buildah <<EOF
FROM quay.io/buildah/stable:v1.36.0
@ -106,14 +106,14 @@ $ buildah push --tls-verify=false $REGISTRY_URL/image-build/buildah
Getting image source signatures
...
Storing signatures
````
```
### Create Service Account for building images
Create a service account which is solely used for image building.
````console
```console
$ oc create -f - <<EOF
apiVersion: v1
kind: ServiceAccount
@ -122,14 +122,14 @@ metadata:
EOF
serviceaccount/buildah-sa created
````
```
You need to assign it the ability to run as the standard `anyuid` [SCC](https://docs.openshift.com/container-platform/4.3/authentication/managing-security-context-constraints.html).
````console
```console
$ oc adm policy add-scc-to-user anyuid -z buildah-sa
clusterrole.rbac.authorization.k8s.io/system:openshift:scc:anyuid added: "buildah-sa"
````
```
This will give the container the *cap_kill*, *cap_setgid*, and *cap_setuid* capabilities, which are extras compared to the `restricted` SCC.
Note that *cap_kill* is dropped by the DeploymentConfig, but the two others are required to execute commands with different user ids as an image is built.
@ -137,13 +137,13 @@ Note that *cap_kill* is dropped by the DeploymentConfig, but the two others are
With this in place, when you get the Pod running (in a little while!), its YAML state will contain:
````
```
kind: Pod
metadata:
...
openshift.io/scc: anyuid
...
````
```
This tells you that the Pod has been launched with the correct permissions.
@ -154,7 +154,7 @@ This is a simple RC just to get the container running.
Note that it drops CAP_KILL which is not required.
````console
```console
$ oc create -f - <<EOF
apiVersion: v1
kind: ReplicationController
@ -187,7 +187,7 @@ spec:
EOF
replicationcontroller/buildah created
````
```
#### The Buildah container
@ -195,7 +195,7 @@ In the OpenShift console you can now open the Pod's terminal (or run `oc rsh rc/
This is what the user/platform should look like:
````console
```console
sh-5.0$ id
uid=1000(build) gid=1000(build) groups=1000(build)
@ -216,11 +216,11 @@ uid=1000(build) euid=1000(build)
gid=1000(build)
groups=1000(build)
Guessed mode: HYBRID (4)
````
```
This is what the Buildah data should look like:
````console
```console
sh-5.0$ buildah version
Version: 1.36.0
Go Version: go1.22.3
@ -277,7 +277,7 @@ sh-5.0$ buildah info
"RunRoot": "/var/tmp/storage-run-1000/containers"
}
}
````
```
#### Building an image
@ -285,7 +285,7 @@ Now create some files for testing.
This container test file exercises at least some of the critical parts of building an image (package update/installation, execution of commands, and use of volumes).
````console
```console
sh-5.0$ cat > test-script.sh <<EOF
#!/bin/bash
echo "Args \$*"
@ -303,11 +303,11 @@ RUN dnf install -y gcc
EOF
sh-5.0$ mkdir output
````
```
And finally build the image, testing that everything works as expected:
````console
```console
sh-5.0$ buildah build --layers -v /home/build/output:/output:rw -v /home/build/test-script.sh:/test-script.sh:ro -t myimage -f Containerfile.test
FROM fedora:40
RUN ls -l /test-script.sh
@ -477,4 +477,4 @@ registry.fedoraproject.org/fedora 40 b8638217aa4e 13 hours ago 233
sh-5.0$ ls -l output/
total 4
-rw-r--r--. 1 build build 288 Aug 5 18:35 update-output.txt
````
```


@ -23,4 +23,3 @@ Learn how to include Buildah as a library in your build tool.
**[Rootless OpenShift container](05-openshift-rootless-build.md)**
Learn how to build an image from a rootless OpenShift container.

go.mod

@ -2,156 +2,142 @@ module github.com/containers/buildah
// Warning: Ensure the "go" and "toolchain" versions match exactly to prevent unwanted auto-updates
go 1.22.8
go 1.23.3
require (
github.com/containerd/platforms v1.0.0-rc.1
github.com/containernetworking/cni v1.2.3
github.com/containers/common v0.62.0
github.com/containers/image/v5 v5.34.0
github.com/containers/luksy v0.0.0-20250106202729-a3a812db5b72
github.com/containernetworking/cni v1.3.0
github.com/containers/common v0.64.0
github.com/containers/image/v5 v5.36.0
github.com/containers/luksy v0.0.0-20250609192159-bc60f96d4194
github.com/containers/ocicrypt v1.2.1
github.com/containers/storage v1.57.1
github.com/cyphar/filepath-securejoin v0.3.6
github.com/containers/storage v1.59.0
github.com/cyphar/filepath-securejoin v0.4.1
github.com/docker/distribution v2.8.3+incompatible
github.com/docker/docker v27.5.1+incompatible
github.com/docker/docker v28.3.2+incompatible
github.com/docker/go-connections v0.5.0
github.com/docker/go-units v0.5.0
github.com/fsouza/go-dockerclient v1.12.0
github.com/fsouza/go-dockerclient v1.12.1
github.com/hashicorp/go-multierror v1.1.1
github.com/mattn/go-shellwords v1.0.12
github.com/moby/buildkit v0.19.0
github.com/moby/buildkit v0.23.2
github.com/moby/sys/capability v0.4.0
github.com/moby/sys/userns v0.1.0
github.com/opencontainers/cgroups v0.0.4
github.com/opencontainers/go-digest v1.0.0
github.com/opencontainers/image-spec v1.1.0
github.com/opencontainers/runc v1.2.4
github.com/opencontainers/runtime-spec v1.2.0
github.com/opencontainers/runtime-tools v0.9.1-0.20241108202711-f7e3563b0271
github.com/opencontainers/selinux v1.11.1
github.com/openshift/imagebuilder v1.2.15
github.com/seccomp/libseccomp-golang v0.10.0
github.com/opencontainers/image-spec v1.1.1
github.com/opencontainers/runc v1.3.0
github.com/opencontainers/runtime-spec v1.2.1
github.com/opencontainers/runtime-tools v0.9.1-0.20250523060157-0ea5ed0382a2
github.com/opencontainers/selinux v1.12.0
github.com/openshift/imagebuilder v1.2.16
github.com/seccomp/libseccomp-golang v0.11.0
github.com/sirupsen/logrus v1.9.3
github.com/spf13/cobra v1.8.1
github.com/spf13/pflag v1.0.6
github.com/spf13/cobra v1.9.1
github.com/spf13/pflag v1.0.7
github.com/stretchr/testify v1.10.0
go.etcd.io/bbolt v1.3.11
golang.org/x/crypto v0.32.0
golang.org/x/sync v0.10.0
golang.org/x/sys v0.29.0
golang.org/x/term v0.28.0
sigs.k8s.io/yaml v1.4.0
tags.cncf.io/container-device-interface v0.8.0
go.etcd.io/bbolt v1.4.2
golang.org/x/crypto v0.40.0
golang.org/x/sync v0.16.0
golang.org/x/sys v0.34.0
golang.org/x/term v0.33.0
tags.cncf.io/container-device-interface v1.0.1
)
require (
dario.cat/mergo v1.0.1 // indirect
github.com/Azure/go-ansiterm v0.0.0-20230124172434-306776ec8161 // indirect
github.com/BurntSushi/toml v1.4.0 // indirect
dario.cat/mergo v1.0.2 // indirect
github.com/Azure/go-ansiterm v0.0.0-20250102033503-faa5f7b0171c // indirect
github.com/BurntSushi/toml v1.5.0 // indirect
github.com/Microsoft/go-winio v0.6.2 // indirect
github.com/Microsoft/hcsshim v0.12.9 // indirect
github.com/Microsoft/hcsshim v0.13.0 // indirect
github.com/VividCortex/ewma v1.2.0 // indirect
github.com/acarl005/stripansi v0.0.0-20180116102854-5a71ef0e047d // indirect
github.com/aead/serpent v0.0.0-20160714141033-fba169763ea6 // indirect
github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2 // indirect
github.com/chzyer/readline v1.5.1 // indirect
github.com/containerd/cgroups/v3 v3.0.3 // indirect
github.com/containerd/cgroups/v3 v3.0.5 // indirect
github.com/containerd/errdefs v1.0.0 // indirect
github.com/containerd/errdefs/pkg v0.3.0 // indirect
github.com/containerd/log v0.1.0 // indirect
github.com/containerd/stargz-snapshotter/estargz v0.16.3 // indirect
github.com/containerd/typeurl/v2 v2.2.3 // indirect
github.com/containernetworking/plugins v1.5.1 // indirect
github.com/containernetworking/plugins v1.7.1 // indirect
github.com/containers/libtrust v0.0.0-20230121012942-c1716e8a8d01 // indirect
github.com/coreos/go-systemd/v22 v22.5.0 // indirect
github.com/cyberphone/json-canonicalization v0.0.0-20231217050601-ba74d44ecf5f // indirect
github.com/cyberphone/json-canonicalization v0.0.0-20241213102144-19d51d7fe467 // indirect
github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect
github.com/disiqueira/gotree/v3 v3.0.2 // indirect
github.com/distribution/reference v0.6.0 // indirect
github.com/docker/docker-credential-helpers v0.8.2 // indirect
github.com/docker/docker-credential-helpers v0.9.3 // indirect
github.com/felixge/httpsnoop v1.0.4 // indirect
github.com/fsnotify/fsnotify v1.8.0 // indirect
github.com/go-jose/go-jose/v4 v4.0.4 // indirect
github.com/go-logr/logr v1.4.2 // indirect
github.com/fsnotify/fsnotify v1.9.0 // indirect
github.com/go-jose/go-jose/v4 v4.0.5 // indirect
github.com/go-logr/logr v1.4.3 // indirect
github.com/go-logr/stdr v1.2.2 // indirect
github.com/go-openapi/analysis v0.23.0 // indirect
github.com/go-openapi/errors v0.22.0 // indirect
github.com/go-openapi/jsonpointer v0.21.0 // indirect
github.com/go-openapi/jsonreference v0.21.0 // indirect
github.com/go-openapi/loads v0.22.0 // indirect
github.com/go-openapi/runtime v0.28.0 // indirect
github.com/go-openapi/spec v0.21.0 // indirect
github.com/go-openapi/strfmt v0.23.0 // indirect
github.com/go-openapi/swag v0.23.0 // indirect
github.com/go-openapi/validate v0.24.0 // indirect
github.com/godbus/dbus/v5 v5.1.0 // indirect
github.com/gogo/protobuf v1.3.2 // indirect
github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect
github.com/golang/groupcache v0.0.0-20241129210726-2c02b8208cf8 // indirect
github.com/golang/protobuf v1.5.4 // indirect
github.com/google/go-containerregistry v0.20.2 // indirect
github.com/google/go-containerregistry v0.20.3 // indirect
github.com/google/go-intervals v0.0.2 // indirect
github.com/google/uuid v1.6.0 // indirect
github.com/gorilla/mux v1.8.1 // indirect
github.com/hashicorp/errwrap v1.1.0 // indirect
github.com/inconshreveable/mousetrap v1.1.0 // indirect
github.com/jinzhu/copier v0.4.0 // indirect
github.com/josharian/intern v1.0.0 // indirect
github.com/json-iterator/go v1.1.12 // indirect
github.com/klauspost/compress v1.17.11 // indirect
github.com/klauspost/compress v1.18.0 // indirect
github.com/klauspost/pgzip v1.2.6 // indirect
github.com/letsencrypt/boulder v0.0.0-20240620165639-de9c06129bec // indirect
github.com/mailru/easyjson v0.7.7 // indirect
github.com/manifoldco/promptui v0.9.0 // indirect
github.com/mattn/go-runewidth v0.0.16 // indirect
github.com/mattn/go-sqlite3 v1.14.24 // indirect
github.com/mattn/go-sqlite3 v1.14.28 // indirect
github.com/miekg/pkcs11 v1.1.1 // indirect
github.com/mistifyio/go-zfs/v3 v3.0.1 // indirect
github.com/mitchellh/mapstructure v1.5.0 // indirect
github.com/moby/docker-image-spec v1.3.1 // indirect
github.com/moby/go-archive v0.1.0 // indirect
github.com/moby/patternmatcher v0.6.0 // indirect
github.com/moby/sys/mountinfo v0.7.2 // indirect
github.com/moby/sys/sequential v0.6.0 // indirect
github.com/moby/sys/user v0.3.0 // indirect
github.com/moby/term v0.5.0 // indirect
github.com/moby/sys/user v0.4.0 // indirect
github.com/moby/term v0.5.2 // indirect
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect
github.com/modern-go/reflect2 v1.0.2 // indirect
github.com/morikuni/aec v1.0.0 // indirect
github.com/oklog/ulid v1.3.1 // indirect
github.com/ostreedev/ostree-go v0.0.0-20210805093236-719684c64e4f // indirect
github.com/pkg/errors v0.9.1 // indirect
github.com/planetscale/vtprotobuf v0.6.1-0.20240319094008-0393e58bdf10 // indirect
github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect
github.com/proglottis/gpgme v0.1.4 // indirect
github.com/rivo/uniseg v0.4.7 // indirect
github.com/secure-systems-lab/go-securesystemslib v0.9.0 // indirect
github.com/sigstore/fulcio v1.6.4 // indirect
github.com/sigstore/rekor v1.3.8 // indirect
github.com/sigstore/sigstore v1.8.12 // indirect
github.com/sigstore/fulcio v1.6.6 // indirect
github.com/sigstore/protobuf-specs v0.4.1 // indirect
github.com/sigstore/sigstore v1.9.5 // indirect
github.com/smallstep/pkcs7 v0.1.1 // indirect
github.com/stefanberger/go-pkcs11uri v0.0.0-20230803200340-78284954bff6 // indirect
github.com/sylabs/sif/v2 v2.20.2 // indirect
github.com/syndtr/gocapability v0.0.0-20200815063812-42c35b437635 // indirect
github.com/tchap/go-patricia/v2 v2.3.2 // indirect
github.com/sylabs/sif/v2 v2.21.1 // indirect
github.com/tchap/go-patricia/v2 v2.3.3 // indirect
github.com/titanous/rocacheck v0.0.0-20171023193734-afe73141d399 // indirect
github.com/ulikunitz/xz v0.5.12 // indirect
github.com/vbatts/tar-split v0.11.7 // indirect
github.com/vbauerster/mpb/v8 v8.9.1 // indirect
github.com/vishvananda/netlink v1.3.1-0.20240922070040-084abd93d350 // indirect
github.com/vishvananda/netns v0.0.4 // indirect
go.mongodb.org/mongo-driver v1.14.0 // indirect
github.com/vbatts/tar-split v0.12.1 // indirect
github.com/vbauerster/mpb/v8 v8.10.2 // indirect
github.com/vishvananda/netlink v1.3.1 // indirect
github.com/vishvananda/netns v0.0.5 // indirect
go.opencensus.io v0.24.0 // indirect
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.56.0 // indirect
go.opentelemetry.io/otel v1.31.0 // indirect
go.opentelemetry.io/otel/metric v1.31.0 // indirect
go.opentelemetry.io/otel/trace v1.31.0 // indirect
golang.org/x/exp v0.0.0-20250103183323-7d7fa50e5329 // indirect
golang.org/x/mod v0.22.0 // indirect
golang.org/x/net v0.34.0 // indirect
golang.org/x/text v0.21.0 // indirect
google.golang.org/genproto/googleapis/rpc v0.0.0-20250102185135-69823020774d // indirect
google.golang.org/grpc v1.69.4 // indirect
google.golang.org/protobuf v1.36.2 // indirect
go.opentelemetry.io/auto/sdk v1.1.0 // indirect
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.60.0 // indirect
go.opentelemetry.io/otel v1.35.0 // indirect
go.opentelemetry.io/otel/metric v1.35.0 // indirect
go.opentelemetry.io/otel/trace v1.35.0 // indirect
go.yaml.in/yaml/v2 v2.4.2 // indirect
golang.org/x/mod v0.25.0 // indirect
golang.org/x/net v0.41.0 // indirect
golang.org/x/text v0.27.0 // indirect
google.golang.org/genproto/googleapis/api v0.0.0-20250303144028-a0af3efb3deb // indirect
google.golang.org/genproto/googleapis/rpc v0.0.0-20250313205543-e70fdf4c4cb4 // indirect
google.golang.org/grpc v1.72.2 // indirect
google.golang.org/protobuf v1.36.6 // indirect
gopkg.in/yaml.v3 v3.0.1 // indirect
k8s.io/klog v1.0.0 // indirect
tags.cncf.io/container-device-interface/specs-go v0.8.0 // indirect
sigs.k8s.io/yaml v1.5.0 // indirect
tags.cncf.io/container-device-interface/specs-go v1.0.0 // indirect
)

go.sum

@ -1,30 +1,25 @@
cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
dario.cat/mergo v1.0.1 h1:Ra4+bf83h2ztPIQYNP99R6m+Y7KfnARDfID+a+vLl4s=
dario.cat/mergo v1.0.1/go.mod h1:uNxQE+84aUszobStD9th8a29P2fMDhsBdgRYvZOxGmk=
github.com/14rcole/gopopulate v0.0.0-20180821133914-b175b219e774 h1:SCbEWT58NSt7d2mcFdvxC9uyrdcTfvBbPLThhkDmXzg=
github.com/14rcole/gopopulate v0.0.0-20180821133914-b175b219e774/go.mod h1:6/0dYRLLXyJjbkIPeeGyoJ/eKOSI0eU6eTlCBYibgd0=
dario.cat/mergo v1.0.2 h1:85+piFYR1tMbRrLcDwR18y4UKJ3aH1Tbzi24VRW1TK8=
dario.cat/mergo v1.0.2/go.mod h1:E/hbnu0NxMFBjpMIE34DRGLWqDy0g5FuKDhCb31ngxA=
github.com/AdaLogics/go-fuzz-headers v0.0.0-20240806141605-e8a1dd7889d6 h1:He8afgbRMd7mFxO99hRNu+6tazq8nFF9lIwo9JFroBk=
github.com/AdaLogics/go-fuzz-headers v0.0.0-20240806141605-e8a1dd7889d6/go.mod h1:8o94RPi1/7XTJvwPpRSzSUedZrtlirdB3r9Z20bi2f8=
github.com/Azure/go-ansiterm v0.0.0-20230124172434-306776ec8161 h1:L/gRVlceqvL25UVaW/CKtUDjefjrs0SPonmDGUVOYP0=
github.com/Azure/go-ansiterm v0.0.0-20230124172434-306776ec8161/go.mod h1:xomTg63KZ2rFqZQzSB4Vz2SUXa1BpHTVz9L5PTmPC4E=
github.com/Azure/go-ansiterm v0.0.0-20250102033503-faa5f7b0171c h1:udKWzYgxTojEKWjV8V+WSxDXJ4NFATAsZjh8iIbsQIg=
github.com/Azure/go-ansiterm v0.0.0-20250102033503-faa5f7b0171c/go.mod h1:xomTg63KZ2rFqZQzSB4Vz2SUXa1BpHTVz9L5PTmPC4E=
github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
github.com/BurntSushi/toml v1.4.0 h1:kuoIxZQy2WRRk1pttg9asf+WVv6tWQuBNVmK8+nqPr0=
github.com/BurntSushi/toml v1.4.0/go.mod h1:ukJfTF/6rtPPRCnwkur4qwRxa8vTRFBF0uk2lLoLwho=
github.com/BurntSushi/toml v1.5.0 h1:W5quZX/G/csjUnuI8SUYlsHs9M38FC7znL0lIO+DvMg=
github.com/BurntSushi/toml v1.5.0/go.mod h1:ukJfTF/6rtPPRCnwkur4qwRxa8vTRFBF0uk2lLoLwho=
github.com/Microsoft/go-winio v0.6.2 h1:F2VQgta7ecxGYO8k3ZZz3RS8fVIXVxONVUPlNERoyfY=
github.com/Microsoft/go-winio v0.6.2/go.mod h1:yd8OoFMLzJbo9gZq8j5qaps8bJ9aShtEA8Ipt1oGCvU=
github.com/Microsoft/hcsshim v0.12.9 h1:2zJy5KA+l0loz1HzEGqyNnjd3fyZA31ZBCGKacp6lLg=
github.com/Microsoft/hcsshim v0.12.9/go.mod h1:fJ0gkFAna6ukt0bLdKB8djt4XIJhF/vEPuoIWYVvZ8Y=
github.com/Microsoft/hcsshim v0.13.0 h1:/BcXOiS6Qi7N9XqUcv27vkIuVOkBEcWstd2pMlWSeaA=
github.com/Microsoft/hcsshim v0.13.0/go.mod h1:9KWJ/8DgU+QzYGupX4tzMhRQE8h6w90lH6HAaclpEok=
github.com/VividCortex/ewma v1.2.0 h1:f58SaIzcDXrSy3kWaHNvuJgJ3Nmz59Zji6XoJR/q1ow=
github.com/VividCortex/ewma v1.2.0/go.mod h1:nz4BbCtbLyFDeC9SUHbtcT5644juEuWfUAUnGx7j5l4=
github.com/acarl005/stripansi v0.0.0-20180116102854-5a71ef0e047d h1:licZJFw2RwpHMqeKTCYkitsPqHNxTmd4SNR5r94FGM8=
github.com/acarl005/stripansi v0.0.0-20180116102854-5a71ef0e047d/go.mod h1:asat636LX7Bqt5lYEZ27JNDcqxfjdBQuJ/MM4CN/Lzo=
github.com/aead/serpent v0.0.0-20160714141033-fba169763ea6 h1:5L8Mj9Co9sJVgW3TpYk2gxGJnDjsYuboNTcRmbtGKGs=
github.com/aead/serpent v0.0.0-20160714141033-fba169763ea6/go.mod h1:3HgLJ9d18kXMLQlJvIY3+FszZYMxCz8WfE2MQ7hDY0w=
github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2 h1:DklsrG3dyBCFEj5IhUbnKptjxatkF07cF2ak3yi77so=
github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2/go.mod h1:WaHUgvxTVq04UNunO+XhnAqY/wQc+bxr74GqbsZ/Jqw=
github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM=
github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw=
github.com/blang/semver v3.5.1+incompatible h1:cQNTCjp13qL8KC3Nbxr/y2Bqb63oX6wdnnjpJbkM4JQ=
github.com/blang/semver/v4 v4.0.0 h1:1PFHFE6yCCTv8C1TeyNNarDzntLi7wMI5i/pzqYIsAM=
github.com/blang/semver/v4 v4.0.0/go.mod h1:IbckMUScFkM3pff0VJDNKRiT6TG/YpiHIM2yvyW5YoQ=
github.com/cenkalti/backoff/v4 v4.3.0 h1:MyRJ/UdXutAwSAT+s3wNd7MfTIcy71VQueUuFK343L8=
@ -43,10 +38,8 @@ github.com/chzyer/test v1.0.0 h1:p3BQDXSxOhOG0P9z6/hGnII4LGiEPOYBhs8asl/fC04=
github.com/chzyer/test v1.0.0/go.mod h1:2JlltgoNkt4TW/z9V/IzDdFaMTM2JPIi26O1pF38GC8=
github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw=
github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc=
github.com/containerd/cgroups/v3 v3.0.3 h1:S5ByHZ/h9PMe5IOQoN7E+nMc2UcLEM/V48DGDJ9kip0=
github.com/containerd/cgroups/v3 v3.0.3/go.mod h1:8HBe7V3aWGLFPd/k03swSIsGjZhHI2WzJmticMgVuz0=
github.com/containerd/containerd v1.7.23 h1:H2CClyUkmpKAGlhQp95g2WXHfLYc7whAuvZGBNYOOwQ=
github.com/containerd/containerd v1.7.23/go.mod h1:7QUzfURqZWCZV7RLNEn1XjUCQLEf0bkaK4GjUaZehxw=
github.com/containerd/cgroups/v3 v3.0.5 h1:44na7Ud+VwyE7LIoJ8JTNQOa549a8543BmzaJHo6Bzo=
github.com/containerd/cgroups/v3 v3.0.5/go.mod h1:SA5DLYnXO8pTGYiAHXz94qvLQTKfVM5GEVisn4jpins=
github.com/containerd/errdefs v1.0.0 h1:tg5yIfIlQIrxYtu9ajqY42W3lpS19XqdxRQeEwYG8PI=
github.com/containerd/errdefs v1.0.0/go.mod h1:+YBYIdtsnF4Iw6nWZhJcqGSg/dwvV7tyJ/kCkyJ2k+M=
github.com/containerd/errdefs/pkg v0.3.0 h1:9IKJ06FvyNlexW690DXuQNx2KA2cUJXx151Xdx3ZPPE=
@ -59,31 +52,31 @@ github.com/containerd/stargz-snapshotter/estargz v0.16.3 h1:7evrXtoh1mSbGj/pfRcc
github.com/containerd/stargz-snapshotter/estargz v0.16.3/go.mod h1:uyr4BfYfOj3G9WBVE8cOlQmXAbPN9VEQpBBeJIuOipU=
github.com/containerd/typeurl/v2 v2.2.3 h1:yNA/94zxWdvYACdYO8zofhrTVuQY73fFU1y++dYSw40=
github.com/containerd/typeurl/v2 v2.2.3/go.mod h1:95ljDnPfD3bAbDJRugOiShd/DlAAsxGtUBhJxIn7SCk=
github.com/containernetworking/cni v1.2.3 h1:hhOcjNVUQTnzdRJ6alC5XF+wd9mfGIUaj8FuJbEslXM=
github.com/containernetworking/cni v1.2.3/go.mod h1:DuLgF+aPd3DzcTQTtp/Nvl1Kim23oFKdm2okJzBQA5M=
github.com/containernetworking/plugins v1.5.1 h1:T5ji+LPYjjgW0QM+KyrigZbLsZ8jaX+E5J/EcKOE4gQ=
github.com/containernetworking/plugins v1.5.1/go.mod h1:MIQfgMayGuHYs0XdNudf31cLLAC+i242hNm6KuDGqCM=
github.com/containers/common v0.62.0 h1:Sl9WE5h7Y/F3bejrMAA4teP1EcY9ygqJmW4iwSloZ10=
github.com/containers/common v0.62.0/go.mod h1:Yec+z8mrSq4rydHofrnDCBqAcNA/BGrSg1kfFUL6F6s=
github.com/containers/image/v5 v5.34.0 h1:HPqQaDUsox/3mC1pbOyLAIQEp0JhQqiUZ+6JiFIZLDI=
github.com/containers/image/v5 v5.34.0/go.mod h1:/WnvUSEfdqC/ahMRd4YJDBLrpYWkGl018rB77iB3FDo=
github.com/containernetworking/cni v1.3.0 h1:v6EpN8RznAZj9765HhXQrtXgX+ECGebEYEmnuFjskwo=
github.com/containernetworking/cni v1.3.0/go.mod h1:Bs8glZjjFfGPHMw6hQu82RUgEPNGEaBb9KS5KtNMnJ4=
github.com/containernetworking/plugins v1.7.1 h1:CNAR0jviDj6FS5Vg85NTgKWLDzZPfi/lj+VJfhMDTIs=
github.com/containernetworking/plugins v1.7.1/go.mod h1:xuMdjuio+a1oVQsHKjr/mgzuZ24leAsqUYRnzGoXHy0=
github.com/containers/common v0.64.0 h1:Jdjq1e5tqrLov9tcAVc/AfvQCgX4krhcfDBgOXwrSfw=
github.com/containers/common v0.64.0/go.mod h1:bq2UIiFP8vUJdgM+WN8E8jkD7wF69SpDRGzU7epJljg=
github.com/containers/image/v5 v5.36.0 h1:Zh+xFcLjRmicnOT5AFPHH/xj+e3s9ojDN/9X2Kx1+Jo=
github.com/containers/image/v5 v5.36.0/go.mod h1:VZ6cyDHbxZoOt4dklUJ+WNEH9FrgSgfH3qUBYKFlcT0=
github.com/containers/libtrust v0.0.0-20230121012942-c1716e8a8d01 h1:Qzk5C6cYglewc+UyGf6lc8Mj2UaPTHy/iF2De0/77CA=
github.com/containers/libtrust v0.0.0-20230121012942-c1716e8a8d01/go.mod h1:9rfv8iPl1ZP7aqh9YA68wnZv2NUDbXdcdPHVz0pFbPY=
github.com/containers/luksy v0.0.0-20250106202729-a3a812db5b72 h1:hdBIFaml6hO+Bal8CdQSQPTF305gwsJfubs4NoOV53A=
github.com/containers/luksy v0.0.0-20250106202729-a3a812db5b72/go.mod h1:UpMgEjd9XelIA/iK+qD3hWIrZY/M3eaepn+gm5U8OYE=
github.com/containers/luksy v0.0.0-20250609192159-bc60f96d4194 h1:mm+XFgCXPx3pFFkFJ0CH6KgX1os5jfrD/T6S/6ht4FE=
github.com/containers/luksy v0.0.0-20250609192159-bc60f96d4194/go.mod h1:ab2XWZtMgybWBznSwo8BEPeIeSpspKh+wlnkq/UY2Uo=
github.com/containers/ocicrypt v1.2.1 h1:0qIOTT9DoYwcKmxSt8QJt+VzMY18onl9jUXsxpVhSmM=
github.com/containers/ocicrypt v1.2.1/go.mod h1:aD0AAqfMp0MtwqWgHM1bUwe1anx0VazI108CRrSKINQ=
github.com/containers/storage v1.57.1 h1:hKPoFsuBcB3qTzBxa4IFpZMRzUuL5Xhv/BE44W0XHx8=
github.com/containers/storage v1.57.1/go.mod h1:i/Hb4lu7YgFr9G0K6BMjqW0BLJO1sFsnWQwj2UoWCUM=
github.com/containers/storage v1.59.0 h1:r2pYSTzQpJTROZbjJQ54Z0GT+rUC6+wHzlSY8yPjsXk=
github.com/containers/storage v1.59.0/go.mod h1:KoAYHnAjP3/cTsRS+mmWZGkufSY2GACiKQ4V3ZLQnR0=
github.com/coreos/go-systemd/v22 v22.5.0 h1:RrqgGjYQKalulkV8NGVIfkXQf6YYmOyiJKk8iXXhfZs=
github.com/coreos/go-systemd/v22 v22.5.0/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc=
github.com/cpuguy83/go-md2man/v2 v2.0.4/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o=
github.com/cpuguy83/go-md2man/v2 v2.0.6/go.mod h1:oOW0eioCTA6cOiMLiUPZOpcVxMig6NIQQ7OS05n1F4g=
github.com/creack/pty v1.1.18 h1:n56/Zwd5o6whRC5PMGretI4IdRLlmBXYNjScPaBgsbY=
github.com/creack/pty v1.1.18/go.mod h1:MOBLtS5ELjhRRrroQr9kyvTxUAFNvYEK993ew/Vr4O4=
github.com/cyberphone/json-canonicalization v0.0.0-20231217050601-ba74d44ecf5f h1:eHnXnuK47UlSTOQexbzxAZfekVz6i+LKRdj1CU5DPaM=
github.com/cyberphone/json-canonicalization v0.0.0-20231217050601-ba74d44ecf5f/go.mod h1:uzvlm1mxhHkdfqitSA92i7Se+S9ksOn3a3qmv/kyOCw=
github.com/cyphar/filepath-securejoin v0.3.6 h1:4d9N5ykBnSp5Xn2JkhocYDkOpURL/18CYMpo6xB9uWM=
github.com/cyphar/filepath-securejoin v0.3.6/go.mod h1:Sdj7gXlvMcPZsbhwhQ33GguGLDGQL7h7bg04C/+u9jI=
github.com/cyberphone/json-canonicalization v0.0.0-20241213102144-19d51d7fe467 h1:uX1JmpONuD549D73r6cgnxyUu18Zb7yHAy5AYU0Pm4Q=
github.com/cyberphone/json-canonicalization v0.0.0-20241213102144-19d51d7fe467/go.mod h1:uzvlm1mxhHkdfqitSA92i7Se+S9ksOn3a3qmv/kyOCw=
github.com/cyphar/filepath-securejoin v0.4.1 h1:JyxxyPEaktOD+GAnqIqTf9A8tHyAG22rowi7HkoSU1s=
github.com/cyphar/filepath-securejoin v0.4.1/go.mod h1:Sdj7gXlvMcPZsbhwhQ33GguGLDGQL7h7bg04C/+u9jI=
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1VwoXQT9A3Wy9MM3WgvqSxFWenqJduM=
@ -92,14 +85,14 @@ github.com/disiqueira/gotree/v3 v3.0.2 h1:ik5iuLQQoufZBNPY518dXhiO5056hyNBIK9lWh
github.com/disiqueira/gotree/v3 v3.0.2/go.mod h1:ZuyjE4+mUQZlbpkI24AmruZKhg3VHEgPLDY8Qk+uUu8=
github.com/distribution/reference v0.6.0 h1:0IXCQ5g4/QMHHkarYzh5l+u8T3t73zM5QvfrDyIgxBk=
github.com/distribution/reference v0.6.0/go.mod h1:BbU0aIcezP1/5jX/8MP0YiH4SdvB5Y4f/wlDRiLyi3E=
github.com/docker/cli v27.5.1+incompatible h1:JB9cieUT9YNiMITtIsguaN55PLOHhBSz3LKVc6cqWaY=
github.com/docker/cli v27.5.1+incompatible/go.mod h1:JLrzqnKDaYBop7H2jaqPtU4hHvMKP+vjCwu2uszcLI8=
github.com/docker/cli v28.3.2+incompatible h1:mOt9fcLE7zaACbxW1GeS65RI67wIJrTnqS3hP2huFsY=
github.com/docker/cli v28.3.2+incompatible/go.mod h1:JLrzqnKDaYBop7H2jaqPtU4hHvMKP+vjCwu2uszcLI8=
github.com/docker/distribution v2.8.3+incompatible h1:AtKxIZ36LoNK51+Z6RpzLpddBirtxJnzDrHLEKxTAYk=
github.com/docker/distribution v2.8.3+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w=
github.com/docker/docker v27.5.1+incompatible h1:4PYU5dnBYqRQi0294d1FBECqT9ECWeQAIfE8q4YnPY8=
github.com/docker/docker v27.5.1+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
github.com/docker/docker-credential-helpers v0.8.2 h1:bX3YxiGzFP5sOXWc3bTPEXdEaZSeVMrFgOr3T+zrFAo=
github.com/docker/docker-credential-helpers v0.8.2/go.mod h1:P3ci7E3lwkZg6XiHdRKft1KckHiO9a2rNtyFbZ/ry9M=
github.com/docker/docker v28.3.2+incompatible h1:wn66NJ6pWB1vBZIilP8G3qQPqHy5XymfYn5vsqeA5oA=
github.com/docker/docker v28.3.2+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
github.com/docker/docker-credential-helpers v0.9.3 h1:gAm/VtF9wgqJMoxzT3Gj5p4AqIjCBS4wrsOh9yRqcz8=
github.com/docker/docker-credential-helpers v0.9.3/go.mod h1:x+4Gbw9aGmChi3qTLZj8Dfn0TD20M/fuWy0E5+WDeCo=
github.com/docker/go-connections v0.5.0 h1:USnMq7hx7gwdVZq1L49hLXaFtUdTADjXGp+uj1Br63c=
github.com/docker/go-connections v0.5.0/go.mod h1:ov60Kzw0kKElRwhNs9UlUHAE/F9Fe6GLaXnqyDdmEXc=
github.com/docker/go-metrics v0.0.1 h1:AgB/0SvBxihN0X8OR4SjsblXkbMvalQ8cjmtKQ2rQV8=
@ -112,38 +105,18 @@ github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1m
github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c=
github.com/felixge/httpsnoop v1.0.4 h1:NFTV2Zj1bL4mc9sqWACXbQFVBBg2W3GPvqp8/ESS2Wg=
github.com/felixge/httpsnoop v1.0.4/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U=
github.com/fsnotify/fsnotify v1.8.0 h1:dAwr6QBTBZIkG8roQaJjGof0pp0EeF+tNV7YBP3F/8M=
github.com/fsnotify/fsnotify v1.8.0/go.mod h1:8jBTzvmWwFyi3Pb8djgCCO5IBqzKJ/Jwo8TRcHyHii0=
github.com/fsouza/go-dockerclient v1.12.0 h1:S2f2crEUbBNCFiF06kR/GvioEB8EMsb3Td/bpawD+aU=
github.com/fsouza/go-dockerclient v1.12.0/go.mod h1:YWUtjg8japrqD/80L98nTtCoxQFp5B5wrSsnyeB5lFo=
github.com/go-jose/go-jose/v4 v4.0.4 h1:VsjPI33J0SB9vQM6PLmNjoHqMQNGPiZ0rHL7Ni7Q6/E=
github.com/go-jose/go-jose/v4 v4.0.4/go.mod h1:NKb5HO1EZccyMpiZNbdUw/14tiXNyUJh188dfnMCAfc=
github.com/fsnotify/fsnotify v1.9.0 h1:2Ml+OJNzbYCTzsxtv8vKSFD9PbJjmhYF14k/jKC7S9k=
github.com/fsnotify/fsnotify v1.9.0/go.mod h1:8jBTzvmWwFyi3Pb8djgCCO5IBqzKJ/Jwo8TRcHyHii0=
github.com/fsouza/go-dockerclient v1.12.1 h1:FMoLq+Zhv9Oz/rFmu6JWkImfr6CBgZOPcL+bHW4gS0o=
github.com/fsouza/go-dockerclient v1.12.1/go.mod h1:OqsgJJcpCwqyM3JED7TdfM9QVWS5O7jSYwXxYKmOooY=
github.com/go-jose/go-jose/v4 v4.0.5 h1:M6T8+mKZl/+fNNuFHvGIzDz7BTLQPIounk/b9dw3AaE=
github.com/go-jose/go-jose/v4 v4.0.5/go.mod h1:s3P1lRrkT8igV8D9OjyL4WRyHvjB6a4JSllnOrmmBOA=
github.com/go-logr/logr v0.1.0/go.mod h1:ixOQHD9gLJUVQQ2ZOR7zLEifBX6tGkNJF4QyIY7sIas=
github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A=
github.com/go-logr/logr v1.4.2 h1:6pFjapn8bFcIbiKo3XT4j/BhANplGihG6tvd+8rYgrY=
github.com/go-logr/logr v1.4.2/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY=
github.com/go-logr/logr v1.4.3 h1:CjnDlHq8ikf6E492q6eKboGOC0T8CDaOvkHCIg8idEI=
github.com/go-logr/logr v1.4.3/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY=
github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag=
github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE=
github.com/go-openapi/analysis v0.23.0 h1:aGday7OWupfMs+LbmLZG4k0MYXIANxcuBTYUC03zFCU=
github.com/go-openapi/analysis v0.23.0/go.mod h1:9mz9ZWaSlV8TvjQHLl2mUW2PbZtemkE8yA5v22ohupo=
github.com/go-openapi/errors v0.22.0 h1:c4xY/OLxUBSTiepAg3j/MHuAv5mJhnf53LLMWFB+u/w=
github.com/go-openapi/errors v0.22.0/go.mod h1:J3DmZScxCDufmIMsdOuDHxJbdOGC0xtUynjIx092vXE=
github.com/go-openapi/jsonpointer v0.21.0 h1:YgdVicSA9vH5RiHs9TZW5oyafXZFc6+2Vc1rr/O9oNQ=
github.com/go-openapi/jsonpointer v0.21.0/go.mod h1:IUyH9l/+uyhIYQ/PXVA41Rexl+kOkAPDdXEYns6fzUY=
github.com/go-openapi/jsonreference v0.21.0 h1:Rs+Y7hSXT83Jacb7kFyjn4ijOuVGSvOdF2+tg1TRrwQ=
github.com/go-openapi/jsonreference v0.21.0/go.mod h1:LmZmgsrTkVg9LG4EaHeY8cBDslNPMo06cago5JNLkm4=
github.com/go-openapi/loads v0.22.0 h1:ECPGd4jX1U6NApCGG1We+uEozOAvXvJSF4nnwHZ8Aco=
github.com/go-openapi/loads v0.22.0/go.mod h1:yLsaTCS92mnSAZX5WWoxszLj0u+Ojl+Zs5Stn1oF+rs=
github.com/go-openapi/runtime v0.28.0 h1:gpPPmWSNGo214l6n8hzdXYhPuJcGtziTOgUpvsFWGIQ=
github.com/go-openapi/runtime v0.28.0/go.mod h1:QN7OzcS+XuYmkQLw05akXk0jRH/eZ3kb18+1KwW9gyc=
github.com/go-openapi/spec v0.21.0 h1:LTVzPc3p/RzRnkQqLRndbAzjY0d0BCL72A6j3CdL9ZY=
github.com/go-openapi/spec v0.21.0/go.mod h1:78u6VdPw81XU44qEWGhtr982gJ5BWg2c0I5XwVMotYk=
github.com/go-openapi/strfmt v0.23.0 h1:nlUS6BCqcnAk0pyhi9Y+kdDVZdZMHfEKQiS4HaMgO/c=
github.com/go-openapi/strfmt v0.23.0/go.mod h1:NrtIpfKtWIygRkKVsxh7XQMDQW5HKQl6S5ik2elW+K4=
github.com/go-openapi/swag v0.23.0 h1:vsEVJDUo2hPJ2tu0/Xc+4noaxyEffXNIs3cOULZ+GrE=
github.com/go-openapi/swag v0.23.0/go.mod h1:esZ8ITTYEsH1V2trKHjAN8Ai7xHb8RV+YSZ577vPjgQ=
github.com/go-openapi/validate v0.24.0 h1:LdfDKwNbpB6Vn40xhTdNZAnfLECL81w+VX3BumrGD58=
github.com/go-openapi/validate v0.24.0/go.mod h1:iyeX1sEufmv3nPbBdX3ieNviWnOZaJ1+zquzJEf2BAQ=
github.com/go-task/slim-sprig/v3 v3.0.0 h1:sUs3vkvUymDpBKi3qH1YSqBQk9+9D/8M2mN1vB6EwHI=
github.com/go-task/slim-sprig/v3 v3.0.0/go.mod h1:W848ghGpv3Qj3dhTPRyJypKRiqCdHZiAzKg9hl15HA8=
github.com/go-test/deep v1.1.1 h1:0r/53hagsehfO4bzD2Pgr/+RgHqhmf+k1Bpse2cTu1U=
@ -155,8 +128,8 @@ github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q=
github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q=
github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q=
github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da h1:oI5xCqsCo564l8iNU+DwB5epxmsaqB+rhGL0m5jtYqE=
github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
github.com/golang/groupcache v0.0.0-20241129210726-2c02b8208cf8 h1:f+oWsMOmNPc8JmEHVZIycC7hBoQxHH9pNKQORJNozsQ=
github.com/golang/groupcache v0.0.0-20241129210726-2c02b8208cf8/go.mod h1:wcDNUvekVysuuOpQKo3191zZyTpiI6se1N1ULghS0sw=
github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
@ -175,24 +148,24 @@ github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMyw
github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.3/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI=
github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
github.com/google/go-containerregistry v0.20.2 h1:B1wPJ1SN/S7pB+ZAimcciVD+r+yV/l/DSArMxlbwseo=
github.com/google/go-containerregistry v0.20.2/go.mod h1:z38EKdKh4h7IP2gSfUUqEvalZBqs6AoLeWfUy34nQC8=
github.com/google/go-cmp v0.7.0 h1:wk8382ETsv4JYUZwIsn6YpYiWiBsYLSJiTsyBybVuN8=
github.com/google/go-cmp v0.7.0/go.mod h1:pXiqmnSA92OHEEa9HXL2W4E7lf9JzCmGVUdgjX3N/iU=
github.com/google/go-containerregistry v0.20.3 h1:oNx7IdTI936V8CQRveCjaxOiegWwvM7kqkbXTpyiovI=
github.com/google/go-containerregistry v0.20.3/go.mod h1:w00pIgBRDVUDFM6bq+Qx8lwNWK+cxgCuX1vd3PIBDNI=
github.com/google/go-intervals v0.0.2 h1:FGrVEiUnTRKR8yE04qzXYaJMtnIYqobR5QbblK3ixcM=
github.com/google/go-intervals v0.0.2/go.mod h1:MkaR3LNRfeKLPmqgJYs4E66z5InYjmCjbbr4TQlcT6Y=
github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
github.com/google/pprof v0.0.0-20241210010833-40e02aabc2ad h1:a6HEuzUHeKH6hwfN/ZoQgRgVIWFJljSWa/zetS2WTvg=
github.com/google/pprof v0.0.0-20241210010833-40e02aabc2ad/go.mod h1:vavhavw2zAxS5dIdcRluK6cSGGPlZynqzFM8NdvU144=
github.com/google/pprof v0.0.0-20250403155104-27863c87afa6 h1:BHT72Gu3keYf3ZEu2J0b1vyeLSOYI8bm5wbJM/8yDe8=
github.com/google/pprof v0.0.0-20250403155104-27863c87afa6/go.mod h1:boTsfXsheKC2y+lKOCMpSfarhxDeIzfZG1jqGcPl3cA=
github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/google/uuid v1.2.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0=
github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/gorilla/mux v1.8.1 h1:TuBL49tXwgrFYWhqrNgrUNEY92u81SPhu7sTdzQEiWY=
github.com/gorilla/mux v1.8.1/go.mod h1:AKf9I4AEqPTmMytcMc0KkNouC66V3BtZ4qD5fmWSiMQ=
github.com/grpc-ecosystem/grpc-gateway/v2 v2.22.0 h1:asbCHRVmodnJTuQ3qamDwqVOIjwqUPTYmYuemVOx+Ys=
github.com/grpc-ecosystem/grpc-gateway/v2 v2.22.0/go.mod h1:ggCgvZ2r7uOoQjOyu2Y1NhHmEPPzzuhWgcza5M1Ji1I=
github.com/grpc-ecosystem/grpc-gateway/v2 v2.26.1 h1:e9Rjr40Z98/clHv5Yg79Is0NtosR5LXRvdr7o/6NwbA=
github.com/grpc-ecosystem/grpc-gateway/v2 v2.26.1/go.mod h1:tIxuGz/9mpox++sgp9fJjHO0+q1X9/UOWd798aAm22M=
github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4=
github.com/hashicorp/errwrap v1.1.0 h1:OxrOeh75EUXMY8TBjag2fzXGZ40LB6IKw45YeGUDY2I=
github.com/hashicorp/errwrap v1.1.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4=
@ -204,14 +177,12 @@ github.com/jinzhu/copier v0.4.0 h1:w3ciUoD19shMCRargcpm0cm91ytaBhDvuRpz1ODO/U8=
github.com/jinzhu/copier v0.4.0/go.mod h1:DfbEm0FYsaqBcKcFuvmOZb218JkPGtvSHsKg8S8hyyg=
github.com/jmhodges/clock v1.2.0 h1:eq4kys+NI0PLngzaHEe7AmPT90XMGIEySD1JfV1PDIs=
github.com/jmhodges/clock v1.2.0/go.mod h1:qKjhA7x7u/lQpPB1XAqX1b1lCI/w3/fNuYpI/ZjLynI=
github.com/josharian/intern v1.0.0 h1:vlS4z54oSdjm0bgjRigI+G1HpF+tI+9rE5LLzOg8HmY=
github.com/josharian/intern v1.0.0/go.mod h1:5DoeVV0s6jJacbCEi61lwdGj/aVlrQvzHFFd8Hwg//Y=
github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM=
github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo=
github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8=
github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
github.com/klauspost/compress v1.17.11 h1:In6xLpyWOi1+C7tXUUWv2ot1QvBjxevKAaI6IXrJmUc=
github.com/klauspost/compress v1.17.11/go.mod h1:pMDklpSncoRMuLFrf1W9Ss9KT+0rH90U12bZKk7uwG0=
github.com/klauspost/compress v1.18.0 h1:c/Cqfb0r+Yi+JtIEq73FWXVkRonBlf0CRNYc8Zttxdo=
github.com/klauspost/compress v1.18.0/go.mod h1:2Pp+KzxcywXVXMr50+X0Q/Lsb43OQHYWRCY2AiWywWQ=
github.com/klauspost/pgzip v1.2.6 h1:8RXeL5crjEUFnR2/Sn6GJNWtSQ3Dk8pq4CL3jvdDyjU=
github.com/klauspost/pgzip v1.2.6/go.mod h1:Ch1tH69qFZu15pkjo5kYi6mth2Zzwzt50oCQKQE9RUs=
github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE=
@ -220,40 +191,40 @@ github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=
github.com/letsencrypt/boulder v0.0.0-20240620165639-de9c06129bec h1:2tTW6cDth2TSgRbAhD7yjZzTQmcN25sDRPEeinR51yQ=
github.com/letsencrypt/boulder v0.0.0-20240620165639-de9c06129bec/go.mod h1:TmwEoGCwIti7BCeJ9hescZgRtatxRE+A72pCoPfmcfk=
github.com/mailru/easyjson v0.7.7 h1:UGYAvKxe3sBsEDzO8ZeWOSlIQfWFlxbzLZe7hwFURr0=
github.com/mailru/easyjson v0.7.7/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc=
github.com/manifoldco/promptui v0.9.0 h1:3V4HzJk1TtXW1MTZMP7mdlwbBpIinw3HztaIlYthEiA=
github.com/manifoldco/promptui v0.9.0/go.mod h1:ka04sppxSGFAtxX0qhlYQjISsg9mR4GWtQEhdbn6Pgg=
github.com/mattn/go-runewidth v0.0.16 h1:E5ScNMtiwvlvB5paMFdw9p4kSQzbXFikJ5SQO6TULQc=
github.com/mattn/go-runewidth v0.0.16/go.mod h1:Jdepj2loyihRzMpdS35Xk/zdY8IAYHsh153qUoGf23w=
github.com/mattn/go-shellwords v1.0.12 h1:M2zGm7EW6UQJvDeQxo4T51eKPurbeFbe8WtebGE2xrk=
github.com/mattn/go-shellwords v1.0.12/go.mod h1:EZzvwXDESEeg03EKmM+RmDnNOPKG4lLtQsUlTZDWQ8Y=
github.com/mattn/go-sqlite3 v1.14.24 h1:tpSp2G2KyMnnQu99ngJ47EIkWVmliIizyZBfPrBWDRM=
github.com/mattn/go-sqlite3 v1.14.24/go.mod h1:Uh1q+B4BYcTPb+yiD3kU8Ct7aC0hY9fxUwlHK0RXw+Y=
github.com/mattn/go-sqlite3 v1.14.28 h1:ThEiQrnbtumT+QMknw63Befp/ce/nUPgBPMlRFEum7A=
github.com/mattn/go-sqlite3 v1.14.28/go.mod h1:Uh1q+B4BYcTPb+yiD3kU8Ct7aC0hY9fxUwlHK0RXw+Y=
github.com/miekg/pkcs11 v1.1.1 h1:Ugu9pdy6vAYku5DEpVWVFPYnzV+bxB+iRdbuFSu7TvU=
github.com/miekg/pkcs11 v1.1.1/go.mod h1:XsNlhZGX73bx86s2hdc/FuaLm2CPZJemRLMA+WTFxgs=
github.com/mistifyio/go-zfs/v3 v3.0.1 h1:YaoXgBePoMA12+S1u/ddkv+QqxcfiZK4prI6HPnkFiU=
github.com/mistifyio/go-zfs/v3 v3.0.1/go.mod h1:CzVgeB0RvF2EGzQnytKVvVSDwmKJXxkOTUGbNrTja/k=
github.com/mitchellh/mapstructure v1.5.0 h1:jeMsZIYE/09sWLaz43PL7Gy6RuMjD2eJVyuac5Z2hdY=
github.com/mitchellh/mapstructure v1.5.0/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo=
github.com/moby/buildkit v0.19.0 h1:w9G1p7sArvCGNkpWstAqJfRQTXBKukMyMK1bsah1HNo=
github.com/moby/buildkit v0.19.0/go.mod h1:WiHBFTgWV8eB1AmPxIWsAlKjUACAwm3X/14xOV4VWew=
github.com/moby/buildkit v0.23.2 h1:gt/dkfcpgTXKx+B9I310kV767hhVqTvEyxGgI3mqsGQ=
github.com/moby/buildkit v0.23.2/go.mod h1:iEjAfPQKIuO+8y6OcInInvzqTMiKMbb2RdJz1K/95a0=
github.com/moby/docker-image-spec v1.3.1 h1:jMKff3w6PgbfSa69GfNg+zN/XLhfXJGnEx3Nl2EsFP0=
github.com/moby/docker-image-spec v1.3.1/go.mod h1:eKmb5VW8vQEh/BAr2yvVNvuiJuY6UIocYsFu/DxxRpo=
github.com/moby/go-archive v0.1.0 h1:Kk/5rdW/g+H8NHdJW2gsXyZ7UnzvJNOy6VKJqueWdcQ=
github.com/moby/go-archive v0.1.0/go.mod h1:G9B+YoujNohJmrIYFBpSd54GTUB4lt9S+xVQvsJyFuo=
github.com/moby/patternmatcher v0.6.0 h1:GmP9lR19aU5GqSSFko+5pRqHi+Ohk1O69aFiKkVGiPk=
github.com/moby/patternmatcher v0.6.0/go.mod h1:hDPoyOpDY7OrrMDLaYoY3hf52gNCR/YOUYxkhApJIxc=
github.com/moby/sys/atomicwriter v0.1.0 h1:kw5D/EqkBwsBFi0ss9v1VG3wIkVhzGvLklJ+w3A14Sw=
github.com/moby/sys/atomicwriter v0.1.0/go.mod h1:Ul8oqv2ZMNHOceF643P6FKPXeCmYtlQMvpizfsSoaWs=
github.com/moby/sys/capability v0.4.0 h1:4D4mI6KlNtWMCM1Z/K0i7RV1FkX+DBDHKVJpCndZoHk=
github.com/moby/sys/capability v0.4.0/go.mod h1:4g9IK291rVkms3LKCDOoYlnV8xKwoDTpIrNEE35Wq0I=
github.com/moby/sys/mountinfo v0.7.2 h1:1shs6aH5s4o5H2zQLn796ADW1wMrIwHsyJ2v9KouLrg=
github.com/moby/sys/mountinfo v0.7.2/go.mod h1:1YOa8w8Ih7uW0wALDUgT1dTTSBrZ+HiBLGws92L2RU4=
github.com/moby/sys/sequential v0.6.0 h1:qrx7XFUd/5DxtqcoH1h438hF5TmOvzC/lspjy7zgvCU=
github.com/moby/sys/sequential v0.6.0/go.mod h1:uyv8EUTrca5PnDsdMGXhZe6CCe8U/UiTWd+lL+7b/Ko=
github.com/moby/sys/user v0.3.0 h1:9ni5DlcW5an3SvRSx4MouotOygvzaXbaSrc/wGDFWPo=
github.com/moby/sys/user v0.3.0/go.mod h1:bG+tYYYJgaMtRKgEmuueC0hJEAZWwtIbZTB+85uoHjs=
github.com/moby/sys/user v0.4.0 h1:jhcMKit7SA80hivmFJcbB1vqmw//wU61Zdui2eQXuMs=
github.com/moby/sys/user v0.4.0/go.mod h1:bG+tYYYJgaMtRKgEmuueC0hJEAZWwtIbZTB+85uoHjs=
github.com/moby/sys/userns v0.1.0 h1:tVLXkFOxVu9A64/yh59slHVv9ahO9UIev4JZusOLG/g=
github.com/moby/sys/userns v0.1.0/go.mod h1:IHUYgu/kao6N8YZlp9Cf444ySSvCmDlmzUcYfDHOl28=
github.com/moby/term v0.5.0 h1:xt8Q1nalod/v7BqbG21f8mQPqH+xAaC9C3N3wfWbVP0=
github.com/moby/term v0.5.0/go.mod h1:8FzsFHVUBGZdbDsJw/ot+X+d5HLUbvklYLJ9uGfcI3Y=
github.com/moby/term v0.5.2 h1:6qk3FJAFDs6i/q3W/pQ97SX192qKfZgGjCQqfCJkgzQ=
github.com/moby/term v0.5.2/go.mod h1:d3djjFCrjnB+fl8NJux+EJzu0msscUP+f8it8hPkFLc=
github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg=
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
@ -263,28 +234,26 @@ github.com/morikuni/aec v1.0.0 h1:nP9CBfwrvYnBRgY6qfDQkygYDmYwOilePFkwzv4dU8A=
github.com/morikuni/aec v1.0.0/go.mod h1:BbKIizmSmc5MMPqRYbxO4ZU0S0+P200+tUnFx7PXmsc=
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq1c1nUAm88MOHcQC9l5mIlSMApZMrHA=
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ=
github.com/oklog/ulid v1.3.1 h1:EGfNDEx6MqHz8B3uNV6QAib1UR2Lm97sHi3ocA6ESJ4=
github.com/oklog/ulid v1.3.1/go.mod h1:CirwcVhetQ6Lv90oh/F+FBtV6XMibvdAFo93nm5qn4U=
github.com/onsi/ginkgo/v2 v2.22.2 h1:/3X8Panh8/WwhU/3Ssa6rCKqPLuAkVY2I0RoyDLySlU=
github.com/onsi/ginkgo/v2 v2.22.2/go.mod h1:oeMosUL+8LtarXBHu/c0bx2D/K9zyQ6uX3cTyztHwsk=
github.com/onsi/gomega v1.36.2 h1:koNYke6TVk6ZmnyHrCXba/T/MoLBXFjeC1PtvYgw0A8=
github.com/onsi/gomega v1.36.2/go.mod h1:DdwyADRjrc825LhMEkD76cHR5+pUnjhUN8GlHlRPHzY=
github.com/onsi/ginkgo/v2 v2.23.4 h1:ktYTpKJAVZnDT4VjxSbiBenUjmlL/5QkBEocaWXiQus=
github.com/onsi/ginkgo/v2 v2.23.4/go.mod h1:Bt66ApGPBFzHyR+JO10Zbt0Gsp4uWxu5mIOTusL46e8=
github.com/onsi/gomega v1.37.0 h1:CdEG8g0S133B4OswTDC/5XPSzE1OeP29QOioj2PID2Y=
github.com/onsi/gomega v1.37.0/go.mod h1:8D9+Txp43QWKhM24yyOBEdpkzN8FvJyAwecBgsU4KU0=
github.com/opencontainers/cgroups v0.0.4 h1:XVj8P/IHVms/j+7eh8ggdkTLAxjz84ZzuFyGoE28DR4=
github.com/opencontainers/cgroups v0.0.4/go.mod h1:s8lktyhlGUqM7OSRL5P7eAW6Wb+kWPNvt4qvVfzA5vs=
github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U=
github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM=
github.com/opencontainers/image-spec v1.1.0 h1:8SG7/vwALn54lVB/0yZ/MMwhFrPYtpEHQb2IpWsCzug=
github.com/opencontainers/image-spec v1.1.0/go.mod h1:W4s4sFTMaBeK1BQLXbG4AdM2szdn85PY75RI83NrTrM=
github.com/opencontainers/runc v1.2.4 h1:yWFgLkghp71D76Fa0l349yAl5g4Gse7DPYNlvkQ9Eiw=
github.com/opencontainers/runc v1.2.4/go.mod h1:nSxcWUydXrsBZVYNSkTjoQ/N6rcyTtn+1SD5D4+kRIM=
github.com/opencontainers/runtime-spec v1.2.0 h1:z97+pHb3uELt/yiAWD691HNHQIF07bE7dzrbT927iTk=
github.com/opencontainers/runtime-spec v1.2.0/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0=
github.com/opencontainers/runtime-tools v0.9.1-0.20241108202711-f7e3563b0271 h1:TPj0pMLCTy1CKwmrat3hqTxoZfqOuTy0asG0ccpGk8Q=
github.com/opencontainers/runtime-tools v0.9.1-0.20241108202711-f7e3563b0271/go.mod h1:oIH6VwKkaDOO+SIYZpdwrC/0wKYqrfO6E1sG1j3UVws=
github.com/opencontainers/selinux v1.11.1 h1:nHFvthhM0qY8/m+vfhJylliSshm8G1jJ2jDMcgULaH8=
github.com/opencontainers/selinux v1.11.1/go.mod h1:E5dMC3VPuVvVHDYmi78qvhJp8+M586T4DlDRYpFkyec=
github.com/openshift/imagebuilder v1.2.15 h1:MNn1OztEE/l8pSEDPYAQ71Ys6rpXA2P00UFhdY9p/yk=
github.com/openshift/imagebuilder v1.2.15/go.mod h1:cK6MLyBl1IHmIYGLY/2SLOG6p0PtEDUOC7khxsFYUXE=
github.com/ostreedev/ostree-go v0.0.0-20210805093236-719684c64e4f h1:/UDgs8FGMqwnHagNDPGOlts35QkhAZ8by3DR7nMih7M=
github.com/ostreedev/ostree-go v0.0.0-20210805093236-719684c64e4f/go.mod h1:J6OG6YJVEWopen4avK3VNQSnALmmjvniMmni/YFYAwc=
github.com/opencontainers/image-spec v1.1.1 h1:y0fUlFfIZhPF1W537XOLg0/fcx6zcHCJwooC2xJA040=
github.com/opencontainers/image-spec v1.1.1/go.mod h1:qpqAh3Dmcf36wStyyWU+kCeDgrGnAve2nCC8+7h8Q0M=
github.com/opencontainers/runc v1.3.0 h1:cvP7xbEvD0QQAs0nZKLzkVog2OPZhI/V2w3WmTmUSXI=
github.com/opencontainers/runc v1.3.0/go.mod h1:9wbWt42gV+KRxKRVVugNP6D5+PQciRbenB4fLVsqGPs=
github.com/opencontainers/runtime-spec v1.2.1 h1:S4k4ryNgEpxW1dzyqffOmhI1BHYcjzU8lpJfSlR0xww=
github.com/opencontainers/runtime-spec v1.2.1/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0=
github.com/opencontainers/runtime-tools v0.9.1-0.20250523060157-0ea5ed0382a2 h1:2xZEHOdeQBV6PW8ZtimN863bIOl7OCW/X10K0cnxKeA=
github.com/opencontainers/runtime-tools v0.9.1-0.20250523060157-0ea5ed0382a2/go.mod h1:MXdPzqAA8pHC58USHqNCSjyLnRQ6D+NjbpP+02Z1U/0=
github.com/opencontainers/selinux v1.12.0 h1:6n5JV4Cf+4y0KNXW48TLj5DwfXpvWlxXplUkdTrmPb8=
github.com/opencontainers/selinux v1.12.0/go.mod h1:BTPX+bjVbWGXw7ZZWUbdENt8w0htPSrlgOOysQaU62U=
github.com/openshift/imagebuilder v1.2.16 h1:Vqjy5uPoVDJiX5JUKHo0Cf440ih5cKI7lVe2ZJ2X+RA=
github.com/openshift/imagebuilder v1.2.16/go.mod h1:gASl6jikVG3bCFnLjG6Ow5TeKwKVvrqUUj8C7EUmqc8=
github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=
github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/planetscale/vtprotobuf v0.6.1-0.20240319094008-0393e58bdf10 h1:GFCKgmp0tecUJ0sJuv4pzYCqS9+RGSn52M3FUwPs+uo=
@ -294,44 +263,46 @@ github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRI
github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/proglottis/gpgme v0.1.4 h1:3nE7YNA70o2aLjcg63tXMOhPD7bplfE5CBdV+hLAm2M=
github.com/proglottis/gpgme v0.1.4/go.mod h1:5LoXMgpE4bttgwwdv9bLs/vwqv3qV7F4glEEZ7mRKrM=
github.com/prometheus/client_golang v1.20.5 h1:cxppBPuYhUnsO6yo/aoRol4L7q7UFfdm+bR9r+8l63Y=
github.com/prometheus/client_golang v1.20.5/go.mod h1:PIEt8X02hGcP8JWbeHyeZ53Y/jReSnHgO035n//V5WE=
github.com/prometheus/client_golang v1.22.0 h1:rb93p9lokFEsctTys46VnV1kLCDpVZ0a/Y92Vm0Zc6Q=
github.com/prometheus/client_golang v1.22.0/go.mod h1:R7ljNsLXhuQXYZYtw6GAE9AZg8Y7vEW5scdCXrWRXC0=
github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
github.com/prometheus/client_model v0.6.1 h1:ZKSh/rekM+n3CeS952MLRAdFwIKqeY8b62p8ais2e9E=
github.com/prometheus/client_model v0.6.1/go.mod h1:OrxVMOVHjw3lKMa8+x6HeMGkHMQyHDk9E3jmP2AmGiY=
github.com/prometheus/common v0.57.0 h1:Ro/rKjwdq9mZn1K5QPctzh+MA4Lp0BuYk5ZZEVhoNcY=
github.com/prometheus/common v0.57.0/go.mod h1:7uRPFSUTbfZWsJ7MHY56sqt7hLQu3bxXHDnNhl8E9qI=
github.com/prometheus/client_model v0.6.2 h1:oBsgwpGs7iVziMvrGhE53c/GrLUsZdHnqNwqPLxwZyk=
github.com/prometheus/client_model v0.6.2/go.mod h1:y3m2F6Gdpfy6Ut/GBsUqTWZqCUvMVzSfMLjcu6wAwpE=
github.com/prometheus/common v0.62.0 h1:xasJaQlnWAeyHdUBeGjXmutelfJHWMRr+Fg4QszZ2Io=
github.com/prometheus/common v0.62.0/go.mod h1:vyBcEuLSvWos9B1+CyL7JZ2up+uFzXhkqml0W5zIY1I=
github.com/prometheus/procfs v0.15.1 h1:YagwOFzUgYfKKHX6Dr+sHT7km/hxC76UB0learggepc=
github.com/prometheus/procfs v0.15.1/go.mod h1:fB45yRUv8NstnjriLhBQLuOUt+WW4BsoGhij/e3PBqk=
github.com/rivo/uniseg v0.2.0/go.mod h1:J6wj4VEh+S6ZtnVlnTBMWIodfgj8LQOQFoIToxlJtxc=
github.com/rivo/uniseg v0.4.7 h1:WUdvkW8uEhrYfLC4ZzdpI2ztxP1I582+49Oc5Mq64VQ=
github.com/rivo/uniseg v0.4.7/go.mod h1:FN3SvrM+Zdj16jyLfmOkMNblXMcoc8DfTHruCPUcx88=
github.com/rogpeppe/go-internal v1.12.0 h1:exVL4IDcn6na9z1rAb56Vxr+CgyK3nn3O+epU5NdKM8=
github.com/rogpeppe/go-internal v1.12.0/go.mod h1:E+RYuTGaKKdloAfM02xzb0FW3Paa99yedzYV+kq4uf4=
github.com/rogpeppe/go-internal v1.13.1 h1:KvO1DLK/DRN07sQ1LQKScxyZJuNnedQ5/wKSR38lUII=
github.com/rogpeppe/go-internal v1.13.1/go.mod h1:uMEvuHeurkdAXX61udpOXGD/AzZDWNMNyH2VO9fmH0o=
github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
github.com/santhosh-tekuri/jsonschema/v6 v6.0.2 h1:KRzFb2m7YtdldCEkzs6KqmJw4nqEVZGK7IN2kJkjTuQ=
github.com/santhosh-tekuri/jsonschema/v6 v6.0.2/go.mod h1:JXeL+ps8p7/KNMjDQk3TCwPpBy0wYklyWTfbkIzdIFU=
github.com/sebdah/goldie/v2 v2.5.5 h1:rx1mwF95RxZ3/83sdS4Yp7t2C5TCokvWP4TBRbAyEWY=
github.com/sebdah/goldie/v2 v2.5.5/go.mod h1:oZ9fp0+se1eapSRjfYbsV/0Hqhbuu3bJVvKI/NNtssI=
github.com/seccomp/libseccomp-golang v0.10.0 h1:aA4bp+/Zzi0BnWZ2F1wgNBs5gTpm+na2rWM6M9YjLpY=
github.com/seccomp/libseccomp-golang v0.10.0/go.mod h1:JA8cRccbGaA1s33RQf7Y1+q9gHmZX1yB/z9WDN1C6fg=
github.com/seccomp/libseccomp-golang v0.11.0 h1:SDkcBRqGLP+sezmMACkxO1EfgbghxIxnRKfd6mHUEis=
github.com/seccomp/libseccomp-golang v0.11.0/go.mod h1:5m1Lk8E9OwgZTTVz4bBOer7JuazaBa+xTkM895tDiWc=
github.com/secure-systems-lab/go-securesystemslib v0.9.0 h1:rf1HIbL64nUpEIZnjLZ3mcNEL9NBPB0iuVjyxvq3LZc=
github.com/secure-systems-lab/go-securesystemslib v0.9.0/go.mod h1:DVHKMcZ+V4/woA/peqr+L0joiRXbPpQ042GgJckkFgw=
github.com/sergi/go-diff v1.3.1 h1:xkr+Oxo4BOQKmkn/B9eMK0g5Kg/983T9DqqPHwYqD+8=
github.com/sergi/go-diff v1.3.1/go.mod h1:aMJSSKb2lpPvRNec0+w3fl7LP9IOFzdc9Pa4NFbPK1I=
github.com/sigstore/fulcio v1.6.4 h1:d86obfxUAG3Y6CYwOx1pdwCZwKmROB6w6927pKOVIRY=
github.com/sigstore/fulcio v1.6.4/go.mod h1:Y6bn3i3KGhXpaHsAtYP3Z4Np0+VzCo1fLv8Ci6mbPDs=
github.com/sigstore/rekor v1.3.8 h1:B8kJI8mpSIXova4Jxa6vXdJyysRxFGsEsLKBDl0rRjA=
github.com/sigstore/rekor v1.3.8/go.mod h1:/dHFYKSuxEygfDRnEwyJ+ZD6qoVYNXQdi1mJrKvKWsI=
github.com/sigstore/sigstore v1.8.12 h1:S8xMVZbE2z9ZBuQUEG737pxdLjnbOIcFi5v9UFfkJFc=
github.com/sigstore/sigstore v1.8.12/go.mod h1:+PYQAa8rfw0QdPpBcT+Gl3egKD9c+TUgAlF12H3Nmjo=
github.com/sigstore/fulcio v1.6.6 h1:XaMYX6TNT+8n7Npe8D94nyZ7/ERjEsNGFC+REdi/wzw=
github.com/sigstore/fulcio v1.6.6/go.mod h1:BhQ22lwaebDgIxVBEYOOqLRcN5+xOV+C9bh/GUXRhOk=
github.com/sigstore/protobuf-specs v0.4.1 h1:5SsMqZbdkcO/DNHudaxuCUEjj6x29tS2Xby1BxGU7Zc=
github.com/sigstore/protobuf-specs v0.4.1/go.mod h1:+gXR+38nIa2oEupqDdzg4qSBT0Os+sP7oYv6alWewWc=
github.com/sigstore/sigstore v1.9.5 h1:Wm1LT9yF4LhQdEMy5A2JeGRHTrAWGjT3ubE5JUSrGVU=
github.com/sigstore/sigstore v1.9.5/go.mod h1:VtxgvGqCmEZN9X2zhFSOkfXxvKUjpy8RpUW39oCtoII=
github.com/sirupsen/logrus v1.9.3 h1:dueUQJ1C2q9oE3F7wvmSGAaVtTmUizReu6fjN8uqzbQ=
github.com/sirupsen/logrus v1.9.3/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ=
github.com/smallstep/pkcs7 v0.1.1 h1:x+rPdt2W088V9Vkjho4KtoggyktZJlMduZAtRHm68LU=
github.com/smallstep/pkcs7 v0.1.1/go.mod h1:dL6j5AIz9GHjVEBTXtW+QliALcgM19RtXaTeyxI+AfA=
github.com/spf13/cobra v1.8.1 h1:e5/vxKd/rZsfSJMUX1agtjeTDf+qv1/JdBF8gg5k9ZM=
github.com/spf13/cobra v1.8.1/go.mod h1:wHxEcudfqmLYa8iTfL+OuZPbBZkmvliBWKIezN3kD9Y=
github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
github.com/spf13/pflag v1.0.6 h1:jFzHGLGAlb3ruxLB8MhbI6A8+AQX/2eW4qeyNZXNp2o=
github.com/spf13/cobra v1.9.1 h1:CXSaggrXdbHK9CF+8ywj8Amf7PBRmPCOJugH954Nnlo=
github.com/spf13/cobra v1.9.1/go.mod h1:nDyEzZ8ogv936Cinf6g1RU9MRY64Ir93oCnqb9wxYW0=
github.com/spf13/pflag v1.0.6/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
github.com/spf13/pflag v1.0.7 h1:vN6T9TfwStFPFM5XzjsvmzZkLuaLX+HS+0SeFLRgU6M=
github.com/spf13/pflag v1.0.7/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
github.com/stefanberger/go-pkcs11uri v0.0.0-20230803200340-78284954bff6 h1:pnnLyeX7o/5aX8qUQ69P/mLojDqwda8hFOCBTmP/6hw=
github.com/stefanberger/go-pkcs11uri v0.0.0-20230803200340-78284954bff6/go.mod h1:39R/xuhNgVhi+K0/zst4TLrJrVmbm6LVgl4A0+ZFS5M=
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
@ -344,24 +315,22 @@ github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO
github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4=
github.com/stretchr/testify v1.10.0 h1:Xv5erBjTwe/5IxqUQTdXv5kgmIvbHo3QQyRwhJsOfJA=
github.com/stretchr/testify v1.10.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY=
github.com/sylabs/sif/v2 v2.20.2 h1:HGEPzauCHhIosw5o6xmT3jczuKEuaFzSfdjAsH33vYw=
github.com/sylabs/sif/v2 v2.20.2/go.mod h1:WyYryGRaR4Wp21SAymm5pK0p45qzZCSRiZMFvUZiuhc=
github.com/syndtr/gocapability v0.0.0-20200815063812-42c35b437635 h1:kdXcSzyDtseVEc4yCz2qF8ZrQvIDBJLl4S1c3GCXmoI=
github.com/syndtr/gocapability v0.0.0-20200815063812-42c35b437635/go.mod h1:hkRG7XYTFWNJGYcbNJQlaLq0fg1yr4J4t/NcTQtrfww=
github.com/tchap/go-patricia/v2 v2.3.2 h1:xTHFutuitO2zqKAQ5rCROYgUb7Or/+IC3fts9/Yc7nM=
github.com/tchap/go-patricia/v2 v2.3.2/go.mod h1:VZRHKAb53DLaG+nA9EaYYiaEx6YztwDlLElMsnSHD4k=
github.com/sylabs/sif/v2 v2.21.1 h1:GZ0b5//AFAqJEChd8wHV/uSKx/l1iuGYwjR8nx+4wPI=
github.com/sylabs/sif/v2 v2.21.1/go.mod h1:YoqEGQnb5x/ItV653bawXHZJOXQaEWpGwHsSD3YePJI=
github.com/tchap/go-patricia/v2 v2.3.3 h1:xfNEsODumaEcCcY3gI0hYPZ/PcpVv5ju6RMAhgwZDDc=
github.com/tchap/go-patricia/v2 v2.3.3/go.mod h1:VZRHKAb53DLaG+nA9EaYYiaEx6YztwDlLElMsnSHD4k=
github.com/titanous/rocacheck v0.0.0-20171023193734-afe73141d399 h1:e/5i7d4oYZ+C1wj2THlRK+oAhjeS/TRQwMfkIuet3w0=
github.com/titanous/rocacheck v0.0.0-20171023193734-afe73141d399/go.mod h1:LdwHTNJT99C5fTAzDz0ud328OgXz+gierycbcIx2fRs=
github.com/ulikunitz/xz v0.5.12 h1:37Nm15o69RwBkXM0J6A5OlE67RZTfzUxTj8fB3dfcsc=
github.com/ulikunitz/xz v0.5.12/go.mod h1:nbz6k7qbPmH4IRqmfOplQw/tblSgqTqBwxkY0oWt/14=
github.com/vbatts/tar-split v0.11.7 h1:ixZ93pO/GmvaZw4Vq9OwmfZK/kc2zKdPfu0B+gYqs3U=
github.com/vbatts/tar-split v0.11.7/go.mod h1:eF6B6i6ftWQcDqEn3/iGFRFRo8cBIMSJVOpnNdfTMFA=
github.com/vbauerster/mpb/v8 v8.9.1 h1:LH5R3lXPfE2e3lIGxN7WNWv3Hl5nWO6LRi2B0L0ERHw=
github.com/vbauerster/mpb/v8 v8.9.1/go.mod h1:4XMvznPh8nfe2NpnDo1QTPvW9MVkUhbG90mPWvmOzcQ=
github.com/vishvananda/netlink v1.3.1-0.20240922070040-084abd93d350 h1:w5OI+kArIBVksl8UGn6ARQshtPCQvDsbuA9NQie3GIg=
github.com/vishvananda/netlink v1.3.1-0.20240922070040-084abd93d350/go.mod h1:i6NetklAujEcC6fK0JPjT8qSwWyO0HLn4UKG+hGqeJs=
github.com/vishvananda/netns v0.0.4 h1:Oeaw1EM2JMxD51g9uhtC0D7erkIjgmj8+JZc26m1YX8=
github.com/vishvananda/netns v0.0.4/go.mod h1:SpkAiCQRtJ6TvvxPnOSyH3BMl6unz3xZlaprSwhNNJM=
github.com/vbatts/tar-split v0.12.1 h1:CqKoORW7BUWBe7UL/iqTVvkTBOF8UvOMKOIZykxnnbo=
github.com/vbatts/tar-split v0.12.1/go.mod h1:eF6B6i6ftWQcDqEn3/iGFRFRo8cBIMSJVOpnNdfTMFA=
github.com/vbauerster/mpb/v8 v8.10.2 h1:2uBykSHAYHekE11YvJhKxYmLATKHAGorZwFlyNw4hHM=
github.com/vbauerster/mpb/v8 v8.10.2/go.mod h1:+Ja4P92E3/CorSZgfDtK46D7AVbDqmBQRTmyTqPElo0=
github.com/vishvananda/netlink v1.3.1 h1:3AEMt62VKqz90r0tmNhog0r/PpWKmrEShJU0wJW6bV0=
github.com/vishvananda/netlink v1.3.1/go.mod h1:ARtKouGSTGchR8aMwmkzC0qiNPrrWO5JS/XMVl45+b4=
github.com/vishvananda/netns v0.0.5 h1:DfiHV+j8bA32MFM7bfEunvT8IAqQ/NzSJHtcmW5zdEY=
github.com/vishvananda/netns v0.0.5/go.mod h1:SpkAiCQRtJ6TvvxPnOSyH3BMl6unz3xZlaprSwhNNJM=
github.com/xeipuuv/gojsonpointer v0.0.0-20190905194746-02993c407bfb h1:zGWFAtiMcyryUHoUjUJX0/lt1H2+i2Ka2n+D3DImSNo=
github.com/xeipuuv/gojsonpointer v0.0.0-20190905194746-02993c407bfb/go.mod h1:N2zxlSyiKSe5eX1tZViRH5QA0qijqEDrYZiPEAiq3wU=
github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415 h1:EzJWgHovont7NscjpAxXsDA8S8BMYve8Y5+7cuRE7R0=
@ -371,30 +340,36 @@ github.com/xeipuuv/gojsonschema v1.2.0/go.mod h1:anYRn/JVcOK2ZgGU+IjEV4nwlhoK5sQ
github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY=
go.etcd.io/bbolt v1.3.11 h1:yGEzV1wPz2yVCLsD8ZAiGHhHVlczyC9d1rP43/VCRJ0=
go.etcd.io/bbolt v1.3.11/go.mod h1:dksAq7YMXoljX0xu6VF5DMZGbhYYoLUalEiSySYAS4I=
go.mongodb.org/mongo-driver v1.14.0 h1:P98w8egYRjYe3XDjxhYJagTokP/H6HzlsnojRgZRd80=
go.mongodb.org/mongo-driver v1.14.0/go.mod h1:Vzb0Mk/pa7e6cWw85R4F/endUC3u0U9jGcNU603k65c=
go.etcd.io/bbolt v1.4.2 h1:IrUHp260R8c+zYx/Tm8QZr04CX+qWS5PGfPdevhdm1I=
go.etcd.io/bbolt v1.4.2/go.mod h1:Is8rSHO/b4f3XigBC0lL0+4FwAQv3HXEEIgFMuKHceM=
go.opencensus.io v0.24.0 h1:y73uSU6J157QMP2kn2r30vwW1A2W2WFwSCGnAVxeaD0=
go.opencensus.io v0.24.0/go.mod h1:vNK8G9p7aAivkbmorf4v+7Hgx+Zs0yY+0fOtgBfjQKo=
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.56.0 h1:UP6IpuHFkUgOQL9FFQFrZ+5LiwhhYRbi7VZSIx6Nj5s=
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.56.0/go.mod h1:qxuZLtbq5QDtdeSHsS7bcf6EH6uO6jUAgk764zd3rhM=
go.opentelemetry.io/otel v1.31.0 h1:NsJcKPIW0D0H3NgzPDHmo0WW6SptzPdqg/L1zsIm2hY=
go.opentelemetry.io/otel v1.31.0/go.mod h1:O0C14Yl9FgkjqcCZAsE053C13OaddMYr/hz6clDkEJE=
go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.31.0 h1:K0XaT3DwHAcV4nKLzcQvwAgSyisUghWoY20I7huthMk=
go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.31.0/go.mod h1:B5Ki776z/MBnVha1Nzwp5arlzBbE3+1jk+pGmaP5HME=
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.31.0 h1:lUsI2TYsQw2r1IASwoROaCnjdj2cvC2+Jbxvk6nHnWU=
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.31.0/go.mod h1:2HpZxxQurfGxJlJDblybejHB6RX6pmExPNe517hREw4=
go.opentelemetry.io/otel/metric v1.31.0 h1:FSErL0ATQAmYHUIzSezZibnyVlft1ybhy4ozRPcF2fE=
go.opentelemetry.io/otel/metric v1.31.0/go.mod h1:C3dEloVbLuYoX41KpmAhOqNriGbA+qqH6PQ5E5mUfnY=
go.opentelemetry.io/otel/sdk v1.31.0 h1:xLY3abVHYZ5HSfOg3l2E5LUj2Cwva5Y7yGxnSW9H5Gk=
go.opentelemetry.io/otel/sdk v1.31.0/go.mod h1:TfRbMdhvxIIr/B2N2LQW2S5v9m3gOQ/08KsbbO5BPT0=
go.opentelemetry.io/otel/sdk/metric v1.31.0 h1:i9hxxLJF/9kkvfHppyLL55aW7iIJz4JjxTeYusH7zMc=
go.opentelemetry.io/otel/sdk/metric v1.31.0/go.mod h1:CRInTMVvNhUKgSAMbKyTMxqOBC0zgyxzW55lZzX43Y8=
go.opentelemetry.io/otel/trace v1.31.0 h1:ffjsj1aRouKewfr85U2aGagJ46+MvodynlQ1HYdmJys=
go.opentelemetry.io/otel/trace v1.31.0/go.mod h1:TXZkRk7SM2ZQLtR6eoAWQFIHPvzQ06FJAsO1tJg480A=
go.opentelemetry.io/proto/otlp v1.3.1 h1:TrMUixzpM0yuc/znrFTP9MMRh8trP93mkCiDVeXrui0=
go.opentelemetry.io/proto/otlp v1.3.1/go.mod h1:0X1WI4de4ZsLrrJNLAQbFeLCm3T7yBkR0XqQ7niQU+8=
go.opentelemetry.io/auto/sdk v1.1.0 h1:cH53jehLUN6UFLY71z+NDOiNJqDdPRaXzTel0sJySYA=
go.opentelemetry.io/auto/sdk v1.1.0/go.mod h1:3wSPjt5PWp2RhlCcmmOial7AvC4DQqZb7a7wCow3W8A=
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.60.0 h1:sbiXRNDSWJOTobXh5HyQKjq6wUC5tNybqjIqDpAY4CU=
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.60.0/go.mod h1:69uWxva0WgAA/4bu2Yy70SLDBwZXuQ6PbBpbsa5iZrQ=
go.opentelemetry.io/otel v1.35.0 h1:xKWKPxrxB6OtMCbmMY021CqC45J+3Onta9MqjhnusiQ=
go.opentelemetry.io/otel v1.35.0/go.mod h1:UEqy8Zp11hpkUrL73gSlELM0DupHoiq72dR+Zqel/+Y=
go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.35.0 h1:1fTNlAIJZGWLP5FVu0fikVry1IsiUnXjf7QFvoNN3Xw=
go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.35.0/go.mod h1:zjPK58DtkqQFn+YUMbx0M2XV3QgKU0gS9LeGohREyK4=
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.35.0 h1:xJ2qHD0C1BeYVTLLR9sX12+Qb95kfeD/byKj6Ky1pXg=
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.35.0/go.mod h1:u5BF1xyjstDowA1R5QAO9JHzqK+ublenEW/dyqTjBVk=
go.opentelemetry.io/otel/metric v1.35.0 h1:0znxYu2SNyuMSQT4Y9WDWej0VpcsxkuklLa4/siN90M=
go.opentelemetry.io/otel/metric v1.35.0/go.mod h1:nKVFgxBZ2fReX6IlyW28MgZojkoAkJGaE8CpgeAU3oE=
go.opentelemetry.io/otel/sdk v1.35.0 h1:iPctf8iprVySXSKJffSS79eOjl9pvxV9ZqOWT0QejKY=
go.opentelemetry.io/otel/sdk v1.35.0/go.mod h1:+ga1bZliga3DxJ3CQGg3updiaAJoNECOgJREo9KHGQg=
go.opentelemetry.io/otel/sdk/metric v1.35.0 h1:1RriWBmCKgkeHEhM7a2uMjMUfP7MsOF5JpUCaEqEI9o=
go.opentelemetry.io/otel/sdk/metric v1.35.0/go.mod h1:is6XYCUMpcKi+ZsOvfluY5YstFnhW0BidkR+gL+qN+w=
go.opentelemetry.io/otel/trace v1.35.0 h1:dPpEfJu1sDIqruz7BHFG3c7528f6ddfSWfFDVt/xgMs=
go.opentelemetry.io/otel/trace v1.35.0/go.mod h1:WUk7DtFp1Aw2MkvqGdwiXYDZZNvA/1J8o6xRXLrIkyc=
go.opentelemetry.io/proto/otlp v1.5.0 h1:xJvq7gMzB31/d406fB8U5CBdyQGw4P399D1aQWU/3i4=
go.opentelemetry.io/proto/otlp v1.5.0/go.mod h1:keN8WnHxOy8PG0rQZjJJ5A2ebUoafqWp0eVQ4yIXvJ4=
go.uber.org/automaxprocs v1.6.0 h1:O3y2/QNTOdbF+e/dpXNNW7Rx2hZ4sTIPyybbxyNqTUs=
go.uber.org/automaxprocs v1.6.0/go.mod h1:ifeIMSnPZuznNm6jmdzmU3/bfk01Fe2fotchwEFJ8r8=
go.yaml.in/yaml/v2 v2.4.2 h1:DzmwEr2rDGHl7lsFgAHxmNz/1NlQ7xLIrlN2h5d1eGI=
go.yaml.in/yaml/v2 v2.4.2/go.mod h1:081UH+NErpNdqlCXm3TtEran0rJZGxAYx9hb/ELlsPU=
go.yaml.in/yaml/v3 v3.0.3 h1:bXOww4E/J3f66rav3pX3m8w6jDE4knZjGOw8b5Y6iNE=
go.yaml.in/yaml/v3 v3.0.3/go.mod h1:tBHosrYAkRZjRAOREWbDnBXUf08JOwYq++0QNwQiWzI=
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
@ -403,11 +378,9 @@ golang.org/x/crypto v0.13.0/go.mod h1:y6Z2r+Rw4iayiXXAIxJIDAJ1zMW4yaTpebo8fPOliY
golang.org/x/crypto v0.19.0/go.mod h1:Iy9bg/ha4yyC70EfRS8jz+B6ybOBKMaSxLj6P6oBDfU=
golang.org/x/crypto v0.23.0/go.mod h1:CKFgDieR+mRhux2Lsu27y0fO304Db0wZe70UKqHu0v8=
golang.org/x/crypto v0.30.0/go.mod h1:kDsLvtWBEx7MV9tJOj9bnXsPbxwJQ6csT/x4KIN4Ssk=
golang.org/x/crypto v0.32.0 h1:euUpcYgM8WcP71gNpTqQCn6rC2t6ULUPiOzfWaXVVfc=
golang.org/x/crypto v0.32.0/go.mod h1:ZnnJkOaASj8g0AjIduWNlq2NRxL0PlBrbKVyZ6V/Ugc=
golang.org/x/crypto v0.40.0 h1:r4x+VvoG5Fm+eJcxMaY8CQM7Lb0l1lsmjGBQ6s8BfKM=
golang.org/x/crypto v0.40.0/go.mod h1:Qr1vMER5WyS2dfPHAlsOj01wgLbsyWtFn/aY+5+ZdxY=
golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
golang.org/x/exp v0.0.0-20250103183323-7d7fa50e5329 h1:9kj3STMvgqy3YA4VQXBrN7925ICMxD5wzMRcgA30588=
golang.org/x/exp v0.0.0-20250103183323-7d7fa50e5329/go.mod h1:qj5a5QZpwLU2NLQudwIN5koi3beDhSAlJwa67PuM98c=
golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU=
golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
@ -418,8 +391,8 @@ golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs=
golang.org/x/mod v0.12.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs=
golang.org/x/mod v0.15.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c=
golang.org/x/mod v0.17.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c=
golang.org/x/mod v0.22.0 h1:D4nJWe9zXqHOmWqj4VMOJhvzj7bEZg4wEYa759z1pH4=
golang.org/x/mod v0.22.0/go.mod h1:6SkKJ3Xj0I0BrPOZoBy3bdMptDDU9oJrpohJ3eWZ1fY=
golang.org/x/mod v0.25.0 h1:n7a+ZbQKQA/Ysbyb0/6IbB1H/X41mKgbhfv7AfG/44w=
golang.org/x/mod v0.25.0/go.mod h1:IXM97Txy2VM4PJ3gI61r1YEk/gAj6zAHN3AdZt6S9Ww=
golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
@ -436,8 +409,8 @@ golang.org/x/net v0.10.0/go.mod h1:0qNGK6F8kojg2nk9dLZ2mShWaEBan6FAoqfSigmmuDg=
golang.org/x/net v0.15.0/go.mod h1:idbUs1IY1+zTqbi8yxTbhexhEEk5ur9LInksu6HrEpk=
golang.org/x/net v0.21.0/go.mod h1:bIjVDfnllIU7BJ2DNgfnXvpSvtn8VRwhlsaeUTyUS44=
golang.org/x/net v0.25.0/go.mod h1:JkAGAh7GEvH74S6FOH42FLoXpXbE/aqXSrIQjXgsiwM=
golang.org/x/net v0.34.0 h1:Mb7Mrk043xzHgnRM88suvJFwzVrRfHEHJEl5/71CKw0=
golang.org/x/net v0.34.0/go.mod h1:di0qlW3YNM5oh6GqDGQr92MyTozJPmybPK4Ev/Gm31k=
golang.org/x/net v0.41.0 h1:vBTly1HeNPEn3wtREYfy4GZ/NECgw2Cnl+nK6Nz3uvw=
golang.org/x/net v0.41.0/go.mod h1:B/K4NNqkfmg07DQYrbwvSluqCJOOXwUjeb/5lOisjbA=
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
@ -449,8 +422,9 @@ golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.3.0/go.mod h1:FU7BRWz2tNW+3quACPkgCx/L+uEAv1htQ0V83Z9Rj+Y=
golang.org/x/sync v0.6.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
golang.org/x/sync v0.7.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
golang.org/x/sync v0.10.0 h1:3NQrjDixjgGwUOCaF8w2+VYHv0Ve/vGYSbdkTa98gmQ=
golang.org/x/sync v0.10.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
golang.org/x/sync v0.16.0 h1:ycBJEhp9p4vXvUZNszeOq0kGTPghopOL8q0fq3vstxw=
golang.org/x/sync v0.16.0/go.mod h1:1dzgHSNfp02xaA81J2MS99Qcpr2w7fw1gpm99rleRqA=
golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20181122145206-62eef0e2fa9b/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
@ -471,8 +445,8 @@ golang.org/x/sys v0.12.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.17.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
golang.org/x/sys v0.20.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
golang.org/x/sys v0.28.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
golang.org/x/sys v0.29.0 h1:TPYlXGxvx1MGTn2GiZDhnjPA9wZzZeGKHHmKhHYvgaU=
golang.org/x/sys v0.29.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
golang.org/x/sys v0.34.0 h1:H5Y5sJ2L2JRdyv7ROF1he/lPdvFsd0mJHFw2ThKHxLA=
golang.org/x/sys v0.34.0/go.mod h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k=
golang.org/x/telemetry v0.0.0-20240228155512-f48c80bd79b2/go.mod h1:TeRTkGYfJXctD9OcfyVLyj2J3IxLnKwHJR8f4D8a3YE=
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
@ -482,8 +456,8 @@ golang.org/x/term v0.12.0/go.mod h1:owVbMEjm3cBLCHdkQu9b1opXd4ETQWc3BhuQGKgXgvU=
golang.org/x/term v0.17.0/go.mod h1:lLRBjIVuehSbZlaOtGMbcMncT+aqLLLmKrsjNrUguwk=
golang.org/x/term v0.20.0/go.mod h1:8UkIAJTvZgivsXaD6/pH6U9ecQzZ45awqEOzuCvwpFY=
golang.org/x/term v0.27.0/go.mod h1:iMsnZpn0cago0GOrHO2+Y7u7JPn5AylBrcoWkElMTSM=
golang.org/x/term v0.28.0 h1:/Ts8HFuMR2E6IP/jlo7QVLZHggjKQbhu/7H0LJFr3Gg=
golang.org/x/term v0.28.0/go.mod h1:Sw/lC2IAUZ92udQNf3WodGtn4k/XoLyZoh8v/8uiwek=
golang.org/x/term v0.33.0 h1:NuFncQrRcaRvVmgRkvM3j/F00gWIAlcmlB8ACEKmGIg=
golang.org/x/term v0.33.0/go.mod h1:s18+ql9tYWp1IfpV9DmCtQDDSRBUjKaw9M1eAv5UeF0=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ=
@ -492,10 +466,11 @@ golang.org/x/text v0.9.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8=
golang.org/x/text v0.13.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE=
golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU=
golang.org/x/text v0.15.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU=
golang.org/x/text v0.21.0 h1:zyQAAkrwaneQ066sspRyJaG9VNi/YJ1NfzcGB3hZ/qo=
golang.org/x/text v0.21.0/go.mod h1:4IBbMaMmOPCJ8SecivzSH54+73PCFmPWxNTLm+vZkEQ=
golang.org/x/time v0.9.0 h1:EsRrnYcQiGH+5FfbgvV4AP7qEZstoyrHB0DzarOQ4ZY=
golang.org/x/time v0.9.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM=
golang.org/x/text v0.27.0 h1:4fGWRpyh641NLlecmyl4LOe6yDdfaYNrGb2zdfo4JV4=
golang.org/x/text v0.27.0/go.mod h1:1D28KMCvyooCX9hBiosv5Tz/+YLxj0j7XhWjpSUF7CU=
golang.org/x/time v0.11.0 h1:/bpjEDfN9tkoN/ryeYHnv5hcMlc8ncjMcM4XBk5NWV0=
golang.org/x/time v0.11.0/go.mod h1:CDIdPxbZBQxdj6cxyCIdrNogrJKMJ7pr37NYpMcMDSg=
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY=
@ -508,8 +483,8 @@ golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc
golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU=
golang.org/x/tools v0.13.0/go.mod h1:HvlwmtVNQAhOuCjW7xxvovg8wbNq7LwfXh/k7wXUl58=
golang.org/x/tools v0.21.1-0.20240508182429-e35e4ccd0d2d/go.mod h1:aiJjzUbINMkxbQROHiO6hDPo2LHcIPhhQsa9DLh0yGk=
golang.org/x/tools v0.28.0 h1:WuB6qZ4RPCQo5aP3WdKZS7i595EdWqWR8vqJTlwTVK8=
golang.org/x/tools v0.28.0/go.mod h1:dcIOrVd3mfQKTgrDVQHqCPMWy6lnhfhtX3hLXYVLfRw=
golang.org/x/tools v0.34.0 h1:qIpSLOxeCYGg9TrcJokLBG4KFA6d795g0xkBkiESGlo=
golang.org/x/tools v0.34.0/go.mod h1:pAP9OwEaY1CAW3HOmg3hLZC5Z0CCmzjAF2UQMSqNARg=
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
@ -519,18 +494,17 @@ google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7
google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc=
google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc=
google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo=
google.golang.org/genproto v0.0.0-20241118233622-e639e219e697 h1:ToEetK57OidYuqD4Q5w+vfEnPvPpuTwedCNVohYJfNk=
google.golang.org/genproto/googleapis/api v0.0.0-20241209162323-e6fa225c2576 h1:CkkIfIt50+lT6NHAVoRYEyAvQGFM7xEwXUUywFvEb3Q=
google.golang.org/genproto/googleapis/api v0.0.0-20241209162323-e6fa225c2576/go.mod h1:1R3kvZ1dtP3+4p4d3G8uJ8rFk/fWlScl38vanWACI08=
google.golang.org/genproto/googleapis/rpc v0.0.0-20250102185135-69823020774d h1:xJJRGY7TJcvIlpSrN3K6LAWgNFUILlO+OMAqtg9aqnw=
google.golang.org/genproto/googleapis/rpc v0.0.0-20250102185135-69823020774d/go.mod h1:3ENsm/5D1mzDyhpzeRi1NR784I0BcofWBoSc5QqqMK4=
google.golang.org/genproto/googleapis/api v0.0.0-20250303144028-a0af3efb3deb h1:p31xT4yrYrSM/G4Sn2+TNUkVhFCbG9y8itM2S6Th950=
google.golang.org/genproto/googleapis/api v0.0.0-20250303144028-a0af3efb3deb/go.mod h1:jbe3Bkdp+Dh2IrslsFCklNhweNTBgSYanP1UXhJDhKg=
google.golang.org/genproto/googleapis/rpc v0.0.0-20250313205543-e70fdf4c4cb4 h1:iK2jbkWL86DXjEx0qiHcRE9dE4/Ahua5k6V8OWFb//c=
google.golang.org/genproto/googleapis/rpc v0.0.0-20250313205543-e70fdf4c4cb4/go.mod h1:LuRYeWDFV6WOn90g357N17oMCaxpgCnbi/44qJvDn2I=
google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg=
google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY=
google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk=
google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc=
google.golang.org/grpc v1.69.4 h1:MF5TftSMkd8GLw/m0KM6V8CMOCY6NZ1NQDPGFgbTt4A=
google.golang.org/grpc v1.69.4/go.mod h1:vyjdE6jLBI76dgpDojsFGNaHlxdjXN9ghpnd2o7JGZ4=
google.golang.org/grpc v1.72.2 h1:TdbGzwb82ty4OusHWepvFWGLgIbNo1/SUynEN0ssqv8=
google.golang.org/grpc v1.72.2/go.mod h1:wH5Aktxcg25y1I3w7H69nHfXdOG3UiadoBtjh3izSDM=
google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8=
google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0=
google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM=
@ -540,25 +514,23 @@ google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2
google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c=
google.golang.org/protobuf v1.36.2 h1:R8FeyR1/eLmkutZOM5CWghmo5itiG9z0ktFlTVLuTmU=
google.golang.org/protobuf v1.36.2/go.mod h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE=
google.golang.org/protobuf v1.36.6 h1:z1NpPI8ku2WgiWnf+t9wTPsn6eP1L7ksHUlkfLvd9xY=
google.golang.org/protobuf v1.36.6/go.mod h1:jduwjTPXsFjZGTmRluh+L6NjiWu7pchiJ2/5YcXBHnY=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk=
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q=
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
gotest.tools v2.2.0+incompatible h1:VsBPFP1AI068pPrMxtb/S8Zkgf9xEmTLJjfM+P5UIEo=
gotest.tools v2.2.0+incompatible/go.mod h1:DsYFclhRJ6vuDpmuTbkuFWG+y2sxOXAzmJt81HFBacw=
gotest.tools/v3 v3.5.1 h1:EENdUnS3pdur5nybKYIh2Vfgc8IUNBjxDPSjtiJcOzU=
gotest.tools/v3 v3.5.1/go.mod h1:isy3WKz7GK6uNw/sbHzfKBLvlvXwUyV06n6brMxxopU=
gotest.tools/v3 v3.5.2 h1:7koQfIKdy+I8UTetycgUqXWSDwpgv193Ka+qRsmBY8Q=
gotest.tools/v3 v3.5.2/go.mod h1:LtdLGcnqToBH83WByAAi/wiwSFCArdFIUV/xxN4pcjA=
honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
k8s.io/klog v1.0.0 h1:Pt+yjF5aB1xDSVbau4VsWe+dQNzA0qv1LlXdC2dF6Q8=
k8s.io/klog v1.0.0/go.mod h1:4Bi6QPql/J/LkTDqv7R/cd3hPo4k2DG6Ptcz060Ez5I=
sigs.k8s.io/yaml v1.4.0 h1:Mk1wCc2gy/F0THH0TAp1QYyJNzRm2KCLy3o5ASXVI5E=
sigs.k8s.io/yaml v1.4.0/go.mod h1:Ejl7/uTz7PSA4eKMyQCUTnhZYNmLIl+5c2lQPGR2BPY=
tags.cncf.io/container-device-interface v0.8.0 h1:8bCFo/g9WODjWx3m6EYl3GfUG31eKJbaggyBDxEldRc=
tags.cncf.io/container-device-interface v0.8.0/go.mod h1:Apb7N4VdILW0EVdEMRYXIDVRZfNJZ+kmEUss2kRRQ6Y=
tags.cncf.io/container-device-interface/specs-go v0.8.0 h1:QYGFzGxvYK/ZLMrjhvY0RjpUavIn4KcmRmVP/JjdBTA=
tags.cncf.io/container-device-interface/specs-go v0.8.0/go.mod h1:BhJIkjjPh4qpys+qm4DAYtUyryaTDg9zris+AczXyws=
sigs.k8s.io/yaml v1.5.0 h1:M10b2U7aEUY6hRtU870n2VTPgR5RZiL/I6Lcc2F4NUQ=
sigs.k8s.io/yaml v1.5.0/go.mod h1:wZs27Rbxoai4C0f8/9urLZtZtF3avA3gKvGyPdDqTO4=
tags.cncf.io/container-device-interface v1.0.1 h1:KqQDr4vIlxwfYh0Ed/uJGVgX+CHAkahrgabg6Q8GYxc=
tags.cncf.io/container-device-interface v1.0.1/go.mod h1:JojJIOeW3hNbcnOH2q0NrWNha/JuHoDZcmYxAZwb2i0=
tags.cncf.io/container-device-interface/specs-go v1.0.0 h1:8gLw29hH1ZQP9K1YtAzpvkHCjjyIxHZYzBAvlQ+0vD8=
tags.cncf.io/container-device-interface/specs-go v1.0.0/go.mod h1:u86hoFWqnh3hWz3esofRFKbI261bUlvUfLKGrDhJkgQ=

7
hack/sqlite_tag.sh Executable file
View File

@ -0,0 +1,7 @@
#!/usr/bin/env bash
${CPP:-${CC:-cc} -E} ${CPPFLAGS} - &> /dev/null << EOF
#include <sqlite3.h>
EOF
if test $? -eq 0 ; then
echo libsqlite3
fi
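
The probe above simply asks the C preprocessor whether sqlite3.h can be found and, if so, prints "libsqlite3". That output is presumably passed along as a Go build tag (for example via go build -tags "$(hack/sqlite_tag.sh)"), so that cgo-backed SQLite support is only compiled in when the system headers exist. A minimal sketch of a Go file gated on such a tag; the package and function names here are made up for illustration:

//go:build libsqlite3

package sqlitecheck

// hasSystemSQLite is compiled only when the libsqlite3 tag is set, i.e. when
// hack/sqlite_tag.sh found the system headers; a sibling file without the tag
// would provide the "false" variant.
func hasSystemSQLite() bool {
    return true
}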

1036
image.go

File diff suppressed because it is too large.

View File

@ -17,10 +17,12 @@ import (
"strconv"
"strings"
"sync"
"time"
"github.com/containerd/platforms"
"github.com/containers/buildah"
"github.com/containers/buildah/define"
"github.com/containers/buildah/internal"
internalUtil "github.com/containers/buildah/internal/util"
"github.com/containers/buildah/pkg/parse"
"github.com/containers/buildah/util"
@ -28,6 +30,7 @@ import (
"github.com/containers/common/pkg/config"
"github.com/containers/image/v5/docker"
"github.com/containers/image/v5/docker/reference"
"github.com/containers/image/v5/image"
"github.com/containers/image/v5/manifest"
"github.com/containers/image/v5/pkg/shortnames"
istorage "github.com/containers/image/v5/storage"
@ -92,12 +95,7 @@ func BuildDockerfiles(ctx context.Context, store storage.Store, options define.B
}
logger.SetLevel(logrus.GetLevel())
var dockerfiles []io.ReadCloser
defer func(dockerfiles ...io.ReadCloser) {
for _, d := range dockerfiles {
d.Close()
}
}(dockerfiles...)
var dockerfiles []io.Reader
for _, tag := range append([]string{options.Output}, options.AdditionalTags...) {
if tag == "" {
@ -109,7 +107,7 @@ func BuildDockerfiles(ctx context.Context, store storage.Store, options define.B
}
for _, dfile := range paths {
var data io.ReadCloser
var data io.Reader
if strings.HasPrefix(dfile, "http://") || strings.HasPrefix(dfile, "https://") {
logger.Debugf("reading remote Dockerfile %q", dfile)
@ -117,8 +115,8 @@ func BuildDockerfiles(ctx context.Context, store storage.Store, options define.B
if err != nil {
return "", nil, err
}
defer resp.Body.Close()
if resp.ContentLength == 0 {
resp.Body.Close()
return "", nil, fmt.Errorf("no contents in %q", dfile)
}
data = resp.Body
@ -145,13 +143,12 @@ func BuildDockerfiles(ctx context.Context, store storage.Store, options define.B
if err != nil {
return "", nil, fmt.Errorf("reading build instructions: %w", err)
}
defer contents.Close()
dinfo, err = contents.Stat()
if err != nil {
contents.Close()
return "", nil, fmt.Errorf("reading info about %q: %w", dfile, err)
}
if dinfo.Mode().IsRegular() && dinfo.Size() == 0 {
contents.Close()
return "", nil, fmt.Errorf("no contents in %q", dfile)
}
data = contents
@ -163,7 +160,7 @@ func BuildDockerfiles(ctx context.Context, store storage.Store, options define.B
if err != nil {
return "", nil, err
}
data = io.NopCloser(pData)
data = pData
}
dockerfiles = append(dockerfiles, data)
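
The hunks above drop the io.ReadCloser slice and its single deferred cleanup in favor of plain io.Reader values whose underlying files are closed explicitly (or deferred individually) on each error path. One reason the old pattern was fragile: a deferred variadic call evaluates its arguments at the moment the defer statement runs, so a slice that is still empty at that point never gets its later entries closed. A small, self-contained illustration of that capture rule, with plain strings standing in for the readers:

package main

import "fmt"

func main() {
    var items []string
    // The deferred call's arguments are evaluated right here, while items is
    // still empty, so the cleanup never sees anything appended afterwards.
    defer func(snapshot ...string) {
        fmt.Println("cleaning up", len(snapshot), "items") // prints 0
    }(items...)

    items = append(items, "Dockerfile", "Dockerfile.in")
    fmt.Println("opened", len(items), "items") // prints 2
}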
@ -223,6 +220,15 @@ func BuildDockerfiles(ctx context.Context, store storage.Store, options define.B
}
}
if sourceDateEpoch, ok := options.Args[internal.SourceDateEpochName]; ok && options.SourceDateEpoch == nil {
sde, err := strconv.ParseInt(sourceDateEpoch, 10, 64)
if err != nil {
return "", nil, fmt.Errorf("parsing SOURCE_DATE_EPOCH build-arg %q: %w", sourceDateEpoch, err)
}
sdeTime := time.Unix(sde, 0)
options.SourceDateEpoch = &sdeTime
}
systemContext := options.SystemContext
for _, platform := range options.Platforms {
platformContext := *systemContext
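
The new block above accepts a SOURCE_DATE_EPOCH build-arg and turns it into options.SourceDateEpoch when the caller did not already set one. Per the reproducible-builds convention, the value is a decimal count of seconds since the Unix epoch. A standalone sketch of that parsing step; the literal value is only an example:

package main

import (
    "fmt"
    "strconv"
    "time"
)

func main() {
    raw := "1700000000" // e.g. the value of a SOURCE_DATE_EPOCH build-arg
    sde, err := strconv.ParseInt(raw, 10, 64)
    if err != nil {
        panic(fmt.Errorf("parsing SOURCE_DATE_EPOCH %q: %w", raw, err))
    }
    sdeTime := time.Unix(sde, 0).UTC()
    fmt.Println(sdeTime) // 2023-11-14 22:13:20 +0000 UTC
}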
@ -264,6 +270,16 @@ func BuildDockerfiles(ctx context.Context, store storage.Store, options define.B
}
// Deep copy args to prevent concurrent read/writes over Args.
platformOptions.Args = maps.Clone(options.Args)
if options.SourceDateEpoch != nil {
if options.Timestamp != nil {
return "", nil, errors.New("timestamp and source-date-epoch would be ambiguous if allowed together")
}
if _, alreadySet := platformOptions.Args[internal.SourceDateEpochName]; !alreadySet {
platformOptions.Args[internal.SourceDateEpochName] = fmt.Sprintf("%d", options.SourceDateEpoch.Unix())
}
}
builds.Go(func() error {
loggerPerPlatform := logger
if platformOptions.LogFile != "" && platformOptions.LogSplitByPlatform {
@ -369,7 +385,7 @@ func BuildDockerfiles(ctx context.Context, store storage.Store, options define.B
return "", nil, err
}
defer imgSource.Close()
manifestBytes, _, err := imgSource.GetManifest(ctx, nil)
manifestBytes, _, err := image.UnparsedInstance(imgSource, nil).Manifest(ctx)
if err != nil {
return "", nil, err
}
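
Here the manifest is read through image.UnparsedInstance(...).Manifest(ctx) instead of calling GetManifest on the ImageSource directly, presumably so the usual checks in containers/image's UnparsedImage layer apply. A rough, standalone sketch of the same call pattern against a registry reference; the image name is only an example, and a nil SystemContext just means library defaults:

package main

import (
    "context"
    "fmt"

    "github.com/containers/image/v5/docker"
    "github.com/containers/image/v5/image"
    "github.com/containers/image/v5/types"
)

// manifestFor fetches the raw manifest and its MIME type for a registry image.
func manifestFor(ctx context.Context, sys *types.SystemContext, ref string) ([]byte, string, error) {
    imgRef, err := docker.ParseReference(ref) // note the required leading "//"
    if err != nil {
        return nil, "", err
    }
    src, err := imgRef.NewImageSource(ctx, sys) // contacts the registry
    if err != nil {
        return nil, "", err
    }
    defer src.Close()
    // A nil instance digest means "whatever the reference points at".
    return image.UnparsedInstance(src, nil).Manifest(ctx)
}

func main() {
    blob, mimeType, err := manifestFor(context.Background(), nil, "//quay.io/libpod/alpine:latest")
    if err != nil {
        panic(err)
    }
    fmt.Println(mimeType, len(blob))
}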
@ -423,16 +439,6 @@ func buildDockerfilesOnce(ctx context.Context, store storage.Store, logger *logr
if options.SystemContext.VariantChoice != "" {
builtinArgDefaults["TARGETPLATFORM"] += "/" + options.SystemContext.VariantChoice
}
} else {
// fill them in using values for the default platform
defaultPlatform := platforms.DefaultSpec()
builtinArgDefaults["TARGETOS"] = defaultPlatform.OS
builtinArgDefaults["TARGETVARIANT"] = defaultPlatform.Variant
builtinArgDefaults["TARGETARCH"] = defaultPlatform.Architecture
builtinArgDefaults["TARGETPLATFORM"] = defaultPlatform.OS + "/" + defaultPlatform.Architecture
if defaultPlatform.Variant != "" {
builtinArgDefaults["TARGETPLATFORM"] += "/" + defaultPlatform.Variant
}
}
delete(options.Args, "TARGETPLATFORM")
@ -450,9 +456,8 @@ func buildDockerfilesOnce(ctx context.Context, store storage.Store, logger *logr
return "", nil, fmt.Errorf("creating build executor: %w", err)
}
b := imagebuilder.NewBuilder(options.Args)
for k, v := range builtinArgDefaults {
b.BuiltinArgDefaults[k] = v
}
maps.Copy(b.BuiltinArgDefaults, builtinArgDefaults)
defaultContainerConfig, err := config.Default()
if err != nil {
return "", nil, fmt.Errorf("failed to get container config: %w", err)
@ -465,7 +470,7 @@ func buildDockerfilesOnce(ctx context.Context, store storage.Store, logger *logr
if options.Target != "" {
stagesTargeted, ok := stages.ThroughTarget(options.Target)
if !ok {
return "", nil, fmt.Errorf("The target %q was not found in the provided Dockerfile", options.Target)
return "", nil, fmt.Errorf("the target %q was not found in the provided Dockerfile", options.Target)
}
stages = stagesTargeted
}
@ -567,7 +572,7 @@ func platformsForBaseImages(ctx context.Context, logger *logrus.Logger, dockerfi
logrus.Debugf("preparing to read image manifest for %q: %v", baseImage, err)
continue
}
candidateBytes, candidateType, err := src.GetManifest(ctx, nil)
candidateBytes, candidateType, err := image.UnparsedInstance(src, nil).Manifest(ctx)
_ = src.Close()
if err != nil {
logrus.Debugf("reading image manifest for %q: %v", baseImage, err)

View File

@ -0,0 +1,63 @@
package imagebuildah
import (
"context"
"errors"
"fmt"
"io/fs"
"os"
"path/filepath"
"testing"
"github.com/stretchr/testify/assert"
"github.com/containers/buildah/define"
)
func TestFilesClosedProperlyByBuildDockerfiles(t *testing.T) {
// create files in a temp dir
var paths []string
for _, name := range []string{"Dockerfile", "Dockerfile.in"} {
fpath, err := filepath.Abs(filepath.Join(t.TempDir(), name))
assert.Nil(t, err)
assert.Nil(t, os.WriteFile(fpath, []byte("FROM scratch"), 0o644))
paths = append(paths, fpath)
}
// pass the files created above, plus a missing one, so that we fail early and never attempt an actual build
_, _, err := BuildDockerfiles(context.Background(), nil, define.BuildOptions{}, append(append(make([]string, 0, len(paths)), paths...), "missing")...)
var pathErr *fs.PathError
assert.True(t, errors.As(err, &pathErr))
assert.Equal(t, "missing", pathErr.Path)
// verify (as best we can) that we don't think these files are still open
openFiles, err := currentOpenFiles()
assert.Nil(t, err)
for _, path := range paths {
assert.NotContains(t, openFiles, path)
}
}
// currentOpenFiles makes a best effort at returning a map of which files are currently
// open by our process. We don't fail if we can't follow symlinks from fds, since they
// may no longer exist between when we read them and when we try to use them. Instead
// we just ignore them.
func currentOpenFiles() (map[string]struct{}, error) {
rd := "/proc/self/fd"
es, err := os.ReadDir(rd)
if err != nil {
return nil, err
}
rv := make(map[string]struct{})
for _, de := range es {
if de.Type()&fs.ModeSymlink == fs.ModeSymlink {
dest, err := os.Readlink(filepath.Join(rd, de.Name()))
if err != nil {
fmt.Fprintf(os.Stderr, "cannot follow symlink, ignoring: %v\n", err)
continue
}
rv[dest] = struct{}{}
}
}
return rv, nil
}

View File

@ -14,6 +14,7 @@ import (
"github.com/containers/buildah"
"github.com/containers/buildah/define"
"github.com/containers/buildah/internal"
internalUtil "github.com/containers/buildah/internal/util"
"github.com/containers/buildah/pkg/parse"
"github.com/containers/buildah/pkg/sshagent"
@ -43,18 +44,19 @@ import (
// instruction in the Dockerfile, since that's usually an indication of a user
// error, but for these values we make exceptions and ignore them.
var builtinAllowedBuildArgs = map[string]struct{}{
"HTTP_PROXY": {},
"http_proxy": {},
"HTTPS_PROXY": {},
"https_proxy": {},
"FTP_PROXY": {},
"ftp_proxy": {},
"NO_PROXY": {},
"no_proxy": {},
"TARGETARCH": {},
"TARGETOS": {},
"TARGETPLATFORM": {},
"TARGETVARIANT": {},
"HTTP_PROXY": {},
"http_proxy": {},
"HTTPS_PROXY": {},
"https_proxy": {},
"FTP_PROXY": {},
"ftp_proxy": {},
"NO_PROXY": {},
"no_proxy": {},
"TARGETARCH": {},
"TARGETOS": {},
"TARGETPLATFORM": {},
"TARGETVARIANT": {},
internal.SourceDateEpochName: {},
}
// Executor is a buildah-based implementation of the imagebuilder.Executor
@ -80,8 +82,10 @@ type Executor struct {
output string
outputFormat string
additionalTags []string
log func(format string, args ...interface{}) // can be nil
log func(format string, args ...any) // can be nil
in io.Reader
inheritLabels types.OptionalBool
inheritAnnotations types.OptionalBool
out io.Writer
err io.Writer
signaturePolicyPath string
@ -150,9 +154,10 @@ type Executor struct {
logPrefix string
unsetEnvs []string
unsetLabels []string
processLabel string // Shares processLabel of first stage container with containers of other stages in same build
mountLabel string // Shares mountLabel of first stage container with containers of other stages in same build
buildOutput string // Specifies instructions for any custom build output
unsetAnnotations []string
processLabel string // Shares processLabel of first stage container with containers of other stages in same build
mountLabel string // Shares mountLabel of first stage container with containers of other stages in same build
buildOutputs []string // Specifies instructions for any custom build output
osVersion string
osFeatures []string
envs []string
@ -162,7 +167,11 @@ type Executor struct {
compatSetParent types.OptionalBool
compatVolumes types.OptionalBool
compatScratchConfig types.OptionalBool
compatLayerOmissions types.OptionalBool
noPivotRoot bool
sourceDateEpoch *time.Time
rewriteTimestamp bool
createdAnnotation types.OptionalBool
}
type imageTypeAndHistoryAndDiffIDs struct {
@ -170,6 +179,8 @@ type imageTypeAndHistoryAndDiffIDs struct {
history []v1.History
diffIDs []digest.Digest
err error
architecture string
os string
}
// newExecutor creates a new instance of the imagebuilder.Executor interface.
@ -228,6 +239,11 @@ func newExecutor(logger *logrus.Logger, logPrefix string, store storage.Store, o
}
}
buildOutputs := slices.Clone(options.BuildOutputs)
if options.BuildOutput != "" { //nolint:staticcheck
buildOutputs = append(buildOutputs, options.BuildOutput) //nolint:staticcheck
}
exec := Executor{
args: options.Args,
cacheFrom: options.CacheFrom,
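
In newExecutor above, the deprecated singular BuildOutput option is folded into the newer BuildOutputs slice, so older callers keep working while the executor only has to handle one list. A tiny sketch of that shim pattern with simplified types; the type=... strings are just plausible examples of --output values, not a syntax reference:

package main

import (
    "fmt"
    "slices"
)

type options struct {
    BuildOutput  string   // deprecated single value
    BuildOutputs []string // preferred
}

// mergedOutputs folds the legacy field into the slice-valued one.
func mergedOutputs(o options) []string {
    outs := slices.Clone(o.BuildOutputs)
    if o.BuildOutput != "" {
        outs = append(outs, o.BuildOutput)
    }
    return outs
}

func main() {
    fmt.Println(mergedOutputs(options{
        BuildOutput:  "type=tar,dest=out.tar",
        BuildOutputs: []string{"type=local,dest=./rootfs"},
    }))
}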
@ -261,6 +277,8 @@ func newExecutor(logger *logrus.Logger, logPrefix string, store storage.Store, o
err: options.Err,
reportWriter: writer,
isolation: options.Isolation,
inheritLabels: options.InheritLabels,
inheritAnnotations: options.InheritAnnotations,
namespaceOptions: options.NamespaceOptions,
configureNetwork: options.ConfigureNetwork,
cniPluginPath: options.CNIPluginPath,
@ -312,7 +330,8 @@ func newExecutor(logger *logrus.Logger, logPrefix string, store storage.Store, o
logPrefix: logPrefix,
unsetEnvs: slices.Clone(options.UnsetEnvs),
unsetLabels: slices.Clone(options.UnsetLabels),
buildOutput: options.BuildOutput,
unsetAnnotations: slices.Clone(options.UnsetAnnotations),
buildOutputs: buildOutputs,
osVersion: options.OSVersion,
osFeatures: slices.Clone(options.OSFeatures),
envs: slices.Clone(options.Envs),
@ -322,8 +341,17 @@ func newExecutor(logger *logrus.Logger, logPrefix string, store storage.Store, o
compatSetParent: options.CompatSetParent,
compatVolumes: options.CompatVolumes,
compatScratchConfig: options.CompatScratchConfig,
compatLayerOmissions: options.CompatLayerOmissions,
noPivotRoot: options.NoPivotRoot,
sourceDateEpoch: options.SourceDateEpoch,
rewriteTimestamp: options.RewriteTimestamp,
createdAnnotation: options.CreatedAnnotation,
}
// sort unsetAnnotations because we will later write these
// values to the history of the image therefore we want to
// make sure that order is always consistent.
slices.Sort(exec.unsetAnnotations)
if exec.err == nil {
exec.err = os.Stderr
}
@ -372,9 +400,12 @@ func newExecutor(logger *logrus.Logger, logPrefix string, store storage.Store, o
// startStage creates a new stage executor that will be referenced whenever a
// COPY or ADD statement uses a --from=NAME flag.
func (b *Executor) startStage(ctx context.Context, stage *imagebuilder.Stage, stages imagebuilder.Stages, output string) *StageExecutor {
// create a copy of systemContext for each stage executor.
systemContext := *b.systemContext
stageExec := &StageExecutor{
ctx: ctx,
executor: b,
systemContext: &systemContext,
log: b.log,
index: stage.Position,
stages: stages,
@ -447,30 +478,30 @@ func (b *Executor) waitForStage(ctx context.Context, name string, stages imagebu
}
}
// getImageTypeAndHistoryAndDiffIDs returns the manifest type, history, and diff IDs list of imageID.
func (b *Executor) getImageTypeAndHistoryAndDiffIDs(ctx context.Context, imageID string) (string, []v1.History, []digest.Digest, error) {
// getImageTypeAndHistoryAndDiffIDs returns the os, architecture, manifest type, history, and diff IDs list of imageID.
func (b *Executor) getImageTypeAndHistoryAndDiffIDs(ctx context.Context, imageID string) (string, string, string, []v1.History, []digest.Digest, error) {
b.imageInfoLock.Lock()
imageInfo, ok := b.imageInfoCache[imageID]
b.imageInfoLock.Unlock()
if ok {
return imageInfo.manifestType, imageInfo.history, imageInfo.diffIDs, imageInfo.err
return imageInfo.os, imageInfo.architecture, imageInfo.manifestType, imageInfo.history, imageInfo.diffIDs, imageInfo.err
}
imageRef, err := storageTransport.Transport.ParseStoreReference(b.store, "@"+imageID)
if err != nil {
return "", nil, nil, fmt.Errorf("getting image reference %q: %w", imageID, err)
return "", "", "", nil, nil, fmt.Errorf("getting image reference %q: %w", imageID, err)
}
ref, err := imageRef.NewImage(ctx, nil)
if err != nil {
return "", nil, nil, fmt.Errorf("creating new image from reference to image %q: %w", imageID, err)
return "", "", "", nil, nil, fmt.Errorf("creating new image from reference to image %q: %w", imageID, err)
}
defer ref.Close()
oci, err := ref.OCIConfig(ctx)
if err != nil {
return "", nil, nil, fmt.Errorf("getting possibly-converted OCI config of image %q: %w", imageID, err)
return "", "", "", nil, nil, fmt.Errorf("getting possibly-converted OCI config of image %q: %w", imageID, err)
}
manifestBytes, manifestFormat, err := ref.Manifest(ctx)
if err != nil {
return "", nil, nil, fmt.Errorf("getting manifest of image %q: %w", imageID, err)
return "", "", "", nil, nil, fmt.Errorf("getting manifest of image %q: %w", imageID, err)
}
if manifestFormat == "" && len(manifestBytes) > 0 {
manifestFormat = manifest.GuessMIMEType(manifestBytes)
@ -481,9 +512,11 @@ func (b *Executor) getImageTypeAndHistoryAndDiffIDs(ctx context.Context, imageID
history: oci.History,
diffIDs: oci.RootFS.DiffIDs,
err: nil,
architecture: oci.Architecture,
os: oci.OS,
}
b.imageInfoLock.Unlock()
return manifestFormat, oci.History, oci.RootFS.DiffIDs, nil
return oci.OS, oci.Architecture, manifestFormat, oci.History, oci.RootFS.DiffIDs, nil
}
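
getImageTypeAndHistoryAndDiffIDs now also returns the image's OS and architecture, and it keeps the same mutex-guarded, per-image-ID cache, so repeated stages that share a base image only inspect it once (errors are cached too). A simplified, self-contained sketch of that memoization shape:

package main

import (
    "fmt"
    "sync"
)

type imageInfo struct {
    os, arch string
    err      error
}

type infoCache struct {
    mu    sync.Mutex
    infos map[string]imageInfo
}

// lookup returns a cached result if present, otherwise loads and caches it.
func (c *infoCache) lookup(imageID string, load func(string) imageInfo) imageInfo {
    c.mu.Lock()
    info, ok := c.infos[imageID]
    c.mu.Unlock()
    if ok {
        return info
    }
    info = load(imageID)
    c.mu.Lock()
    c.infos[imageID] = info
    c.mu.Unlock()
    return info
}

func main() {
    c := &infoCache{infos: map[string]imageInfo{}}
    loads := 0
    load := func(string) imageInfo { loads++; return imageInfo{os: "linux", arch: "amd64"} }
    c.lookup("sha256:abc", load)
    c.lookup("sha256:abc", load)
    fmt.Println("loads:", loads) // 1
}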
func (b *Executor) buildStage(ctx context.Context, cleanupStages map[int]*StageExecutor, stages imagebuilder.Stages, stageIndex int) (imageID string, ref reference.Canonical, onlyBaseImage bool, err error) {
@ -508,7 +541,7 @@ func (b *Executor) buildStage(ctx context.Context, cleanupStages map[int]*StageE
// layers, it's easier to reuse cached layers.
if len(b.labels) > 0 {
var labelLine string
labels := append([]string{}, b.labels...)
labels := slices.Clone(b.labels)
for _, labelSpec := range labels {
key, value, _ := strings.Cut(labelSpec, "=")
// check only for an empty key since docker allows empty values
@ -553,7 +586,7 @@ func (b *Executor) buildStage(ctx context.Context, cleanupStages map[int]*StageE
stageExecutor := b.startStage(ctx, &stage, stages, output)
if stageExecutor.log == nil {
stepCounter := 0
stageExecutor.log = func(format string, args ...interface{}) {
stageExecutor.log = func(format string, args ...any) {
prefix := b.logPrefix
if len(stages) > 1 {
prefix += fmt.Sprintf("[%d/%d] ", stageIndex+1, len(stages))

View File

@ -59,8 +59,9 @@ import (
// name to the image that it produces.
type StageExecutor struct {
ctx context.Context
systemContext *types.SystemContext
executor *Executor
log func(format string, args ...interface{})
log func(format string, args ...any)
index int
stages imagebuilder.Stages
name string
@ -75,6 +76,8 @@ type StageExecutor struct {
stage *imagebuilder.Stage
didExecute bool
argsFromContainerfile []string
hasLink bool
isLastStep bool
}
// Preserve informs the stage executor that from this point on, it needs to
@ -172,14 +175,7 @@ func (s *StageExecutor) Preserve(path string) error {
for cachedPath := range s.volumeCache {
// Walk our list of cached volumes, and check that they're
// still in the list of locations that we need to cache.
found := false
for _, volume := range s.volumes {
if volume == cachedPath {
// We need to keep this volume's cache.
found = true
break
}
}
found := slices.Contains(s.volumes, cachedPath)
if !found {
// We don't need to keep this volume's cache. Make a
// note to remove it.
@ -365,11 +361,11 @@ func (s *StageExecutor) Copy(excludes []string, copies ...imagebuilder.Copy) err
}
return errors.New("COPY --keep-git-dir is not supported")
}
if cp.Link {
return errors.New("COPY --link is not supported")
}
if cp.Parents {
return errors.New("COPY --parents is not supported")
if cp.Link && s.executor.layers {
s.hasLink = true
} else if cp.Link {
s.executor.logger.Warn("--link is not supported when building without --layers, ignoring --link")
s.hasLink = false
}
if len(cp.Excludes) > 0 {
excludes = append(slices.Clone(excludes), cp.Excludes...)
@ -427,7 +423,13 @@ func (s *StageExecutor) performCopy(excludes []string, copies ...imagebuilder.Co
data = strings.TrimPrefix(data, "\n")
// add a trailing newline when the heredoc ends, for docker compat
data = data + "\n"
tmpFile, err := os.Create(filepath.Join(parse.GetTempDir(), path.Base(filepath.ToSlash(file.Name))))
// Create separate subdir for this file.
tmpDir, err := os.MkdirTemp(parse.GetTempDir(), "buildah-heredoc")
if err != nil {
return fmt.Errorf("unable to create tmp dir for heredoc run %q: %w", parse.GetTempDir(), err)
}
defer os.RemoveAll(tmpDir)
tmpFile, err := os.Create(filepath.Join(tmpDir, path.Base(filepath.ToSlash(file.Name))))
if err != nil {
return fmt.Errorf("unable to create tmp file for COPY instruction at %q: %w", parse.GetTempDir(), err)
}
@ -442,7 +444,7 @@ func (s *StageExecutor) performCopy(excludes []string, copies ...imagebuilder.Co
tmpFile.Close()
return fmt.Errorf("unable to write contents of heredoc file at %q: %w", tmpFile.Name(), err)
}
copySources = append(copySources, filepath.Base(tmpFile.Name()))
copySources = append(copySources, filepath.Join(filepath.Base(tmpDir), filepath.Base(tmpFile.Name())))
tmpFile.Close()
}
contextDir = parse.GetTempDir()
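
The change above stages each heredoc payload in its own temporary subdirectory under the shared temp dir, so two COPY instructions whose heredocs happen to use the same file name can no longer clobber each other, and the recorded copy source becomes a path relative to that temp dir. A small sketch of the staging step; paths and contents are illustrative:

package main

import (
    "fmt"
    "os"
    "path/filepath"
)

// stageHeredoc writes data into a fresh subdirectory of baseTmp and returns
// the copy-source path relative to baseTmp.
func stageHeredoc(baseTmp, name, data string) (string, error) {
    dir, err := os.MkdirTemp(baseTmp, "buildah-heredoc")
    if err != nil {
        return "", err
    }
    dst := filepath.Join(dir, filepath.Base(name))
    if err := os.WriteFile(dst, []byte(data+"\n"), 0o644); err != nil {
        return "", err
    }
    // e.g. "buildah-heredoc123456/script.sh"
    return filepath.Rel(baseTmp, dst)
}

func main() {
    p, err := stageHeredoc(os.TempDir(), "script.sh", "echo hello")
    if err != nil {
        panic(err)
    }
    fmt.Println(p)
}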
@ -554,9 +556,20 @@ func (s *StageExecutor) performCopy(excludes []string, copies ...imagebuilder.Co
return fmt.Errorf("source can't be a URL for COPY")
}
} else {
sources = append(sources, filepath.Join(contextDir, src))
// filepath.Join cleans the path, so /./ is removed
if _, suffix, found := strings.Cut(src, "/./"); found && copy.Parents {
fullPath := filepath.Join(contextDir, src)
suffix = filepath.Clean(suffix)
prefix := strings.TrimSuffix(fullPath, suffix)
prefix = filepath.Clean(prefix)
src = prefix + "/./" + suffix
} else {
src = filepath.Join(contextDir, src)
}
sources = append(sources, src)
}
}
labelsAndAnnotations := s.buildMetadata(s.isLastStep, true)
options := buildah.AddAndCopyOptions{
Chmod: copy.Chmod,
Chown: copy.Chown,
@ -571,10 +584,13 @@ func (s *StageExecutor) performCopy(excludes []string, copies ...imagebuilder.Co
// The values for these next two fields are ultimately
// based on command line flags with names that sound
// much more generic.
CertPath: s.executor.systemContext.DockerCertPath,
InsecureSkipTLSVerify: s.executor.systemContext.DockerInsecureSkipTLSVerify,
CertPath: s.systemContext.DockerCertPath,
InsecureSkipTLSVerify: s.systemContext.DockerInsecureSkipTLSVerify,
MaxRetries: s.executor.maxPullPushRetries,
RetryDelay: s.executor.retryPullPushDelay,
Parents: copy.Parents,
Link: s.hasLink,
BuildMetadata: labelsAndAnnotations,
}
if len(copy.Files) > 0 {
// If we are copying heredoc files, we need to temporarily place
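
The new branch above preserves the "/./" pivot marker that COPY --parents relies on: filepath.Join would normally clean it away, so the code re-splits the joined path around the marker to keep the portion of the directory tree that should be recreated at the destination. A standalone sketch of that path handling; paths are illustrative:

package main

import (
    "fmt"
    "path/filepath"
    "strings"
)

// resolveParentsSource joins src with the build context while keeping any
// "/./" pivot marker intact.
func resolveParentsSource(contextDir, src string) string {
    if _, suffix, found := strings.Cut(src, "/./"); found {
        full := filepath.Join(contextDir, src) // Join cleans the path, dropping "/./"
        suffix = filepath.Clean(suffix)
        prefix := filepath.Clean(strings.TrimSuffix(full, suffix))
        return prefix + "/./" + suffix
    }
    return filepath.Join(contextDir, src)
}

func main() {
    fmt.Println(resolveParentsSource("/build/ctx", "src/./app/main.go"))
    // /build/ctx/src/./app/main.go
}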
@ -607,7 +623,7 @@ func (s *StageExecutor) runStageMountPoints(mountList []string) (map[string]inte
if strings.Contains(flag, "from") {
tokens := strings.Split(flag, ",")
if len(tokens) < 2 {
return nil, fmt.Errorf("Invalid --mount command: %s", flag)
return nil, fmt.Errorf("invalid --mount command: %s", flag)
}
for _, token := range tokens {
key, val, hasVal := strings.Cut(token, "=")
@ -697,18 +713,16 @@ func (s *StageExecutor) runStageMountPoints(mountList []string) (map[string]inte
MountPoint: otherStage.mountPoint,
}
break
} else {
// Treat the source's name as the name of an image.
mountPoint, err := s.getImageRootfs(s.ctx, from)
if err != nil {
return nil, fmt.Errorf("%s from=%s: no stage or image found with that name", flag, from)
}
stageMountPoints[from] = internal.StageMountDetails{
IsImage: true,
DidExecute: true,
MountPoint: mountPoint,
}
break
}
// Otherwise, treat the source's name as the name of an image.
mountPoint, err := s.getImageRootfs(s.ctx, from)
if err != nil {
return nil, fmt.Errorf("%s from=%s: no stage or image found with that name", flag, from)
}
stageMountPoints[from] = internal.StageMountDetails{
IsImage: true,
DidExecute: true,
MountPoint: mountPoint,
}
default:
continue
@ -802,7 +816,7 @@ func (s *StageExecutor) Run(run imagebuilder.Run, config docker.Config) error {
defer devNull.Close()
stdin = devNull
}
namespaceOptions := append([]define.NamespaceOption{}, s.executor.namespaceOptions...)
namespaceOptions := slices.Clone(s.executor.namespaceOptions)
options := buildah.RunOptions{
Args: s.executor.runtimeArgs,
Cmd: config.Cmd,
@ -827,7 +841,7 @@ func (s *StageExecutor) Run(run imagebuilder.Run, config docker.Config) error {
Stderr: s.executor.err,
Stdin: stdin,
Stdout: s.executor.out,
SystemContext: s.executor.systemContext,
SystemContext: s.systemContext,
Terminal: buildah.WithoutTerminal,
User: config.User,
WorkingDir: config.WorkingDir,
@ -952,19 +966,20 @@ func (s *StageExecutor) prepare(ctx context.Context, from string, initializeIBCo
}
}
builderSystemContext := s.executor.systemContext
// get platform string from stage
if stage.Builder.Platform != "" {
os, arch, variant, err := parse.Platform(stage.Builder.Platform)
// In a multi-stage build where `FROM --platform=<>` was used, we must
// reset the context for new stages so that they don't inherit an
// unexpected `--platform` from prior stages.
if stage.Builder.Platform != "" || (len(s.stages) > 1 && (s.systemContext.ArchitectureChoice == "" && s.systemContext.VariantChoice == "" && s.systemContext.OSChoice == "")) {
imageOS, imageArch, imageVariant, err := parse.Platform(stage.Builder.Platform)
if err != nil {
return nil, fmt.Errorf("unable to parse platform %q: %w", stage.Builder.Platform, err)
}
if arch != "" || variant != "" {
builderSystemContext.ArchitectureChoice = arch
builderSystemContext.VariantChoice = variant
if imageArch != "" || imageVariant != "" {
s.systemContext.ArchitectureChoice = imageArch
s.systemContext.VariantChoice = imageVariant
}
if os != "" {
builderSystemContext.OSChoice = os
if imageOS != "" {
s.systemContext.OSChoice = imageOS
}
}
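
This hunk, together with the per-stage copy made in startStage, stops stages from sharing one *types.SystemContext: when FROM --platform=... adjusts the OS, architecture, or variant for one stage, later stages in a multi-stage build no longer inherit that override by accident. A toy illustration of the aliasing problem and of the copy that avoids it; the field names are simplified:

package main

import "fmt"

type sysCtx struct{ OSChoice, ArchitectureChoice string }

func main() {
    shared := &sysCtx{}

    // Stage 1 uses FROM --platform=linux/arm64 and mutates the shared context.
    stage1 := shared
    stage1.OSChoice, stage1.ArchitectureChoice = "linux", "arm64"

    // Stage 2 made no platform request, yet it would see stage 1's override.
    fmt.Println("shared:", *shared) // {linux arm64}

    // Per-stage copy, as in the diff: mutate the copy, not the original.
    perStage := *shared
    perStage.ArchitectureChoice = "amd64"
    fmt.Println("copy:", perStage, "original:", *shared)
}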
@ -978,7 +993,7 @@ func (s *StageExecutor) prepare(ctx context.Context, from string, initializeIBCo
BlobDirectory: s.executor.blobDirectory,
SignaturePolicyPath: s.executor.signaturePolicyPath,
ReportWriter: s.executor.reportWriter,
SystemContext: builderSystemContext,
SystemContext: s.systemContext,
Isolation: s.executor.isolation,
NamespaceOptions: s.executor.namespaceOptions,
ConfigureNetwork: s.executor.configureNetwork,
@ -1027,9 +1042,14 @@ func (s *StageExecutor) prepare(ctx context.Context, from string, initializeIBCo
for _, p := range builder.Ports() {
ports[docker.Port(p)] = struct{}{}
}
hostname, domainname := builder.Hostname(), builder.Domainname()
containerName := builder.Container
if s.executor.timestamp != nil || s.executor.sourceDateEpoch != nil {
hostname, domainname, containerName = "sandbox", "", ""
}
dConfig := docker.Config{
Hostname: builder.Hostname(),
Domainname: builder.Domainname(),
Hostname: hostname,
Domainname: domainname,
User: builder.User(),
Env: builder.Env(),
Cmd: builder.Cmd(),
@ -1056,12 +1076,17 @@ func (s *StageExecutor) prepare(ctx context.Context, from string, initializeIBCo
dImage := docker.Image{
Parent: builder.FromImageID,
ContainerConfig: dConfig,
Container: builder.Container,
Container: containerName,
Author: builder.Maintainer(),
Architecture: builder.Architecture(),
RootFS: rootfs,
}
dImage.Config = &dImage.ContainerConfig
if s.executor.inheritLabels == types.OptionalBoolFalse {
// If the user has set `--inherit-labels=false`, do not
// inherit labels from the base image.
dImage.Config.Labels = nil
}
err = ib.FromImage(&dImage, node)
if err != nil {
if err2 := builder.Delete(); err2 != nil {
@ -1258,19 +1283,21 @@ func (s *StageExecutor) Execute(ctx context.Context, base string) (imgID string,
}
// Parse and populate buildOutputOption if needed
var buildOutputOption define.BuildOutputOption
canGenerateBuildOutput := (s.executor.buildOutput != "" && lastStage)
if canGenerateBuildOutput {
logrus.Debugf("Generating custom build output with options %q", s.executor.buildOutput)
buildOutputOption, err = parse.GetBuildOutput(s.executor.buildOutput)
if err != nil {
return "", nil, false, fmt.Errorf("failed to parse build output: %w", err)
var buildOutputOptions []define.BuildOutputOption
if lastStage && len(s.executor.buildOutputs) > 0 {
for _, buildOutput := range s.executor.buildOutputs {
logrus.Debugf("generating custom build output with options %q", buildOutput)
buildOutputOption, err := parse.GetBuildOutput(buildOutput)
if err != nil {
return "", nil, false, fmt.Errorf("failed to parse build output %q: %w", buildOutput, err)
}
buildOutputOptions = append(buildOutputOptions, buildOutputOption)
}
}
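// A usage sketch (hypothetical flag values): a build invoked with
//   --output type=local,dest=/tmp/rootfs --output type=tar,dest=rootfs.tar
// now parses into one define.BuildOutputOption per --output flag, and each option
// is replayed through generateBuildOutput once the last stage has been committed.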
if len(children) == 0 {
// There are no steps.
if s.builder.FromImageID == "" || s.executor.squash || s.executor.confidentialWorkload.Convert || len(s.executor.labels) > 0 || len(s.executor.annotations) > 0 || len(s.executor.unsetEnvs) > 0 || len(s.executor.unsetLabels) > 0 || len(s.executor.sbomScanOptions) > 0 {
if s.builder.FromImageID == "" || s.executor.squash || s.executor.confidentialWorkload.Convert || len(s.executor.labels) > 0 || len(s.executor.annotations) > 0 || len(s.executor.unsetEnvs) > 0 || len(s.executor.unsetLabels) > 0 || len(s.executor.sbomScanOptions) > 0 || len(s.executor.unsetAnnotations) > 0 {
// We either don't have a base image, or we need to
// transform the contents of the base image, or we need
// to make some changes to just the config blob. Whichever
@ -1279,7 +1306,7 @@ func (s *StageExecutor) Execute(ctx context.Context, base string) (imgID string,
// No base image means there's nothing to put in a
// layer, so don't create one.
emptyLayer := (s.builder.FromImageID == "")
createdBy, err := s.getCreatedBy(nil, "")
createdBy, err := s.getCreatedBy(nil, "", lastStage)
if err != nil {
return "", nil, false, fmt.Errorf("unable to get createdBy for the node: %w", err)
}
@ -1299,7 +1326,7 @@ func (s *StageExecutor) Execute(ctx context.Context, base string) (imgID string,
}
// Generate build output from the new image, or the preexisting
// one if we didn't actually do anything, if needed.
if canGenerateBuildOutput {
for _, buildOutputOption := range buildOutputOptions {
if err := s.generateBuildOutput(buildOutputOption); err != nil {
return "", nil, onlyBaseImage, err
}
@ -1311,6 +1338,8 @@ func (s *StageExecutor) Execute(ctx context.Context, base string) (imgID string,
logRusage()
moreInstructions := i < len(children)-1
lastInstruction := !moreInstructions
s.isLastStep = lastStage && lastInstruction
// Resolve any arguments in this instruction.
step := ib.Step()
if err := step.Resolve(node); err != nil {
@ -1430,7 +1459,7 @@ func (s *StageExecutor) Execute(ctx context.Context, base string) (imgID string,
if s.executor.timestamp != nil {
timestamp = *s.executor.timestamp
}
createdBy, err := s.getCreatedBy(node, addedContentSummary)
createdBy, err := s.getCreatedBy(node, addedContentSummary, false)
if err != nil {
return "", nil, false, fmt.Errorf("unable to get createdBy for the node: %w", err)
}
@ -1444,7 +1473,7 @@ func (s *StageExecutor) Execute(ctx context.Context, base string) (imgID string,
// stage.
if lastStage || imageIsUsedLater {
logCommit(s.output, i)
createdBy, err := s.getCreatedBy(node, addedContentSummary)
createdBy, err := s.getCreatedBy(node, addedContentSummary, lastStage && lastInstruction)
if err != nil {
return "", nil, false, fmt.Errorf("unable to get createdBy for the node: %w", err)
}
@ -1454,7 +1483,7 @@ func (s *StageExecutor) Execute(ctx context.Context, base string) (imgID string,
}
logImageID(imgID)
// Generate build output if needed.
if canGenerateBuildOutput {
for _, buildOutputOption := range buildOutputOptions {
if err := s.generateBuildOutput(buildOutputOption); err != nil {
return "", nil, false, err
}
@ -1519,7 +1548,7 @@ func (s *StageExecutor) Execute(ctx context.Context, base string) (imgID string,
// cacheKey since it will be used either while pulling or pushing the
// cache images.
if needsCacheKey {
cacheKey, err = s.generateCacheKey(ctx, node, addedContentSummary, s.stepRequiresLayer(step))
cacheKey, err = s.generateCacheKey(ctx, node, addedContentSummary, s.stepRequiresLayer(step), lastInstruction && lastStage)
if err != nil {
return "", nil, false, fmt.Errorf("failed while generating cache key: %w", err)
}
@ -1531,7 +1560,7 @@ func (s *StageExecutor) Execute(ctx context.Context, base string) (imgID string,
// we need to call ib.Run() to correctly put the args together before
// determining if a cached layer with the same build args already exists
// and that is done in the if block below.
if checkForLayers && step.Command != "arg" && !(s.executor.squash && lastInstruction && lastStage) && !avoidLookingCache {
if checkForLayers && step.Command != "arg" && (!s.executor.squash || !lastInstruction || !lastStage) && !avoidLookingCache {
// For `COPY` and `ADD`, history entries include digests computed from
// the content that's copied in. We need to compute that information so that
// it can be used to evaluate the cache, which means we need to go ahead
@ -1547,13 +1576,13 @@ func (s *StageExecutor) Execute(ctx context.Context, base string) (imgID string,
addedContentSummary = s.getContentSummaryAfterAddingContent()
// regenerate cache key with updated content summary
if needsCacheKey {
cacheKey, err = s.generateCacheKey(ctx, node, addedContentSummary, s.stepRequiresLayer(step))
cacheKey, err = s.generateCacheKey(ctx, node, addedContentSummary, s.stepRequiresLayer(step), lastInstruction && lastStage)
if err != nil {
return "", nil, false, fmt.Errorf("failed while generating cache key: %w", err)
}
}
}
cacheID, err = s.intermediateImageExists(ctx, node, addedContentSummary, s.stepRequiresLayer(step))
cacheID, err = s.intermediateImageExists(ctx, node, addedContentSummary, s.stepRequiresLayer(step), lastInstruction && lastStage)
if err != nil {
return "", nil, false, fmt.Errorf("checking if cached image exists from a previous build: %w", err)
}
@ -1565,7 +1594,7 @@ func (s *StageExecutor) Execute(ctx context.Context, base string) (imgID string,
// is ignored and will be automatically logged for --log-level debug
if ref, id, err := s.pullCache(ctx, cacheKey); ref != nil && id != "" && err == nil {
logCachePulled(cacheKey, ref)
cacheID, err = s.intermediateImageExists(ctx, node, addedContentSummary, s.stepRequiresLayer(step))
cacheID, err = s.intermediateImageExists(ctx, node, addedContentSummary, s.stepRequiresLayer(step), lastInstruction && lastStage)
if err != nil {
return "", nil, false, fmt.Errorf("checking if cached image exists from a previous build: %w", err)
}
@ -1597,7 +1626,7 @@ func (s *StageExecutor) Execute(ctx context.Context, base string) (imgID string,
addedContentSummary = s.getContentSummaryAfterAddingContent()
// regenerate cache key with updated content summary
if needsCacheKey {
cacheKey, err = s.generateCacheKey(ctx, node, addedContentSummary, s.stepRequiresLayer(step))
cacheKey, err = s.generateCacheKey(ctx, node, addedContentSummary, s.stepRequiresLayer(step), lastInstruction && lastStage)
if err != nil {
return "", nil, false, fmt.Errorf("failed while generating cache key: %w", err)
}
@ -1606,7 +1635,7 @@ func (s *StageExecutor) Execute(ctx context.Context, base string) (imgID string,
// Check if there's already an image based on our parent that
// has the same change that we just made.
if checkForLayers && !avoidLookingCache {
cacheID, err = s.intermediateImageExists(ctx, node, addedContentSummary, s.stepRequiresLayer(step))
cacheID, err = s.intermediateImageExists(ctx, node, addedContentSummary, s.stepRequiresLayer(step), lastInstruction && lastStage)
if err != nil {
return "", nil, false, fmt.Errorf("checking if cached image exists from a previous build: %w", err)
}
@ -1619,7 +1648,7 @@ func (s *StageExecutor) Execute(ctx context.Context, base string) (imgID string,
// is ignored and will be automatically logged for --log-level debug
if ref, id, err := s.pullCache(ctx, cacheKey); ref != nil && id != "" && err == nil {
logCachePulled(cacheKey, ref)
cacheID, err = s.intermediateImageExists(ctx, node, addedContentSummary, s.stepRequiresLayer(step))
cacheID, err = s.intermediateImageExists(ctx, node, addedContentSummary, s.stepRequiresLayer(step), lastInstruction && lastStage)
if err != nil {
return "", nil, false, fmt.Errorf("checking if cached image exists from a previous build: %w", err)
}
@ -1669,7 +1698,7 @@ func (s *StageExecutor) Execute(ctx context.Context, base string) (imgID string,
// We're not going to find any more cache hits, so we
// can stop looking for them.
checkForLayers = false
createdBy, err := s.getCreatedBy(node, addedContentSummary)
createdBy, err := s.getCreatedBy(node, addedContentSummary, lastStage && lastInstruction)
if err != nil {
return "", nil, false, fmt.Errorf("unable to get createdBy for the node: %w", err)
}
@ -1685,7 +1714,7 @@ func (s *StageExecutor) Execute(ctx context.Context, base string) (imgID string,
return "", nil, false, fmt.Errorf("committing container for step %+v: %w", *step, err)
}
// Generate build output if needed.
if canGenerateBuildOutput {
for _, buildOutputOption := range buildOutputOptions {
if err := s.generateBuildOutput(buildOutputOption); err != nil {
return "", nil, false, err
}
@ -1711,7 +1740,7 @@ func (s *StageExecutor) Execute(ctx context.Context, base string) (imgID string,
if lastInstruction && lastStage {
if s.executor.squash || s.executor.confidentialWorkload.Convert || len(s.executor.sbomScanOptions) != 0 {
createdBy, err := s.getCreatedBy(node, addedContentSummary)
createdBy, err := s.getCreatedBy(node, addedContentSummary, lastStage && lastInstruction)
if err != nil {
return "", nil, false, fmt.Errorf("unable to get createdBy for the node: %w", err)
}
@ -1725,7 +1754,7 @@ func (s *StageExecutor) Execute(ctx context.Context, base string) (imgID string,
return "", nil, false, fmt.Errorf("committing final squash step %+v: %w", *step, err)
}
// Generate build output if needed.
if canGenerateBuildOutput {
for _, buildOutputOption := range buildOutputOptions {
if err := s.generateBuildOutput(buildOutputOption); err != nil {
return "", nil, false, err
}
@ -1740,7 +1769,7 @@ func (s *StageExecutor) Execute(ctx context.Context, base string) (imgID string,
// then generate output manually since there is no opportunity
// for us to perform `commit` anywhere in the code.
// Generate build output if needed.
if canGenerateBuildOutput {
for _, buildOutputOption := range buildOutputOptions {
if err := s.generateBuildOutput(buildOutputOption); err != nil {
return "", nil, false, err
}
@ -1776,6 +1805,8 @@ func (s *StageExecutor) Execute(ctx context.Context, base string) (imgID string,
return "", nil, false, fmt.Errorf("preparing container for next step: %w", err)
}
}
s.hasLink = false
}
return imgID, ref, onlyBaseImage, nil
@ -1812,7 +1843,7 @@ func historyEntriesEqual(base, derived v1.History) bool {
// that we're comparing.
// Used to verify whether a cache of the intermediate image exists and whether
// to run the build again.
func (s *StageExecutor) historyAndDiffIDsMatch(baseHistory []v1.History, baseDiffIDs []digest.Digest, child *parser.Node, history []v1.History, diffIDs []digest.Digest, addedContentSummary string, buildAddsLayer bool) (bool, error) {
func (s *StageExecutor) historyAndDiffIDsMatch(baseHistory []v1.History, baseDiffIDs []digest.Digest, child *parser.Node, history []v1.History, diffIDs []digest.Digest, addedContentSummary string, buildAddsLayer bool, lastInstruction bool) (bool, error) {
// our history should be as long as the base's, plus one entry for what
// we're doing
if len(history) != len(baseHistory)+1 {
@ -1851,7 +1882,7 @@ func (s *StageExecutor) historyAndDiffIDsMatch(baseHistory []v1.History, baseDif
return false, nil
}
}
createdBy, err := s.getCreatedBy(child, addedContentSummary)
createdBy, err := s.getCreatedBy(child, addedContentSummary, lastInstruction)
if err != nil {
return false, fmt.Errorf("unable to get createdBy for the node: %w", err)
}
@ -1861,11 +1892,21 @@ func (s *StageExecutor) historyAndDiffIDsMatch(baseHistory []v1.History, baseDif
// getCreatedBy returns the command the image at node will be created by. If
// the passed-in CompositeDigester is not nil, it is assumed to have the digest
// information for the content if the node is ADD or COPY.
func (s *StageExecutor) getCreatedBy(node *parser.Node, addedContentSummary string) (string, error) {
//
// This function behaves differently when it is invoked for the last step. For instance,
// certain operations, such as removing annotations, do not make sense for every step
// and only apply to the last step of a build.
func (s *StageExecutor) getCreatedBy(node *parser.Node, addedContentSummary string, isLastStep bool) (string, error) {
if node == nil {
return "/bin/sh", nil
}
switch strings.ToUpper(node.Value) {
command := strings.ToUpper(node.Value)
addcopy := command == "ADD" || command == "COPY"
labelsAndAnnotations := s.buildMetadata(isLastStep, addcopy)
switch command {
case "ARG":
for _, variable := range strings.Fields(node.Original) {
if variable != "ARG" {
@ -1873,7 +1914,7 @@ func (s *StageExecutor) getCreatedBy(node *parser.Node, addedContentSummary stri
}
}
buildArgs := s.getBuildArgsKey()
return "/bin/sh -c #(nop) ARG " + buildArgs, nil
return "/bin/sh -c #(nop) ARG " + buildArgs + labelsAndAnnotations, nil
case "RUN":
shArg := ""
buildArgs := s.getBuildArgsResolvedForRun()
@ -1889,13 +1930,17 @@ func (s *StageExecutor) getCreatedBy(node *parser.Node, addedContentSummary stri
continue
}
mountOptionSource = mountInfo.Source
mountOptionSource, err = imagebuilder.ProcessWord(mountOptionSource, s.stage.Builder.Arguments())
if err != nil {
return "", fmt.Errorf("getCreatedBy: while replacing arg variables with values for format %q: %w", mountOptionSource, err)
}
mountOptionFrom = mountInfo.From
// If source is not specified then the default is '.'
if mountOptionSource == "" {
mountOptionSource = "."
}
}
// Source specificed is part of stage, image or additional-build-context.
// Source specified is part of stage, image or additional-build-context.
if mountOptionFrom != "" {
// If this is not a stage then get digest of image or additional build context
if _, ok := s.executor.stages[mountOptionFrom]; !ok {
@ -1937,25 +1982,32 @@ func (s *StageExecutor) getCreatedBy(node *parser.Node, addedContentSummary stri
if len(node.Original) > 4 {
shArg = node.Original[4:]
}
if buildArgs != "" {
return "|" + strconv.Itoa(len(strings.Split(buildArgs, " "))) + " " + buildArgs + " /bin/sh -c " + shArg + appendCheckSum, nil
}
result := "/bin/sh -c " + shArg
heredoc := ""
result := ""
if len(node.Heredocs) > 0 {
for _, doc := range node.Heredocs {
heredocContent := strings.TrimSpace(doc.Content)
result = result + "\n" + heredocContent
heredoc = heredoc + "\n" + heredocContent
}
}
return result + appendCheckSum, nil
if buildArgs != "" {
result = result + "|" + strconv.Itoa(len(strings.Split(buildArgs, " "))) + " " + buildArgs + " "
}
result = result + "/bin/sh -c " + shArg + heredoc + appendCheckSum + labelsAndAnnotations
return result, nil
case "ADD", "COPY":
destination := node
for destination.Next != nil {
destination = destination.Next
}
return "/bin/sh -c #(nop) " + strings.ToUpper(node.Value) + " " + addedContentSummary + " in " + destination.Value + " ", nil
hasLink := ""
if s.hasLink {
hasLink = " --link"
}
return "/bin/sh -c #(nop) " + strings.ToUpper(node.Value) + hasLink + " " + addedContentSummary + " in " + destination.Value + " " + labelsAndAnnotations, nil
default:
return "/bin/sh -c #(nop) " + node.Original, nil
return "/bin/sh -c #(nop) " + node.Original + labelsAndAnnotations, nil
}
}
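To make the shape of the generated history entries concrete, here is a small, hedged sketch; the values are made up and the assembly is simplified relative to the function above (checksums and heredocs are omitted):
package main

import (
	"fmt"
	"strconv"
	"strings"
)

func main() {
	// Hypothetical inputs for a RUN step that uses one build arg.
	buildArgs := "FOO=bar"
	shArg := "make all"
	suffix := "|inheritLabels=false" // produced by buildMetadata when --inherit-labels=false

	createdBy := "|" + strconv.Itoa(len(strings.Split(buildArgs, " "))) + " " + buildArgs +
		" /bin/sh -c " + shArg + suffix
	fmt.Println(createdBy) // |1 FOO=bar /bin/sh -c make all|inheritLabels=false
}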
@ -2041,7 +2093,7 @@ func (s *StageExecutor) tagExistingImage(ctx context.Context, cacheID, output st
return "", nil, err
}
policyContext, err := util.GetPolicyContext(s.executor.systemContext)
policyContext, err := util.GetPolicyContext(s.systemContext)
if err != nil {
return "", nil, err
}
@ -2089,22 +2141,22 @@ func (s *StageExecutor) tagExistingImage(ctx context.Context, cacheID, output st
// The generated cache key is further used by buildah to lock and to decide the
// tag for the intermediate image, which can be pushed to and pulled from
// the remote repository.
func (s *StageExecutor) generateCacheKey(ctx context.Context, currNode *parser.Node, addedContentDigest string, buildAddsLayer bool) (string, error) {
func (s *StageExecutor) generateCacheKey(ctx context.Context, currNode *parser.Node, addedContentDigest string, buildAddsLayer bool, lastInstruction bool) (string, error) {
hash := sha256.New()
var baseHistory []v1.History
var diffIDs []digest.Digest
var manifestType string
var err error
if s.builder.FromImageID != "" {
manifestType, baseHistory, diffIDs, err = s.executor.getImageTypeAndHistoryAndDiffIDs(ctx, s.builder.FromImageID)
_, _, manifestType, baseHistory, diffIDs, err = s.executor.getImageTypeAndHistoryAndDiffIDs(ctx, s.builder.FromImageID)
if err != nil {
return "", fmt.Errorf("getting history of base image %q: %w", s.builder.FromImageID, err)
}
for i := 0; i < len(diffIDs); i++ {
for i := range len(diffIDs) {
fmt.Fprintln(hash, diffIDs[i].String())
}
}
createdBy, err := s.getCreatedBy(currNode, addedContentDigest)
createdBy, err := s.getCreatedBy(currNode, addedContentDigest, lastInstruction)
if err != nil {
return "", err
}
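The visible part of generateCacheKey only shows the diff IDs being fed into the hash. A rough, simplified sketch of the idea (not the exact key derivation, which also folds in history and manifest details):
package main

import (
	"crypto/sha256"
	"encoding/hex"
	"fmt"
)

func main() {
	// Hypothetical base-image diff IDs plus the step's createdBy line.
	diffIDs := []string{"sha256:aaaa", "sha256:bbbb"}
	createdBy := "/bin/sh -c make all"

	h := sha256.New()
	for _, id := range diffIDs {
		fmt.Fprintln(h, id) // newline-delimited, like the loop in the hunk above
	}
	fmt.Fprintln(h, createdBy)
	fmt.Println(hex.EncodeToString(h.Sum(nil))) // usable as a cache image tag
}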
@ -2154,7 +2206,7 @@ func (s *StageExecutor) pushCache(ctx context.Context, src, cacheKey string) err
Compression: s.executor.compression,
SignaturePolicyPath: s.executor.signaturePolicyPath,
Store: s.executor.store,
SystemContext: s.executor.systemContext,
SystemContext: s.systemContext,
BlobDirectory: s.executor.blobDirectory,
SignBy: s.executor.signBy,
MaxRetries: s.executor.maxPullPushRetries,
@ -2192,7 +2244,7 @@ func (s *StageExecutor) pullCache(ctx context.Context, cacheKey string) (referen
options := buildah.PullOptions{
SignaturePolicyPath: s.executor.signaturePolicyPath,
Store: s.executor.store,
SystemContext: s.executor.systemContext,
SystemContext: s.systemContext,
BlobDirectory: s.executor.blobDirectory,
MaxRetries: s.executor.maxPullPushRetries,
RetryDelay: s.executor.retryPullPushDelay,
@ -2219,9 +2271,11 @@ func (s *StageExecutor) pullCache(ctx context.Context, cacheKey string) (referen
return nil, "", fmt.Errorf("failed pulling cache from all available sources %q", srcList)
}
// intermediateImageExists returns true if an intermediate image of currNode exists in the image store from a previous build.
// intermediateImageExists returns the image ID if an intermediate image of currNode exists in the image store from a previous build.
// It verifies this by checking the parent of the top layer of the image and the history.
func (s *StageExecutor) intermediateImageExists(ctx context.Context, currNode *parser.Node, addedContentDigest string, buildAddsLayer bool) (string, error) {
// If more than one image matches as a potential candidate, priority is given to the most recently built image.
func (s *StageExecutor) intermediateImageExists(ctx context.Context, currNode *parser.Node, addedContentDigest string, buildAddsLayer bool, lastInstruction bool) (string, error) {
cacheCandidates := []storage.Image{}
// Get the list of images available in the image store
images, err := s.executor.store.Images()
if err != nil {
@ -2230,7 +2284,7 @@ func (s *StageExecutor) intermediateImageExists(ctx context.Context, currNode *p
var baseHistory []v1.History
var baseDiffIDs []digest.Digest
if s.builder.FromImageID != "" {
_, baseHistory, baseDiffIDs, err = s.executor.getImageTypeAndHistoryAndDiffIDs(ctx, s.builder.FromImageID)
_, _, _, baseHistory, baseDiffIDs, err = s.executor.getImageTypeAndHistoryAndDiffIDs(ctx, s.builder.FromImageID)
if err != nil {
return "", fmt.Errorf("getting history of base image %q: %w", s.builder.FromImageID, err)
}
@ -2271,9 +2325,10 @@ func (s *StageExecutor) intermediateImageExists(ctx context.Context, currNode *p
if s.builder.TopLayer != imageParentLayerID {
continue
}
// Next we double-check that the history of this image is equivalent to the previous
// lines in the Dockerfile up to the point we are at in the build.
manifestType, history, diffIDs, err := s.executor.getImageTypeAndHistoryAndDiffIDs(ctx, image.ID)
imageOS, imageArchitecture, manifestType, history, diffIDs, err := s.executor.getImageTypeAndHistoryAndDiffIDs(ctx, image.ID)
if err != nil {
// It's possible that this image is for another architecture, which results
// in a custom-crafted error message that we'd have to use substring matching
@ -2286,15 +2341,38 @@ func (s *StageExecutor) intermediateImageExists(ctx context.Context, currNode *p
if manifestType != s.executor.outputFormat {
continue
}
// Compare the cached image's platform with the current build's target platform
currentArch := s.executor.architecture
currentOS := s.executor.os
if currentArch == "" && currentOS == "" {
currentOS, currentArch, _, err = parse.Platform(s.stage.Builder.Platform)
if err != nil {
logrus.Debugf("unable to parse default OS and Arch for the current build: %v", err)
}
}
if currentArch != "" && imageArchitecture != currentArch {
logrus.Debugf("cached image %q has architecture %q but current build targets %q, ignoring it", image.ID, imageArchitecture, currentArch)
continue
}
if currentOS != "" && imageOS != currentOS {
logrus.Debugf("cached image %q has OS %q but current build targets %q, ignoring it", image.ID, imageOS, currentOS)
continue
}
// children + currNode is the point of the Dockerfile we are currently at.
foundMatch, err := s.historyAndDiffIDsMatch(baseHistory, baseDiffIDs, currNode, history, diffIDs, addedContentDigest, buildAddsLayer)
foundMatch, err := s.historyAndDiffIDsMatch(baseHistory, baseDiffIDs, currNode, history, diffIDs, addedContentDigest, buildAddsLayer, lastInstruction)
if err != nil {
return "", err
}
if foundMatch {
return image.ID, nil
cacheCandidates = append(cacheCandidates, image)
}
}
if len(cacheCandidates) > 0 {
slices.SortFunc(cacheCandidates, func(a, b storage.Image) int { return a.Created.Compare(b.Created) })
return cacheCandidates[len(cacheCandidates)-1].ID, nil
}
return "", nil
}
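The candidate selection above keeps every matching image and then prefers the newest one. A self-contained sketch of that tie-break, using a stand-in struct instead of storage.Image:
package main

import (
	"fmt"
	"slices"
	"time"
)

type candidate struct {
	ID      string
	Created time.Time
}

func main() {
	// Hypothetical images that all passed the history and platform checks.
	cands := []candidate{
		{ID: "older", Created: time.Now().Add(-2 * time.Hour)},
		{ID: "newer", Created: time.Now().Add(-time.Hour)},
	}
	// Sort ascending by creation time and keep the last entry, as the hunk above does.
	slices.SortFunc(cands, func(a, b candidate) int { return a.Created.Compare(b.Created) })
	fmt.Println(cands[len(cands)-1].ID) // newer
}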
@ -2365,7 +2443,7 @@ func (s *StageExecutor) commit(ctx context.Context, createdBy string, emptyLayer
s.builder.SetStopSignal(config.StopSignal)
if config.Healthcheck != nil {
s.builder.SetHealthcheck(&buildahdocker.HealthConfig{
Test: append([]string{}, config.Healthcheck.Test...),
Test: slices.Clone(config.Healthcheck.Test),
Interval: config.Healthcheck.Interval,
Timeout: config.Healthcheck.Timeout,
StartPeriod: config.Healthcheck.StartPeriod,
@ -2389,15 +2467,33 @@ func (s *StageExecutor) commit(ctx context.Context, createdBy string, emptyLayer
for k, v := range config.Labels {
s.builder.SetLabel(k, v)
}
if s.executor.commonBuildOptions.IdentityLabel == types.OptionalBoolUndefined || s.executor.commonBuildOptions.IdentityLabel == types.OptionalBoolTrue {
switch s.executor.commonBuildOptions.IdentityLabel {
case types.OptionalBoolTrue:
s.builder.SetLabel(buildah.BuilderIdentityAnnotation, define.Version)
case types.OptionalBoolFalse:
// nothing - don't clear it if there's a value set in the base image
default:
if s.executor.timestamp == nil && s.executor.sourceDateEpoch == nil {
s.builder.SetLabel(buildah.BuilderIdentityAnnotation, define.Version)
}
}
for _, key := range s.executor.unsetLabels {
s.builder.UnsetLabel(key)
}
for _, annotationSpec := range s.executor.annotations {
annotationk, annotationv, _ := strings.Cut(annotationSpec, "=")
s.builder.SetAnnotation(annotationk, annotationv)
if finalInstruction {
if s.executor.inheritAnnotations == types.OptionalBoolFalse {
// If the user has set `--inherit-annotations=false`, do not
// inherit annotations from the base image.
s.builder.ClearAnnotations()
}
// Add new annotations to the last step.
for _, annotationSpec := range s.executor.annotations {
annotationk, annotationv, _ := strings.Cut(annotationSpec, "=")
s.builder.SetAnnotation(annotationk, annotationv)
}
for _, key := range s.executor.unsetAnnotations {
s.builder.UnsetAnnotation(key)
}
}
if imageRef != nil {
logName := transports.ImageName(imageRef)
@ -2414,10 +2510,11 @@ func (s *StageExecutor) commit(ctx context.Context, createdBy string, emptyLayer
SignaturePolicyPath: s.executor.signaturePolicyPath,
ReportWriter: writer,
PreferredManifestType: s.executor.outputFormat,
SystemContext: s.executor.systemContext,
SystemContext: s.systemContext,
Squash: squash,
OmitHistory: s.executor.commonBuildOptions.OmitHistory,
EmptyLayer: emptyLayer,
OmitLayerHistoryEntry: s.hasLink,
BlobDirectory: s.executor.blobDirectory,
SignBy: s.executor.signBy,
MaxRetries: s.executor.maxPullPushRetries,
@ -2425,6 +2522,12 @@ func (s *StageExecutor) commit(ctx context.Context, createdBy string, emptyLayer
HistoryTimestamp: s.executor.timestamp,
Manifest: s.executor.manifest,
CompatSetParent: s.executor.compatSetParent,
SourceDateEpoch: s.executor.sourceDateEpoch,
RewriteTimestamp: s.executor.rewriteTimestamp,
CompatLayerOmissions: s.executor.compatLayerOmissions,
UnsetAnnotations: s.executor.unsetAnnotations,
Annotations: s.executor.annotations,
CreatedAnnotation: s.executor.createdAnnotation,
}
if finalInstruction {
options.ConfidentialWorkloadOptions = s.executor.confidentialWorkload
@ -2446,7 +2549,13 @@ func (s *StageExecutor) commit(ctx context.Context, createdBy string, emptyLayer
}
func (s *StageExecutor) generateBuildOutput(buildOutputOpts define.BuildOutputOption) error {
extractRootfsOpts := buildah.ExtractRootfsOptions{}
forceTimestamp := s.executor.timestamp
if s.executor.sourceDateEpoch != nil {
forceTimestamp = s.executor.sourceDateEpoch
}
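// In other words, when both flags are set, --source-date-epoch wins over --timestamp
// for the timestamps forced onto the extracted rootfs.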
extractRootfsOpts := buildah.ExtractRootfsOptions{
ForceTimestamp: forceTimestamp,
}
if unshare.IsRootless() {
// In order to maintain as much parity as possible
// with buildkit's version of --output and to avoid
@ -2460,7 +2569,12 @@ func (s *StageExecutor) generateBuildOutput(buildOutputOpts define.BuildOutputOp
extractRootfsOpts.StripSetgidBit = true
extractRootfsOpts.StripXattrs = true
}
rc, errChan, err := s.builder.ExtractRootfs(buildah.CommitOptions{}, extractRootfsOpts)
rc, errChan, err := s.builder.ExtractRootfs(buildah.CommitOptions{
HistoryTimestamp: s.executor.timestamp,
SourceDateEpoch: s.executor.sourceDateEpoch,
RewriteTimestamp: s.executor.rewriteTimestamp,
CompatLayerOmissions: s.executor.compatLayerOmissions,
}, extractRootfsOpts)
if err != nil {
return fmt.Errorf("failed to extract rootfs from given container image: %w", err)
}
@ -2487,3 +2601,34 @@ func (s *StageExecutor) EnsureContainerPathAs(path, user string, mode *os.FileMo
logrus.Debugf("EnsureContainerPath %q (owner %q, mode %o) in %q", path, user, mode, s.builder.ContainerID)
return s.builder.EnsureContainerPathAs(path, user, mode)
}
func (s *StageExecutor) buildMetadata(isLastStep bool, addcopy bool) string {
inheritLabels := ""
unsetAnnotations := ""
inheritAnnotations := ""
newAnnotations := ""
// If --inherit-labels was manually set to false then update the history.
if s.executor.inheritLabels == types.OptionalBoolFalse {
inheritLabels = "|inheritLabels=false"
}
if isLastStep {
for _, annotation := range s.executor.unsetAnnotations {
unsetAnnotations += "|unsetAnnotation=" + annotation
}
// If --inherit-annotations was manually set to false then update the history.
if s.executor.inheritAnnotations == types.OptionalBoolFalse {
inheritAnnotations = "|inheritAnnotations=false"
}
// If new annotations are added, they must be added as part of the last step of the build,
// so record in the history that new annotations were added, in order to make sure that
// builds can either reuse layers or bust the cache depending on the new annotations.
if len(s.executor.annotations) > 0 {
newAnnotations += strings.Join(s.executor.annotations, ",")
}
}
if addcopy {
return inheritLabels + " " + unsetAnnotations + " " + inheritAnnotations + " " + newAnnotations
}
return inheritLabels + unsetAnnotations + inheritAnnotations + newAnnotations
}
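For illustration, a hedged example of the suffix buildMetadata contributes to a last step's history entry; the flag values are hypothetical:
package main

import "fmt"

func main() {
	// Assume the build was run with --unset-annotation=org.example.note and
	// --inherit-annotations=false, and this is the last step.
	suffix := "|unsetAnnotation=org.example.note" + "|inheritAnnotations=false"
	fmt.Println(suffix) // appended to the step's createdBy string, so it feeds the cache key
}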


@ -11,6 +11,7 @@ import (
)
func TestHistoryEntriesEqual(t *testing.T) {
t.Parallel()
testCases := []struct {
a, b string
equal bool

View File

@ -12,6 +12,7 @@ import (
)
func TestGeneratePathChecksum(t *testing.T) {
t.Parallel()
tempDir := t.TempDir()
tempFile, err := os.CreateTemp(tempDir, "testfile")

View File

@ -19,7 +19,7 @@ import (
func importBuilderDataFromImage(ctx context.Context, store storage.Store, systemContext *types.SystemContext, imageID, containerName, containerID string) (*Builder, error) {
if imageID == "" {
return nil, errors.New("Internal error: imageID is empty in importBuilderDataFromImage")
return nil, errors.New("internal error: imageID is empty in importBuilderDataFromImage")
}
storeopts, err := storage.DefaultStoreOptions()
@ -39,7 +39,8 @@ func importBuilderDataFromImage(ctx context.Context, store storage.Store, system
defer src.Close()
imageDigest := ""
manifestBytes, manifestType, err := src.GetManifest(ctx, nil)
unparsedTop := image.UnparsedInstance(src, nil)
manifestBytes, manifestType, err := unparsedTop.Manifest(ctx)
if err != nil {
return nil, fmt.Errorf("loading image manifest for %q: %w", transports.ImageName(ref), err)
}
@ -48,6 +49,7 @@ func importBuilderDataFromImage(ctx context.Context, store storage.Store, system
}
var instanceDigest *digest.Digest
unparsedInstance := unparsedTop // for instanceDigest
if manifest.MIMETypeIsMultiImage(manifestType) {
list, err := manifest.ListFromBlob(manifestBytes, manifestType)
if err != nil {
@ -58,9 +60,10 @@ func importBuilderDataFromImage(ctx context.Context, store storage.Store, system
return nil, fmt.Errorf("finding an appropriate image in manifest list %q: %w", transports.ImageName(ref), err)
}
instanceDigest = &instance
unparsedInstance = image.UnparsedInstance(src, instanceDigest)
}
image, err := image.FromUnparsedImage(ctx, systemContext, image.UnparsedInstance(src, instanceDigest))
image, err := image.FromUnparsedImage(ctx, systemContext, unparsedInstance)
if err != nil {
return nil, fmt.Errorf("instantiating image for %q instance %q: %w", transports.ImageName(ref), instanceDigest, err)
}

info.go

@ -23,7 +23,7 @@ import (
// InfoData holds the info type, i.e. store, host, etc., and the data for each type
type InfoData struct {
Type string
Data map[string]interface{}
Data map[string]any
}
// Info returns the store and host information
@ -42,8 +42,8 @@ func Info(store storage.Store) ([]InfoData, error) {
return info, nil
}
func hostInfo() map[string]interface{} {
info := map[string]interface{}{}
func hostInfo() map[string]any {
info := map[string]any{}
ps := internalUtil.NormalizePlatform(v1.Platform{OS: runtime.GOOS, Architecture: runtime.GOARCH})
info["os"] = ps.OS
info["arch"] = ps.Architecture
@ -77,7 +77,7 @@ func hostInfo() map[string]interface{} {
info["SwapFree"] = mi.SwapFree
}
hostDistributionInfo := getHostDistributionInfo()
info["Distribution"] = map[string]interface{}{
info["Distribution"] = map[string]any{
"distribution": hostDistributionInfo["Distribution"],
"version": hostDistributionInfo["Version"],
}
@ -128,9 +128,9 @@ func hostInfo() map[string]interface{} {
}
// top-level "store" info
func storeInfo(store storage.Store) (map[string]interface{}, error) {
func storeInfo(store storage.Store) (map[string]any, error) {
// let's say: storage driver in use, number of images, number of containers
info := map[string]interface{}{}
info := map[string]any{}
info["GraphRoot"] = store.GraphRoot()
info["RunRoot"] = store.RunRoot()
info["GraphDriverName"] = store.GraphDriverName()
@ -148,7 +148,7 @@ func storeInfo(store storage.Store) (map[string]interface{}, error) {
if err != nil {
logrus.Error(err, "error getting number of images")
}
info["ImageStore"] = map[string]interface{}{
info["ImageStore"] = map[string]any{
"number": len(images),
}
@ -156,7 +156,7 @@ func storeInfo(store storage.Store) (map[string]interface{}, error) {
if err != nil {
logrus.Error(err, "error getting number of containers")
}
info["ContainerStore"] = map[string]interface{}{
info["ContainerStore"] = map[string]any{
"number": len(containers),
}


@ -1,6 +1,7 @@
package config
import (
"maps"
"slices"
"github.com/containers/image/v5/manifest"
@ -25,9 +26,7 @@ func Schema2ConfigFromGoDockerclientConfig(config *dockerclient.Config) *manifes
}
}
labels := make(map[string]string)
for k, v := range config.Labels {
labels[k] = v
}
maps.Copy(labels, config.Labels)
volumes := make(map[string]struct{})
for v := range config.Volumes {
volumes[v] = struct{}{}
@ -82,9 +81,7 @@ func GoDockerclientConfigFromSchema2Config(s2config *manifest.Schema2Config) *do
}
}
labels := make(map[string]string)
for k, v := range s2config.Labels {
labels[k] = v
}
maps.Copy(labels, s2config.Labels)
volumes := make(map[string]struct{})
for v := range s2config.Volumes {
volumes[v] = struct{}{}
@ -101,17 +98,17 @@ func GoDockerclientConfigFromSchema2Config(s2config *manifest.Schema2Config) *do
Tty: s2config.Tty,
OpenStdin: s2config.OpenStdin,
StdinOnce: s2config.StdinOnce,
Env: append([]string{}, s2config.Env...),
Cmd: append([]string{}, s2config.Cmd...),
Env: slices.Clone(s2config.Env),
Cmd: slices.Clone(s2config.Cmd),
Healthcheck: healthCheck,
ArgsEscaped: s2config.ArgsEscaped,
Image: s2config.Image,
Volumes: volumes,
WorkingDir: s2config.WorkingDir,
Entrypoint: append([]string{}, s2config.Entrypoint...),
Entrypoint: slices.Clone(s2config.Entrypoint),
NetworkDisabled: s2config.NetworkDisabled,
MacAddress: s2config.MacAddress,
OnBuild: append([]string{}, s2config.OnBuild...),
OnBuild: slices.Clone(s2config.OnBuild),
Labels: labels,
StopSignal: s2config.StopSignal,
Shell: s2config.Shell,

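The map and slice copies in this file are mechanical swaps from hand-rolled loops to the Go 1.21 standard-library helpers; a minimal sketch of the equivalence:
package main

import (
	"fmt"
	"maps"
	"slices"
)

func main() {
	srcLabels := map[string]string{"io.example": "1"}
	labels := make(map[string]string)
	maps.Copy(labels, srcLabels) // replaces the removed for-range copy loop

	env := []string{"PATH=/usr/bin"}
	envCopy := slices.Clone(env) // replaces append([]string{}, env...)

	fmt.Println(labels, envCopy)
}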