Merge pull request #21472 from mheon/vendor_runc_main

Update to runc main, removing pin to an older version
openshift-merge-bot[bot] committed this on 2024-02-02 22:01:02 +00:00 (via GitHub)
commit 2431fb3f0b
130 changed files with 6136 additions and 2387 deletions


@ -0,0 +1,8 @@
####> This option file is used in:
####> podman farm build
####> If file is edited, make sure the changes
####> are applicable to all of those.
#### **--sbom-image-output**=*path*
When generating SBOMs, store the generated SBOM in the specified path in the
output image. There is no default.


@ -0,0 +1,10 @@
####> This option file is used in:
####> podman farm build
####> If file is edited, make sure the changes
####> are applicable to all of those.
#### **--sbom-image-purl-output**=*path*
When generating SBOMs, scan them for PURL ([package
URL](https://github.com/package-url/purl-spec/blob/master/PURL-SPECIFICATION.rst))
information, and save a list of found PURLs to the specified path in the output
image. There is no default.


@ -0,0 +1,24 @@
####> This option file is used in:
####> podman farm build
####> If file is edited, make sure the changes
####> are applicable to all of those.
#### **--sbom-merge-strategy**=*method*
If more than one **--sbom-scanner-command** value is being used, use the
specified method to merge the output from later commands with output from
earlier commands. Recognized values include:
- cat
  Concatenate the files.
- merge-cyclonedx-by-component-name-and-version
  Merge the "component" fields of JSON documents, ignoring values from
  documents when the combination of their "name" and "version" values is
  already present. Documents are processed in the order in which they are
  generated, which is the order in which the commands that generate them
  were specified.
- merge-spdx-by-package-name-and-versioninfo
  Merge the "package" fields of JSON documents, ignoring values from
  documents when the combination of their "name" and "versionInfo" values is
  already present. Documents are processed in the order in which they are
  generated, which is the order in which the commands that generate them
  were specified.
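
As a rough illustration of the CycloneDX strategy above (the SPDX strategy is analogous, keyed on "name" plus "versionInfo"), here is a minimal, self-contained Go sketch; it is not Buildah's implementation, and the document shape is reduced to just the fields the strategy keys on:

```go
package main

import (
	"encoding/json"
	"fmt"
)

// Reduced CycloneDX document: only the fields the strategy keys on.
type component struct {
	Name    string `json:"name"`
	Version string `json:"version"`
}

type cdx struct {
	Components []component `json:"components"`
}

// mergeCycloneDX keeps components from earlier documents and adds components
// from later ones only when their name+version pair has not been seen yet.
func mergeCycloneDX(docs ...[]byte) ([]byte, error) {
	seen := map[[2]string]struct{}{}
	var merged cdx
	for _, raw := range docs {
		var d cdx
		if err := json.Unmarshal(raw, &d); err != nil {
			return nil, err
		}
		for _, c := range d.Components {
			key := [2]string{c.Name, c.Version}
			if _, dup := seen[key]; dup {
				continue
			}
			seen[key] = struct{}{}
			merged.Components = append(merged.Components, c)
		}
	}
	return json.Marshal(merged)
}

func main() {
	rootfsDoc := []byte(`{"components":[{"name":"zlib","version":"1.3"}]}`)
	contextDoc := []byte(`{"components":[{"name":"zlib","version":"1.3"},{"name":"musl","version":"1.2.4"}]}`)
	out, err := mergeCycloneDX(rootfsDoc, contextDoc)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(out))
	// {"components":[{"name":"zlib","version":"1.3"},{"name":"musl","version":"1.2.4"}]}
}
```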


@ -0,0 +1,8 @@
####> This option file is used in:
####> podman farm build
####> If file is edited, make sure the changes
####> are applicable to all of those.
#### **--sbom-output**=*file*
When generating SBOMs, store the generated SBOM in the named file on the local
filesystem. There is no default.


@ -0,0 +1,10 @@
####> This option file is used in:
####> podman farm build
####> If file is edited, make sure the changes
####> are applicable to all of those.
#### **--sbom-purl-output**=*file*
When generating SBOMs, scan them for PURL ([package
URL](https://github.com/package-url/purl-spec/blob/master/PURL-SPECIFICATION.rst))
information, and save a list of found PURLs to the named file on the local
filesystem. There is no default.


@ -0,0 +1,15 @@
####> This option file is used in:
####> podman farm build
####> If file is edited, make sure the changes
####> are applicable to all of those.
#### **--sbom-scanner-command**=*command*
Generate SBOMs by running the specified command from the scanner image. If
multiple commands are specified, they are run in the order in which they are
specified. These text substitutions are performed:
- {ROOTFS}
  The root of the built image's filesystem, bind mounted.
- {CONTEXT}
  The build context and additional build contexts, bind mounted.
- {OUTPUT}
  The name of a temporary output file, to be read and merged with others or
  copied elsewhere.
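
For a concrete sense of these substitutions, a minimal Go sketch (not Buildah's code; the mount-point and output paths passed in are hypothetical):

```go
package main

import (
	"fmt"
	"strings"
)

// expandScannerCommand applies the {ROOTFS}, {CONTEXT}, and {OUTPUT}
// substitutions to a single --sbom-scanner-command value.
func expandScannerCommand(command, rootfs, contextDir, outputFile string) string {
	return strings.NewReplacer(
		"{ROOTFS}", rootfs,
		"{CONTEXT}", contextDir,
		"{OUTPUT}", outputFile,
	).Replace(command)
}

func main() {
	cmd := "/syft scan -q dir:{ROOTFS} --output cyclonedx-json={OUTPUT}"
	fmt.Println(expandScannerCommand(cmd, "/rootfs", "/context", "/scans/scan0.json"))
	// /syft scan -q dir:/rootfs --output cyclonedx-json=/scans/scan0.json
}
```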


@ -0,0 +1,7 @@
####> This option file is used in:
####> podman farm build
####> If file is edited, make sure the changes
####> are applicable to all of those.
#### **--sbom-scanner-image**=*image*
Generate SBOMs using the specified scanner image.


@ -0,0 +1,33 @@
####> This option file is used in:
####> podman farm build
####> If file is edited, make sure the changes
####> are applicable to all of those.
#### **--sbom**=*preset*
Generate SBOMs (Software Bills Of Materials) for the output image by scanning
the working container and build contexts using the named combination of scanner
image, scanner commands, and merge strategy. Must be specified with one or
more of **--sbom-image-output**, **--sbom-image-purl-output**, **--sbom-output**,
and **--sbom-purl-output**. Recognized presets, and the set of options which
they equate to:
- "syft", "syft-cyclonedx":
--sbom-scanner-image=ghcr.io/anchore/syft
--sbom-scanner-command="/syft scan -q dir:{ROOTFS} --output cyclonedx-json={OUTPUT}"
--sbom-scanner-command="/syft scan -q dir:{CONTEXT} --output cyclonedx-json={OUTPUT}"
--sbom-merge-strategy=merge-cyclonedx-by-component-name-and-version
- "syft-spdx":
--sbom-scanner-image=ghcr.io/anchore/syft
--sbom-scanner-command="/syft scan -q dir:{ROOTFS} --output spdx-json={OUTPUT}"
--sbom-scanner-command="/syft scan -q dir:{CONTEXT} --output spdx-json={OUTPUT}"
--sbom-merge-strategy=merge-spdx-by-package-name-and-versioninfo
- "trivy", "trivy-cyclonedx":
--sbom-scanner-image=ghcr.io/aquasecurity/trivy
--sbom-scanner-command="trivy filesystem -q {ROOTFS} --format cyclonedx --output {OUTPUT}"
--sbom-scanner-command="trivy filesystem -q {CONTEXT} --format cyclonedx --output {OUTPUT}"
--sbom-merge-strategy=merge-cyclonedx-by-component-name-and-version
- "trivy-spdx":
--sbom-scanner-image=ghcr.io/aquasecurity/trivy
--sbom-scanner-command="trivy filesystem -q {ROOTFS} --format spdx-json --output {OUTPUT}"
--sbom-scanner-command="trivy filesystem -q {CONTEXT} --format spdx-json --output {OUTPUT}"
--sbom-merge-strategy=merge-spdx-by-package-name-and-versioninfo


@ -326,6 +326,97 @@ Pull image policy. The default is **missing**.
@@option runtime-flag
#### **--sbom**=*preset*
Generate SBOMs (Software Bills Of Materials) for the output image by scanning
the working container and build contexts using the named combination of scanner
image, scanner commands, and merge strategy. Must be specified with one or
more of **--sbom-image-output**, **--sbom-image-purl-output**, **--sbom-output**,
and **--sbom-purl-output**. Recognized presets, and the set of options which
they equate to:
- "syft", "syft-cyclonedx":
--sbom-scanner-image=ghcr.io/anchore/syft
--sbom-scanner-command="/syft scan -q dir:{ROOTFS} --output cyclonedx-json={OUTPUT}"
--sbom-scanner-command="/syft scan -q dir:{CONTEXT} --output cyclonedx-json={OUTPUT}"
--sbom-merge-strategy=merge-cyclonedx-by-component-name-and-version
- "syft-spdx":
--sbom-scanner-image=ghcr.io/anchore/syft
--sbom-scanner-command="/syft scan -q dir:{ROOTFS} --output spdx-json={OUTPUT}"
--sbom-scanner-command="/syft scan -q dir:{CONTEXT} --output spdx-json={OUTPUT}"
--sbom-merge-strategy=merge-spdx-by-package-name-and-versioninfo
- "trivy", "trivy-cyclonedx":
--sbom-scanner-image=ghcr.io/aquasecurity/trivy
--sbom-scanner-command="trivy filesystem -q {ROOTFS} --format cyclonedx --output {OUTPUT}"
--sbom-scanner-command="trivy filesystem -q {CONTEXT} --format cyclonedx --output {OUTPUT}"
--sbom-merge-strategy=merge-cyclonedx-by-component-name-and-version
- "trivy-spdx":
--sbom-scanner-image=ghcr.io/aquasecurity/trivy
--sbom-scanner-command="trivy filesystem -q {ROOTFS} --format spdx-json --output {OUTPUT}"
--sbom-scanner-command="trivy filesystem -q {CONTEXT} --format spdx-json --output {OUTPUT}"
--sbom-merge-strategy=merge-spdx-by-package-name-and-versioninfo
#### **--sbom-image-output**=*path*
When generating SBOMs, store the generated SBOM in the specified path in the
output image. There is no default.
#### **--sbom-image-purl-output**=*path*
When generating SBOMs, scan them for PURL ([package
URL](https://github.com/package-url/purl-spec/blob/master/PURL-SPECIFICATION.rst))
information, and save a list of found PURLs to the specified path in the output
image. There is no default.
#### **--sbom-merge-strategy**=*method*
If more than one **--sbom-scanner-command** value is being used, use the
specified method to merge the output from later commands with output from
earlier commands. Recognized values include:
- cat
  Concatenate the files.
- merge-cyclonedx-by-component-name-and-version
  Merge the "component" fields of JSON documents, ignoring values from
  documents when the combination of their "name" and "version" values is
  already present. Documents are processed in the order in which they are
  generated, which is the order in which the commands that generate them
  were specified.
- merge-spdx-by-package-name-and-versioninfo
  Merge the "package" fields of JSON documents, ignoring values from
  documents when the combination of their "name" and "versionInfo" values is
  already present. Documents are processed in the order in which they are
  generated, which is the order in which the commands that generate them
  were specified.
#### **--sbom-output**=*file*
When generating SBOMs, store the generated SBOM in the named file on the local
filesystem. There is no default.
#### **--sbom-purl-output**=*file*
When generating SBOMs, scan them for PURL ([package
URL](https://github.com/package-url/purl-spec/blob/master/PURL-SPECIFICATION.rst))
information, and save a list of found PURLs to the named file on the local
filesystem. There is no default.
#### **--sbom-scanner-command**=*command*
Generate SBOMs by running the specified command from the scanner image. If
multiple commands are specified, they are run in the order in which they are
specified. These text substitutions are performed:
- {ROOTFS}
  The root of the built image's filesystem, bind mounted.
- {CONTEXT}
  The build context and additional build contexts, bind mounted.
- {OUTPUT}
  The name of a temporary output file, to be read and merged with others or
  copied elsewhere.
#### **--sbom-scanner-image**=*image*
Generate SBOMs using the specified scanner image.
@@option secret.image
@@option security-opt.image


@ -179,6 +179,22 @@ Build only on farm nodes that match the given platforms.
@@option runtime-flag
@@option sbom
@@option sbom-image-output
@@option sbom-image-purl-output
@@option sbom-merge-strategy
@@option sbom-output
@@option sbom-purl-output
@@option sbom-scanner-command
@@option sbom-scanner-image
@@option secret.image
@@option security-opt.image

go.mod

@ -10,7 +10,7 @@ require (
github.com/checkpoint-restore/checkpointctl v1.1.0
github.com/checkpoint-restore/go-criu/v7 v7.0.0
github.com/containernetworking/plugins v1.4.0
github.com/containers/buildah v1.33.2-0.20231121195905-d1a1c53c8e1c
github.com/containers/buildah v1.34.1-0.20240201124221-b850c711ff5c
github.com/containers/common v0.57.1-0.20240130143645-b26099256b92
github.com/containers/conmon v2.0.20+incompatible
github.com/containers/gvisor-tap-vsock v0.7.2
@ -18,7 +18,7 @@ require (
github.com/containers/libhvee v0.6.0
github.com/containers/ocicrypt v1.1.9
github.com/containers/psgo v1.8.0
github.com/containers/storage v1.52.1-0.20240130205044-62997abeaf2f
github.com/containers/storage v1.52.1-0.20240202181245-1419a5980565
github.com/coreos/go-systemd/v22 v22.5.1-0.20231103132048-7d375ecc2b09
github.com/coreos/stream-metadata-go v0.4.4
github.com/crc-org/vfkit v0.5.0
@ -43,6 +43,7 @@ require (
github.com/mattn/go-shellwords v1.0.12
github.com/mattn/go-sqlite3 v1.14.20
github.com/mdlayher/vsock v1.2.1
github.com/moby/sys/user v0.1.0
github.com/moby/term v0.5.0
github.com/nxadm/tail v1.4.11
github.com/onsi/ginkgo/v2 v2.15.0
@ -71,7 +72,7 @@ require (
golang.org/x/sys v0.16.0
golang.org/x/term v0.16.0
golang.org/x/text v0.14.0
google.golang.org/protobuf v1.31.0
google.golang.org/protobuf v1.32.0
gopkg.in/inf.v0 v0.9.1
gopkg.in/yaml.v3 v3.0.1
k8s.io/kubernetes v1.28.4
@ -98,7 +99,7 @@ require (
github.com/containerd/typeurl/v2 v2.1.1 // indirect
github.com/containernetworking/cni v1.1.2 // indirect
github.com/containers/libtrust v0.0.0-20230121012942-c1716e8a8d01 // indirect
github.com/containers/luksy v0.0.0-20231030195837-b5a7f79da98b // indirect
github.com/containers/luksy v0.0.0-20240129181507-b62d551ce6d8 // indirect
github.com/coreos/go-oidc/v3 v3.9.0 // indirect
github.com/coreos/go-systemd v0.0.0-20190719114852-fd7a80b32e1f // indirect
github.com/cyberphone/json-canonicalization v0.0.0-20231217050601-ba74d44ecf5f // indirect
@ -109,7 +110,7 @@ require (
github.com/docker/docker-credential-helpers v0.8.1 // indirect
github.com/felixge/httpsnoop v1.0.4 // indirect
github.com/fsnotify/fsnotify v1.7.0 // indirect
github.com/fsouza/go-dockerclient v1.10.0 // indirect
github.com/fsouza/go-dockerclient v1.10.1 // indirect
github.com/gabriel-vasile/mimetype v1.4.2 // indirect
github.com/gin-contrib/sse v0.1.0 // indirect
github.com/gin-gonic/gin v1.9.1 // indirect
@ -160,11 +161,10 @@ require (
github.com/miekg/pkcs11 v1.1.1 // indirect
github.com/mistifyio/go-zfs/v3 v3.0.1 // indirect
github.com/mitchellh/mapstructure v1.5.0 // indirect
github.com/moby/buildkit v0.12.3 // indirect
github.com/moby/buildkit v0.12.5 // indirect
github.com/moby/patternmatcher v0.6.0 // indirect
github.com/moby/sys/mountinfo v0.7.1 // indirect
github.com/moby/sys/sequential v0.5.0 // indirect
github.com/moby/sys/user v0.1.0 // indirect
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect
github.com/modern-go/reflect2 v1.0.2 // indirect
github.com/morikuni/aec v1.0.0 // indirect
@ -220,4 +220,4 @@ require (
tags.cncf.io/container-device-interface/specs-go v0.6.0 // indirect
)
replace github.com/opencontainers/runc => github.com/opencontainers/runc v1.1.1-0.20230904132852-a0466dd76f23
replace github.com/opencontainers/runc => github.com/opencontainers/runc v1.1.1-0.20240131200429-02120488a4c0

go.sum

@ -152,7 +152,7 @@ github.com/cilium/ebpf v0.0.0-20200110133405-4032b1d8aae3/go.mod h1:MA5e5Lr8slmE
github.com/cilium/ebpf v0.0.0-20200702112145-1c8d4c9ef775/go.mod h1:7cR51M8ViRLIdUjrmSXlK9pkrsDlLHbO8jiB8X8JnOc=
github.com/cilium/ebpf v0.2.0/go.mod h1:To2CFviqOWL/M0gIMsvSMlqe7em/l1ALkX1PyjrX2Qs=
github.com/cilium/ebpf v0.4.0/go.mod h1:4tRaxcgiL706VnOzHOdBlY8IEAIdxINsQBcU4xJJXRs=
github.com/cilium/ebpf v0.11.0/go.mod h1:WE7CZAnqOL2RouJ4f1uyNhqr2P4CCvXFIqdRDUgWsVs=
github.com/cilium/ebpf v0.12.3/go.mod h1:TctK1ivibvI3znr66ljgi4hqOT8EYQjz1KWBfb1UVgM=
github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw=
github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc=
github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk=
@ -255,8 +255,8 @@ github.com/containernetworking/plugins v0.8.6/go.mod h1:qnw5mN19D8fIwkqW7oHHYDHV
github.com/containernetworking/plugins v0.9.1/go.mod h1:xP/idU2ldlzN6m4p5LmGiwRDjeJr6FLK6vuiUwoH7P8=
github.com/containernetworking/plugins v1.4.0 h1:+w22VPYgk7nQHw7KT92lsRmuToHvb7wwSv9iTbXzzic=
github.com/containernetworking/plugins v1.4.0/go.mod h1:UYhcOyjefnrQvKvmmyEKsUA+M9Nfn7tqULPpH0Pkcj0=
github.com/containers/buildah v1.33.2-0.20231121195905-d1a1c53c8e1c h1:E7nxvH3N3kpyson0waJv1X+eY9hAs+x2zQswsK+//yY=
github.com/containers/buildah v1.33.2-0.20231121195905-d1a1c53c8e1c/go.mod h1:oMNfVrZGEfWVOxXTNOYPMdZzDfSo2umURK/TO0d8TRk=
github.com/containers/buildah v1.34.1-0.20240201124221-b850c711ff5c h1:r+1vFyTAoXptJrsPsnOMI3G0jm4+BCfXAcIyuA33lzo=
github.com/containers/buildah v1.34.1-0.20240201124221-b850c711ff5c/go.mod h1:Hw4qo2URFpWvZ2tjLstoQMpNC6+gR4PtxQefvV/UKaA=
github.com/containers/common v0.57.1-0.20240130143645-b26099256b92 h1:Q60+ofGhDjVxY5lvYmmcVN8aeS9gtQ6pAn/pyLh7rRM=
github.com/containers/common v0.57.1-0.20240130143645-b26099256b92/go.mod h1:Na7hGh5WnmB0RdGkKyb6JQb6DtKrs5qoIGrPucuR8t0=
github.com/containers/conmon v2.0.20+incompatible h1:YbCVSFSCqFjjVwHTPINGdMX1F6JXHGTUje2ZYobNrkg=
@ -269,8 +269,8 @@ github.com/containers/libhvee v0.6.0 h1:tUzwSz8R0GjR6IctgDnkTMjdtCk5Mxhpai4Vyv6U
github.com/containers/libhvee v0.6.0/go.mod h1:f/q1wCdQqOLiK3IZqqBfOD7exMZYBU5pDYsrMa/pSFg=
github.com/containers/libtrust v0.0.0-20230121012942-c1716e8a8d01 h1:Qzk5C6cYglewc+UyGf6lc8Mj2UaPTHy/iF2De0/77CA=
github.com/containers/libtrust v0.0.0-20230121012942-c1716e8a8d01/go.mod h1:9rfv8iPl1ZP7aqh9YA68wnZv2NUDbXdcdPHVz0pFbPY=
github.com/containers/luksy v0.0.0-20231030195837-b5a7f79da98b h1:8XvNAm+g7ivwPUkyiHvBs7z356JWpK9a0FDaek86+sY=
github.com/containers/luksy v0.0.0-20231030195837-b5a7f79da98b/go.mod h1:menB9p4o5HckgcLW6cO0+dl6+axkVmSqKlrNcratsh4=
github.com/containers/luksy v0.0.0-20240129181507-b62d551ce6d8 h1:0p58QJRICjkRVCDix1nsnyrtJ3Qj4CWcGd1bOEY9sVY=
github.com/containers/luksy v0.0.0-20240129181507-b62d551ce6d8/go.mod h1:oMhW1fWXz1FGN97rhycbuAwrkXXV1z5c/Bjbn0CSlFY=
github.com/containers/ocicrypt v1.0.1/go.mod h1:MeJDzk1RJHv89LjsH0Sp5KTY3ZYkjXO/C+bKAeWFIrc=
github.com/containers/ocicrypt v1.1.0/go.mod h1:b8AOe0YR67uU8OqfVNcznfFpAzu3rdgUV4GP9qXPfu4=
github.com/containers/ocicrypt v1.1.1/go.mod h1:Dm55fwWm1YZAjYRaJ94z2mfZikIyIN4B0oB3dj3jFxY=
@ -279,8 +279,8 @@ github.com/containers/ocicrypt v1.1.9/go.mod h1:dTKx1918d8TDkxXvarscpNVY+lyPakPN
github.com/containers/psgo v1.8.0 h1:2loGekmGAxM9ir5OsXWEfGwFxorMPYnc6gEDsGFQvhY=
github.com/containers/psgo v1.8.0/go.mod h1:T8ZxnX3Ur4RvnhxFJ7t8xJ1F48RhiZB4rSrOaR/qGHc=
github.com/containers/storage v1.43.0/go.mod h1:uZ147thiIFGdVTjMmIw19knttQnUCl3y9zjreHrg11s=
github.com/containers/storage v1.52.1-0.20240130205044-62997abeaf2f h1:BJSLHe7f1tgu53d8mGIK/y2KhEev5lggWlIk1rWYT7k=
github.com/containers/storage v1.52.1-0.20240130205044-62997abeaf2f/go.mod h1:T/ZMocbhShnMLIF0pdkiLPwpkwlGlyUWJeSXnfC/uew=
github.com/containers/storage v1.52.1-0.20240202181245-1419a5980565 h1:Gcirfx2DNoayB/+ypLgl5+ABzIPPDAoncs1qgZHHQHE=
github.com/containers/storage v1.52.1-0.20240202181245-1419a5980565/go.mod h1:2E/QBqWVcJXwumP7nVUrampwRNL4XKjHL/aQya7ZdhI=
github.com/coreos/bbolt v1.3.2/go.mod h1:iRUV2dpdMOn7Bo10OQBFzIJO9kkE559Wcmn+qkEiiKk=
github.com/coreos/etcd v3.3.10+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE=
github.com/coreos/go-iptables v0.4.5/go.mod h1:/mVI274lEDI2ns62jHCDnCyBF9Iwsmekav8Dbxlm1MU=
@ -393,8 +393,8 @@ github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4
github.com/fsnotify/fsnotify v1.6.0/go.mod h1:sl3t1tCWJFWoRz9R8WJCbQihKKwmorjAbSClcnxKAGw=
github.com/fsnotify/fsnotify v1.7.0 h1:8JEhPFa5W2WU7YfeZzPNqzMP6Lwt7L2715Ggo0nosvA=
github.com/fsnotify/fsnotify v1.7.0/go.mod h1:40Bi/Hjc2AVfZrqy+aj+yEI+/bRxZnMJyTJwOpGvigM=
github.com/fsouza/go-dockerclient v1.10.0 h1:ppSBsbR60I1DFbV4Ag7LlHlHakHFRNLk9XakATW1yVQ=
github.com/fsouza/go-dockerclient v1.10.0/go.mod h1:+iNzAW78AzClIBTZ6WFjkaMvOgz68GyCJ236b1opLTs=
github.com/fsouza/go-dockerclient v1.10.1 h1:bSU5Wu2ARdub+iv9VtoDsN8yBUI0vgflmshbeQLKhvc=
github.com/fsouza/go-dockerclient v1.10.1/go.mod h1:dyzGriw6v3pK4O4O1u/X+vXxDDsrnLLkCqYkcLsDq2k=
github.com/fullsailor/pkcs7 v0.0.0-20190404230743-d7302db945fa/go.mod h1:KnogPXtdwXqoenmZCw6S+25EAm2MkxbG0deNDu4cbSA=
github.com/gabriel-vasile/mimetype v1.4.2 h1:w5qFW6JKBz9Y393Y4q372O9A7cUSequkh1Q7OhCmWKU=
github.com/gabriel-vasile/mimetype v1.4.2/go.mod h1:zApsH/mKG4w07erKIaJPFiX0Tsq9BFQgN3qGY5GnNgA=
@ -783,8 +783,8 @@ github.com/mitchellh/mapstructure v1.4.1/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RR
github.com/mitchellh/mapstructure v1.5.0 h1:jeMsZIYE/09sWLaz43PL7Gy6RuMjD2eJVyuac5Z2hdY=
github.com/mitchellh/mapstructure v1.5.0/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo=
github.com/mitchellh/osext v0.0.0-20151018003038-5e2d6d41470f/go.mod h1:OkQIRizQZAeMln+1tSwduZz7+Af5oFlKirV/MSYes2A=
github.com/moby/buildkit v0.12.3 h1:cFaPVnyC0PwAP5xHHfzdU5v9rgQrCi6HnGSg3WuFKp4=
github.com/moby/buildkit v0.12.3/go.mod h1:adB4y0SxxX8trnrY+oEulb48ODLqPO6pKMF0ppGcCoI=
github.com/moby/buildkit v0.12.5 h1:RNHH1l3HDhYyZafr5EgstEu8aGNCwyfvMtrQDtjH9T0=
github.com/moby/buildkit v0.12.5/go.mod h1:YGwjA2loqyiYfZeEo8FtI7z4x5XponAaIWsWcSjWwso=
github.com/moby/locker v1.0.1/go.mod h1:S7SDdo5zpBK84bzzVlKr2V0hz+7x9hWbYC/kq7oQppc=
github.com/moby/patternmatcher v0.6.0 h1:GmP9lR19aU5GqSSFko+5pRqHi+Ohk1O69aFiKkVGiPk=
github.com/moby/patternmatcher v0.6.0/go.mod h1:hDPoyOpDY7OrrMDLaYoY3hf52gNCR/YOUYxkhApJIxc=
@ -811,7 +811,7 @@ github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjY
github.com/montanaflynn/stats v0.0.0-20171201202039-1bf9dbcd8cbe/go.mod h1:wL8QJuTMNUDYhXwkmfOly8iTdp5TEcJFWZD2D7SIkUc=
github.com/morikuni/aec v1.0.0 h1:nP9CBfwrvYnBRgY6qfDQkygYDmYwOilePFkwzv4dU8A=
github.com/morikuni/aec v1.0.0/go.mod h1:BbKIizmSmc5MMPqRYbxO4ZU0S0+P200+tUnFx7PXmsc=
github.com/mrunalp/fileutils v0.5.0/go.mod h1:M1WthSahJixYnrXQl/DFQuteStB1weuxD2QJNHXfbSQ=
github.com/mrunalp/fileutils v0.5.1/go.mod h1:M1WthSahJixYnrXQl/DFQuteStB1weuxD2QJNHXfbSQ=
github.com/munnerz/goautoneg v0.0.0-20120707110453-a547fc61f48d/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ=
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ=
github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U=
@ -859,8 +859,8 @@ github.com/opencontainers/image-spec v1.0.0/go.mod h1:BtxoFyWECRxE4U/7sNtV5W15zM
github.com/opencontainers/image-spec v1.0.1/go.mod h1:BtxoFyWECRxE4U/7sNtV5W15zMzWCbyJoFRP3s7yZA0=
github.com/opencontainers/image-spec v1.1.0-rc6 h1:XDqvyKsJEbRtATzkgItUqBA7QHk58yxX1Ov9HERHNqU=
github.com/opencontainers/image-spec v1.1.0-rc6/go.mod h1:W4s4sFTMaBeK1BQLXbG4AdM2szdn85PY75RI83NrTrM=
github.com/opencontainers/runc v1.1.1-0.20230904132852-a0466dd76f23 h1:CjJqzUWt07IJR6gO+Ron5qEcXomyLZLgURiSCXN6vXM=
github.com/opencontainers/runc v1.1.1-0.20230904132852-a0466dd76f23/go.mod h1:UkHdGiHfjdRR/suiePnXB844WcjZ0RcfGm2mQS/V5jM=
github.com/opencontainers/runc v1.1.1-0.20240131200429-02120488a4c0 h1:NwSQ/5rex97Rum/xZOMjlDQbbZ8YJKOTihf9sxqHxtE=
github.com/opencontainers/runc v1.1.1-0.20240131200429-02120488a4c0/go.mod h1:tBsQqk9ETVlXxzXjk2Xh/1VjxC/U3Gaq5ps/rC/cadE=
github.com/opencontainers/runtime-spec v0.1.2-0.20190507144316-5b71a03e2700/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0=
github.com/opencontainers/runtime-spec v1.0.1/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0=
github.com/opencontainers/runtime-spec v1.0.2-0.20190207185410-29686dbc5559/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0=
@ -1180,7 +1180,6 @@ golang.org/x/crypto v0.0.0-20210322153248-0c34fe9e7dc2/go.mod h1:T9bdIzuCu7OtxOm
golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
golang.org/x/crypto v0.0.0-20220622213112-05595931fe9d/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4=
golang.org/x/crypto v0.1.0/go.mod h1:RecgLatLF4+eUMCP1PoPZQb+cVrJcOPbHkTkbkB9sbw=
golang.org/x/crypto v0.12.0/go.mod h1:NF0Gs7EO5K4qLn+Ylc+fih8BSTeIjAP05siRnAh98yw=
golang.org/x/crypto v0.18.0 h1:PGVlW0xEltQnzFZ55hkuX5+KLyrMYhHld1YHO4AKcdc=
golang.org/x/crypto v0.18.0/go.mod h1:R0j02AL6hcrfOiy9T4ZYp/rcWeMxM3L6QYxlOuEG1mg=
golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
@ -1269,7 +1268,6 @@ golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug
golang.org/x/net v0.1.0/go.mod h1:Cx3nUiGt4eDBEyega/BKRp+/AlGL8hYe7U9odMt2Cco=
golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs=
golang.org/x/net v0.10.0/go.mod h1:0qNGK6F8kojg2nk9dLZ2mShWaEBan6FAoqfSigmmuDg=
golang.org/x/net v0.14.0/go.mod h1:PpSgVXXLK0OxS0F31C1/tv6XNguvCrnXIDrFMspZIUI=
golang.org/x/net v0.20.0 h1:aCL9BSgETF1k+blQaYUBx9hJ9LOGP3gAVemcZlf1Kpo=
golang.org/x/net v0.20.0/go.mod h1:z8BVo6PvndSri0LbOE3hAn0apkU+1YvI6E70E9jsnvY=
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
@ -1385,7 +1383,7 @@ golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.11.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.12.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.14.1-0.20231108175955-e4099bfacb8c/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
golang.org/x/sys v0.15.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
golang.org/x/sys v0.16.0 h1:xWw16ngr6ZMtmxDyKyIgsE93KNKz5HKmMa3b8ALHidU=
golang.org/x/sys v0.16.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
@ -1394,7 +1392,6 @@ golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuX
golang.org/x/term v0.1.0/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k=
golang.org/x/term v0.8.0/go.mod h1:xPskH00ivmX89bAKVGSKKtLOWNx2+17Eiy94tnKShWo=
golang.org/x/term v0.11.0/go.mod h1:zC9APTIj3jG3FdV/Ons+XE1riIZXG4aZ4GTHiPZJPIU=
golang.org/x/term v0.16.0 h1:m+B6fahuftsE9qjo0VWp2FW0mB3MTJvR0BaMQrq0pmE=
golang.org/x/term v0.16.0/go.mod h1:yn7UURbUtPyrVJPGPq404EukNFxcm/foM+bV/bfcDsY=
golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
@ -1410,7 +1407,6 @@ golang.org/x/text v0.3.8/go.mod h1:E6s5w1FMmriuDzIBO73fBruAKo1PCIq6d2Q6DHfQ8WQ=
golang.org/x/text v0.4.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
golang.org/x/text v0.9.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8=
golang.org/x/text v0.12.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE=
golang.org/x/text v0.14.0 h1:ScX5w1eTa3QqT8oi6+ziP7dTV1S2+ALU0bI+0zXKWiQ=
golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU=
golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
@ -1563,8 +1559,8 @@ google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp0
google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc=
google.golang.org/protobuf v1.27.1/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc=
google.golang.org/protobuf v1.28.1/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I=
google.golang.org/protobuf v1.31.0 h1:g0LDEJHgrBl9N9r17Ru3sqWhkIx2NB67okBHPwC7hs8=
google.golang.org/protobuf v1.31.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I=
google.golang.org/protobuf v1.32.0 h1:pPC6BG5ex8PDFnkbrGU3EixyhKcQ2aDuBS36lqK/C7I=
google.golang.org/protobuf v1.32.0/go.mod h1:c6P6GXX6sHbq/GpV6MGZEdwhWPcYBgnhAHhKbcUYpos=
gopkg.in/airbrake/gobrake.v2 v2.0.9/go.mod h1:/h5ZAUhDkGaJfjzjKLSjv6zCL6O0LLBxU4K+aSYdM/U=
gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=


@ -47,7 +47,7 @@ import (
"github.com/containers/storage/pkg/lockfile"
stypes "github.com/containers/storage/types"
securejoin "github.com/cyphar/filepath-securejoin"
runcuser "github.com/opencontainers/runc/libcontainer/user"
runcuser "github.com/moby/sys/user"
spec "github.com/opencontainers/runtime-spec/specs-go"
"github.com/opencontainers/runtime-tools/generate"
"github.com/opencontainers/selinux/go-selinux"


@ -3,7 +3,7 @@
package libpod
import (
"github.com/opencontainers/runc/libcontainer/user"
"github.com/moby/sys/user"
spec "github.com/opencontainers/runtime-spec/specs-go"
)


@ -4,7 +4,7 @@ package libpod
import (
"github.com/containers/common/pkg/capabilities"
"github.com/opencontainers/runc/libcontainer/user"
"github.com/moby/sys/user"
spec "github.com/opencontainers/runtime-spec/specs-go"
)


@ -41,6 +41,7 @@ func (r *Runtime) platformMakePod(pod *Pod, resourceLimits *spec.LinuxResources)
if err != nil {
return "", err
}
res.SkipDevices = true
// Need to both create and update the cgroup
// rather than create a new path in c/common for pod cgroup creation
// just create as if it is a ctr and then update figures out that we need to


@ -5,7 +5,7 @@ import (
"strconv"
securejoin "github.com/cyphar/filepath-securejoin"
"github.com/opencontainers/runc/libcontainer/user"
"github.com/moby/sys/user"
"github.com/sirupsen/logrus"
)


@ -8,7 +8,7 @@ import (
"sync"
"github.com/containers/storage/pkg/lockfile"
"github.com/opencontainers/runc/libcontainer/user"
"github.com/moby/sys/user"
spec "github.com/opencontainers/runtime-spec/specs-go"
)


@ -4,7 +4,7 @@ import (
"reflect"
"testing"
"github.com/opencontainers/runc/libcontainer/user"
"github.com/moby/sys/user"
spec "github.com/opencontainers/runtime-spec/specs-go"
)


@ -28,7 +28,7 @@ import (
"github.com/containers/storage/pkg/idtools"
stypes "github.com/containers/storage/types"
securejoin "github.com/cyphar/filepath-securejoin"
ruser "github.com/opencontainers/runc/libcontainer/user"
ruser "github.com/moby/sys/user"
"github.com/opencontainers/runtime-spec/specs-go"
"github.com/sirupsen/logrus"
"golang.org/x/term"


@ -6,7 +6,7 @@ import (
"time"
"github.com/containers/storage/pkg/idtools"
ruser "github.com/opencontainers/runc/libcontainer/user"
ruser "github.com/moby/sys/user"
"github.com/opencontainers/runtime-spec/specs-go"
"github.com/stretchr/testify/assert"
)


@ -27,12 +27,12 @@ env:
####
# GCE project where images live
IMAGE_PROJECT: "libpod-218412"
FEDORA_NAME: "fedora-39β"
FEDORA_NAME: "fedora-39"
PRIOR_FEDORA_NAME: "fedora-38"
DEBIAN_NAME: "debian-13"
# Image identifiers
IMAGE_SUFFIX: "c20231004t194547z-f39f38d13"
IMAGE_SUFFIX: "c20240102t212217z-f39f38d13"
FEDORA_CACHE_IMAGE_NAME: "fedora-${IMAGE_SUFFIX}"
PRIOR_FEDORA_CACHE_IMAGE_NAME: "prior-fedora-${IMAGE_SUFFIX}"
DEBIAN_CACHE_IMAGE_NAME: "debian-${IMAGE_SUFFIX}"


@ -2,6 +2,19 @@
# Changelog
## v1.34.0 (2023-12-11)
vendor: update c/{common,image,storage}
run: Allow using just one jail per container on FreeBSD
Remove makefile targets entrypoint{,.gz} for non x86_64
## v1.33.2 (2023-11-22)
Update minimum to golang 1.20
fix(deps): update module github.com/fsouza/go-dockerclient to v1.10.0
fix(deps): update module github.com/moby/buildkit to v0.12.3
Bump to v1.33.2-dev
## v1.33.1 (2023-11-18)
fix(deps): update module github.com/moby/buildkit to v0.11.4 [security]


@ -39,7 +39,7 @@ LIBSECCOMP_COMMIT := release-2.3
EXTRA_LDFLAGS ?=
BUILDAH_LDFLAGS := $(GO_LDFLAGS) '-X main.GitCommit=$(GIT_COMMIT) -X main.buildInfo=$(SOURCE_DATE_EPOCH) -X main.cniVersion=$(CNI_COMMIT) $(EXTRA_LDFLAGS)'
SOURCES=*.go imagebuildah/*.go bind/*.go chroot/*.go copier/*.go define/*.go docker/*.go internal/config/*.go internal/mkcw/*.go internal/mkcw/types/*.go internal/parse/*.go internal/source/*.go internal/util/*.go manifests/*.go pkg/chrootuser/*.go pkg/cli/*.go pkg/completion/*.go pkg/formats/*.go pkg/overlay/*.go pkg/parse/*.go pkg/rusage/*.go pkg/sshagent/*.go pkg/umask/*.go pkg/util/*.go util/*.go
SOURCES=*.go imagebuildah/*.go bind/*.go chroot/*.go copier/*.go define/*.go docker/*.go internal/config/*.go internal/mkcw/*.go internal/mkcw/types/*.go internal/parse/*.go internal/sbom/*.go internal/source/*.go internal/tmpdir/*.go internal/util/*.go internal/volumes/*.go manifests/*.go pkg/chrootuser/*.go pkg/cli/*.go pkg/completion/*.go pkg/formats/*.go pkg/overlay/*.go pkg/parse/*.go pkg/rusage/*.go pkg/sshagent/*.go pkg/umask/*.go pkg/util/*.go util/*.go
LINTFLAGS ?=
@ -69,15 +69,14 @@ static:
mkdir -p ./bin
cp -rfp ./result/bin/* ./bin/
bin/buildah: $(SOURCES) cmd/buildah/*.go internal/mkcw/embed/entrypoint.gz
bin/buildah: $(SOURCES) cmd/buildah/*.go internal/mkcw/embed/entrypoint_amd64.gz
$(GO_BUILD) $(BUILDAH_LDFLAGS) $(GO_GCFLAGS) "$(GOGCFLAGS)" -o $@ $(BUILDFLAGS) ./cmd/buildah
ifneq ($(shell as --version | grep x86_64),)
internal/mkcw/embed/entrypoint.gz: internal/mkcw/embed/entrypoint
$(RM) $@
gzip -k $^
internal/mkcw/embed/entrypoint_amd64.gz: internal/mkcw/embed/entrypoint_amd64
gzip -k9nf $^
internal/mkcw/embed/entrypoint: internal/mkcw/embed/entrypoint.s
internal/mkcw/embed/entrypoint_amd64: internal/mkcw/embed/entrypoint_amd64.s
$(AS) -o $(patsubst %.s,%.o,$^) $^
$(LD) -o $@ $(patsubst %.s,%.o,$^)
strip $@
@ -124,7 +123,7 @@ gopath:
test $(shell pwd) = $(shell cd ../../../../src/github.com/containers/buildah ; pwd)
codespell:
codespell -S Makefile,buildah.spec.rpkg,AUTHORS,bin,vendor,.git,go.mod,go.sum,CHANGELOG.md,changelog.txt,seccomp.json,.cirrus.yml,"*.xz,*.gz,*.tar,*.tgz,*ico,*.png,*.1,*.5,*.orig,*.rej" -L passt,bu,uint,iff,od,erro -w
codespell -S Makefile,buildah.spec.rpkg,AUTHORS,bin,vendor,.git,go.mod,go.sum,CHANGELOG.md,changelog.txt,seccomp.json,.cirrus.yml,"*.xz,*.gz,*.tar,*.tgz,*ico,*.png,*.1,*.5,*.orig,*.rej" -L secon,passt,bu,uint,iff,od,erro -w
.PHONY: validate
validate: install.tools


@ -1,4 +1,5 @@
![buildah logo](https://cdn.rawgit.com/containers/buildah/main/logos/buildah-logo_large.png)
![buildah logo (light)](logos/buildah-logo_large.png#gh-light-mode-only)
![buildah logo (dark)](logos/buildah-logo_reverse_large.png#gh-dark-mode-only)
# [Buildah](https://www.youtube.com/embed/YVk5NgSiUw8) - a tool that facilitates building [Open Container Initiative (OCI)](https://www.opencontainers.org/) container images


@ -23,7 +23,6 @@ import (
"github.com/containers/storage/pkg/idtools"
"github.com/hashicorp/go-multierror"
digest "github.com/opencontainers/go-digest"
"github.com/opencontainers/runc/libcontainer/userns"
"github.com/opencontainers/runtime-spec/specs-go"
"github.com/sirupsen/logrus"
)
@ -438,7 +437,7 @@ func (b *Builder) Add(destination string, extract bool, options AddAndCopyOption
ChmodDirs: nil,
ChownFiles: nil,
ChmodFiles: nil,
IgnoreDevices: userns.RunningInUserNS(),
IgnoreDevices: runningInUserNS(),
}
putErr = copier.Put(extractDirectory, extractDirectory, putOptions, io.TeeReader(pipeReader, hasher))
}
@ -579,7 +578,7 @@ func (b *Builder) Add(destination string, extract bool, options AddAndCopyOption
ChmodDirs: nil,
ChownFiles: nil,
ChmodFiles: nil,
IgnoreDevices: userns.RunningInUserNS(),
IgnoreDevices: runningInUserNS(),
}
putErr = copier.Put(extractDirectory, extractDirectory, putOptions, io.TeeReader(pipeReader, hasher))
}

vendor/github.com/containers/buildah/add_common.go (generated, vendored; new file)

@ -0,0 +1,8 @@
//go:build !linux
// +build !linux
package buildah
func runningInUserNS() bool {
return false
}

vendor/github.com/containers/buildah/add_linux.go (generated, vendored; new file)

@ -0,0 +1,9 @@
package buildah
import (
"github.com/opencontainers/runc/libcontainer/userns"
)
func runningInUserNS() bool {
return userns.RunningInUserNS()
}


@ -391,6 +391,10 @@ type ImportFromImageOptions struct {
// instead of the usual rootfs contents.
type ConfidentialWorkloadOptions = define.ConfidentialWorkloadOptions
// SBOMScanOptions encapsulates options which control whether or not we run a
// scanner on the rootfs that we're about to commit, and how.
type SBOMScanOptions = define.SBOMScanOptions
// NewBuilder creates a new build container.
func NewBuilder(ctx context.Context, store storage.Store, options BuilderOptions) (*Builder, error) {
if options.CommonBuildOpts == nil {


@ -1,3 +1,14 @@
- Changelog for v1.34.0 (2023-12-11)
* vendor: update c/{common,image,storage}
* run: Allow using just one jail per container on FreeBSD
* Remove makefile targets entrypoint{,.gz} for non x86_64
- Changelog for v1.33.2 (2023-11-22)
* Update minimum to golang 1.20
* fix(deps): update module github.com/fsouza/go-dockerclient to v1.10.0
* fix(deps): update module github.com/moby/buildkit to v0.12.3
* Bump to v1.33.2-dev
- Changelog for v1.33.1 (2023-11-18)
* fix(deps): update module github.com/moby/buildkit to v0.11.4 [security]
* test,heredoc: use fedora instead of docker.io/library/python:latest


@ -118,6 +118,15 @@ type CommitOptions struct {
// to the configuration of the image that is being committed, after
// OverrideConfig is applied.
OverrideChanges []string
// ExtraImageContent is a map which describes additional content to add
// to the committed image. The map's keys are filesystem paths in the
// image and the corresponding values are the paths of files whose
// contents will be used in their place. The contents will be owned by
// 0:0 and have mode 0644. Currently only accepts regular files.
ExtraImageContent map[string]string
// SBOMScanOptions encapsulates options which control whether or not we
// run scanners on the rootfs that we're about to commit, and how.
SBOMScanOptions []SBOMScanOptions
}
var (
@ -315,6 +324,28 @@ func (b *Builder) Commit(ctx context.Context, dest types.ImageReference, options
}
logrus.Debugf("committing image with reference %q is allowed by policy", transports.ImageName(dest))
// If we need to scan the rootfs, do it now.
options.ExtraImageContent = copyStringStringMap(options.ExtraImageContent)
var extraImageContent, extraLocalContent map[string]string
if len(options.SBOMScanOptions) != 0 {
var scansDirectory string
if extraImageContent, extraLocalContent, scansDirectory, err = b.sbomScan(ctx, options); err != nil {
return imgID, nil, "", fmt.Errorf("scanning rootfs to generate SBOM for container %q: %w", b.ContainerID, err)
}
if scansDirectory != "" {
defer func() {
if err := os.RemoveAll(scansDirectory); err != nil {
logrus.Warnf("removing temporary directory %q: %v", scansDirectory, err)
}
}()
}
for k, v := range extraImageContent {
if _, set := options.ExtraImageContent[k]; !set {
options.ExtraImageContent[k] = v
}
}
}
// Build an image reference from which we can copy the finished image.
src, err = b.makeContainerImageRef(options)
if err != nil {
@ -402,7 +433,31 @@ func (b *Builder) Commit(ctx context.Context, dest types.ImageReference, options
}
}
}
// If we're supposed to store SBOM or PURL information in local files, write them now.
for filename, content := range extraLocalContent {
err := func() error {
output, err := os.OpenFile(filename, os.O_CREATE|os.O_WRONLY|os.O_TRUNC, 0o644)
if err != nil {
return err
}
defer output.Close()
input, err := os.Open(content)
if err != nil {
return err
}
defer input.Close()
if _, err := io.Copy(output, input); err != nil {
return fmt.Errorf("copying from %q to %q: %w", content, filename, err)
}
return nil
}()
if err != nil {
return imgID, nil, "", err
}
}
// Calculate the as-written digest of the image's manifest and build the digested
// reference for the image.
manifestDigest, err := manifest.Digest(manifestBytes)
if err != nil {
return imgID, nil, "", fmt.Errorf("computing digest of manifest of new image %q: %w", transports.ImageName(dest), err)
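
To see how the new CommitOptions fields fit together, here is a hedged sketch of a caller populating them; the file paths, scanner image, and output locations below are illustrative assumptions, not values taken from this PR:

```go
package example

import (
	"github.com/containers/buildah"
	"github.com/containers/buildah/define"
)

// sbomCommitOptions sketches a CommitOptions value that injects one extra
// file into the committed image and requests a syft-based SBOM scan.
func sbomCommitOptions() buildah.CommitOptions {
	return buildah.CommitOptions{
		// Image path -> local source file; per the field docs above, the
		// contents end up owned by 0:0 with mode 0644, regular files only.
		ExtraImageContent: map[string]string{
			"/etc/build-info.json": "/tmp/build-info.json",
		},
		// Run a scanner at commit time, saving the merged result both on
		// the local filesystem (SBOMOutput) and inside the image
		// (ImageSBOMOutput).
		SBOMScanOptions: []define.SBOMScanOptions{{
			Image:           "ghcr.io/anchore/syft",
			Commands:        []string{"/syft scan -q dir:{ROOTFS} --output cyclonedx-json={OUTPUT}"},
			SBOMOutput:      "sbom.cdx.json",
			ImageSBOMOutput: "/usr/share/sbom/sbom.cdx.json",
			MergeStrategy:   define.SBOMMergeStrategyCycloneDXByComponentNameAndVersion,
		}},
	}
}
```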


@ -45,6 +45,7 @@ type CWConvertImageOptions struct {
FirmwareLibrary string
BaseImage string
Logger *logrus.Logger
ExtraImageContent map[string]string
// Passed through to BuilderOptions. Most settings won't make
// sense to be made available here because we don't launch a process.
@ -171,6 +172,8 @@ func CWConvertImage(ctx context.Context, systemContext *types.SystemContext, sto
Slop: options.Slop,
FirmwareLibrary: options.FirmwareLibrary,
Logger: logger,
GraphOptions: store.GraphOptions(),
ExtraImageContent: options.ExtraImageContent,
}
rc, workloadConfig, err := mkcw.Archive(sourceDir, &source.OCIv1, archiveOptions)
if err != nil {

View File

@ -333,4 +333,7 @@ type BuildOptions struct {
// value set in a base image will be preserved, so this does not
// frequently need to be set.
OSVersion string
// SBOMScanOptions encapsulates options which control whether or not we
// run scanners on the rootfs that we're about to commit, and how.
SBOMScanOptions []SBOMScanOptions
}


@ -29,7 +29,7 @@ const (
// identify working containers.
Package = "buildah"
// Version for the Package. Also used by .packit.sh for Packit builds.
Version = "1.33.2-dev"
Version = "1.34.1-dev"
// DefaultRuntime if containers.conf fails.
DefaultRuntime = "runc"
@ -121,7 +121,7 @@ type ConfidentialWorkloadOptions struct {
AttestationURL string
CPUs int
Memory int
TempDir string
TempDir string // used for the temporary plaintext copy of the disk image
TeeType TeeType
IgnoreAttestationErrors bool
WorkloadID string
@ -130,6 +130,42 @@ type ConfidentialWorkloadOptions struct {
FirmwareLibrary string
}
// SBOMMergeStrategy tells us how to merge multiple SBOM documents into one.
type SBOMMergeStrategy string
const (
// SBOMMergeStrategyCat literally concatenates the documents.
SBOMMergeStrategyCat SBOMMergeStrategy = "cat"
// SBOMMergeStrategyCycloneDXByComponentNameAndVersion adds components
// from the second document to the first, so long as they have a
// name+version combination which is not already present in the
// components array.
SBOMMergeStrategyCycloneDXByComponentNameAndVersion SBOMMergeStrategy = "merge-cyclonedx-by-component-name-and-version"
// SBOMMergeStrategySPDXByPackageNameAndVersionInfo adds packages from
// the second document to the first, so long as they have a
// name+versionInfo combination which is not already present in the
// first document's packages array, and adds hasExtractedLicensingInfos
// items from the second document to the first, so long as they include
// a licenseId value which is not already present in the first
// document's hasExtractedLicensingInfos array.
SBOMMergeStrategySPDXByPackageNameAndVersionInfo SBOMMergeStrategy = "merge-spdx-by-package-name-and-versioninfo"
)
// SBOMScanOptions encapsulates options which control whether or not we run a
// scanner on the rootfs that we're about to commit, and how.
type SBOMScanOptions struct {
Type []string // a shorthand name for a defined group of these options
Image string // the scanner image to use
PullPolicy PullPolicy // how to get the scanner image
Commands []string // one or more commands to invoke for the image rootfs or ContextDir locations
ContextDir []string // one or more "source" directory locations
SBOMOutput string // where to save SBOM scanner output outside of the image (i.e., the local filesystem)
PURLOutput string // where to save PURL list outside of the image (i.e., the local filesystem)
ImageSBOMOutput string // where to save SBOM scanner output in the image
ImagePURLOutput string // where to save PURL list in the image
MergeStrategy SBOMMergeStrategy // how to merge the outputs of multiple scans
}
// TempDirForURL checks if the passed-in string looks like a URL or -. If it is,
// TempDirForURL creates a temporary directory, arranges for its contents to be
// the contents of that URL, and returns the temporary directory's path, along


@ -45,9 +45,9 @@ const (
Dockerv2ImageManifest = define.Dockerv2ImageManifest
)
// ExtractRootfsOptions is consumed by ExtractRootfs() which allows
// users to preserve nature of various modes like setuid, setgid and xattrs
// over the extracted file system objects.
// ExtractRootfsOptions is consumed by ExtractRootfs() which allows users to
// control whether various information like the setuid and setgid bits and
// xattrs are preserved when extracting file system objects.
type ExtractRootfsOptions struct {
StripSetuidBit bool // strip the setuid bit off of items being extracted.
StripSetgidBit bool // strip the setgid bit off of items being extracted.
@ -82,6 +82,7 @@ type containerImageRef struct {
postEmptyLayers []v1.History
overrideChanges []string
overrideConfig *manifest.Schema2Config
extraImageContent map[string]string
}
type blobLayerInfo struct {
@ -171,6 +172,22 @@ func (i *containerImageRef) extractConfidentialWorkloadFS(options ConfidentialWo
if err := json.Unmarshal(i.oconfig, &image); err != nil {
return nil, fmt.Errorf("recreating OCI configuration for %q: %w", i.containerID, err)
}
if options.TempDir == "" {
cdir, err := i.store.ContainerDirectory(i.containerID)
if err != nil {
return nil, fmt.Errorf("getting the per-container data directory for %q: %w", i.containerID, err)
}
tempdir, err := os.MkdirTemp(cdir, "buildah-rootfs")
if err != nil {
return nil, fmt.Errorf("creating a temporary data directory to hold a rootfs image for %q: %w", i.containerID, err)
}
defer func() {
if err := os.RemoveAll(tempdir); err != nil {
logrus.Warnf("removing temporary directory %q: %v", tempdir, err)
}
}()
options.TempDir = tempdir
}
mountPoint, err := i.store.Mount(i.containerID, i.mountLabel)
if err != nil {
return nil, fmt.Errorf("mounting container %q: %w", i.containerID, err)
@ -186,6 +203,8 @@ func (i *containerImageRef) extractConfidentialWorkloadFS(options ConfidentialWo
DiskEncryptionPassphrase: options.DiskEncryptionPassphrase,
Slop: options.Slop,
FirmwareLibrary: options.FirmwareLibrary,
GraphOptions: i.store.GraphOptions(),
ExtraImageContent: i.extraImageContent,
}
rc, _, err := mkcw.Archive(mountPoint, &image, archiveOptions)
if err != nil {
@ -211,9 +230,8 @@ func (i *containerImageRef) extractConfidentialWorkloadFS(options ConfidentialWo
}
// Extract the container's whole filesystem as if it were a single layer.
// Takes ExtractRootfsOptions as argument which allows caller to configure
// preserve nature of setuid,setgid,sticky and extended attributes
// on extracted rootfs.
// The ExtractRootfsOptions control whether or not to preserve setuid and
// setgid bits and extended attributes on contents.
func (i *containerImageRef) extractRootfs(opts ExtractRootfsOptions) (io.ReadCloser, chan error, error) {
var uidMap, gidMap []idtools.IDMap
mountPoint, err := i.store.Mount(i.containerID, i.mountLabel)
@ -224,6 +242,27 @@ func (i *containerImageRef) extractRootfs(opts ExtractRootfsOptions) (io.ReadClo
errChan := make(chan error, 1)
go func() {
defer close(errChan)
if len(i.extraImageContent) > 0 {
// Abuse the tar format and _prepend_ the synthesized
// data items to the archive we'll get from
// copier.Get(), in a way that looks right to a reader
// as long as we DON'T Close() the tar Writer.
filename, _, _, err := i.makeExtraImageContentDiff(false)
if err != nil {
errChan <- err
return
}
file, err := os.Open(filename)
if err != nil {
errChan <- err
return
}
defer file.Close()
if _, err = io.Copy(pipeWriter, file); err != nil {
errChan <- err
return
}
}
if i.idMappingOptions != nil {
uidMap, gidMap = convertRuntimeIDMaps(i.idMappingOptions.UIDMap, i.idMappingOptions.GIDMap)
}
@ -234,7 +273,7 @@ func (i *containerImageRef) extractRootfs(opts ExtractRootfsOptions) (io.ReadClo
StripSetgidBit: opts.StripSetgidBit,
StripXattrs: opts.StripXattrs,
}
err = copier.Get(mountPoint, mountPoint, copierOptions, []string{"."}, pipeWriter)
err := copier.Get(mountPoint, mountPoint, copierOptions, []string{"."}, pipeWriter)
errChan <- err
pipeWriter.Close()
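
The "abuse the tar format" trick above relies on a property of the format worth making explicit: a tar stream whose end-of-archive footer is omitted can be concatenated directly with another tar stream, and a reader walks the result as one archive. A self-contained demonstration (not Buildah code):

```go
package main

import (
	"archive/tar"
	"bytes"
	"fmt"
	"io"
)

// writeUnterminatedEntry writes one file entry and flushes its block
// padding, but never calls Close(), so no end-of-archive footer is written.
func writeUnterminatedEntry(w io.Writer, name, body string) error {
	tw := tar.NewWriter(w)
	if err := tw.WriteHeader(&tar.Header{
		Name: name, Typeflag: tar.TypeReg, Mode: 0o644, Size: int64(len(body)),
	}); err != nil {
		return err
	}
	if _, err := io.WriteString(tw, body); err != nil {
		return err
	}
	return tw.Flush() // pad to a block boundary; deliberately no Close()
}

func main() {
	var stream bytes.Buffer

	// The synthesized, footer-less content comes first...
	if err := writeUnterminatedEntry(&stream, "etc/sbom.json", "{}"); err != nil {
		panic(err)
	}

	// ...followed by a second, complete archive (this one is Close()d).
	tw := tar.NewWriter(&stream)
	if err := tw.WriteHeader(&tar.Header{
		Name: "bin/app", Typeflag: tar.TypeReg, Mode: 0o755, Size: 2,
	}); err != nil {
		panic(err)
	}
	if _, err := io.WriteString(tw, "hi"); err != nil {
		panic(err)
	}
	if err := tw.Close(); err != nil {
		panic(err)
	}

	// A reader sees the concatenation as a single archive.
	tr := tar.NewReader(&stream)
	for {
		hdr, err := tr.Next()
		if err == io.EOF {
			break
		}
		if err != nil {
			panic(err)
		}
		fmt.Println(hdr.Name) // etc/sbom.json, then bin/app
	}
}
```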
@ -294,8 +333,8 @@ func (i *containerImageRef) createConfigsAndManifests() (v1.Image, v1.Manifest,
dimage.RootFS.Type = docker.TypeLayers
dimage.RootFS.DiffIDs = []digest.Digest{}
// Only clear the history if we're squashing, otherwise leave it be so
// that we can append entries to it. Clear the parent, too, we no
// longer include its layers and history.
// that we can append entries to it. Clear the parent, too, to reflect
// that we no longer include its layers and history.
if i.confidentialWorkload.Convert || i.squash || i.omitHistory {
dimage.Parent = ""
dimage.History = []docker.V2S2History{}
@ -368,8 +407,9 @@ func (i *containerImageRef) NewImageSource(ctx context.Context, sc *types.System
if err != nil {
return nil, fmt.Errorf("unable to read layer %q: %w", layerID, err)
}
// Walk the list of parent layers, prepending each as we go. If we're squashing,
// stop at the layer ID of the top layer, which we won't really be using anyway.
// Walk the list of parent layers, prepending each as we go. If we're squashing
// or making a confidential workload, we're only producing one layer, so stop at
// the layer ID of the top layer, which we won't really be using anyway.
for layer != nil {
layers = append(append([]string{}, layerID), layers...)
layerID = layer.Parent
@ -382,6 +422,14 @@ func (i *containerImageRef) NewImageSource(ctx context.Context, sc *types.System
return nil, fmt.Errorf("unable to read layer %q: %w", layerID, err)
}
}
layer = nil
// If we're slipping in a synthesized layer, we need to add a placeholder for it
// to the list.
const synthesizedLayerID = "(synthesized layer)"
if len(i.extraImageContent) > 0 && !i.confidentialWorkload.Convert && !i.squash {
layers = append(layers, synthesizedLayerID)
}
logrus.Debugf("layer list: %q", layers)
// Make a temporary directory to hold blobs.
@ -407,6 +455,8 @@ func (i *containerImageRef) NewImageSource(ctx context.Context, sc *types.System
}
// Extract each layer and compute its digests, both compressed (if requested) and uncompressed.
var extraImageContentDiff string
var extraImageContentDiffDigest digest.Digest
blobLayers := make(map[digest.Digest]blobLayerInfo)
for _, layerID := range layers {
what := fmt.Sprintf("layer %q", layerID)
@ -417,16 +467,32 @@ func (i *containerImageRef) NewImageSource(ctx context.Context, sc *types.System
omediaType := v1.MediaTypeImageLayer
dmediaType := docker.V2S2MediaTypeUncompressedLayer
// Look up this layer.
layer, err := i.store.Layer(layerID)
if err != nil {
return nil, fmt.Errorf("unable to locate layer %q: %w", layerID, err)
var layerUncompressedDigest digest.Digest
var layerUncompressedSize int64
if layerID != synthesizedLayerID {
layer, err := i.store.Layer(layerID)
if err != nil {
return nil, fmt.Errorf("unable to locate layer %q: %w", layerID, err)
}
layerID = layer.ID
layerUncompressedDigest = layer.UncompressedDigest
layerUncompressedSize = layer.UncompressedSize
} else {
diffFilename, digest, size, err := i.makeExtraImageContentDiff(true)
if err != nil {
return nil, fmt.Errorf("unable to generate layer for additional content: %w", err)
}
extraImageContentDiff = diffFilename
extraImageContentDiffDigest = digest
layerUncompressedDigest = digest
layerUncompressedSize = size
}
// If we already know the digest of the contents of parent
// layers, reuse their blobsums, diff IDs, and sizes.
if !i.confidentialWorkload.Convert && !i.squash && layerID != i.layerID && layer.UncompressedDigest != "" {
layerBlobSum := layer.UncompressedDigest
layerBlobSize := layer.UncompressedSize
diffID := layer.UncompressedDigest
if !i.confidentialWorkload.Convert && !i.squash && layerID != i.layerID && layerID != synthesizedLayerID && layerUncompressedDigest != "" {
layerBlobSum := layerUncompressedDigest
layerBlobSize := layerUncompressedSize
diffID := layerUncompressedDigest
// Note this layer in the manifest, using the appropriate blobsum.
olayerDescriptor := v1.Descriptor{
MediaType: omediaType,
@ -444,7 +510,7 @@ func (i *containerImageRef) NewImageSource(ctx context.Context, sc *types.System
oimage.RootFS.DiffIDs = append(oimage.RootFS.DiffIDs, diffID)
dimage.RootFS.DiffIDs = append(dimage.RootFS.DiffIDs, diffID)
blobLayers[diffID] = blobLayerInfo{
ID: layer.ID,
ID: layerID,
Size: layerBlobSize,
}
continue
@ -474,15 +540,22 @@ func (i *containerImageRef) NewImageSource(ctx context.Context, sc *types.System
return nil, err
}
} else {
// If we're up to the final layer, but we don't want to
// include a diff for it, we're done.
if i.emptyLayer && layerID == i.layerID {
continue
}
// Extract this layer, one of possibly many.
rc, err = i.store.Diff("", layerID, diffOptions)
if err != nil {
return nil, fmt.Errorf("extracting %s: %w", what, err)
if layerID != synthesizedLayerID {
// If we're up to the final layer, but we don't want to
// include a diff for it, we're done.
if i.emptyLayer && layerID == i.layerID {
continue
}
// Extract this layer, one of possibly many.
rc, err = i.store.Diff("", layerID, diffOptions)
if err != nil {
return nil, fmt.Errorf("extracting %s: %w", what, err)
}
} else {
// Slip in additional content as an additional layer.
if rc, err = os.Open(extraImageContentDiff); err != nil {
return nil, err
}
}
}
srcHasher := digest.Canonical.Digester()
@ -624,20 +697,19 @@ func (i *containerImageRef) NewImageSource(ctx context.Context, sc *types.System
}
}
// Calculate base image history for special scenarios
// when base layers does not contains any history.
// We will ignore sanity checks if baseImage history is null
// but still add new history for docker parity.
baseImageHistoryLen := len(oimage.History)
// Only attempt to append history if history was not disabled explicitly.
if !i.omitHistory {
// Keep track of how many entries the base image's history had
// before we started adding to it.
baseImageHistoryLen := len(oimage.History)
appendHistory(i.preEmptyLayers)
created := time.Now().UTC()
if i.created != nil {
created = (*i.created).UTC()
}
comment := i.historyComment
// Add a comment for which base image is being used
// Add a comment indicating which base image was used, if it wasn't
// just an image ID.
if strings.Contains(i.parent, i.fromImageID) && i.fromImageName != i.fromImageID {
comment += "FROM " + i.fromImageName
}
@ -659,10 +731,24 @@ func (i *containerImageRef) NewImageSource(ctx context.Context, sc *types.System
dimage.History = append(dimage.History, dnews)
appendHistory(i.postEmptyLayers)
// Sanity check that we didn't just create a mismatch between non-empty layers in the
// history and the number of diffIDs. Following sanity check is ignored if build history
// is disabled explicitly by the user.
// Disable sanity check when baseImageHistory is null for docker parity
// Add a history entry for the extra image content if we added a layer for it.
if extraImageContentDiff != "" {
createdBy := fmt.Sprintf(`/bin/sh -c #(nop) ADD dir:%s in /",`, extraImageContentDiffDigest.Encoded())
onews := v1.History{
Created: &created,
CreatedBy: createdBy,
}
oimage.History = append(oimage.History, onews)
dnews := docker.V2S2History{
Created: created,
CreatedBy: createdBy,
}
dimage.History = append(dimage.History, dnews)
}
// Confidence check that we didn't just create a mismatch between non-empty layers in the
// history and the number of diffIDs. Only applicable if the base image (if there was
// one) provided us at least one entry to use as a starting point.
if baseImageHistoryLen != 0 {
expectedDiffIDs := expectedOCIDiffIDs(oimage)
if len(oimage.RootFS.DiffIDs) != expectedDiffIDs {
@ -859,6 +945,68 @@ func (i *containerImageSource) GetBlob(ctx context.Context, blob types.BlobInfo,
return ioutils.NewReadCloserWrapper(layerReadCloser, closer), size, nil
}
// makeExtraImageContentDiff creates an archive file containing the contents of
// files named in i.extraImageContent. The footer that marks the end of the
// archive may be omitted.
func (i *containerImageRef) makeExtraImageContentDiff(includeFooter bool) (string, digest.Digest, int64, error) {
cdir, err := i.store.ContainerDirectory(i.containerID)
if err != nil {
return "", "", -1, err
}
diff, err := os.CreateTemp(cdir, "extradiff")
if err != nil {
return "", "", -1, err
}
defer diff.Close()
digester := digest.Canonical.Digester()
counter := ioutils.NewWriteCounter(digester.Hash())
tw := tar.NewWriter(io.MultiWriter(diff, counter))
created := time.Now()
if i.created != nil {
created = *i.created
}
for path, contents := range i.extraImageContent {
if err := func() error {
content, err := os.Open(contents)
if err != nil {
return err
}
defer content.Close()
st, err := content.Stat()
if err != nil {
return err
}
if err := tw.WriteHeader(&tar.Header{
Name: path,
Typeflag: tar.TypeReg,
Mode: 0o644,
ModTime: created,
Size: st.Size(),
}); err != nil {
return err
}
if _, err := io.Copy(tw, content); err != nil {
return err
}
if err := tw.Flush(); err != nil {
return err
}
return nil
}(); err != nil {
return "", "", -1, err
}
}
if !includeFooter {
return diff.Name(), "", -1, err
}
tw.Close()
return diff.Name(), digester.Digest(), counter.Count, err
}
// makeContainerImageRef creates a containers/image/v5/types.ImageReference
// which is mainly used for representing the working container as a source
// image that can be copied, which is how we commit the container to create the
// image.
func (b *Builder) makeContainerImageRef(options CommitOptions) (*containerImageRef, error) {
var name reference.Named
container, err := b.store.Container(b.ContainerID)
@ -900,11 +1048,21 @@ func (b *Builder) makeContainerImageRef(options CommitOptions) (*containerImageR
}
parent := ""
forceOmitHistory := false
if b.FromImageID != "" {
parentDigest := digest.NewDigestFromEncoded(digest.Canonical, b.FromImageID)
if parentDigest.Validate() == nil {
parent = parentDigest.String()
}
if !options.OmitHistory && len(b.OCIv1.History) == 0 && len(b.OCIv1.RootFS.DiffIDs) != 0 {
// Parent had layers, but no history. We shouldn't confuse
// our own confidence checks by adding history for layers
// that we're adding, creating an image with multiple layers,
// only some of which have history entries, which would be
// broken in confusing ways.
b.Logger.Debugf("parent image %q had no history but had %d layers, assuming OmitHistory", b.FromImageID, len(b.OCIv1.RootFS.DiffIDs))
forceOmitHistory = true
}
}
ref := &containerImageRef{
@ -926,7 +1084,7 @@ func (b *Builder) makeContainerImageRef(options CommitOptions) (*containerImageR
preferredManifestType: manifestType,
squash: options.Squash,
confidentialWorkload: options.ConfidentialWorkloadOptions,
omitHistory: options.OmitHistory,
omitHistory: options.OmitHistory || forceOmitHistory,
emptyLayer: options.EmptyLayer && !options.Squash && !options.ConfidentialWorkloadOptions.Convert,
idMappingOptions: &b.IDMappingOptions,
parent: parent,
@ -935,6 +1093,7 @@ func (b *Builder) makeContainerImageRef(options CommitOptions) (*containerImageR
postEmptyLayers: b.AppendedEmptyLayers,
overrideChanges: options.OverrideChanges,
overrideConfig: options.OverrideConfig,
extraImageContent: copyStringStringMap(options.ExtraImageContent),
}
return ref, nil
}

View File

@ -651,7 +651,7 @@ func baseImages(dockerfilenames []string, dockerfilecontents [][]byte, from stri
return nil, fmt.Errorf("reading multiple stages: %w", err)
}
var baseImages []string
nicknames := make(map[string]bool)
nicknames := make(map[string]struct{})
for stageIndex, stage := range stages {
node := stage.Node // first line
for node != nil { // each line
@ -673,7 +673,7 @@ func baseImages(dockerfilenames []string, dockerfilecontents [][]byte, from stri
}
}
base := child.Next.Value
if base != "" && base != buildah.BaseImageFakeName && !nicknames[base] {
if base != "" && base != buildah.BaseImageFakeName && !internalUtil.SetHas(nicknames, base) {
headingArgs := argsMapToSlice(stage.Builder.HeadingArgs)
userArgs := argsMapToSlice(stage.Builder.Args)
// append heading args so if --build-arg key=value is not
@ -692,7 +692,7 @@ func baseImages(dockerfilenames []string, dockerfilecontents [][]byte, from stri
node = node.Next // next line
}
if stage.Name != strconv.Itoa(stageIndex) {
nicknames[stage.Name] = true
nicknames[stage.Name] = struct{}{}
}
}
return baseImages, nil

View File

@ -14,6 +14,7 @@ import (
"github.com/containers/buildah"
"github.com/containers/buildah/define"
internalUtil "github.com/containers/buildah/internal/util"
"github.com/containers/buildah/pkg/parse"
"github.com/containers/buildah/pkg/sshagent"
"github.com/containers/buildah/util"
@ -41,19 +42,19 @@ import (
// complain if we're given values for arguments which have no corresponding ARG
// instruction in the Dockerfile, since that's usually an indication of a user
// error, but for these values we make exceptions and ignore them.
var builtinAllowedBuildArgs = map[string]bool{
"HTTP_PROXY": true,
"http_proxy": true,
"HTTPS_PROXY": true,
"https_proxy": true,
"FTP_PROXY": true,
"ftp_proxy": true,
"NO_PROXY": true,
"no_proxy": true,
"TARGETARCH": true,
"TARGETOS": true,
"TARGETPLATFORM": true,
"TARGETVARIANT": true,
var builtinAllowedBuildArgs = map[string]struct{}{
"HTTP_PROXY": {},
"http_proxy": {},
"HTTPS_PROXY": {},
"https_proxy": {},
"FTP_PROXY": {},
"ftp_proxy": {},
"NO_PROXY": {},
"no_proxy": {},
"TARGETARCH": {},
"TARGETOS": {},
"TARGETPLATFORM": {},
"TARGETVARIANT": {},
}
// Executor is a buildah-based implementation of the imagebuilder.Executor
@ -110,8 +111,8 @@ type Executor struct {
forceRmIntermediateCtrs bool
imageMap map[string]string // Used to map images that we create to handle the AS construct.
containerMap map[string]*buildah.Builder // Used to map from image names to only-created-for-the-rootfs containers.
baseMap map[string]bool // Holds the names of every base image, as given.
rootfsMap map[string]bool // Holds the names of every stage whose rootfs is referenced in a COPY or ADD instruction.
baseMap map[string]struct{} // Holds the names of every base image, as given.
rootfsMap map[string]struct{} // Holds the names of every stage whose rootfs is referenced in a COPY or ADD instruction.
blobDirectory string
excludes []string
groupAdd []string
@ -151,6 +152,7 @@ type Executor struct {
osFeatures []string
envs []string
confidentialWorkload define.ConfidentialWorkloadOptions
sbomScanOptions []define.SBOMScanOptions
}
type imageTypeAndHistoryAndDiffIDs struct {
@ -278,8 +280,8 @@ func newExecutor(logger *logrus.Logger, logPrefix string, store storage.Store, o
forceRmIntermediateCtrs: options.ForceRmIntermediateCtrs,
imageMap: make(map[string]string),
containerMap: make(map[string]*buildah.Builder),
baseMap: make(map[string]bool),
rootfsMap: make(map[string]bool),
baseMap: make(map[string]struct{}),
rootfsMap: make(map[string]struct{}),
blobDirectory: options.BlobDirectory,
unusedArgs: make(map[string]struct{}),
capabilities: capabilities,
@ -309,6 +311,7 @@ func newExecutor(logger *logrus.Logger, logPrefix string, store storage.Store, o
osFeatures: append([]string{}, options.OSFeatures...),
envs: append([]string{}, options.Envs...),
confidentialWorkload: options.ConfidentialWorkload,
sbomScanOptions: options.SBOMScanOptions,
}
if exec.err == nil {
exec.err = os.Stderr
@ -337,13 +340,13 @@ func newExecutor(logger *logrus.Logger, logPrefix string, store storage.Store, o
// We have to be careful here - it's either an argument
// and value, or just an argument, since they can be
// separated by either "=" or whitespace.
list := strings.SplitN(arg.Value, "=", 2)
argName, argValue, hasValue := strings.Cut(arg.Value, "=")
if !foundFirstStage {
if len(list) > 1 {
globalArgs[list[0]] = list[1]
if hasValue {
globalArgs[argName] = argValue
}
}
delete(exec.unusedArgs, list[0])
delete(exec.unusedArgs, argName)
}
case "FROM":
foundFirstStage = true
@ -491,17 +494,12 @@ func (b *Executor) buildStage(ctx context.Context, cleanupStages map[int]*StageE
// to the Dockerfile that would provide the same result.
// Reason: Docker adds label modification as a last step which can be
// processed like regular steps, and if no modification is done to
// layers, its easier to re-use cached layers.
// layers, it's easier to reuse cached layers.
if len(b.labels) > 0 {
var labelLine string
labels := append([]string{}, b.labels...)
for _, labelSpec := range labels {
label := strings.SplitN(labelSpec, "=", 2)
key := label[0]
value := ""
if len(label) > 1 {
value = label[1]
}
key, value, _ := strings.Cut(labelSpec, "=")
// check only for an empty key since docker allows empty values
if key != "" {
labelLine += fmt.Sprintf(" %q=%q", key, value)
@ -523,10 +521,8 @@ func (b *Executor) buildStage(ctx context.Context, cleanupStages map[int]*StageE
if len(b.envs) > 0 {
var envLine string
for _, envSpec := range b.envs {
env := strings.SplitN(envSpec, "=", 2)
key := env[0]
if len(env) > 1 {
value := env[1]
key, value, hasValue := strings.Cut(envSpec, "=")
if hasValue {
envLine += fmt.Sprintf(" %q=%q", key, value)
} else {
return "", nil, false, fmt.Errorf("BUG: unresolved environment variable: %q", key)
@ -613,7 +609,7 @@ func markDependencyStagesForTarget(dependencyMap map[string]*stageDependencyInfo
}
func (b *Executor) warnOnUnsetBuildArgs(stages imagebuilder.Stages, dependencyMap map[string]*stageDependencyInfo, args map[string]string) {
argFound := make(map[string]bool)
argFound := make(map[string]struct{})
for _, stage := range stages {
node := stage.Node // first line
for node != nil { // each line
@ -624,12 +620,12 @@ func (b *Executor) warnOnUnsetBuildArgs(stages imagebuilder.Stages, dependencyMa
if strings.Contains(argName, "=") {
res := strings.Split(argName, "=")
if res[1] != "" {
argFound[res[0]] = true
argFound[res[0]] = struct{}{}
}
}
argHasValue := true
if !strings.Contains(argName, "=") {
argHasValue = argFound[argName]
argHasValue = internalUtil.SetHas(argFound, argName)
}
if _, ok := args[argName]; !argHasValue && !ok {
shouldWarn := true
@ -779,7 +775,7 @@ func (b *Executor) Build(ctx context.Context, stages imagebuilder.Stages) (image
if err != nil {
return "", nil, fmt.Errorf("while replacing arg variables with values for format %q: %w", base, err)
}
b.baseMap[baseWithArg] = true
b.baseMap[baseWithArg] = struct{}{}
logrus.Debugf("base for stage %d: %q", stageIndex, base)
// Check if selected base is not an additional
// build context and if base is a valid stage
@ -801,7 +797,7 @@ func (b *Executor) Build(ctx context.Context, stages imagebuilder.Stages) (image
// was named using argument values, we might
// not record the right value here.
rootfs := strings.TrimPrefix(flag, "--from=")
b.rootfsMap[rootfs] = true
b.rootfsMap[rootfs] = struct{}{}
logrus.Debugf("rootfs needed for COPY in stage %d: %q", stageIndex, rootfs)
// Populate dependency tree and check
// if following ADD or COPY needs any other
@ -844,24 +840,18 @@ func (b *Executor) Build(ctx context.Context, stages imagebuilder.Stages) (image
mountFlags := strings.TrimPrefix(flag, "--mount=")
fields := strings.Split(mountFlags, ",")
for _, field := range fields {
if strings.HasPrefix(field, "from=") {
fromField := strings.SplitN(field, "=", 2)
if len(fromField) > 1 {
mountFrom := fromField[1]
// Check if this base is a stage if yes
// add base to current stage's dependency tree
// but also confirm if this is not in additional context.
if _, ok := b.additionalBuildContexts[mountFrom]; !ok {
// Treat from as a rootfs we need to preserve
b.rootfsMap[mountFrom] = true
if _, ok := dependencyMap[mountFrom]; ok {
// update current stage's dependency info
currentStageInfo := dependencyMap[stage.Name]
currentStageInfo.Needs = append(currentStageInfo.Needs, mountFrom)
}
if mountFrom, hasFrom := strings.CutPrefix(field, "from="); hasFrom {
// Check if this base is a stage if yes
// add base to current stage's dependency tree
// but also confirm if this is not in additional context.
if _, ok := b.additionalBuildContexts[mountFrom]; !ok {
// Treat from as a rootfs we need to preserve
b.rootfsMap[mountFrom] = struct{}{}
if _, ok := dependencyMap[mountFrom]; ok {
// update current stage's dependency info
currentStageInfo := dependencyMap[stage.Name]
currentStageInfo.Needs = append(currentStageInfo.Needs, mountFrom)
}
} else {
return "", nil, fmt.Errorf("invalid value for field `from=`: %q", fromField[1])
}
}
}

View File

@ -565,24 +565,23 @@ func (s *StageExecutor) runStageMountPoints(mountList []string) (map[string]inte
stageMountPoints := make(map[string]internal.StageMountDetails)
for _, flag := range mountList {
if strings.Contains(flag, "from") {
arr := strings.SplitN(flag, ",", 2)
if len(arr) < 2 {
tokens := strings.Split(flag, ",")
if len(tokens) < 2 {
return nil, fmt.Errorf("Invalid --mount command: %s", flag)
}
tokens := strings.Split(flag, ",")
for _, val := range tokens {
kv := strings.SplitN(val, "=", 2)
switch kv[0] {
for _, token := range tokens {
key, val, hasVal := strings.Cut(token, "=")
switch key {
case "from":
if len(kv) == 1 {
if !hasVal {
return nil, fmt.Errorf("unable to resolve argument for `from=`: bad argument")
}
if kv[1] == "" {
if val == "" {
return nil, fmt.Errorf("unable to resolve argument for `from=`: from points to an empty value")
}
from, fromErr := imagebuilder.ProcessWord(kv[1], s.stage.Builder.Arguments())
from, fromErr := imagebuilder.ProcessWord(val, s.stage.Builder.Arguments())
if fromErr != nil {
return nil, fmt.Errorf("unable to resolve argument %q: %w", kv[1], fromErr)
return nil, fmt.Errorf("unable to resolve argument %q: %w", val, fromErr)
}
// If additional buildContext contains this
// give priority to that and break if additional
@ -684,6 +683,15 @@ func (s *StageExecutor) createNeededHeredocMountsForRun(files []imagebuilder.Fil
return mountResult, nil
}
func parseSheBang(data string) string {
lines := strings.Split(data, "\n")
if len(lines) > 2 && strings.HasPrefix(lines[1], "#!") {
shebang := strings.TrimLeft(lines[1], "#!")
return shebang
}
return ""
}
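A standalone sketch of the helper's behavior (illustrative inputs, not from the commit). The shebang is read from the second line, which is where heredoc bodies beginning with "\n#!" carry it, since their first line is empty:

package main

import (
	"fmt"
	"strings"
)

// Same logic as parseSheBang above.
func parseSheBang(data string) string {
	lines := strings.Split(data, "\n")
	if len(lines) > 2 && strings.HasPrefix(lines[1], "#!") {
		return strings.TrimLeft(lines[1], "#!")
	}
	return ""
}

func main() {
	fmt.Printf("%q\n", parseSheBang("\n#!/usr/bin/env python3\nprint(1)\n")) // "/usr/bin/env python3"
	fmt.Printf("%q\n", parseSheBang("echo hi\n"))                            // ""
}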
// Run executes a RUN instruction using the stage's current working container
// as a root directory.
func (s *StageExecutor) Run(run imagebuilder.Run, config docker.Config) error {
@ -694,12 +702,17 @@ func (s *StageExecutor) Run(run imagebuilder.Run, config docker.Config) error {
if heredoc := buildkitparser.MustParseHeredoc(args[0]); heredoc != nil {
if strings.HasPrefix(run.Files[0].Data, "#!") || strings.HasPrefix(run.Files[0].Data, "\n#!") {
// This is a single heredoc with a shebang, so create a file
// and run it.
// and run it with program specified in shebang.
heredocMount, err := s.createNeededHeredocMountsForRun(run.Files)
if err != nil {
return err
}
args = []string{heredocMount[0].Destination}
shebangArgs := parseSheBang(run.Files[0].Data)
if shebangArgs != "" {
args = []string{shebangArgs + " " + heredocMount[0].Destination}
} else {
args = []string{heredocMount[0].Destination}
}
heredocMounts = append(heredocMounts, heredocMount...)
} else {
args = []string{run.Files[0].Data}
@ -1044,8 +1057,8 @@ func (s *StageExecutor) Execute(ctx context.Context, base string) (imgID string,
moreStages := s.index < len(s.stages)-1
lastStage := !moreStages
onlyBaseImage := false
imageIsUsedLater := moreStages && (s.executor.baseMap[stage.Name] || s.executor.baseMap[strconv.Itoa(stage.Position)])
rootfsIsUsedLater := moreStages && (s.executor.rootfsMap[stage.Name] || s.executor.rootfsMap[strconv.Itoa(stage.Position)])
imageIsUsedLater := moreStages && (internalUtil.SetHas(s.executor.baseMap, stage.Name) || internalUtil.SetHas(s.executor.baseMap, strconv.Itoa(stage.Position)))
rootfsIsUsedLater := moreStages && (internalUtil.SetHas(s.executor.rootfsMap, stage.Name) || internalUtil.SetHas(s.executor.rootfsMap, strconv.Itoa(stage.Position)))
// If the base image's name corresponds to the result of an earlier
// stage, make sure that stage has finished building an image, and
@ -1160,7 +1173,7 @@ func (s *StageExecutor) Execute(ctx context.Context, base string) (imgID string,
if len(children) == 0 {
// There are no steps.
if s.builder.FromImageID == "" || s.executor.squash || s.executor.confidentialWorkload.Convert || len(s.executor.labels) > 0 || len(s.executor.annotations) > 0 || len(s.executor.unsetEnvs) > 0 || len(s.executor.unsetLabels) > 0 {
if s.builder.FromImageID == "" || s.executor.squash || s.executor.confidentialWorkload.Convert || len(s.executor.labels) > 0 || len(s.executor.annotations) > 0 || len(s.executor.unsetEnvs) > 0 || len(s.executor.unsetLabels) > 0 || len(s.executor.sbomScanOptions) > 0 {
// We either don't have a base image, or we need to
// transform the contents of the base image, or we need
// to make some changes to just the config blob. Whichever
@ -1169,7 +1182,7 @@ func (s *StageExecutor) Execute(ctx context.Context, base string) (imgID string,
// No base image means there's nothing to put in a
// layer, so don't create one.
emptyLayer := (s.builder.FromImageID == "")
if imgID, ref, err = s.commit(ctx, s.getCreatedBy(nil, ""), emptyLayer, s.output, s.executor.squash, lastStage); err != nil {
if imgID, ref, err = s.commit(ctx, s.getCreatedBy(nil, ""), emptyLayer, s.output, s.executor.squash || s.executor.confidentialWorkload.Convert, lastStage); err != nil {
return "", nil, false, fmt.Errorf("committing base container: %w", err)
}
} else {
@ -1511,7 +1524,7 @@ func (s *StageExecutor) Execute(ctx context.Context, base string) (imgID string,
}
}
// Note: If the build has squash, we must try to re-use as many layers as possible if cache is found.
// Note: If the build has squash, we must try to reuse as many layers as possible if cache is found.
// So only perform commit if it's the lastInstruction of lastStage.
if cacheID != "" {
logCacheHit(cacheID)
@ -1567,11 +1580,13 @@ func (s *StageExecutor) Execute(ctx context.Context, base string) (imgID string,
}
if lastInstruction && lastStage {
if s.executor.squash || s.executor.confidentialWorkload.Convert {
// Create a squashed version of this image
// if we're supposed to create one and this
// is the last instruction of the last stage.
imgID, ref, err = s.commit(ctx, s.getCreatedBy(node, addedContentSummary), !s.stepRequiresLayer(step), commitName, true, lastStage && lastInstruction)
if s.executor.squash || s.executor.confidentialWorkload.Convert || len(s.executor.sbomScanOptions) != 0 {
// If this is the last instruction of the last stage,
// create a squashed or confidential workload
// version of the image if that's what we're after,
// or a normal one if we need to scan the image while
// committing it.
imgID, ref, err = s.commit(ctx, s.getCreatedBy(node, addedContentSummary), !s.stepRequiresLayer(step), commitName, s.executor.squash || s.executor.confidentialWorkload.Convert, lastStage && lastInstruction)
if err != nil {
return "", nil, false, fmt.Errorf("committing final squash step %+v: %w", *step, err)
}
@ -1726,7 +1741,14 @@ func (s *StageExecutor) getCreatedBy(node *parser.Node, addedContentSummary stri
if buildArgs != "" {
return "|" + strconv.Itoa(len(strings.Split(buildArgs, " "))) + " " + buildArgs + " /bin/sh -c " + node.Original[4:]
}
return "/bin/sh -c " + node.Original[4:]
result := "/bin/sh -c " + node.Original[4:]
if len(node.Heredocs) > 0 {
for _, doc := range node.Heredocs {
heredocContent := strings.TrimSpace(doc.Content)
result = result + "\n" + heredocContent
}
}
return result
case "ADD", "COPY":
destination := node
for destination.Next != nil {
@ -1748,9 +1770,9 @@ func (s *StageExecutor) getBuildArgsResolvedForRun() string {
dockerConfig := s.stage.Builder.Config()
for _, env := range dockerConfig.Env {
splitv := strings.SplitN(env, "=", 2)
if len(splitv) == 2 {
configuredEnvs[splitv[0]] = splitv[1]
key, val, hasVal := strings.Cut(env, "=")
if hasVal {
configuredEnvs[key] = val
}
}
@ -2102,8 +2124,8 @@ func (s *StageExecutor) commit(ctx context.Context, createdBy string, emptyLayer
s.builder.SetPort(string(p))
}
for _, envSpec := range config.Env {
spec := strings.SplitN(envSpec, "=", 2)
s.builder.SetEnv(spec[0], spec[1])
key, val, _ := strings.Cut(envSpec, "=")
s.builder.SetEnv(key, val)
}
for _, envSpec := range s.executor.unsetEnvs {
s.builder.UnsetEnv(envSpec)
@ -2139,12 +2161,8 @@ func (s *StageExecutor) commit(ctx context.Context, createdBy string, emptyLayer
// an intermediate image, in such case we must
// honor layer labels if they are configured.
for _, labelString := range s.executor.layerLabels {
label := strings.SplitN(labelString, "=", 2)
if len(label) > 1 {
s.builder.SetLabel(label[0], label[1])
} else {
s.builder.SetLabel(label[0], "")
}
labelk, labelv, _ := strings.Cut(labelString, "=")
s.builder.SetLabel(labelk, labelv)
}
}
for k, v := range config.Labels {
@ -2157,12 +2175,8 @@ func (s *StageExecutor) commit(ctx context.Context, createdBy string, emptyLayer
s.builder.UnsetLabel(key)
}
for _, annotationSpec := range s.executor.annotations {
annotation := strings.SplitN(annotationSpec, "=", 2)
if len(annotation) > 1 {
s.builder.SetAnnotation(annotation[0], annotation[1])
} else {
s.builder.SetAnnotation(annotation[0], "")
}
annotationk, annotationv, _ := strings.Cut(annotationSpec, "=")
s.builder.SetAnnotation(annotationk, annotationv)
}
if imageRef != nil {
logName := transports.ImageName(imageRef)
@ -2192,6 +2206,7 @@ func (s *StageExecutor) commit(ctx context.Context, createdBy string, emptyLayer
}
if finalInstruction {
options.ConfidentialWorkloadOptions = s.executor.confidentialWorkload
options.SBOMScanOptions = s.executor.sbomScanOptions
}
imgID, _, manifestDigest, err := s.builder.Commit(ctx, imageRef, options)
if err != nil {

View File

@ -17,7 +17,12 @@ import (
"strings"
"time"
"github.com/containers/buildah/internal/tmpdir"
"github.com/containers/buildah/pkg/overlay"
"github.com/containers/luksy"
"github.com/containers/storage/pkg/idtools"
"github.com/containers/storage/pkg/mount"
"github.com/containers/storage/pkg/system"
"github.com/docker/docker/pkg/ioutils"
"github.com/docker/go-units"
digest "github.com/opencontainers/go-digest"
@ -48,6 +53,8 @@ type ArchiveOptions struct {
DiskEncryptionPassphrase string
FirmwareLibrary string
Logger *logrus.Logger
GraphOptions []string // passed in from a storage Store, probably
ExtraImageContent map[string]string
}
type chainRetrievalError struct {
@ -64,9 +71,7 @@ func (c chainRetrievalError) Error() string {
// Archive generates a WorkloadConfig for a specified directory and produces a
// tar archive of a container image's rootfs with the expected contents.
// The input directory will have a ".krun_config.json" file added to it while
// this function is running, but it will be removed on completion.
func Archive(path string, ociConfig *v1.Image, options ArchiveOptions) (io.ReadCloser, WorkloadConfig, error) {
func Archive(rootfsPath string, ociConfig *v1.Image, options ArchiveOptions) (io.ReadCloser, WorkloadConfig, error) {
const (
teeDefaultCPUs = 2
teeDefaultMemory = 512
@ -74,7 +79,7 @@ func Archive(path string, ociConfig *v1.Image, options ArchiveOptions) (io.ReadC
teeDefaultTeeType = SNP
)
if path == "" {
if rootfsPath == "" {
return nil, WorkloadConfig{}, fmt.Errorf("required path not specified")
}
logger := options.Logger
@ -97,7 +102,7 @@ func Archive(path string, ociConfig *v1.Image, options ArchiveOptions) (io.ReadC
filesystem := teeDefaultFilesystem
workloadID := options.WorkloadID
if workloadID == "" {
digestInput := path + filesystem + time.Now().String()
digestInput := rootfsPath + filesystem + time.Now().String()
workloadID = digest.Canonical.FromString(digestInput).Encoded()
}
workloadConfig := WorkloadConfig{
@ -107,6 +112,9 @@ func Archive(path string, ociConfig *v1.Image, options ArchiveOptions) (io.ReadC
Memory: memory,
AttestationURL: options.AttestationURL,
}
if options.TempDir == "" {
options.TempDir = tmpdir.GetTempDir()
}
// Do things which are specific to the type of TEE we're building for.
var chainBytes []byte
@ -165,12 +173,115 @@ func Archive(path string, ociConfig *v1.Image, options ArchiveOptions) (io.ReadC
workloadConfig.TeeData = string(encodedTeeData)
}
// We're going to want to add some content to the rootfs, so set up an
// overlay that uses it as a lower layer so that we can write to it.
st, err := system.Stat(rootfsPath)
if err != nil {
return nil, WorkloadConfig{}, fmt.Errorf("reading information about the container root filesystem: %w", err)
}
// Create a temporary directory to hold all of this. Use tmpdir.GetTempDir()
// instead of the passed-in location, which a crafty caller might have put in an
// overlay filesystem in storage because there tends to be more room there than
// in, say, /var/tmp, and the plaintext disk image, which we put in the passed-in
// location, can get quite large.
rootfsParentDir, err := os.MkdirTemp(tmpdir.GetTempDir(), "buildah-rootfs")
if err != nil {
return nil, WorkloadConfig{}, fmt.Errorf("setting up parent for container root filesystem: %w", err)
}
defer func() {
if err := os.RemoveAll(rootfsParentDir); err != nil {
logger.Warnf("cleaning up parent for container root filesystem: %v", err)
}
}()
// Create a mountpoint for the new overlay, which we'll use as the rootfs.
rootfsDir := filepath.Join(rootfsParentDir, "rootfs")
if err := idtools.MkdirAndChown(rootfsDir, fs.FileMode(st.Mode()), idtools.IDPair{UID: int(st.UID()), GID: int(st.GID())}); err != nil {
return nil, WorkloadConfig{}, fmt.Errorf("creating mount target for container root filesystem: %w", err)
}
defer func() {
if err := os.Remove(rootfsDir); err != nil {
logger.Warnf("removing mount target for container root filesystem: %v", err)
}
}()
// Create a directory to hold all of the overlay package's working state.
tempDir := filepath.Join(rootfsParentDir, "tmp")
if err = os.Mkdir(tempDir, 0o700); err != nil {
return nil, WorkloadConfig{}, err
}
// Create some working state in there.
overlayTempDir, err := overlay.TempDir(tempDir, int(st.UID()), int(st.GID()))
if err != nil {
return nil, WorkloadConfig{}, fmt.Errorf("setting up mount of container root filesystem: %w", err)
}
defer func() {
if err := overlay.RemoveTemp(overlayTempDir); err != nil {
logger.Warnf("cleaning up mount of container root filesystem: %v", err)
}
}()
// Create a mount point using that working state.
rootfsMount, err := overlay.Mount(overlayTempDir, rootfsPath, rootfsDir, 0, 0, options.GraphOptions)
if err != nil {
return nil, WorkloadConfig{}, fmt.Errorf("setting up support for overlay of container root filesystem: %w", err)
}
defer func() {
if err := overlay.Unmount(overlayTempDir); err != nil {
logger.Warnf("unmounting support for overlay of container root filesystem: %v", err)
}
}()
// Follow through on the overlay or bind mount, whatever the overlay package decided
// to leave to us to do.
rootfsMountOptions := strings.Join(rootfsMount.Options, ",")
logrus.Debugf("mounting %q to %q as %q with options %v", rootfsMount.Source, rootfsMount.Destination, rootfsMount.Type, rootfsMountOptions)
if err := mount.Mount(rootfsMount.Source, rootfsMount.Destination, rootfsMount.Type, rootfsMountOptions); err != nil {
return nil, WorkloadConfig{}, fmt.Errorf("mounting overlay of container root filesystem: %w", err)
}
defer func() {
logrus.Debugf("unmounting %q", rootfsMount.Destination)
if err := mount.Unmount(rootfsMount.Destination); err != nil {
logger.Warnf("unmounting overlay of container root filesystem: %v", err)
}
}()
// Pretend that we didn't have to do any of the preceding.
rootfsPath = rootfsDir
// Write extra content to the rootfs, creating intermediate directories if necessary.
for location, content := range options.ExtraImageContent {
err := func() error {
if err := idtools.MkdirAllAndChownNew(filepath.Dir(filepath.Join(rootfsPath, location)), 0o755, idtools.IDPair{UID: int(st.UID()), GID: int(st.GID())}); err != nil {
return fmt.Errorf("ensuring %q is present in container root filesystem: %w", filepath.Dir(location), err)
}
output, err := os.OpenFile(filepath.Join(rootfsPath, location), os.O_CREATE|os.O_WRONLY|os.O_TRUNC, 0o644)
if err != nil {
return fmt.Errorf("preparing to write %q to container root filesystem: %w", location, err)
}
defer output.Close()
input, err := os.Open(content)
if err != nil {
return err
}
defer input.Close()
if _, err := io.Copy(output, input); err != nil {
return fmt.Errorf("copying contents of %q to %q in container root filesystem: %w", content, location, err)
}
if err := output.Chown(int(st.UID()), int(st.GID())); err != nil {
return fmt.Errorf("setting owner of %q in the container root filesystem: %w", location, err)
}
if err := output.Chmod(0o644); err != nil {
return fmt.Errorf("setting permissions on %q in the container root filesystem: %w", location, err)
}
return nil
}()
if err != nil {
return nil, WorkloadConfig{}, err
}
}
// Write part of the config blob where the krun init process will be
// looking for it. The oci2cw tool used `buildah inspect` output, but
// init is just looking for fields that have the right names in any
// object, and the image's config will have that, so let's try encoding
// it directly.
krunConfigPath := filepath.Join(path, ".krun_config.json")
krunConfigPath := filepath.Join(rootfsPath, ".krun_config.json")
krunConfigBytes, err := json.Marshal(ociConfig)
if err != nil {
return nil, WorkloadConfig{}, fmt.Errorf("creating .krun_config from image configuration: %w", err)
@ -178,11 +289,6 @@ func Archive(path string, ociConfig *v1.Image, options ArchiveOptions) (io.ReadC
if err := ioutils.AtomicWriteFile(krunConfigPath, krunConfigBytes, 0o600); err != nil {
return nil, WorkloadConfig{}, fmt.Errorf("saving krun config: %w", err)
}
defer func() {
if err := os.Remove(krunConfigPath); err != nil {
logger.Warnf("removing krun configuration file: %v", err)
}
}()
// Encode the workload config, in case it fails for any reason.
cleanedUpWorkloadConfig := workloadConfig
@ -213,7 +319,7 @@ func Archive(path string, ociConfig *v1.Image, options ArchiveOptions) (io.ReadC
imageSize := slop(options.ImageSize, options.Slop)
if imageSize == 0 {
var sourceSize int64
if err := filepath.WalkDir(path, func(path string, d fs.DirEntry, err error) error {
if err := filepath.WalkDir(rootfsPath, func(path string, d fs.DirEntry, err error) error {
if err != nil && !errors.Is(err, os.ErrNotExist) && !errors.Is(err, os.ErrPermission) {
return err
}
@ -261,7 +367,7 @@ func Archive(path string, ociConfig *v1.Image, options ArchiveOptions) (io.ReadC
}
// Format the disk image with the filesystem contents.
if _, stderr, err := MakeFS(path, plain.Name(), filesystem); err != nil {
if _, stderr, err := MakeFS(rootfsPath, plain.Name(), filesystem); err != nil {
if strings.TrimSpace(stderr) != "" {
return nil, WorkloadConfig{}, fmt.Errorf("%s: %w", strings.TrimSpace(stderr), err)
}
@ -381,8 +487,8 @@ func Archive(path string, ociConfig *v1.Image, options ArchiveOptions) (io.ReadC
tmpHeader.Name = "tmp/"
tmpHeader.Typeflag = tar.TypeDir
tmpHeader.Mode = 0o1777
tmpHeader.Uname, workloadConfigHeader.Gname = "", ""
tmpHeader.Uid, workloadConfigHeader.Gid = 0, 0
tmpHeader.Uname, tmpHeader.Gname = "", ""
tmpHeader.Uid, tmpHeader.Gid = 0, 0
tmpHeader.Size = 0
if err = tw.WriteHeader(tmpHeader); err != nil {
logrus.Errorf("writing header for %q: %v", tmpHeader.Name, err)

Binary file not shown.

View File

@ -2,5 +2,5 @@ package mkcw
import _ "embed"
//go:embed "embed/entrypoint.gz"
//go:embed "embed/entrypoint_amd64.gz"
var entrypointCompressedBytes []byte

View File

@ -0,0 +1,296 @@
package sbom
import (
"encoding/json"
"fmt"
"io"
"os"
"sort"
"github.com/containers/buildah/define"
)
// getComponentNameVersionPurl extracts the "name", "version", and "purl"
// fields of a CycloneDX component record
func getComponentNameVersionPurl(anyComponent any) (string, string, error) {
if component, ok := anyComponent.(map[string]any); ok {
// read the "name" field
anyName, ok := component["name"]
if !ok {
return "", "", fmt.Errorf("no name in component %v", anyComponent)
}
name, ok := anyName.(string)
if !ok {
return "", "", fmt.Errorf("name %v is not a string", anyName)
}
// read the optional "version" field
var version string
anyVersion, ok := component["version"]
if ok {
if version, ok = anyVersion.(string); !ok {
return "", "", fmt.Errorf("version %v is not a string", anyVersion)
}
}
// combine them
nameWithVersion := name
if version != "" {
nameWithVersion += ("@" + version)
}
// read the optional "purl" field
var purl string
anyPurl, ok := component["purl"]
if ok {
if purl, ok = anyPurl.(string); !ok {
return "", "", fmt.Errorf("purl %v is not a string", anyPurl)
}
}
return nameWithVersion, purl, nil
}
return "", "", fmt.Errorf("component %v is not an object", anyComponent)
}
// getPackageNameVersionInfoPurl extracts the "name", "versionInfo", and "purl"
// fields of an SPDX package record
func getPackageNameVersionInfoPurl(anyPackage any) (string, string, error) {
if pkg, ok := anyPackage.(map[string]any); ok {
// read the "name" field
anyName, ok := pkg["name"]
if !ok {
return "", "", fmt.Errorf("no name in package %v", anyPackage)
}
name, ok := anyName.(string)
if !ok {
return "", "", fmt.Errorf("name %v is not a string", anyName)
}
// read the optional "versionInfo" field
var versionInfo string
if anyVersionInfo, ok := pkg["versionInfo"]; ok {
if versionInfo, ok = anyVersionInfo.(string); !ok {
return "", "", fmt.Errorf("versionInfo %v is not a string", anyVersionInfo)
}
}
// combine them
nameWithVersionInfo := name
if versionInfo != "" {
nameWithVersionInfo += ("@" + versionInfo)
}
// now look for optional externalRefs[].purl if "referenceCategory"
// is "PACKAGE-MANAGER" and "referenceType" is "purl"
var purl string
if anyExternalRefs, ok := pkg["externalRefs"]; ok {
if externalRefs, ok := anyExternalRefs.([]any); ok {
for _, anyExternalRef := range externalRefs {
if externalRef, ok := anyExternalRef.(map[string]any); ok {
anyReferenceCategory, ok := externalRef["referenceCategory"]
if !ok {
continue
}
if referenceCategory, ok := anyReferenceCategory.(string); !ok || referenceCategory != "PACKAGE-MANAGER" {
continue
}
anyReferenceType, ok := externalRef["referenceType"]
if !ok {
continue
}
if referenceType, ok := anyReferenceType.(string); !ok || referenceType != "purl" {
continue
}
if anyReferenceLocator, ok := externalRef["referenceLocator"]; ok {
if purl, ok = anyReferenceLocator.(string); !ok {
return "", "", fmt.Errorf("purl %v is not a string", anyReferenceLocator)
}
}
}
}
}
}
return nameWithVersionInfo, purl, nil
}
return "", "", fmt.Errorf("package %v is not an object", anyPackage)
}
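For orientation, a made-up SPDX package record of the shape the walk above expects, and the values it would extract; the PURL rides in an externalRefs entry whose referenceCategory is "PACKAGE-MANAGER" and whose referenceType is "purl":

package main

import (
	"encoding/json"
	"fmt"
)

func main() {
	// Illustrative SPDX package record; names and versions are made up.
	raw := `{
		"name": "zlib",
		"versionInfo": "1.2.13",
		"externalRefs": [
			{"referenceCategory": "PACKAGE-MANAGER",
			 "referenceType": "purl",
			 "referenceLocator": "pkg:generic/zlib@1.2.13"}
		]
	}`
	var pkg map[string]any
	if err := json.Unmarshal([]byte(raw), &pkg); err != nil {
		panic(err)
	}
	name := pkg["name"].(string) + "@" + pkg["versionInfo"].(string)
	var purl string
	for _, ref := range pkg["externalRefs"].([]any) {
		m := ref.(map[string]any)
		if m["referenceCategory"] == "PACKAGE-MANAGER" && m["referenceType"] == "purl" {
			purl, _ = m["referenceLocator"].(string)
		}
	}
	fmt.Println(name, purl) // zlib@1.2.13 pkg:generic/zlib@1.2.13
}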
// getLicenseID extracts the "licenseId" field of an SPDX license record
func getLicenseID(anyLicense any) (string, error) {
var licenseID string
if lic, ok := anyLicense.(map[string]any); ok {
anyID, ok := lic["licenseId"]
if !ok {
return "", fmt.Errorf("no licenseId in license %v", anyID)
}
id, ok := anyID.(string)
if !ok {
return "", fmt.Errorf("licenseId %v is not a string", anyID)
}
licenseID = id
}
return licenseID, nil
}
// mergeSlicesWithoutDuplicates merges a named slice in "base" with items from
// the same slice in "merge", so long as getKey() returns values for them that
// it didn't for items from the "base" slice
func mergeSlicesWithoutDuplicates(base, merge map[string]any, sliceField string, getKey func(record any) (string, error)) error {
uniqueKeys := make(map[string]struct{})
// go through all of the values in the base slice, grab their
// keys, and note them
baseRecords := base[sliceField]
baseRecordsSlice, ok := baseRecords.([]any)
if !ok {
baseRecordsSlice = []any{}
}
for _, anyRecord := range baseRecordsSlice {
key, err := getKey(anyRecord)
if err != nil {
return err
}
uniqueKeys[key] = struct{}{}
}
// go through all of the record values in the merge doc, grab their
// associated keys, and append them to the base records slice if we
// haven't seen the key yet
mergeRecords := merge[sliceField]
mergeRecordsSlice, ok := mergeRecords.([]any)
if !ok {
mergeRecordsSlice = []any{}
}
for _, anyRecord := range mergeRecordsSlice {
key, err := getKey(anyRecord)
if err != nil {
return err
}
if _, present := uniqueKeys[key]; !present {
baseRecordsSlice = append(baseRecordsSlice, anyRecord)
uniqueKeys[key] = struct{}{}
}
}
if len(baseRecordsSlice) > 0 {
base[sliceField] = baseRecordsSlice
}
return nil
}
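The same first-one-wins merge idea in a minimal concrete form, assuming string records that act as their own keys (the real helper operates on []any slices pulled from decoded JSON documents):

package main

import "fmt"

func mergeWithoutDuplicates(base, merge []string) []string {
	seen := make(map[string]struct{})
	for _, r := range base {
		seen[r] = struct{}{}
	}
	// Records from the merge slice are appended only if their key was not
	// already present in the base slice.
	for _, r := range merge {
		if _, ok := seen[r]; !ok {
			base = append(base, r)
			seen[r] = struct{}{}
		}
	}
	return base
}

func main() {
	fmt.Println(mergeWithoutDuplicates([]string{"a@1", "b@2"}, []string{"b@2", "c@3"}))
	// [a@1 b@2 c@3]
}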
// decodeJSON decodes a file into a map
func decodeJSON(inputFile string, document *map[string]any) error {
src, err := os.Open(inputFile)
if err != nil {
return err
}
defer src.Close()
if err = json.NewDecoder(src).Decode(document); err != nil {
return fmt.Errorf("decoding JSON document from %q: %w", inputFile, err)
}
return nil
}
// encodeJSON encodes a map and saves it to a file
func encodeJSON(outputFile string, document any) error {
dst, err := os.Create(outputFile)
if err != nil {
return err
}
defer dst.Close()
if err = json.NewEncoder(dst).Encode(document); err != nil {
return fmt.Errorf("writing JSON document to %q: %w", outputFile, err)
}
return nil
}
// Merge adds the contents of inputSBOM to inputOutputSBOM using one of a
// handful of named strategies.
func Merge(mergeStrategy define.SBOMMergeStrategy, inputOutputSBOM, inputSBOM, outputPURL string) (err error) {
type purlImageContents struct {
Dependencies []string `json:"dependencies,omitempty"`
}
type purlDocument struct {
ImageContents purlImageContents `json:"image_contents,omitempty"`
}
purls := []string{}
seenPurls := make(map[string]struct{})
switch mergeStrategy {
case define.SBOMMergeStrategyCycloneDXByComponentNameAndVersion:
var base, merge map[string]any
if err = decodeJSON(inputOutputSBOM, &base); err != nil {
return fmt.Errorf("reading first SBOM to be merged from %q: %w", inputOutputSBOM, err)
}
if err = decodeJSON(inputSBOM, &merge); err != nil {
return fmt.Errorf("reading second SBOM to be merged from %q: %w", inputSBOM, err)
}
// merge the "components" lists based on unique combinations of
// "name" and "version" fields, and save unique package URL
// values
err = mergeSlicesWithoutDuplicates(base, merge, "components", func(anyPackage any) (string, error) {
nameWithVersion, purl, err := getComponentNameVersionPurl(anyPackage)
if purl != "" {
if _, seen := seenPurls[purl]; !seen {
purls = append(purls, purl)
seenPurls[purl] = struct{}{}
}
}
return nameWithVersion, err
})
if err != nil {
return fmt.Errorf("merging the %q field of CycloneDX SBOMs: %w", "components", err)
}
// save the updated doc
err = encodeJSON(inputOutputSBOM, base)
case define.SBOMMergeStrategySPDXByPackageNameAndVersionInfo:
var base, merge map[string]any
if err = decodeJSON(inputOutputSBOM, &base); err != nil {
return fmt.Errorf("reading first SBOM to be merged from %q: %w", inputOutputSBOM, err)
}
if err = decodeJSON(inputSBOM, &merge); err != nil {
return fmt.Errorf("reading second SBOM to be merged from %q: %w", inputSBOM, err)
}
// merge the "packages" lists based on unique combinations of
// "name" and "versionInfo" fields, and save unique package URL
// values
err = mergeSlicesWithoutDuplicates(base, merge, "packages", func(anyPackage any) (string, error) {
nameWithVersionInfo, purl, err := getPackageNameVersionInfoPurl(anyPackage)
if purl != "" {
if _, seen := seenPurls[purl]; !seen {
purls = append(purls, purl)
seenPurls[purl] = struct{}{}
}
}
return nameWithVersionInfo, err
})
if err != nil {
return fmt.Errorf("merging the %q field of SPDX SBOMs: %w", "packages", err)
}
// merge the "hasExtractedLicensingInfos" lists based on unique
// "licenseId" values
err = mergeSlicesWithoutDuplicates(base, merge, "hasExtractedLicensingInfos", getLicenseID)
if err != nil {
return fmt.Errorf("merging the %q field of SPDX SBOMs: %w", "hasExtractedLicensingInfos", err)
}
// save the updated doc
err = encodeJSON(inputOutputSBOM, base)
case define.SBOMMergeStrategyCat:
dst, err := os.OpenFile(inputOutputSBOM, os.O_RDWR|os.O_APPEND, 0o644)
if err != nil {
return err
}
defer dst.Close()
src, err := os.Open(inputSBOM)
if err != nil {
return err
}
defer src.Close()
if _, err = io.Copy(dst, src); err != nil {
return err
}
}
if err == nil {
sort.Strings(purls)
err = encodeJSON(outputPURL, &purlDocument{purlImageContents{Dependencies: purls}})
}
return err
}
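A sketch of how a caller might drive Merge; the file names here are hypothetical, and since the package sits under internal/ it only compiles from within the buildah module:

package main

import (
	"github.com/containers/buildah/define"
	"github.com/containers/buildah/internal/sbom"
)

func main() {
	// Fold extra.json into base.json in place, deduplicating SPDX packages
	// by name and versionInfo, and write the collected PURLs to purls.json.
	if err := sbom.Merge(define.SBOMMergeStrategySPDXByPackageNameAndVersionInfo,
		"base.json", "extra.json", "purls.json"); err != nil {
		panic(err)
	}
}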

View File

@ -0,0 +1,65 @@
package sbom
import "github.com/containers/buildah/define"
// Preset returns a predefined SBOMScanOptions structure that has the passed-in
// name as one of its "Type" values.
func Preset(name string) (preset *define.SBOMScanOptions, err error) {
// If you change these, make sure you update references in
// buildah-commit.1.md and buildah-build.1.md to match!
presets := []define.SBOMScanOptions{
{
Type: []string{"", "syft", "syft-cyclonedx"},
Image: "ghcr.io/anchore/syft",
Commands: []string{
"/syft scan -q dir:{ROOTFS} --output cyclonedx-json={OUTPUT}",
"/syft scan -q dir:{CONTEXT} --output cyclonedx-json={OUTPUT}",
},
// ImageSBOMOutput: "/root/buildinfo/content_manifests/sbom-cyclonedx.json",
// ImagePURLOutput: "/root/buildinfo/content_manifests/sbom-purl.json",
MergeStrategy: define.SBOMMergeStrategyCycloneDXByComponentNameAndVersion,
},
{
Type: []string{"syft-spdx"},
Image: "ghcr.io/anchore/syft",
Commands: []string{
"/syft scan -q dir:{ROOTFS} --output spdx-json={OUTPUT}",
"/syft scan -q dir:{CONTEXT} --output spdx-json={OUTPUT}",
},
// ImageSBOMOutput: "/root/buildinfo/content_manifests/sbom-spdx.json",
// ImagePURLOutput: "/root/buildinfo/content_manifests/sbom-purl.json",
MergeStrategy: define.SBOMMergeStrategySPDXByPackageNameAndVersionInfo,
},
{
Type: []string{"trivy", "trivy-cyclonedx"},
Image: "ghcr.io/aquasecurity/trivy",
Commands: []string{
"trivy filesystem -q {ROOTFS} --format cyclonedx --output {OUTPUT}",
"trivy filesystem -q {CONTEXT} --format cyclonedx --output {OUTPUT}",
},
// ImageSBOMOutput: "/root/buildinfo/content_manifests/sbom-cyclonedx.json",
// ImagePURLOutput: "/root/buildinfo/content_manifests/sbom-purl.json",
MergeStrategy: define.SBOMMergeStrategyCycloneDXByComponentNameAndVersion,
},
{
Type: []string{"trivy-spdx"},
Image: "ghcr.io/aquasecurity/trivy",
Commands: []string{
"trivy filesystem -q {ROOTFS} --format spdx-json --output {OUTPUT}",
"trivy filesystem -q {CONTEXT} --format spdx-json --output {OUTPUT}",
},
// ImageSBOMOutput: "/root/buildinfo/content_manifests/sbom-spdx.json",
// ImagePURLOutput: "/root/buildinfo/content_manifests/sbom-purl.json",
MergeStrategy: define.SBOMMergeStrategySPDXByPackageNameAndVersionInfo,
},
}
for _, preset := range presets {
for _, presetName := range preset.Type {
if presetName == name {
return &preset, nil
}
}
}
return nil, nil
}
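Usage sketch for Preset (assumed, not part of the commit). Note that an unrecognized name returns (nil, nil) rather than an error, so callers have to check for a nil result:

package main

import (
	"fmt"

	"github.com/containers/buildah/internal/sbom" // internal: compiles only inside the buildah module
)

func main() {
	opts, err := sbom.Preset("syft")
	if err != nil {
		panic(err)
	}
	if opts == nil {
		panic("unknown preset name") // Preset returns (nil, nil) for unrecognized names
	}
	fmt.Println(opts.Image, opts.Commands[0])
	// ghcr.io/anchore/syft /syft scan -q dir:{ROOTFS} --output cyclonedx-json={OUTPUT}
}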

View File

@ -97,3 +97,8 @@ func ExportFromReader(input io.Reader, opts define.BuildOutputOption) error {
}
return nil
}
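// SetHas reports whether the string set m contains the key k.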
func SetHas(m map[string]struct{}, k string) bool {
_, ok := m[k]
return ok
}

View File

@ -54,6 +54,7 @@ func CacheParent() string {
return filepath.Join(tmpdir.GetTempDir(), buildahCacheDir+"-"+strconv.Itoa(unshare.GetRootlessUID()))
}
// FIXME: this code needs to be merged with pkg/parse/parse.go ValidateVolumeOpts
// GetBindMount parses a single bind mount entry from the --mount flag.
// Returns specifiedMount and a string containing the name of the image that we mounted, otherwise empty.
// The caller is expected to unmount any mounted images.
@ -69,8 +70,8 @@ func GetBindMount(ctx *types.SystemContext, args []string, contextDir string, st
fromImage := ""
for _, val := range args {
kv := strings.SplitN(val, "=", 2)
switch kv[0] {
argName, argValue, hasArgValue := strings.Cut(val, "=")
switch argName {
case "type":
// This is already processed
continue
@ -80,7 +81,7 @@ func GetBindMount(ctx *types.SystemContext, args []string, contextDir string, st
case "ro", "nosuid", "nodev", "noexec":
// TODO: detect duplication of these options.
// (Is this necessary?)
newMount.Options = append(newMount.Options, kv[0])
newMount.Options = append(newMount.Options, argName)
mountReadability = true
case "rw", "readwrite":
newMount.Options = append(newMount.Options, "rw")
@ -89,28 +90,31 @@ func GetBindMount(ctx *types.SystemContext, args []string, contextDir string, st
// Alias for "ro"
newMount.Options = append(newMount.Options, "ro")
mountReadability = true
case "shared", "rshared", "private", "rprivate", "slave", "rslave", "Z", "z", "U":
newMount.Options = append(newMount.Options, kv[0])
case "shared", "rshared", "private", "rprivate", "slave", "rslave", "Z", "z", "U", "no-dereference":
if hasArgValue {
return newMount, "", fmt.Errorf("%v: %w", val, errBadOptionArg)
}
newMount.Options = append(newMount.Options, argName)
case "from":
if len(kv) == 1 {
return newMount, "", fmt.Errorf("%v: %w", kv[0], errBadOptionArg)
if !hasArgValue {
return newMount, "", fmt.Errorf("%v: %w", argName, errBadOptionArg)
}
fromImage = kv[1]
fromImage = argValue
case "bind-propagation":
if len(kv) == 1 {
return newMount, "", fmt.Errorf("%v: %w", kv[0], errBadOptionArg)
if !hasArgValue {
return newMount, "", fmt.Errorf("%v: %w", argName, errBadOptionArg)
}
newMount.Options = append(newMount.Options, kv[1])
newMount.Options = append(newMount.Options, argValue)
case "src", "source":
if len(kv) == 1 {
return newMount, "", fmt.Errorf("%v: %w", kv[0], errBadOptionArg)
if !hasArgValue {
return newMount, "", fmt.Errorf("%v: %w", argName, errBadOptionArg)
}
newMount.Source = kv[1]
newMount.Source = argValue
case "target", "dst", "destination":
if len(kv) == 1 {
return newMount, "", fmt.Errorf("%v: %w", kv[0], errBadOptionArg)
if !hasArgValue {
return newMount, "", fmt.Errorf("%v: %w", argName, errBadOptionArg)
}
targetPath := kv[1]
targetPath := argValue
if !path.IsAbs(targetPath) {
targetPath = filepath.Join(workDir, targetPath)
}
@ -124,23 +128,20 @@ func GetBindMount(ctx *types.SystemContext, args []string, contextDir string, st
return newMount, "", fmt.Errorf("cannot pass 'relabel' option more than once: %w", errBadOptionArg)
}
setRelabel = true
if len(kv) != 2 {
return newMount, "", fmt.Errorf("%s mount option must be 'private' or 'shared': %w", kv[0], errBadMntOption)
}
switch kv[1] {
switch argValue {
case "private":
newMount.Options = append(newMount.Options, "Z")
case "shared":
newMount.Options = append(newMount.Options, "z")
default:
return newMount, "", fmt.Errorf("%s mount option must be 'private' or 'shared': %w", kv[0], errBadMntOption)
return newMount, "", fmt.Errorf("%s mount option must be 'private' or 'shared': %w", argName, errBadMntOption)
}
case "consistency":
// Option for OS X only, has no meaning on other platforms
// and can thus be safely ignored.
// See also the handling of the equivalent "delegated" and "cached" in ValidateVolumeOpts
default:
return newMount, "", fmt.Errorf("%v: %w", kv[0], errBadMntOption)
return newMount, "", fmt.Errorf("%v: %w", argName, errBadMntOption)
}
}
@ -244,15 +245,15 @@ func GetCacheMount(args []string, store storage.Store, imageMountLabel string, a
sharing := "shared"
for _, val := range args {
kv := strings.SplitN(val, "=", 2)
switch kv[0] {
argName, argValue, hasArgValue := strings.Cut(val, "=")
switch argName {
case "type":
// This is already processed
continue
case "nosuid", "nodev", "noexec":
// TODO: detect duplication of these options.
// (Is this necessary?)
newMount.Options = append(newMount.Options, kv[0])
newMount.Options = append(newMount.Options, argName)
case "rw", "readwrite":
newMount.Options = append(newMount.Options, "rw")
case "readonly", "ro":
@ -260,33 +261,33 @@ func GetCacheMount(args []string, store storage.Store, imageMountLabel string, a
newMount.Options = append(newMount.Options, "ro")
setReadOnly = true
case "Z", "z":
newMount.Options = append(newMount.Options, kv[0])
newMount.Options = append(newMount.Options, argName)
foundSElinuxLabel = true
case "shared", "rshared", "private", "rprivate", "slave", "rslave", "U":
newMount.Options = append(newMount.Options, kv[0])
newMount.Options = append(newMount.Options, argName)
setShared = true
case "sharing":
sharing = kv[1]
sharing = argValue
case "bind-propagation":
if len(kv) == 1 {
return newMount, nil, fmt.Errorf("%v: %w", kv[0], errBadOptionArg)
if !hasArgValue {
return newMount, nil, fmt.Errorf("%v: %w", argName, errBadOptionArg)
}
newMount.Options = append(newMount.Options, kv[1])
newMount.Options = append(newMount.Options, argValue)
case "id":
if len(kv) == 1 {
return newMount, nil, fmt.Errorf("%v: %w", kv[0], errBadOptionArg)
if !hasArgValue {
return newMount, nil, fmt.Errorf("%v: %w", argName, errBadOptionArg)
}
id = kv[1]
id = argValue
case "from":
if len(kv) == 1 {
return newMount, nil, fmt.Errorf("%v: %w", kv[0], errBadOptionArg)
if !hasArgValue {
return newMount, nil, fmt.Errorf("%v: %w", argName, errBadOptionArg)
}
fromStage = kv[1]
fromStage = argValue
case "target", "dst", "destination":
if len(kv) == 1 {
return newMount, nil, fmt.Errorf("%v: %w", kv[0], errBadOptionArg)
if !hasArgValue {
return newMount, nil, fmt.Errorf("%v: %w", argName, errBadOptionArg)
}
targetPath := kv[1]
targetPath := argValue
if !path.IsAbs(targetPath) {
targetPath = filepath.Join(workDir, targetPath)
}
@ -296,36 +297,36 @@ func GetCacheMount(args []string, store storage.Store, imageMountLabel string, a
newMount.Destination = targetPath
setDest = true
case "src", "source":
if len(kv) == 1 {
return newMount, nil, fmt.Errorf("%v: %w", kv[0], errBadOptionArg)
if !hasArgValue {
return newMount, nil, fmt.Errorf("%v: %w", argName, errBadOptionArg)
}
newMount.Source = kv[1]
newMount.Source = argValue
case "mode":
if len(kv) == 1 {
return newMount, nil, fmt.Errorf("%v: %w", kv[0], errBadOptionArg)
if !hasArgValue {
return newMount, nil, fmt.Errorf("%v: %w", argName, errBadOptionArg)
}
mode, err = strconv.ParseUint(kv[1], 8, 32)
mode, err = strconv.ParseUint(argValue, 8, 32)
if err != nil {
return newMount, nil, fmt.Errorf("unable to parse cache mode: %w", err)
}
case "uid":
if len(kv) == 1 {
return newMount, nil, fmt.Errorf("%v: %w", kv[0], errBadOptionArg)
if !hasArgValue {
return newMount, nil, fmt.Errorf("%v: %w", argName, errBadOptionArg)
}
uid, err = strconv.Atoi(kv[1])
uid, err = strconv.Atoi(argValue)
if err != nil {
return newMount, nil, fmt.Errorf("unable to parse cache uid: %w", err)
}
case "gid":
if len(kv) == 1 {
return newMount, nil, fmt.Errorf("%v: %w", kv[0], errBadOptionArg)
if !hasArgValue {
return newMount, nil, fmt.Errorf("%v: %w", argName, errBadOptionArg)
}
gid, err = strconv.Atoi(kv[1])
gid, err = strconv.Atoi(argValue)
if err != nil {
return newMount, nil, fmt.Errorf("unable to parse cache gid: %w", err)
}
default:
return newMount, nil, fmt.Errorf("%v: %w", kv[0], errBadMntOption)
return newMount, nil, fmt.Errorf("%v: %w", argName, errBadMntOption)
}
}
@ -590,42 +591,42 @@ func GetTmpfsMount(args []string) (specs.Mount, error) {
setDest := false
for _, val := range args {
kv := strings.SplitN(val, "=", 2)
switch kv[0] {
argName, argValue, hasArgValue := strings.Cut(val, "=")
switch argName {
case "type":
// This is already processed
continue
case "ro", "nosuid", "nodev", "noexec":
newMount.Options = append(newMount.Options, kv[0])
newMount.Options = append(newMount.Options, argName)
case "readonly":
// Alias for "ro"
newMount.Options = append(newMount.Options, "ro")
case "tmpcopyup":
//the path that is shadowed by the tmpfs mount is recursively copied up to the tmpfs itself.
newMount.Options = append(newMount.Options, kv[0])
newMount.Options = append(newMount.Options, argName)
case "tmpfs-mode":
if len(kv) == 1 {
return newMount, fmt.Errorf("%v: %w", kv[0], errBadOptionArg)
if !hasArgValue {
return newMount, fmt.Errorf("%v: %w", argName, errBadOptionArg)
}
newMount.Options = append(newMount.Options, fmt.Sprintf("mode=%s", kv[1]))
newMount.Options = append(newMount.Options, fmt.Sprintf("mode=%s", argValue))
case "tmpfs-size":
if len(kv) == 1 {
return newMount, fmt.Errorf("%v: %w", kv[0], errBadOptionArg)
if !hasArgValue {
return newMount, fmt.Errorf("%v: %w", argName, errBadOptionArg)
}
newMount.Options = append(newMount.Options, fmt.Sprintf("size=%s", kv[1]))
newMount.Options = append(newMount.Options, fmt.Sprintf("size=%s", argValue))
case "src", "source":
return newMount, errors.New("source is not supported with tmpfs mounts")
case "target", "dst", "destination":
if len(kv) == 1 {
return newMount, fmt.Errorf("%v: %w", kv[0], errBadOptionArg)
if !hasArgValue {
return newMount, fmt.Errorf("%v: %w", argName, errBadOptionArg)
}
if err := parse.ValidateVolumeCtrDir(kv[1]); err != nil {
if err := parse.ValidateVolumeCtrDir(argValue); err != nil {
return newMount, err
}
newMount.Destination = kv[1]
newMount.Destination = argValue
setDest = true
default:
return newMount, fmt.Errorf("%v: %w", kv[0], errBadMntOption)
return newMount, fmt.Errorf("%v: %w", argName, errBadMntOption)
}
}

View File

@ -1,8 +1,10 @@
package cli
// the cli package contains urfave/cli related structs that help make up
// the command line for buildah commands. it resides here so other projects
// that vendor in this code can use them too.
// the cli package contains spf13/cobra related structs that help make up
// the command line for buildah commands. this file's contents are better
// suited for pkg/parse, but since this code imports pkg/util, which in turn
// imports pkg/parse, having it there would create a cyclic dependency, so
// here we are.
import (
"errors"
@ -17,6 +19,7 @@ import (
"github.com/containers/buildah/pkg/parse"
"github.com/containers/buildah/pkg/util"
"github.com/containers/common/pkg/auth"
cutil "github.com/containers/common/pkg/util"
"github.com/containers/image/v5/docker/reference"
"github.com/containers/image/v5/types"
"github.com/opencontainers/runtime-spec/specs-go"
@ -88,20 +91,10 @@ func GenBuildOptions(c *cobra.Command, inputArgs []string, iopts BuildOptions) (
removeAll = append(removeAll, iopts.BudResults.Authfile)
}
// Allow for --pull, --pull=true, --pull=false, --pull=never, --pull=always
// --pull-always and --pull-never. The --pull-never and --pull-always options
// will not be documented.
pullPolicy := define.PullIfMissing
if strings.EqualFold(strings.TrimSpace(iopts.Pull), "true") {
pullPolicy = define.PullIfNewer
pullPolicy, err := parse.PullPolicyFromOptions(c)
if err != nil {
return options, nil, nil, err
}
if iopts.PullAlways || strings.EqualFold(strings.TrimSpace(iopts.Pull), "always") {
pullPolicy = define.PullAlways
}
if iopts.PullNever || strings.EqualFold(strings.TrimSpace(iopts.Pull), "never") {
pullPolicy = define.PullNever
}
logrus.Debugf("Pull Policy for pull [%v]", pullPolicy)
args := make(map[string]string)
if c.Flag("build-arg-file").Changed {
@ -224,21 +217,6 @@ func GenBuildOptions(c *cobra.Command, inputArgs []string, iopts BuildOptions) (
return options, nil, nil, err
}
pullFlagsCount := 0
if c.Flag("pull").Changed {
pullFlagsCount++
}
if c.Flag("pull-always").Changed {
pullFlagsCount++
}
if c.Flag("pull-never").Changed {
pullFlagsCount++
}
if pullFlagsCount > 1 {
return options, nil, nil, errors.New("can only set one of 'pull' or 'pull-always' or 'pull-never'")
}
if (c.Flag("rm").Changed || c.Flag("force-rm").Changed) && (!c.Flag("layers").Changed && !c.Flag("no-cache").Changed) {
return options, nil, nil, errors.New("'rm' and 'force-rm' can only be set with either 'layers' or 'no-cache'")
}
@ -356,6 +334,24 @@ func GenBuildOptions(c *cobra.Command, inputArgs []string, iopts BuildOptions) (
}
}
var sbomScanOptions []define.SBOMScanOptions
if c.Flag("sbom").Changed || c.Flag("sbom-scanner-command").Changed || c.Flag("sbom-scanner-image").Changed || c.Flag("sbom-image-output").Changed || c.Flag("sbom-merge-strategy").Changed || c.Flag("sbom-output").Changed || c.Flag("sbom-image-output").Changed || c.Flag("sbom-purl-output").Changed || c.Flag("sbom-image-purl-output").Changed {
sbomScanOption, err := parse.SBOMScanOptions(c)
if err != nil {
return options, nil, nil, err
}
if !cutil.StringInSlice(contextDir, sbomScanOption.ContextDir) {
sbomScanOption.ContextDir = append(sbomScanOption.ContextDir, contextDir)
}
for _, abc := range additionalBuildContext {
if !abc.IsURL && !abc.IsImage {
sbomScanOption.ContextDir = append(sbomScanOption.ContextDir, abc.Value)
}
}
sbomScanOption.PullPolicy = pullPolicy
sbomScanOptions = append(sbomScanOptions, *sbomScanOption)
}
options = define.BuildOptions{
AddCapabilities: iopts.CapAdd,
AdditionalBuildContexts: additionalBuildContext,
@ -416,6 +412,7 @@ func GenBuildOptions(c *cobra.Command, inputArgs []string, iopts BuildOptions) (
Runtime: iopts.Runtime,
RuntimeArgs: runtimeFlags,
RusageLogFile: iopts.RusageLogFile,
SBOMScanOptions: sbomScanOptions,
SignBy: iopts.SignBy,
SignaturePolicyPath: iopts.SignaturePolicy,
SkipUnusedStages: types.NewOptionalBool(iopts.SkipUnusedStages),

View File

@ -1,6 +1,6 @@
package cli
// the cli package contains urfave/cli related structs that help make up
// the cli package contains spf13/cobra related structs that help make up
// the command line for buildah commands. it resides here so other projects
// that vendor in this code can use them too.
@ -90,6 +90,14 @@ type BudResults struct {
Rm bool
Runtime string
RuntimeFlags []string
SbomPreset string
SbomScannerImage string
SbomScannerCommand []string
SbomMergeStrategy string
SbomOutput string
SbomImgOutput string
SbomPurlOutput string
SbomImgPurlOutput string
Secrets []string
SSH []string
SignaturePolicy string
@ -110,6 +118,7 @@ type BudResults struct {
OSFeatures []string
OSVersion string
CWOptions string
SBOMOptions []string
}
// FromAndBugResults represents the results for common flags
@ -253,7 +262,7 @@ func GetBudFlags(flags *BudResults) pflag.FlagSet {
fs.String("os", runtime.GOOS, "set the OS to the provided value instead of the current operating system of the host")
fs.StringArrayVar(&flags.OSFeatures, "os-feature", []string{}, "set required OS `feature` for the target image in addition to values from the base image")
fs.StringVar(&flags.OSVersion, "os-version", "", "set required OS `version` for the target image instead of the value from the base image")
fs.StringVar(&flags.Pull, "pull", "true", "pull the image from the registry if newer or not present in store, if false, only pull the image if not present, if always, pull the image even if the named image is present in store, if never, only use the image present in store if available")
fs.StringVar(&flags.Pull, "pull", "true", "pull base and SBOM scanner images from the registry if newer or not present in store, if false, only pull base and SBOM scanner images if not present, if always, pull base and SBOM scanner images even if the named images are present in store, if never, only use images present in store if available")
fs.Lookup("pull").NoOptDefVal = "true" //allow `--pull ` to be set to `true` as expected.
fs.BoolVar(&flags.PullAlways, "pull-always", false, "pull the image even if the named image is present in store")
if err := fs.MarkHidden("pull-always"); err != nil {
@ -269,6 +278,14 @@ func GetBudFlags(flags *BudResults) pflag.FlagSet {
fs.BoolVar(&flags.Rm, "rm", true, "remove intermediate containers after a successful build")
// "runtime" definition moved to avoid name collision in podman build. Defined in cmd/buildah/build.go.
fs.StringSliceVar(&flags.RuntimeFlags, "runtime-flag", []string{}, "add global flags for the container runtime")
fs.StringVar(&flags.SbomPreset, "sbom", "", "scan working container using `preset` configuration")
fs.StringVar(&flags.SbomScannerImage, "sbom-scanner-image", "", "scan working container using scanner command from `image`")
fs.StringArrayVar(&flags.SbomScannerCommand, "sbom-scanner-command", nil, "scan working container using `command` in scanner image")
fs.StringVar(&flags.SbomMergeStrategy, "sbom-merge-strategy", "", "merge scan results using `strategy`")
fs.StringVar(&flags.SbomOutput, "sbom-output", "", "save scan results to `file`")
fs.StringVar(&flags.SbomImgOutput, "sbom-image-output", "", "add scan results to image as `path`")
fs.StringVar(&flags.SbomPurlOutput, "sbom-purl-output", "", "save scan results to `file`")
fs.StringVar(&flags.SbomImgPurlOutput, "sbom-image-purl-output", "", "add scan results to image as `path`")
fs.StringArrayVar(&flags.Secrets, "secret", []string{}, "secret file to expose to the build")
fs.StringVar(&flags.SignBy, "sign-by", "", "sign the image using a GPG key with the specified `FINGERPRINT`")
fs.StringVar(&flags.SignaturePolicy, "signature-policy", "", "`pathname` of signature policy file (not usually used)")
@ -324,6 +341,14 @@ func GetBudFlagsCompletions() commonComp.FlagCompletions {
flagCompletion["output"] = commonComp.AutocompleteNone
flagCompletion["pull"] = commonComp.AutocompleteDefault
flagCompletion["runtime-flag"] = commonComp.AutocompleteNone
flagCompletion["sbom"] = commonComp.AutocompleteNone
flagCompletion["sbom-scanner-image"] = commonComp.AutocompleteNone
flagCompletion["sbom-scanner-command"] = commonComp.AutocompleteNone
flagCompletion["sbom-merge-strategy"] = commonComp.AutocompleteNone
flagCompletion["sbom-output"] = commonComp.AutocompleteDefault
flagCompletion["sbom-image-output"] = commonComp.AutocompleteNone
flagCompletion["sbom-purl-output"] = commonComp.AutocompleteDefault
flagCompletion["sbom-image-purl-output"] = commonComp.AutocompleteNone
flagCompletion["secret"] = commonComp.AutocompleteNone
flagCompletion["sign-by"] = commonComp.AutocompleteNone
flagCompletion["signature-policy"] = commonComp.AutocompleteNone

View File

@ -4,6 +4,7 @@
package jail
import (
"fmt"
"strconv"
"strings"
"sync"
@ -187,6 +188,41 @@ func (j *jail) Set(jconf *config) error {
return err
}
func parseVersion(version string) (string, int, int, int, error) {
// Expected formats:
// <major>.<minor>-RELEASE, optionally followed by -p<patchlevel>
// <major>.<minor>-STABLE
// <major>.<minor>-CURRENT
parts := strings.Split(version, "-")
if len(parts) < 2 || len(parts) > 3 {
return "", -1, -1, -1, fmt.Errorf("unexpected OS version: %s", version)
}
ver := strings.Split(parts[0], ".")
if len(ver) != 2 {
return "", -1, -1, -1, fmt.Errorf("unexpected OS version: %s", version)
}
major, err := strconv.Atoi(ver[0])
if err != nil {
return "", -1, -1, -1, fmt.Errorf("unexpected OS version: %s", version)
}
minor, err := strconv.Atoi(ver[1])
if err != nil {
return "", -1, -1, -1, fmt.Errorf("unexpected OS version: %s", version)
}
patchlevel := 0
if len(parts) == 3 {
if parts[1] != "RELEASE" || !strings.HasPrefix(parts[2], "p") {
return "", -1, -1, -1, fmt.Errorf("unexpected OS version: %s", version)
}
patchlevel, err = strconv.Atoi(strings.TrimPrefix(parts[2], "p"))
if err != nil {
return "", -1, -1, -1, fmt.Errorf("unexpected OS version: %s", version)
}
}
return parts[1], major, minor, patchlevel, nil
}
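To make the accepted formats concrete, a few illustrative calls (a sketch usable only inside this package, since parseVersion is unexported; the inputs are assumed examples):
    branch, major, minor, patch, err := parseVersion("13.2-RELEASE-p3")
    // branch == "RELEASE", major == 13, minor == 2, patch == 3, err == nil
    branch, major, minor, patch, err = parseVersion("14.0-STABLE")
    // branch == "STABLE", major == 14, minor == 0, patch == 0, err == nil
    _, _, _, _, err = parseVersion("14.0-RELEASE-x1")
    // err != nil: a third component must look like "p<patchlevel>"
    _, _, _, _, _ = branch, major, minor, patch, err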
// Return true if it's necessary to have a separate jail to own the vnet. For
// FreeBSD 13.3 and later, we don't need a separate vnet jail since it is
// possible to configure the network without either attaching to the container's
@ -194,36 +230,14 @@ func (j *jail) Set(jconf *config) error {
// any reason, we fail to parse the OS version, we default to returning true.
func NeedVnetJail() bool {
needVnetJailOnce.Do(func() {
// FreeBSD 13.3 and later have support for 'ifconfig -j' and 'route -j'
needVnetJail = true
version, err := util.ReadKernelVersion()
if err != nil {
logrus.Errorf("failed to determine OS version: %v", err)
return
}
// Expected formats "<major>.<minor>-<RELEASE|STABLE|CURRENT>" optionally
// followed by "-<patchlevel>"
parts := strings.Split(string(version), "-")
if len(parts) < 2 {
logrus.Errorf("unexpected OS version: %s", version)
return
}
ver := strings.Split(parts[0], ".")
if len(parts) != 2 {
logrus.Errorf("unexpected OS version: %s", version)
return
}
// FreeBSD 13.3 and later have support for 'ifconfig -j' and 'route -j'
major, err := strconv.Atoi(ver[0])
if err != nil {
logrus.Errorf("unexpected OS version: %s", version)
return
}
minor, err := strconv.Atoi(ver[1])
if err != nil {
logrus.Errorf("unexpected OS version: %s", version)
return
}
_, major, minor, _, err := parseVersion(version)
if err != nil {
logrus.Errorf("failed to parse OS version: %v", err)
return
}
if major > 13 || (major == 13 && minor > 2) {
needVnetJail = false
}

View File

@ -6,6 +6,7 @@ import (
"os/exec"
"path/filepath"
"strings"
"syscall"
"errors"
@ -14,7 +15,6 @@ import (
"github.com/containers/storage/pkg/unshare"
"github.com/opencontainers/runtime-spec/specs-go"
"github.com/sirupsen/logrus"
"golang.org/x/sys/unix"
)
// Options type holds various configuration options for overlay
@ -113,10 +113,10 @@ func MountReadOnly(contentDir, source, dest string, rootUID, rootGID int, graphO
// findMountProgram finds if any mount program is specified in the graph options.
func findMountProgram(graphOptions []string) string {
mountMap := map[string]bool{
".mount_program": true,
"overlay.mount_program": true,
"overlay2.mount_program": true,
mountMap := map[string]struct{}{
".mount_program": {},
"overlay.mount_program": {},
"overlay2.mount_program": {},
}
for _, i := range graphOptions {
@ -126,7 +126,7 @@ func findMountProgram(graphOptions []string) string {
}
key := s[0]
val := s[1]
if mountMap[key] {
if _, has := mountMap[key]; has {
return val
}
}
@ -180,7 +180,7 @@ func Unmount(contentDir string) error {
}
// Ignore EINVAL as the specified merge dir is not a mount point
if err := unix.Unmount(mergeDir, 0); err != nil && !errors.Is(err, os.ErrNotExist) && err != unix.EINVAL {
if err := system.Unmount(mergeDir); err != nil && !errors.Is(err, os.ErrNotExist) && !errors.Is(err, syscall.EINVAL) {
return fmt.Errorf("unmount overlay %s: %w", mergeDir, err)
}
return nil

View File

@ -18,6 +18,9 @@ import (
// It also allows the API caller to set a custom workdir, upperdir and other
// overlay options. This API is currently used by podman.
func MountWithOptions(contentDir, source, dest string, opts *Options) (mount specs.Mount, Err error) {
if opts == nil {
opts = &Options{}
}
if opts.ReadOnly {
// Read-only overlay mounts can be simulated with nullfs
mount.Source = source

View File

@ -17,6 +17,9 @@ import (
// It also allows the API caller to set a custom workdir, upperdir and other
// overlay options. This API is currently used by podman.
func MountWithOptions(contentDir, source, dest string, opts *Options) (mount specs.Mount, Err error) {
if opts == nil {
opts = &Options{}
}
mergeDir := filepath.Join(contentDir, "merge")
// Create overlay mount options for rw/ro.

View File

@ -0,0 +1,20 @@
//go:build !freebsd && !linux
// +build !freebsd,!linux
package overlay
import (
"fmt"
"runtime"
"github.com/opencontainers/runtime-spec/specs-go"
)
// MountWithOptions creates a subdir of the contentDir based on the source directory
// from the source system. It then mounts up the source directory on to the
// generated mount point and returns the mount point to the caller.
// It also allows the API caller to set a custom workdir, upperdir and other
// overlay options. This API is currently used by podman.
func MountWithOptions(contentDir, source, dest string, opts *Options) (mount specs.Mount, err error) {
return mount, fmt.Errorf("read/write overlay mounts not supported on %q", runtime.GOOS)
}

View File

@ -18,6 +18,7 @@ import (
"github.com/containers/buildah/define"
mkcwtypes "github.com/containers/buildah/internal/mkcw/types"
internalParse "github.com/containers/buildah/internal/parse"
"github.com/containers/buildah/internal/sbom"
"github.com/containers/buildah/internal/tmpdir"
"github.com/containers/buildah/pkg/sshagent"
"github.com/containers/common/pkg/auth"
@ -446,6 +447,58 @@ func SystemContextFromFlagSet(flags *pflag.FlagSet, findFlagFunc func(name strin
return ctx, nil
}
// PullPolicyFromOptions returns a PullPolicy that reflects the combination of
// the specified "pull" and undocumented "pull-always" and "pull-never" flags.
func PullPolicyFromOptions(c *cobra.Command) (define.PullPolicy, error) {
return PullPolicyFromFlagSet(c.Flags(), c.Flag)
}
// PullPolicyFromFlagSet returns a PullPolicy that reflects the combination of
// the specified "pull" and undocumented "pull-always" and "pull-never" flags.
func PullPolicyFromFlagSet(flags *pflag.FlagSet, findFlagFunc func(name string) *pflag.Flag) (define.PullPolicy, error) {
pullFlagsCount := 0
if findFlagFunc("pull").Changed {
pullFlagsCount++
}
if findFlagFunc("pull-always").Changed {
pullFlagsCount++
}
if findFlagFunc("pull-never").Changed {
pullFlagsCount++
}
if pullFlagsCount > 1 {
return 0, errors.New("can only set one of 'pull' or 'pull-always' or 'pull-never'")
}
// Allow for --pull, --pull=true, --pull=false, --pull=never, --pull=always
// --pull-always and --pull-never. The --pull-never and --pull-always options
// will not be documented.
pullPolicy := define.PullIfMissing
pullFlagValue := findFlagFunc("pull").Value.String()
if strings.EqualFold(pullFlagValue, "true") || strings.EqualFold(pullFlagValue, "ifnewer") {
pullPolicy = define.PullIfNewer
}
pullAlwaysFlagValue, err := flags.GetBool("pull-always")
if err != nil {
return 0, err
}
if pullAlwaysFlagValue || strings.EqualFold(pullFlagValue, "always") {
pullPolicy = define.PullAlways
}
pullNeverFlagValue, err := flags.GetBool("pull-never")
if err != nil {
return 0, err
}
if pullNeverFlagValue || strings.EqualFold(pullFlagValue, "never") {
pullPolicy = define.PullNever
}
logrus.Debugf("Pull Policy for pull [%v]", pullPolicy)
return pullPolicy, nil
}
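A minimal sketch of how the flag combinations resolve, assuming a flag set registered the same way GetBudFlags registers "pull", "pull-always" and "pull-never":
    fs := pflag.NewFlagSet("demo", pflag.ContinueOnError)
    fs.String("pull", "true", "")
    fs.Lookup("pull").NoOptDefVal = "true"
    fs.Bool("pull-always", false, "")
    fs.Bool("pull-never", false, "")
    _ = fs.Parse([]string{"--pull=never"})
    policy, err := PullPolicyFromFlagSet(fs, fs.Lookup)
    // policy == define.PullNever, err == nil
    // "--pull", "--pull=true" and "--pull=ifnewer" yield define.PullIfNewer;
    // "--pull=false" yields define.PullIfMissing; "--pull=always" or the
    // hidden --pull-always flag yields define.PullAlways.
    _, _ = policy, err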
func getAuthFile(authfile string) string {
if authfile != "" {
absAuthfile, err := filepath.Abs(authfile)
@ -709,6 +762,73 @@ func GetConfidentialWorkloadOptions(arg string) (define.ConfidentialWorkloadOpti
return options, nil
}
// SBOMScanOptions parses the SBOM scanning-related build options from the cli
func SBOMScanOptions(c *cobra.Command) (*define.SBOMScanOptions, error) {
return SBOMScanOptionsFromFlagSet(c.Flags(), c.Flag)
}
// SBOMScanOptionsFromFlagSet parses scan settings from the cli
func SBOMScanOptionsFromFlagSet(flags *pflag.FlagSet, findFlagFunc func(name string) *pflag.Flag) (*define.SBOMScanOptions, error) {
preset, err := flags.GetString("sbom")
if err != nil {
return nil, fmt.Errorf("invalid value for --sbom: %w", err)
}
options, err := sbom.Preset(preset)
if err != nil {
return nil, err
}
if options == nil {
return nil, fmt.Errorf("parsing --sbom flag: unrecognized preset name %q", preset)
}
image, err := flags.GetString("sbom-scanner-image")
if err != nil {
return nil, fmt.Errorf("invalid value for --sbom-scanner-image: %w", err)
}
commands, err := flags.GetStringArray("sbom-scanner-command")
if err != nil {
return nil, fmt.Errorf("invalid value for --sbom-scanner-command: %w", err)
}
mergeStrategy, err := flags.GetString("sbom-merge-strategy")
if err != nil {
return nil, fmt.Errorf("invalid value for --sbom-merge-strategy: %w", err)
}
if image != "" || len(commands) > 0 || mergeStrategy != "" {
options = &define.SBOMScanOptions{
Image: image,
Commands: append([]string{}, commands...),
MergeStrategy: define.SBOMMergeStrategy(mergeStrategy),
}
}
if options.ImageSBOMOutput, err = flags.GetString("sbom-image-output"); err != nil {
return nil, fmt.Errorf("invalid value for --sbom-image-output: %w", err)
}
if options.SBOMOutput, err = flags.GetString("sbom-output"); err != nil {
return nil, fmt.Errorf("invalid value for --sbom-output: %w", err)
}
if options.ImagePURLOutput, err = flags.GetString("sbom-image-purl-output"); err != nil {
return nil, fmt.Errorf("invalid value for --sbom-image-purl-output: %w", err)
}
if options.PURLOutput, err = flags.GetString("sbom-purl-output"); err != nil {
return nil, fmt.Errorf("invalid value for --sbom-purl-output: %w", err)
}
if options.Image == "" || len(options.Commands) == 0 || (options.SBOMOutput == "" && options.ImageSBOMOutput == "" && options.PURLOutput == "" && options.ImagePURLOutput == "") {
return options, fmt.Errorf("sbom configuration missing one or more of (%q, %q, %q, %q, %q or %q)", "--sbom-scanner-imag", "--sbom-scanner-command", "--sbom-output", "--sbom-image-output", "--sbom-purl-output", "--sbom-image-purl-output")
}
if len(options.Commands) > 1 && options.MergeStrategy == "" {
return options, fmt.Errorf("sbom configuration included multiple %q values but no %q value", "--sbom-scanner-command", "--sbom-merge-strategy")
}
switch options.MergeStrategy {
default:
return options, fmt.Errorf("sbom arguments included unrecognized merge strategy %q", string(options.MergeStrategy))
case define.SBOMMergeStrategyCat, define.SBOMMergeStrategyCycloneDXByComponentNameAndVersion, define.SBOMMergeStrategySPDXByPackageNameAndVersionInfo:
// all good here
}
return options, nil
}
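A sketch of driving the parser from code, reusing the flag registration from GetBudFlags above. The scanner image and command are placeholder values, and the empty --sbom preset is assumed to resolve to the default preset:
    var bud cli.BudResults
    fs := cli.GetBudFlags(&bud)
    _ = fs.Parse([]string{
        "--sbom-scanner-image=example.com/scanner:latest",     // hypothetical image
        "--sbom-scanner-command=scanner {ROOTFS} -o {OUTPUT}", // hypothetical command
        "--sbom-merge-strategy=cat",
        "--sbom-output=sbom.json",
    })
    opts, err := parse.SBOMScanOptionsFromFlagSet(&fs, fs.Lookup)
    // err == nil; opts.Image, opts.Commands, opts.MergeStrategy and
    // opts.SBOMOutput are populated. Omitting the image, the command, or
    // every output flag trips the validation at the end of the function.
    _, _ = opts, err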
// IDMappingOptions parses the build options related to user namespaces and ID mapping.
func IDMappingOptions(c *cobra.Command, isolation define.Isolation) (usernsOptions define.NamespaceOptions, idmapOptions *define.IDMappingOptions, err error) {
return IDMappingOptionsFromFlagSet(c.Flags(), c.PersistentFlags(), c.Flag)
@ -1053,19 +1173,19 @@ func Device(device string) (string, string, string, error) {
// isValidDeviceMode checks if the mode for device is valid or not.
// isValid mode is a composition of r (read), w (write), and m (mknod).
func isValidDeviceMode(mode string) bool {
var legalDeviceMode = map[rune]bool{
'r': true,
'w': true,
'm': true,
var legalDeviceMode = map[rune]struct{}{
'r': {},
'w': {},
'm': {},
}
if mode == "" {
return false
}
for _, c := range mode {
if !legalDeviceMode[c] {
if _, has := legalDeviceMode[c]; !has {
return false
}
legalDeviceMode[c] = false
delete(legalDeviceMode, c)
}
return true
}
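Deleting each letter as it is seen makes the mode characters single-use, so duplicates are rejected; for example (callable only within this package):
    ok := isValidDeviceMode("rwm") // true: r, w and m each appear once
    ok = isValidDeviceMode("rr")   // false: the second 'r' was already deleted
    ok = isValidDeviceMode("rx")   // false: 'x' is not a legal mode character
    _ = ok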

View File

@ -110,7 +110,7 @@ func (b *Builder) addResolvConf(rdir string, chownOpts *idtools.IDPair, dnsServe
return "", err
}
if err := label.Relabel(cfile, b.MountLabel, false); err != nil {
if err := relabel(cfile, b.MountLabel, false); err != nil {
return "", err
}
return cfile, nil
@ -169,7 +169,7 @@ func (b *Builder) generateHosts(rdir string, chownOpts *idtools.IDPair, imageRoo
if err = os.Chown(targetfile, uid, gid); err != nil {
return "", err
}
if err := label.Relabel(targetfile, b.MountLabel, false); err != nil {
if err := relabel(targetfile, b.MountLabel, false); err != nil {
return "", err
}
@ -198,7 +198,7 @@ func (b *Builder) generateHostname(rdir, hostname string, chownOpts *idtools.IDP
if err = os.Chown(cfile, uid, gid); err != nil {
return "", err
}
if err := label.Relabel(cfile, b.MountLabel, false); err != nil {
if err := relabel(cfile, b.MountLabel, false); err != nil {
return "", err
}
@ -1410,7 +1410,7 @@ func runSetupBuiltinVolumes(mountLabel, mountPoint, containerDir string, builtin
if err = os.MkdirAll(volumePath, 0755); err != nil {
return nil, err
}
if err = label.Relabel(volumePath, mountLabel, false); err != nil {
if err = relabel(volumePath, mountLabel, false); err != nil {
return nil, err
}
initializeVolume = true
@ -1750,7 +1750,7 @@ func (b *Builder) getSecretMount(tokens []string, secrets map[string]define.Secr
return nil, "", err
}
if err := label.Relabel(ctrFileOnHost, b.MountLabel, false); err != nil {
if err := relabel(ctrFileOnHost, b.MountLabel, false); err != nil {
return nil, "", err
}
hostUID, hostGID, err := util.GetHostIDs(idMaps.uidmap, idMaps.gidmap, uid, gid)
@ -1848,13 +1848,13 @@ func (b *Builder) getSSHMount(tokens []string, count int, sshsources map[string]
return nil, nil, err
}
if err := label.Relabel(filepath.Dir(hostSock), b.MountLabel, false); err != nil {
if err := relabel(filepath.Dir(hostSock), b.MountLabel, false); err != nil {
if shutdownErr := fwdAgent.Shutdown(); shutdownErr != nil {
b.Logger.Errorf("error shutting down agent: %v", shutdownErr)
}
return nil, nil, err
}
if err := label.Relabel(hostSock, b.MountLabel, false); err != nil {
if err := relabel(hostSock, b.MountLabel, false); err != nil {
if shutdownErr := fwdAgent.Shutdown(); shutdownErr != nil {
b.Logger.Errorf("error shutting down agent: %v", shutdownErr)
}
@ -1959,3 +1959,13 @@ func setPdeathsig(cmd *exec.Cmd) {
}
cmd.SysProcAttr.Pdeathsig = syscall.SIGKILL
}
func relabel(path, mountLabel string, recurse bool) error {
if err := label.Relabel(path, mountLabel, recurse); err != nil {
if !errors.Is(err, syscall.ENOTSUP) {
return err
}
logrus.Debugf("Labeling not supported on %q", path)
}
return nil
}

View File

@ -44,7 +44,6 @@ import (
"github.com/docker/go-units"
"github.com/opencontainers/runtime-spec/specs-go"
"github.com/opencontainers/runtime-tools/generate"
"github.com/opencontainers/selinux/go-selinux/label"
"github.com/sirupsen/logrus"
"golang.org/x/sys/unix"
)
@ -310,7 +309,7 @@ rootless=%d
if err = ioutils.AtomicWriteFile(containerenvPath, []byte(containerenv), 0755); err != nil {
return err
}
if err := label.Relabel(containerenvPath, b.MountLabel, false); err != nil {
if err := relabel(containerenvPath, b.MountLabel, false); err != nil {
return err
}
@ -916,12 +915,12 @@ func (b *Builder) runSetupVolumeMounts(mountLabel string, volumeMounts []string,
options = append(options, "rw")
}
if foundz {
if err := label.Relabel(host, mountLabel, true); err != nil {
if err := relabel(host, mountLabel, true); err != nil {
return specs.Mount{}, err
}
}
if foundZ {
if err := label.Relabel(host, mountLabel, false); err != nil {
if err := relabel(host, mountLabel, false); err != nil {
return specs.Mount{}, err
}
}

295
vendor/github.com/containers/buildah/scan.go generated vendored Normal file
View File

@ -0,0 +1,295 @@
package buildah
import (
"context"
"fmt"
"io"
"os"
"path/filepath"
"strings"
"github.com/containers/buildah/define"
"github.com/containers/buildah/internal/sbom"
"github.com/containers/common/pkg/util"
"github.com/mattn/go-shellwords"
rspec "github.com/opencontainers/runtime-spec/specs-go"
"github.com/sirupsen/logrus"
)
func stringSliceReplaceAll(slice []string, replacements map[string]string, important []string) (built []string, replacedAnImportantValue bool) {
built = make([]string, 0, len(slice))
for i := range slice {
element := slice[i]
for from, to := range replacements {
previous := element
if element = strings.ReplaceAll(previous, from, to); element != previous {
if len(important) == 0 || util.StringInSlice(from, important) {
replacedAnImportantValue = true
}
}
}
built = append(built, element)
}
return built, replacedAnImportantValue
}
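For example, resolving the placeholders the way sbomScan does below (the paths are the illustrative in-container mount points used later in this file):
    resolved, hitImportant := stringSliceReplaceAll(
        []string{"scanner", "{ROOTFS}", "-o", "{OUTPUT}"},
        map[string]string{
            "{ROOTFS}": "/.rootfs",
            "{OUTPUT}": "/.scans/scan0.json",
        },
        []string{"{ROOTFS}"},
    )
    // resolved == []string{"scanner", "/.rootfs", "-o", "/.scans/scan0.json"}
    // hitImportant == true because "{ROOTFS}", listed as important, was
    // replaced; a command that only mentioned "{OUTPUT}" would leave it false.
    _, _ = resolved, hitImportant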
// sbomScan iterates through the scanning configuration settings, generating
// SBOM files and storing them either in the rootfs or in a local file path.
func (b *Builder) sbomScan(ctx context.Context, options CommitOptions) (imageFiles, localFiles map[string]string, scansDir string, err error) {
// We'll use a temporary per-container directory for this one.
cdir, err := b.store.ContainerDirectory(b.ContainerID)
if err != nil {
return nil, nil, "", err
}
scansDir, err = os.MkdirTemp(cdir, "buildah-scan")
if err != nil {
return nil, nil, "", err
}
defer func() {
if err != nil {
if err := os.RemoveAll(scansDir); err != nil {
logrus.Warnf("removing temporary directory %q: %v", scansDir, err)
}
}
}()
// We may be producing sets of outputs using temporary containers, and
// there's no need to create more than one container for any one
// specific scanner image.
scanners := make(map[string]*Builder)
defer func() {
for _, scanner := range scanners {
scannerID := scanner.ContainerID
if err := scanner.Delete(); err != nil {
logrus.Warnf("removing temporary scanner container %q: %v", scannerID, err)
}
}
}()
// Just assume that every scanning method will be looking at the rootfs.
rootfs, err := b.Mount(b.MountLabel)
if err != nil {
return nil, nil, "", err
}
defer func(b *Builder) {
if err := b.Unmount(); err != nil {
logrus.Warnf("unmounting temporary scanner container %q: %v", b.ContainerID, err)
}
}(b)
// Iterate through all of the scanning strategies.
for _, scanSpec := range options.SBOMScanOptions {
// Pull the image and create a container we can run the scanner
// in, unless we've done that already for this scanner image.
scanBuilder, ok := scanners[scanSpec.Image]
if !ok {
builderOptions := BuilderOptions{
FromImage: scanSpec.Image,
ContainerSuffix: "scanner",
PullPolicy: scanSpec.PullPolicy,
BlobDirectory: options.BlobDirectory,
Logger: b.Logger,
SystemContext: options.SystemContext,
MountLabel: b.MountLabel,
ProcessLabel: b.ProcessLabel,
IDMappingOptions: &b.IDMappingOptions,
}
if scanBuilder, err = NewBuilder(ctx, b.store, builderOptions); err != nil {
return nil, nil, "", fmt.Errorf("creating temporary working container to run scanner: %w", err)
}
scanners[scanSpec.Image] = scanBuilder
}
// Now figure out which commands we need to run. First, try to
// parse a command ourselves, because syft's image (at least)
// doesn't include a shell. Build a slice of command slices.
var commands [][]string
for _, commandSpec := range scanSpec.Commands {
// Start by assuming it's shell -c $whatever.
parsedCommand := []string{"/bin/sh", "-c", commandSpec}
if shell := scanBuilder.Shell(); len(shell) != 0 {
parsedCommand = append(append([]string{}, shell...), commandSpec)
}
if !strings.ContainsAny(commandSpec, "<>|") { // An imperfect check for shell redirection being used.
// If we can parse it ourselves, though, prefer to use that result,
// in case the scanner image doesn't include a shell.
if parsed, err := shellwords.Parse(commandSpec); err == nil {
parsedCommand = parsed
}
}
commands = append(commands, parsedCommand)
}
// Set up a list of mounts for the rootfs and whichever context
// directories we're told were used.
const rootfsTargetDir = "/.rootfs"
const scansTargetDir = "/.scans"
const contextsTargetDirPrefix = "/.context"
runMounts := []rspec.Mount{
// Our temporary directory, read-write.
{
Type: define.TypeBind,
Source: scansDir,
Destination: scansTargetDir,
Options: []string{"rw", "z"},
},
// The rootfs, read-only.
{
Type: define.TypeBind,
Source: rootfs,
Destination: rootfsTargetDir,
Options: []string{"ro"},
},
}
// Each context directory, also read-only.
for i := range scanSpec.ContextDir {
contextMount := rspec.Mount{
Type: define.TypeBind,
Source: scanSpec.ContextDir[i],
Destination: fmt.Sprintf("%s%d", contextsTargetDirPrefix, i),
Options: []string{"ro"},
}
runMounts = append(runMounts, contextMount)
}
// Set up run options and mounts one time, and reuse it.
runOptions := RunOptions{
Logger: b.Logger,
Isolation: b.Isolation,
SystemContext: options.SystemContext,
Mounts: runMounts,
}
// We'll have to do some text substitutions so that we run the
// right commands, in the right order, pointing at the right
// mount points.
var resolvedCommands [][]string
var resultFiles []string
for _, command := range commands {
// Each command gets to produce its own file that we'll
// combine later if there's more than one of them.
contextDirScans := 0
for i := range scanSpec.ContextDir {
resultFile := filepath.Join(scansTargetDir, fmt.Sprintf("scan%d.json", len(resultFiles)))
// If the command mentions {CONTEXT}...
resolvedCommand, scansContext := stringSliceReplaceAll(command,
map[string]string{
"{CONTEXT}": fmt.Sprintf("%s%d", contextsTargetDirPrefix, i),
"{OUTPUT}": resultFile,
},
[]string{"{CONTEXT}"},
)
if !scansContext {
break
}
// ... resolve the path references and add it to the list of commands.
resolvedCommands = append(resolvedCommands, resolvedCommand)
resultFiles = append(resultFiles, resultFile)
contextDirScans++
}
if contextDirScans == 0 {
resultFile := filepath.Join(scansTargetDir, fmt.Sprintf("scan%d.json", len(resultFiles)))
// If the command didn't mention {CONTEXT}, but does mention {ROOTFS}...
resolvedCommand, scansRootfs := stringSliceReplaceAll(command,
map[string]string{
"{ROOTFS}": rootfsTargetDir,
"{OUTPUT}": resultFile,
},
[]string{"{ROOTFS}"},
)
// ... resolve the path references and add that to the list of commands.
if scansRootfs {
resolvedCommands = append(resolvedCommands, resolvedCommand)
resultFiles = append(resultFiles, resultFile)
}
}
}
// Run all of the commands, one after the other, producing one
// or more files named "scan%d.json" in our temporary directory.
for _, resolvedCommand := range resolvedCommands {
logrus.Debugf("Running scan command %q", resolvedCommand)
if err = scanBuilder.Run(resolvedCommand, runOptions); err != nil {
return nil, nil, "", fmt.Errorf("running scanning command %v: %w", resolvedCommand, err)
}
}
// Produce the combined output files that we need to create, if there are any.
var sbomResult, purlResult string
switch {
case scanSpec.ImageSBOMOutput != "":
sbomResult = filepath.Join(scansDir, filepath.Base(scanSpec.ImageSBOMOutput))
case scanSpec.SBOMOutput != "":
sbomResult = filepath.Join(scansDir, filepath.Base(scanSpec.SBOMOutput))
default:
sbomResult = filepath.Join(scansDir, "sbom-result")
}
switch {
case scanSpec.ImagePURLOutput != "":
purlResult = filepath.Join(scansDir, filepath.Base(scanSpec.ImagePURLOutput))
case scanSpec.PURLOutput != "":
purlResult = filepath.Join(scansDir, filepath.Base(scanSpec.PURLOutput))
default:
purlResult = filepath.Join(scansDir, "purl-result")
}
copyFile := func(destination, source string) error {
dst, err := os.Create(destination)
if err != nil {
return err
}
defer dst.Close()
src, err := os.Open(source)
if err != nil {
return err
}
defer src.Close()
if _, err = io.Copy(dst, src); err != nil {
return fmt.Errorf("copying %q to %q: %w", source, destination, err)
}
return nil
}
err = func() error {
for i := range resultFiles {
thisResultFile := filepath.Join(scansDir, filepath.Base(resultFiles[i]))
switch i {
case 0:
// Straight-up copy to create the first version of the final output.
if err = copyFile(sbomResult, thisResultFile); err != nil {
return err
}
// This shouldn't change any contents, but lets us generate the purl file.
err = sbom.Merge(scanSpec.MergeStrategy, thisResultFile, sbomResult, purlResult)
default:
// Hopefully we know how to merge information from the new one into the final output.
err = sbom.Merge(scanSpec.MergeStrategy, sbomResult, thisResultFile, purlResult)
}
}
return err
}()
if err != nil {
return nil, nil, "", err
}
// If these files are supposed to be written to the local filesystem, add
// their contents to the map of files we expect our caller to write.
if scanSpec.SBOMOutput != "" || scanSpec.PURLOutput != "" {
if localFiles == nil {
localFiles = make(map[string]string)
}
if scanSpec.SBOMOutput != "" {
localFiles[scanSpec.SBOMOutput] = sbomResult
}
if scanSpec.PURLOutput != "" {
localFiles[scanSpec.PURLOutput] = purlResult
}
}
// If these files are supposed to be written to the image, create a map of
// their contents so that we can either create a layer diff for them (or
// slipstream them into a squashed layer diff) later.
if scanSpec.ImageSBOMOutput != "" || scanSpec.ImagePURLOutput != "" {
if imageFiles == nil {
imageFiles = make(map[string]string)
}
if scanSpec.ImageSBOMOutput != "" {
imageFiles[scanSpec.ImageSBOMOutput] = sbomResult
}
if scanSpec.ImagePURLOutput != "" {
imageFiles[scanSpec.ImagePURLOutput] = purlResult
}
}
}
return imageFiles, localFiles, scansDir, nil
}
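A sketch of how a commit path might consume the three return values (the surrounding variables are assumptions, not buildah's actual commit code):
    imageFiles, localFiles, scansDir, err := b.sbomScan(ctx, options)
    if err != nil {
        return err
    }
    defer os.RemoveAll(scansDir) // both maps point at files under this temp dir
    // localFiles maps local-filesystem destinations to sources under scansDir;
    // imageFiles maps in-image destinations to sources under scansDir, to be
    // folded into the committed layer diff (or the squashed layer).
    _, _ = imageFiles, localFiles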

View File

@ -14,7 +14,7 @@ import (
"syscall"
"github.com/containers/storage/pkg/system"
"github.com/opencontainers/runc/libcontainer/user"
"github.com/moby/sys/user"
)
var (

View File

@ -11,7 +11,7 @@ import (
"github.com/containers/storage/pkg/idtools"
"github.com/containers/storage/pkg/unshare"
"github.com/containers/storage/types"
libcontainerUser "github.com/opencontainers/runc/libcontainer/user"
libcontainerUser "github.com/moby/sys/user"
"github.com/sirupsen/logrus"
)

View File

@ -26,14 +26,19 @@ func isEnabled() bool {
}
func setProcAttr(attr, value string) error {
// Under AppArmor you can only change your own attr, so use /proc/self/
// instead of /proc/<tid>/ like libapparmor does
attrPath := "/proc/self/attr/apparmor/" + attr
if _, err := os.Stat(attrPath); errors.Is(err, os.ErrNotExist) {
attr = utils.CleanPath(attr)
attrSubPath := "attr/apparmor/" + attr
if _, err := os.Stat("/proc/self/" + attrSubPath); errors.Is(err, os.ErrNotExist) {
// fall back to the old convention
attrPath = "/proc/self/attr/" + attr
attrSubPath = "attr/" + attr
}
// Under AppArmor you can only change your own attr, so there's no reason
// to not use /proc/thread-self/ (instead of /proc/<tid>/, like libapparmor
// does).
attrPath, closer := utils.ProcThreadSelf(attrSubPath)
defer closer()
f, err := os.OpenFile(attrPath, os.O_WRONLY, 0)
if err != nil {
return err

View File

@ -10,6 +10,7 @@ import (
"strings"
"sync"
"github.com/opencontainers/runc/libcontainer/utils"
"github.com/sirupsen/logrus"
"golang.org/x/sys/unix"
)
@ -49,24 +50,13 @@ func WriteFile(dir, file, data string) error {
return err
}
defer fd.Close()
if err := retryingWriteFile(fd, data); err != nil {
if _, err := fd.WriteString(data); err != nil {
// Having data in the error message helps in debugging.
return fmt.Errorf("failed to write %q: %w", data, err)
}
return nil
}
func retryingWriteFile(fd *os.File, data string) error {
for {
_, err := fd.Write([]byte(data))
if errors.Is(err, unix.EINTR) {
logrus.Infof("interrupted while writing %s to %s", data, fd.Name())
continue
}
return err
}
}
const (
cgroupfsDir = "/sys/fs/cgroup"
cgroupfsPrefix = cgroupfsDir + "/"
@ -76,16 +66,16 @@ var (
// TestMode is set to true by unit tests that need "fake" cgroupfs.
TestMode bool
cgroupFd int = -1
prepOnce sync.Once
prepErr error
resolveFlags uint64
cgroupRootHandle *os.File
prepOnce sync.Once
prepErr error
resolveFlags uint64
)
func prepareOpenat2() error {
prepOnce.Do(func() {
fd, err := unix.Openat2(-1, cgroupfsDir, &unix.OpenHow{
Flags: unix.O_DIRECTORY | unix.O_PATH,
Flags: unix.O_DIRECTORY | unix.O_PATH | unix.O_CLOEXEC,
})
if err != nil {
prepErr = &os.PathError{Op: "openat2", Path: cgroupfsDir, Err: err}
@ -96,15 +86,16 @@ func prepareOpenat2() error {
}
return
}
file := os.NewFile(uintptr(fd), cgroupfsDir)
var st unix.Statfs_t
if err = unix.Fstatfs(fd, &st); err != nil {
if err := unix.Fstatfs(int(file.Fd()), &st); err != nil {
prepErr = &os.PathError{Op: "statfs", Path: cgroupfsDir, Err: err}
logrus.Warnf("falling back to securejoin: %s", prepErr)
return
}
cgroupFd = fd
cgroupRootHandle = file
resolveFlags = unix.RESOLVE_BENEATH | unix.RESOLVE_NO_MAGICLINKS
if st.Type == unix.CGROUP2_SUPER_MAGIC {
// cgroupv2 has a single mountpoint and no "cpu,cpuacct" symlinks
@ -122,7 +113,7 @@ func openFile(dir, file string, flags int) (*os.File, error) {
flags |= os.O_TRUNC | os.O_CREATE
mode = 0o600
}
path := path.Join(dir, file)
path := path.Join(dir, utils.CleanPath(file))
if prepareOpenat2() != nil {
return openFallback(path, flags, mode)
}
@ -131,7 +122,7 @@ func openFile(dir, file string, flags int) (*os.File, error) {
return openFallback(path, flags, mode)
}
fd, err := unix.Openat2(cgroupFd, relPath,
fd, err := unix.Openat2(int(cgroupRootHandle.Fd()), relPath,
&unix.OpenHow{
Resolve: resolveFlags,
Flags: uint64(flags) | unix.O_CLOEXEC,
@ -139,20 +130,21 @@ func openFile(dir, file string, flags int) (*os.File, error) {
})
if err != nil {
err = &os.PathError{Op: "openat2", Path: path, Err: err}
// Check if cgroupFd is still opened to cgroupfsDir
// Check if cgroupRootHandle is still opened to cgroupfsDir
// (happens when this package is incorrectly used
// across the chroot/pivot_root/mntns boundary, or
// when /sys/fs/cgroup is remounted).
//
// TODO: if such usage will ever be common, amend this
// to reopen cgroupFd and retry openat2.
fdStr := strconv.Itoa(cgroupFd)
fdDest, _ := os.Readlink("/proc/self/fd/" + fdStr)
// to reopen cgroupRootHandle and retry openat2.
fdPath, closer := utils.ProcThreadSelf("fd/" + strconv.Itoa(int(cgroupRootHandle.Fd())))
defer closer()
fdDest, _ := os.Readlink(fdPath)
if fdDest != cgroupfsDir {
// Wrap the error so it is clear that cgroupFd
// Wrap the error so it is clear that cgroupRootHandle
// is opened to an unexpected/wrong directory.
err = fmt.Errorf("cgroupFd %s unexpectedly opened to %s != %s: %w",
fdStr, fdDest, cgroupfsDir, err)
err = fmt.Errorf("cgroupRootHandle %d unexpectedly opened to %s != %s: %w",
cgroupRootHandle.Fd(), fdDest, cgroupfsDir, err)
}
return nil, err
}

View File

@ -84,6 +84,28 @@ func (s *CpuGroup) Set(path string, r *configs.Resources) error {
period = ""
}
}
var burst string
if r.CpuBurst != nil {
burst = strconv.FormatUint(*r.CpuBurst, 10)
if err := cgroups.WriteFile(path, "cpu.cfs_burst_us", burst); err != nil {
// Older kernels, and the systemd versions paired with them, do not support
// the burst feature, in which case the write fails with a `no such file or
// directory` error that we can safely ignore.
if !errors.Is(err, unix.ENOENT) {
// Sometimes when the burst to be set is larger
// than the current one, it is rejected by the kernel
// (EINVAL) as old_quota/new_burst exceeds the parent
// cgroup quota limit. If this happens and the quota is
// going to be set, ignore the error for now and retry
// after setting the quota.
if !errors.Is(err, unix.EINVAL) || r.CpuQuota == 0 {
return err
}
}
} else {
burst = ""
}
}
if r.CpuQuota != 0 {
if err := cgroups.WriteFile(path, "cpu.cfs_quota_us", strconv.FormatInt(r.CpuQuota, 10)); err != nil {
return err
@ -93,6 +115,13 @@ func (s *CpuGroup) Set(path string, r *configs.Resources) error {
return err
}
}
if burst != "" {
if err := cgroups.WriteFile(path, "cpu.cfs_burst_us", burst); err != nil {
if !errors.Is(err, unix.ENOENT) {
return err
}
}
}
}
if r.CPUIdle != nil {

View File

@ -1,6 +1,8 @@
package fs
import (
"errors"
"os"
"strconv"
"github.com/opencontainers/runc/libcontainer/cgroups"
@ -19,8 +21,23 @@ func (s *HugetlbGroup) Apply(path string, _ *configs.Resources, pid int) error {
}
func (s *HugetlbGroup) Set(path string, r *configs.Resources) error {
const suffix = ".limit_in_bytes"
skipRsvd := false
for _, hugetlb := range r.HugetlbLimit {
if err := cgroups.WriteFile(path, "hugetlb."+hugetlb.Pagesize+".limit_in_bytes", strconv.FormatUint(hugetlb.Limit, 10)); err != nil {
prefix := "hugetlb." + hugetlb.Pagesize
val := strconv.FormatUint(hugetlb.Limit, 10)
if err := cgroups.WriteFile(path, prefix+suffix, val); err != nil {
return err
}
if skipRsvd {
continue
}
if err := cgroups.WriteFile(path, prefix+".rsvd"+suffix, val); err != nil {
if errors.Is(err, os.ErrNotExist) {
skipRsvd = true
continue
}
return err
}
}
@ -32,24 +49,29 @@ func (s *HugetlbGroup) GetStats(path string, stats *cgroups.Stats) error {
if !cgroups.PathExists(path) {
return nil
}
rsvd := ".rsvd"
hugetlbStats := cgroups.HugetlbStats{}
for _, pageSize := range cgroups.HugePageSizes() {
usage := "hugetlb." + pageSize + ".usage_in_bytes"
value, err := fscommon.GetCgroupParamUint(path, usage)
again:
prefix := "hugetlb." + pageSize + rsvd
value, err := fscommon.GetCgroupParamUint(path, prefix+".usage_in_bytes")
if err != nil {
if rsvd != "" && errors.Is(err, os.ErrNotExist) {
rsvd = ""
goto again
}
return err
}
hugetlbStats.Usage = value
maxUsage := "hugetlb." + pageSize + ".max_usage_in_bytes"
value, err = fscommon.GetCgroupParamUint(path, maxUsage)
value, err = fscommon.GetCgroupParamUint(path, prefix+".max_usage_in_bytes")
if err != nil {
return err
}
hugetlbStats.MaxUsage = value
failcnt := "hugetlb." + pageSize + ".failcnt"
value, err = fscommon.GetCgroupParamUint(path, failcnt)
value, err = fscommon.GetCgroupParamUint(path, prefix+".failcnt")
if err != nil {
return err
}

View File

@ -170,6 +170,10 @@ func (s *MemoryGroup) GetStats(path string, stats *cgroups.Stats) error {
return err
}
stats.MemoryStats.SwapUsage = swapUsage
stats.MemoryStats.SwapOnlyUsage = cgroups.MemoryData{
Usage: swapUsage.Usage - memoryUsage.Usage,
Failcnt: swapUsage.Failcnt - memoryUsage.Failcnt,
}
kernelUsage, err := getMemoryData(path, "kmem")
if err != nil {
return err
@ -234,6 +238,12 @@ func getMemoryData(path, name string) (cgroups.MemoryData, error) {
memoryData.Failcnt = value
value, err = fscommon.GetCgroupParamUint(path, limit)
if err != nil {
if name == "kmem" && os.IsNotExist(err) {
// Ignore ENOENT as kmem.limit_in_bytes has
// been removed in newer kernels.
return memoryData, nil
}
return cgroups.MemoryData{}, err
}
memoryData.Limit = value

View File

@ -2,16 +2,19 @@ package fs2
import (
"bufio"
"errors"
"os"
"strconv"
"golang.org/x/sys/unix"
"github.com/opencontainers/runc/libcontainer/cgroups"
"github.com/opencontainers/runc/libcontainer/cgroups/fscommon"
"github.com/opencontainers/runc/libcontainer/configs"
)
func isCpuSet(r *configs.Resources) bool {
return r.CpuWeight != 0 || r.CpuQuota != 0 || r.CpuPeriod != 0 || r.CPUIdle != nil
return r.CpuWeight != 0 || r.CpuQuota != 0 || r.CpuPeriod != 0 || r.CPUIdle != nil || r.CpuBurst != nil
}
func setCpu(dirPath string, r *configs.Resources) error {
@ -32,6 +35,23 @@ func setCpu(dirPath string, r *configs.Resources) error {
}
}
var burst string
if r.CpuBurst != nil {
burst = strconv.FormatUint(*r.CpuBurst, 10)
if err := cgroups.WriteFile(dirPath, "cpu.max.burst", burst); err != nil {
// Sometimes when the burst to be set is larger
// than the current one, it is rejected by the kernel
// (EINVAL) as old_quota/new_burst exceeds the parent
// cgroup quota limit. If this happens and the quota is
// going to be set, ignore the error for now and retry
// after setting the quota.
if !errors.Is(err, unix.EINVAL) || r.CpuQuota == 0 {
return err
}
} else {
burst = ""
}
}
if r.CpuQuota != 0 || r.CpuPeriod != 0 {
str := "max"
if r.CpuQuota > 0 {
@ -47,6 +67,11 @@ func setCpu(dirPath string, r *configs.Resources) error {
if err := cgroups.WriteFile(dirPath, "cpu.max", str); err != nil {
return err
}
if burst != "" {
if err := cgroups.WriteFile(dirPath, "cpu.max.burst", burst); err != nil {
return err
}
}
}
return nil

View File

@ -55,6 +55,9 @@ func _defaultDirPath(root, cgPath, cgParent, cgName string) (string, error) {
return filepath.Join(root, innerPath), nil
}
// we don't need to use /proc/thread-self here because runc always runs
// with every thread in the same cgroup. This lets us avoid having to do
// runtime.LockOSThread.
ownCgroup, err := parseCgroupFile("/proc/self/cgroup")
if err != nil {
return "", err

View File

@ -133,6 +133,10 @@ func (m *Manager) GetStats() (*cgroups.Stats, error) {
if err := fscommon.RdmaGetStats(m.dirPath, st); err != nil && !os.IsNotExist(err) {
errs = append(errs, err)
}
// misc (since kernel 5.13)
if err := statMisc(m.dirPath, st); err != nil && !os.IsNotExist(err) {
errs = append(errs, err)
}
if len(errs) > 0 && !m.config.Rootless {
return st, fmt.Errorf("error while statting cgroup v2: %+v", errs)
}

View File

@ -1,6 +1,8 @@
package fs2
import (
"errors"
"os"
"strconv"
"github.com/opencontainers/runc/libcontainer/cgroups"
@ -16,8 +18,22 @@ func setHugeTlb(dirPath string, r *configs.Resources) error {
if !isHugeTlbSet(r) {
return nil
}
const suffix = ".max"
skipRsvd := false
for _, hugetlb := range r.HugetlbLimit {
if err := cgroups.WriteFile(dirPath, "hugetlb."+hugetlb.Pagesize+".max", strconv.FormatUint(hugetlb.Limit, 10)); err != nil {
prefix := "hugetlb." + hugetlb.Pagesize
val := strconv.FormatUint(hugetlb.Limit, 10)
if err := cgroups.WriteFile(dirPath, prefix+suffix, val); err != nil {
return err
}
if skipRsvd {
continue
}
if err := cgroups.WriteFile(dirPath, prefix+".rsvd"+suffix, val); err != nil {
if errors.Is(err, os.ErrNotExist) {
skipRsvd = true
continue
}
return err
}
}
@ -27,15 +43,21 @@ func setHugeTlb(dirPath string, r *configs.Resources) error {
func statHugeTlb(dirPath string, stats *cgroups.Stats) error {
hugetlbStats := cgroups.HugetlbStats{}
rsvd := ".rsvd"
for _, pagesize := range cgroups.HugePageSizes() {
value, err := fscommon.GetCgroupParamUint(dirPath, "hugetlb."+pagesize+".current")
again:
prefix := "hugetlb." + pagesize + rsvd
value, err := fscommon.GetCgroupParamUint(dirPath, prefix+".current")
if err != nil {
if rsvd != "" && errors.Is(err, os.ErrNotExist) {
rsvd = ""
goto again
}
return err
}
hugetlbStats.Usage = value
fileName := "hugetlb." + pagesize + ".events"
value, err = fscommon.GetValueByKey(dirPath, fileName, "max")
value, err = fscommon.GetValueByKey(dirPath, prefix+".events", "max")
if err != nil {
return err
}

View File

@ -105,7 +105,7 @@ func statMemory(dirPath string, stats *cgroups.Stats) error {
memoryUsage, err := getMemoryDataV2(dirPath, "")
if err != nil {
if errors.Is(err, unix.ENOENT) && dirPath == UnifiedMountpoint {
// The root cgroup does not have memory.{current,max}
// The root cgroup does not have memory.{current,max,peak}
// so emulate those using data from /proc/meminfo and
// /sys/fs/cgroup/memory.stat
return rootStatsFromMeminfo(stats)
@ -113,10 +113,12 @@ func statMemory(dirPath string, stats *cgroups.Stats) error {
return err
}
stats.MemoryStats.Usage = memoryUsage
swapUsage, err := getMemoryDataV2(dirPath, "swap")
swapOnlyUsage, err := getMemoryDataV2(dirPath, "swap")
if err != nil {
return err
}
stats.MemoryStats.SwapOnlyUsage = swapOnlyUsage
swapUsage := swapOnlyUsage
// As cgroup v1 reports SwapUsage values as mem+swap combined,
// while in cgroup v2 swap values do not include memory,
// report combined mem+swap for v1 compatibility.
@ -124,6 +126,9 @@ func statMemory(dirPath string, stats *cgroups.Stats) error {
if swapUsage.Limit != math.MaxUint64 {
swapUsage.Limit += memoryUsage.Limit
}
// The `MaxUsage` of mem+swap cannot simply combine mem with
// swap. So set it to 0 for v1 compatibility.
swapUsage.MaxUsage = 0
stats.MemoryStats.SwapUsage = swapUsage
return nil
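A worked example of the v1-compatibility conversion, with assumed values:
    memoryUsage := cgroups.MemoryData{Usage: 100 << 20, Limit: 1 << 30} // 100 MiB used, 1 GiB limit
    swapOnly := cgroups.MemoryData{Usage: 20 << 20, Limit: 512 << 20}   // what cgroup v2 reports
    swap := swapOnly
    swap.Usage += memoryUsage.Usage // 120 MiB: mem+swap, as cgroup v1 reports it
    swap.Limit += memoryUsage.Limit // 1.5 GiB combined (skipped when swap is "max")
    swap.MaxUsage = 0               // peak of mem and peak of swap cannot be summed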
@ -138,6 +143,7 @@ func getMemoryDataV2(path, name string) (cgroups.MemoryData, error) {
}
usage := moduleName + ".current"
limit := moduleName + ".max"
maxUsage := moduleName + ".peak"
value, err := fscommon.GetCgroupParamUint(path, usage)
if err != nil {
@ -157,6 +163,14 @@ func getMemoryDataV2(path, name string) (cgroups.MemoryData, error) {
}
memoryData.Limit = value
// `memory.peak` since kernel 5.19
// `memory.swap.peak` since kernel 6.5
value, err = fscommon.GetCgroupParamUint(path, maxUsage)
if err != nil && !os.IsNotExist(err) {
return cgroups.MemoryData{}, err
}
memoryData.MaxUsage = value
return memoryData, nil
}

View File

@ -0,0 +1,52 @@
package fs2
import (
"bufio"
"os"
"strings"
"github.com/opencontainers/runc/libcontainer/cgroups"
"github.com/opencontainers/runc/libcontainer/cgroups/fscommon"
)
func statMisc(dirPath string, stats *cgroups.Stats) error {
for _, file := range []string{"current", "events"} {
fd, err := cgroups.OpenFile(dirPath, "misc."+file, os.O_RDONLY)
if err != nil {
return err
}
s := bufio.NewScanner(fd)
for s.Scan() {
key, value, err := fscommon.ParseKeyValue(s.Text())
if err != nil {
fd.Close()
return err
}
key = strings.TrimSuffix(key, ".max")
if _, ok := stats.MiscStats[key]; !ok {
stats.MiscStats[key] = cgroups.MiscStats{}
}
tmp := stats.MiscStats[key]
switch file {
case "current":
tmp.Usage = value
case "events":
tmp.Events = value
}
stats.MiscStats[key] = tmp
}
fd.Close()
if err := s.Err(); err != nil {
return err
}
}
return nil
}
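For instance, given a misc.current containing "res_a 10" and a misc.events containing "res_a.max 2" (file contents assumed for illustration), the loop ends with:
    want := map[string]cgroups.MiscStats{
        // Usage comes from misc.current, Events from misc.events with the
        // ".max" suffix trimmed, so both files update the same key.
        "res_a": {Usage: 10, Events: 2},
    }
    _ = want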

View File

@ -91,6 +91,8 @@ type MemoryStats struct {
Usage MemoryData `json:"usage,omitempty"`
// usage of memory + swap
SwapUsage MemoryData `json:"swap_usage,omitempty"`
// usage of swap only
SwapOnlyUsage MemoryData `json:"swap_only_usage,omitempty"`
// usage of kernel memory
KernelUsage MemoryData `json:"kernel_usage,omitempty"`
// usage of kernel TCP memory
@ -170,6 +172,13 @@ type RdmaStats struct {
RdmaCurrent []RdmaEntry `json:"rdma_current,omitempty"`
}
type MiscStats struct {
// current resource usage for a key in misc
Usage uint64 `json:"usage,omitempty"`
// number of times the resource usage was about to go over the max boundary
Events uint64 `json:"events,omitempty"`
}
type Stats struct {
CpuStats CpuStats `json:"cpu_stats,omitempty"`
CPUSetStats CPUSetStats `json:"cpuset_stats,omitempty"`
@ -179,10 +188,13 @@ type Stats struct {
// the map is in the format "size of hugepage: stats of the hugepage"
HugetlbStats map[string]HugetlbStats `json:"hugetlb_stats,omitempty"`
RdmaStats RdmaStats `json:"rdma_stats,omitempty"`
// the map is in the format "misc resource name: stats of the key"
MiscStats map[string]MiscStats `json:"misc_stats,omitempty"`
}
func NewStats() *Stats {
memoryStats := MemoryStats{Stats: make(map[string]uint64)}
hugetlbStats := make(map[string]HugetlbStats)
return &Stats{MemoryStats: memoryStats, HugetlbStats: hugetlbStats}
miscStats := make(map[string]MiscStats)
return &Stats{MemoryStats: memoryStats, HugetlbStats: hugetlbStats, MiscStats: miscStats}
}

View File

@ -217,10 +217,26 @@ func PathExists(path string) bool {
return true
}
func rmdir(path string) error {
// rmdir tries to remove a directory, optionally retrying on EBUSY.
func rmdir(path string, retry bool) error {
delay := time.Millisecond
tries := 10
again:
err := unix.Rmdir(path)
if err == nil || err == unix.ENOENT {
switch err { // nolint:errorlint // unix errors are bare
case nil, unix.ENOENT:
return nil
case unix.EINTR:
goto again
case unix.EBUSY:
if retry && tries > 0 {
time.Sleep(delay)
delay *= 2
tries--
goto again
}
}
return &os.PathError{Op: "rmdir", Path: path, Err: err}
}
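With retry enabled, the delay doubles from 1 ms across the ten tries, so the loop sleeps at most 1 + 2 + ... + 512 = 1023 ms in total before giving up and returning the EBUSY error.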
@ -228,68 +244,42 @@ func rmdir(path string) error {
// RemovePath aims to remove cgroup path. It does so recursively,
// by removing any subdirectories (sub-cgroups) first.
func RemovePath(path string) error {
// try the fast path first
if err := rmdir(path); err == nil {
// Try the fast path first.
if err := rmdir(path, false); err == nil {
return nil
}
infos, err := os.ReadDir(path)
if err != nil {
if os.IsNotExist(err) {
err = nil
}
if err != nil && !os.IsNotExist(err) {
return err
}
for _, info := range infos {
if info.IsDir() {
// We should remove subcgroups dir first
// We should remove subcgroup first.
if err = RemovePath(filepath.Join(path, info.Name())); err != nil {
break
}
}
}
if err == nil {
err = rmdir(path)
err = rmdir(path, true)
}
return err
}
// RemovePaths iterates over the provided paths removing them.
// We try to remove all paths five times with increasing delay between tries.
// If some cgroups are still not removed after all retries, an appropriate
// error is returned.
func RemovePaths(paths map[string]string) (err error) {
const retries = 5
delay := 10 * time.Millisecond
for i := 0; i < retries; i++ {
if i != 0 {
time.Sleep(delay)
delay *= 2
}
for s, p := range paths {
if err := RemovePath(p); err != nil {
// do not log intermediate iterations
switch i {
case 0:
logrus.WithError(err).Warnf("Failed to remove cgroup (will retry)")
case retries - 1:
logrus.WithError(err).Error("Failed to remove cgroup")
}
}
_, err := os.Stat(p)
// We need this strange way of checking cgroups existence because
// RemoveAll almost always returns error, even on already removed
// cgroups
if os.IsNotExist(err) {
delete(paths, s)
}
}
if len(paths) == 0 {
//nolint:ineffassign,staticcheck // done to help garbage collecting: opencontainers/runc#2506
paths = make(map[string]string)
return nil
for s, p := range paths {
if err := RemovePath(p); err == nil {
delete(paths, s)
}
}
if len(paths) == 0 {
//nolint:ineffassign,staticcheck // done to help garbage collecting: opencontainers/runc#2506
// TODO: switch to clear once Go < 1.21 is not supported.
paths = make(map[string]string)
return nil
}
return fmt.Errorf("Failed to remove paths: %v", paths)
}

View File

@ -99,11 +99,12 @@ func tryDefaultPath(cgroupPath, subsystem string) string {
// expensive), so it is assumed that cgroup mounts are not being changed.
func readCgroupMountinfo() ([]*mountinfo.Info, error) {
readMountinfoOnce.Do(func() {
// mountinfo.GetMounts uses /proc/thread-self, so we can use it without
// issues.
cgroupMountinfo, readMountinfoErr = mountinfo.GetMounts(
mountinfo.FSTypeFilter("cgroup"),
)
})
return cgroupMountinfo, readMountinfoErr
}
@ -196,6 +197,9 @@ func getCgroupMountsV1(all bool) ([]Mount, error) {
return nil, err
}
// We don't need to use /proc/thread-self here because runc always runs
// with every thread in the same cgroup. This lets us avoid having to do
// runtime.LockOSThread.
allSubsystems, err := ParseCgroupFile("/proc/self/cgroup")
if err != nil {
return nil, err
@ -214,6 +218,10 @@ func GetOwnCgroup(subsystem string) (string, error) {
if IsCgroup2UnifiedMode() {
return "", errUnified
}
// We don't need to use /proc/thread-self here because runc always runs
// with every thread in the same cgroup. This lets us avoid having to do
// runtime.LockOSThread.
cgroups, err := ParseCgroupFile("/proc/self/cgroup")
if err != nil {
return "", err

View File

@ -69,6 +69,9 @@ type Resources struct {
// CPU hardcap limit (in usecs). Allowed cpu time in a given period.
CpuQuota int64 `json:"cpu_quota"`
// CPU hardcap burst limit (in usecs). Allowed accumulated cpu time additionally for burst in a given period.
CpuBurst *uint64 `json:"cpu_burst"` //nolint:revive
// CPU period to be used for hardcapping (in usecs). 0 to use system default.
CpuPeriod uint64 `json:"cpu_period"`

View File

@ -8,6 +8,7 @@ import (
"time"
"github.com/sirupsen/logrus"
"golang.org/x/sys/unix"
"github.com/opencontainers/runc/libcontainer/devices"
"github.com/opencontainers/runtime-spec/specs-go"
@ -21,9 +22,9 @@ type Rlimit struct {
// IDMap represents UID/GID Mappings for User Namespaces.
type IDMap struct {
ContainerID int `json:"container_id"`
HostID int `json:"host_id"`
Size int `json:"size"`
ContainerID int64 `json:"container_id"`
HostID int64 `json:"host_id"`
Size int64 `json:"size"`
}
// Seccomp represents syscall restrictions
@ -213,12 +214,73 @@ type Config struct {
// When RootlessCgroups is set, cgroups errors are ignored.
RootlessCgroups bool `json:"rootless_cgroups,omitempty"`
// Do not try to remount a bind mount again after the first attempt failed on source
// filesystems that have nodev, noexec, nosuid, noatime, relatime, strictatime, nodiratime set
NoMountFallback bool `json:"no_mount_fallback,omitempty"`
// TimeOffsets specifies the offset for supporting time namespaces.
TimeOffsets map[string]specs.LinuxTimeOffset `json:"time_offsets,omitempty"`
// Scheduler represents the scheduling attributes for a process.
Scheduler *Scheduler `json:"scheduler,omitempty"`
// Personality contains configuration for the Linux personality syscall.
Personality *LinuxPersonality `json:"personality,omitempty"`
}
// Scheduler is based on the Linux sched_setattr(2) syscall.
type Scheduler = specs.Scheduler
// ToSchedAttr is to convert *configs.Scheduler to *unix.SchedAttr.
func ToSchedAttr(scheduler *Scheduler) (*unix.SchedAttr, error) {
var policy uint32
switch scheduler.Policy {
case specs.SchedOther:
policy = 0
case specs.SchedFIFO:
policy = 1
case specs.SchedRR:
policy = 2
case specs.SchedBatch:
policy = 3
case specs.SchedISO:
policy = 4
case specs.SchedIdle:
policy = 5
case specs.SchedDeadline:
policy = 6
default:
return nil, fmt.Errorf("invalid scheduler policy: %s", scheduler.Policy)
}
var flags uint64
for _, flag := range scheduler.Flags {
switch flag {
case specs.SchedFlagResetOnFork:
flags |= 0x01
case specs.SchedFlagReclaim:
flags |= 0x02
case specs.SchedFlagDLOverrun:
flags |= 0x04
case specs.SchedFlagKeepPolicy:
flags |= 0x08
case specs.SchedFlagKeepParams:
flags |= 0x10
case specs.SchedFlagUtilClampMin:
flags |= 0x20
case specs.SchedFlagUtilClampMax:
flags |= 0x40
default:
return nil, fmt.Errorf("invalid scheduler flag: %s", flag)
}
}
return &unix.SchedAttr{
Size: unix.SizeofSchedAttr,
Policy: policy,
Flags: flags,
Nice: scheduler.Nice,
Priority: uint32(scheduler.Priority),
Runtime: scheduler.Runtime,
Deadline: scheduler.Deadline,
Period: scheduler.Period,
}, nil
}
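A minimal usage sketch from within this package (the SCHED_RR values are illustrative):
    attr, err := ToSchedAttr(&Scheduler{
        Policy:   specs.SchedRR,
        Priority: 10,
        Flags:    []specs.LinuxSchedulerFlag{specs.SchedFlagResetOnFork},
    })
    // attr.Policy == 2 (SCHED_RR), attr.Flags == 0x01, and
    // attr.Size == unix.SizeofSchedAttr; a caller would then apply it
    // with unix.SchedSetAttr(pid, attr, 0).
    _, _ = attr, err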
type (

View File

@ -1,14 +1,29 @@
package configs
import "errors"
import (
"errors"
"fmt"
"math"
)
var (
errNoUIDMap = errors.New("User namespaces enabled, but no uid mappings found.")
errNoUserMap = errors.New("User namespaces enabled, but no user mapping found.")
errNoGIDMap = errors.New("User namespaces enabled, but no gid mappings found.")
errNoGroupMap = errors.New("User namespaces enabled, but no group mapping found.")
errNoUIDMap = errors.New("user namespaces enabled, but no uid mappings found")
errNoGIDMap = errors.New("user namespaces enabled, but no gid mappings found")
)
// Please check https://man7.org/linux/man-pages/man2/personality.2.html for const details.
// https://raw.githubusercontent.com/torvalds/linux/master/include/uapi/linux/personality.h
const (
PerLinux = 0x0000
PerLinux32 = 0x0008
)
type LinuxPersonality struct {
// Domain for the personality
// can only contain values "LINUX" and "LINUX32"
Domain int `json:"domain"`
}
// HostUID gets the translated uid for the process on host which could be
// different when user namespaces are enabled.
func (c Config) HostUID(containerId int) (int, error) {
@ -16,11 +31,18 @@ func (c Config) HostUID(containerId int) (int, error) {
if len(c.UIDMappings) == 0 {
return -1, errNoUIDMap
}
id, found := c.hostIDFromMapping(containerId, c.UIDMappings)
id, found := c.hostIDFromMapping(int64(containerId), c.UIDMappings)
if !found {
return -1, errNoUserMap
return -1, fmt.Errorf("user namespaces enabled, but no mapping found for uid %d", containerId)
}
return id, nil
// If we are a 32-bit binary running on a 64-bit system, it's possible
// the mapped user is too large to store in an int, which means we
// cannot do the mapping. We can't just return an int64, because
// os.Setuid() takes an int.
if id > math.MaxInt {
return -1, fmt.Errorf("mapping for uid %d (host id %d) is larger than native integer size (%d)", containerId, id, math.MaxInt)
}
return int(id), nil
}
// Return unchanged id.
return containerId, nil
@ -39,11 +61,18 @@ func (c Config) HostGID(containerId int) (int, error) {
if len(c.GIDMappings) == 0 {
return -1, errNoGIDMap
}
id, found := c.hostIDFromMapping(containerId, c.GIDMappings)
id, found := c.hostIDFromMapping(int64(containerId), c.GIDMappings)
if !found {
return -1, errNoGroupMap
return -1, fmt.Errorf("user namespaces enabled, but no mapping found for gid %d", containerId)
}
return id, nil
// If we are a 32-bit binary running on a 64-bit system, it's possible
// the mapped user is too large to store in an int, which means we
// cannot do the mapping. We can't just return an int64, because
// os.Setgid() takes an int.
if id > math.MaxInt {
return -1, fmt.Errorf("mapping for gid %d (host id %d) is larger than native integer size (%d)", containerId, id, math.MaxInt)
}
return int(id), nil
}
// Return unchanged id.
return containerId, nil
@ -57,7 +86,7 @@ func (c Config) HostRootGID() (int, error) {
// Utility function that gets a host ID for a container ID from user namespace map
// if that ID is present in the map.
func (c Config) hostIDFromMapping(containerID int, uMap []IDMap) (int, bool) {
func (c Config) hostIDFromMapping(containerID int64, uMap []IDMap) (int64, bool) {
for _, m := range uMap {
if (containerID >= m.ContainerID) && (containerID <= (m.ContainerID + m.Size - 1)) {
hostID := m.HostID + (containerID - m.ContainerID)
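For example, with a mapping of {ContainerID: 0, HostID: 100000, Size: 65536}, container ID 1000 falls inside the range and maps to host ID 100000 + (1000 - 0) = 101000, while container ID 70000 is out of range, so the lookup reports that no mapping was found.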

View File

@ -3,5 +3,5 @@ package configs
const (
// EXT_COPYUP is a directive to copy up the contents of a directory when
// a tmpfs is mounted over it.
EXT_COPYUP = 1 << iota //nolint:golint // ignore "don't use ALL_CAPS" warning
EXT_COPYUP = 1 << iota //nolint:golint,revive // ignore "don't use ALL_CAPS" warning
)

View File

@ -2,6 +2,24 @@ package configs
import "golang.org/x/sys/unix"
type MountIDMapping struct {
// Recursive indicates if the mapping needs to be recursive.
Recursive bool `json:"recursive"`
// UserNSPath is a path to a user namespace that indicates the necessary
// id-mappings for MOUNT_ATTR_IDMAP. If set to non-"", UIDMappings and
// GIDMappings must be set to nil.
UserNSPath string `json:"userns_path,omitempty"`
// UIDMappings is the uid mapping set for this mount, to be used with
// MOUNT_ATTR_IDMAP.
UIDMappings []IDMap `json:"uid_mappings,omitempty"`
// GIDMappings is the gid mapping set for this mount, to be used with
// MOUNT_ATTR_IDMAP.
GIDMappings []IDMap `json:"gid_mappings,omitempty"`
}
type Mount struct {
// Source path for the mount.
Source string `json:"source"`
@ -15,6 +33,10 @@ type Mount struct {
// Mount flags.
Flags int `json:"flags"`
// Mount flags that were explicitly cleared in the configuration (meaning
// the user explicitly requested that these flags *not* be set).
ClearedFlags int `json:"cleared_flags"`
// Propagation Flags
PropagationFlags []int `json:"propagation_flags"`
@ -30,17 +52,9 @@ type Mount struct {
// Extensions are additional flags that are specific to runc.
Extensions int `json:"extensions"`
// UIDMappings is used to change file user owners w/o calling chown.
// Note that the underlying filesystem should support this feature to be
// used.
// Every mount point could have its own mapping.
UIDMappings []IDMap `json:"uid_mappings,omitempty"`
// GIDMappings is used to change file group owners w/o calling chown.
// Note that the underlying filesystem should support this feature to be
// used.
// Every mount point could have its own mapping.
GIDMappings []IDMap `json:"gid_mappings,omitempty"`
// Mapping is the MOUNT_ATTR_IDMAP configuration for the mount. If non-nil,
// the mount is configured to use MOUNT_ATTR_IDMAP-style id mappings.
IDMapping *MountIDMapping `json:"id_mapping,omitempty"`
}
func (m *Mount) IsBind() bool {
@ -48,5 +62,5 @@ func (m *Mount) IsBind() bool {
}
func (m *Mount) IsIDMapped() bool {
return len(m.UIDMappings) > 0 || len(m.GIDMappings) > 0
return m.IDMapping != nil
}
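With the per-mount UIDMappings/GIDMappings fields folded into a single IDMapping pointer, a MOUNT_ATTR_IDMAP bind mount is now described in one place. A hedged sketch of constructing such a mount against the configs types shown above (paths and mapping values are illustrative):

package main

import (
	"fmt"

	"github.com/opencontainers/runc/libcontainer/configs"
	"golang.org/x/sys/unix"
)

func main() {
	m := &configs.Mount{
		Source:      "/volumes/data", // illustrative host path
		Destination: "/data",
		Device:      "bind",
		Flags:       unix.MS_BIND,
		// One MountIDMapping now carries what used to be two separate fields.
		IDMapping: &configs.MountIDMapping{
			UIDMappings: []configs.IDMap{{ContainerID: 0, HostID: 100000, Size: 65536}},
			GIDMappings: []configs.IDMap{{ContainerID: 0, HostID: 100000, Size: 65536}},
		},
	}
	fmt.Println(m.IsIDMapped()) // true
}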

View File

@ -59,6 +59,9 @@ func IsNamespaceSupported(ns NamespaceType) bool {
if nsFile == "" {
return false
}
// We don't need to use /proc/thread-self here because the list of
// namespace types is unrelated to the thread. This lets us avoid having to
// do runtime.LockOSThread.
_, err := os.Stat("/proc/self/ns/" + nsFile)
// a namespace is supported if it exists and we have permissions to read it
supported = err == nil

View File

@ -0,0 +1,81 @@
package user
import (
"io"
"github.com/moby/sys/user"
)
// LookupUser looks up a user by their username in /etc/passwd. If the user
// cannot be found (or there is no /etc/passwd file on the filesystem), then
// LookupUser returns an error.
func LookupUser(username string) (user.User, error) {
return user.LookupUser(username)
}
// LookupUid looks up a user by their user id in /etc/passwd. If the user cannot
// be found (or there is no /etc/passwd file on the filesystem), then LookupId
// returns an error.
func LookupUid(uid int) (user.User, error) { //nolint:revive // ignore var-naming: func LookupUid should be LookupUID
return user.LookupUid(uid)
}
// LookupGroup looks up a group by its name in /etc/group. If the group cannot
// be found (or there is no /etc/group file on the filesystem), then LookupGroup
// returns an error.
func LookupGroup(groupname string) (user.Group, error) {
return user.LookupGroup(groupname)
}
// LookupGid looks up a group by its group id in /etc/group. If the group cannot
// be found (or there is no /etc/group file on the filesystem), then LookupGid
// returns an error.
func LookupGid(gid int) (user.Group, error) {
return user.LookupGid(gid)
}
func GetPasswdPath() (string, error) {
return user.GetPasswdPath()
}
func GetPasswd() (io.ReadCloser, error) {
return user.GetPasswd()
}
func GetGroupPath() (string, error) {
return user.GetGroupPath()
}
func GetGroup() (io.ReadCloser, error) {
return user.GetGroup()
}
// CurrentUser looks up the current user by their user id in /etc/passwd. If the
// user cannot be found (or there is no /etc/passwd file on the filesystem),
// then CurrentUser returns an error.
func CurrentUser() (user.User, error) {
return user.CurrentUser()
}
// CurrentGroup looks up the current user's group by their primary group id's
// entry in /etc/passwd. If the group cannot be found (or there is no
// /etc/group file on the filesystem), then CurrentGroup returns an error.
func CurrentGroup() (user.Group, error) {
return user.CurrentGroup()
}
func CurrentUserSubUIDs() ([]user.SubID, error) {
return user.CurrentUserSubUIDs()
}
func CurrentUserSubGIDs() ([]user.SubID, error) {
return user.CurrentUserSubGIDs()
}
func CurrentProcessUIDMap() ([]user.IDMap, error) {
return user.CurrentProcessUIDMap()
}
func CurrentProcessGIDMap() ([]user.IDMap, error) {
return user.CurrentProcessGIDMap()
}

View File

@ -1,157 +0,0 @@
//go:build darwin || dragonfly || freebsd || linux || netbsd || openbsd || solaris
// +build darwin dragonfly freebsd linux netbsd openbsd solaris
package user
import (
"io"
"os"
"strconv"
"golang.org/x/sys/unix"
)
// Unix-specific path to the passwd and group formatted files.
const (
unixPasswdPath = "/etc/passwd"
unixGroupPath = "/etc/group"
)
// LookupUser looks up a user by their username in /etc/passwd. If the user
// cannot be found (or there is no /etc/passwd file on the filesystem), then
// LookupUser returns an error.
func LookupUser(username string) (User, error) {
return lookupUserFunc(func(u User) bool {
return u.Name == username
})
}
// LookupUid looks up a user by their user id in /etc/passwd. If the user cannot
// be found (or there is no /etc/passwd file on the filesystem), then LookupId
// returns an error.
func LookupUid(uid int) (User, error) {
return lookupUserFunc(func(u User) bool {
return u.Uid == uid
})
}
func lookupUserFunc(filter func(u User) bool) (User, error) {
// Get operating system-specific passwd reader-closer.
passwd, err := GetPasswd()
if err != nil {
return User{}, err
}
defer passwd.Close()
// Get the users.
users, err := ParsePasswdFilter(passwd, filter)
if err != nil {
return User{}, err
}
// No user entries found.
if len(users) == 0 {
return User{}, ErrNoPasswdEntries
}
// Assume the first entry is the "correct" one.
return users[0], nil
}
// LookupGroup looks up a group by its name in /etc/group. If the group cannot
// be found (or there is no /etc/group file on the filesystem), then LookupGroup
// returns an error.
func LookupGroup(groupname string) (Group, error) {
return lookupGroupFunc(func(g Group) bool {
return g.Name == groupname
})
}
// LookupGid looks up a group by its group id in /etc/group. If the group cannot
// be found (or there is no /etc/group file on the filesystem), then LookupGid
// returns an error.
func LookupGid(gid int) (Group, error) {
return lookupGroupFunc(func(g Group) bool {
return g.Gid == gid
})
}
func lookupGroupFunc(filter func(g Group) bool) (Group, error) {
// Get operating system-specific group reader-closer.
group, err := GetGroup()
if err != nil {
return Group{}, err
}
defer group.Close()
// Get the groups.
groups, err := ParseGroupFilter(group, filter)
if err != nil {
return Group{}, err
}
// No group entries found.
if len(groups) == 0 {
return Group{}, ErrNoGroupEntries
}
// Assume the first entry is the "correct" one.
return groups[0], nil
}
func GetPasswdPath() (string, error) {
return unixPasswdPath, nil
}
func GetPasswd() (io.ReadCloser, error) {
return os.Open(unixPasswdPath)
}
func GetGroupPath() (string, error) {
return unixGroupPath, nil
}
func GetGroup() (io.ReadCloser, error) {
return os.Open(unixGroupPath)
}
// CurrentUser looks up the current user by their user id in /etc/passwd. If the
// user cannot be found (or there is no /etc/passwd file on the filesystem),
// then CurrentUser returns an error.
func CurrentUser() (User, error) {
return LookupUid(unix.Getuid())
}
// CurrentGroup looks up the current user's group by their primary group id's
// entry in /etc/passwd. If the group cannot be found (or there is no
// /etc/group file on the filesystem), then CurrentGroup returns an error.
func CurrentGroup() (Group, error) {
return LookupGid(unix.Getgid())
}
func currentUserSubIDs(fileName string) ([]SubID, error) {
u, err := CurrentUser()
if err != nil {
return nil, err
}
filter := func(entry SubID) bool {
return entry.Name == u.Name || entry.Name == strconv.Itoa(u.Uid)
}
return ParseSubIDFileFilter(fileName, filter)
}
func CurrentUserSubUIDs() ([]SubID, error) {
return currentUserSubIDs("/etc/subuid")
}
func CurrentUserSubGIDs() ([]SubID, error) {
return currentUserSubIDs("/etc/subgid")
}
func CurrentProcessUIDMap() ([]IDMap, error) {
return ParseIDMapFile("/proc/self/uid_map")
}
func CurrentProcessGIDMap() ([]IDMap, error) {
return ParseIDMapFile("/proc/self/gid_map")
}

View File

@ -1,605 +0,0 @@
package user
import (
"bufio"
"bytes"
"errors"
"fmt"
"io"
"os"
"strconv"
"strings"
)
const (
minID = 0
maxID = 1<<31 - 1 // for 32-bit systems compatibility
)
var (
// ErrNoPasswdEntries is returned if no matching entries were found in /etc/passwd.
ErrNoPasswdEntries = errors.New("no matching entries in passwd file")
// ErrNoGroupEntries is returned if no matching entries were found in /etc/group.
ErrNoGroupEntries = errors.New("no matching entries in group file")
// ErrRange is returned if a UID or GID is outside of the valid range.
ErrRange = fmt.Errorf("uids and gids must be in range %d-%d", minID, maxID)
)
type User struct {
Name string
Pass string
Uid int
Gid int
Gecos string
Home string
Shell string
}
type Group struct {
Name string
Pass string
Gid int
List []string
}
// SubID represents an entry in /etc/sub{u,g}id
type SubID struct {
Name string
SubID int64
Count int64
}
// IDMap represents an entry in /proc/PID/{u,g}id_map
type IDMap struct {
ID int64
ParentID int64
Count int64
}
func parseLine(line []byte, v ...interface{}) {
parseParts(bytes.Split(line, []byte(":")), v...)
}
func parseParts(parts [][]byte, v ...interface{}) {
if len(parts) == 0 {
return
}
for i, p := range parts {
// Ignore cases where we don't have enough fields to populate the arguments.
// Some configuration files like to misbehave.
if len(v) <= i {
break
}
// Use the type of the argument to figure out how to parse it, scanf() style.
// This is legit.
switch e := v[i].(type) {
case *string:
*e = string(p)
case *int:
// "numbers", with conversion errors ignored because of some misbehaving configuration files.
*e, _ = strconv.Atoi(string(p))
case *int64:
*e, _ = strconv.ParseInt(string(p), 10, 64)
case *[]string:
// Comma-separated lists.
if len(p) != 0 {
*e = strings.Split(string(p), ",")
} else {
*e = []string{}
}
default:
// Someone goof'd when writing code using this function. Scream so they can hear us.
panic(fmt.Sprintf("parseLine only accepts {*string, *int, *int64, *[]string} as arguments! %#v is not a pointer!", e))
}
}
}
func ParsePasswdFile(path string) ([]User, error) {
passwd, err := os.Open(path)
if err != nil {
return nil, err
}
defer passwd.Close()
return ParsePasswd(passwd)
}
func ParsePasswd(passwd io.Reader) ([]User, error) {
return ParsePasswdFilter(passwd, nil)
}
func ParsePasswdFileFilter(path string, filter func(User) bool) ([]User, error) {
passwd, err := os.Open(path)
if err != nil {
return nil, err
}
defer passwd.Close()
return ParsePasswdFilter(passwd, filter)
}
func ParsePasswdFilter(r io.Reader, filter func(User) bool) ([]User, error) {
if r == nil {
return nil, errors.New("nil source for passwd-formatted data")
}
var (
s = bufio.NewScanner(r)
out = []User{}
)
for s.Scan() {
line := bytes.TrimSpace(s.Bytes())
if len(line) == 0 {
continue
}
// see: man 5 passwd
// name:password:UID:GID:GECOS:directory:shell
// Name:Pass:Uid:Gid:Gecos:Home:Shell
// root:x:0:0:root:/root:/bin/bash
// adm:x:3:4:adm:/var/adm:/bin/false
p := User{}
parseLine(line, &p.Name, &p.Pass, &p.Uid, &p.Gid, &p.Gecos, &p.Home, &p.Shell)
if filter == nil || filter(p) {
out = append(out, p)
}
}
if err := s.Err(); err != nil {
return nil, err
}
return out, nil
}
func ParseGroupFile(path string) ([]Group, error) {
group, err := os.Open(path)
if err != nil {
return nil, err
}
defer group.Close()
return ParseGroup(group)
}
func ParseGroup(group io.Reader) ([]Group, error) {
return ParseGroupFilter(group, nil)
}
func ParseGroupFileFilter(path string, filter func(Group) bool) ([]Group, error) {
group, err := os.Open(path)
if err != nil {
return nil, err
}
defer group.Close()
return ParseGroupFilter(group, filter)
}
func ParseGroupFilter(r io.Reader, filter func(Group) bool) ([]Group, error) {
if r == nil {
return nil, errors.New("nil source for group-formatted data")
}
rd := bufio.NewReader(r)
out := []Group{}
// Read the file line-by-line.
for {
var (
isPrefix bool
wholeLine []byte
err error
)
// Read the next line. We do so in chunks (as much as reader's
// buffer is able to keep), check if we read enough columns
// already on each step and store final result in wholeLine.
for {
var line []byte
line, isPrefix, err = rd.ReadLine()
if err != nil {
// We should return no error if EOF is reached
// without a match.
if err == io.EOF {
err = nil
}
return out, err
}
// Simple common case: line is short enough to fit in a
// single reader's buffer.
if !isPrefix && len(wholeLine) == 0 {
wholeLine = line
break
}
wholeLine = append(wholeLine, line...)
// Check if we read the whole line already.
if !isPrefix {
break
}
}
// There's no spec for /etc/passwd or /etc/group, but we try to follow
// the same rules as the glibc parser, which allows comments and blank
// space at the beginning of a line.
wholeLine = bytes.TrimSpace(wholeLine)
if len(wholeLine) == 0 || wholeLine[0] == '#' {
continue
}
// see: man 5 group
// group_name:password:GID:user_list
// Name:Pass:Gid:List
// root:x:0:root
// adm:x:4:root,adm,daemon
p := Group{}
parseLine(wholeLine, &p.Name, &p.Pass, &p.Gid, &p.List)
if filter == nil || filter(p) {
out = append(out, p)
}
}
}
type ExecUser struct {
Uid int
Gid int
Sgids []int
Home string
}
// GetExecUserPath is a wrapper for GetExecUser. It reads data from each of the
// given file paths and uses that data as the arguments to GetExecUser. If the
// files cannot be opened for any reason, the error is ignored and a nil
// io.Reader is passed instead.
func GetExecUserPath(userSpec string, defaults *ExecUser, passwdPath, groupPath string) (*ExecUser, error) {
var passwd, group io.Reader
if passwdFile, err := os.Open(passwdPath); err == nil {
passwd = passwdFile
defer passwdFile.Close()
}
if groupFile, err := os.Open(groupPath); err == nil {
group = groupFile
defer groupFile.Close()
}
return GetExecUser(userSpec, defaults, passwd, group)
}
// GetExecUser parses a user specification string (using the passwd and group
// readers as sources for /etc/passwd and /etc/group data, respectively). In
// the case of blank fields or missing data from the sources, the values in
// defaults are used.
//
// GetExecUser will return an error if a user or group literal could not be
// found in any entry in passwd and group respectively.
//
// Examples of valid user specifications are:
// - ""
// - "user"
// - "uid"
// - "user:group"
// - "uid:gid
// - "user:gid"
// - "uid:group"
//
// It should be noted that if you specify a numeric user or group id, they will
// not be evaluated as usernames (only the metadata will be filled). So attempting
// to parse a user with user.Name = "1337" will produce the user with a UID of
// 1337.
func GetExecUser(userSpec string, defaults *ExecUser, passwd, group io.Reader) (*ExecUser, error) {
if defaults == nil {
defaults = new(ExecUser)
}
// Copy over defaults.
user := &ExecUser{
Uid: defaults.Uid,
Gid: defaults.Gid,
Sgids: defaults.Sgids,
Home: defaults.Home,
}
// Sgids slice *cannot* be nil.
if user.Sgids == nil {
user.Sgids = []int{}
}
// Allow for userArg to have either "user" syntax, or optionally "user:group" syntax
var userArg, groupArg string
parseLine([]byte(userSpec), &userArg, &groupArg)
// Convert userArg and groupArg to be numeric, so we don't have to execute
// Atoi *twice* for each iteration over lines.
uidArg, uidErr := strconv.Atoi(userArg)
gidArg, gidErr := strconv.Atoi(groupArg)
// Find the matching user.
users, err := ParsePasswdFilter(passwd, func(u User) bool {
if userArg == "" {
// Default to current state of the user.
return u.Uid == user.Uid
}
if uidErr == nil {
// If the userArg is numeric, always treat it as a UID.
return uidArg == u.Uid
}
return u.Name == userArg
})
// If we can't find the user, we have to bail.
if err != nil && passwd != nil {
if userArg == "" {
userArg = strconv.Itoa(user.Uid)
}
return nil, fmt.Errorf("unable to find user %s: %w", userArg, err)
}
var matchedUserName string
if len(users) > 0 {
// First match wins, even if there's more than one matching entry.
matchedUserName = users[0].Name
user.Uid = users[0].Uid
user.Gid = users[0].Gid
user.Home = users[0].Home
} else if userArg != "" {
// If we can't find a user with the given username, the only other valid
// option is if it's a numeric username with no associated entry in passwd.
if uidErr != nil {
// Not numeric.
return nil, fmt.Errorf("unable to find user %s: %w", userArg, ErrNoPasswdEntries)
}
user.Uid = uidArg
// Must be inside valid uid range.
if user.Uid < minID || user.Uid > maxID {
return nil, ErrRange
}
// Okay, so it's numeric. We can just roll with this.
}
// On to the groups. If we matched a username, we need to do this because of
// the supplementary group IDs.
if groupArg != "" || matchedUserName != "" {
groups, err := ParseGroupFilter(group, func(g Group) bool {
// If the group argument isn't explicit, we'll just search for it.
if groupArg == "" {
// Check if user is a member of this group.
for _, u := range g.List {
if u == matchedUserName {
return true
}
}
return false
}
if gidErr == nil {
// If the groupArg is numeric, always treat it as a GID.
return gidArg == g.Gid
}
return g.Name == groupArg
})
if err != nil && group != nil {
return nil, fmt.Errorf("unable to find groups for spec %v: %w", matchedUserName, err)
}
// Only start modifying user.Gid if it is in explicit form.
if groupArg != "" {
if len(groups) > 0 {
// First match wins, even if there's more than one matching entry.
user.Gid = groups[0].Gid
} else {
// If we can't find a group with the given name, the only other valid
// option is if it's a numeric group name with no associated entry in group.
if gidErr != nil {
// Not numeric.
return nil, fmt.Errorf("unable to find group %s: %w", groupArg, ErrNoGroupEntries)
}
user.Gid = gidArg
// Must be inside valid gid range.
if user.Gid < minID || user.Gid > maxID {
return nil, ErrRange
}
// Okay, so it's numeric. We can just roll with this.
}
} else if len(groups) > 0 {
// Supplementary group ids only make sense if in the implicit form.
user.Sgids = make([]int, len(groups))
for i, group := range groups {
user.Sgids[i] = group.Gid
}
}
}
return user, nil
}
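The spec-parsing rules documented above are easiest to see with in-memory passwd/group data. A usage sketch (the same API survives in github.com/moby/sys/user, which this vendored copy now delegates to; the sample entries are made up):

package main

import (
	"fmt"
	"strings"

	"github.com/moby/sys/user"
)

func main() {
	passwd := strings.NewReader("root:x:0:0:root:/root:/bin/bash\nalice:x:1000:1000::/home/alice:/bin/sh\n")
	group := strings.NewReader("wheel:x:10:alice\nalice:x:1000:\n")

	// "user:group" form: uid/home come from the passwd entry, gid from the
	// explicitly named group.
	execUser, err := user.GetExecUser("alice:wheel", nil, passwd, group)
	if err != nil {
		panic(err)
	}
	fmt.Println(execUser.Uid, execUser.Gid, execUser.Home) // 1000 10 /home/alice
}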
// GetAdditionalGroups looks up a list of groups by name or group id
// against the given /etc/group formatted data. If a group name cannot
// be found, an error will be returned. If a group id cannot be found,
// or the given group data is nil, the id will be returned as-is
// provided it is in the legal range.
func GetAdditionalGroups(additionalGroups []string, group io.Reader) ([]int, error) {
groups := []Group{}
if group != nil {
var err error
groups, err = ParseGroupFilter(group, func(g Group) bool {
for _, ag := range additionalGroups {
if g.Name == ag || strconv.Itoa(g.Gid) == ag {
return true
}
}
return false
})
if err != nil {
return nil, fmt.Errorf("Unable to find additional groups %v: %w", additionalGroups, err)
}
}
gidMap := make(map[int]struct{})
for _, ag := range additionalGroups {
var found bool
for _, g := range groups {
// if we found a matched group either by name or gid, take the
// first matched as correct
if g.Name == ag || strconv.Itoa(g.Gid) == ag {
if _, ok := gidMap[g.Gid]; !ok {
gidMap[g.Gid] = struct{}{}
found = true
break
}
}
}
// we asked for a group but didn't find it. let's check to see
// if we wanted a numeric group
if !found {
gid, err := strconv.ParseInt(ag, 10, 64)
if err != nil {
// Not a numeric ID either.
return nil, fmt.Errorf("Unable to find group %s: %w", ag, ErrNoGroupEntries)
}
// Ensure gid is inside gid range.
if gid < minID || gid > maxID {
return nil, ErrRange
}
gidMap[int(gid)] = struct{}{}
}
}
gids := []int{}
for gid := range gidMap {
gids = append(gids, gid)
}
return gids, nil
}
// GetAdditionalGroupsPath is a wrapper around GetAdditionalGroups
// that opens the groupPath given and gives it as an argument to
// GetAdditionalGroups.
func GetAdditionalGroupsPath(additionalGroups []string, groupPath string) ([]int, error) {
var group io.Reader
if groupFile, err := os.Open(groupPath); err == nil {
group = groupFile
defer groupFile.Close()
}
return GetAdditionalGroups(additionalGroups, group)
}
func ParseSubIDFile(path string) ([]SubID, error) {
subid, err := os.Open(path)
if err != nil {
return nil, err
}
defer subid.Close()
return ParseSubID(subid)
}
func ParseSubID(subid io.Reader) ([]SubID, error) {
return ParseSubIDFilter(subid, nil)
}
func ParseSubIDFileFilter(path string, filter func(SubID) bool) ([]SubID, error) {
subid, err := os.Open(path)
if err != nil {
return nil, err
}
defer subid.Close()
return ParseSubIDFilter(subid, filter)
}
func ParseSubIDFilter(r io.Reader, filter func(SubID) bool) ([]SubID, error) {
if r == nil {
return nil, errors.New("nil source for subid-formatted data")
}
var (
s = bufio.NewScanner(r)
out = []SubID{}
)
for s.Scan() {
line := bytes.TrimSpace(s.Bytes())
if len(line) == 0 {
continue
}
// see: man 5 subuid
p := SubID{}
parseLine(line, &p.Name, &p.SubID, &p.Count)
if filter == nil || filter(p) {
out = append(out, p)
}
}
if err := s.Err(); err != nil {
return nil, err
}
return out, nil
}
func ParseIDMapFile(path string) ([]IDMap, error) {
r, err := os.Open(path)
if err != nil {
return nil, err
}
defer r.Close()
return ParseIDMap(r)
}
func ParseIDMap(r io.Reader) ([]IDMap, error) {
return ParseIDMapFilter(r, nil)
}
func ParseIDMapFileFilter(path string, filter func(IDMap) bool) ([]IDMap, error) {
r, err := os.Open(path)
if err != nil {
return nil, err
}
defer r.Close()
return ParseIDMapFilter(r, filter)
}
func ParseIDMapFilter(r io.Reader, filter func(IDMap) bool) ([]IDMap, error) {
if r == nil {
return nil, errors.New("nil source for idmap-formatted data")
}
var (
s = bufio.NewScanner(r)
out = []IDMap{}
)
for s.Scan() {
line := bytes.TrimSpace(s.Bytes())
if len(line) == 0 {
continue
}
// see: man 7 user_namespaces
p := IDMap{}
parseParts(bytes.Fields(line), &p.ID, &p.ParentID, &p.Count)
if filter == nil || filter(p) {
out = append(out, p)
}
}
if err := s.Err(); err != nil {
return nil, err
}
return out, nil
}
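ParseIDMap consumes the same whitespace-separated three-column format as /proc/<pid>/{u,g}id_map. A small sketch against the equivalent API in github.com/moby/sys/user (the sample mappings are made up):

package main

import (
	"fmt"
	"strings"

	"github.com/moby/sys/user"
)

func main() {
	// <id-inside-ns> <id-in-parent-ns> <count>, as in /proc/self/uid_map.
	maps, err := user.ParseIDMap(strings.NewReader("0 100000 65536\n65536 165536 1000\n"))
	if err != nil {
		panic(err)
	}
	for _, m := range maps {
		fmt.Printf("container %d -> host %d (count %d)\n", m.ID, m.ParentID, m.Count)
	}
}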

View File

@ -0,0 +1,146 @@
// Package user is an alias for [github.com/moby/sys/user].
//
// Deprecated: use [github.com/moby/sys/user].
package user
import (
"io"
"github.com/moby/sys/user"
)
var (
// ErrNoPasswdEntries is returned if no matching entries were found in /etc/passwd.
ErrNoPasswdEntries = user.ErrNoPasswdEntries
// ErrNoGroupEntries is returned if no matching entries were found in /etc/group.
ErrNoGroupEntries = user.ErrNoGroupEntries
// ErrRange is returned if a UID or GID is outside of the valid range.
ErrRange = user.ErrRange
)
type (
User = user.User
Group = user.Group
// SubID represents an entry in /etc/sub{u,g}id.
SubID = user.SubID
// IDMap represents an entry in /proc/PID/{u,g}id_map.
IDMap = user.IDMap
ExecUser = user.ExecUser
)
func ParsePasswdFile(path string) ([]user.User, error) {
return user.ParsePasswdFile(path)
}
func ParsePasswd(passwd io.Reader) ([]user.User, error) {
return user.ParsePasswd(passwd)
}
func ParsePasswdFileFilter(path string, filter func(user.User) bool) ([]user.User, error) {
return user.ParsePasswdFileFilter(path, filter)
}
func ParsePasswdFilter(r io.Reader, filter func(user.User) bool) ([]user.User, error) {
return user.ParsePasswdFilter(r, filter)
}
func ParseGroupFile(path string) ([]user.Group, error) {
return user.ParseGroupFile(path)
}
func ParseGroup(group io.Reader) ([]user.Group, error) {
return user.ParseGroup(group)
}
func ParseGroupFileFilter(path string, filter func(user.Group) bool) ([]user.Group, error) {
return user.ParseGroupFileFilter(path, filter)
}
func ParseGroupFilter(r io.Reader, filter func(user.Group) bool) ([]user.Group, error) {
return user.ParseGroupFilter(r, filter)
}
// GetExecUserPath is a wrapper for GetExecUser. It reads data from each of the
// given file paths and uses that data as the arguments to GetExecUser. If the
// files cannot be opened for any reason, the error is ignored and a nil
// io.Reader is passed instead.
func GetExecUserPath(userSpec string, defaults *user.ExecUser, passwdPath, groupPath string) (*user.ExecUser, error) {
return user.GetExecUserPath(userSpec, defaults, passwdPath, groupPath)
}
// GetExecUser parses a user specification string (using the passwd and group
// readers as sources for /etc/passwd and /etc/group data, respectively). In
// the case of blank fields or missing data from the sources, the values in
// defaults are used.
//
// GetExecUser will return an error if a user or group literal could not be
// found in any entry in passwd and group respectively.
//
// Examples of valid user specifications are:
// - ""
// - "user"
// - "uid"
// - "user:group"
// - "uid:gid
// - "user:gid"
// - "uid:group"
//
// It should be noted that if you specify a numeric user or group id, they will
// not be evaluated as usernames (only the metadata will be filled). So attempting
// to parse a user with user.Name = "1337" will produce the user with a UID of
// 1337.
func GetExecUser(userSpec string, defaults *user.ExecUser, passwd, group io.Reader) (*user.ExecUser, error) {
return user.GetExecUser(userSpec, defaults, passwd, group)
}
// GetAdditionalGroups looks up a list of groups by name or group id
// against the given /etc/group formatted data. If a group name cannot
// be found, an error will be returned. If a group id cannot be found,
// or the given group data is nil, the id will be returned as-is
// provided it is in the legal range.
func GetAdditionalGroups(additionalGroups []string, group io.Reader) ([]int, error) {
return user.GetAdditionalGroups(additionalGroups, group)
}
// GetAdditionalGroupsPath is a wrapper around GetAdditionalGroups
// that opens the groupPath given and gives it as an argument to
// GetAdditionalGroups.
func GetAdditionalGroupsPath(additionalGroups []string, groupPath string) ([]int, error) {
return user.GetAdditionalGroupsPath(additionalGroups, groupPath)
}
func ParseSubIDFile(path string) ([]user.SubID, error) {
return user.ParseSubIDFile(path)
}
func ParseSubID(subid io.Reader) ([]user.SubID, error) {
return user.ParseSubID(subid)
}
func ParseSubIDFileFilter(path string, filter func(user.SubID) bool) ([]user.SubID, error) {
return user.ParseSubIDFileFilter(path, filter)
}
func ParseSubIDFilter(r io.Reader, filter func(user.SubID) bool) ([]user.SubID, error) {
return user.ParseSubIDFilter(r, filter)
}
func ParseIDMapFile(path string) ([]user.IDMap, error) {
return user.ParseIDMapFile(path)
}
func ParseIDMap(r io.Reader) ([]user.IDMap, error) {
return user.ParseIDMap(r)
}
func ParseIDMapFileFilter(path string, filter func(user.IDMap) bool) ([]user.IDMap, error) {
return user.ParseIDMapFileFilter(path, filter)
}
func ParseIDMapFilter(r io.Reader, filter func(user.IDMap) bool) ([]user.IDMap, error) {
return user.ParseIDMapFilter(r, filter)
}

View File

@ -1,43 +0,0 @@
//go:build gofuzz
// +build gofuzz
package user
import (
"io"
"strings"
)
func IsDivisibleBy(n int, divisibleby int) bool {
return (n % divisibleby) == 0
}
func FuzzUser(data []byte) int {
if len(data) == 0 {
return -1
}
if !IsDivisibleBy(len(data), 5) {
return -1
}
var divided [][]byte
chunkSize := len(data) / 5
for i := 0; i < len(data); i += chunkSize {
end := i + chunkSize
divided = append(divided, data[i:end])
}
_, _ = ParsePasswdFilter(strings.NewReader(string(divided[0])), nil)
var passwd, group io.Reader
group = strings.NewReader(string(divided[1]))
_, _ = GetAdditionalGroups([]string{string(divided[2])}, group)
passwd = strings.NewReader(string(divided[3]))
_, _ = GetExecUser(string(divided[4]), nil, passwd, group)
return 1
}

View File

@ -0,0 +1,79 @@
#define _GNU_SOURCE
#include <fcntl.h>
#include <sched.h>
#include <stdio.h>
#include <unistd.h>
#include <stdarg.h>
#include <stdlib.h>
/*
 * All of the code here is run inside an async-signal-safe context, so we need
 * to be careful to not call any functions that could cause issues. In theory,
 * since we are a Go program, there are fewer restrictions; in practice, it's
 * better to be safe than sorry.
 *
 * The only exception is exit, which we need to call to make sure we don't
 * return into runc.
 */
void bail(int pipefd, const char *fmt, ...)
{
va_list args;
va_start(args, fmt);
vdprintf(pipefd, fmt, args);
va_end(args);
exit(1);
}
int spawn_userns_cat(char *userns_path, char *path, int outfd, int errfd)
{
char buffer[4096] = { 0 };
pid_t child = fork();
if (child != 0)
return child;
/* in child */
/* Join the target userns. */
int nsfd = open(userns_path, O_RDONLY);
if (nsfd < 0)
bail(errfd, "open userns path %s failed: %m", userns_path);
int err = setns(nsfd, CLONE_NEWUSER);
if (err < 0)
bail(errfd, "setns %s failed: %m", userns_path);
close(nsfd);
/* Pipe the requested file contents. */
int fd = open(path, O_RDONLY);
if (fd < 0)
bail(errfd, "open %s in userns %s failed: %m", path, userns_path);
int nread, ntotal = 0;
while ((nread = read(fd, buffer, sizeof(buffer))) != 0) {
if (nread < 0)
bail(errfd, "read bytes from %s failed (after %d total bytes read): %m", path, ntotal);
ntotal += nread;
int nwritten = 0;
while (nwritten < nread) {
int n = write(outfd, buffer + nwritten, nread - nwritten);
if (n < 0)
bail(errfd, "write %d bytes from %s failed (after %d bytes written): %m",
nread - nwritten, path, nwritten);
nwritten += n;
}
if (nread != nwritten)
bail(errfd, "mismatch for bytes read and written: %d read != %d written", nread, nwritten);
}
close(fd);
close(outfd);
close(errfd);
/* We must exit here, otherwise we would return into a forked runc. */
exit(0);
}

View File

@ -0,0 +1,186 @@
//go:build linux
package userns
import (
"bufio"
"bytes"
"fmt"
"io"
"os"
"unsafe"
"github.com/opencontainers/runc/libcontainer/configs"
"github.com/sirupsen/logrus"
)
/*
#include <stdlib.h>
extern int spawn_userns_cat(char *userns_path, char *path, int outfd, int errfd);
*/
import "C"
func parseIdmapData(data []byte) (ms []configs.IDMap, err error) {
scanner := bufio.NewScanner(bytes.NewReader(data))
for scanner.Scan() {
var m configs.IDMap
line := scanner.Text()
if _, err := fmt.Sscanf(line, "%d %d %d", &m.ContainerID, &m.HostID, &m.Size); err != nil {
return nil, fmt.Errorf("parsing id map failed: invalid format in line %q: %w", line, err)
}
ms = append(ms, m)
}
if err := scanner.Err(); err != nil {
return nil, fmt.Errorf("parsing id map failed: %w", err)
}
return ms, nil
}
// Do something equivalent to nsenter --user=<nsPath> cat <path>, but more
// efficiently. Returns the contents of the requested file from within the user
// namespace.
func spawnUserNamespaceCat(nsPath string, path string) ([]byte, error) {
rdr, wtr, err := os.Pipe()
if err != nil {
return nil, fmt.Errorf("create pipe for userns spawn failed: %w", err)
}
defer rdr.Close()
defer wtr.Close()
errRdr, errWtr, err := os.Pipe()
if err != nil {
return nil, fmt.Errorf("create error pipe for userns spawn failed: %w", err)
}
defer errRdr.Close()
defer errWtr.Close()
cNsPath := C.CString(nsPath)
defer C.free(unsafe.Pointer(cNsPath))
cPath := C.CString(path)
defer C.free(unsafe.Pointer(cPath))
childPid := C.spawn_userns_cat(cNsPath, cPath, C.int(wtr.Fd()), C.int(errWtr.Fd()))
if childPid < 0 {
return nil, fmt.Errorf("failed to spawn fork for userns")
} else if childPid == 0 {
// this should never happen
panic("runc executing inside fork child -- unsafe state!")
}
// We are in the parent -- close the write end of the pipe before reading.
wtr.Close()
output, err := io.ReadAll(rdr)
rdr.Close()
if err != nil {
return nil, fmt.Errorf("reading from userns spawn failed: %w", err)
}
// Ditto for the error pipe.
errWtr.Close()
errOutput, err := io.ReadAll(errRdr)
errRdr.Close()
if err != nil {
return nil, fmt.Errorf("reading from userns spawn error pipe failed: %w", err)
}
errOutput = bytes.TrimSpace(errOutput)
// Clean up the child.
child, err := os.FindProcess(int(childPid))
if err != nil {
return nil, fmt.Errorf("could not find userns spawn process: %w", err)
}
state, err := child.Wait()
if err != nil {
return nil, fmt.Errorf("failed to wait for userns spawn process: %w", err)
}
if !state.Success() {
errStr := string(errOutput)
if errStr == "" {
errStr = fmt.Sprintf("unknown error (status code %d)", state.ExitCode())
}
return nil, fmt.Errorf("userns spawn: %s", errStr)
} else if len(errOutput) > 0 {
// We can just ignore weird output in the error pipe if the process
// didn't bail(), but log it for debugging purposes.
logrus.Debugf("userns spawn succeeded but unexpected error message found: %s", string(errOutput))
}
// The subprocess succeeded, return whatever it wrote to the pipe.
return output, nil
}
func GetUserNamespaceMappings(nsPath string) (uidMap, gidMap []configs.IDMap, err error) {
var (
pid int
extra rune
tryFastPath bool
)
// nsPath is usually of the form /proc/<pid>/ns/user, which means that we
// already have a pid that is part of the user namespace and thus we can
// just use the pid to read from /proc/<pid>/*id_map.
//
// Note that Sscanf doesn't consume the whole input, so we check for any
// trailing data with %c. That way, we can be sure the pattern matched
// /proc/$pid/ns/user _exactly_ iff n == 1.
if n, _ := fmt.Sscanf(nsPath, "/proc/%d/ns/user%c", &pid, &extra); n == 1 {
tryFastPath = pid > 0
}
for _, mapType := range []struct {
name string
idMap *[]configs.IDMap
}{
{"uid_map", &uidMap},
{"gid_map", &gidMap},
} {
var mapData []byte
if tryFastPath {
path := fmt.Sprintf("/proc/%d/%s", pid, mapType.name)
data, err := os.ReadFile(path)
if err != nil {
// Do not error out here -- we need to try the slow path if the
// fast path failed.
logrus.Debugf("failed to use fast path to read %s from userns %s (error: %s), falling back to slow userns-join path", mapType.name, nsPath, err)
} else {
mapData = data
}
} else {
logrus.Debugf("cannot use fast path to read %s from userns %s, falling back to slow userns-join path", mapType.name, nsPath)
}
if mapData == nil {
// We have to actually join the namespace if we cannot take the
// fast path. The path is resolved with respect to the child
// process, so just use /proc/self.
data, err := spawnUserNamespaceCat(nsPath, "/proc/self/"+mapType.name)
if err != nil {
return nil, nil, err
}
mapData = data
}
idMap, err := parseIdmapData(mapData)
if err != nil {
return nil, nil, fmt.Errorf("failed to parse %s of userns %s: %w", mapType.name, nsPath, err)
}
*mapType.idMap = idMap
}
return uidMap, gidMap, nil
}
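The trailing-%c trick described in the comment above is worth seeing in isolation: %c can only match if there is input left over, so n == 1 proves the pattern consumed the whole string exactly. A stdlib-only sketch:

package main

import "fmt"

func main() {
	for _, path := range []string{"/proc/1234/ns/user", "/proc/1234/ns/user/stale"} {
		var (
			pid   int
			extra rune
		)
		// n == 2 means %c matched trailing data, i.e. the path had a suffix.
		n, _ := fmt.Sscanf(path, "/proc/%d/ns/user%c", &pid, &extra)
		fmt.Printf("%s -> exact match: %v\n", path, n == 1)
	}
}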
// IsSameMapping returns whether or not the two id mappings are the same. Note
// that if the order of the mappings is different, or a mapping has been split,
// the mappings will be considered different.
func IsSameMapping(a, b []configs.IDMap) bool {
if len(a) != len(b) {
return false
}
for idx := range a {
if a[idx] != b[idx] {
return false
}
}
return true
}

View File

@ -0,0 +1,156 @@
package userns
import (
"fmt"
"os"
"sort"
"strings"
"sync"
"syscall"
"github.com/sirupsen/logrus"
"golang.org/x/sys/unix"
"github.com/opencontainers/runc/libcontainer/configs"
)
type Mapping struct {
UIDMappings []configs.IDMap
GIDMappings []configs.IDMap
}
func (m Mapping) toSys() (uids, gids []syscall.SysProcIDMap) {
for _, uid := range m.UIDMappings {
uids = append(uids, syscall.SysProcIDMap{
ContainerID: int(uid.ContainerID),
HostID: int(uid.HostID),
Size: int(uid.Size),
})
}
for _, gid := range m.GIDMappings {
gids = append(gids, syscall.SysProcIDMap{
ContainerID: int(gid.ContainerID),
HostID: int(gid.HostID),
Size: int(gid.Size),
})
}
return
}
// id returns a unique identifier for this mapping, agnostic of the order of
// the uid and gid mappings (because the order doesn't matter to the kernel).
// The set of userns handles is indexed using this ID.
func (m Mapping) id() string {
var uids, gids []string
for _, idmap := range m.UIDMappings {
uids = append(uids, fmt.Sprintf("%d:%d:%d", idmap.ContainerID, idmap.HostID, idmap.Size))
}
for _, idmap := range m.GIDMappings {
gids = append(gids, fmt.Sprintf("%d:%d:%d", idmap.ContainerID, idmap.HostID, idmap.Size))
}
// We don't care about the sort order -- just sort them.
sort.Strings(uids)
sort.Strings(gids)
return "uid=" + strings.Join(uids, ",") + ";gid=" + strings.Join(gids, ",")
}
type Handles struct {
m sync.Mutex
maps map[string]*os.File
}
// Release all resources associated with this Handle. All existing files
// returned from Get() will continue to work even after calling Release(). The
// same Handles can be re-used after calling Release().
func (hs *Handles) Release() {
hs.m.Lock()
defer hs.m.Unlock()
// Close the files for good measure, though GC will do that for us anyway.
for _, file := range hs.maps {
_ = file.Close()
}
hs.maps = nil
}
func spawnProc(req Mapping) (*os.Process, error) {
// We need to spawn a subprocess with the requested mappings, which is
// unfortunately quite expensive. The "safe" way of doing this is natively
// with Go (and then spawning something like "sleep infinity"), but
// execve() is a waste of cycles because we just need some process to have
// the right mapping; we don't care what it's executing. The "unsafe"
// option of doing a clone() behind the back of Go is probably okay in
// theory as long as we just do kill(getpid(), SIGSTOP). However, if we
// tell Go to put the new process into PTRACE_TRACEME mode, we can avoid
// the exec and not have to faff around with the mappings.
//
// Note that Go's stdlib does not support newuidmap, but in the case of
// id-mapped mounts, it seems incredibly unlikely that the user will be
// requesting us to do a remapping as an unprivileged user with mappings
// they have privileges over.
logrus.Debugf("spawning dummy process for id-mapping %s", req.id())
uidMappings, gidMappings := req.toSys()
// We don't need to use /proc/thread-self here because the exe mm of a
// thread-group is guaranteed to be the same for all threads by definition.
// This lets us avoid having to do runtime.LockOSThread.
return os.StartProcess("/proc/self/exe", []string{"runc", "--help"}, &os.ProcAttr{
Sys: &syscall.SysProcAttr{
Cloneflags: unix.CLONE_NEWUSER,
UidMappings: uidMappings,
GidMappings: gidMappings,
GidMappingsEnableSetgroups: false,
// Put the process into PTRACE_TRACEME mode to allow us to get the
// userns without having a proper execve() target.
Ptrace: true,
},
})
}
func dupFile(f *os.File) (*os.File, error) {
newFd, err := unix.FcntlInt(f.Fd(), unix.F_DUPFD_CLOEXEC, 0)
if err != nil {
return nil, os.NewSyscallError("fcntl(F_DUPFD_CLOEXEC)", err)
}
return os.NewFile(uintptr(newFd), f.Name()), nil
}
// Get returns a handle to a /proc/$pid/ns/user nsfs file with the requested
// mapping. The processes spawned to produce userns nsfds are cached, so if
// equivalent user namespace mappings are requested, the same user namespace
// will be returned. The caller is responsible for closing the returned file
// descriptor.
func (hs *Handles) Get(req Mapping) (file *os.File, err error) {
hs.m.Lock()
defer hs.m.Unlock()
if hs.maps == nil {
hs.maps = make(map[string]*os.File)
}
file, ok := hs.maps[req.id()]
if !ok {
proc, err := spawnProc(req)
if err != nil {
return nil, fmt.Errorf("failed to spawn dummy process for map %s: %w", req.id(), err)
}
// Make sure we kill the helper process. We ignore errors because
// there's not much we can do about them anyway; the helper is no
// longer needed once we have a handle to its user namespace.
defer func() {
_ = proc.Kill()
_, _ = proc.Wait()
}()
// Stash away a handle to the userns file. This is neater than keeping
// the process alive, because Go's GC can handle files much better than
// leaked processes, and having long-lived useless processes seems
// less than ideal.
file, err = os.Open(fmt.Sprintf("/proc/%d/ns/user", proc.Pid))
if err != nil {
return nil, err
}
hs.maps[req.id()] = file
}
// Duplicate the file, to make sure the lifecycle of each *os.File we
// return is independent.
return dupFile(file)
}
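A hedged usage sketch of the Handles cache (this needs privileges sufficient to create a user namespace with the requested mappings; the values are illustrative):

package main

import (
	"fmt"

	"github.com/opencontainers/runc/libcontainer/configs"
	"github.com/opencontainers/runc/libcontainer/userns"
)

func main() {
	var handles userns.Handles
	defer handles.Release()

	nsFile, err := handles.Get(userns.Mapping{
		UIDMappings: []configs.IDMap{{ContainerID: 0, HostID: 100000, Size: 65536}},
		GIDMappings: []configs.IDMap{{ContainerID: 0, HostID: 100000, Size: 65536}},
	})
	if err != nil {
		panic(err)
	}
	defer nsFile.Close()

	// nsFile refers to /proc/<pid>/ns/user of the (already reaped) dummy
	// process; a second Get with an equivalent mapping reuses the cached one.
	fmt.Println(nsFile.Name())
}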

View File

@ -3,15 +3,12 @@ package utils
import (
"encoding/binary"
"encoding/json"
"fmt"
"io"
"os"
"path/filepath"
"strconv"
"strings"
"unsafe"
securejoin "github.com/cyphar/filepath-securejoin"
"golang.org/x/sys/unix"
)
@ -43,6 +40,9 @@ func ExitStatus(status unix.WaitStatus) int {
}
// WriteJSON writes the provided struct v to w using standard json marshaling
// without a trailing newline. This is used instead of json.Encoder because
// the json decoder can misbehave in some cases; see:
// https://github.com/docker/docker/issues/14203#issuecomment-174177790
func WriteJSON(w io.Writer, v interface{}) error {
data, err := json.Marshal(v)
if err != nil {
@ -99,39 +99,6 @@ func stripRoot(root, path string) string {
return CleanPath("/" + path)
}
// WithProcfd runs the passed closure with a procfd path (/proc/self/fd/...)
// corresponding to the unsafePath resolved within the root. Before passing the
// fd, this path is verified to have been inside the root -- so operating on it
// through the passed fdpath should be safe. Do not access this path through
// the original path strings, and do not attempt to use the pathname outside of
// the passed closure (the file handle will be freed once the closure returns).
func WithProcfd(root, unsafePath string, fn func(procfd string) error) error {
// Remove the root then forcefully resolve inside the root.
unsafePath = stripRoot(root, unsafePath)
path, err := securejoin.SecureJoin(root, unsafePath)
if err != nil {
return fmt.Errorf("resolving path inside rootfs failed: %w", err)
}
// Open the target path.
fh, err := os.OpenFile(path, unix.O_PATH|unix.O_CLOEXEC, 0)
if err != nil {
return fmt.Errorf("open o_path procfd: %w", err)
}
defer fh.Close()
// Double-check the path is the one we expected.
procfd := "/proc/self/fd/" + strconv.Itoa(int(fh.Fd()))
if realpath, err := os.Readlink(procfd); err != nil {
return fmt.Errorf("procfd verification failed: %w", err)
} else if realpath != path {
return fmt.Errorf("possibly malicious path detected -- refusing to operate on %s", realpath)
}
// Run the closure.
return fn(procfd)
}
// SearchLabels searches through a list of key=value pairs for a given key,
// returning its value, and the binary flag telling whether the key exist.
func SearchLabels(labels []string, key string) (string, bool) {

View File

@ -7,9 +7,14 @@ import (
"fmt"
"math"
"os"
"path/filepath"
"runtime"
"strconv"
"sync"
_ "unsafe" // for go:linkname
securejoin "github.com/cyphar/filepath-securejoin"
"github.com/sirupsen/logrus"
"golang.org/x/sys/unix"
)
@ -49,15 +54,15 @@ func haveCloseRangeCloexec() bool {
return haveCloseRangeCloexecBool
}
// CloseExecFrom applies O_CLOEXEC to all file descriptors currently open for
// the process (except for those below the given fd value).
func CloseExecFrom(minFd int) error {
if haveCloseRangeCloexec() {
err := unix.CloseRange(uint(minFd), math.MaxUint, unix.CLOSE_RANGE_CLOEXEC)
return os.NewSyscallError("close_range", err)
}
type fdFunc func(fd int)
fdDir, err := os.Open("/proc/self/fd")
// fdRangeFrom calls the passed fdFunc for each file descriptor that is open in
// the current process.
func fdRangeFrom(minFd int, fn fdFunc) error {
procSelfFd, closer := ProcThreadSelf("fd")
defer closer()
fdDir, err := os.Open(procSelfFd)
if err != nil {
return err
}
@ -81,16 +86,68 @@ func CloseExecFrom(minFd int) error {
if fd < minFd {
continue
}
// Intentionally ignore errors from unix.CloseOnExec -- the cases where
// this might fail are basically file descriptors that have already
// been closed (including and especially the one that was created when
// os.ReadDir did the "opendir" syscall).
unix.CloseOnExec(fd)
// Ignore the file descriptor we used for readdir, as it will be closed
// when we return.
if uintptr(fd) == fdDir.Fd() {
continue
}
// Run the closure.
fn(fd)
}
return nil
}
// NewSockPair returns a new unix socket pair
// CloseExecFrom sets the O_CLOEXEC flag on all file descriptors greater or
// equal to minFd in the current process.
func CloseExecFrom(minFd int) error {
// Use close_range(CLOSE_RANGE_CLOEXEC) if possible.
if haveCloseRangeCloexec() {
err := unix.CloseRange(uint(minFd), math.MaxUint, unix.CLOSE_RANGE_CLOEXEC)
return os.NewSyscallError("close_range", err)
}
// Otherwise, fall back to the standard loop.
return fdRangeFrom(minFd, unix.CloseOnExec)
}
//go:linkname runtime_IsPollDescriptor internal/poll.IsPollDescriptor
// In order to make sure we do not close the internal epoll descriptors the Go
// runtime uses, we need to ensure that we skip descriptors that match
// "internal/poll".IsPollDescriptor. Yes, this is a Go runtime internal thing,
// unfortunately there's no other way to be sure we're only keeping the file
// descriptors the Go runtime needs. Hopefully nothing blows up doing this...
func runtime_IsPollDescriptor(fd uintptr) bool //nolint:revive
// UnsafeCloseFrom closes all file descriptors greater or equal to minFd in the
// current process, except for those critical to Go's runtime (such as the
// netpoll management descriptors).
//
// NOTE: this function is incredibly dangerous to use in most Go code, as
// closing file descriptors from underneath *os.File handles can lead to very
// bad behaviour (the closed file descriptor can be re-used and then any
// *os.File operations would apply to the wrong file). This function is only
// intended to be called from the last stage of runc init.
func UnsafeCloseFrom(minFd int) error {
// We cannot use close_range(2) even if it is available, because we must
// not close some file descriptors.
return fdRangeFrom(minFd, func(fd int) {
if runtime_IsPollDescriptor(uintptr(fd)) {
// These are the Go runtime's internal netpoll file descriptors.
// These file descriptors are operated on deep in the Go scheduler,
// and closing those files from underneath Go can result in panics.
// There is no issue with keeping them because they are not
// executable and are not useful to an attacker anyway. Also we
// don't have any choice.
return
}
// There's nothing we can do about errors from close(2), and the
// only likely error to be seen is EBADF which indicates the fd was
// already closed (in which case, we got what we wanted).
_ = unix.Close(fd)
})
}
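A sketch of the intended division of labour: CloseExecFrom is the safe choice before an exec (inherited descriptors simply stop leaking), while UnsafeCloseFrom stays reserved for the final stage of runc init, as the note above explains. Illustrative use of the safe variant:

package main

import (
	"os"
	"syscall"

	"github.com/opencontainers/runc/libcontainer/utils"
)

func main() {
	// Mark every descriptor at or above 3 close-on-exec, so the child only
	// inherits stdio. Uses close_range(2) when available, else the /proc
	// fd-walking fallback.
	if err := utils.CloseExecFrom(3); err != nil {
		panic(err)
	}
	_ = syscall.Exec("/bin/true", []string{"true"}, os.Environ())
}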
// NewSockPair returns a new SOCK_STREAM unix socket pair.
func NewSockPair(name string) (parent, child *os.File, err error) {
fds, err := unix.Socketpair(unix.AF_LOCAL, unix.SOCK_STREAM|unix.SOCK_CLOEXEC, 0)
if err != nil {
@ -98,3 +155,109 @@ func NewSockPair(name string) (parent, child *os.File, err error) {
}
return os.NewFile(uintptr(fds[1]), name+"-p"), os.NewFile(uintptr(fds[0]), name+"-c"), nil
}
// WithProcfd runs the passed closure with a procfd path (/proc/self/fd/...)
// corresponding to the unsafePath resolved within the root. Before passing the
// fd, this path is verified to have been inside the root -- so operating on it
// through the passed fdpath should be safe. Do not access this path through
// the original path strings, and do not attempt to use the pathname outside of
// the passed closure (the file handle will be freed once the closure returns).
func WithProcfd(root, unsafePath string, fn func(procfd string) error) error {
// Remove the root then forcefully resolve inside the root.
unsafePath = stripRoot(root, unsafePath)
path, err := securejoin.SecureJoin(root, unsafePath)
if err != nil {
return fmt.Errorf("resolving path inside rootfs failed: %w", err)
}
procSelfFd, closer := ProcThreadSelf("fd/")
defer closer()
// Open the target path.
fh, err := os.OpenFile(path, unix.O_PATH|unix.O_CLOEXEC, 0)
if err != nil {
return fmt.Errorf("open o_path procfd: %w", err)
}
defer fh.Close()
procfd := filepath.Join(procSelfFd, strconv.Itoa(int(fh.Fd())))
// Double-check the path is the one we expected.
if realpath, err := os.Readlink(procfd); err != nil {
return fmt.Errorf("procfd verification failed: %w", err)
} else if realpath != path {
return fmt.Errorf("possibly malicious path detected -- refusing to operate on %s", realpath)
}
return fn(procfd)
}
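A usage sketch for WithProcfd: operate on a path inside a container rootfs through the verified procfd, so a symlink pointing outside the root cannot be followed (the rootfs path here is illustrative):

package main

import (
	"os"

	"github.com/opencontainers/runc/libcontainer/utils"
)

func main() {
	err := utils.WithProcfd("/run/ctr/rootfs", "/etc/hosts", func(procfd string) error {
		// procfd has been verified to resolve inside the rootfs.
		return os.Chmod(procfd, 0o644)
	})
	if err != nil {
		panic(err)
	}
}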
type ProcThreadSelfCloser func()
var (
haveProcThreadSelf bool
haveProcThreadSelfOnce sync.Once
)
// ProcThreadSelf returns a string that is equivalent to
// /proc/thread-self/<subpath>, with a graceful fallback on older kernels where
// /proc/thread-self doesn't exist. This method DOES NOT use SecureJoin,
// meaning that the passed string needs to be trusted. The caller _must_ call
// the returned procThreadSelfCloser function (which is runtime.UnlockOSThread)
// *only once* after it has finished using the returned path string.
func ProcThreadSelf(subpath string) (string, ProcThreadSelfCloser) {
haveProcThreadSelfOnce.Do(func() {
if _, err := os.Stat("/proc/thread-self/"); err == nil {
haveProcThreadSelf = true
} else {
logrus.Debugf("cannot stat /proc/thread-self (%v), falling back to /proc/self/task/<tid>", err)
}
})
// We need to lock our thread until the caller is done with the path string
// because any non-atomic operation on the path (such as opening a file,
// then reading it) could be interrupted by the Go runtime where the
// underlying thread is swapped out and the original thread is killed,
// resulting in pull-your-hair-out-hard-to-debug issues in the caller. In
// addition, the pre-3.17 fallback makes everything non-atomic because the
// same thing could happen between unix.Gettid() and the path operations.
//
// In theory, we don't need to lock in the atomic user case when using
// /proc/thread-self/, but it's better to be safe than sorry (and there are
// only one or two truly atomic users of /proc/thread-self/).
runtime.LockOSThread()
threadSelf := "/proc/thread-self/"
if !haveProcThreadSelf {
// Pre-3.17 kernels did not have /proc/thread-self, so do it manually.
threadSelf = "/proc/self/task/" + strconv.Itoa(unix.Gettid()) + "/"
if _, err := os.Stat(threadSelf); err != nil {
// Unfortunately, this code is called from rootfs_linux.go where we
// are running inside the pid namespace of the container but /proc
// is the host's procfs. Unfortunately there is no real way to get
// the correct tid to use here (the kernel age means we cannot do
// things like set up a private fsopen("proc") -- even scanning
// NSpid in all of the tasks in /proc/self/task/*/status requires
// Linux 4.1).
//
// So, we just have to assume that /proc/self is acceptable in this
// one specific case.
if os.Getpid() == 1 {
logrus.Debugf("/proc/thread-self (tid=%d) cannot be emulated inside the initial container setup -- using /proc/self instead: %v", unix.Gettid(), err)
} else {
// This should never happen, but the fallback should work in most cases...
logrus.Warnf("/proc/thread-self could not be emulated for pid=%d (tid=%d) -- using more buggy /proc/self fallback instead: %v", os.Getpid(), unix.Gettid(), err)
}
threadSelf = "/proc/self/"
}
}
return threadSelf + subpath, runtime.UnlockOSThread
}
// ProcThreadSelfFd is a small wrapper around ProcThreadSelf to make it easier
// to create a /proc/thread-self handle for a given file descriptor.
//
// It is basically equivalent to ProcThreadSelf(fmt.Sprintf("fd/%d", fd)), but
// without using fmt.Sprintf to avoid unneeded overhead.
func ProcThreadSelfFd(fd uintptr) (string, ProcThreadSelfCloser) {
return ProcThreadSelf("fd/" + strconv.FormatUint(uint64(fd), 10))
}
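The closer contract is the part that is easy to get wrong: call it exactly once, and only after the returned path is no longer in use. A minimal sketch:

package main

import (
	"fmt"
	"os"

	"github.com/opencontainers/runc/libcontainer/utils"
)

func main() {
	statusPath, closer := utils.ProcThreadSelf("status")
	// closer is runtime.UnlockOSThread; defer it so the thread stays locked
	// for as long as we use statusPath.
	defer closer()

	data, err := os.ReadFile(statusPath)
	if err != nil {
		panic(err)
	}
	fmt.Printf("%.40s\n", data)
}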

View File

@ -11,6 +11,7 @@ import (
"strconv"
"strings"
"google.golang.org/protobuf/encoding/protowire"
"google.golang.org/protobuf/internal/encoding/json"
"google.golang.org/protobuf/internal/encoding/messageset"
"google.golang.org/protobuf/internal/errors"
@ -23,7 +24,7 @@ import (
"google.golang.org/protobuf/reflect/protoregistry"
)
// Unmarshal reads the given []byte into the given proto.Message.
// Unmarshal reads the given []byte into the given [proto.Message].
// The provided message must be mutable (e.g., a non-nil pointer to a message).
func Unmarshal(b []byte, m proto.Message) error {
return UnmarshalOptions{}.Unmarshal(b, m)
@ -37,7 +38,7 @@ type UnmarshalOptions struct {
// required fields will not return an error.
AllowPartial bool
// If DiscardUnknown is set, unknown fields are ignored.
// If DiscardUnknown is set, unknown fields and enum name values are ignored.
DiscardUnknown bool
// Resolver is used for looking up types when unmarshaling
@ -47,9 +48,13 @@ type UnmarshalOptions struct {
protoregistry.MessageTypeResolver
protoregistry.ExtensionTypeResolver
}
// RecursionLimit limits how deeply messages may be nested.
// If zero, a default limit is applied.
RecursionLimit int
}
// Unmarshal reads the given []byte and populates the given proto.Message
// Unmarshal reads the given []byte and populates the given [proto.Message]
// using options in the UnmarshalOptions object.
// It will clear the message first before setting the fields.
// If it returns an error, the given message may be partially set.
@ -67,6 +72,9 @@ func (o UnmarshalOptions) unmarshal(b []byte, m proto.Message) error {
if o.Resolver == nil {
o.Resolver = protoregistry.GlobalTypes
}
if o.RecursionLimit == 0 {
o.RecursionLimit = protowire.DefaultRecursionLimit
}
dec := decoder{json.NewDecoder(b), o}
if err := dec.unmarshalMessage(m.ProtoReflect(), false); err != nil {
@ -114,6 +122,10 @@ func (d decoder) syntaxError(pos int, f string, x ...interface{}) error {
// unmarshalMessage unmarshals a message into the given protoreflect.Message.
func (d decoder) unmarshalMessage(m protoreflect.Message, skipTypeURL bool) error {
d.opts.RecursionLimit--
if d.opts.RecursionLimit < 0 {
return errors.New("exceeded max recursion depth")
}
if unmarshal := wellKnownTypeUnmarshaler(m.Descriptor().FullName()); unmarshal != nil {
return unmarshal(d, m)
}
@ -266,7 +278,9 @@ func (d decoder) unmarshalSingular(m protoreflect.Message, fd protoreflect.Field
if err != nil {
return err
}
m.Set(fd, val)
if val.IsValid() {
m.Set(fd, val)
}
return nil
}
@ -329,7 +343,7 @@ func (d decoder) unmarshalScalar(fd protoreflect.FieldDescriptor) (protoreflect.
}
case protoreflect.EnumKind:
if v, ok := unmarshalEnum(tok, fd); ok {
if v, ok := unmarshalEnum(tok, fd, d.opts.DiscardUnknown); ok {
return v, nil
}
@ -474,7 +488,7 @@ func unmarshalBytes(tok json.Token) (protoreflect.Value, bool) {
return protoreflect.ValueOfBytes(b), true
}
func unmarshalEnum(tok json.Token, fd protoreflect.FieldDescriptor) (protoreflect.Value, bool) {
func unmarshalEnum(tok json.Token, fd protoreflect.FieldDescriptor, discardUnknown bool) (protoreflect.Value, bool) {
switch tok.Kind() {
case json.String:
// Lookup EnumNumber based on name.
@ -482,6 +496,9 @@ func unmarshalEnum(tok json.Token, fd protoreflect.FieldDescriptor) (protoreflec
if enumVal := fd.Enum().Values().ByName(protoreflect.Name(s)); enumVal != nil {
return protoreflect.ValueOfEnum(enumVal.Number()), true
}
if discardUnknown {
return protoreflect.Value{}, true
}
case json.Number:
if n, ok := tok.Int(32); ok {
@ -542,7 +559,9 @@ func (d decoder) unmarshalList(list protoreflect.List, fd protoreflect.FieldDesc
if err != nil {
return err
}
list.Append(val)
if val.IsValid() {
list.Append(val)
}
}
}
@ -609,8 +628,9 @@ Loop:
if err != nil {
return err
}
mmap.Set(pkey, pval)
if pval.IsValid() {
mmap.Set(pkey, pval)
}
}
return nil
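Both knobs touched by this change are caller-visible on UnmarshalOptions. A hedged sketch using a well-known type as the target (the JSON payload is made up):

package main

import (
	"fmt"

	"google.golang.org/protobuf/encoding/protojson"
	"google.golang.org/protobuf/types/known/structpb"
)

func main() {
	var s structpb.Struct
	opts := protojson.UnmarshalOptions{
		DiscardUnknown: true, // now also skips unknown enum name values
		RecursionLimit: 64,   // 0 falls back to the default wire limit
	}
	if err := opts.Unmarshal([]byte(`{"a": {"b": 1}}`), &s); err != nil {
		panic(err)
	}
	fmt.Println(s.String())
}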

View File

@ -6,6 +6,6 @@
// format. It follows the guide at
// https://protobuf.dev/programming-guides/proto3#json.
//
// This package produces a different output than the standard "encoding/json"
// This package produces a different output than the standard [encoding/json]
// package, which does not operate correctly on protocol buffer messages.
package protojson

View File

@ -31,7 +31,7 @@ func Format(m proto.Message) string {
return MarshalOptions{Multiline: true}.Format(m)
}
// Marshal writes the given proto.Message in JSON format using default options.
// Marshal writes the given [proto.Message] in JSON format using default options.
// Do not depend on the output being stable. It may change over time across
// different versions of the program.
func Marshal(m proto.Message) ([]byte, error) {
@ -81,6 +81,25 @@ type MarshalOptions struct {
// ╚═══════╧════════════════════════════╝
EmitUnpopulated bool
// EmitDefaultValues specifies whether to emit default-valued primitive fields,
// empty lists, and empty maps. The fields affected are as follows:
// ╔═══════╤════════════════════════════════════════╗
// ║ JSON │ Protobuf field ║
// ╠═══════╪════════════════════════════════════════╣
// ║ false │ non-optional scalar boolean fields ║
// ║ 0 │ non-optional scalar numeric fields ║
// ║ "" │ non-optional scalar string/byte fields ║
// ║ [] │ empty repeated fields ║
// ║ {} │ empty map fields ║
// ╚═══════╧════════════════════════════════════════╝
//
// Behaves similarly to EmitUnpopulated, but does not emit "null"-value fields,
// i.e. presence-sensing fields that are omitted will remain omitted to preserve
// presence-sensing.
// EmitUnpopulated takes precedence over EmitDefaultValues since the former generates
// a strict superset of the latter.
EmitDefaultValues bool
// Resolver is used for looking up types when expanding google.protobuf.Any
// messages. If nil, this defaults to using protoregistry.GlobalTypes.
Resolver interface {
@ -102,7 +121,7 @@ func (o MarshalOptions) Format(m proto.Message) string {
return string(b)
}
// Marshal marshals the given proto.Message in the JSON format using options in
// Marshal marshals the given [proto.Message] in the JSON format using options in
// MarshalOptions. Do not depend on the output being stable. It may change over
// time across different versions of the program.
func (o MarshalOptions) Marshal(m proto.Message) ([]byte, error) {
@ -178,7 +197,11 @@ func (m typeURLFieldRanger) Range(f func(protoreflect.FieldDescriptor, protorefl
// unpopulatedFieldRanger wraps a protoreflect.Message and modifies its Range
// method to additionally iterate over unpopulated fields.
type unpopulatedFieldRanger struct{ protoreflect.Message }
type unpopulatedFieldRanger struct {
protoreflect.Message
skipNull bool
}
func (m unpopulatedFieldRanger) Range(f func(protoreflect.FieldDescriptor, protoreflect.Value) bool) {
fds := m.Descriptor().Fields()
@ -192,6 +215,9 @@ func (m unpopulatedFieldRanger) Range(f func(protoreflect.FieldDescriptor, proto
isProto2Scalar := fd.Syntax() == protoreflect.Proto2 && fd.Default().IsValid()
isSingularMessage := fd.Cardinality() != protoreflect.Repeated && fd.Message() != nil
if isProto2Scalar || isSingularMessage {
if m.skipNull {
continue
}
v = protoreflect.Value{} // use invalid value to emit null
}
if !f(fd, v) {
@ -217,8 +243,11 @@ func (e encoder) marshalMessage(m protoreflect.Message, typeURL string) error {
defer e.EndObject()
var fields order.FieldRanger = m
if e.opts.EmitUnpopulated {
fields = unpopulatedFieldRanger{m}
switch {
case e.opts.EmitUnpopulated:
fields = unpopulatedFieldRanger{Message: m, skipNull: false}
case e.opts.EmitDefaultValues:
fields = unpopulatedFieldRanger{Message: m, skipNull: true}
}
if typeURL != "" {
fields = typeURLFieldRanger{fields, typeURL}

View File

@ -176,7 +176,7 @@ func (d decoder) unmarshalAny(m protoreflect.Message) error {
// Use another decoder to parse the unread bytes for @type field. This
// avoids advancing a read from current decoder because the current JSON
// object may contain the fields of the embedded type.
dec := decoder{d.Clone(), UnmarshalOptions{}}
dec := decoder{d.Clone(), UnmarshalOptions{RecursionLimit: d.opts.RecursionLimit}}
tok, err := findTypeURL(dec)
switch err {
case errEmptyObject:
@ -308,48 +308,25 @@ Loop:
// array) in order to advance the read to the next JSON value. It relies on
// the decoder returning an error if the types are not in valid sequence.
func (d decoder) skipJSONValue() error {
tok, err := d.Read()
if err != nil {
return err
}
// Only need to continue reading for objects and arrays.
switch tok.Kind() {
case json.ObjectOpen:
for {
tok, err := d.Read()
if err != nil {
return err
}
switch tok.Kind() {
case json.ObjectClose:
return nil
case json.Name:
// Skip object field value.
if err := d.skipJSONValue(); err != nil {
return err
}
var open int
for {
tok, err := d.Read()
if err != nil {
return err
}
switch tok.Kind() {
case json.ObjectClose, json.ArrayClose:
open--
case json.ObjectOpen, json.ArrayOpen:
open++
if open > d.opts.RecursionLimit {
return errors.New("exceeded max recursion depth")
}
}
case json.ArrayOpen:
for {
tok, err := d.Peek()
if err != nil {
return err
}
switch tok.Kind() {
case json.ArrayClose:
d.Read()
return nil
default:
// Skip array item.
if err := d.skipJSONValue(); err != nil {
return err
}
}
if open == 0 {
return nil
}
}
return nil
}
// unmarshalAnyValue unmarshals the given custom-type message from the JSON
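The rewrite above replaces per-value recursion with one loop and an open-bracket counter checked against the decoder's RecursionLimit, so deeply nested JSON inside skipped fields can no longer exhaust the stack. A hedged sketch of the knob (the limit and inputs are illustrative):

```go
// Bound nesting depth while tolerating unknown fields; 10 is an
// arbitrary illustrative limit, not a recommended default.
opts := protojson.UnmarshalOptions{DiscardUnknown: true, RecursionLimit: 10}
if err := opts.Unmarshal(deeplyNestedJSON, msg); err != nil {
	log.Println(err) // e.g. "exceeded max recursion depth"
}
```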

View File

@ -21,7 +21,7 @@ import (
"google.golang.org/protobuf/reflect/protoregistry"
)
// Unmarshal reads the given []byte into the given proto.Message.
// Unmarshal reads the given []byte into the given [proto.Message].
// The provided message must be mutable (e.g., a non-nil pointer to a message).
func Unmarshal(b []byte, m proto.Message) error {
return UnmarshalOptions{}.Unmarshal(b, m)
@ -51,7 +51,7 @@ type UnmarshalOptions struct {
}
}
// Unmarshal reads the given []byte and populates the given proto.Message
// Unmarshal reads the given []byte and populates the given [proto.Message]
// using options in the UnmarshalOptions object.
// The provided message must be mutable (e.g., a non-nil pointer to a message).
func (o UnmarshalOptions) Unmarshal(b []byte, m proto.Message) error {
@ -739,7 +739,9 @@ func (d decoder) skipValue() error {
case text.ListClose:
return nil
case text.MessageOpen:
return d.skipMessageValue()
if err := d.skipMessageValue(); err != nil {
return err
}
default:
// Skip items. This will not validate whether skipped values are
// of the same type or not, same behavior as C++

View File

@ -33,7 +33,7 @@ func Format(m proto.Message) string {
return MarshalOptions{Multiline: true}.Format(m)
}
// Marshal writes the given proto.Message in textproto format using default
// Marshal writes the given [proto.Message] in textproto format using default
// options. Do not depend on the output being stable. It may change over time
// across different versions of the program.
func Marshal(m proto.Message) ([]byte, error) {
@ -97,7 +97,7 @@ func (o MarshalOptions) Format(m proto.Message) string {
return string(b)
}
// Marshal writes the given proto.Message in textproto format using options in
// Marshal writes the given [proto.Message] in textproto format using options in
// MarshalOptions object. Do not depend on the output being stable. It may
// change over time across different versions of the program.
func (o MarshalOptions) Marshal(m proto.Message) ([]byte, error) {

View File

@ -6,7 +6,7 @@
// See https://protobuf.dev/programming-guides/encoding.
//
// For marshaling and unmarshaling entire protobuf messages,
// use the "google.golang.org/protobuf/proto" package instead.
// use the [google.golang.org/protobuf/proto] package instead.
package protowire
import (
@ -87,7 +87,7 @@ func ParseError(n int) error {
// ConsumeField parses an entire field record (both tag and value) and returns
// the field number, the wire type, and the total length.
// This returns a negative length upon an error (see ParseError).
// This returns a negative length upon an error (see [ParseError]).
//
// The total length includes the tag header and the end group marker (if the
// field is a group).
@ -104,8 +104,8 @@ func ConsumeField(b []byte) (Number, Type, int) {
}
// ConsumeFieldValue parses a field value and returns its length.
// This assumes that the field Number and wire Type have already been parsed.
// This returns a negative length upon an error (see ParseError).
// This assumes that the field [Number] and wire [Type] have already been parsed.
// This returns a negative length upon an error (see [ParseError]).
//
// When parsing a group, the length includes the end group marker and
// the end group is verified to match the starting field number.
@ -164,7 +164,7 @@ func AppendTag(b []byte, num Number, typ Type) []byte {
}
// ConsumeTag parses b as a varint-encoded tag, reporting its length.
// This returns a negative length upon an error (see ParseError).
// This returns a negative length upon an error (see [ParseError]).
func ConsumeTag(b []byte) (Number, Type, int) {
v, n := ConsumeVarint(b)
if n < 0 {
@ -263,7 +263,7 @@ func AppendVarint(b []byte, v uint64) []byte {
}
// ConsumeVarint parses b as a varint-encoded uint64, reporting its length.
// This returns a negative length upon an error (see ParseError).
// This returns a negative length upon an error (see [ParseError]).
func ConsumeVarint(b []byte) (v uint64, n int) {
var y uint64
if len(b) <= 0 {
@ -384,7 +384,7 @@ func AppendFixed32(b []byte, v uint32) []byte {
}
// ConsumeFixed32 parses b as a little-endian uint32, reporting its length.
// This returns a negative length upon an error (see ParseError).
// This returns a negative length upon an error (see [ParseError]).
func ConsumeFixed32(b []byte) (v uint32, n int) {
if len(b) < 4 {
return 0, errCodeTruncated
@ -412,7 +412,7 @@ func AppendFixed64(b []byte, v uint64) []byte {
}
// ConsumeFixed64 parses b as a little-endian uint64, reporting its length.
// This returns a negative length upon an error (see ParseError).
// This returns a negative length upon an error (see [ParseError]).
func ConsumeFixed64(b []byte) (v uint64, n int) {
if len(b) < 8 {
return 0, errCodeTruncated
@ -432,7 +432,7 @@ func AppendBytes(b []byte, v []byte) []byte {
}
// ConsumeBytes parses b as a length-prefixed bytes value, reporting its length.
// This returns a negative length upon an error (see ParseError).
// This returns a negative length upon an error (see [ParseError]).
func ConsumeBytes(b []byte) (v []byte, n int) {
m, n := ConsumeVarint(b)
if n < 0 {
@ -456,7 +456,7 @@ func AppendString(b []byte, v string) []byte {
}
// ConsumeString parses b as a length-prefixed bytes value, reporting its length.
// This returns a negative length upon an error (see ParseError).
// This returns a negative length upon an error (see [ParseError]).
func ConsumeString(b []byte) (v string, n int) {
bb, n := ConsumeBytes(b)
return string(bb), n
@ -471,7 +471,7 @@ func AppendGroup(b []byte, num Number, v []byte) []byte {
// ConsumeGroup parses b as a group value until the trailing end group marker,
// and verifies that the end marker matches the provided num. The value v
// does not contain the end marker, while the length does contain the end marker.
// This returns a negative length upon an error (see ParseError).
// This returns a negative length upon an error (see [ParseError]).
func ConsumeGroup(num Number, b []byte) (v []byte, n int) {
n = ConsumeFieldValue(num, StartGroupType, b)
if n < 0 {
@ -495,8 +495,8 @@ func SizeGroup(num Number, n int) int {
return n + SizeTag(num)
}
// DecodeTag decodes the field Number and wire Type from its unified form.
// The Number is -1 if the decoded field number overflows int32.
// DecodeTag decodes the field [Number] and wire [Type] from its unified form.
// The [Number] is -1 if the decoded field number overflows int32.
// Other than overflow, this does not check for field number validity.
func DecodeTag(x uint64) (Number, Type) {
// NOTE: MessageSet allows for larger field numbers than normal.
@ -506,7 +506,7 @@ func DecodeTag(x uint64) (Number, Type) {
return Number(x >> 3), Type(x & 7)
}
// EncodeTag encodes the field Number and wire Type into its unified form.
// EncodeTag encodes the field [Number] and wire [Type] into its unified form.
func EncodeTag(num Number, typ Type) uint64 {
return uint64(num)<<3 | uint64(typ&7)
}
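The [ParseError] convention documented throughout this file is easy to exercise end to end; a minimal self-contained sketch:

```go
package main

import (
	"fmt"
	"log"

	"google.golang.org/protobuf/encoding/protowire"
)

func main() {
	// Encode field 1 as a varint, then consume the tag and value back.
	b := protowire.AppendTag(nil, 1, protowire.VarintType)
	b = protowire.AppendVarint(b, 300)

	num, typ, n := protowire.ConsumeTag(b)
	if n < 0 {
		log.Fatal(protowire.ParseError(n)) // negative length signals an error
	}
	v, m := protowire.ConsumeVarint(b[n:])
	if m < 0 {
		log.Fatal(protowire.ParseError(m))
	}
	fmt.Println(num, typ, v) // 1 0 300
}
```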

View File

@ -83,7 +83,13 @@ func formatListOpt(vs list, isRoot, allowMulti bool) string {
case protoreflect.FileImports:
for i := 0; i < vs.Len(); i++ {
var rs records
rs.Append(reflect.ValueOf(vs.Get(i)), "Path", "Package", "IsPublic", "IsWeak")
rv := reflect.ValueOf(vs.Get(i))
rs.Append(rv, []methodAndName{
{rv.MethodByName("Path"), "Path"},
{rv.MethodByName("Package"), "Package"},
{rv.MethodByName("IsPublic"), "IsPublic"},
{rv.MethodByName("IsWeak"), "IsWeak"},
}...)
ss = append(ss, "{"+rs.Join()+"}")
}
return start + joinStrings(ss, allowMulti) + end
@ -92,34 +98,26 @@ func formatListOpt(vs list, isRoot, allowMulti bool) string {
for i := 0; i < vs.Len(); i++ {
m := reflect.ValueOf(vs).MethodByName("Get")
v := m.Call([]reflect.Value{reflect.ValueOf(i)})[0].Interface()
ss = append(ss, formatDescOpt(v.(protoreflect.Descriptor), false, allowMulti && !isEnumValue))
ss = append(ss, formatDescOpt(v.(protoreflect.Descriptor), false, allowMulti && !isEnumValue, nil))
}
return start + joinStrings(ss, allowMulti && isEnumValue) + end
}
}
// descriptorAccessors is a list of accessors to print for each descriptor.
//
// Do not print all accessors since some contain redundant information,
// while others are pointers that we do not want to follow since the descriptor
// is actually a cyclic graph.
//
// Using a list allows us to print the accessors in a sensible order.
var descriptorAccessors = map[reflect.Type][]string{
reflect.TypeOf((*protoreflect.FileDescriptor)(nil)).Elem(): {"Path", "Package", "Imports", "Messages", "Enums", "Extensions", "Services"},
reflect.TypeOf((*protoreflect.MessageDescriptor)(nil)).Elem(): {"IsMapEntry", "Fields", "Oneofs", "ReservedNames", "ReservedRanges", "RequiredNumbers", "ExtensionRanges", "Messages", "Enums", "Extensions"},
reflect.TypeOf((*protoreflect.FieldDescriptor)(nil)).Elem(): {"Number", "Cardinality", "Kind", "HasJSONName", "JSONName", "HasPresence", "IsExtension", "IsPacked", "IsWeak", "IsList", "IsMap", "MapKey", "MapValue", "HasDefault", "Default", "ContainingOneof", "ContainingMessage", "Message", "Enum"},
reflect.TypeOf((*protoreflect.OneofDescriptor)(nil)).Elem(): {"Fields"}, // not directly used; must keep in sync with formatDescOpt
reflect.TypeOf((*protoreflect.EnumDescriptor)(nil)).Elem(): {"Values", "ReservedNames", "ReservedRanges"},
reflect.TypeOf((*protoreflect.EnumValueDescriptor)(nil)).Elem(): {"Number"},
reflect.TypeOf((*protoreflect.ServiceDescriptor)(nil)).Elem(): {"Methods"},
reflect.TypeOf((*protoreflect.MethodDescriptor)(nil)).Elem(): {"Input", "Output", "IsStreamingClient", "IsStreamingServer"},
type methodAndName struct {
method reflect.Value
name string
}
func FormatDesc(s fmt.State, r rune, t protoreflect.Descriptor) {
io.WriteString(s, formatDescOpt(t, true, r == 'v' && (s.Flag('+') || s.Flag('#'))))
io.WriteString(s, formatDescOpt(t, true, r == 'v' && (s.Flag('+') || s.Flag('#')), nil))
}
func formatDescOpt(t protoreflect.Descriptor, isRoot, allowMulti bool) string {
func InternalFormatDescOptForTesting(t protoreflect.Descriptor, isRoot, allowMulti bool, record func(string)) string {
return formatDescOpt(t, isRoot, allowMulti, record)
}
func formatDescOpt(t protoreflect.Descriptor, isRoot, allowMulti bool, record func(string)) string {
rv := reflect.ValueOf(t)
rt := rv.MethodByName("ProtoType").Type().In(0)
@ -129,26 +127,60 @@ func formatDescOpt(t protoreflect.Descriptor, isRoot, allowMulti bool) string {
}
_, isFile := t.(protoreflect.FileDescriptor)
rs := records{allowMulti: allowMulti}
rs := records{
allowMulti: allowMulti,
record: record,
}
if t.IsPlaceholder() {
if isFile {
rs.Append(rv, "Path", "Package", "IsPlaceholder")
rs.Append(rv, []methodAndName{
{rv.MethodByName("Path"), "Path"},
{rv.MethodByName("Package"), "Package"},
{rv.MethodByName("IsPlaceholder"), "IsPlaceholder"},
}...)
} else {
rs.Append(rv, "FullName", "IsPlaceholder")
rs.Append(rv, []methodAndName{
{rv.MethodByName("FullName"), "FullName"},
{rv.MethodByName("IsPlaceholder"), "IsPlaceholder"},
}...)
}
} else {
switch {
case isFile:
rs.Append(rv, "Syntax")
rs.Append(rv, methodAndName{rv.MethodByName("Syntax"), "Syntax"})
case isRoot:
rs.Append(rv, "Syntax", "FullName")
rs.Append(rv, []methodAndName{
{rv.MethodByName("Syntax"), "Syntax"},
{rv.MethodByName("FullName"), "FullName"},
}...)
default:
rs.Append(rv, "Name")
rs.Append(rv, methodAndName{rv.MethodByName("Name"), "Name"})
}
switch t := t.(type) {
case protoreflect.FieldDescriptor:
for _, s := range descriptorAccessors[rt] {
switch s {
accessors := []methodAndName{
{rv.MethodByName("Number"), "Number"},
{rv.MethodByName("Cardinality"), "Cardinality"},
{rv.MethodByName("Kind"), "Kind"},
{rv.MethodByName("HasJSONName"), "HasJSONName"},
{rv.MethodByName("JSONName"), "JSONName"},
{rv.MethodByName("HasPresence"), "HasPresence"},
{rv.MethodByName("IsExtension"), "IsExtension"},
{rv.MethodByName("IsPacked"), "IsPacked"},
{rv.MethodByName("IsWeak"), "IsWeak"},
{rv.MethodByName("IsList"), "IsList"},
{rv.MethodByName("IsMap"), "IsMap"},
{rv.MethodByName("MapKey"), "MapKey"},
{rv.MethodByName("MapValue"), "MapValue"},
{rv.MethodByName("HasDefault"), "HasDefault"},
{rv.MethodByName("Default"), "Default"},
{rv.MethodByName("ContainingOneof"), "ContainingOneof"},
{rv.MethodByName("ContainingMessage"), "ContainingMessage"},
{rv.MethodByName("Message"), "Message"},
{rv.MethodByName("Enum"), "Enum"},
}
for _, s := range accessors {
switch s.name {
case "MapKey":
if k := t.MapKey(); k != nil {
rs.recs = append(rs.recs, [2]string{"MapKey", k.Kind().String()})
@ -157,20 +189,20 @@ func formatDescOpt(t protoreflect.Descriptor, isRoot, allowMulti bool) string {
if v := t.MapValue(); v != nil {
switch v.Kind() {
case protoreflect.EnumKind:
rs.recs = append(rs.recs, [2]string{"MapValue", string(v.Enum().FullName())})
rs.AppendRecs("MapValue", [2]string{"MapValue", string(v.Enum().FullName())})
case protoreflect.MessageKind, protoreflect.GroupKind:
rs.recs = append(rs.recs, [2]string{"MapValue", string(v.Message().FullName())})
rs.AppendRecs("MapValue", [2]string{"MapValue", string(v.Message().FullName())})
default:
rs.recs = append(rs.recs, [2]string{"MapValue", v.Kind().String()})
rs.AppendRecs("MapValue", [2]string{"MapValue", v.Kind().String()})
}
}
case "ContainingOneof":
if od := t.ContainingOneof(); od != nil {
rs.recs = append(rs.recs, [2]string{"Oneof", string(od.Name())})
rs.AppendRecs("ContainingOneof", [2]string{"Oneof", string(od.Name())})
}
case "ContainingMessage":
if t.IsExtension() {
rs.recs = append(rs.recs, [2]string{"Extendee", string(t.ContainingMessage().FullName())})
rs.AppendRecs("ContainingMessage", [2]string{"Extendee", string(t.ContainingMessage().FullName())})
}
case "Message":
if !t.IsMap() {
@ -187,13 +219,61 @@ func formatDescOpt(t protoreflect.Descriptor, isRoot, allowMulti bool) string {
ss = append(ss, string(fs.Get(i).Name()))
}
if len(ss) > 0 {
rs.recs = append(rs.recs, [2]string{"Fields", "[" + joinStrings(ss, false) + "]"})
rs.AppendRecs("Fields", [2]string{"Fields", "[" + joinStrings(ss, false) + "]"})
}
default:
rs.Append(rv, descriptorAccessors[rt]...)
case protoreflect.FileDescriptor:
rs.Append(rv, []methodAndName{
{rv.MethodByName("Path"), "Path"},
{rv.MethodByName("Package"), "Package"},
{rv.MethodByName("Imports"), "Imports"},
{rv.MethodByName("Messages"), "Messages"},
{rv.MethodByName("Enums"), "Enums"},
{rv.MethodByName("Extensions"), "Extensions"},
{rv.MethodByName("Services"), "Services"},
}...)
case protoreflect.MessageDescriptor:
rs.Append(rv, []methodAndName{
{rv.MethodByName("IsMapEntry"), "IsMapEntry"},
{rv.MethodByName("Fields"), "Fields"},
{rv.MethodByName("Oneofs"), "Oneofs"},
{rv.MethodByName("ReservedNames"), "ReservedNames"},
{rv.MethodByName("ReservedRanges"), "ReservedRanges"},
{rv.MethodByName("RequiredNumbers"), "RequiredNumbers"},
{rv.MethodByName("ExtensionRanges"), "ExtensionRanges"},
{rv.MethodByName("Messages"), "Messages"},
{rv.MethodByName("Enums"), "Enums"},
{rv.MethodByName("Extensions"), "Extensions"},
}...)
case protoreflect.EnumDescriptor:
rs.Append(rv, []methodAndName{
{rv.MethodByName("Values"), "Values"},
{rv.MethodByName("ReservedNames"), "ReservedNames"},
{rv.MethodByName("ReservedRanges"), "ReservedRanges"},
}...)
case protoreflect.EnumValueDescriptor:
rs.Append(rv, []methodAndName{
{rv.MethodByName("Number"), "Number"},
}...)
case protoreflect.ServiceDescriptor:
rs.Append(rv, []methodAndName{
{rv.MethodByName("Methods"), "Methods"},
}...)
case protoreflect.MethodDescriptor:
rs.Append(rv, []methodAndName{
{rv.MethodByName("Input"), "Input"},
{rv.MethodByName("Output"), "Output"},
{rv.MethodByName("IsStreamingClient"), "IsStreamingClient"},
{rv.MethodByName("IsStreamingServer"), "IsStreamingServer"},
}...)
}
if rv.MethodByName("GoType").IsValid() {
rs.Append(rv, "GoType")
if m := rv.MethodByName("GoType"); m.IsValid() {
rs.Append(rv, methodAndName{m, "GoType"})
}
}
return start + rs.Join() + end
@ -202,19 +282,34 @@ func formatDescOpt(t protoreflect.Descriptor, isRoot, allowMulti bool) string {
type records struct {
recs [][2]string
allowMulti bool
// record is a function that will be called for every Append() or
// AppendRecs() call, to be used for testing with the
// InternalFormatDescOptForTesting function.
record func(string)
}
func (rs *records) Append(v reflect.Value, accessors ...string) {
func (rs *records) AppendRecs(fieldName string, newRecs [2]string) {
if rs.record != nil {
rs.record(fieldName)
}
rs.recs = append(rs.recs, newRecs)
}
func (rs *records) Append(v reflect.Value, accessors ...methodAndName) {
for _, a := range accessors {
if rs.record != nil {
rs.record(a.name)
}
var rv reflect.Value
if m := v.MethodByName(a); m.IsValid() {
rv = m.Call(nil)[0]
if a.method.IsValid() {
rv = a.method.Call(nil)[0]
}
if v.Kind() == reflect.Struct && !rv.IsValid() {
rv = v.FieldByName(a)
rv = v.FieldByName(a.name)
}
if !rv.IsValid() {
panic(fmt.Sprintf("unknown accessor: %v.%s", v.Type(), a))
panic(fmt.Sprintf("unknown accessor: %v.%s", v.Type(), a.name))
}
if _, ok := rv.Interface().(protoreflect.Value); ok {
rv = rv.MethodByName("Interface").Call(nil)[0]
@ -261,7 +356,7 @@ func (rs *records) Append(v reflect.Value, accessors ...string) {
default:
s = fmt.Sprint(v)
}
rs.recs = append(rs.recs, [2]string{a, s})
rs.recs = append(rs.recs, [2]string{a.name, s})
}
}

View File

@ -21,11 +21,26 @@ import (
"google.golang.org/protobuf/reflect/protoregistry"
)
// Edition is an Enum for proto2.Edition
type Edition int32
// These values align with the values of the google.protobuf.Edition enum in
// descriptor.proto, which allows direct conversion between the proto enum and
// this enum.
const (
EditionUnknown Edition = 0
EditionProto2 Edition = 998
EditionProto3 Edition = 999
Edition2023 Edition = 1000
EditionUnsupported Edition = 100000
)
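The enum mirrors google.protobuf.Edition because this internal package cannot import the generated descriptorpb without a cycle; matching numeric values let the varint read from FileDescriptorProto.edition convert with a bare cast. A hedged sketch:

```go
// v is the raw varint read for FileDescriptorProto.edition; values are
// number-aligned with google.protobuf.Edition, so a cast suffices.
var v uint64 = 1000 // EDITION_2023 on the wire
e := Edition(v)
fmt.Println(e == Edition2023) // true
```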
// The types in this file may have a suffix:
// • L0: Contains fields common to all descriptors (except File) and
// must be initialized up front.
// • L1: Contains fields specific to a descriptor and
// must be initialized up front.
// must be initialized up front. If the associated proto uses Editions, the
// Editions features must always be resolved. If not explicitly set, the
// appropriate default must be resolved and set.
// • L2: Contains fields that are lazily initialized when constructing
// from the raw file descriptor. When constructing as a literal, the L2
// fields must be initialized up front.
@ -44,6 +59,7 @@ type (
}
FileL1 struct {
Syntax protoreflect.Syntax
Edition Edition // Only used if Syntax == Editions
Path string
Package protoreflect.FullName
@ -51,12 +67,35 @@ type (
Messages Messages
Extensions Extensions
Services Services
EditionFeatures FileEditionFeatures
}
FileL2 struct {
Options func() protoreflect.ProtoMessage
Imports FileImports
Locations SourceLocations
}
FileEditionFeatures struct {
// IsFieldPresence is true if field_presence is EXPLICIT
// https://protobuf.dev/editions/features/#field_presence
IsFieldPresence bool
// IsOpenEnum is true if enum_type is OPEN
// https://protobuf.dev/editions/features/#enum_type
IsOpenEnum bool
// IsPacked is true if repeated_field_encoding is PACKED
// https://protobuf.dev/editions/features/#repeated_field_encoding
IsPacked bool
// IsUTF8Validated is true if utf8_validation is VERIFY
// https://protobuf.dev/editions/features/#utf8_validation
IsUTF8Validated bool
// IsDelimitedEncoded is true if message_encoding is DELIMITED
// https://protobuf.dev/editions/features/#message_encoding
IsDelimitedEncoded bool
// IsJSONCompliant is true if json_format is ALLOW
// https://protobuf.dev/editions/features/#json_format
IsJSONCompliant bool
}
)
func (fd *File) ParentFile() protoreflect.FileDescriptor { return fd }
@ -210,6 +249,9 @@ type (
ContainingOneof protoreflect.OneofDescriptor // must be consistent with Message.Oneofs.Fields
Enum protoreflect.EnumDescriptor
Message protoreflect.MessageDescriptor
// Edition features.
Presence bool
}
Oneof struct {
@ -273,6 +315,9 @@ func (fd *Field) HasJSONName() bool { return fd.L1.StringNam
func (fd *Field) JSONName() string { return fd.L1.StringName.getJSON(fd) }
func (fd *Field) TextName() string { return fd.L1.StringName.getText(fd) }
func (fd *Field) HasPresence() bool {
if fd.L0.ParentFile.L1.Syntax == protoreflect.Editions {
return fd.L1.Presence || fd.L1.Message != nil || fd.L1.ContainingOneof != nil
}
return fd.L1.Cardinality != protoreflect.Repeated && (fd.L0.ParentFile.L1.Syntax == protoreflect.Proto2 || fd.L1.Message != nil || fd.L1.ContainingOneof != nil)
}
func (fd *Field) HasOptionalKeyword() bool {

View File

@ -12,6 +12,12 @@ import (
const File_google_protobuf_descriptor_proto = "google/protobuf/descriptor.proto"
// Full and short names for google.protobuf.Edition.
const (
Edition_enum_fullname = "google.protobuf.Edition"
Edition_enum_name = "Edition"
)
// Names for google.protobuf.FileDescriptorSet.
const (
FileDescriptorSet_message_name protoreflect.Name = "FileDescriptorSet"
@ -81,7 +87,7 @@ const (
FileDescriptorProto_Options_field_number protoreflect.FieldNumber = 8
FileDescriptorProto_SourceCodeInfo_field_number protoreflect.FieldNumber = 9
FileDescriptorProto_Syntax_field_number protoreflect.FieldNumber = 12
FileDescriptorProto_Edition_field_number protoreflect.FieldNumber = 13
FileDescriptorProto_Edition_field_number protoreflect.FieldNumber = 14
)
// Names for google.protobuf.DescriptorProto.
@ -184,10 +190,12 @@ const (
const (
ExtensionRangeOptions_UninterpretedOption_field_name protoreflect.Name = "uninterpreted_option"
ExtensionRangeOptions_Declaration_field_name protoreflect.Name = "declaration"
ExtensionRangeOptions_Features_field_name protoreflect.Name = "features"
ExtensionRangeOptions_Verification_field_name protoreflect.Name = "verification"
ExtensionRangeOptions_UninterpretedOption_field_fullname protoreflect.FullName = "google.protobuf.ExtensionRangeOptions.uninterpreted_option"
ExtensionRangeOptions_Declaration_field_fullname protoreflect.FullName = "google.protobuf.ExtensionRangeOptions.declaration"
ExtensionRangeOptions_Features_field_fullname protoreflect.FullName = "google.protobuf.ExtensionRangeOptions.features"
ExtensionRangeOptions_Verification_field_fullname protoreflect.FullName = "google.protobuf.ExtensionRangeOptions.verification"
)
@ -195,6 +203,7 @@ const (
const (
ExtensionRangeOptions_UninterpretedOption_field_number protoreflect.FieldNumber = 999
ExtensionRangeOptions_Declaration_field_number protoreflect.FieldNumber = 2
ExtensionRangeOptions_Features_field_number protoreflect.FieldNumber = 50
ExtensionRangeOptions_Verification_field_number protoreflect.FieldNumber = 3
)
@ -212,29 +221,26 @@ const (
// Field names for google.protobuf.ExtensionRangeOptions.Declaration.
const (
ExtensionRangeOptions_Declaration_Number_field_name protoreflect.Name = "number"
ExtensionRangeOptions_Declaration_FullName_field_name protoreflect.Name = "full_name"
ExtensionRangeOptions_Declaration_Type_field_name protoreflect.Name = "type"
ExtensionRangeOptions_Declaration_IsRepeated_field_name protoreflect.Name = "is_repeated"
ExtensionRangeOptions_Declaration_Reserved_field_name protoreflect.Name = "reserved"
ExtensionRangeOptions_Declaration_Repeated_field_name protoreflect.Name = "repeated"
ExtensionRangeOptions_Declaration_Number_field_name protoreflect.Name = "number"
ExtensionRangeOptions_Declaration_FullName_field_name protoreflect.Name = "full_name"
ExtensionRangeOptions_Declaration_Type_field_name protoreflect.Name = "type"
ExtensionRangeOptions_Declaration_Reserved_field_name protoreflect.Name = "reserved"
ExtensionRangeOptions_Declaration_Repeated_field_name protoreflect.Name = "repeated"
ExtensionRangeOptions_Declaration_Number_field_fullname protoreflect.FullName = "google.protobuf.ExtensionRangeOptions.Declaration.number"
ExtensionRangeOptions_Declaration_FullName_field_fullname protoreflect.FullName = "google.protobuf.ExtensionRangeOptions.Declaration.full_name"
ExtensionRangeOptions_Declaration_Type_field_fullname protoreflect.FullName = "google.protobuf.ExtensionRangeOptions.Declaration.type"
ExtensionRangeOptions_Declaration_IsRepeated_field_fullname protoreflect.FullName = "google.protobuf.ExtensionRangeOptions.Declaration.is_repeated"
ExtensionRangeOptions_Declaration_Reserved_field_fullname protoreflect.FullName = "google.protobuf.ExtensionRangeOptions.Declaration.reserved"
ExtensionRangeOptions_Declaration_Repeated_field_fullname protoreflect.FullName = "google.protobuf.ExtensionRangeOptions.Declaration.repeated"
ExtensionRangeOptions_Declaration_Number_field_fullname protoreflect.FullName = "google.protobuf.ExtensionRangeOptions.Declaration.number"
ExtensionRangeOptions_Declaration_FullName_field_fullname protoreflect.FullName = "google.protobuf.ExtensionRangeOptions.Declaration.full_name"
ExtensionRangeOptions_Declaration_Type_field_fullname protoreflect.FullName = "google.protobuf.ExtensionRangeOptions.Declaration.type"
ExtensionRangeOptions_Declaration_Reserved_field_fullname protoreflect.FullName = "google.protobuf.ExtensionRangeOptions.Declaration.reserved"
ExtensionRangeOptions_Declaration_Repeated_field_fullname protoreflect.FullName = "google.protobuf.ExtensionRangeOptions.Declaration.repeated"
)
// Field numbers for google.protobuf.ExtensionRangeOptions.Declaration.
const (
ExtensionRangeOptions_Declaration_Number_field_number protoreflect.FieldNumber = 1
ExtensionRangeOptions_Declaration_FullName_field_number protoreflect.FieldNumber = 2
ExtensionRangeOptions_Declaration_Type_field_number protoreflect.FieldNumber = 3
ExtensionRangeOptions_Declaration_IsRepeated_field_number protoreflect.FieldNumber = 4
ExtensionRangeOptions_Declaration_Reserved_field_number protoreflect.FieldNumber = 5
ExtensionRangeOptions_Declaration_Repeated_field_number protoreflect.FieldNumber = 6
ExtensionRangeOptions_Declaration_Number_field_number protoreflect.FieldNumber = 1
ExtensionRangeOptions_Declaration_FullName_field_number protoreflect.FieldNumber = 2
ExtensionRangeOptions_Declaration_Type_field_number protoreflect.FieldNumber = 3
ExtensionRangeOptions_Declaration_Reserved_field_number protoreflect.FieldNumber = 5
ExtensionRangeOptions_Declaration_Repeated_field_number protoreflect.FieldNumber = 6
)
// Names for google.protobuf.FieldDescriptorProto.
@ -478,6 +484,7 @@ const (
FileOptions_PhpNamespace_field_name protoreflect.Name = "php_namespace"
FileOptions_PhpMetadataNamespace_field_name protoreflect.Name = "php_metadata_namespace"
FileOptions_RubyPackage_field_name protoreflect.Name = "ruby_package"
FileOptions_Features_field_name protoreflect.Name = "features"
FileOptions_UninterpretedOption_field_name protoreflect.Name = "uninterpreted_option"
FileOptions_JavaPackage_field_fullname protoreflect.FullName = "google.protobuf.FileOptions.java_package"
@ -500,6 +507,7 @@ const (
FileOptions_PhpNamespace_field_fullname protoreflect.FullName = "google.protobuf.FileOptions.php_namespace"
FileOptions_PhpMetadataNamespace_field_fullname protoreflect.FullName = "google.protobuf.FileOptions.php_metadata_namespace"
FileOptions_RubyPackage_field_fullname protoreflect.FullName = "google.protobuf.FileOptions.ruby_package"
FileOptions_Features_field_fullname protoreflect.FullName = "google.protobuf.FileOptions.features"
FileOptions_UninterpretedOption_field_fullname protoreflect.FullName = "google.protobuf.FileOptions.uninterpreted_option"
)
@ -525,6 +533,7 @@ const (
FileOptions_PhpNamespace_field_number protoreflect.FieldNumber = 41
FileOptions_PhpMetadataNamespace_field_number protoreflect.FieldNumber = 44
FileOptions_RubyPackage_field_number protoreflect.FieldNumber = 45
FileOptions_Features_field_number protoreflect.FieldNumber = 50
FileOptions_UninterpretedOption_field_number protoreflect.FieldNumber = 999
)
@ -547,6 +556,7 @@ const (
MessageOptions_Deprecated_field_name protoreflect.Name = "deprecated"
MessageOptions_MapEntry_field_name protoreflect.Name = "map_entry"
MessageOptions_DeprecatedLegacyJsonFieldConflicts_field_name protoreflect.Name = "deprecated_legacy_json_field_conflicts"
MessageOptions_Features_field_name protoreflect.Name = "features"
MessageOptions_UninterpretedOption_field_name protoreflect.Name = "uninterpreted_option"
MessageOptions_MessageSetWireFormat_field_fullname protoreflect.FullName = "google.protobuf.MessageOptions.message_set_wire_format"
@ -554,6 +564,7 @@ const (
MessageOptions_Deprecated_field_fullname protoreflect.FullName = "google.protobuf.MessageOptions.deprecated"
MessageOptions_MapEntry_field_fullname protoreflect.FullName = "google.protobuf.MessageOptions.map_entry"
MessageOptions_DeprecatedLegacyJsonFieldConflicts_field_fullname protoreflect.FullName = "google.protobuf.MessageOptions.deprecated_legacy_json_field_conflicts"
MessageOptions_Features_field_fullname protoreflect.FullName = "google.protobuf.MessageOptions.features"
MessageOptions_UninterpretedOption_field_fullname protoreflect.FullName = "google.protobuf.MessageOptions.uninterpreted_option"
)
@ -564,6 +575,7 @@ const (
MessageOptions_Deprecated_field_number protoreflect.FieldNumber = 3
MessageOptions_MapEntry_field_number protoreflect.FieldNumber = 7
MessageOptions_DeprecatedLegacyJsonFieldConflicts_field_number protoreflect.FieldNumber = 11
MessageOptions_Features_field_number protoreflect.FieldNumber = 12
MessageOptions_UninterpretedOption_field_number protoreflect.FieldNumber = 999
)
@ -584,8 +596,9 @@ const (
FieldOptions_Weak_field_name protoreflect.Name = "weak"
FieldOptions_DebugRedact_field_name protoreflect.Name = "debug_redact"
FieldOptions_Retention_field_name protoreflect.Name = "retention"
FieldOptions_Target_field_name protoreflect.Name = "target"
FieldOptions_Targets_field_name protoreflect.Name = "targets"
FieldOptions_EditionDefaults_field_name protoreflect.Name = "edition_defaults"
FieldOptions_Features_field_name protoreflect.Name = "features"
FieldOptions_UninterpretedOption_field_name protoreflect.Name = "uninterpreted_option"
FieldOptions_Ctype_field_fullname protoreflect.FullName = "google.protobuf.FieldOptions.ctype"
@ -597,8 +610,9 @@ const (
FieldOptions_Weak_field_fullname protoreflect.FullName = "google.protobuf.FieldOptions.weak"
FieldOptions_DebugRedact_field_fullname protoreflect.FullName = "google.protobuf.FieldOptions.debug_redact"
FieldOptions_Retention_field_fullname protoreflect.FullName = "google.protobuf.FieldOptions.retention"
FieldOptions_Target_field_fullname protoreflect.FullName = "google.protobuf.FieldOptions.target"
FieldOptions_Targets_field_fullname protoreflect.FullName = "google.protobuf.FieldOptions.targets"
FieldOptions_EditionDefaults_field_fullname protoreflect.FullName = "google.protobuf.FieldOptions.edition_defaults"
FieldOptions_Features_field_fullname protoreflect.FullName = "google.protobuf.FieldOptions.features"
FieldOptions_UninterpretedOption_field_fullname protoreflect.FullName = "google.protobuf.FieldOptions.uninterpreted_option"
)
@ -613,8 +627,9 @@ const (
FieldOptions_Weak_field_number protoreflect.FieldNumber = 10
FieldOptions_DebugRedact_field_number protoreflect.FieldNumber = 16
FieldOptions_Retention_field_number protoreflect.FieldNumber = 17
FieldOptions_Target_field_number protoreflect.FieldNumber = 18
FieldOptions_Targets_field_number protoreflect.FieldNumber = 19
FieldOptions_EditionDefaults_field_number protoreflect.FieldNumber = 20
FieldOptions_Features_field_number protoreflect.FieldNumber = 21
FieldOptions_UninterpretedOption_field_number protoreflect.FieldNumber = 999
)
@ -642,6 +657,27 @@ const (
FieldOptions_OptionTargetType_enum_name = "OptionTargetType"
)
// Names for google.protobuf.FieldOptions.EditionDefault.
const (
FieldOptions_EditionDefault_message_name protoreflect.Name = "EditionDefault"
FieldOptions_EditionDefault_message_fullname protoreflect.FullName = "google.protobuf.FieldOptions.EditionDefault"
)
// Field names for google.protobuf.FieldOptions.EditionDefault.
const (
FieldOptions_EditionDefault_Edition_field_name protoreflect.Name = "edition"
FieldOptions_EditionDefault_Value_field_name protoreflect.Name = "value"
FieldOptions_EditionDefault_Edition_field_fullname protoreflect.FullName = "google.protobuf.FieldOptions.EditionDefault.edition"
FieldOptions_EditionDefault_Value_field_fullname protoreflect.FullName = "google.protobuf.FieldOptions.EditionDefault.value"
)
// Field numbers for google.protobuf.FieldOptions.EditionDefault.
const (
FieldOptions_EditionDefault_Edition_field_number protoreflect.FieldNumber = 3
FieldOptions_EditionDefault_Value_field_number protoreflect.FieldNumber = 2
)
// Names for google.protobuf.OneofOptions.
const (
OneofOptions_message_name protoreflect.Name = "OneofOptions"
@ -650,13 +686,16 @@ const (
// Field names for google.protobuf.OneofOptions.
const (
OneofOptions_Features_field_name protoreflect.Name = "features"
OneofOptions_UninterpretedOption_field_name protoreflect.Name = "uninterpreted_option"
OneofOptions_Features_field_fullname protoreflect.FullName = "google.protobuf.OneofOptions.features"
OneofOptions_UninterpretedOption_field_fullname protoreflect.FullName = "google.protobuf.OneofOptions.uninterpreted_option"
)
// Field numbers for google.protobuf.OneofOptions.
const (
OneofOptions_Features_field_number protoreflect.FieldNumber = 1
OneofOptions_UninterpretedOption_field_number protoreflect.FieldNumber = 999
)
@ -671,11 +710,13 @@ const (
EnumOptions_AllowAlias_field_name protoreflect.Name = "allow_alias"
EnumOptions_Deprecated_field_name protoreflect.Name = "deprecated"
EnumOptions_DeprecatedLegacyJsonFieldConflicts_field_name protoreflect.Name = "deprecated_legacy_json_field_conflicts"
EnumOptions_Features_field_name protoreflect.Name = "features"
EnumOptions_UninterpretedOption_field_name protoreflect.Name = "uninterpreted_option"
EnumOptions_AllowAlias_field_fullname protoreflect.FullName = "google.protobuf.EnumOptions.allow_alias"
EnumOptions_Deprecated_field_fullname protoreflect.FullName = "google.protobuf.EnumOptions.deprecated"
EnumOptions_DeprecatedLegacyJsonFieldConflicts_field_fullname protoreflect.FullName = "google.protobuf.EnumOptions.deprecated_legacy_json_field_conflicts"
EnumOptions_Features_field_fullname protoreflect.FullName = "google.protobuf.EnumOptions.features"
EnumOptions_UninterpretedOption_field_fullname protoreflect.FullName = "google.protobuf.EnumOptions.uninterpreted_option"
)
@ -684,6 +725,7 @@ const (
EnumOptions_AllowAlias_field_number protoreflect.FieldNumber = 2
EnumOptions_Deprecated_field_number protoreflect.FieldNumber = 3
EnumOptions_DeprecatedLegacyJsonFieldConflicts_field_number protoreflect.FieldNumber = 6
EnumOptions_Features_field_number protoreflect.FieldNumber = 7
EnumOptions_UninterpretedOption_field_number protoreflect.FieldNumber = 999
)
@ -696,15 +738,21 @@ const (
// Field names for google.protobuf.EnumValueOptions.
const (
EnumValueOptions_Deprecated_field_name protoreflect.Name = "deprecated"
EnumValueOptions_Features_field_name protoreflect.Name = "features"
EnumValueOptions_DebugRedact_field_name protoreflect.Name = "debug_redact"
EnumValueOptions_UninterpretedOption_field_name protoreflect.Name = "uninterpreted_option"
EnumValueOptions_Deprecated_field_fullname protoreflect.FullName = "google.protobuf.EnumValueOptions.deprecated"
EnumValueOptions_Features_field_fullname protoreflect.FullName = "google.protobuf.EnumValueOptions.features"
EnumValueOptions_DebugRedact_field_fullname protoreflect.FullName = "google.protobuf.EnumValueOptions.debug_redact"
EnumValueOptions_UninterpretedOption_field_fullname protoreflect.FullName = "google.protobuf.EnumValueOptions.uninterpreted_option"
)
// Field numbers for google.protobuf.EnumValueOptions.
const (
EnumValueOptions_Deprecated_field_number protoreflect.FieldNumber = 1
EnumValueOptions_Features_field_number protoreflect.FieldNumber = 2
EnumValueOptions_DebugRedact_field_number protoreflect.FieldNumber = 3
EnumValueOptions_UninterpretedOption_field_number protoreflect.FieldNumber = 999
)
@ -716,15 +764,18 @@ const (
// Field names for google.protobuf.ServiceOptions.
const (
ServiceOptions_Features_field_name protoreflect.Name = "features"
ServiceOptions_Deprecated_field_name protoreflect.Name = "deprecated"
ServiceOptions_UninterpretedOption_field_name protoreflect.Name = "uninterpreted_option"
ServiceOptions_Features_field_fullname protoreflect.FullName = "google.protobuf.ServiceOptions.features"
ServiceOptions_Deprecated_field_fullname protoreflect.FullName = "google.protobuf.ServiceOptions.deprecated"
ServiceOptions_UninterpretedOption_field_fullname protoreflect.FullName = "google.protobuf.ServiceOptions.uninterpreted_option"
)
// Field numbers for google.protobuf.ServiceOptions.
const (
ServiceOptions_Features_field_number protoreflect.FieldNumber = 34
ServiceOptions_Deprecated_field_number protoreflect.FieldNumber = 33
ServiceOptions_UninterpretedOption_field_number protoreflect.FieldNumber = 999
)
@ -739,10 +790,12 @@ const (
const (
MethodOptions_Deprecated_field_name protoreflect.Name = "deprecated"
MethodOptions_IdempotencyLevel_field_name protoreflect.Name = "idempotency_level"
MethodOptions_Features_field_name protoreflect.Name = "features"
MethodOptions_UninterpretedOption_field_name protoreflect.Name = "uninterpreted_option"
MethodOptions_Deprecated_field_fullname protoreflect.FullName = "google.protobuf.MethodOptions.deprecated"
MethodOptions_IdempotencyLevel_field_fullname protoreflect.FullName = "google.protobuf.MethodOptions.idempotency_level"
MethodOptions_Features_field_fullname protoreflect.FullName = "google.protobuf.MethodOptions.features"
MethodOptions_UninterpretedOption_field_fullname protoreflect.FullName = "google.protobuf.MethodOptions.uninterpreted_option"
)
@ -750,6 +803,7 @@ const (
const (
MethodOptions_Deprecated_field_number protoreflect.FieldNumber = 33
MethodOptions_IdempotencyLevel_field_number protoreflect.FieldNumber = 34
MethodOptions_Features_field_number protoreflect.FieldNumber = 35
MethodOptions_UninterpretedOption_field_number protoreflect.FieldNumber = 999
)
@ -816,6 +870,120 @@ const (
UninterpretedOption_NamePart_IsExtension_field_number protoreflect.FieldNumber = 2
)
// Names for google.protobuf.FeatureSet.
const (
FeatureSet_message_name protoreflect.Name = "FeatureSet"
FeatureSet_message_fullname protoreflect.FullName = "google.protobuf.FeatureSet"
)
// Field names for google.protobuf.FeatureSet.
const (
FeatureSet_FieldPresence_field_name protoreflect.Name = "field_presence"
FeatureSet_EnumType_field_name protoreflect.Name = "enum_type"
FeatureSet_RepeatedFieldEncoding_field_name protoreflect.Name = "repeated_field_encoding"
FeatureSet_Utf8Validation_field_name protoreflect.Name = "utf8_validation"
FeatureSet_MessageEncoding_field_name protoreflect.Name = "message_encoding"
FeatureSet_JsonFormat_field_name protoreflect.Name = "json_format"
FeatureSet_FieldPresence_field_fullname protoreflect.FullName = "google.protobuf.FeatureSet.field_presence"
FeatureSet_EnumType_field_fullname protoreflect.FullName = "google.protobuf.FeatureSet.enum_type"
FeatureSet_RepeatedFieldEncoding_field_fullname protoreflect.FullName = "google.protobuf.FeatureSet.repeated_field_encoding"
FeatureSet_Utf8Validation_field_fullname protoreflect.FullName = "google.protobuf.FeatureSet.utf8_validation"
FeatureSet_MessageEncoding_field_fullname protoreflect.FullName = "google.protobuf.FeatureSet.message_encoding"
FeatureSet_JsonFormat_field_fullname protoreflect.FullName = "google.protobuf.FeatureSet.json_format"
)
// Field numbers for google.protobuf.FeatureSet.
const (
FeatureSet_FieldPresence_field_number protoreflect.FieldNumber = 1
FeatureSet_EnumType_field_number protoreflect.FieldNumber = 2
FeatureSet_RepeatedFieldEncoding_field_number protoreflect.FieldNumber = 3
FeatureSet_Utf8Validation_field_number protoreflect.FieldNumber = 4
FeatureSet_MessageEncoding_field_number protoreflect.FieldNumber = 5
FeatureSet_JsonFormat_field_number protoreflect.FieldNumber = 6
)
// Full and short names for google.protobuf.FeatureSet.FieldPresence.
const (
FeatureSet_FieldPresence_enum_fullname = "google.protobuf.FeatureSet.FieldPresence"
FeatureSet_FieldPresence_enum_name = "FieldPresence"
)
// Full and short names for google.protobuf.FeatureSet.EnumType.
const (
FeatureSet_EnumType_enum_fullname = "google.protobuf.FeatureSet.EnumType"
FeatureSet_EnumType_enum_name = "EnumType"
)
// Full and short names for google.protobuf.FeatureSet.RepeatedFieldEncoding.
const (
FeatureSet_RepeatedFieldEncoding_enum_fullname = "google.protobuf.FeatureSet.RepeatedFieldEncoding"
FeatureSet_RepeatedFieldEncoding_enum_name = "RepeatedFieldEncoding"
)
// Full and short names for google.protobuf.FeatureSet.Utf8Validation.
const (
FeatureSet_Utf8Validation_enum_fullname = "google.protobuf.FeatureSet.Utf8Validation"
FeatureSet_Utf8Validation_enum_name = "Utf8Validation"
)
// Full and short names for google.protobuf.FeatureSet.MessageEncoding.
const (
FeatureSet_MessageEncoding_enum_fullname = "google.protobuf.FeatureSet.MessageEncoding"
FeatureSet_MessageEncoding_enum_name = "MessageEncoding"
)
// Full and short names for google.protobuf.FeatureSet.JsonFormat.
const (
FeatureSet_JsonFormat_enum_fullname = "google.protobuf.FeatureSet.JsonFormat"
FeatureSet_JsonFormat_enum_name = "JsonFormat"
)
// Names for google.protobuf.FeatureSetDefaults.
const (
FeatureSetDefaults_message_name protoreflect.Name = "FeatureSetDefaults"
FeatureSetDefaults_message_fullname protoreflect.FullName = "google.protobuf.FeatureSetDefaults"
)
// Field names for google.protobuf.FeatureSetDefaults.
const (
FeatureSetDefaults_Defaults_field_name protoreflect.Name = "defaults"
FeatureSetDefaults_MinimumEdition_field_name protoreflect.Name = "minimum_edition"
FeatureSetDefaults_MaximumEdition_field_name protoreflect.Name = "maximum_edition"
FeatureSetDefaults_Defaults_field_fullname protoreflect.FullName = "google.protobuf.FeatureSetDefaults.defaults"
FeatureSetDefaults_MinimumEdition_field_fullname protoreflect.FullName = "google.protobuf.FeatureSetDefaults.minimum_edition"
FeatureSetDefaults_MaximumEdition_field_fullname protoreflect.FullName = "google.protobuf.FeatureSetDefaults.maximum_edition"
)
// Field numbers for google.protobuf.FeatureSetDefaults.
const (
FeatureSetDefaults_Defaults_field_number protoreflect.FieldNumber = 1
FeatureSetDefaults_MinimumEdition_field_number protoreflect.FieldNumber = 4
FeatureSetDefaults_MaximumEdition_field_number protoreflect.FieldNumber = 5
)
// Names for google.protobuf.FeatureSetDefaults.FeatureSetEditionDefault.
const (
FeatureSetDefaults_FeatureSetEditionDefault_message_name protoreflect.Name = "FeatureSetEditionDefault"
FeatureSetDefaults_FeatureSetEditionDefault_message_fullname protoreflect.FullName = "google.protobuf.FeatureSetDefaults.FeatureSetEditionDefault"
)
// Field names for google.protobuf.FeatureSetDefaults.FeatureSetEditionDefault.
const (
FeatureSetDefaults_FeatureSetEditionDefault_Edition_field_name protoreflect.Name = "edition"
FeatureSetDefaults_FeatureSetEditionDefault_Features_field_name protoreflect.Name = "features"
FeatureSetDefaults_FeatureSetEditionDefault_Edition_field_fullname protoreflect.FullName = "google.protobuf.FeatureSetDefaults.FeatureSetEditionDefault.edition"
FeatureSetDefaults_FeatureSetEditionDefault_Features_field_fullname protoreflect.FullName = "google.protobuf.FeatureSetDefaults.FeatureSetEditionDefault.features"
)
// Field numbers for google.protobuf.FeatureSetDefaults.FeatureSetEditionDefault.
const (
FeatureSetDefaults_FeatureSetEditionDefault_Edition_field_number protoreflect.FieldNumber = 3
FeatureSetDefaults_FeatureSetEditionDefault_Features_field_number protoreflect.FieldNumber = 2
)
// Names for google.protobuf.SourceCodeInfo.
const (
SourceCodeInfo_message_name protoreflect.Name = "SourceCodeInfo"

View File

@ -162,11 +162,20 @@ func appendBoolSlice(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions
func consumeBoolSlice(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) {
sp := p.BoolSlice()
if wtyp == protowire.BytesType {
s := *sp
b, n := protowire.ConsumeBytes(b)
if n < 0 {
return out, errDecode
}
count := 0
for _, v := range b {
if v < 0x80 {
count++
}
}
if count > 0 {
p.growBoolSlice(count)
}
s := *sp
for len(b) > 0 {
var v uint64
var n int
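The hunks in this file pre-size the destination slice before decoding a packed field: every varint's final byte has the continuation bit (0x80) clear, so one pass over the payload yields the exact element count and the grow*Slice helper reallocates at most once. The counting step in isolation:

```go
// countVarints reports how many varints b contains: each varint ends
// with exactly one byte below 0x80, so counting those counts elements.
func countVarints(b []byte) int {
	n := 0
	for _, v := range b {
		if v < 0x80 {
			n++
		}
	}
	return n
}
```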
@ -732,11 +741,20 @@ func appendInt32Slice(b []byte, p pointer, f *coderFieldInfo, opts marshalOption
func consumeInt32Slice(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) {
sp := p.Int32Slice()
if wtyp == protowire.BytesType {
s := *sp
b, n := protowire.ConsumeBytes(b)
if n < 0 {
return out, errDecode
}
count := 0
for _, v := range b {
if v < 0x80 {
count++
}
}
if count > 0 {
p.growInt32Slice(count)
}
s := *sp
for len(b) > 0 {
var v uint64
var n int
@ -1138,11 +1156,20 @@ func appendSint32Slice(b []byte, p pointer, f *coderFieldInfo, opts marshalOptio
func consumeSint32Slice(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) {
sp := p.Int32Slice()
if wtyp == protowire.BytesType {
s := *sp
b, n := protowire.ConsumeBytes(b)
if n < 0 {
return out, errDecode
}
count := 0
for _, v := range b {
if v < 0x80 {
count++
}
}
if count > 0 {
p.growInt32Slice(count)
}
s := *sp
for len(b) > 0 {
var v uint64
var n int
@ -1544,11 +1571,20 @@ func appendUint32Slice(b []byte, p pointer, f *coderFieldInfo, opts marshalOptio
func consumeUint32Slice(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) {
sp := p.Uint32Slice()
if wtyp == protowire.BytesType {
s := *sp
b, n := protowire.ConsumeBytes(b)
if n < 0 {
return out, errDecode
}
count := 0
for _, v := range b {
if v < 0x80 {
count++
}
}
if count > 0 {
p.growUint32Slice(count)
}
s := *sp
for len(b) > 0 {
var v uint64
var n int
@ -1950,11 +1986,20 @@ func appendInt64Slice(b []byte, p pointer, f *coderFieldInfo, opts marshalOption
func consumeInt64Slice(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) {
sp := p.Int64Slice()
if wtyp == protowire.BytesType {
s := *sp
b, n := protowire.ConsumeBytes(b)
if n < 0 {
return out, errDecode
}
count := 0
for _, v := range b {
if v < 0x80 {
count++
}
}
if count > 0 {
p.growInt64Slice(count)
}
s := *sp
for len(b) > 0 {
var v uint64
var n int
@ -2356,11 +2401,20 @@ func appendSint64Slice(b []byte, p pointer, f *coderFieldInfo, opts marshalOptio
func consumeSint64Slice(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) {
sp := p.Int64Slice()
if wtyp == protowire.BytesType {
s := *sp
b, n := protowire.ConsumeBytes(b)
if n < 0 {
return out, errDecode
}
count := 0
for _, v := range b {
if v < 0x80 {
count++
}
}
if count > 0 {
p.growInt64Slice(count)
}
s := *sp
for len(b) > 0 {
var v uint64
var n int
@ -2762,11 +2816,20 @@ func appendUint64Slice(b []byte, p pointer, f *coderFieldInfo, opts marshalOptio
func consumeUint64Slice(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) {
sp := p.Uint64Slice()
if wtyp == protowire.BytesType {
s := *sp
b, n := protowire.ConsumeBytes(b)
if n < 0 {
return out, errDecode
}
count := 0
for _, v := range b {
if v < 0x80 {
count++
}
}
if count > 0 {
p.growUint64Slice(count)
}
s := *sp
for len(b) > 0 {
var v uint64
var n int
@ -3145,11 +3208,15 @@ func appendSfixed32Slice(b []byte, p pointer, f *coderFieldInfo, opts marshalOpt
func consumeSfixed32Slice(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) {
sp := p.Int32Slice()
if wtyp == protowire.BytesType {
s := *sp
b, n := protowire.ConsumeBytes(b)
if n < 0 {
return out, errDecode
}
count := len(b) / protowire.SizeFixed32()
if count > 0 {
p.growInt32Slice(count)
}
s := *sp
for len(b) > 0 {
v, n := protowire.ConsumeFixed32(b)
if n < 0 {
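Unlike the varint case, the fixed-width variants here and below need no counting scan; the element count is just the payload length divided by the element size:

```go
// A sketch of the sizing used in these hunks: SizeFixed32() is 4 and
// SizeFixed64() is 8, so the division is exact for well-formed input.
count32 := len(payload) / protowire.SizeFixed32()
count64 := len(payload) / protowire.SizeFixed64()
```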
@ -3461,11 +3528,15 @@ func appendFixed32Slice(b []byte, p pointer, f *coderFieldInfo, opts marshalOpti
func consumeFixed32Slice(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) {
sp := p.Uint32Slice()
if wtyp == protowire.BytesType {
s := *sp
b, n := protowire.ConsumeBytes(b)
if n < 0 {
return out, errDecode
}
count := len(b) / protowire.SizeFixed32()
if count > 0 {
p.growUint32Slice(count)
}
s := *sp
for len(b) > 0 {
v, n := protowire.ConsumeFixed32(b)
if n < 0 {
@ -3777,11 +3848,15 @@ func appendFloatSlice(b []byte, p pointer, f *coderFieldInfo, opts marshalOption
func consumeFloatSlice(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) {
sp := p.Float32Slice()
if wtyp == protowire.BytesType {
s := *sp
b, n := protowire.ConsumeBytes(b)
if n < 0 {
return out, errDecode
}
count := len(b) / protowire.SizeFixed32()
if count > 0 {
p.growFloat32Slice(count)
}
s := *sp
for len(b) > 0 {
v, n := protowire.ConsumeFixed32(b)
if n < 0 {
@ -4093,11 +4168,15 @@ func appendSfixed64Slice(b []byte, p pointer, f *coderFieldInfo, opts marshalOpt
func consumeSfixed64Slice(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) {
sp := p.Int64Slice()
if wtyp == protowire.BytesType {
s := *sp
b, n := protowire.ConsumeBytes(b)
if n < 0 {
return out, errDecode
}
count := len(b) / protowire.SizeFixed64()
if count > 0 {
p.growInt64Slice(count)
}
s := *sp
for len(b) > 0 {
v, n := protowire.ConsumeFixed64(b)
if n < 0 {
@ -4409,11 +4488,15 @@ func appendFixed64Slice(b []byte, p pointer, f *coderFieldInfo, opts marshalOpti
func consumeFixed64Slice(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) {
sp := p.Uint64Slice()
if wtyp == protowire.BytesType {
s := *sp
b, n := protowire.ConsumeBytes(b)
if n < 0 {
return out, errDecode
}
count := len(b) / protowire.SizeFixed64()
if count > 0 {
p.growUint64Slice(count)
}
s := *sp
for len(b) > 0 {
v, n := protowire.ConsumeFixed64(b)
if n < 0 {
@ -4725,11 +4808,15 @@ func appendDoubleSlice(b []byte, p pointer, f *coderFieldInfo, opts marshalOptio
func consumeDoubleSlice(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) {
sp := p.Float64Slice()
if wtyp == protowire.BytesType {
s := *sp
b, n := protowire.ConsumeBytes(b)
if n < 0 {
return out, errDecode
}
count := len(b) / protowire.SizeFixed64()
if count > 0 {
p.growFloat64Slice(count)
}
s := *sp
for len(b) > 0 {
v, n := protowire.ConsumeFixed64(b)
if n < 0 {

Some files were not shown because too many files have changed in this diff.