Vendor in latest containers/common with default capabilities

Also update the vendored containers/storage and containers/image

Clean up the display of added/dropped capabilities as well

Signed-off-by: Daniel J Walsh <dwalsh@redhat.com>
Daniel J Walsh 2022-12-06 19:49:31 -05:00
parent 1cc22631f6
commit 3718ac8e96
141 changed files with 2344 additions and 1555 deletions


@@ -162,7 +162,7 @@ When executing RUN instructions, run the command specified in the instruction
 with the specified capability removed from its capability set.
 The CAP\_CHOWN, CAP\_DAC\_OVERRIDE, CAP\_FOWNER,
 CAP\_FSETID, CAP\_KILL, CAP\_NET\_BIND\_SERVICE, CAP\_SETFCAP,
-CAP\_SETGID, CAP\_SETPCAP, CAP\_SETUID, and CAP\_SYS\_CHROOT capabilities are
+CAP\_SETGID, CAP\_SETPCAP, and CAP\_SETUID capabilities are
 granted by default; this option can be used to remove them.
 If a capability is specified to both the **--cap-add** and **--cap-drop**
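As a quick sketch of what the new default set means in practice (the image tag here is illustrative; the flags are the documented **--cap-add**/**--cap-drop** options):

```
# CAP_SYS_CHROOT is no longer granted by default, so a RUN step that
# needs chroot must request it explicitly:
podman build --cap-add sys_chroot -t demo-image .

# Any of the remaining default capabilities can still be removed:
podman build --cap-drop setuid --cap-drop setgid -t demo-image .
```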


@@ -129,7 +129,6 @@ $ podman container inspect foobar
          "CAP_SETGID",
          "CAP_SETPCAP",
          "CAP_SETUID",
-         "CAP_SYS_CHROOT"
      ],
      "BoundingCaps": [
          "CAP_CHOWN",
@@ -142,7 +141,6 @@ $ podman container inspect foobar
          "CAP_SETGID",
          "CAP_SETPCAP",
          "CAP_SETUID",
-         "CAP_SYS_CHROOT"
      ],
      "ExecIDs": [],
      "GraphDriver": {
@@ -248,11 +246,7 @@ $ podman container inspect foobar
          "VolumeDriver": "",
          "VolumesFrom": null,
          "CapAdd": [],
-         "CapDrop": [
-             "CAP_AUDIT_WRITE",
-             "CAP_MKNOD",
-             "CAP_NET_RAW"
-         ],
+         "CapDrop": [],
          "Dns": [],
          "DnsOptions": [],
          "DnsSearch": [],
@@ -325,7 +319,7 @@ overlay
 ```
 $ podman container inspect --latest --format {{.EffectiveCaps}}
-[CAP_CHOWN CAP_DAC_OVERRIDE CAP_FOWNER CAP_FSETID CAP_KILL CAP_NET_BIND_SERVICE CAP_SETFCAP CAP_SETGID CAP_SETPCAP CAP_SETUID CAP_SYS_CHROOT]
+[CAP_CHOWN CAP_DAC_OVERRIDE CAP_FOWNER CAP_FSETID CAP_KILL CAP_NET_BIND_SERVICE CAP_SETFCAP CAP_SETGID CAP_SETPCAP CAP_SETUID]
 ```
 ## SEE ALSO


@@ -91,7 +91,7 @@ host:
   path: /run/user/3267/podman/podman.sock
 security:
   apparmorEnabled: false
-  capabilities: CAP_CHOWN,CAP_DAC_OVERRIDE,CAP_FOWNER,CAP_FSETID,CAP_KILL,CAP_NET_BIND_SERVICE,CAP_SETFCAP,CAP_SETGID,CAP_SETPCAP,CAP_SETUID,CAP_SYS_CHROOT
+  capabilities: CAP_CHOWN,CAP_DAC_OVERRIDE,CAP_FOWNER,CAP_FSETID,CAP_KILL,CAP_NET_BIND_SERVICE,CAP_SETFCAP,CAP_SETGID,CAP_SETPCAP,CAP_SETUID
   rootless: true
   seccompEnabled: true
   seccompProfilePath: /usr/share/containers/seccomp.json
@@ -224,7 +224,7 @@ $ podman info --format json
   "serviceIsRemote": false,
   "security": {
     "apparmorEnabled": false,
-    "capabilities": "CAP_CHOWN,CAP_DAC_OVERRIDE,CAP_FOWNER,CAP_FSETID,CAP_KILL,CAP_NET_BIND_SERVICE,CAP_SETFCAP,CAP_SETGID,CAP_SETPCAP,CAP_SETUID,CAP_SYS_CHROOT",
+    "capabilities": "CAP_CHOWN,CAP_DAC_OVERRIDE,CAP_FOWNER,CAP_FSETID,CAP_KILL,CAP_NET_BIND_SERVICE,CAP_SETFCAP,CAP_SETGID,CAP_SETPCAP,CAP_SETUID",
   "rootless": true,
   "seccompEnabled": true,
   "seccompProfilePath": "/usr/share/containers/seccomp.json",


@@ -138,7 +138,7 @@ size: 4405240
 ```
 podman container inspect --latest --format {{.EffectiveCaps}}
-[CAP_CHOWN CAP_DAC_OVERRIDE CAP_FSETID CAP_FOWNER CAP_MKNOD CAP_NET_RAW CAP_SETGID CAP_SETUID CAP_SETFCAP CAP_SETPCAP CAP_NET_BIND_SERVICE CAP_SYS_CHROOT CAP_KILL CAP_AUDIT_WRITE]
+[CAP_CHOWN CAP_DAC_OVERRIDE CAP_FSETID CAP_FOWNER CAP_SETGID CAP_SETUID CAP_SETFCAP CAP_SETPCAP CAP_NET_BIND_SERVICE CAP_KILL]
 ```
 ```


@@ -78,12 +78,6 @@ spec:
     - containerPort: 3306
       hostPort: 36533
     resources: {}
-    securityContext:
-      capabilities:
-        drop:
-        - CAP_MKNOD
-        - CAP_NET_RAW
-        - CAP_AUDIT_WRITE
     tty: true
 status: {}
 ```
@@ -109,12 +103,6 @@ spec:
     image: docker.io/library/alpine:latest
     name: test-bind-mount
     resources: {}
-    securityContext:
-      capabilities:
-        drop:
-        - CAP_MKNOD
-        - CAP_NET_RAW
-        - CAP_AUDIT_WRITE
     volumeMounts:
     - mountPath: /volume
       name: home-user-my-data-host
@@ -148,12 +136,6 @@ spec:
     image: docker.io/library/alpine:latest
     name: test-bind-mount
     resources: {}
-    securityContext:
-      capabilities:
-        drop:
-        - CAP_MKNOD
-        - CAP_NET_RAW
-        - CAP_AUDIT_WRITE
     volumeMounts:
     - mountPath: /volume
       name: priceless-data-pvc
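The YAML above loses its securityContext entirely because these containers run with exactly the default capability set. Conversely, a capability that diverges from the defaults should still produce an explicit block; a sketch (container name illustrative):

```
# A non-default capability still surfaces in the generated YAML:
podman run -d --name capdemo --cap-add sys_admin alpine top
podman generate kube capdemo | grep -A 3 'capabilities:'
```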

go.mod

@@ -12,12 +12,12 @@ require (
 	github.com/containernetworking/cni v1.1.2
 	github.com/containernetworking/plugins v1.1.1
 	github.com/containers/buildah v1.28.1-0.20221130132810-cf661299d14f
-	github.com/containers/common v0.50.2-0.20221206110749-eb48ebbf8ca9
+	github.com/containers/common v0.50.2-0.20221215152004-4a63cf13ee8d
 	github.com/containers/conmon v2.0.20+incompatible
-	github.com/containers/image/v5 v5.23.1-0.20221130170538-333c50e3eac8
+	github.com/containers/image/v5 v5.23.1-0.20221215093114-15fbbcf9f484
 	github.com/containers/ocicrypt v1.1.6
 	github.com/containers/psgo v1.8.0
-	github.com/containers/storage v1.44.1-0.20221201083122-c5a80ad65f42
+	github.com/containers/storage v1.44.1-0.20221215163359-b0949d90efad
 	github.com/coreos/go-systemd/v22 v22.5.0
 	github.com/coreos/stream-metadata-go v0.0.0-20210225230131-70edb9eb47b3
 	github.com/cyphar/filepath-securejoin v0.2.3
@@ -73,12 +73,12 @@ require (
 require (
 	github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1 // indirect
 	github.com/Microsoft/go-winio v0.6.0 // indirect
-	github.com/Microsoft/hcsshim v0.9.5 // indirect
+	github.com/Microsoft/hcsshim v0.9.6 // indirect
 	github.com/VividCortex/ewma v1.2.0 // indirect
 	github.com/acarl005/stripansi v0.0.0-20180116102854-5a71ef0e047d // indirect
 	github.com/chzyer/readline v1.5.1 // indirect
 	github.com/containerd/cgroups v1.0.4 // indirect
-	github.com/containerd/containerd v1.6.10 // indirect
+	github.com/containerd/containerd v1.6.12 // indirect
 	github.com/containerd/stargz-snapshotter/estargz v0.13.0 // indirect
 	github.com/containers/libtrust v0.0.0-20200511145503-9c3a6c22cd9a // indirect
 	github.com/coreos/go-systemd v0.0.0-20190719114852-fd7a80b32e1f // indirect
@@ -99,7 +99,7 @@ require (
 	github.com/imdario/mergo v0.3.13 // indirect
 	github.com/inconshreveable/mousetrap v1.0.1 // indirect
 	github.com/jinzhu/copier v0.3.5 // indirect
-	github.com/klauspost/compress v1.15.12 // indirect
+	github.com/klauspost/compress v1.15.13 // indirect
 	github.com/klauspost/pgzip v1.2.6-0.20220930104621-17e8dac29df8 // indirect
 	github.com/kr/fs v0.1.0 // indirect
 	github.com/letsencrypt/boulder v0.0.0-20221109233200-85aa52084eaf // indirect
@@ -119,11 +119,11 @@ require (
 	github.com/proglottis/gpgme v0.1.3 // indirect
 	github.com/rivo/uniseg v0.2.0 // indirect
 	github.com/seccomp/libseccomp-golang v0.10.0 // indirect
-	github.com/sigstore/sigstore v1.4.6 // indirect
+	github.com/sigstore/sigstore v1.5.0 // indirect
 	github.com/stefanberger/go-pkcs11uri v0.0.0-20201008174630-78d3cae3a980 // indirect
 	github.com/sylabs/sif/v2 v2.9.0 // indirect
 	github.com/tchap/go-patricia v2.3.0+incompatible // indirect
-	github.com/theupdateframework/go-tuf v0.5.2-0.20220930112810-3890c1e7ace4 // indirect
+	github.com/theupdateframework/go-tuf v0.5.2-0.20221207161717-9cb61d6e65f5 // indirect
 	github.com/titanous/rocacheck v0.0.0-20171023193734-afe73141d399 // indirect
 	github.com/vbatts/tar-split v0.11.2 // indirect
 	github.com/vishvananda/netns v0.0.0-20210104183010-2eb08e3e575f // indirect
@@ -132,11 +132,11 @@ require (
 	github.com/xeipuuv/gojsonschema v1.2.0 // indirect
 	go.mozilla.org/pkcs7 v0.0.0-20210826202110-33d05740a352 // indirect
 	go.opencensus.io v0.24.0 // indirect
-	golang.org/x/crypto v0.3.0 // indirect
-	golang.org/x/mod v0.6.0 // indirect
-	golang.org/x/tools v0.2.0 // indirect
-	google.golang.org/genproto v0.0.0-20221111202108-142d8a6fa32e // indirect
-	google.golang.org/grpc v1.50.1 // indirect
+	golang.org/x/crypto v0.4.0 // indirect
+	golang.org/x/mod v0.7.0 // indirect
+	golang.org/x/tools v0.4.0 // indirect
+	google.golang.org/genproto v0.0.0-20221206210731-b1a01be3a5f6 // indirect
+	google.golang.org/grpc v1.51.0 // indirect
 	gopkg.in/square/go-jose.v2 v2.6.0 // indirect
 	gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 // indirect
 	gopkg.in/yaml.v2 v2.4.0 // indirect

go.sum

@@ -83,8 +83,8 @@ github.com/Microsoft/hcsshim v0.8.21/go.mod h1:+w2gRZ5ReXQhFOrvSQeNfhrYB/dg3oDwT
 github.com/Microsoft/hcsshim v0.8.22/go.mod h1:91uVCVzvX2QD16sMCenoxxXo6L1wJnLMX2PSufFMtF0=
 github.com/Microsoft/hcsshim v0.8.23/go.mod h1:4zegtUJth7lAvFyc6cH2gGQ5B3OFQim01nnU2M8jKDg=
 github.com/Microsoft/hcsshim v0.9.4/go.mod h1:7pLA8lDk46WKDWlVsENo92gC0XFa8rbKfyFRBqxEbCc=
-github.com/Microsoft/hcsshim v0.9.5 h1:AbV+VPfTrIVffukazHcpxmz/sRiE6YaMDzHWR9BXZHo=
-github.com/Microsoft/hcsshim v0.9.5/go.mod h1:7pLA8lDk46WKDWlVsENo92gC0XFa8rbKfyFRBqxEbCc=
+github.com/Microsoft/hcsshim v0.9.6 h1:VwnDOgLeoi2du6dAznfmspNqTiwczvjv4K7NxuY9jsY=
+github.com/Microsoft/hcsshim v0.9.6/go.mod h1:7pLA8lDk46WKDWlVsENo92gC0XFa8rbKfyFRBqxEbCc=
 github.com/Microsoft/hcsshim/test v0.0.0-20201218223536-d3e5debf77da/go.mod h1:5hlzMzRKMLyo42nCZ9oml8AdTlq/0cvIaBv6tK1RehU=
 github.com/Microsoft/hcsshim/test v0.0.0-20210227013316-43a75bb4edd3/go.mod h1:mw7qgWloBUl75W/gVH3cQszUg1+gUITj7D6NY7ywVnY=
 github.com/NYTimes/gziphandler v0.0.0-20170623195520-56545f4a5d46/go.mod h1:3wb06e3pkSAbeQ52E9H9iFoQsEEwGN64994WTCIhntQ=
@@ -203,8 +203,8 @@ github.com/containerd/containerd v1.5.0-rc.0/go.mod h1:V/IXoMqNGgBlabz3tHD2TWDoT
 github.com/containerd/containerd v1.5.1/go.mod h1:0DOxVqwDy2iZvrZp2JUx/E+hS0UNTVn7dJnIOwtYR4g=
 github.com/containerd/containerd v1.5.7/go.mod h1:gyvv6+ugqY25TiXxcZC3L5yOeYgEw0QMhscqVp1AR9c=
 github.com/containerd/containerd v1.5.9/go.mod h1:fvQqCfadDGga5HZyn3j4+dx56qj2I9YwBrlSdalvJYQ=
-github.com/containerd/containerd v1.6.10 h1:8aiav7I2ZyQLbTlNMcBXyAU1FtFvp6VuyuW13qSd6Hk=
-github.com/containerd/containerd v1.6.10/go.mod h1:CVqfxdJ95PDgORwA219AwwLrREZgrTFybXu2HfMKRG0=
+github.com/containerd/containerd v1.6.12 h1:kJ9b3mOFKf8yqo05Ob+tMoxvt1pbVWhnB0re9Y+k+8c=
+github.com/containerd/containerd v1.6.12/go.mod h1:K4Bw7gjgh4TnkmQY+py/PYQGp4e7xgnHAeg87VeWb3A=
 github.com/containerd/continuity v0.0.0-20190426062206-aaeac12a7ffc/go.mod h1:GL3xCUCBDV3CZiTSEKksMWbLE66hEyuu9qyDOOqM47Y=
 github.com/containerd/continuity v0.0.0-20190815185530-f2a389ac0a02/go.mod h1:GL3xCUCBDV3CZiTSEKksMWbLE66hEyuu9qyDOOqM47Y=
 github.com/containerd/continuity v0.0.0-20191127005431-f65d91d395eb/go.mod h1:GL3xCUCBDV3CZiTSEKksMWbLE66hEyuu9qyDOOqM47Y=
@@ -264,12 +264,12 @@ github.com/containernetworking/plugins v1.1.1 h1:+AGfFigZ5TiQH00vhR8qPeSatj53eNG
 github.com/containernetworking/plugins v1.1.1/go.mod h1:Sr5TH/eBsGLXK/h71HeLfX19sZPp3ry5uHSkI4LPxV8=
 github.com/containers/buildah v1.28.1-0.20221130132810-cf661299d14f h1:Nzbda2tG7/aimoKnDxysqFgS1Q/gSsbcn88lFPj9LwY=
 github.com/containers/buildah v1.28.1-0.20221130132810-cf661299d14f/go.mod h1:0HcSoS6BHXWzMKqtxY1L0gupebEX33oPC+X62lPi6+c=
-github.com/containers/common v0.50.2-0.20221206110749-eb48ebbf8ca9 h1:L54LXA/DGRhp1cDN11HLaXcLCYh/ftqDhKYn9S1uetc=
-github.com/containers/common v0.50.2-0.20221206110749-eb48ebbf8ca9/go.mod h1:M1epBsHlUAeySDuMx+HdbvKBVf0odzLciecS5AQa6FA=
+github.com/containers/common v0.50.2-0.20221215152004-4a63cf13ee8d h1:hdbVVcY0ae0IDi4PUCJLgnrpguu9F+fSGcZO/OYWx8w=
+github.com/containers/common v0.50.2-0.20221215152004-4a63cf13ee8d/go.mod h1:w/ixwYVLjaj9XSeTIngI9ZZx/VH1TqmzrjC4eGF2AtU=
 github.com/containers/conmon v2.0.20+incompatible h1:YbCVSFSCqFjjVwHTPINGdMX1F6JXHGTUje2ZYobNrkg=
 github.com/containers/conmon v2.0.20+incompatible/go.mod h1:hgwZ2mtuDrppv78a/cOBNiCm6O0UMWGx1mu7P00nu5I=
-github.com/containers/image/v5 v5.23.1-0.20221130170538-333c50e3eac8 h1:GLTTwKYkNGDhG3HagLuPvhieu1JEjDs9RsCDr8oJr9s=
-github.com/containers/image/v5 v5.23.1-0.20221130170538-333c50e3eac8/go.mod h1:dXknI7O86/PznywqTwyHaCpuK1MBh1QYxLkSqyDswI0=
+github.com/containers/image/v5 v5.23.1-0.20221215093114-15fbbcf9f484 h1:MOoO2wWaMCwP5nIte9H2512yEkvVVsjlxV5B+vK8ujE=
+github.com/containers/image/v5 v5.23.1-0.20221215093114-15fbbcf9f484/go.mod h1:QPpyvEyj40UAP6rG7rdxlPmitvoOefgdigMKi30iaBw=
 github.com/containers/libtrust v0.0.0-20200511145503-9c3a6c22cd9a h1:spAGlqziZjCJL25C6F1zsQY05tfCKE9F5YwtEWWe6hU=
 github.com/containers/libtrust v0.0.0-20200511145503-9c3a6c22cd9a/go.mod h1:9rfv8iPl1ZP7aqh9YA68wnZv2NUDbXdcdPHVz0pFbPY=
 github.com/containers/ocicrypt v1.0.1/go.mod h1:MeJDzk1RJHv89LjsH0Sp5KTY3ZYkjXO/C+bKAeWFIrc=
@@ -281,8 +281,8 @@ github.com/containers/psgo v1.8.0 h1:2loGekmGAxM9ir5OsXWEfGwFxorMPYnc6gEDsGFQvhY
 github.com/containers/psgo v1.8.0/go.mod h1:T8ZxnX3Ur4RvnhxFJ7t8xJ1F48RhiZB4rSrOaR/qGHc=
 github.com/containers/storage v1.37.0/go.mod h1:kqeJeS0b7DO2ZT1nVWs0XufrmPFbgV3c+Q/45RlH6r4=
 github.com/containers/storage v1.43.0/go.mod h1:uZ147thiIFGdVTjMmIw19knttQnUCl3y9zjreHrg11s=
-github.com/containers/storage v1.44.1-0.20221201083122-c5a80ad65f42 h1:lba+h0VcMGvO/C4Q+oMhGxpgajzyQifbcedOYQNVRx8=
-github.com/containers/storage v1.44.1-0.20221201083122-c5a80ad65f42/go.mod h1:pYkSXaKIGAuEQmIf/melI5wbS/JBM++6Xp4JuVTqY7U=
+github.com/containers/storage v1.44.1-0.20221215163359-b0949d90efad h1:s1HIzrTnTI51kcrhnkXlvvzKs05V3MKviDsrTarhFyU=
+github.com/containers/storage v1.44.1-0.20221215163359-b0949d90efad/go.mod h1:o+bCRUdLbr6MPQaV5TphvdxBUucUBDFqzAcPIi8WWtg=
 github.com/coreos/bbolt v1.3.2/go.mod h1:iRUV2dpdMOn7Bo10OQBFzIJO9kkE559Wcmn+qkEiiKk=
 github.com/coreos/etcd v3.3.10+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE=
 github.com/coreos/go-iptables v0.4.5/go.mod h1:/mVI274lEDI2ns62jHCDnCyBF9Iwsmekav8Dbxlm1MU=
@@ -614,8 +614,9 @@ github.com/klauspost/compress v1.11.13/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdY
 github.com/klauspost/compress v1.13.6/go.mod h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47etQ6QUkpK6sk=
 github.com/klauspost/compress v1.15.7/go.mod h1:PhcZ0MbTNciWF3rruxRgKxI5NkcHHrHUDtV4Yw2GlzU=
 github.com/klauspost/compress v1.15.9/go.mod h1:PhcZ0MbTNciWF3rruxRgKxI5NkcHHrHUDtV4Yw2GlzU=
-github.com/klauspost/compress v1.15.12 h1:YClS/PImqYbn+UILDnqxQCZ3RehC9N318SU3kElDUEM=
 github.com/klauspost/compress v1.15.12/go.mod h1:QPwzmACJjUTFsnSHH934V6woptycfrDDJnH7hvFVbGM=
+github.com/klauspost/compress v1.15.13 h1:NFn1Wr8cfnenSJSA46lLq4wHCcBzKTSjnBIexDMMOV0=
+github.com/klauspost/compress v1.15.13/go.mod h1:QPwzmACJjUTFsnSHH934V6woptycfrDDJnH7hvFVbGM=
 github.com/klauspost/pgzip v1.2.5/go.mod h1:Ch1tH69qFZu15pkjo5kYi6mth2Zzwzt50oCQKQE9RUs=
 github.com/klauspost/pgzip v1.2.6-0.20220930104621-17e8dac29df8 h1:BcxbplxjtczA1a6d3wYoa7a0WL3rq9DKBMGHeKyjEF0=
 github.com/klauspost/pgzip v1.2.6-0.20220930104621-17e8dac29df8/go.mod h1:Ch1tH69qFZu15pkjo5kYi6mth2Zzwzt50oCQKQE9RUs=
@@ -659,8 +660,8 @@ github.com/mattn/go-shellwords v1.0.6/go.mod h1:3xCvwCdWdlDJUrvuMn7Wuy9eWs4pE8vq
 github.com/mattn/go-shellwords v1.0.12 h1:M2zGm7EW6UQJvDeQxo4T51eKPurbeFbe8WtebGE2xrk=
 github.com/mattn/go-shellwords v1.0.12/go.mod h1:EZzvwXDESEeg03EKmM+RmDnNOPKG4lLtQsUlTZDWQ8Y=
 github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0=
-github.com/matttproud/golang_protobuf_extensions v1.0.2-0.20181231171920-c182affec369 h1:I0XW9+e1XWDxdcEniV4rQAIOPUGDq67JSCiRCgGCZLI=
 github.com/matttproud/golang_protobuf_extensions v1.0.2-0.20181231171920-c182affec369/go.mod h1:BSXmuO+STAnVfrANrmjBb36TMTDstsz7MSK+HVaYKv4=
+github.com/matttproud/golang_protobuf_extensions v1.0.4 h1:mmDVorXM7PCGKw94cs5zkfA9PSy5pEvNWRP0ET0TIVo=
 github.com/maxbrunsfeld/counterfeiter/v6 v6.2.2/go.mod h1:eD9eIE7cdwcMi9rYluz88Jz2VyhSmden33/aXg4oVIY=
 github.com/miekg/dns v1.0.14/go.mod h1:W1PPwlIAgtquWBMBEV9nkV9Cazfe8ScdGz/Lj7v3Nrg=
 github.com/miekg/pkcs11 v1.0.3/go.mod h1:XsNlhZGX73bx86s2hdc/FuaLm2CPZJemRLMA+WTFxgs=
@@ -848,8 +849,8 @@ github.com/seccomp/libseccomp-golang v0.10.0 h1:aA4bp+/Zzi0BnWZ2F1wgNBs5gTpm+na2
 github.com/seccomp/libseccomp-golang v0.10.0/go.mod h1:JA8cRccbGaA1s33RQf7Y1+q9gHmZX1yB/z9WDN1C6fg=
 github.com/sergi/go-diff v1.2.0 h1:XU+rvMAioB0UC3q1MFrIQy4Vo5/4VsRDQQXHsEya6xQ=
 github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc=
-github.com/sigstore/sigstore v1.4.6 h1:2F1LPnQf6h1lRDCyNMoBE0WCPsA+IU5kAEAbGxG8S+U=
-github.com/sigstore/sigstore v1.4.6/go.mod h1:jGHEfVTFgpfDpBz7pSY4X+Sd+g36qdAUxGufNk47k7g=
+github.com/sigstore/sigstore v1.5.0 h1:NqstQ6SwwhQsp6Ll0wgk/d9g5MlfmEppo14aquUjJ/8=
+github.com/sigstore/sigstore v1.5.0/go.mod h1:fRAaZ9xXh7ZQ0GJqZdpmNJ3pemuHBu2PgIAngmzIFSI=
 github.com/sirupsen/logrus v1.0.4-0.20170822132746-89742aefa4b2/go.mod h1:pMByvHTf9Beacp5x1UXfOR9xyW/9antXMhjMPG0dEzc=
 github.com/sirupsen/logrus v1.0.6/go.mod h1:pMByvHTf9Beacp5x1UXfOR9xyW/9antXMhjMPG0dEzc=
 github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo=
@@ -917,8 +918,8 @@ github.com/syndtr/gocapability v0.0.0-20200815063812-42c35b437635/go.mod h1:hkRG
 github.com/tchap/go-patricia v2.2.6+incompatible/go.mod h1:bmLyhP68RS6kStMGxByiQ23RP/odRBOTVjwp2cDyi6I=
 github.com/tchap/go-patricia v2.3.0+incompatible h1:GkY4dP3cEfEASBPPkWd+AmjYxhmDkqO9/zg7R0lSQRs=
 github.com/tchap/go-patricia v2.3.0+incompatible/go.mod h1:bmLyhP68RS6kStMGxByiQ23RP/odRBOTVjwp2cDyi6I=
-github.com/theupdateframework/go-tuf v0.5.2-0.20220930112810-3890c1e7ace4 h1:1i/Afw3rmaR1gF3sfVkG2X6ldkikQwA9zY380LrR5YI=
-github.com/theupdateframework/go-tuf v0.5.2-0.20220930112810-3890c1e7ace4/go.mod h1:vAqWV3zEs89byeFsAYoh/Q14vJTgJkHwnnRCWBBBINY=
+github.com/theupdateframework/go-tuf v0.5.2-0.20221207161717-9cb61d6e65f5 h1:s+Yvt6bzRwHljSE7j6DLBDcfpZEdBhrvLgOUmd8f7ZM=
+github.com/theupdateframework/go-tuf v0.5.2-0.20221207161717-9cb61d6e65f5/go.mod h1:Le8NAjvDJK1vmLgpVYr4AR1Tqam/b/mTdQyTy37UJDA=
 github.com/titanous/rocacheck v0.0.0-20171023193734-afe73141d399 h1:e/5i7d4oYZ+C1wj2THlRK+oAhjeS/TRQwMfkIuet3w0=
 github.com/titanous/rocacheck v0.0.0-20171023193734-afe73141d399/go.mod h1:LdwHTNJT99C5fTAzDz0ud328OgXz+gierycbcIx2fRs=
 github.com/tmc/grpc-websocket-proxy v0.0.0-20170815181823-89b8d40f7ca8/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U=
@@ -1017,8 +1018,8 @@ golang.org/x/crypto v0.0.0-20200728195943-123391ffb6de/go.mod h1:LzIPMQfyMNhhGPh
 golang.org/x/crypto v0.0.0-20201002170205-7f63de1d35b0/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
 golang.org/x/crypto v0.0.0-20210322153248-0c34fe9e7dc2/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4=
 golang.org/x/crypto v0.0.0-20211215153901-e495a2d5b3d3/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4=
-golang.org/x/crypto v0.3.0 h1:a06MkbcxBrEFc0w0QIZWXrH/9cCX6KJyWbBOIwAn+7A=
-golang.org/x/crypto v0.3.0/go.mod h1:hebNnKkNXi2UzZN1eVRvBB7co0a+JxK6XbPiWVs/3J4=
+golang.org/x/crypto v0.4.0 h1:UVQgzMY87xqpKNgb+kDsll2Igd33HszWHFLmpaRMq/8=
+golang.org/x/crypto v0.4.0/go.mod h1:3quD/ATkf6oY+rnes5c3ExXTbLc8mueNue5/DoinL80=
 golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
 golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
 golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8=
@@ -1054,8 +1055,8 @@ golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
 golang.org/x/mod v0.4.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
 golang.org/x/mod v0.4.1/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
 golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
-golang.org/x/mod v0.6.0 h1:b9gGHsz9/HhJ3HF5DHQytPpuwocVTChQJK3AvoLRD5I=
-golang.org/x/mod v0.6.0/go.mod h1:4mET923SAdbXp2ki8ey+zGs1SLqsuM2Y0uvdZR/fUNI=
+golang.org/x/mod v0.7.0 h1:LapD9S96VoQRhi/GrNTqeBJFrUjs5UHCAtTlgwA5oZA=
+golang.org/x/mod v0.7.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs=
 golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
 golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
 golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
@@ -1319,8 +1320,8 @@ golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4f
 golang.org/x/tools v0.1.0/go.mod h1:xkSsbof2nBLbhDlRMhhhyNLN/zl3eTqcnHD5viDpcZ0=
 golang.org/x/tools v0.1.1/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=
 golang.org/x/tools v0.1.2/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=
-golang.org/x/tools v0.2.0 h1:G6AHpWxTMGY1KyEYoAQ5WTtIekUUvDNjan3ugu60JvE=
-golang.org/x/tools v0.2.0/go.mod h1:y4OqIKeOV/fWJetJ8bXPU1sEVniLMIyDAZWeHdV+NTA=
+golang.org/x/tools v0.4.0 h1:7mTAgkunk3fr4GAloyyCasadO6h9zSsQZbwvcaIciV4=
+golang.org/x/tools v0.4.0/go.mod h1:UE5sM2OK9E/d67R0ANs2xJizIymRP5gJU295PvKXxjQ=
 golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
 golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
 golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
@@ -1401,8 +1402,8 @@ google.golang.org/genproto v0.0.0-20210310155132-4ce2db91004e/go.mod h1:FWY/as6D
 google.golang.org/genproto v0.0.0-20210319143718-93e7006c17a6/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
 google.golang.org/genproto v0.0.0-20210402141018-6c239bbf2bb1/go.mod h1:9lPAdzaEmUacj36I+k7YKbEc5CXzPIeORRgDAUOu28A=
 google.golang.org/genproto v0.0.0-20210602131652-f16073e35f0c/go.mod h1:UODoCrxHCcBojKKwX1terBiRUaqAsFqJiF615XL43r0=
-google.golang.org/genproto v0.0.0-20221111202108-142d8a6fa32e h1:azcyH5lGzGy7pkLCbhPe0KkKxsM7c6UA/FZIXImKE7M=
-google.golang.org/genproto v0.0.0-20221111202108-142d8a6fa32e/go.mod h1:rZS5c/ZVYMaOGBfO68GWtjOw/eLaZM1X6iVtgjZ+EWg=
+google.golang.org/genproto v0.0.0-20221206210731-b1a01be3a5f6 h1:AGXp12e/9rItf6/4QymU7WsAUwCf+ICW75cuR91nJIc=
+google.golang.org/genproto v0.0.0-20221206210731-b1a01be3a5f6/go.mod h1:1dOng4TWOomJrDGhpXjfCD35wQC6jnC7HpRmOFRqEV0=
 google.golang.org/grpc v0.0.0-20160317175043-d3ddb4469d5a/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw=
 google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
 google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38=
@@ -1428,8 +1429,8 @@ google.golang.org/grpc v1.36.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAG
 google.golang.org/grpc v1.36.1/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU=
 google.golang.org/grpc v1.38.0/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM=
 google.golang.org/grpc v1.40.0/go.mod h1:ogyxbiOoUXAkP+4+xa6PZSE9DZgIHtSpzjDTB9KAK34=
-google.golang.org/grpc v1.50.1 h1:DS/BukOZWp8s6p4Dt/tOaJaTQyPyOoCcrjroHuCeLzY=
-google.golang.org/grpc v1.50.1/go.mod h1:ZgQEeidpAuNRZ8iRrlBKXZQP1ghovWIVhdJRyCDK+GI=
+google.golang.org/grpc v1.51.0 h1:E1eGv1FTqoLIdnBCZufiSHgKjlqG6fKFf6pPWtMTh8U=
+google.golang.org/grpc v1.51.0/go.mod h1:wgNDFcnuBGmxLKI/qn4T+m5BtEBYXJPvibbUPsAIPww=
 google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8=
 google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0=
 google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM=


@@ -139,15 +139,15 @@ func (c *Container) platformInspectContainerHostConfig(ctrSpec *spec.Spec, hostC
 	if ctrSpec.Process != nil {
 		// Make an O(1) lookup table for default bounding caps.
 		boundingCaps := make(map[string]bool)
-		g, err := generate.New("linux")
-		if err != nil {
-			return err
-		}
 		if !hostConfig.Privileged {
-			for _, cap := range g.Config.Process.Capabilities.Bounding {
+			for _, cap := range c.runtime.config.Containers.DefaultCapabilities {
 				boundingCaps[cap] = true
 			}
 		} else {
+			g, err := generate.New("linux")
+			if err != nil {
+				return err
+			}
 			// If we are privileged, use all caps.
 			for _, cap := range capability.List() {
 				if g.HostSpecific && cap > validate.LastCap() {
@@ -156,7 +156,7 @@ func (c *Container) platformInspectContainerHostConfig(ctrSpec *spec.Spec, hostC
 				boundingCaps[fmt.Sprintf("CAP_%s", strings.ToUpper(cap.String()))] = true
 			}
 		}
-		// Iterate through spec caps.
+		// Iterate through default caps.
 		// If it's not in default bounding caps, it was added.
 		// If it is, delete from the default set. Whatever remains after
 		// we finish are the dropped caps.
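The effect of this set difference is directly observable from the CLI; a small sketch (container and capability names illustrative) of how additions and removals land in the inspect output:

```
# Caps added on the command line appear in HostConfig.CapAdd; defaults
# removed from the set appear in HostConfig.CapDrop:
podman run -d --name capdiff --cap-add net_admin --cap-drop chown alpine top
podman inspect capdiff --format '{{.HostConfig.CapAdd}} {{.HostConfig.CapDrop}}'
```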


@@ -27,7 +27,6 @@ import (
 	"github.com/containers/podman/v4/pkg/specgen"
 	"github.com/containers/podman/v4/pkg/util"
 	"github.com/opencontainers/runtime-spec/specs-go"
-	"github.com/opencontainers/runtime-tools/generate"
 	"github.com/sirupsen/logrus"
 )
@@ -602,7 +601,7 @@ func containerToV1Container(ctx context.Context, c *Container) (v1.Container, []
 	kubeContainer := v1.Container{}
 	kubeVolumes := []v1.Volume{}
 	annotations := make(map[string]string)
-	kubeSec, err := generateKubeSecurityContext(c)
+	kubeSec, hasSecData, err := generateKubeSecurityContext(c)
 	if err != nil {
 		return kubeContainer, kubeVolumes, nil, annotations, err
 	}
@@ -677,7 +676,7 @@ func containerToV1Container(ctx context.Context, c *Container) (v1.Container, []
 		kubeContainer.WorkingDir = c.WorkingDir()
 	}
-	if imgData.User == c.User() {
+	if imgData.User == c.User() && hasSecData {
 		kubeSec.RunAsGroup, kubeSec.RunAsUser = nil, nil
 	}
@@ -690,7 +689,9 @@ func containerToV1Container(ctx context.Context, c *Container) (v1.Container, []
 	kubeContainer.Ports = ports
 	// This should not be applicable
 	// container.EnvFromSource =
-	kubeContainer.SecurityContext = kubeSec
+	if hasSecData {
+		kubeContainer.SecurityContext = kubeSec
+	}
 	kubeContainer.StdinOnce = false
 	kubeContainer.TTY = c.Terminal()
@@ -983,27 +984,16 @@ func determineCapAddDropFromCapabilities(defaultCaps, containerCaps []string) *v
 		}
 	}
-	return &v1.Capabilities{
-		Add:  add,
-		Drop: drop,
+	if len(add) > 0 || len(drop) > 0 {
+		return &v1.Capabilities{
+			Add:  add,
+			Drop: drop,
+		}
 	}
+	return nil
 }
-func capAddDrop(caps *specs.LinuxCapabilities) (*v1.Capabilities, error) {
-	g, err := generate.New("linux")
-	if err != nil {
-		return nil, err
-	}
-	defCaps := g.Config.Process.Capabilities
-	// Combine all the default capabilities into a slice
-	defaultCaps := make([]string, 0, len(defCaps.Ambient)+len(defCaps.Bounding)+len(defCaps.Effective)+len(defCaps.Inheritable)+len(defCaps.Permitted))
-	defaultCaps = append(defaultCaps, defCaps.Ambient...)
-	defaultCaps = append(defaultCaps, defCaps.Bounding...)
-	defaultCaps = append(defaultCaps, defCaps.Effective...)
-	defaultCaps = append(defaultCaps, defCaps.Inheritable...)
-	defaultCaps = append(defaultCaps, defCaps.Permitted...)
+func (c *Container) capAddDrop(caps *specs.LinuxCapabilities) *v1.Capabilities {
 	// Combine all the container's capabilities into a slice
 	containerCaps := make([]string, 0, len(caps.Ambient)+len(caps.Bounding)+len(caps.Effective)+len(caps.Inheritable)+len(caps.Permitted))
 	containerCaps = append(containerCaps, caps.Ambient...)
@@ -1012,12 +1002,12 @@ func capAddDrop(caps *specs.LinuxCapabilities) (*v1.Capabilities, error) {
 	containerCaps = append(containerCaps, caps.Inheritable...)
 	containerCaps = append(containerCaps, caps.Permitted...)
-	calculatedCaps := determineCapAddDropFromCapabilities(defaultCaps, containerCaps)
-	return calculatedCaps, nil
+	calculatedCaps := determineCapAddDropFromCapabilities(c.runtime.config.Containers.DefaultCapabilities, containerCaps)
+	return calculatedCaps
 }
 // generateKubeSecurityContext generates a securityContext based on the existing container
-func generateKubeSecurityContext(c *Container) (*v1.SecurityContext, error) {
+func generateKubeSecurityContext(c *Container) (*v1.SecurityContext, bool, error) {
 	privileged := c.Privileged()
 	ro := c.IsReadOnly()
 	allowPrivEscalation := !c.config.Spec.Process.NoNewPrivileges
@@ -1025,19 +1015,17 @@ func generateKubeSecurityContext(c *Container) (*v1.SecurityContext, error) {
 	var capabilities *v1.Capabilities
 	if !privileged {
 		// Running privileged adds all caps.
-		newCaps, err := capAddDrop(c.config.Spec.Process.Capabilities)
-		if err != nil {
-			return nil, err
-		}
-		capabilities = newCaps
+		capabilities = c.capAddDrop(c.config.Spec.Process.Capabilities)
 	}
+	scHasData := false
 	sc := v1.SecurityContext{
 		// RunAsNonRoot is an optional parameter; our first implementations should be root only; however
 		// I'm leaving this as a bread-crumb for later
 		//RunAsNonRoot: &nonRoot,
 	}
 	if capabilities != nil {
+		scHasData = true
 		sc.Capabilities = capabilities
 	}
 	var selinuxOpts v1.SELinuxOptions
@@ -1048,24 +1036,30 @@ func generateKubeSecurityContext(c *Container) (*v1.SecurityContext, error) {
 		case "type":
 			selinuxOpts.Type = opts[1]
 			sc.SELinuxOptions = &selinuxOpts
+			scHasData = true
 		case "level":
 			selinuxOpts.Level = opts[1]
 			sc.SELinuxOptions = &selinuxOpts
+			scHasData = true
 		}
 	case 1:
 		if opts[0] == "disable" {
 			selinuxOpts.Type = "spc_t"
 			sc.SELinuxOptions = &selinuxOpts
+			scHasData = true
 		}
 	}
 	if !allowPrivEscalation {
+		scHasData = true
 		sc.AllowPrivilegeEscalation = &allowPrivEscalation
 	}
 	if privileged {
+		scHasData = true
 		sc.Privileged = &privileged
 	}
 	if ro {
+		scHasData = true
 		sc.ReadOnlyRootFilesystem = &ro
 	}
 	if c.User() != "" {
@@ -1074,7 +1068,7 @@ func generateKubeSecurityContext(c *Container) (*v1.SecurityContext, error) {
 		defer c.lock.Unlock()
 	}
 	if err := c.syncContainer(); err != nil {
-		return nil, fmt.Errorf("unable to sync container during YAML generation: %w", err)
+		return nil, false, fmt.Errorf("unable to sync container during YAML generation: %w", err)
 	}
 	mountpoint := c.state.Mountpoint
@@ -1082,7 +1076,7 @@ func generateKubeSecurityContext(c *Container) (*v1.SecurityContext, error) {
 	var err error
 	mountpoint, err = c.mount()
 	if err != nil {
-		return nil, fmt.Errorf("failed to mount %s mountpoint: %w", c.ID(), err)
+		return nil, false, fmt.Errorf("failed to mount %s mountpoint: %w", c.ID(), err)
 	}
 	defer func() {
 		if err := c.unmount(false); err != nil {
@@ -1094,14 +1088,16 @@ func generateKubeSecurityContext(c *Container) (*v1.SecurityContext, error) {
 	execUser, err := lookup.GetUserGroupInfo(mountpoint, c.User(), nil)
 	if err != nil {
-		return nil, err
+		return nil, false, err
 	}
 	uid := int64(execUser.Uid)
 	gid := int64(execUser.Gid)
+	scHasData = true
 	sc.RunAsUser = &uid
 	sc.RunAsGroup = &gid
 	}
-	return &sc, nil
+	return &sc, scHasData, nil
 }
 // generateKubeVolumeDeviceFromLinuxDevice takes a list of devices and makes a VolumeDevice struct for kube


@@ -293,7 +293,7 @@ func (r *RootlessNetNS) Cleanup(runtime *Runtime) error {
 		return nil
 	}
 	logrus.Debug("Cleaning up rootless network namespace")
-	err = netns.UnmountNS(r.ns)
+	err = netns.UnmountNS(r.ns.Path())
 	if err != nil {
 		return err
 	}
@@ -605,7 +605,7 @@ func (r *Runtime) createNetNS(ctr *Container) (n ns.NetNS, q map[string]types.St
 	}
 	defer func() {
 		if retErr != nil {
-			if err := netns.UnmountNS(ctrNS); err != nil {
+			if err := netns.UnmountNS(ctrNS.Path()); err != nil {
 				logrus.Errorf("Unmounting partially created network namespace for container %s: %v", ctr.ID(), err)
 			}
 			if err := ctrNS.Close(); err != nil {
@@ -705,7 +705,7 @@ func (r *Runtime) teardownNetNS(ctr *Container) error {
 	prevErr := r.teardownCNI(ctr)
 	// First unmount the namespace
-	if err := netns.UnmountNS(ctr.state.NetNS); err != nil {
+	if err := netns.UnmountNS(ctr.state.NetNS.Path()); err != nil {
 		if prevErr != nil {
 			logrus.Error(prevErr)
 		}


@@ -6,12 +6,10 @@
 default_capabilities = [
 	"CHOWN",
 	"DAC_OVERRIDE",
-	"FOWNER",
 	"FSETID",
 	"KILL",
 	"MKNOD",
 	"NET_BIND_SERVICE",
 	"SETGID",
 	"SETPCAP",
-	"SETUID",
 ]
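The tests below exercise this trimmed test configuration through the CONTAINERS_CONF environment variable; the same mechanism works by hand (the file path here is illustrative):

```
# Point podman at a custom containers.conf and confirm the reduced
# capability set takes effect inside the container:
CONTAINERS_CONF=/tmp/containers.conf podman run --rm alpine \
    grep CapEff /proc/self/status
```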


@@ -113,8 +113,8 @@ var _ = Describe("Verify podman containers.conf usage", func() {
 		Expect(result).Should(Exit(0))
 		Expect(result.Out.Contents()).To(
 			And(
-				ContainSubstring("SYS_CHROOT"),
-				ContainSubstring("NET_RAW"),
+				ContainSubstring("FOWNER"),
+				ContainSubstring("SETFCAP"),
 			))
 	})
@@ -130,8 +130,8 @@ var _ = Describe("Verify podman containers.conf usage", func() {
 		Expect(result).Should(Exit(0))
 		Expect(result.Out.Contents()).ToNot(
 			And(
-				ContainSubstring("SYS_CHROOT"),
-				ContainSubstring("NET_RAW"),
+				ContainSubstring("SETUID"),
+				ContainSubstring("FOWNER"),
 			))
 	})


@@ -69,6 +69,7 @@ var _ = Describe("Podman generate kube", func() {
 		Expect(pod.Spec.SecurityContext).To(BeNil())
 		Expect(pod.Spec.DNSConfig).To(BeNil())
 		Expect(pod.Spec.Containers[0]).To(HaveField("WorkingDir", ""))
+		Expect(pod.Spec.Containers[0].SecurityContext).To(BeNil())
 		Expect(pod.Spec.Containers[0].Env).To(BeNil())
 		Expect(pod).To(HaveField("Name", "top-pod"))


@@ -514,7 +514,7 @@ var _ = Describe("Podman inspect", func() {
 	It("Dropped capabilities are sorted", func() {
 		ctrName := "testCtr"
-		session := podmanTest.Podman([]string{"run", "-d", "--cap-drop", "CAP_AUDIT_WRITE", "--cap-drop", "CAP_MKNOD", "--cap-drop", "CAP_NET_RAW", "--name", ctrName, ALPINE, "top"})
+		session := podmanTest.Podman([]string{"run", "-d", "--cap-drop", "SETUID", "--cap-drop", "SETGID", "--cap-drop", "CAP_NET_BIND_SERVICE", "--name", ctrName, ALPINE, "top"})
 		session.WaitWithDefaultTimeout()
 		Expect(session).Should(Exit(0))
@@ -525,9 +525,26 @@
 		data := inspect.InspectContainerToJSON()
 		Expect(data).To(HaveLen(1))
 		Expect(data[0].HostConfig.CapDrop).To(HaveLen(3))
-		Expect(data[0].HostConfig.CapDrop[0]).To(Equal("CAP_AUDIT_WRITE"))
-		Expect(data[0].HostConfig.CapDrop[1]).To(Equal("CAP_MKNOD"))
-		Expect(data[0].HostConfig.CapDrop[2]).To(Equal("CAP_NET_RAW"))
+		Expect(data[0].HostConfig.CapDrop[0]).To(Equal("CAP_NET_BIND_SERVICE"))
+		Expect(data[0].HostConfig.CapDrop[1]).To(Equal("CAP_SETGID"))
+		Expect(data[0].HostConfig.CapDrop[2]).To(Equal("CAP_SETUID"))
+	})
+
+	It("Add capabilities are sorted", func() {
+		ctrName := "testCtr"
+		session := podmanTest.Podman([]string{"run", "-d", "--cap-add", "SYS_ADMIN", "--cap-add", "CAP_NET_ADMIN", "--name", ctrName, ALPINE, "top"})
+		session.WaitWithDefaultTimeout()
+		Expect(session).Should(Exit(0))
+
+		inspect := podmanTest.Podman([]string{"inspect", ctrName})
+		inspect.WaitWithDefaultTimeout()
+		Expect(inspect).Should(Exit(0))
+
+		data := inspect.InspectContainerToJSON()
+		Expect(data).To(HaveLen(1))
+		Expect(data[0].HostConfig.CapAdd).To(HaveLen(2))
+		Expect(data[0].HostConfig.CapAdd[0]).To(Equal("CAP_NET_ADMIN"))
+		Expect(data[0].HostConfig.CapAdd[1]).To(Equal("CAP_SYS_ADMIN"))
 	})
 	It("podman inspect container with GO format for PidFile", func() {


@@ -62,11 +62,6 @@ spec:
     resources: {}
     securityContext:
       allowPrivilegeEscalation: true
-      capabilities:
-        drop:
-        - CAP_MKNOD
-        - CAP_NET_RAW
-        - CAP_AUDIT_WRITE
       privileged: false
       readOnlyRootFilesystem: false
       seLinuxOptions: {}

View File

@@ -423,11 +423,6 @@ spec:
     name: test
     securityContext:
       allowPrivilegeEscalation: true
-      capabilities:
-        drop:
-        - CAP_MKNOD
-        - CAP_NET_RAW
-        - CAP_AUDIT_WRITE
       privileged: false
       readOnlyRootFilesystem: false
       seLinuxOptions:
@@ -4463,12 +4458,6 @@ spec:
     - "1000"
     image: non-existing-image
     name: vol-test-3
-    securityContext:
-      capabilities:
-        drop:
-        - CAP_MKNOD
-        - CAP_NET_RAW
-        - CAP_AUDIT_WRITE
 `
 // the image is incorrect so the kube play will fail, but it will clean up the pod that was created for it before the failure happened


@@ -411,7 +411,7 @@ var _ = Describe("Podman pod create", func() {
 		Expect(session.ErrorToString()).To(ContainSubstring("extra host entries must be specified on the pod: network cannot be configured when it is shared with a pod"))
 		// verify we can see the pods hosts
-		session = podmanTest.Podman([]string{"run", "--pod", podID, ALPINE, "ping", "-c", "1", "host1"})
+		session = podmanTest.Podman([]string{"run", "--cap-add", "net_raw", "--pod", podID, ALPINE, "ping", "-c", "1", "host1"})
 		session.WaitWithDefaultTimeout()
 		Expect(session).Should(Exit(0))
 	})


@@ -434,7 +434,7 @@ EXPOSE 2004-2005/tcp`, ALPINE)
 	})
 	It("podman run slirp4netns network with host loopback", func() {
-		session := podmanTest.Podman([]string{"run", "--network", "slirp4netns:allow_host_loopback=true", ALPINE, "ping", "-c1", "10.0.2.2"})
+		session := podmanTest.Podman([]string{"run", "--cap-add", "net_raw", "--network", "slirp4netns:allow_host_loopback=true", ALPINE, "ping", "-c1", "10.0.2.2"})
 		session.Wait(30)
 		Expect(session).Should(Exit(0))
 	})
@@ -451,7 +451,7 @@ EXPOSE 2004-2005/tcp`, ALPINE)
 		Expect(slirp4netnsHelp).Should(Exit(0))
 		networkConfiguration := "slirp4netns:cidr=192.168.0.0/24,allow_host_loopback=true"
-		session := podmanTest.Podman([]string{"run", "--network", networkConfiguration, ALPINE, "ping", "-c1", "192.168.0.2"})
+		session := podmanTest.Podman([]string{"run", "--cap-add", "net_raw", "--network", networkConfiguration, ALPINE, "ping", "-c1", "192.168.0.2"})
 		session.Wait(30)
 		if strings.Contains(slirp4netnsHelp.OutputToString(), "cidr") {
@@ -988,11 +988,11 @@ EXPOSE 2004-2005/tcp`, ALPINE)
 	pingTest := func(netns string) {
 		hostname := "testctr"
-		run := podmanTest.Podman([]string{"run", netns, "--hostname", hostname, ALPINE, "ping", "-c", "1", hostname})
+		run := podmanTest.Podman([]string{"run", netns, "--cap-add", "net_raw", "--hostname", hostname, ALPINE, "ping", "-c", "1", hostname})
 		run.WaitWithDefaultTimeout()
 		Expect(run).Should(Exit(0))
-		run = podmanTest.Podman([]string{"run", netns, "--hostname", hostname, "--name", "test", ALPINE, "ping", "-c", "1", "test"})
+		run = podmanTest.Podman([]string{"run", netns, "--cap-add", "net_raw", "--hostname", hostname, "--name", "test", ALPINE, "ping", "-c", "1", "test"})
 		run.WaitWithDefaultTimeout()
 		Expect(run).Should(Exit(0))
 	}


@@ -94,7 +94,7 @@ var _ = Describe("Podman generate kube", func() {
 		test1.WaitWithDefaultTimeout()
 		Expect(test1).Should(Exit(0))
-		commit := podmanTest.Podman([]string{"commit", "-c", "label=io.containers.capabilities=sys_chroot,setuid", "test1", "image1"})
+		commit := podmanTest.Podman([]string{"commit", "-c", "label=io.containers.capabilities=setgid,setuid", "test1", "image1"})
 		commit.WaitWithDefaultTimeout()
 		Expect(commit).Should(Exit(0))
@@ -108,7 +108,7 @@
 		ctr := inspect.InspectContainerToJSON()
 		caps := strings.Join(ctr[0].EffectiveCaps, ",")
-		Expect(caps).To(Equal("CAP_SETUID,CAP_SYS_CHROOT"))
+		Expect(caps).To(Equal("CAP_SETGID,CAP_SETUID"))
 	})


@@ -495,7 +495,7 @@ var _ = Describe("Podman run", func() {
session := podmanTest.Podman([]string{"run", "--rm", "--user", "bin", ALPINE, "grep", "CapBnd", "/proc/self/status"})
session.WaitWithDefaultTimeout()
Expect(session).Should(Exit(0))
-Expect(session.OutputToString()).To(ContainSubstring("00000000a80425fb"))
+Expect(session.OutputToString()).To(ContainSubstring("00000000800005fb"))
session = podmanTest.Podman([]string{"run", "--rm", "--user", "bin", ALPINE, "grep", "CapEff", "/proc/self/status"})
session.WaitWithDefaultTimeout()
@@ -510,12 +510,12 @@ var _ = Describe("Podman run", func() {
session = podmanTest.Podman([]string{"run", "--rm", "--user", "root", ALPINE, "grep", "CapBnd", "/proc/self/status"})
session.WaitWithDefaultTimeout()
Expect(session).Should(Exit(0))
-Expect(session.OutputToString()).To(ContainSubstring("00000000a80425fb"))
+Expect(session.OutputToString()).To(ContainSubstring("00000000800005fb"))
session = podmanTest.Podman([]string{"run", "--rm", "--user", "root", ALPINE, "grep", "CapEff", "/proc/self/status"})
session.WaitWithDefaultTimeout()
Expect(session).Should(Exit(0))
-Expect(session.OutputToString()).To(ContainSubstring("00000000a80425fb"))
+Expect(session.OutputToString()).To(ContainSubstring("00000000800005fb"))
session = podmanTest.Podman([]string{"run", "--rm", "--user", "root", ALPINE, "grep", "CapInh", "/proc/self/status"})
session.WaitWithDefaultTimeout()
@@ -525,12 +525,12 @@ var _ = Describe("Podman run", func() {
session = podmanTest.Podman([]string{"run", "--rm", ALPINE, "grep", "CapBnd", "/proc/self/status"})
session.WaitWithDefaultTimeout()
Expect(session).Should(Exit(0))
-Expect(session.OutputToString()).To(ContainSubstring("00000000a80425fb"))
+Expect(session.OutputToString()).To(ContainSubstring("00000000800005fb"))
session = podmanTest.Podman([]string{"run", "--rm", ALPINE, "grep", "CapEff", "/proc/self/status"})
session.WaitWithDefaultTimeout()
Expect(session).Should(Exit(0))
-Expect(session.OutputToString()).To(ContainSubstring("00000000a80425fb"))
+Expect(session.OutputToString()).To(ContainSubstring("00000000800005fb"))
session = podmanTest.Podman([]string{"run", "--user=1000:1000", "--cap-add=DAC_OVERRIDE", "--rm", ALPINE, "grep", "CapAmb", "/proc/self/status"})
session.WaitWithDefaultTimeout()
@@ -598,7 +598,7 @@ USER bin`, BB)
session := podmanTest.Podman([]string{"run", "--rm", "--user", "bin", "test", "grep", "CapBnd", "/proc/self/status"})
session.WaitWithDefaultTimeout()
Expect(session).Should(Exit(0))
-Expect(session.OutputToString()).To(ContainSubstring("00000000a80425fb"))
+Expect(session.OutputToString()).To(ContainSubstring("00000000800005fb"))
session = podmanTest.Podman([]string{"run", "--rm", "--user", "bin", "test", "grep", "CapEff", "/proc/self/status"})
session.WaitWithDefaultTimeout()
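
The new hex values in these assertions follow directly from the trimmed default set: each capability is one bit in the CapBnd mask, indexed by its number in linux/capability.h. A quick standalone sketch (capability numbers hardcoded here for illustration) reproduces both the new and the old masks:

```go
package main

import "fmt"

func main() {
	// Bit positions from linux/capability.h for the new default set.
	newDefaults := []uint{0 /* CHOWN */, 1 /* DAC_OVERRIDE */, 3, /* FOWNER */
		4 /* FSETID */, 5 /* KILL */, 6 /* SETGID */, 7 /* SETUID */,
		8 /* SETPCAP */, 10 /* NET_BIND_SERVICE */, 31 /* SETFCAP */}
	var mask uint64
	for _, bit := range newDefaults {
		mask |= 1 << bit
	}
	fmt.Printf("%016x\n", mask) // 00000000800005fb

	// Adding back the four dropped capabilities restores the old mask.
	for _, bit := range []uint{13 /* NET_RAW */, 18 /* SYS_CHROOT */, 27 /* MKNOD */, 29 /* AUDIT_WRITE */} {
		mask |= 1 << bit
	}
	fmt.Printf("%016x\n", mask) // 00000000a80425fb
}
```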


@@ -262,10 +262,10 @@ EOF
t0=$(date --iso-8601=seconds)
CONTAINERS_CONF=$containersConf run_podman create --name=$cname $IMAGE
-run_podman container inspect --size=true $cname
+CONTAINERS_CONF=$containersConf run_podman container inspect --size=true $cname
inspect_json=$(jq -r --tab . <<< "$output")
-run_podman --events-backend=$1 events \
+CONTAINERS_CONF=$containersConf run_podman --events-backend=$1 events \
--since="$t0" \
--filter=status=$cname \
--filter=status=create \
@@ -276,7 +276,7 @@ EOF
# Make sure that the inspect data doesn't show by default in
# podman-events.
-run_podman --events-backend=$1 events \
+CONTAINERS_CONF=$containersConf run_podman --events-backend=$1 events \
--since="$t0" \
--filter=status=$cname \
--filter=status=create \


@@ -5,8 +5,8 @@
load helpers

-# standard capability drop list
-capabilities='{"drop":["CAP_MKNOD","CAP_NET_RAW","CAP_AUDIT_WRITE"]}'
+# capability drop list
+capabilities='{"drop":["CAP_FOWNER","CAP_SETFCAP"]}'

# Warning that is emitted once on containers, multiple times on pods
kubernetes_63='Truncation Annotation: .* Kubernetes only allows 63 characters'
@@ -31,7 +31,7 @@ json.dump(yaml.safe_load(sys.stdin), sys.stdout)'
@test "podman kube generate - container" {
cname=c$(random_string 15)
-run_podman container create --name $cname $IMAGE top
+run_podman container create --cap-drop fowner --cap-drop setfcap --name $cname $IMAGE top
run_podman kube generate $cname
# Convert yaml to json, and dump to stdout (to help in case of errors)
@@ -95,7 +95,7 @@ status | = | null
run_podman 125 kube generate $pname
assert "$output" =~ "Error: .* only has an infra container"
-run_podman container create --name $cname1 --pod $pname $IMAGE top
+run_podman container create --cap-drop fowner --cap-drop setfcap --name $cname1 --pod $pname $IMAGE top
run_podman container create --name $cname2 --pod $pname $IMAGE bottom
run_podman kube generate $pname


@@ -86,6 +86,12 @@ type Container interface {
// container to be terminated by some error condition (including calling
// Close).
Wait() error
+// WaitChannel returns the wait channel of the container
+WaitChannel() <-chan struct{}
+// WaitError returns the container termination error.
+// This function should only be called after the channel in WaitChannel()
+// is closed. Otherwise it is not thread safe.
+WaitError() error
// Modify sends a request to modify container resources
Modify(ctx context.Context, config interface{}) error
}
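
WaitChannel and WaitError split the blocking Wait into a select-friendly form. A hedged sketch of how a caller might multiplex a container wait against a context (the interface subset declared here mirrors the extension above):

```go
package main

import "context"

// waiter is the subset of the Container interface shown above.
type waiter interface {
	WaitChannel() <-chan struct{}
	WaitError() error
}

// waitOrCancel blocks until the container exits or ctx is cancelled.
func waitOrCancel(ctx context.Context, c waiter) error {
	select {
	case <-c.WaitChannel():
		// The wait channel is closed, so WaitError is now safe to read.
		return c.WaitError()
	case <-ctx.Done():
		return ctx.Err()
	}
}
```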


@@ -287,11 +287,19 @@ func (computeSystem *System) waitBackground() {
oc.SetSpanStatus(span, err)
}

+func (computeSystem *System) WaitChannel() <-chan struct{} {
+return computeSystem.waitBlock
+}
+
+func (computeSystem *System) WaitError() error {
+return computeSystem.waitError
+}
+
// Wait synchronously waits for the compute system to shutdown or terminate. If
// the compute system has already exited returns the previous error (if any).
func (computeSystem *System) Wait() error {
-<-computeSystem.waitBlock
-return computeSystem.waitError
+<-computeSystem.WaitChannel()
+return computeSystem.WaitError()
}

// ExitError returns an error describing the reason the compute system terminated.


@@ -15,6 +15,10 @@ import (
"github.com/sirupsen/logrus"
)

+func (n *cniNetwork) NetworkUpdate(name string, options types.NetworkUpdateOptions) error {
+return fmt.Errorf("NetworkUpdate is not supported for backend CNI: %w", types.ErrInvalidArg)
+}
+
// NetworkCreate will take a partial filled Network and fill the
// missing fields. It creates the Network and returns the full Network.
func (n *cniNetwork) NetworkCreate(net types.Network, options *types.NetworkCreateOptions) (types.Network, error) {


@@ -10,6 +10,7 @@ import (
"net"
"os"
"path/filepath"
+"reflect"
"strconv"
"time"
@@ -19,6 +20,65 @@ import (
"github.com/containers/storage/pkg/stringid"
)

+func sliceRemoveDuplicates(strList []string) []string {
+list := make([]string, 0, len(strList))
+for _, item := range strList {
+if !util.StringInSlice(item, list) {
+list = append(list, item)
+}
+}
+return list
+}
+
+func (n *netavarkNetwork) commitNetwork(network *types.Network) error {
+confPath := filepath.Join(n.networkConfigDir, network.Name+".json")
+f, err := os.Create(confPath)
+if err != nil {
+return err
+}
+defer f.Close()
+enc := json.NewEncoder(f)
+enc.SetIndent("", " ")
+err = enc.Encode(network)
+if err != nil {
+return err
+}
+return nil
+}
+
+func (n *netavarkNetwork) NetworkUpdate(name string, options types.NetworkUpdateOptions) error {
+n.lock.Lock()
+defer n.lock.Unlock()
+err := n.loadNetworks()
+if err != nil {
+return err
+}
+network, err := n.getNetwork(name)
+if err != nil {
+return err
+}
+networkDNSServersBefore := network.NetworkDNSServers
+networkDNSServersAfter := []string{}
+for _, server := range networkDNSServersBefore {
+if util.StringInSlice(server, options.RemoveDNSServers) {
+continue
+}
+networkDNSServersAfter = append(networkDNSServersAfter, server)
+}
+networkDNSServersAfter = append(networkDNSServersAfter, options.AddDNSServers...)
+networkDNSServersAfter = sliceRemoveDuplicates(networkDNSServersAfter)
+network.NetworkDNSServers = networkDNSServersAfter
+if reflect.DeepEqual(networkDNSServersBefore, networkDNSServersAfter) {
+return nil
+}
+err = n.commitNetwork(network)
+if err != nil {
+return err
+}
+return n.execUpdate(network.Name, network.NetworkDNSServers)
+}
+
// NetworkCreate will take a partial filled Network and fill the
// missing fields. It creates the Network and returns the full Network.
func (n *netavarkNetwork) NetworkCreate(net types.Network, options *types.NetworkCreateOptions) (types.Network, error) {
@@ -163,15 +223,7 @@ func (n *netavarkNetwork) networkCreate(newNetwork *types.Network, defaultNet bo
newNetwork.Created = time.Now()
if !defaultNet {
-confPath := filepath.Join(n.networkConfigDir, newNetwork.Name+".json")
-f, err := os.Create(confPath)
-if err != nil {
-return nil, err
-}
-defer f.Close()
-enc := json.NewEncoder(f)
-enc.SetIndent("", " ")
-err = enc.Encode(newNetwork)
+err = n.commitNetwork(newNetwork)
if err != nil {
return nil, err
}
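
The update keeps user-defined priority order: existing servers minus the removals, then the additions, then an order-preserving dedupe. A standalone sketch of the same merge, for illustration only:

```go
package main

import "fmt"

// mergeDNS mirrors the update logic above: drop removals from the
// current list, append additions, then dedupe preserving first occurrence.
func mergeDNS(current, remove, add []string) []string {
	drop := map[string]bool{}
	for _, s := range remove {
		drop[s] = true
	}
	merged := []string{}
	for _, s := range current {
		if !drop[s] {
			merged = append(merged, s)
		}
	}
	merged = append(merged, add...)

	seen := map[string]bool{}
	out := make([]string, 0, len(merged))
	for _, s := range merged {
		if !seen[s] {
			seen[s] = true
			out = append(out, s)
		}
	}
	return out
}

func main() {
	fmt.Println(mergeDNS(
		[]string{"1.1.1.1", "8.8.8.8"},
		[]string{"8.8.8.8"},
		[]string{"9.9.9.9", "1.1.1.1"},
	)) // [1.1.1.1 9.9.9.9]
}
```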


@@ -7,6 +7,7 @@ import (
"encoding/json"
"fmt"
"strconv"
+"strings"

"github.com/containers/common/libnetwork/internal/util"
"github.com/containers/common/libnetwork/types"
@@ -18,6 +19,11 @@ type netavarkOptions struct {
Networks map[string]*types.Network `json:"network_info"`
}

+func (n *netavarkNetwork) execUpdate(networkName string, networkDNSServers []string) error {
+retErr := n.execNetavark([]string{"update", networkName, "--network-dns-servers", strings.Join(networkDNSServers, ",")}, nil, nil)
+return retErr
+}
+
// Setup will setup the container network namespace. It returns
// a map of StatusBlocks, the key is the network name.
func (n *netavarkNetwork) Setup(namespacePath string, options types.SetupOptions) (map[string]types.StatusBlock, error) {


@@ -10,6 +10,8 @@ type ContainerNetwork interface {
// NetworkCreate will take a partial filled Network and fill the
// missing fields. It creates the Network and returns the full Network.
NetworkCreate(Network, *NetworkCreateOptions) (Network, error)
+// NetworkUpdate will take network name and ID and updates network DNS Servers.
+NetworkUpdate(nameOrID string, options NetworkUpdateOptions) error
// NetworkRemove will remove the Network with the given name or ID.
NetworkRemove(nameOrID string) error
// NetworkList will return all known Networks. Optionally you can
@@ -70,6 +72,14 @@ type Network struct {
IPAMOptions map[string]string `json:"ipam_options,omitempty"`
}

+// NetworkOptions for a given container.
+type NetworkUpdateOptions struct {
+// List of custom DNS server for podman's DNS resolver.
+// Priority order will be kept as defined by user in the configuration.
+AddDNSServers []string `json:"add_dns_servers,omitempty"`
+RemoveDNSServers []string `json:"remove_dns_servers,omitempty"`
+}
+
// IPNet is used as custom net.IPNet type to add Marshal/Unmarshal methods.
type IPNet struct {
net.IPNet
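
With the interface extension in place, a caller holding any ContainerNetwork implementation can swap DNS servers in one call. A hedged usage sketch against the types above (the server addresses are placeholders; per the hunk earlier, the CNI backend rejects the call with ErrInvalidArg):

```go
// rotateDNS swaps one resolver for another on an existing network.
// Sketch only: cn is any types.ContainerNetwork implementation.
func rotateDNS(cn ContainerNetwork, network string) error {
	return cn.NetworkUpdate(network, NetworkUpdateOptions{
		AddDNSServers:    []string{"10.0.0.53"},
		RemoveDNSServers: []string{"8.8.8.8"},
	})
}
```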


@@ -52,19 +52,18 @@
# List of default capabilities for containers. If it is empty or commented out,
# the default capabilities defined in the container engine will be added.
#
-default_capabilities = [
-"CHOWN",
-"DAC_OVERRIDE",
-"FOWNER",
-"FSETID",
-"KILL",
-"NET_BIND_SERVICE",
-"SETFCAP",
-"SETGID",
-"SETPCAP",
-"SETUID",
-"SYS_CHROOT"
-]
+#default_capabilities = [
+#  "CHOWN",
+#  "DAC_OVERRIDE",
+#  "FOWNER",
+#  "FSETID",
+#  "KILL",
+#  "NET_BIND_SERVICE",
+#  "SETFCAP",
+#  "SETGID",
+#  "SETPCAP",
+#  "SETUID",
+#]

# A list of sysctls to be set in containers by default,
# specified as "name=value",


@@ -50,20 +50,16 @@ var (
DefaultHooksDirs = []string{"/usr/share/containers/oci/hooks.d"}
// DefaultCapabilities is the default for the default_capabilities option in the containers.conf file.
DefaultCapabilities = []string{
-"CAP_AUDIT_WRITE",
"CAP_CHOWN",
"CAP_DAC_OVERRIDE",
"CAP_FOWNER",
"CAP_FSETID",
"CAP_KILL",
-"CAP_MKNOD",
"CAP_NET_BIND_SERVICE",
-"CAP_NET_RAW",
"CAP_SETFCAP",
"CAP_SETGID",
"CAP_SETPCAP",
"CAP_SETUID",
-"CAP_SYS_CHROOT",
}

// Search these locations in which CNIPlugins can be installed.


@@ -14,5 +14,9 @@ func getLibpodTmpDir() string {
// getDefaultMachineVolumes returns default mounted volumes (possibly with env vars, which will be expanded)
func getDefaultMachineVolumes() []string {
-return []string{"$HOME:$HOME"}
+return []string{
+"/Users:/Users",
+"/private:/private",
+"/var/folders:/var/folders",
+}
}


@@ -179,14 +179,13 @@ func NewNSWithName(name string) (ns.NetNS, error) {
return ns.GetNS(nsPath)
}

-// UnmountNS unmounts the NS held by the netns object
-func UnmountNS(netns ns.NetNS) error {
+// UnmountNS unmounts the given netns path
+func UnmountNS(nsPath string) error {
nsRunDir, err := GetNSRunDir()
if err != nil {
return err
}

-nsPath := netns.Path()
// Only unmount if it's been bind-mounted (don't touch namespaces in /proc...)
if strings.HasPrefix(nsPath, nsRunDir) {
if err := unix.Unmount(nsPath, unix.MNT_DETACH); err != nil {


@@ -1,6 +1,8 @@
/*
Package report provides helper structs/methods/funcs for formatting output

+# Examples
+
To format output for an array of structs:

ExamplePodman:
@@ -54,7 +56,7 @@ Helpers:
... "table" keyword prefix in format text
}

-Template Functions:
+# Template Functions

The following template functions are added to the template when parsed:
- join strings.Join, {{join .Field separator}}


@@ -522,9 +522,8 @@ func parseRetryAfter(res *http.Response, fallbackDelay time.Duration) time.Durat
return time.Duration(num) * time.Second
}
// Second, check if we have an HTTP date.
-// If the delta between the date and now is positive, use it.
-// Otherwise, fall back to using the default exponential back off.
if t, err := http.ParseTime(after); err == nil {
+// If the delta between the date and now is positive, use it.
delta := time.Until(t)
if delta > 0 {
return delta
@@ -532,7 +531,6 @@ func parseRetryAfter(res *http.Response, fallbackDelay time.Duration) time.Durat
logrus.Debugf("Retry-After date in the past, ignoring it")
return fallbackDelay
}
-// If the header contents are bogus, fall back to using the default exponential back off.
logrus.Debugf("Invalid Retry-After format, ignoring it")
return fallbackDelay
}
@@ -590,7 +588,7 @@ func (c *dockerClient) makeRequestToResolvedURL(ctx context.Context, method stri
case <-time.After(delay):
// Nothing
}
-delay = delay * 2 // exponential back off
+delay = delay * 2 // If the registry does not specify a delay, back off exponentially.
}
}
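
The relocated comments describe the standard two-format Retry-After handling: an integer number of seconds, or an HTTP date, with exponential backoff as the fallback. A minimal standalone version of the same decision tree, using only standard-library calls:

```go
package main

import (
	"fmt"
	"net/http"
	"strconv"
	"time"
)

// retryAfter resolves a Retry-After header value to a wait duration,
// returning fallbackDelay when the header is absent, in the past, or bogus.
func retryAfter(header string, fallbackDelay time.Duration) time.Duration {
	// Integer form: number of seconds to wait.
	if num, err := strconv.ParseInt(header, 10, 64); err == nil && num > 0 {
		return time.Duration(num) * time.Second
	}
	// HTTP-date form: wait until that time, if it is in the future.
	if t, err := http.ParseTime(header); err == nil {
		if delta := time.Until(t); delta > 0 {
			return delta
		}
	}
	return fallbackDelay
}

func main() {
	fmt.Println(retryAfter("120", 2*time.Second))   // 2m0s
	fmt.Println(retryAfter("bogus", 2*time.Second)) // 2s
}
```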


@@ -639,7 +639,7 @@ func (d *dockerImageDestination) putSignaturesToSigstoreAttachments(ctx context.
ociManifest, err := d.c.getSigstoreAttachmentManifest(ctx, d.ref, manifestDigest)
if err != nil {
-return nil
+return err
}
var ociConfig imgspecv1.Image // Most fields empty by default
if ociManifest == nil {
@@ -711,13 +711,13 @@ func (d *dockerImageDestination) putSignaturesToSigstoreAttachments(ctx context.
LayerIndex: nil,
})
if err != nil {
-return nil
+return err
}
ociManifest.Config = configDesc

manifestBlob, err := ociManifest.Serialize()
if err != nil {
-return nil
+return err
}
logrus.Debugf("Uploading sigstore attachment manifest")
return d.uploadManifest(ctx, manifestBlob, sigstoreAttachmentTag(manifestDigest))


@@ -34,15 +34,19 @@ func NewReaderFromFile(sys *types.SystemContext, path string) (*Reader, error) {
}
defer file.Close()

-// If the file is already not compressed we can just return the file itself
-// as a source. Otherwise we pass the stream to NewReaderFromStream.
-stream, isCompressed, err := compression.AutoDecompress(file)
-if err != nil {
-return nil, fmt.Errorf("detecting compression for file %q: %w", path, err)
-}
-defer stream.Close()
-if !isCompressed {
-return newReader(path, false)
+// If the file is seekable and already not compressed we can just return the file itself
+// as a source. Otherwise we pass the stream to NewReaderFromStream.
+var stream io.Reader = file
+if _, err := file.Seek(0, io.SeekCurrent); err == nil { // seeking is possible
+decompressed, isCompressed, err := compression.AutoDecompress(file)
+if err != nil {
+return nil, fmt.Errorf("detecting compression for file %q: %w", path, err)
+}
+defer decompressed.Close()
+stream = decompressed
+if !isCompressed {
+return newReader(path, false)
+}
}
return NewReaderFromStream(sys, stream)
}
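
The seek probe used here is the portable way to ask "can this stream be rewound?" without inspecting concrete types: a zero-offset Seek from the current position is a no-op on seekable files and fails on pipes. A small sketch of the pattern in isolation:

```go
package main

import (
	"fmt"
	"io"
	"os"
)

// seekable reports whether f supports seeking, using the same
// zero-movement probe as NewReaderFromFile above.
func seekable(f *os.File) bool {
	_, err := f.Seek(0, io.SeekCurrent)
	return err == nil
}

func main() {
	f, err := os.Open(os.DevNull)
	if err != nil {
		panic(err)
	}
	defer f.Close()
	fmt.Println(seekable(f)) // true for regular files and devices; false for pipes
}
```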


@@ -346,7 +346,7 @@ func (t *tarFI) Sys() interface{} {
func (w *Writer) sendSymlinkLocked(path string, target string) error {
hdr, err := tar.FileInfoHeader(&tarFI{path: path, size: 0, isSymlink: true}, target)
if err != nil {
-return nil
+return err
}
logrus.Debugf("Sending as tar link %s -> %s", path, target)
return w.tar.WriteHeader(hdr)
@@ -363,7 +363,7 @@ func (w *Writer) sendBytesLocked(path string, b []byte) error {
func (w *Writer) sendFileLocked(path string, expectedSize int64, stream io.Reader) error {
hdr, err := tar.FileInfoHeader(&tarFI{path: path, size: expectedSize}, "")
if err != nil {
-return nil
+return err
}
logrus.Debugf("Sending as tar file %s", path)
if err := w.tar.WriteHeader(hdr); err != nil {


@@ -56,7 +56,7 @@ var (
// appropriate for sys and the users configuration.
// A valid key is a repository, a namespace within a registry, or a registry hostname;
// using forms other than just a registry may fail depending on configuration.
-// Returns a human-redable description of the location that was updated.
+// Returns a human-readable description of the location that was updated.
// NOTE: The return value is only intended to be read by humans; its form is not an API,
// it may change (or new forms can be added) any time.
func SetCredentials(sys *types.SystemContext, key, username, password string) (string, error) {
@@ -78,25 +78,28 @@ func SetCredentials(sys *types.SystemContext, key, username, password string) (s
switch helper {
// Special-case the built-in helpers for auth files.
case sysregistriesv2.AuthenticationFileHelper:
-desc, err = modifyJSON(sys, func(auths *dockerConfigFile) (bool, error) {
+desc, err = modifyJSON(sys, func(auths *dockerConfigFile) (bool, string, error) {
if ch, exists := auths.CredHelpers[key]; exists {
if isNamespaced {
-return false, unsupportedNamespaceErr(ch)
+return false, "", unsupportedNamespaceErr(ch)
}
-return false, setAuthToCredHelper(ch, key, username, password)
+desc, err := setAuthToCredHelper(ch, key, username, password)
+if err != nil {
+return false, "", err
+}
+return false, desc, nil
}
creds := base64.StdEncoding.EncodeToString([]byte(username + ":" + password))
newCreds := dockerAuthConfig{Auth: creds}
auths.AuthConfigs[key] = newCreds
-return true, nil
+return true, "", nil
})
// External helpers.
default:
if isNamespaced {
err = unsupportedNamespaceErr(helper)
} else {
-desc = fmt.Sprintf("credential helper: %s", helper)
-err = setAuthToCredHelper(helper, key, username, password)
+desc, err = setAuthToCredHelper(helper, key, username, password)
}
}
if err != nil {
@@ -214,23 +217,25 @@ func getAuthFilePaths(sys *types.SystemContext, homeDir string) []authPath {
// Logging the error as a warning instead and moving on to pulling the image
logrus.Warnf("%v: Trying to pull image in the event that it is a public image.", err)
}
-xdgCfgHome := os.Getenv("XDG_CONFIG_HOME")
-if xdgCfgHome == "" {
-xdgCfgHome = filepath.Join(homeDir, ".config")
-}
-paths = append(paths, authPath{path: filepath.Join(xdgCfgHome, xdgConfigHomePath), legacyFormat: false})
-if dockerConfig := os.Getenv("DOCKER_CONFIG"); dockerConfig != "" {
-paths = append(paths,
-authPath{path: filepath.Join(dockerConfig, "config.json"), legacyFormat: false},
-)
-} else {
-paths = append(paths,
-authPath{path: filepath.Join(homeDir, dockerHomePath), legacyFormat: false},
-)
-}
-paths = append(paths,
-authPath{path: filepath.Join(homeDir, dockerLegacyHomePath), legacyFormat: true},
-)
+if sys == nil || (sys.AuthFilePath == "" && sys.LegacyFormatAuthFilePath == "") {
+xdgCfgHome := os.Getenv("XDG_CONFIG_HOME")
+if xdgCfgHome == "" {
+xdgCfgHome = filepath.Join(homeDir, ".config")
+}
+paths = append(paths, authPath{path: filepath.Join(xdgCfgHome, xdgConfigHomePath), legacyFormat: false})
+if dockerConfig := os.Getenv("DOCKER_CONFIG"); dockerConfig != "" {
+paths = append(paths,
+authPath{path: filepath.Join(dockerConfig, "config.json"), legacyFormat: false},
+)
+} else {
+paths = append(paths,
+authPath{path: filepath.Join(homeDir, dockerHomePath), legacyFormat: false},
+)
+}
+paths = append(paths,
+authPath{path: filepath.Join(homeDir, dockerLegacyHomePath), legacyFormat: true},
+)
+}
return paths
}
@@ -403,7 +408,7 @@ func RemoveAuthentication(sys *types.SystemContext, key string) error {
switch helper {
// Special-case the built-in helper for auth files.
case sysregistriesv2.AuthenticationFileHelper:
-_, err = modifyJSON(sys, func(auths *dockerConfigFile) (bool, error) {
+_, err = modifyJSON(sys, func(auths *dockerConfigFile) (bool, string, error) {
if innerHelper, exists := auths.CredHelpers[key]; exists {
removeFromCredHelper(innerHelper)
}
@@ -411,7 +416,7 @@ func RemoveAuthentication(sys *types.SystemContext, key string) error {
isLoggedIn = true
delete(auths.AuthConfigs, key)
}
-return true, multiErr
+return true, "", multiErr
})
if err != nil {
multiErr = multierror.Append(multiErr, err)
@@ -446,18 +451,18 @@ func RemoveAllAuthentication(sys *types.SystemContext) error {
switch helper {
// Special-case the built-in helper for auth files.
case sysregistriesv2.AuthenticationFileHelper:
-_, err = modifyJSON(sys, func(auths *dockerConfigFile) (bool, error) {
+_, err = modifyJSON(sys, func(auths *dockerConfigFile) (bool, string, error) {
for registry, helper := range auths.CredHelpers {
// Helpers in auth files are expected
// to exist, so no special treatment
// for them.
if err := deleteAuthFromCredHelper(helper, registry); err != nil {
-return false, err
+return false, "", err
}
}
auths.CredHelpers = make(map[string]string)
auths.AuthConfigs = make(map[string]dockerAuthConfig)
-return true, nil
+return true, "", nil
})
// External helpers.
default:
@@ -573,8 +578,11 @@ func readJSONFile(path string, legacyFormat bool) (dockerConfigFile, error) {
// modifyJSON finds an auth.json file, calls editor on the contents, and
// writes it back if editor returns true.
-// Returns a human-redable description of the file, to be returned by SetCredentials.
-func modifyJSON(sys *types.SystemContext, editor func(auths *dockerConfigFile) (bool, error)) (string, error) {
+// Returns a human-readable description of the file, to be returned by SetCredentials.
+//
+// The editor may also return a human-readable description of the updated location; if it is "",
+// the file itself is used.
+func modifyJSON(sys *types.SystemContext, editor func(auths *dockerConfigFile) (bool, string, error)) (string, error) {
path, legacyFormat, err := getPathToAuth(sys)
if err != nil {
return "", err
@@ -593,7 +601,7 @@ func modifyJSON(sys *types.SystemContext, editor func(auths *dockerConfigFile) (
return "", fmt.Errorf("reading JSON file %q: %w", path, err)
}

-updated, err := editor(&auths)
+updated, description, err := editor(&auths)
if err != nil {
return "", fmt.Errorf("updating %q: %w", path, err)
}
@@ -608,7 +616,10 @@ func modifyJSON(sys *types.SystemContext, editor func(auths *dockerConfigFile) (
}
}

-return path, nil
+if description == "" {
+description = path
+}
+return description, nil
}

func getAuthFromCredHelper(credHelper, registry string) (types.DockerAuthConfig, error) {
@@ -636,7 +647,9 @@ func getAuthFromCredHelper(credHelper, registry string) (types.DockerAuthConfig,
}
}

-func setAuthToCredHelper(credHelper, registry, username, password string) error {
+// setAuthToCredHelper stores (username, password) for registry in credHelper.
+// Returns a human-readable description of the destination, to be returned by SetCredentials.
+func setAuthToCredHelper(credHelper, registry, username, password string) (string, error) {
helperName := fmt.Sprintf("docker-credential-%s", credHelper)
p := helperclient.NewShellProgramFunc(helperName)
creds := &credentials.Credentials{
@@ -644,7 +657,10 @@ func setAuthToCredHelper
Username: username,
Secret: password,
}
-return helperclient.Store(p, creds)
+if err := helperclient.Store(p, creds); err != nil {
+return "", err
+}
+return fmt.Sprintf("credential helper: %s", credHelper), nil
}

func deleteAuthFromCredHelper(credHelper, registry string) error {
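
The editor callback now returns an optional human-readable description alongside the write flag, with the empty string meaning "describe the file itself". A toy version of that contract, stripped of the auth specifics, to show the shape of the API (all names here are illustrative):

```go
package main

import "fmt"

type document map[string]string

type editor func(doc *document) (updated bool, desc string, err error)

// apply runs ed over doc and resolves the description the way
// modifyJSON now does: an empty desc falls back to the file path.
func apply(path string, doc document, ed editor) (string, error) {
	_, desc, err := ed(&doc)
	if err != nil {
		return "", fmt.Errorf("updating %q: %w", path, err)
	}
	if desc == "" {
		desc = path
	}
	return desc, nil
}

func main() {
	desc, _ := apply("auth.json", document{}, func(d *document) (bool, string, error) {
		(*d)["quay.io"] = "token"
		return true, "", nil // no custom description: caller reports the file
	})
	fmt.Println(desc) // auth.json
}
```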


@@ -198,6 +198,7 @@ type V1RegistriesConf struct {
}

// Nonempty returns true if config contains at least one configuration entry.
+// Empty arrays are treated as missing entries.
func (config *V1RegistriesConf) Nonempty() bool {
copy := *config // A shallow copy
if copy.V1TOMLConfig.Search.Registries != nil && len(copy.V1TOMLConfig.Search.Registries) == 0 {
@@ -209,7 +210,15 @@ func (config *V1RegistriesConf) Nonempty() bool {
if copy.V1TOMLConfig.Block.Registries != nil && len(copy.V1TOMLConfig.Block.Registries) == 0 {
copy.V1TOMLConfig.Block.Registries = nil
}
-return !reflect.DeepEqual(copy, V1RegistriesConf{})
+return copy.hasSetField()
+}
+
+// hasSetField returns true if config contains at least one configuration entry.
+// This is useful because of a subtlety of the behavior of the TOML decoder, where a missing array field
+// is not modified while unmarshaling (in our case remains to nil), while an [] is unmarshaled
+// as a non-nil []string{}.
+func (config *V1RegistriesConf) hasSetField() bool {
+return !reflect.DeepEqual(*config, V1RegistriesConf{})
}

// V2RegistriesConf is the sysregistries v2 configuration format.
@@ -257,7 +266,15 @@ func (config *V2RegistriesConf) Nonempty() bool {
if !copy.shortNameAliasConf.nonempty() {
copy.shortNameAliasConf = shortNameAliasConf{}
}
-return !reflect.DeepEqual(copy, V2RegistriesConf{})
+return copy.hasSetField()
+}
+
+// hasSetField returns true if config contains at least one configuration entry.
+// This is useful because of a subtlety of the behavior of the TOML decoder, where a missing array field
+// is not modified while unmarshaling (in our case remains to nil), while an [] is unmarshaled
+// as a non-nil []string{}.
+func (config *V2RegistriesConf) hasSetField() bool {
+return !reflect.DeepEqual(*config, V2RegistriesConf{})
}

// parsedConfig is the result of parsing, and possibly merging, configuration files;
@@ -923,15 +940,15 @@ func loadConfigFile(path string, forceV2 bool) (*parsedConfig, error) {
logrus.Debugf("Failed to decode keys %q from %q", keys, path)
}

-if combinedTOML.V1RegistriesConf.Nonempty() {
+if combinedTOML.V1RegistriesConf.hasSetField() {
// Enforce the v2 format if requested.
if forceV2 {
return nil, &InvalidRegistries{s: "registry must be in v2 format but is in v1"}
}
// Convert a v1 config into a v2 config.
-if combinedTOML.V2RegistriesConf.Nonempty() {
-return nil, &InvalidRegistries{s: "mixing sysregistry v1/v2 is not supported"}
+if combinedTOML.V2RegistriesConf.hasSetField() {
+return nil, &InvalidRegistries{s: fmt.Sprintf("mixing sysregistry v1/v2 is not supported: %#v", combinedTOML)}
}
converted, err := combinedTOML.V1RegistriesConf.ConvertToV2()
if err != nil {
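
hasSetField exists because the TOML decoder distinguishes a missing array (the field stays nil) from an explicitly empty one (decoded as a non-nil empty slice), exactly as the new comments say. A quick demonstration of that subtlety, assuming the BurntSushi/toml decoder this package uses:

```go
package main

import (
	"fmt"

	"github.com/BurntSushi/toml"
)

type conf struct {
	Registries []string `toml:"registries"`
}

func main() {
	var missing, empty conf
	toml.Decode(``, &missing)              // key absent: slice is left nil
	toml.Decode(`registries = []`, &empty) // explicit []: non-nil empty slice
	fmt.Println(missing.Registries == nil) // true
	fmt.Println(empty.Registries == nil)   // false
}
```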


@@ -2,6 +2,7 @@ package tlsclientconfig

import (
"crypto/tls"
+"crypto/x509"
"fmt"
"net"
"net/http"
@@ -10,8 +11,6 @@ import (
"strings"
"time"

-"github.com/docker/go-connections/sockets"
-"github.com/docker/go-connections/tlsconfig"
"github.com/sirupsen/logrus"
)
@@ -47,7 +46,7 @@ func SetupCertificates(dir string, tlsc *tls.Config) error {
return err
}
if tlsc.RootCAs == nil {
-systemPool, err := tlsconfig.SystemCertPool()
+systemPool, err := x509.SystemCertPool()
if err != nil {
return fmt.Errorf("unable to get system cert pool: %w", err)
}
@@ -103,8 +102,5 @@ func NewTransport() *http.Transport {
// TODO(dmcgowan): Call close idle connections when complete and use keep alive
DisableKeepAlives: true,
}
-if _, err := sockets.DialerFromEnvironment(direct); err != nil {
-logrus.Debugf("Can't execute DialerFromEnvironment: %v", err)
-}
return tr
}


@@ -89,14 +89,37 @@ func (s *storageImageSource) Close() error {
// The Digest field in BlobInfo is guaranteed to be provided, Size may be -1 and MediaType may be optionally provided.
// May update BlobInfoCache, preferably after it knows for certain that a blob truly exists at a specific location.
func (s *storageImageSource) GetBlob(ctx context.Context, info types.BlobInfo, cache types.BlobInfoCache) (rc io.ReadCloser, n int64, err error) {
-if info.Digest == image.GzippedEmptyLayerDigest {
+// We need a valid digest value.
+digest := info.Digest
+err = digest.Validate()
+if err != nil {
+return nil, 0, err
+}
+
+if digest == image.GzippedEmptyLayerDigest {
return io.NopCloser(bytes.NewReader(image.GzippedEmptyLayer)), int64(len(image.GzippedEmptyLayer)), nil
}

+// Check if the blob corresponds to a diff that was used to initialize any layers. Our
+// callers should try to retrieve layers using their uncompressed digests, so no need to
+// check if they're using one of the compressed digests, which we can't reproduce anyway.
+layers, _ := s.imageRef.transport.store.LayersByUncompressedDigest(digest)
+
+// If it's not a layer, then it must be a data item.
+if len(layers) == 0 {
+b, err := s.imageRef.transport.store.ImageBigData(s.image.ID, digest.String())
+if err != nil {
+return nil, 0, err
+}
+r := bytes.NewReader(b)
+logrus.Debugf("exporting opaque data as blob %q", digest.String())
+return io.NopCloser(r), int64(r.Len()), nil
+}
+
// NOTE: the blob is first written to a temporary file and subsequently
// closed. The intention is to keep the time we own the storage lock
// as short as possible to allow other processes to access the storage.
-rc, n, _, err = s.getBlobAndLayerID(info)
+rc, n, _, err = s.getBlobAndLayerID(digest, layers)
if err != nil {
return nil, 0, err
}
@@ -111,7 +134,7 @@ func (s *storageImageSource) GetBlob(ctx context.Context, info types.BlobInfo, c
return nil, 0, err
}

-if _, err := tmpFile.Seek(0, 0); err != nil {
+if _, err := tmpFile.Seek(0, io.SeekStart); err != nil {
return nil, 0, err
}
@@ -124,35 +147,16 @@ func (s *storageImageSource) GetBlob(ctx context.Context, info types.BlobInfo, c
}

// getBlobAndLayer reads the data blob or filesystem layer which matches the digest and size, if given.
-func (s *storageImageSource) getBlobAndLayerID(info types.BlobInfo) (rc io.ReadCloser, n int64, layerID string, err error) {
+func (s *storageImageSource) getBlobAndLayerID(digest digest.Digest, layers []storage.Layer) (rc io.ReadCloser, n int64, layerID string, err error) {
var layer storage.Layer
var diffOptions *storage.DiffOptions

-// We need a valid digest value.
-err = info.Digest.Validate()
-if err != nil {
-return nil, -1, "", err
-}
-// Check if the blob corresponds to a diff that was used to initialize any layers. Our
-// callers should try to retrieve layers using their uncompressed digests, so no need to
-// check if they're using one of the compressed digests, which we can't reproduce anyway.
-layers, _ := s.imageRef.transport.store.LayersByUncompressedDigest(info.Digest)
-
-// If it's not a layer, then it must be a data item.
-if len(layers) == 0 {
-b, err := s.imageRef.transport.store.ImageBigData(s.image.ID, info.Digest.String())
-if err != nil {
-return nil, -1, "", err
-}
-r := bytes.NewReader(b)
-logrus.Debugf("exporting opaque data as blob %q", info.Digest.String())
-return io.NopCloser(r), int64(r.Len()), "", nil
-}
-
// Step through the list of matching layers. Tests may want to verify that if we have multiple layers
// which claim to have the same contents, that we actually do have multiple layers, otherwise we could
// just go ahead and use the first one every time.
s.getBlobMutex.Lock()
-i := s.layerPosition[info.Digest]
-s.layerPosition[info.Digest] = i + 1
+i := s.layerPosition[digest]
+s.layerPosition[digest] = i + 1
s.getBlobMutex.Unlock()
if len(layers) > 0 {
layer = layers[i%len(layers)]
@@ -168,7 +172,7 @@ func (s *storageImageSource) getBlobAndLayerID
} else {
n = layer.UncompressedSize
}
-logrus.Debugf("exporting filesystem layer %q without compression for blob %q", layer.ID, info.Digest)
+logrus.Debugf("exporting filesystem layer %q without compression for blob %q", layer.ID, digest)
rc, err = s.imageRef.transport.store.Diff("", layer.ID, diffOptions)
if err != nil {
return nil, -1, "", err
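
Hoisting the Validate call matters because the digest is used below as a lookup key for layers and image big-data items. go-digest makes the check cheap; a hedged sketch of what it catches (the sample digest is the SHA-256 of the empty string):

```go
package main

import (
	"fmt"

	"github.com/opencontainers/go-digest"
)

func main() {
	good := digest.Digest("sha256:e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855")
	bad := digest.Digest("sha256:not-a-hash")
	fmt.Println(good.Validate()) // <nil>
	fmt.Println(bad.Validate())  // non-nil: malformed digest rejected up front
}
```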


@@ -125,6 +125,13 @@ type BlobInfo struct {
URLs []string
Annotations map[string]string
MediaType string
+
+// NOTE: The following fields contain desired _edits_ to blob infos.
+// Conceptually then don't belong in the BlobInfo object at all;
+// the edits should be provided specifically as parameters to the edit implementation.
+// We can't remove the fields without breaking compatibility, but don't
+// add any more.
+
// CompressionOperation is used in Image.UpdateLayerInfos to instruct
// whether the original layer's "compressed or not" should be preserved,
// possibly while changing the compression algorithm from one to another,
@@ -144,6 +151,7 @@ type BlobInfo struct {
// TODO: To remove together with CompressionOperation in re-design to
// remove field out out of BlobInfo.
CryptoOperation LayerCrypto
+// Before adding any fields to this struct, read the NOTE above.
}

// BICTransportScope encapsulates transport-dependent representation of a “scope” where blobs are or are not present.


@@ -58,6 +58,11 @@ func (c *RefCounter) incdec(path string, infoOp func(minfo *minfo)) int {
}
infoOp(m)
count := m.count
+if count <= 0 {
+// If the mounted path has been decremented enough have no references,
+// then its entry can be removed.
+delete(c.counts, path)
+}
c.mu.Unlock()
return count
}
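
Deleting the entry once the count reaches zero keeps the map from growing by one entry per mount path ever seen. The pattern in isolation, as a minimal sketch:

```go
package main

import "fmt"

type counter struct{ counts map[string]int }

// dec decrements path's count and drops the entry at zero, so the
// map does not leak an entry for every historical path.
func (c *counter) dec(path string) int {
	c.counts[path]--
	count := c.counts[path]
	if count <= 0 {
		delete(c.counts, path)
	}
	return count
}

func main() {
	c := &counter{counts: map[string]int{"/run/m1": 1}}
	fmt.Println(c.dec("/run/m1"), len(c.counts)) // 0 0
}
```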


@@ -1202,6 +1202,9 @@ func (d *Driver) Remove(id string) error {
if err := system.EnsureRemoveAll(dir); err != nil && !os.IsNotExist(err) {
return err
}
+if d.quotaCtl != nil {
+d.quotaCtl.ClearQuota(dir)
+}
return nil
}


@@ -211,6 +211,12 @@ func (q *Control) SetQuota(targetPath string, quota Quota) error {
return q.setProjectQuota(projectID, quota)
}

+// ClearQuota removes the map entry in the quotas map for targetPath.
+// It does so to prevent the map leaking entries as directories are deleted.
+func (q *Control) ClearQuota(targetPath string) {
+delete(q.quotas, targetPath)
+}
+
// setProjectQuota - set the quota for project id on xfs block device
func (q *Control) setProjectQuota(projectID uint32, quota Quota) error {
var d C.fs_disk_quota_t


@@ -57,12 +57,12 @@ func Init(base string, opt graphdriver.Options) (graphdriver.Driver, error) {
return nil, fmt.Errorf("the 'zfs' command is not available: %w", graphdriver.ErrPrerequisites)
}

-file, err := os.OpenFile("/dev/zfs", os.O_RDWR, 0600)
+file, err := unix.Open("/dev/zfs", unix.O_RDWR, 0600)
if err != nil {
logger.Debugf("cannot open /dev/zfs: %v", err)
return nil, fmt.Errorf("could not open /dev/zfs: %v: %w", err, graphdriver.ErrPrerequisites)
}
-defer file.Close()
+defer unix.Close(file)

options, err := parseOptions(opt.DriverOptions)
if err != nil {


@@ -299,6 +299,9 @@ type rwLayerStore interface {
// Clean up unreferenced layers
GarbageCollect() error
+
+// supportsShifting() returns true if the driver.Driver.SupportsShifting().
+supportsShifting() bool
}

type layerStore struct {
@@ -806,15 +809,14 @@ func (r *layerStore) saveLayers(saveLocations layerLocations) error {
if err != nil {
return err
}
-var opts *ioutils.AtomicFileWriterOptions
+opts := ioutils.AtomicFileWriterOptions{}
if location == volatileLayerLocation {
-opts = &ioutils.AtomicFileWriterOptions{
-NoSync: true,
-}
+opts.NoSync = true
}
-if err := ioutils.AtomicWriteFileWithOpts(rpath, jldata, 0600, opts); err != nil {
+if err := ioutils.AtomicWriteFileWithOpts(rpath, jldata, 0600, &opts); err != nil {
return err
}
+r.layerspathsModified[locationIndex] = opts.ModTime
}
lw, err := r.lockfile.RecordWrite()
if err != nil {
@@ -2234,6 +2236,10 @@ func (r *layerStore) LayersByUncompressedDigest(d digest.Digest) ([]Layer, error
return r.layersByDigestMap(r.byuncompressedsum, d)
}

+func (r *layerStore) supportsShifting() bool {
+return r.driver.SupportsShifting()
+}
+
func closeAll(closes ...func() error) (rErr error) {
for _, f := range closes {
if err := f(); err != nil {


@@ -4,6 +4,7 @@ import (
"io"
"os"
"path/filepath"
+"time"
)

// AtomicFileWriterOptions specifies options for creating the atomic file writer.
@@ -13,6 +14,9 @@ type AtomicFileWriterOptions struct {
// storage after it has been written and before it is moved to
// the specified path.
NoSync bool
+// On successful return from Close() this is set to the mtime of the
+// newly written file.
+ModTime time.Time
}

var defaultWriterOptions = AtomicFileWriterOptions{}
@@ -74,6 +78,11 @@ func AtomicWriteFileWithOpts(filename string, data []byte, perm os.FileMode, opt
if err1 := f.Close(); err == nil {
err = err1
}
+
+if opts != nil {
+opts.ModTime = f.modTime
+}
+
return err
}
@@ -87,6 +96,7 @@ type atomicFileWriter struct {
writeErr error
perm os.FileMode
noSync bool
+modTime time.Time
}

func (w *atomicFileWriter) Write(dt []byte) (int, error) {
@@ -109,9 +119,25 @@ func (w *atomicFileWriter) Close() (retErr error) {
return err
}
}
+
+// fstat before closing the fd
+info, statErr := w.f.Stat()
+if statErr == nil {
+w.modTime = info.ModTime()
+}
+
+// We delay error reporting until after the real call to close()
+// to match the traditional linux close() behaviour that an fd
+// is invalid (closed) even if close returns failure. While
+// weird, this allows a well defined way to not leak open fds.
if err := w.f.Close(); err != nil {
return err
}
+
+if statErr != nil {
+return statErr
+}
+
if err := os.Chmod(w.f.Name(), w.perm); err != nil {
return err
}
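
ModTime gives callers the post-write mtime without a second racy stat; the layers.go hunk above uses it to remember when each layers.json was written. A hedged usage sketch against the signature in this file (the path and payload are placeholders):

```go
package main

import (
	"fmt"

	"github.com/containers/storage/pkg/ioutils"
)

func main() {
	opts := ioutils.AtomicFileWriterOptions{NoSync: true}
	err := ioutils.AtomicWriteFileWithOpts("/tmp/example.json", []byte("{}"), 0o600, &opts)
	if err != nil {
		panic(err)
	}
	// After a successful close, ModTime holds the new file's mtime.
	fmt.Println(opts.ModTime)
}
```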

File diff suppressed because it is too large.

@@ -264,7 +264,7 @@ func (s *store) getAutoUserNS(options *types.AutoUserNsOptions, image *Image, rl
}
}
if s.autoNsMaxSize > 0 && size > s.autoNsMaxSize {
-return nil, nil, fmt.Errorf("the container needs a user namespace with size %q that is bigger than the maximum value allowed with userns=auto %q", size, s.autoNsMaxSize)
+return nil, nil, fmt.Errorf("the container needs a user namespace with size %v that is bigger than the maximum value allowed with userns=auto %v", size, s.autoNsMaxSize)
}
}


@@ -17,6 +17,11 @@ This package provides various compression algorithms.

# changelog

+* Oct 26, 2022 (v1.15.12)
+* zstd: Tweak decoder allocs. https://github.com/klauspost/compress/pull/680
+* gzhttp: Always delete `HeaderNoCompression` https://github.com/klauspost/compress/pull/683
+
* Sept 26, 2022 (v1.15.11)

* flate: Improve level 1-3 compression https://github.com/klauspost/compress/pull/678


@@ -365,29 +365,29 @@ func (s *Scratch) countSimple(in []byte) (max int, reuse bool) {
m := uint32(0)
if len(s.prevTable) > 0 {
for i, v := range s.count[:] {
+if v == 0 {
+continue
+}
if v > m {
m = v
}
-if v > 0 {
-s.symbolLen = uint16(i) + 1
-if i >= len(s.prevTable) {
-reuse = false
-} else {
-if s.prevTable[i].nBits == 0 {
-reuse = false
-}
-}
+s.symbolLen = uint16(i) + 1
+if i >= len(s.prevTable) {
+reuse = false
+} else if s.prevTable[i].nBits == 0 {
+reuse = false
}
}
return int(m), reuse
}
for i, v := range s.count[:] {
+if v == 0 {
+continue
+}
if v > m {
m = v
}
-if v > 0 {
-s.symbolLen = uint16(i) + 1
-}
+s.symbolLen = uint16(i) + 1
}
return int(m), false
}


@@ -82,8 +82,9 @@ type blockDec struct {
err error

-// Check against this crc
-checkCRC []byte
+// Check against this crc, if hasCRC is true.
+checkCRC uint32
+hasCRC bool

// Frame to use for singlethreaded decoding.
// Should not be used by the decoder itself since parent may be another frame.


@@ -4,7 +4,6 @@
package zstd

import (
-"bytes"
"encoding/binary"
"errors"
"io"
@@ -102,8 +101,8 @@ func (h *Header) Decode(in []byte) error {
}
h.HeaderSize += 4
b, in := in[:4], in[4:]
-if !bytes.Equal(b, frameMagic) {
-if !bytes.Equal(b[1:4], skippableFrameMagic) || b[0]&0xf0 != 0x50 {
+if string(b) != frameMagic {
+if string(b[1:4]) != skippableFrameMagic || b[0]&0xf0 != 0x50 {
return ErrMagicMismatch
}
if len(in) < 4 {
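
Swapping bytes.Equal for a string comparison works because the Go compiler does not allocate for a []byte-to-string conversion that is only used in a comparison, and it lets the magic live as an untyped string constant. A small sketch of the same idiom (the zstd frame magic is 0xFD2FB528, little-endian):

```go
package main

import "fmt"

const frameMagic = "\x28\xb5\x2f\xfd" // zstd frame magic, little-endian bytes

// isFrame reports whether b starts with the zstd frame magic.
func isFrame(b []byte) bool {
	// string(b[:4]) in a comparison is optimized by the compiler:
	// no allocation takes place.
	return len(b) >= 4 && string(b[:4]) == frameMagic
}

func main() {
	fmt.Println(isFrame([]byte{0x28, 0xb5, 0x2f, 0xfd, 0x00})) // true
}
```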


@@ -5,7 +5,6 @@
package zstd

import (
-"bytes"
"context"
"encoding/binary"
"io"
@@ -459,7 +458,11 @@ func (d *Decoder) nextBlock(blocking bool) (ok bool) {
println("got", len(d.current.b), "bytes, error:", d.current.err, "data crc:", tmp)
}

-if !d.o.ignoreChecksum && len(next.b) > 0 {
+if d.o.ignoreChecksum {
+return true
+}
+
+if len(next.b) > 0 {
n, err := d.current.crc.Write(next.b)
if err == nil {
if n != len(next.b) {
@@ -467,18 +470,16 @@ func (d *Decoder) nextBlock(blocking bool) (ok bool) {
}
}
}
-if next.err == nil && next.d != nil && len(next.d.checkCRC) != 0 {
-got := d.current.crc.Sum64()
-var tmp [4]byte
-binary.LittleEndian.PutUint32(tmp[:], uint32(got))
-if !d.o.ignoreChecksum && !bytes.Equal(tmp[:], next.d.checkCRC) {
+if next.err == nil && next.d != nil && next.d.hasCRC {
+got := uint32(d.current.crc.Sum64())
+if got != next.d.checkCRC {
if debugDecoder {
-println("CRC Check Failed:", tmp[:], " (got) !=", next.d.checkCRC, "(on stream)")
+printf("CRC Check Failed: %08x (got) != %08x (on stream)\n", got, next.d.checkCRC)
}
d.current.err = ErrCRCMismatch
} else {
if debugDecoder {
-println("CRC ok", tmp[:])
+printf("CRC ok %08x\n", got)
}
}
}
@@ -918,18 +919,22 @@ decodeStream:
println("next block returned error:", err)
}
dec.err = err
-dec.checkCRC = nil
+dec.hasCRC = false
if dec.Last && frame.HasCheckSum && err == nil {
crc, err := frame.rawInput.readSmall(4)
-if err != nil {
+if len(crc) < 4 {
+if err == nil {
+err = io.ErrUnexpectedEOF
+}
println("CRC missing?", err)
dec.err = err
-}
-var tmp [4]byte
-copy(tmp[:], crc)
-dec.checkCRC = tmp[:]
-if debugDecoder {
-println("found crc to check:", dec.checkCRC)
+} else {
+dec.checkCRC = binary.LittleEndian.Uint32(crc)
+dec.hasCRC = true
+if debugDecoder {
+printf("found crc to check: %08x\n", dec.checkCRC)
+}
}
}
err = dec.err


@@ -1,7 +1,6 @@
package zstd

import (
-"bytes"
"encoding/binary"
"errors"
"fmt"
@@ -20,7 +19,7 @@ type dict struct {
content []byte
}

-var dictMagic = [4]byte{0x37, 0xa4, 0x30, 0xec}
+const dictMagic = "\x37\xa4\x30\xec"

// ID returns the dictionary id or 0 if d is nil.
func (d *dict) ID() uint32 {
@@ -50,7 +49,7 @@ func loadDict(b []byte) (*dict, error) {
ofDec: sequenceDec{fse: &fseDecoder{}},
mlDec: sequenceDec{fse: &fseDecoder{}},
}
-if !bytes.Equal(b[:4], dictMagic[:]) {
+if string(b[:4]) != dictMagic {
return nil, ErrMagicMismatch
}
d.id = binary.LittleEndian.Uint32(b[4:8])


@@ -16,6 +16,7 @@ type fastBase struct {
cur int32
// maximum offset. Should be at least 2x block size.
maxMatchOff int32
+bufferReset int32
hist []byte
crc *xxhash.Digest
tmp [8]byte
@@ -56,8 +57,8 @@ func (e *fastBase) Block() *blockEnc {
}

func (e *fastBase) addBlock(src []byte) int32 {
-if debugAsserts && e.cur > bufferReset {
-panic(fmt.Sprintf("ecur (%d) > buffer reset (%d)", e.cur, bufferReset))
+if debugAsserts && e.cur > e.bufferReset {
+panic(fmt.Sprintf("ecur (%d) > buffer reset (%d)", e.cur, e.bufferReset))
}
// check if we have space already
if len(e.hist)+len(src) > cap(e.hist) {
@@ -126,24 +127,7 @@ func (e *fastBase) matchlen(s, t int32, src []byte) int32 {
panic(fmt.Sprintf("len(src)-s (%d) > maxCompressedBlockSize (%d)", len(src)-int(s), maxCompressedBlockSize))
}
}
-a := src[s:]
-b := src[t:]
-b = b[:len(a)]
-end := int32((len(a) >> 3) << 3)
-for i := int32(0); i < end; i += 8 {
-if diff := load6432(a, i) ^ load6432(b, i); diff != 0 {
-return i + int32(bits.TrailingZeros64(diff)>>3)
-}
-}
-
-a = a[end:]
-b = b[end:]
-for i := range a {
-if a[i] != b[i] {
-return int32(i) + end
-}
-}
-return int32(len(a)) + end
+return int32(matchLen(src[s:], src[t:]))
}

// Reset the encoding table.
@@ -171,7 +155,7 @@ func (e *fastBase) resetBase(d *dict, singleBlock bool) {
// We offset current position so everything will be out of reach.
// If above reset line, history will be purged.
-if e.cur < bufferReset {
+if e.cur < e.bufferReset {
e.cur += e.maxMatchOff + int32(len(e.hist))
}
e.hist = e.hist[:0]

View File

@ -85,14 +85,10 @@ func (e *bestFastEncoder) Encode(blk *blockEnc, src []byte) {
) )
// Protect against e.cur wraparound. // Protect against e.cur wraparound.
for e.cur >= bufferReset { for e.cur >= e.bufferReset-int32(len(e.hist)) {
if len(e.hist) == 0 { if len(e.hist) == 0 {
for i := range e.table[:] { e.table = [bestShortTableSize]prevEntry{}
e.table[i] = prevEntry{} e.longTable = [bestLongTableSize]prevEntry{}
}
for i := range e.longTable[:] {
e.longTable[i] = prevEntry{}
}
e.cur = e.maxMatchOff e.cur = e.maxMatchOff
break break
} }
@ -193,8 +189,8 @@ encodeLoop:
panic("offset0 was 0") panic("offset0 was 0")
} }
bestOf := func(a, b match) match { bestOf := func(a, b *match) *match {
if a.est+(a.s-b.s)*bitsPerByte>>10 < b.est+(b.s-a.s)*bitsPerByte>>10 { if a.est-b.est+(a.s-b.s)*bitsPerByte>>10 < 0 {
return a return a
} }
return b return b
@ -220,22 +216,26 @@ encodeLoop:
return m return m
} }
best := bestOf(matchAt(candidateL.offset-e.cur, s, uint32(cv), -1), matchAt(candidateL.prev-e.cur, s, uint32(cv), -1)) m1 := matchAt(candidateL.offset-e.cur, s, uint32(cv), -1)
best = bestOf(best, matchAt(candidateS.offset-e.cur, s, uint32(cv), -1)) m2 := matchAt(candidateL.prev-e.cur, s, uint32(cv), -1)
best = bestOf(best, matchAt(candidateS.prev-e.cur, s, uint32(cv), -1)) m3 := matchAt(candidateS.offset-e.cur, s, uint32(cv), -1)
m4 := matchAt(candidateS.prev-e.cur, s, uint32(cv), -1)
best := bestOf(bestOf(&m1, &m2), bestOf(&m3, &m4))
if canRepeat && best.length < goodEnough { if canRepeat && best.length < goodEnough {
cv32 := uint32(cv >> 8) cv32 := uint32(cv >> 8)
spp := s + 1 spp := s + 1
best = bestOf(best, matchAt(spp-offset1, spp, cv32, 1)) m1 := matchAt(spp-offset1, spp, cv32, 1)
best = bestOf(best, matchAt(spp-offset2, spp, cv32, 2)) m2 := matchAt(spp-offset2, spp, cv32, 2)
best = bestOf(best, matchAt(spp-offset3, spp, cv32, 3)) m3 := matchAt(spp-offset3, spp, cv32, 3)
best = bestOf(bestOf(best, &m1), bestOf(&m2, &m3))
if best.length > 0 { if best.length > 0 {
cv32 = uint32(cv >> 24) cv32 = uint32(cv >> 24)
spp += 2 spp += 2
best = bestOf(best, matchAt(spp-offset1, spp, cv32, 1)) m1 := matchAt(spp-offset1, spp, cv32, 1)
best = bestOf(best, matchAt(spp-offset2, spp, cv32, 2)) m2 := matchAt(spp-offset2, spp, cv32, 2)
best = bestOf(best, matchAt(spp-offset3, spp, cv32, 3)) m3 := matchAt(spp-offset3, spp, cv32, 3)
best = bestOf(bestOf(best, &m1), bestOf(&m2, &m3))
} }
} }
// Load next and check... // Load next and check...
@ -262,26 +262,33 @@ encodeLoop:
candidateL2 := e.longTable[hashLen(cv2, bestLongTableBits, bestLongLen)] candidateL2 := e.longTable[hashLen(cv2, bestLongTableBits, bestLongLen)]
// Short at s+1 // Short at s+1
best = bestOf(best, matchAt(candidateS.offset-e.cur, s, uint32(cv), -1)) m1 := matchAt(candidateS.offset-e.cur, s, uint32(cv), -1)
// Long at s+1, s+2 // Long at s+1, s+2
best = bestOf(best, matchAt(candidateL.offset-e.cur, s, uint32(cv), -1)) m2 := matchAt(candidateL.offset-e.cur, s, uint32(cv), -1)
best = bestOf(best, matchAt(candidateL.prev-e.cur, s, uint32(cv), -1)) m3 := matchAt(candidateL.prev-e.cur, s, uint32(cv), -1)
best = bestOf(best, matchAt(candidateL2.offset-e.cur, s+1, uint32(cv2), -1)) m4 := matchAt(candidateL2.offset-e.cur, s+1, uint32(cv2), -1)
best = bestOf(best, matchAt(candidateL2.prev-e.cur, s+1, uint32(cv2), -1)) m5 := matchAt(candidateL2.prev-e.cur, s+1, uint32(cv2), -1)
best = bestOf(bestOf(bestOf(best, &m1), &m2), bestOf(bestOf(&m3, &m4), &m5))
if false { if false {
// Short at s+3. // Short at s+3.
// Too often worse... // Too often worse...
best = bestOf(best, matchAt(e.table[hashLen(cv2>>8, bestShortTableBits, bestShortLen)].offset-e.cur, s+2, uint32(cv2>>8), -1)) m := matchAt(e.table[hashLen(cv2>>8, bestShortTableBits, bestShortLen)].offset-e.cur, s+2, uint32(cv2>>8), -1)
best = bestOf(best, &m)
} }
// See if we can find a better match by checking where the current best ends. // See if we can find a better match by checking where the current best ends.
// Use that offset to see if we can find a better full match. // Use that offset to see if we can find a better full match.
if sAt := best.s + best.length; sAt < sLimit { if sAt := best.s + best.length; sAt < sLimit {
nextHashL := hashLen(load6432(src, sAt), bestLongTableBits, bestLongLen) nextHashL := hashLen(load6432(src, sAt), bestLongTableBits, bestLongLen)
candidateEnd := e.longTable[nextHashL] candidateEnd := e.longTable[nextHashL]
if pos := candidateEnd.offset - e.cur - best.length; pos >= 0 { // Start check at a fixed offset to allow for a few mismatches.
bestEnd := bestOf(best, matchAt(pos, best.s, load3232(src, best.s), -1)) // For this compression level 2 yields the best results.
if pos := candidateEnd.prev - e.cur - best.length; pos >= 0 { const skipBeginning = 2
bestEnd = bestOf(bestEnd, matchAt(pos, best.s, load3232(src, best.s), -1)) if pos := candidateEnd.offset - e.cur - best.length + skipBeginning; pos >= 0 {
m := matchAt(pos, best.s+skipBeginning, load3232(src, best.s+skipBeginning), -1)
bestEnd := bestOf(best, &m)
if pos := candidateEnd.prev - e.cur - best.length + skipBeginning; pos >= 0 {
m := matchAt(pos, best.s+skipBeginning, load3232(src, best.s+skipBeginning), -1)
bestEnd = bestOf(bestEnd, &m)
} }
best = bestEnd best = bestEnd
} }
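
The rewritten bestOf also takes pointers so the match structs are no longer copied on every comparison, and it folds the two cost expressions into a single signed test. A standalone sketch of that comparison; the struct fields and the bitsPerByte value here are assumptions, the real definitions live in enc_best.go:

```go
// match approximates the encoder's candidate type (assumed field set).
type match struct {
	s      int32 // start position in the block
	length int32
	est    int32 // estimated bits needed to encode the match
}

const bitsPerByte = 8 // assumed scale; enc_best.go defines the real constant

// better prefers the candidate whose estimated cost, adjusted by a small
// penalty for starting later in the block, is lower. One subtraction
// replaces the two adjusted sums compared before this change.
func better(a, b *match) *match {
	if a.est-b.est+(a.s-b.s)*bitsPerByte>>10 < 0 {
		return a
	}
	return b
}
```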

View File

@ -62,14 +62,10 @@ func (e *betterFastEncoder) Encode(blk *blockEnc, src []byte) {
) )
// Protect against e.cur wraparound. // Protect against e.cur wraparound.
for e.cur >= bufferReset { for e.cur >= e.bufferReset-int32(len(e.hist)) {
if len(e.hist) == 0 { if len(e.hist) == 0 {
for i := range e.table[:] { e.table = [betterShortTableSize]tableEntry{}
e.table[i] = tableEntry{} e.longTable = [betterLongTableSize]prevEntry{}
}
for i := range e.longTable[:] {
e.longTable[i] = prevEntry{}
}
e.cur = e.maxMatchOff e.cur = e.maxMatchOff
break break
} }
@ -587,7 +583,7 @@ func (e *betterFastEncoderDict) Encode(blk *blockEnc, src []byte) {
) )
// Protect against e.cur wraparound. // Protect against e.cur wraparound.
for e.cur >= bufferReset { for e.cur >= e.bufferReset-int32(len(e.hist)) {
if len(e.hist) == 0 { if len(e.hist) == 0 {
for i := range e.table[:] { for i := range e.table[:] {
e.table[i] = tableEntry{} e.table[i] = tableEntry{}

View File

@ -44,14 +44,10 @@ func (e *doubleFastEncoder) Encode(blk *blockEnc, src []byte) {
) )
// Protect against e.cur wraparound. // Protect against e.cur wraparound.
for e.cur >= bufferReset { for e.cur >= e.bufferReset-int32(len(e.hist)) {
if len(e.hist) == 0 { if len(e.hist) == 0 {
for i := range e.table[:] { e.table = [dFastShortTableSize]tableEntry{}
e.table[i] = tableEntry{} e.longTable = [dFastLongTableSize]tableEntry{}
}
for i := range e.longTable[:] {
e.longTable[i] = tableEntry{}
}
e.cur = e.maxMatchOff e.cur = e.maxMatchOff
break break
} }
@ -388,7 +384,7 @@ func (e *doubleFastEncoder) EncodeNoHist(blk *blockEnc, src []byte) {
) )
// Protect against e.cur wraparound. // Protect against e.cur wraparound.
if e.cur >= bufferReset { if e.cur >= e.bufferReset {
for i := range e.table[:] { for i := range e.table[:] {
e.table[i] = tableEntry{} e.table[i] = tableEntry{}
} }
@ -685,7 +681,7 @@ encodeLoop:
} }
// We do not store history, so we must offset e.cur to avoid false matches for next user. // We do not store history, so we must offset e.cur to avoid false matches for next user.
if e.cur < bufferReset { if e.cur < e.bufferReset {
e.cur += int32(len(src)) e.cur += int32(len(src))
} }
} }
@ -700,7 +696,7 @@ func (e *doubleFastEncoderDict) Encode(blk *blockEnc, src []byte) {
) )
// Protect against e.cur wraparound. // Protect against e.cur wraparound.
for e.cur >= bufferReset { for e.cur >= e.bufferReset-int32(len(e.hist)) {
if len(e.hist) == 0 { if len(e.hist) == 0 {
for i := range e.table[:] { for i := range e.table[:] {
e.table[i] = tableEntry{} e.table[i] = tableEntry{}

View File

@ -43,7 +43,7 @@ func (e *fastEncoder) Encode(blk *blockEnc, src []byte) {
) )
// Protect against e.cur wraparound. // Protect against e.cur wraparound.
for e.cur >= bufferReset { for e.cur >= e.bufferReset-int32(len(e.hist)) {
if len(e.hist) == 0 { if len(e.hist) == 0 {
for i := range e.table[:] { for i := range e.table[:] {
e.table[i] = tableEntry{} e.table[i] = tableEntry{}
@ -310,7 +310,7 @@ func (e *fastEncoder) EncodeNoHist(blk *blockEnc, src []byte) {
} }
// Protect against e.cur wraparound. // Protect against e.cur wraparound.
if e.cur >= bufferReset { if e.cur >= e.bufferReset {
for i := range e.table[:] { for i := range e.table[:] {
e.table[i] = tableEntry{} e.table[i] = tableEntry{}
} }
@ -538,7 +538,7 @@ encodeLoop:
println("returning, recent offsets:", blk.recentOffsets, "extra literals:", blk.extraLits) println("returning, recent offsets:", blk.recentOffsets, "extra literals:", blk.extraLits)
} }
// We do not store history, so we must offset e.cur to avoid false matches for next user. // We do not store history, so we must offset e.cur to avoid false matches for next user.
if e.cur < bufferReset { if e.cur < e.bufferReset {
e.cur += int32(len(src)) e.cur += int32(len(src))
} }
} }
@ -555,11 +555,9 @@ func (e *fastEncoderDict) Encode(blk *blockEnc, src []byte) {
return return
} }
// Protect against e.cur wraparound. // Protect against e.cur wraparound.
for e.cur >= bufferReset { for e.cur >= e.bufferReset-int32(len(e.hist)) {
if len(e.hist) == 0 { if len(e.hist) == 0 {
for i := range e.table[:] { e.table = [tableSize]tableEntry{}
e.table[i] = tableEntry{}
}
e.cur = e.maxMatchOff e.cur = e.maxMatchOff
break break
} }
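
Several of the encoders above now clear their hash tables with a single array assignment instead of an element-by-element loop; the compiler lowers the zero-literal assignment to a bulk memory clear, so the shorter form is at least as fast. A standalone illustration:

```go
package main

import "fmt"

const tableSize = 1 << 10 // illustrative size

type tableEntry struct {
	val    uint32
	offset int32
}

var table [tableSize]tableEntry

// resetWithLoop is the pattern the diff removes.
func resetWithLoop() {
	for i := range table {
		table[i] = tableEntry{}
	}
}

// resetWithAssignment is the replacement: one statement, compiled to a
// bulk clear of the backing memory.
func resetWithAssignment() {
	table = [tableSize]tableEntry{}
}

func main() {
	table[3] = tableEntry{val: 7, offset: 42}
	resetWithAssignment()
	fmt.Println(table[3]) // {0 0}
	resetWithLoop() // same effect, more code
}
```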

View File

@ -8,6 +8,7 @@ import (
"crypto/rand" "crypto/rand"
"fmt" "fmt"
"io" "io"
"math"
rdebug "runtime/debug" rdebug "runtime/debug"
"sync" "sync"
@ -639,3 +640,37 @@ func (e *Encoder) EncodeAll(src, dst []byte) []byte {
} }
return dst return dst
} }
// MaxEncodedSize returns the expected maximum
// size of an encoded block or stream.
func (e *Encoder) MaxEncodedSize(size int) int {
frameHeader := 4 + 2 // magic + frame header & window descriptor
if e.o.dict != nil {
frameHeader += 4
}
// Frame content size:
if size < 256 {
frameHeader++
} else if size < 65536+256 {
frameHeader += 2
} else if size < math.MaxInt32 {
frameHeader += 4
} else {
frameHeader += 8
}
// Final crc
if e.o.crc {
frameHeader += 4
}
// Max overhead is 3 bytes/block.
// There cannot be 0 blocks.
blocks := (size + e.o.blockSize) / e.o.blockSize
// Combine, add padding.
maxSz := frameHeader + 3*blocks + size
if e.o.pad > 1 {
maxSz += calcSkippableFrame(int64(maxSz), int64(e.o.pad))
}
return maxSz
}
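
The new MaxEncodedSize lets callers pre-size destination buffers for EncodeAll so the hot path never reallocates. A small usage sketch:

```go
package main

import (
	"fmt"

	"github.com/klauspost/compress/zstd"
)

func main() {
	enc, err := zstd.NewWriter(nil) // nil writer: EncodeAll-only use
	if err != nil {
		panic(err)
	}
	defer enc.Close()

	src := make([]byte, 1<<20)
	// Capacity covers the worst case, so EncodeAll appends without growing.
	dst := make([]byte, 0, enc.MaxEncodedSize(len(src)))
	dst = enc.EncodeAll(src, dst)
	fmt.Println("compressed", len(src), "bytes into", len(dst))
}
```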

View File

@ -3,6 +3,7 @@ package zstd
import ( import (
"errors" "errors"
"fmt" "fmt"
"math"
"runtime" "runtime"
"strings" "strings"
) )
@ -47,22 +48,22 @@ func (o encoderOptions) encoder() encoder {
switch o.level { switch o.level {
case SpeedFastest: case SpeedFastest:
if o.dict != nil { if o.dict != nil {
return &fastEncoderDict{fastEncoder: fastEncoder{fastBase: fastBase{maxMatchOff: int32(o.windowSize), lowMem: o.lowMem}}} return &fastEncoderDict{fastEncoder: fastEncoder{fastBase: fastBase{maxMatchOff: int32(o.windowSize), bufferReset: math.MaxInt32 - int32(o.windowSize*2), lowMem: o.lowMem}}}
} }
return &fastEncoder{fastBase: fastBase{maxMatchOff: int32(o.windowSize), lowMem: o.lowMem}} return &fastEncoder{fastBase: fastBase{maxMatchOff: int32(o.windowSize), bufferReset: math.MaxInt32 - int32(o.windowSize*2), lowMem: o.lowMem}}
case SpeedDefault: case SpeedDefault:
if o.dict != nil { if o.dict != nil {
return &doubleFastEncoderDict{fastEncoderDict: fastEncoderDict{fastEncoder: fastEncoder{fastBase: fastBase{maxMatchOff: int32(o.windowSize), lowMem: o.lowMem}}}} return &doubleFastEncoderDict{fastEncoderDict: fastEncoderDict{fastEncoder: fastEncoder{fastBase: fastBase{maxMatchOff: int32(o.windowSize), bufferReset: math.MaxInt32 - int32(o.windowSize*2), lowMem: o.lowMem}}}}
} }
return &doubleFastEncoder{fastEncoder: fastEncoder{fastBase: fastBase{maxMatchOff: int32(o.windowSize), lowMem: o.lowMem}}} return &doubleFastEncoder{fastEncoder: fastEncoder{fastBase: fastBase{maxMatchOff: int32(o.windowSize), bufferReset: math.MaxInt32 - int32(o.windowSize*2), lowMem: o.lowMem}}}
case SpeedBetterCompression: case SpeedBetterCompression:
if o.dict != nil { if o.dict != nil {
return &betterFastEncoderDict{betterFastEncoder: betterFastEncoder{fastBase: fastBase{maxMatchOff: int32(o.windowSize), lowMem: o.lowMem}}} return &betterFastEncoderDict{betterFastEncoder: betterFastEncoder{fastBase: fastBase{maxMatchOff: int32(o.windowSize), bufferReset: math.MaxInt32 - int32(o.windowSize*2), lowMem: o.lowMem}}}
} }
return &betterFastEncoder{fastBase: fastBase{maxMatchOff: int32(o.windowSize), lowMem: o.lowMem}} return &betterFastEncoder{fastBase: fastBase{maxMatchOff: int32(o.windowSize), bufferReset: math.MaxInt32 - int32(o.windowSize*2), lowMem: o.lowMem}}
case SpeedBestCompression: case SpeedBestCompression:
return &bestFastEncoder{fastBase: fastBase{maxMatchOff: int32(o.windowSize), lowMem: o.lowMem}} return &bestFastEncoder{fastBase: fastBase{maxMatchOff: int32(o.windowSize), bufferReset: math.MaxInt32 - int32(o.windowSize*2), lowMem: o.lowMem}}
} }
panic("unknown compression level") panic("unknown compression level")
} }
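
bufferReset is no longer one package-wide constant tied to MaxWindowSize; each encoder now derives its threshold from its own window, keeping the int32 position counter clear of overflow while wasting less headroom on small windows. The arithmetic in isolation, with an assumed window size:

```go
package main

import (
	"fmt"
	"math"
)

func main() {
	const windowSize = 8 << 20 // assumed 8 MiB window

	// Two windows of headroom below MaxInt32 means cur plus any in-window
	// offset can never wrap a signed 32-bit value before the reset fires.
	bufferReset := int32(math.MaxInt32 - windowSize*2)
	fmt.Println(bufferReset) // 2130706431
}
```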

View File

@ -5,7 +5,7 @@
package zstd package zstd
import ( import (
"bytes" "encoding/binary"
"encoding/hex" "encoding/hex"
"errors" "errors"
"io" "io"
@ -43,9 +43,9 @@ const (
MaxWindowSize = 1 << 29 MaxWindowSize = 1 << 29
) )
var ( const (
frameMagic = []byte{0x28, 0xb5, 0x2f, 0xfd} frameMagic = "\x28\xb5\x2f\xfd"
skippableFrameMagic = []byte{0x2a, 0x4d, 0x18} skippableFrameMagic = "\x2a\x4d\x18"
) )
func newFrameDec(o decoderOptions) *frameDec { func newFrameDec(o decoderOptions) *frameDec {
@ -89,9 +89,9 @@ func (d *frameDec) reset(br byteBuffer) error {
copy(signature[1:], b) copy(signature[1:], b)
} }
if !bytes.Equal(signature[1:4], skippableFrameMagic) || signature[0]&0xf0 != 0x50 { if string(signature[1:4]) != skippableFrameMagic || signature[0]&0xf0 != 0x50 {
if debugDecoder { if debugDecoder {
println("Not skippable", hex.EncodeToString(signature[:]), hex.EncodeToString(skippableFrameMagic)) println("Not skippable", hex.EncodeToString(signature[:]), hex.EncodeToString([]byte(skippableFrameMagic)))
} }
// Break if not skippable frame. // Break if not skippable frame.
break break
@ -114,9 +114,9 @@ func (d *frameDec) reset(br byteBuffer) error {
return err return err
} }
} }
if !bytes.Equal(signature[:], frameMagic) { if string(signature[:]) != frameMagic {
if debugDecoder { if debugDecoder {
println("Got magic numbers: ", signature, "want:", frameMagic) println("Got magic numbers: ", signature, "want:", []byte(frameMagic))
} }
return ErrMagicMismatch return ErrMagicMismatch
} }
@ -305,7 +305,7 @@ func (d *frameDec) checkCRC() error {
} }
// We can overwrite upper tmp now // We can overwrite upper tmp now
want, err := d.rawInput.readSmall(4) buf, err := d.rawInput.readSmall(4)
if err != nil { if err != nil {
println("CRC missing?", err) println("CRC missing?", err)
return err return err
@ -315,22 +315,17 @@ func (d *frameDec) checkCRC() error {
return nil return nil
} }
var tmp [4]byte want := binary.LittleEndian.Uint32(buf[:4])
got := d.crc.Sum64() got := uint32(d.crc.Sum64())
// Flip to match file order.
tmp[0] = byte(got >> 0)
tmp[1] = byte(got >> 8)
tmp[2] = byte(got >> 16)
tmp[3] = byte(got >> 24)
if !bytes.Equal(tmp[:], want) { if got != want {
if debugDecoder { if debugDecoder {
println("CRC Check Failed:", tmp[:], "!=", want) printf("CRC check failed: got %08x, want %08x\n", got, want)
} }
return ErrCRCMismatch return ErrCRCMismatch
} }
if debugDecoder { if debugDecoder {
println("CRC ok", tmp[:]) printf("CRC ok %08x\n", got)
} }
return nil return nil
} }
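
Turning the magic numbers into string constants lets the hot-path checks be written as string(b[:4]) != frameMagic; the Go compiler recognizes a []byte-to-string conversion used only in a comparison and performs it without allocating. A sketch:

```go
package main

import "fmt"

// zstd frame magic, as declared in the diff above.
const frameMagic = "\x28\xb5\x2f\xfd"

func main() {
	header := []byte{0x28, 0xb5, 0x2f, 0xfd, 0x6f}
	// Compiles to a length check plus a memory compare; no temporary
	// string escapes to the heap.
	fmt.Println(string(header[:4]) == frameMagic) // true
}
```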

View File

@ -2,12 +2,7 @@
VENDORED: Go to [github.com/cespare/xxhash](https://github.com/cespare/xxhash) for original package. VENDORED: Go to [github.com/cespare/xxhash](https://github.com/cespare/xxhash) for original package.
xxhash is a Go implementation of the 64-bit [xxHash] algorithm, XXH64. This is a
[![GoDoc](https://godoc.org/github.com/cespare/xxhash?status.svg)](https://godoc.org/github.com/cespare/xxhash)
[![Build Status](https://travis-ci.org/cespare/xxhash.svg?branch=master)](https://travis-ci.org/cespare/xxhash)
xxhash is a Go implementation of the 64-bit
[xxHash](http://cyan4973.github.io/xxHash/) algorithm, XXH64. This is a
high-quality hashing algorithm that is much faster than anything in the Go high-quality hashing algorithm that is much faster than anything in the Go
standard library. standard library.
@ -28,31 +23,49 @@ func (*Digest) WriteString(string) (int, error)
func (*Digest) Sum64() uint64 func (*Digest) Sum64() uint64
``` ```
This implementation provides a fast pure-Go implementation and an even faster The package is written with optimized pure Go and also contains even faster
assembly implementation for amd64. assembly implementations for amd64 and arm64. If desired, the `purego` build tag
opts into using the Go code even on those architectures.
[xxHash]: http://cyan4973.github.io/xxHash/
## Compatibility
This package is in a module and the latest code is in version 2 of the module.
You need a version of Go with at least "minimal module compatibility" to use
github.com/cespare/xxhash/v2:
* 1.9.7+ for Go 1.9
* 1.10.3+ for Go 1.10
* Go 1.11 or later
I recommend using the latest release of Go.
## Benchmarks ## Benchmarks
Here are some quick benchmarks comparing the pure-Go and assembly Here are some quick benchmarks comparing the pure-Go and assembly
implementations of Sum64. implementations of Sum64.
| input size | purego | asm | | input size | purego | asm |
| --- | --- | --- | | ---------- | --------- | --------- |
| 5 B | 979.66 MB/s | 1291.17 MB/s | | 4 B | 1.3 GB/s | 1.2 GB/s |
| 100 B | 7475.26 MB/s | 7973.40 MB/s | | 16 B | 2.9 GB/s | 3.5 GB/s |
| 4 KB | 17573.46 MB/s | 17602.65 MB/s | | 100 B | 6.9 GB/s | 8.1 GB/s |
| 10 MB | 17131.46 MB/s | 17142.16 MB/s | | 4 KB | 11.7 GB/s | 16.7 GB/s |
| 10 MB | 12.0 GB/s | 17.3 GB/s |
These numbers were generated on Ubuntu 18.04 with an Intel i7-8700K CPU using These numbers were generated on Ubuntu 20.04 with an Intel Xeon Platinum 8252C
the following commands under Go 1.11.2: CPU using the following commands under Go 1.19.2:
``` ```
$ go test -tags purego -benchtime 10s -bench '/xxhash,direct,bytes' benchstat <(go test -tags purego -benchtime 500ms -count 15 -bench 'Sum64$')
$ go test -benchtime 10s -bench '/xxhash,direct,bytes' benchstat <(go test -benchtime 500ms -count 15 -bench 'Sum64$')
``` ```
## Projects using this package ## Projects using this package
- [InfluxDB](https://github.com/influxdata/influxdb) - [InfluxDB](https://github.com/influxdata/influxdb)
- [Prometheus](https://github.com/prometheus/prometheus) - [Prometheus](https://github.com/prometheus/prometheus)
- [VictoriaMetrics](https://github.com/VictoriaMetrics/VictoriaMetrics)
- [FreeCache](https://github.com/coocood/freecache) - [FreeCache](https://github.com/coocood/freecache)
- [FastCache](https://github.com/VictoriaMetrics/fastcache)

View File

@ -18,19 +18,11 @@ const (
prime5 uint64 = 2870177450012600261 prime5 uint64 = 2870177450012600261
) )
// NOTE(caleb): I'm using both consts and vars of the primes. Using consts where // Store the primes in an array as well.
// possible in the Go code is worth a small (but measurable) performance boost //
// by avoiding some MOVQs. Vars are needed for the asm and also are useful for // The consts are used when possible in Go code to avoid MOVs but we need a
// convenience in the Go code in a few places where we need to intentionally // contiguous array of the assembly code.
// avoid constant arithmetic (e.g., v1 := prime1 + prime2 fails because the var primes = [...]uint64{prime1, prime2, prime3, prime4, prime5}
// result overflows a uint64).
var (
prime1v = prime1
prime2v = prime2
prime3v = prime3
prime4v = prime4
prime5v = prime5
)
// Digest implements hash.Hash64. // Digest implements hash.Hash64.
type Digest struct { type Digest struct {
@ -52,10 +44,10 @@ func New() *Digest {
// Reset clears the Digest's state so that it can be reused. // Reset clears the Digest's state so that it can be reused.
func (d *Digest) Reset() { func (d *Digest) Reset() {
d.v1 = prime1v + prime2 d.v1 = primes[0] + prime2
d.v2 = prime2 d.v2 = prime2
d.v3 = 0 d.v3 = 0
d.v4 = -prime1v d.v4 = -primes[0]
d.total = 0 d.total = 0
d.n = 0 d.n = 0
} }
@ -71,21 +63,23 @@ func (d *Digest) Write(b []byte) (n int, err error) {
n = len(b) n = len(b)
d.total += uint64(n) d.total += uint64(n)
memleft := d.mem[d.n&(len(d.mem)-1):]
if d.n+n < 32 { if d.n+n < 32 {
// This new data doesn't even fill the current block. // This new data doesn't even fill the current block.
copy(d.mem[d.n:], b) copy(memleft, b)
d.n += n d.n += n
return return
} }
if d.n > 0 { if d.n > 0 {
// Finish off the partial block. // Finish off the partial block.
copy(d.mem[d.n:], b) c := copy(memleft, b)
d.v1 = round(d.v1, u64(d.mem[0:8])) d.v1 = round(d.v1, u64(d.mem[0:8]))
d.v2 = round(d.v2, u64(d.mem[8:16])) d.v2 = round(d.v2, u64(d.mem[8:16]))
d.v3 = round(d.v3, u64(d.mem[16:24])) d.v3 = round(d.v3, u64(d.mem[16:24]))
d.v4 = round(d.v4, u64(d.mem[24:32])) d.v4 = round(d.v4, u64(d.mem[24:32]))
b = b[32-d.n:] b = b[c:]
d.n = 0 d.n = 0
} }
@ -135,21 +129,20 @@ func (d *Digest) Sum64() uint64 {
h += d.total h += d.total
i, end := 0, d.n b := d.mem[:d.n&(len(d.mem)-1)]
for ; i+8 <= end; i += 8 { for ; len(b) >= 8; b = b[8:] {
k1 := round(0, u64(d.mem[i:i+8])) k1 := round(0, u64(b[:8]))
h ^= k1 h ^= k1
h = rol27(h)*prime1 + prime4 h = rol27(h)*prime1 + prime4
} }
if i+4 <= end { if len(b) >= 4 {
h ^= uint64(u32(d.mem[i:i+4])) * prime1 h ^= uint64(u32(b[:4])) * prime1
h = rol23(h)*prime2 + prime3 h = rol23(h)*prime2 + prime3
i += 4 b = b[4:]
} }
for i < end { for ; len(b) > 0; b = b[1:] {
h ^= uint64(d.mem[i]) * prime5 h ^= uint64(b[0]) * prime5
h = rol11(h) * prime1 h = rol11(h) * prime1
i++
} }
h ^= h >> 33 h ^= h >> 33
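
The Write and Sum64 rewrites above only restructure the buffering; chunked writes still hash identically to the one-shot helpers. For example:

```go
package main

import (
	"fmt"

	"github.com/cespare/xxhash/v2"
)

func main() {
	d := xxhash.New()
	d.WriteString("hello ") // Digest writes never return an error
	d.WriteString("world")

	streamed := d.Sum64()
	oneShot := xxhash.Sum64String("hello world")
	fmt.Printf("%016x %016x %v\n", streamed, oneShot, streamed == oneShot)
}
```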

View File

@ -1,3 +1,4 @@
//go:build !appengine && gc && !purego && !noasm
// +build !appengine // +build !appengine
// +build gc // +build gc
// +build !purego // +build !purego
@ -5,212 +6,205 @@
#include "textflag.h" #include "textflag.h"
// Register allocation: // Registers:
// AX h #define h AX
// SI pointer to advance through b #define d AX
// DX n #define p SI // pointer to advance through b
// BX loop end #define n DX
// R8 v1, k1 #define end BX // loop end
// R9 v2 #define v1 R8
// R10 v3 #define v2 R9
// R11 v4 #define v3 R10
// R12 tmp #define v4 R11
// R13 prime1v #define x R12
// R14 prime2v #define prime1 R13
// DI prime4v #define prime2 R14
#define prime4 DI
// round reads from and advances the buffer pointer in SI. #define round(acc, x) \
// It assumes that R13 has prime1v and R14 has prime2v. IMULQ prime2, x \
#define round(r) \ ADDQ x, acc \
MOVQ (SI), R12 \ ROLQ $31, acc \
ADDQ $8, SI \ IMULQ prime1, acc
IMULQ R14, R12 \
ADDQ R12, r \
ROLQ $31, r \
IMULQ R13, r
// mergeRound applies a merge round on the two registers acc and val. // round0 performs the operation x = round(0, x).
// It assumes that R13 has prime1v, R14 has prime2v, and DI has prime4v. #define round0(x) \
#define mergeRound(acc, val) \ IMULQ prime2, x \
IMULQ R14, val \ ROLQ $31, x \
ROLQ $31, val \ IMULQ prime1, x
IMULQ R13, val \
XORQ val, acc \ // mergeRound applies a merge round on the two registers acc and x.
IMULQ R13, acc \ // It assumes that prime1, prime2, and prime4 have been loaded.
ADDQ DI, acc #define mergeRound(acc, x) \
round0(x) \
XORQ x, acc \
IMULQ prime1, acc \
ADDQ prime4, acc
// blockLoop processes as many 32-byte blocks as possible,
// updating v1, v2, v3, and v4. It assumes that there is at least one block
// to process.
#define blockLoop() \
loop: \
MOVQ +0(p), x \
round(v1, x) \
MOVQ +8(p), x \
round(v2, x) \
MOVQ +16(p), x \
round(v3, x) \
MOVQ +24(p), x \
round(v4, x) \
ADDQ $32, p \
CMPQ p, end \
JLE loop
// func Sum64(b []byte) uint64 // func Sum64(b []byte) uint64
TEXT ·Sum64(SB), NOSPLIT, $0-32 TEXT ·Sum64(SB), NOSPLIT|NOFRAME, $0-32
// Load fixed primes. // Load fixed primes.
MOVQ ·prime1v(SB), R13 MOVQ ·primes+0(SB), prime1
MOVQ ·prime2v(SB), R14 MOVQ ·primes+8(SB), prime2
MOVQ ·prime4v(SB), DI MOVQ ·primes+24(SB), prime4
// Load slice. // Load slice.
MOVQ b_base+0(FP), SI MOVQ b_base+0(FP), p
MOVQ b_len+8(FP), DX MOVQ b_len+8(FP), n
LEAQ (SI)(DX*1), BX LEAQ (p)(n*1), end
// The first loop limit will be len(b)-32. // The first loop limit will be len(b)-32.
SUBQ $32, BX SUBQ $32, end
// Check whether we have at least one block. // Check whether we have at least one block.
CMPQ DX, $32 CMPQ n, $32
JLT noBlocks JLT noBlocks
// Set up initial state (v1, v2, v3, v4). // Set up initial state (v1, v2, v3, v4).
MOVQ R13, R8 MOVQ prime1, v1
ADDQ R14, R8 ADDQ prime2, v1
MOVQ R14, R9 MOVQ prime2, v2
XORQ R10, R10 XORQ v3, v3
XORQ R11, R11 XORQ v4, v4
SUBQ R13, R11 SUBQ prime1, v4
// Loop until SI > BX. blockLoop()
blockLoop:
round(R8)
round(R9)
round(R10)
round(R11)
CMPQ SI, BX MOVQ v1, h
JLE blockLoop ROLQ $1, h
MOVQ v2, x
ROLQ $7, x
ADDQ x, h
MOVQ v3, x
ROLQ $12, x
ADDQ x, h
MOVQ v4, x
ROLQ $18, x
ADDQ x, h
MOVQ R8, AX mergeRound(h, v1)
ROLQ $1, AX mergeRound(h, v2)
MOVQ R9, R12 mergeRound(h, v3)
ROLQ $7, R12 mergeRound(h, v4)
ADDQ R12, AX
MOVQ R10, R12
ROLQ $12, R12
ADDQ R12, AX
MOVQ R11, R12
ROLQ $18, R12
ADDQ R12, AX
mergeRound(AX, R8)
mergeRound(AX, R9)
mergeRound(AX, R10)
mergeRound(AX, R11)
JMP afterBlocks JMP afterBlocks
noBlocks: noBlocks:
MOVQ ·prime5v(SB), AX MOVQ ·primes+32(SB), h
afterBlocks: afterBlocks:
ADDQ DX, AX ADDQ n, h
// Right now BX has len(b)-32, and we want to loop until SI > len(b)-8. ADDQ $24, end
ADDQ $24, BX CMPQ p, end
JG try4
CMPQ SI, BX loop8:
JG fourByte MOVQ (p), x
ADDQ $8, p
round0(x)
XORQ x, h
ROLQ $27, h
IMULQ prime1, h
ADDQ prime4, h
wordLoop: CMPQ p, end
// Calculate k1. JLE loop8
MOVQ (SI), R8
ADDQ $8, SI
IMULQ R14, R8
ROLQ $31, R8
IMULQ R13, R8
XORQ R8, AX try4:
ROLQ $27, AX ADDQ $4, end
IMULQ R13, AX CMPQ p, end
ADDQ DI, AX JG try1
CMPQ SI, BX MOVL (p), x
JLE wordLoop ADDQ $4, p
IMULQ prime1, x
XORQ x, h
fourByte: ROLQ $23, h
ADDQ $4, BX IMULQ prime2, h
CMPQ SI, BX ADDQ ·primes+16(SB), h
JG singles
MOVL (SI), R8 try1:
ADDQ $4, SI ADDQ $4, end
IMULQ R13, R8 CMPQ p, end
XORQ R8, AX
ROLQ $23, AX
IMULQ R14, AX
ADDQ ·prime3v(SB), AX
singles:
ADDQ $4, BX
CMPQ SI, BX
JGE finalize JGE finalize
singlesLoop: loop1:
MOVBQZX (SI), R12 MOVBQZX (p), x
ADDQ $1, SI ADDQ $1, p
IMULQ ·prime5v(SB), R12 IMULQ ·primes+32(SB), x
XORQ R12, AX XORQ x, h
ROLQ $11, h
IMULQ prime1, h
ROLQ $11, AX CMPQ p, end
IMULQ R13, AX JL loop1
CMPQ SI, BX
JL singlesLoop
finalize: finalize:
MOVQ AX, R12 MOVQ h, x
SHRQ $33, R12 SHRQ $33, x
XORQ R12, AX XORQ x, h
IMULQ R14, AX IMULQ prime2, h
MOVQ AX, R12 MOVQ h, x
SHRQ $29, R12 SHRQ $29, x
XORQ R12, AX XORQ x, h
IMULQ ·prime3v(SB), AX IMULQ ·primes+16(SB), h
MOVQ AX, R12 MOVQ h, x
SHRQ $32, R12 SHRQ $32, x
XORQ R12, AX XORQ x, h
MOVQ AX, ret+24(FP) MOVQ h, ret+24(FP)
RET RET
// writeBlocks uses the same registers as above except that it uses AX to store
// the d pointer.
// func writeBlocks(d *Digest, b []byte) int // func writeBlocks(d *Digest, b []byte) int
TEXT ·writeBlocks(SB), NOSPLIT, $0-40 TEXT ·writeBlocks(SB), NOSPLIT|NOFRAME, $0-40
// Load fixed primes needed for round. // Load fixed primes needed for round.
MOVQ ·prime1v(SB), R13 MOVQ ·primes+0(SB), prime1
MOVQ ·prime2v(SB), R14 MOVQ ·primes+8(SB), prime2
// Load slice. // Load slice.
MOVQ b_base+8(FP), SI MOVQ b_base+8(FP), p
MOVQ b_len+16(FP), DX MOVQ b_len+16(FP), n
LEAQ (SI)(DX*1), BX LEAQ (p)(n*1), end
SUBQ $32, BX SUBQ $32, end
// Load vN from d. // Load vN from d.
MOVQ d+0(FP), AX MOVQ s+0(FP), d
MOVQ 0(AX), R8 // v1 MOVQ 0(d), v1
MOVQ 8(AX), R9 // v2 MOVQ 8(d), v2
MOVQ 16(AX), R10 // v3 MOVQ 16(d), v3
MOVQ 24(AX), R11 // v4 MOVQ 24(d), v4
// We don't need to check the loop condition here; this function is // We don't need to check the loop condition here; this function is
// always called with at least one block of data to process. // always called with at least one block of data to process.
blockLoop: blockLoop()
round(R8)
round(R9)
round(R10)
round(R11)
CMPQ SI, BX
JLE blockLoop
// Copy vN back to d. // Copy vN back to d.
MOVQ R8, 0(AX) MOVQ v1, 0(d)
MOVQ R9, 8(AX) MOVQ v2, 8(d)
MOVQ R10, 16(AX) MOVQ v3, 16(d)
MOVQ R11, 24(AX) MOVQ v4, 24(d)
// The number of bytes written is SI minus the old base pointer. // The number of bytes written is p minus the old base pointer.
SUBQ b_base+8(FP), SI SUBQ b_base+8(FP), p
MOVQ SI, ret+32(FP) MOVQ p, ret+32(FP)
RET RET

View File

@ -1,13 +1,17 @@
// +build gc,!purego,!noasm //go:build !appengine && gc && !purego && !noasm
// +build !appengine
// +build gc
// +build !purego
// +build !noasm
#include "textflag.h" #include "textflag.h"
// Register allocation. // Registers:
#define digest R1 #define digest R1
#define h R2 // Return value. #define h R2 // return value
#define p R3 // Input pointer. #define p R3 // input pointer
#define len R4 #define n R4 // input length
#define nblocks R5 // len / 32. #define nblocks R5 // n / 32
#define prime1 R7 #define prime1 R7
#define prime2 R8 #define prime2 R8
#define prime3 R9 #define prime3 R9
@ -25,60 +29,52 @@
#define round(acc, x) \ #define round(acc, x) \
MADD prime2, acc, x, acc \ MADD prime2, acc, x, acc \
ROR $64-31, acc \ ROR $64-31, acc \
MUL prime1, acc \ MUL prime1, acc
// x = round(0, x). // round0 performs the operation x = round(0, x).
#define round0(x) \ #define round0(x) \
MUL prime2, x \ MUL prime2, x \
ROR $64-31, x \ ROR $64-31, x \
MUL prime1, x \ MUL prime1, x
#define mergeRound(x) \ #define mergeRound(acc, x) \
round0(x) \ round0(x) \
EOR x, h \ EOR x, acc \
MADD h, prime4, prime1, h \ MADD acc, prime4, prime1, acc
// Update v[1-4] with 32-byte blocks. Assumes len >= 32. // blockLoop processes as many 32-byte blocks as possible,
#define blocksLoop() \ // updating v1, v2, v3, and v4. It assumes that n >= 32.
LSR $5, len, nblocks \ #define blockLoop() \
PCALIGN $16 \ LSR $5, n, nblocks \
loop: \ PCALIGN $16 \
LDP.P 32(p), (x1, x2) \ loop: \
round(v1, x1) \ LDP.P 16(p), (x1, x2) \
LDP -16(p), (x3, x4) \ LDP.P 16(p), (x3, x4) \
round(v2, x2) \ round(v1, x1) \
SUB $1, nblocks \ round(v2, x2) \
round(v3, x3) \ round(v3, x3) \
round(v4, x4) \ round(v4, x4) \
CBNZ nblocks, loop \ SUB $1, nblocks \
CBNZ nblocks, loop
// The primes are repeated here to ensure that they're stored
// in a contiguous array, so we can load them with LDP.
DATA primes<> +0(SB)/8, $11400714785074694791
DATA primes<> +8(SB)/8, $14029467366897019727
DATA primes<>+16(SB)/8, $1609587929392839161
DATA primes<>+24(SB)/8, $9650029242287828579
DATA primes<>+32(SB)/8, $2870177450012600261
GLOBL primes<>(SB), NOPTR+RODATA, $40
// func Sum64(b []byte) uint64 // func Sum64(b []byte) uint64
TEXT ·Sum64(SB), NOFRAME+NOSPLIT, $0-32 TEXT ·Sum64(SB), NOSPLIT|NOFRAME, $0-32
LDP b_base+0(FP), (p, len) LDP b_base+0(FP), (p, n)
LDP primes<> +0(SB), (prime1, prime2) LDP ·primes+0(SB), (prime1, prime2)
LDP primes<>+16(SB), (prime3, prime4) LDP ·primes+16(SB), (prime3, prime4)
MOVD primes<>+32(SB), prime5 MOVD ·primes+32(SB), prime5
CMP $32, len CMP $32, n
CSEL LO, prime5, ZR, h // if len < 32 { h = prime5 } else { h = 0 } CSEL LT, prime5, ZR, h // if n < 32 { h = prime5 } else { h = 0 }
BLO afterLoop BLT afterLoop
ADD prime1, prime2, v1 ADD prime1, prime2, v1
MOVD prime2, v2 MOVD prime2, v2
MOVD $0, v3 MOVD $0, v3
NEG prime1, v4 NEG prime1, v4
blocksLoop() blockLoop()
ROR $64-1, v1, x1 ROR $64-1, v1, x1
ROR $64-7, v2, x2 ROR $64-7, v2, x2
@ -88,71 +84,75 @@ TEXT ·Sum64(SB), NOFRAME+NOSPLIT, $0-32
ADD x3, x4 ADD x3, x4
ADD x2, x4, h ADD x2, x4, h
mergeRound(v1) mergeRound(h, v1)
mergeRound(v2) mergeRound(h, v2)
mergeRound(v3) mergeRound(h, v3)
mergeRound(v4) mergeRound(h, v4)
afterLoop: afterLoop:
ADD len, h ADD n, h
TBZ $4, len, try8 TBZ $4, n, try8
LDP.P 16(p), (x1, x2) LDP.P 16(p), (x1, x2)
round0(x1) round0(x1)
// NOTE: here and below, sequencing the EOR after the ROR (using a
// rotated register) is worth a small but measurable speedup for small
// inputs.
ROR $64-27, h ROR $64-27, h
EOR x1 @> 64-27, h, h EOR x1 @> 64-27, h, h
MADD h, prime4, prime1, h MADD h, prime4, prime1, h
round0(x2) round0(x2)
ROR $64-27, h ROR $64-27, h
EOR x2 @> 64-27, h EOR x2 @> 64-27, h, h
MADD h, prime4, prime1, h MADD h, prime4, prime1, h
try8: try8:
TBZ $3, len, try4 TBZ $3, n, try4
MOVD.P 8(p), x1 MOVD.P 8(p), x1
round0(x1) round0(x1)
ROR $64-27, h ROR $64-27, h
EOR x1 @> 64-27, h EOR x1 @> 64-27, h, h
MADD h, prime4, prime1, h MADD h, prime4, prime1, h
try4: try4:
TBZ $2, len, try2 TBZ $2, n, try2
MOVWU.P 4(p), x2 MOVWU.P 4(p), x2
MUL prime1, x2 MUL prime1, x2
ROR $64-23, h ROR $64-23, h
EOR x2 @> 64-23, h EOR x2 @> 64-23, h, h
MADD h, prime3, prime2, h MADD h, prime3, prime2, h
try2: try2:
TBZ $1, len, try1 TBZ $1, n, try1
MOVHU.P 2(p), x3 MOVHU.P 2(p), x3
AND $255, x3, x1 AND $255, x3, x1
LSR $8, x3, x2 LSR $8, x3, x2
MUL prime5, x1 MUL prime5, x1
ROR $64-11, h ROR $64-11, h
EOR x1 @> 64-11, h EOR x1 @> 64-11, h, h
MUL prime1, h MUL prime1, h
MUL prime5, x2 MUL prime5, x2
ROR $64-11, h ROR $64-11, h
EOR x2 @> 64-11, h EOR x2 @> 64-11, h, h
MUL prime1, h MUL prime1, h
try1: try1:
TBZ $0, len, end TBZ $0, n, finalize
MOVBU (p), x4 MOVBU (p), x4
MUL prime5, x4 MUL prime5, x4
ROR $64-11, h ROR $64-11, h
EOR x4 @> 64-11, h EOR x4 @> 64-11, h, h
MUL prime1, h MUL prime1, h
end: finalize:
EOR h >> 33, h EOR h >> 33, h
MUL prime2, h MUL prime2, h
EOR h >> 29, h EOR h >> 29, h
@ -163,24 +163,22 @@ end:
RET RET
// func writeBlocks(d *Digest, b []byte) int // func writeBlocks(d *Digest, b []byte) int
// TEXT ·writeBlocks(SB), NOSPLIT|NOFRAME, $0-40
// Assumes len(b) >= 32. LDP ·primes+0(SB), (prime1, prime2)
TEXT ·writeBlocks(SB), NOFRAME+NOSPLIT, $0-40
LDP primes<>(SB), (prime1, prime2)
// Load state. Assume v[1-4] are stored contiguously. // Load state. Assume v[1-4] are stored contiguously.
MOVD d+0(FP), digest MOVD d+0(FP), digest
LDP 0(digest), (v1, v2) LDP 0(digest), (v1, v2)
LDP 16(digest), (v3, v4) LDP 16(digest), (v3, v4)
LDP b_base+8(FP), (p, len) LDP b_base+8(FP), (p, n)
blocksLoop() blockLoop()
// Store updated state. // Store updated state.
STP (v1, v2), 0(digest) STP (v1, v2), 0(digest)
STP (v3, v4), 16(digest) STP (v3, v4), 16(digest)
BIC $31, len BIC $31, n
MOVD len, ret+32(FP) MOVD n, ret+32(FP)
RET RET

View File

@ -13,4 +13,4 @@ package xxhash
func Sum64(b []byte) uint64 func Sum64(b []byte) uint64
//go:noescape //go:noescape
func writeBlocks(d *Digest, b []byte) int func writeBlocks(s *Digest, b []byte) int

View File

@ -15,10 +15,10 @@ func Sum64(b []byte) uint64 {
var h uint64 var h uint64
if n >= 32 { if n >= 32 {
v1 := prime1v + prime2 v1 := primes[0] + prime2
v2 := prime2 v2 := prime2
v3 := uint64(0) v3 := uint64(0)
v4 := -prime1v v4 := -primes[0]
for len(b) >= 32 { for len(b) >= 32 {
v1 = round(v1, u64(b[0:8:len(b)])) v1 = round(v1, u64(b[0:8:len(b)]))
v2 = round(v2, u64(b[8:16:len(b)])) v2 = round(v2, u64(b[8:16:len(b)]))
@ -37,19 +37,18 @@ func Sum64(b []byte) uint64 {
h += uint64(n) h += uint64(n)
i, end := 0, len(b) for ; len(b) >= 8; b = b[8:] {
for ; i+8 <= end; i += 8 { k1 := round(0, u64(b[:8]))
k1 := round(0, u64(b[i:i+8:len(b)]))
h ^= k1 h ^= k1
h = rol27(h)*prime1 + prime4 h = rol27(h)*prime1 + prime4
} }
if i+4 <= end { if len(b) >= 4 {
h ^= uint64(u32(b[i:i+4:len(b)])) * prime1 h ^= uint64(u32(b[:4])) * prime1
h = rol23(h)*prime2 + prime3 h = rol23(h)*prime2 + prime3
i += 4 b = b[4:]
} }
for ; i < end; i++ { for ; len(b) > 0; b = b[1:] {
h ^= uint64(b[i]) * prime5 h ^= uint64(b[0]) * prime5
h = rol11(h) * prime1 h = rol11(h) * prime1
} }

View File

@ -36,9 +36,6 @@ const forcePreDef = false
// zstdMinMatch is the minimum zstd match length. // zstdMinMatch is the minimum zstd match length.
const zstdMinMatch = 3 const zstdMinMatch = 3
// Reset the buffer offset when reaching this.
const bufferReset = math.MaxInt32 - MaxWindowSize
// fcsUnknown is used for unknown frame content size. // fcsUnknown is used for unknown frame content size.
const fcsUnknown = math.MaxUint64 const fcsUnknown = math.MaxUint64
@ -110,26 +107,25 @@ func printf(format string, a ...interface{}) {
} }
} }
// matchLen returns the maximum length. // matchLen returns the maximum common prefix length of a and b.
// a must be the shortest of the two. // a must be the shortest of the two.
// The function also returns whether all bytes matched. func matchLen(a, b []byte) (n int) {
func matchLen(a, b []byte) int { for ; len(a) >= 8 && len(b) >= 8; a, b = a[8:], b[8:] {
b = b[:len(a)] diff := binary.LittleEndian.Uint64(a) ^ binary.LittleEndian.Uint64(b)
for i := 0; i < len(a)-7; i += 8 { if diff != 0 {
if diff := load64(a, i) ^ load64(b, i); diff != 0 { return n + bits.TrailingZeros64(diff)>>3
return i + (bits.TrailingZeros64(diff) >> 3)
} }
n += 8
} }
checked := (len(a) >> 3) << 3
a = a[checked:]
b = b[checked:]
for i := range a { for i := range a {
if a[i] != b[i] { if a[i] != b[i] {
return i + checked break
} }
n++
} }
return len(a) + checked return n
} }
func load3232(b []byte, i int32) uint32 { func load3232(b []byte, i int32) uint32 {
@ -140,10 +136,6 @@ func load6432(b []byte, i int32) uint64 {
return binary.LittleEndian.Uint64(b[i:]) return binary.LittleEndian.Uint64(b[i:])
} }
func load64(b []byte, i int) uint64 {
return binary.LittleEndian.Uint64(b[i:])
}
type byter interface { type byter interface {
Bytes() []byte Bytes() []byte
Len() int Len() int
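
The new matchLen consumes both slices eight bytes at a time and finds the first differing byte from the trailing zeros of the XOR; note the documented precondition that a is the shorter slice. A runnable copy of the idea:

```go
package main

import (
	"encoding/binary"
	"fmt"
	"math/bits"
)

// matchLen returns the length of the common prefix of a and b.
// As documented in the diff above, a must be the shorter slice.
func matchLen(a, b []byte) (n int) {
	for ; len(a) >= 8 && len(b) >= 8; a, b = a[8:], b[8:] {
		diff := binary.LittleEndian.Uint64(a) ^ binary.LittleEndian.Uint64(b)
		if diff != 0 {
			// First differing byte index = trailing zero bits / 8.
			return n + bits.TrailingZeros64(diff)>>3
		}
		n += 8
	}
	for i := range a { // tail: fewer than 8 bytes remain in a
		if a[i] != b[i] {
			break
		}
		n++
	}
	return n
}

func main() {
	fmt.Println(matchLen([]byte("compact"), []byte("compressed"))) // 4
}
```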

View File

@ -0,0 +1,149 @@
// Copyright 2022 The Sigstore Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package cryptoutils
import (
"crypto/x509"
"crypto/x509/pkix"
"encoding/asn1"
"errors"
"fmt"
)
var (
// OIDOtherName is the OID for the OtherName SAN per RFC 5280
OIDOtherName = asn1.ObjectIdentifier{1, 3, 6, 1, 4, 1, 57264, 1, 7}
// SANOID is the OID for Subject Alternative Name per RFC 5280
SANOID = asn1.ObjectIdentifier{2, 5, 29, 17}
)
// OtherName describes a name related to a certificate which is not in one
// of the standard name formats. RFC 5280, 4.2.1.6:
//
// OtherName ::= SEQUENCE {
// type-id OBJECT IDENTIFIER,
// value [0] EXPLICIT ANY DEFINED BY type-id }
//
// OtherName for Fulcio-issued certificates only supports UTF-8 strings as values.
type OtherName struct {
ID asn1.ObjectIdentifier
Value string `asn1:"utf8,explicit,tag:0"`
}
// MarshalOtherNameSAN creates a Subject Alternative Name extension
// with an OtherName sequence. RFC 5280, 4.2.1.6:
//
// SubjectAltName ::= GeneralNames
// GeneralNames ::= SEQUENCE SIZE (1..MAX) OF GeneralName
// GeneralName ::= CHOICE {
//
// otherName [0] OtherName,
// ... }
func MarshalOtherNameSAN(name string, critical bool) (*pkix.Extension, error) {
o := OtherName{
ID: OIDOtherName,
Value: name,
}
bytes, err := asn1.MarshalWithParams(o, "tag:0")
if err != nil {
return nil, err
}
sans, err := asn1.Marshal([]asn1.RawValue{{FullBytes: bytes}})
if err != nil {
return nil, err
}
return &pkix.Extension{
Id: SANOID,
Critical: critical,
Value: sans,
}, nil
}
// UnmarshalOtherNameSAN extracts a UTF-8 string from the OtherName
// field in the Subject Alternative Name extension.
func UnmarshalOtherNameSAN(exts []pkix.Extension) (string, error) {
var otherNames []string
for _, e := range exts {
if !e.Id.Equal(SANOID) {
continue
}
var seq asn1.RawValue
rest, err := asn1.Unmarshal(e.Value, &seq)
if err != nil {
return "", err
} else if len(rest) != 0 {
return "", fmt.Errorf("trailing data after X.509 extension")
}
if !seq.IsCompound || seq.Tag != asn1.TagSequence || seq.Class != asn1.ClassUniversal {
return "", asn1.StructuralError{Msg: "bad SAN sequence"}
}
rest = seq.Bytes
for len(rest) > 0 {
var v asn1.RawValue
rest, err = asn1.Unmarshal(rest, &v)
if err != nil {
return "", err
}
// skip all GeneralName fields except OtherName
if v.Tag != 0 {
continue
}
var other OtherName
if _, err := asn1.UnmarshalWithParams(v.FullBytes, &other, "tag:0"); err != nil {
return "", fmt.Errorf("could not parse requested OtherName SAN: %w", err)
}
if !other.ID.Equal(OIDOtherName) {
return "", fmt.Errorf("unexpected OID for OtherName, expected %v, got %v", OIDOtherName, other.ID)
}
otherNames = append(otherNames, other.Value)
}
}
if len(otherNames) == 0 {
return "", errors.New("no OtherName found")
}
if len(otherNames) != 1 {
return "", errors.New("expected only one OtherName")
}
return otherNames[0], nil
}
// GetSubjectAlternateNames extracts all subject alternative names from
// the certificate, including email addresses, DNS names, IP addresses, URIs,
// and OtherName SANs.
func GetSubjectAlternateNames(cert *x509.Certificate) []string {
sans := []string{}
sans = append(sans, cert.DNSNames...)
sans = append(sans, cert.EmailAddresses...)
for _, ip := range cert.IPAddresses {
sans = append(sans, ip.String())
}
for _, uri := range cert.URIs {
sans = append(sans, uri.String())
}
// ignore error if there's no OtherName SAN
otherName, _ := UnmarshalOtherNameSAN(cert.Extensions)
if len(otherName) > 0 {
sans = append(sans, otherName)
}
return sans
}
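
The two helpers round-trip: an issuer can attach an OtherName SAN to an extension list and a verifier can recover the value. A minimal sketch against the public package path:

```go
package main

import (
	"crypto/x509/pkix"
	"fmt"

	"github.com/sigstore/sigstore/pkg/cryptoutils"
)

func main() {
	ext, err := cryptoutils.MarshalOtherNameSAN("user@example.com", true /* critical */)
	if err != nil {
		panic(err)
	}

	name, err := cryptoutils.UnmarshalOtherNameSAN([]pkix.Extension{*ext})
	if err != nil {
		panic(err)
	}
	fmt.Println(name) // user@example.com
}
```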

View File

@ -76,7 +76,6 @@ import (
"go/format" "go/format"
"go/token" "go/token"
"go/types" "go/types"
"io/ioutil"
"log" "log"
"os" "os"
"path/filepath" "path/filepath"
@ -166,7 +165,7 @@ func main() {
baseName := fmt.Sprintf("%s_string.go", types[0]) baseName := fmt.Sprintf("%s_string.go", types[0])
outputName = filepath.Join(dir, strings.ToLower(baseName)) outputName = filepath.Join(dir, strings.ToLower(baseName))
} }
err := ioutil.WriteFile(outputName, src, 0644) err := os.WriteFile(outputName, src, 0644)
if err != nil { if err != nil {
log.Fatalf("writing output: %s", err) log.Fatalf("writing output: %s", err)
} }

View File

@ -30,7 +30,7 @@ import (
"io/ioutil" "io/ioutil"
"os/exec" "os/exec"
"golang.org/x/tools/go/internal/gcimporter" "golang.org/x/tools/internal/gcimporter"
) )
// Find returns the name of an object (.o) or archive (.a) file // Find returns the name of an object (.o) or archive (.a) file

View File

@ -604,17 +604,12 @@ func (state *golistState) createDriverResponse(words ...string) (*driverResponse
// Work around https://golang.org/issue/28749: // Work around https://golang.org/issue/28749:
// cmd/go puts assembly, C, and C++ files in CompiledGoFiles. // cmd/go puts assembly, C, and C++ files in CompiledGoFiles.
// Filter out any elements of CompiledGoFiles that are also in OtherFiles. // Remove files from CompiledGoFiles that are non-go files
// We have to keep this workaround in place until go1.12 is a distant memory. // (or are not files that look like they are from the cache).
if len(pkg.OtherFiles) > 0 { if len(pkg.CompiledGoFiles) > 0 {
other := make(map[string]bool, len(pkg.OtherFiles))
for _, f := range pkg.OtherFiles {
other[f] = true
}
out := pkg.CompiledGoFiles[:0] out := pkg.CompiledGoFiles[:0]
for _, f := range pkg.CompiledGoFiles { for _, f := range pkg.CompiledGoFiles {
if other[f] { if ext := filepath.Ext(f); ext != ".go" && ext != "" { // ext == "" means the file is from the cache, so probably cgo-processed file
continue continue
} }
out = append(out, f) out = append(out, f)

View File

@ -303,6 +303,9 @@ type Package struct {
// of the package, or while parsing or type-checking its files. // of the package, or while parsing or type-checking its files.
Errors []Error Errors []Error
// TypeErrors contains the subset of errors produced during type checking.
TypeErrors []types.Error
// GoFiles lists the absolute file paths of the package's Go source files. // GoFiles lists the absolute file paths of the package's Go source files.
GoFiles []string GoFiles []string
@ -911,6 +914,7 @@ func (ld *loader) loadPackage(lpkg *loaderPackage) {
case types.Error: case types.Error:
// from type checker // from type checker
lpkg.TypeErrors = append(lpkg.TypeErrors, err)
errs = append(errs, Error{ errs = append(errs, Error{
Pos: err.Fset.Position(err.Pos).String(), Pos: err.Fset.Position(err.Pos).String(),
Msg: err.Msg, Msg: err.Msg,
@ -1017,7 +1021,7 @@ func (ld *loader) loadPackage(lpkg *loaderPackage) {
tc := &types.Config{ tc := &types.Config{
Importer: importer, Importer: importer,
// Type-check bodies of functions only in non-initial packages. // Type-check bodies of functions only in initial packages.
// Example: for import graph A->B->C and initial packages {A,C}, // Example: for import graph A->B->C and initial packages {A,C},
// we can ignore function bodies in B. // we can ignore function bodies in B.
IgnoreFuncBodies: ld.Mode&NeedDeps == 0 && !lpkg.initial, IgnoreFuncBodies: ld.Mode&NeedDeps == 0 && !lpkg.initial,
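
The new TypeErrors field hands tools the type-checker diagnostics already separated from the mixed Errors slice. A sketch of a caller:

```go
package main

import (
	"fmt"

	"golang.org/x/tools/go/packages"
)

func main() {
	cfg := &packages.Config{
		// Type information must be requested for TypeErrors to be populated.
		Mode: packages.NeedName | packages.NeedSyntax | packages.NeedTypes | packages.NeedTypesInfo,
	}
	pkgs, err := packages.Load(cfg, "./...")
	if err != nil {
		panic(err)
	}
	for _, p := range pkgs {
		for _, te := range p.TypeErrors { // only errors produced by go/types
			fmt.Printf("%s: %s\n", te.Fset.Position(te.Pos), te.Msg)
		}
	}
}
```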

View File

@ -12,7 +12,6 @@ import (
"bytes" "bytes"
"encoding/binary" "encoding/binary"
"fmt" "fmt"
"go/ast"
"go/constant" "go/constant"
"go/token" "go/token"
"go/types" "go/types"
@ -145,7 +144,7 @@ func BExportData(fset *token.FileSet, pkg *types.Package) (b []byte, err error)
objcount := 0 objcount := 0
scope := pkg.Scope() scope := pkg.Scope()
for _, name := range scope.Names() { for _, name := range scope.Names() {
if !ast.IsExported(name) { if !token.IsExported(name) {
continue continue
} }
if trace { if trace {
@ -482,7 +481,7 @@ func (p *exporter) method(m *types.Func) {
p.pos(m) p.pos(m)
p.string(m.Name()) p.string(m.Name())
if m.Name() != "_" && !ast.IsExported(m.Name()) { if m.Name() != "_" && !token.IsExported(m.Name()) {
p.pkg(m.Pkg(), false) p.pkg(m.Pkg(), false)
} }
@ -501,7 +500,7 @@ func (p *exporter) fieldName(f *types.Var) {
// 3) field name doesn't match base type name (alias name) // 3) field name doesn't match base type name (alias name)
bname := basetypeName(f.Type()) bname := basetypeName(f.Type())
if name == bname { if name == bname {
if ast.IsExported(name) { if token.IsExported(name) {
name = "" // 1) we don't need to know the field name or package name = "" // 1) we don't need to know the field name or package
} else { } else {
name = "?" // 2) use unexported name "?" to force package export name = "?" // 2) use unexported name "?" to force package export
@ -514,7 +513,7 @@ func (p *exporter) fieldName(f *types.Var) {
} }
p.string(name) p.string(name)
if name != "" && !ast.IsExported(name) { if name != "" && !token.IsExported(name) {
p.pkg(f.Pkg(), false) p.pkg(f.Pkg(), false)
} }
} }

View File

@ -9,10 +9,11 @@
// Package gcimporter provides various functions for reading // Package gcimporter provides various functions for reading
// gc-generated object files that can be used to implement the // gc-generated object files that can be used to implement the
// Importer interface defined by the Go 1.5 standard library package. // Importer interface defined by the Go 1.5 standard library package.
package gcimporter // import "golang.org/x/tools/go/internal/gcimporter" package gcimporter // import "golang.org/x/tools/internal/gcimporter"
import ( import (
"bufio" "bufio"
"bytes"
"errors" "errors"
"fmt" "fmt"
"go/build" "go/build"
@ -22,10 +23,12 @@ import (
"io" "io"
"io/ioutil" "io/ioutil"
"os" "os"
"os/exec"
"path/filepath" "path/filepath"
"sort" "sort"
"strconv" "strconv"
"strings" "strings"
"sync"
"text/scanner" "text/scanner"
) )
@ -38,6 +41,47 @@ const (
trace = false trace = false
) )
var exportMap sync.Map // package dir → func() (string, bool)
// lookupGorootExport returns the location of the export data
// (normally found in the build cache, but located in GOROOT/pkg
// in prior Go releases) for the package located in pkgDir.
//
// (We use the package's directory instead of its import path
// mainly to simplify handling of the packages in src/vendor
// and cmd/vendor.)
func lookupGorootExport(pkgDir string) (string, bool) {
f, ok := exportMap.Load(pkgDir)
if !ok {
var (
listOnce sync.Once
exportPath string
)
f, _ = exportMap.LoadOrStore(pkgDir, func() (string, bool) {
listOnce.Do(func() {
cmd := exec.Command("go", "list", "-export", "-f", "{{.Export}}", pkgDir)
cmd.Dir = build.Default.GOROOT
var output []byte
output, err := cmd.Output()
if err != nil {
return
}
exports := strings.Split(string(bytes.TrimSpace(output)), "\n")
if len(exports) != 1 {
return
}
exportPath = exports[0]
})
return exportPath, exportPath != ""
})
}
return f.(func() (string, bool))()
}
var pkgExts = [...]string{".a", ".o"} var pkgExts = [...]string{".a", ".o"}
// FindPkg returns the filename and unique package id for an import // FindPkg returns the filename and unique package id for an import
@ -60,11 +104,18 @@ func FindPkg(path, srcDir string) (filename, id string) {
} }
bp, _ := build.Import(path, srcDir, build.FindOnly|build.AllowBinary) bp, _ := build.Import(path, srcDir, build.FindOnly|build.AllowBinary)
if bp.PkgObj == "" { if bp.PkgObj == "" {
id = path // make sure we have an id to print in error message var ok bool
return if bp.Goroot && bp.Dir != "" {
filename, ok = lookupGorootExport(bp.Dir)
}
if !ok {
id = path // make sure we have an id to print in error message
return
}
} else {
noext = strings.TrimSuffix(bp.PkgObj, ".a")
id = bp.ImportPath
} }
noext = strings.TrimSuffix(bp.PkgObj, ".a")
id = bp.ImportPath
case build.IsLocalImport(path): case build.IsLocalImport(path):
// "./x" -> "/this/directory/x.ext", "/this/directory/x" // "./x" -> "/this/directory/x.ext", "/this/directory/x"
@ -85,6 +136,12 @@ func FindPkg(path, srcDir string) (filename, id string) {
} }
} }
if filename != "" {
if f, err := os.Stat(filename); err == nil && !f.IsDir() {
return
}
}
// try extensions // try extensions
for _, ext := range pkgExts { for _, ext := range pkgExts {
filename = noext + ext filename = noext + ext
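
The lookup above shells out to the go command because recent Go releases no longer ship pre-built .a files under GOROOT/pkg; export data for standard-library packages lives in the build cache. The same query works from a shell (the printed path varies per machine):

```
$ cd $(go env GOROOT) && go list -export -f '{{.Export}}' fmt
```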

View File

@ -12,7 +12,6 @@ import (
"bytes" "bytes"
"encoding/binary" "encoding/binary"
"fmt" "fmt"
"go/ast"
"go/constant" "go/constant"
"go/token" "go/token"
"go/types" "go/types"
@ -26,6 +25,41 @@ import (
"golang.org/x/tools/internal/typeparams" "golang.org/x/tools/internal/typeparams"
) )
// IExportShallow encodes "shallow" export data for the specified package.
//
// No promises are made about the encoding other than that it can be
// decoded by the same version of IImportShallow. If you plan to save
// export data in the file system, be sure to include a cryptographic
// digest of the executable in the key to avoid version skew.
func IExportShallow(fset *token.FileSet, pkg *types.Package) ([]byte, error) {
// In principle this operation can only fail if out.Write fails,
// but that's impossible for bytes.Buffer---and as a matter of
// fact iexportCommon doesn't even check for I/O errors.
// TODO(adonovan): handle I/O errors properly.
// TODO(adonovan): use byte slices throughout, avoiding copying.
const bundle, shallow = false, true
var out bytes.Buffer
err := iexportCommon(&out, fset, bundle, shallow, iexportVersion, []*types.Package{pkg})
return out.Bytes(), err
}
// IImportShallow decodes "shallow" types.Package data encoded by IExportShallow
// in the same executable. This function cannot import data from
// cmd/compile or gcexportdata.Write.
func IImportShallow(fset *token.FileSet, imports map[string]*types.Package, data []byte, path string, insert InsertType) (*types.Package, error) {
const bundle = false
pkgs, err := iimportCommon(fset, imports, data, bundle, path, insert)
if err != nil {
return nil, err
}
return pkgs[0], nil
}
// InsertType is the type of a function that creates a types.TypeName
// object for a named type and inserts it into the scope of the
// specified Package.
type InsertType = func(pkg *types.Package, name string)
// Current bundled export format version. Increase with each format change. // Current bundled export format version. Increase with each format change.
// 0: initial implementation // 0: initial implementation
const bundleVersion = 0 const bundleVersion = 0
@ -36,15 +70,17 @@ const bundleVersion = 0
// The package path of the top-level package will not be recorded, // The package path of the top-level package will not be recorded,
// so that calls to IImportData can override with a provided package path. // so that calls to IImportData can override with a provided package path.
func IExportData(out io.Writer, fset *token.FileSet, pkg *types.Package) error { func IExportData(out io.Writer, fset *token.FileSet, pkg *types.Package) error {
return iexportCommon(out, fset, false, iexportVersion, []*types.Package{pkg}) const bundle, shallow = false, false
return iexportCommon(out, fset, bundle, shallow, iexportVersion, []*types.Package{pkg})
} }
// IExportBundle writes an indexed export bundle for pkgs to out. // IExportBundle writes an indexed export bundle for pkgs to out.
func IExportBundle(out io.Writer, fset *token.FileSet, pkgs []*types.Package) error { func IExportBundle(out io.Writer, fset *token.FileSet, pkgs []*types.Package) error {
return iexportCommon(out, fset, true, iexportVersion, pkgs) const bundle, shallow = true, false
return iexportCommon(out, fset, bundle, shallow, iexportVersion, pkgs)
} }
func iexportCommon(out io.Writer, fset *token.FileSet, bundle bool, version int, pkgs []*types.Package) (err error) { func iexportCommon(out io.Writer, fset *token.FileSet, bundle, shallow bool, version int, pkgs []*types.Package) (err error) {
if !debug { if !debug {
defer func() { defer func() {
if e := recover(); e != nil { if e := recover(); e != nil {
@ -61,6 +97,7 @@ func iexportCommon(out io.Writer, fset *token.FileSet, bundle bool, version int,
p := iexporter{ p := iexporter{
fset: fset, fset: fset,
version: version, version: version,
shallow: shallow,
allPkgs: map[*types.Package]bool{}, allPkgs: map[*types.Package]bool{},
stringIndex: map[string]uint64{}, stringIndex: map[string]uint64{},
declIndex: map[types.Object]uint64{}, declIndex: map[types.Object]uint64{},
@ -82,7 +119,7 @@ func iexportCommon(out io.Writer, fset *token.FileSet, bundle bool, version int,
for _, pkg := range pkgs { for _, pkg := range pkgs {
scope := pkg.Scope() scope := pkg.Scope()
for _, name := range scope.Names() { for _, name := range scope.Names() {
if ast.IsExported(name) { if token.IsExported(name) {
p.pushDecl(scope.Lookup(name)) p.pushDecl(scope.Lookup(name))
} }
} }
@ -205,7 +242,8 @@ type iexporter struct {
out *bytes.Buffer out *bytes.Buffer
version int version int
localpkg *types.Package shallow bool // don't put types from other packages in the index
localpkg *types.Package // (nil in bundle mode)
// allPkgs tracks all packages that have been referenced by // allPkgs tracks all packages that have been referenced by
// the export data, so we can ensure to include them in the // the export data, so we can ensure to include them in the
@ -256,6 +294,11 @@ func (p *iexporter) pushDecl(obj types.Object) {
panic("cannot export package unsafe") panic("cannot export package unsafe")
} }
// Shallow export data: don't index decls from other packages.
if p.shallow && obj.Pkg() != p.localpkg {
return
}
if _, ok := p.declIndex[obj]; ok {
return
}
@@ -497,7 +540,7 @@ func (w *exportWriter) pkg(pkg *types.Package) {
w.string(w.exportPath(pkg))
}
func (w *exportWriter) qualifiedIdent(obj types.Object) {
func (w *exportWriter) qualifiedType(obj *types.TypeName) {
name := w.p.exportName(obj)
// Ensure any referenced declarations are written out too.
@@ -556,11 +599,11 @@ func (w *exportWriter) doTyp(t types.Type, pkg *types.Package) {
return
}
w.startType(definedType)
w.qualifiedIdent(t.Obj())
w.qualifiedType(t.Obj())
case *typeparams.TypeParam:
w.startType(typeParamType)
w.qualifiedIdent(t.Obj())
w.qualifiedType(t.Obj())
case *types.Pointer:
w.startType(pointerType)
@@ -602,14 +645,17 @@ func (w *exportWriter) doTyp(t types.Type, pkg *types.Package) {
case *types.Struct:
w.startType(structType)
w.setPkg(pkg, true)
n := t.NumFields()
if n > 0 {
w.setPkg(t.Field(0).Pkg(), true) // qualifying package for field objects
} else {
w.setPkg(pkg, true)
}
w.uint64(uint64(n))
for i := 0; i < n; i++ {
f := t.Field(i)
w.pos(f.Pos())
w.string(f.Name())
w.string(f.Name()) // unexported fields implicitly qualified by prior setPkg
w.typ(f.Type(), pkg)
w.bool(f.Anonymous())
w.string(t.Tag(i)) // note (or tag)

View File

@@ -85,7 +85,7 @@ const (
// If the export data version is not recognized or the format is otherwise
// compromised, an error is returned.
func IImportData(fset *token.FileSet, imports map[string]*types.Package, data []byte, path string) (int, *types.Package, error) {
pkgs, err := iimportCommon(fset, imports, data, false, path)
pkgs, err := iimportCommon(fset, imports, data, false, path, nil)
if err != nil {
return 0, nil, err
}
@@ -94,10 +94,10 @@ func IImportData(fset *token.FileSet, imports map[string]*types.Package, data []
// IImportBundle imports a set of packages from the serialized package bundle.
func IImportBundle(fset *token.FileSet, imports map[string]*types.Package, data []byte) ([]*types.Package, error) {
return iimportCommon(fset, imports, data, true, "")
return iimportCommon(fset, imports, data, true, "", nil)
}
func iimportCommon(fset *token.FileSet, imports map[string]*types.Package, data []byte, bundle bool, path string) (pkgs []*types.Package, err error) {
func iimportCommon(fset *token.FileSet, imports map[string]*types.Package, data []byte, bundle bool, path string, insert InsertType) (pkgs []*types.Package, err error) {
const currentVersion = iexportVersionCurrent
version := int64(-1)
if !debug {
@@ -147,6 +147,7 @@ func iimportCommon(fset *token.FileSet, imports map[string]*types.Package, data
p := iimporter{
version: int(version),
ipath: path,
insert: insert,
stringData: stringData,
stringCache: make(map[uint64]string),
@@ -187,11 +188,18 @@ func iimportCommon(fset *token.FileSet, imports map[string]*types.Package, data
} else if pkg.Name() != pkgName {
errorf("conflicting names %s and %s for package %q", pkg.Name(), pkgName, path)
}
if i == 0 && !bundle {
p.localpkg = pkg
}
p.pkgCache[pkgPathOff] = pkg
// Read index for package.
nameIndex := make(map[string]uint64)
for nSyms := r.uint64(); nSyms > 0; nSyms-- {
nSyms := r.uint64()
// In shallow mode we don't expect an index for other packages.
assert(nSyms == 0 || p.localpkg == pkg || p.insert == nil)
for ; nSyms > 0; nSyms-- {
name := p.stringAt(r.uint64())
nameIndex[name] = r.uint64()
}
@@ -267,6 +275,9 @@ type iimporter struct {
version int
ipath string
localpkg *types.Package
insert func(pkg *types.Package, name string) // "shallow" mode only
stringData []byte
stringCache map[uint64]string
pkgCache map[uint64]*types.Package
@@ -310,6 +321,13 @@ func (p *iimporter) doDecl(pkg *types.Package, name string) {
off, ok := p.pkgIndex[pkg][name]
if !ok {
// In "shallow" mode, call back to the application to
// find the object and insert it into the package scope.
if p.insert != nil {
assert(pkg != p.localpkg)
p.insert(pkg, name) // "can't fail"
return
}
errorf("%v.%v not in index", pkg, name) errorf("%v.%v not in index", pkg, name)
} }

View File

@@ -21,3 +21,17 @@ func additionalPredeclared() []types.Type {
types.Universe.Lookup("any").Type(),
}
}
// See cmd/compile/internal/types.SplitVargenSuffix.
func splitVargenSuffix(name string) (base, suffix string) {
i := len(name)
for i > 0 && name[i-1] >= '0' && name[i-1] <= '9' {
i--
}
const dot = "·"
if i >= len(dot) && name[i-len(dot):i] == dot {
i -= len(dot)
return name[:i], name[i:]
}
return name, ""
}
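Dropping the helper into a scratch program shows the split; the `·N` suffix marks compiler-generated names for local types promoted to package scope (#55110):

```
package main

import "fmt"

func splitVargenSuffix(name string) (base, suffix string) { // copied from the diff above
	i := len(name)
	for i > 0 && name[i-1] >= '0' && name[i-1] <= '9' {
		i--
	}
	const dot = "·"
	if i >= len(dot) && name[i-len(dot):i] == dot {
		i -= len(dot)
		return name[:i], name[i:]
	}
	return name, ""
}

func main() {
	fmt.Println(splitVargenSuffix("T·3")) // T ·3
	fmt.Println(splitVargenSuffix("x1"))  // x1  (digits alone don't split)
}
```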

View File

@@ -14,7 +14,7 @@ import (
"go/types"
"strings"
"golang.org/x/tools/go/internal/pkgbits"
"golang.org/x/tools/internal/pkgbits"
)
// A pkgReader holds the shared state for reading a unified IR package
@@ -158,6 +158,17 @@ func (pr *pkgReader) newReader(k pkgbits.RelocKind, idx pkgbits.Index, marker pk
}
}
func (pr *pkgReader) tempReader(k pkgbits.RelocKind, idx pkgbits.Index, marker pkgbits.SyncMarker) *reader {
return &reader{
Decoder: pr.TempDecoder(k, idx, marker),
p: pr,
}
}
func (pr *pkgReader) retireReader(r *reader) {
pr.RetireDecoder(&r.Decoder)
}
// @@@ Positions
func (r *reader) pos() token.Pos {
@@ -182,26 +193,29 @@ func (pr *pkgReader) posBaseIdx(idx pkgbits.Index) string {
return b
}
r := pr.newReader(pkgbits.RelocPosBase, idx, pkgbits.SyncPosBase)
var filename string
{
r := pr.tempReader(pkgbits.RelocPosBase, idx, pkgbits.SyncPosBase)
// Within types2, position bases have a lot more details (e.g.,
// keeping track of where //line directives appeared exactly).
//
// For go/types, we just track the file name.
filename := r.String()
filename = r.String()
if r.Bool() { // file base
// Was: "b = token.NewTrimmedFileBase(filename, true)"
} else { // line base
pos := r.pos()
line := r.Uint()
col := r.Uint()
// Was: "b = token.NewLineBase(pos, filename, true, line, col)"
_, _, _ = pos, line, col
}
pr.retireReader(r)
}
b := filename
pr.posBases[idx] = b
return b
@@ -259,22 +273,22 @@ func (r *reader) doPkg() *types.Package {
// packages rooted from pkgs.
func flattenImports(pkgs []*types.Package) []*types.Package {
var res []*types.Package
seen := make(map[*types.Package]bool)
seen := make(map[*types.Package]struct{})
var add func(pkg *types.Package)
add = func(pkg *types.Package) {
if seen[pkg] {
return
}
seen[pkg] = true
res = append(res, pkg)
for _, imp := range pkg.Imports() {
add(imp)
}
}
for _, pkg := range pkgs {
add(pkg)
if _, ok := seen[pkg]; ok {
continue
}
seen[pkg] = struct{}{}
res = append(res, pkg)
// pkg.Imports() is already flattened.
for _, pkg := range pkg.Imports() {
if _, ok := seen[pkg]; ok {
continue
}
seen[pkg] = struct{}{}
res = append(res, pkg)
}
}
return res
}
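A sketch (hand-built packages, mine rather than from the diff) of why one level of iteration suffices: as the new comment notes, import lists of deserialized packages are already transitively closed, so visiting each root's direct imports reaches everything:

```
package main

import (
	"fmt"
	"go/types"
)

// flatten mirrors the iterative flattenImports: one level of Imports()
// per root, relying on import lists already being transitively closed.
func flatten(pkgs []*types.Package) []*types.Package {
	var res []*types.Package
	seen := make(map[*types.Package]struct{})
	for _, pkg := range pkgs {
		if _, ok := seen[pkg]; ok {
			continue
		}
		seen[pkg] = struct{}{}
		res = append(res, pkg)
		for _, imp := range pkg.Imports() {
			if _, ok := seen[imp]; ok {
				continue
			}
			seen[imp] = struct{}{}
			res = append(res, imp)
		}
	}
	return res
}

func main() {
	a := types.NewPackage("a", "a")
	b := types.NewPackage("b", "b")
	c := types.NewPackage("c", "c")
	b.SetImports([]*types.Package{c})
	a.SetImports([]*types.Package{b, c}) // already flattened: c listed directly
	fmt.Println(len(flatten([]*types.Package{a}))) // 3
}
```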
@@ -307,12 +321,15 @@ func (pr *pkgReader) typIdx(info typeInfo, dict *readerDict) types.Type {
return typ
}
r := pr.newReader(pkgbits.RelocType, idx, pkgbits.SyncTypeIdx)
r.dict = dict
typ := r.doTyp()
assert(typ != nil)
var typ types.Type
{
r := pr.tempReader(pkgbits.RelocType, idx, pkgbits.SyncTypeIdx)
r.dict = dict
typ = r.doTyp()
assert(typ != nil)
pr.retireReader(r)
}
// See comment in pkgReader.typIdx explaining how this happens.
if prev := *where; prev != nil {
return prev
@@ -478,18 +495,30 @@ func (r *reader) obj() (types.Object, []types.Type) {
}
func (pr *pkgReader) objIdx(idx pkgbits.Index) (*types.Package, string) {
rname := pr.newReader(pkgbits.RelocName, idx, pkgbits.SyncObject1)
objPkg, objName := rname.qualifiedIdent()
assert(objName != "")
tag := pkgbits.CodeObj(rname.Code(pkgbits.SyncCodeObj))
var objPkg *types.Package
var objName string
var tag pkgbits.CodeObj
{
rname := pr.tempReader(pkgbits.RelocName, idx, pkgbits.SyncObject1)
objPkg, objName = rname.qualifiedIdent()
assert(objName != "")
tag = pkgbits.CodeObj(rname.Code(pkgbits.SyncCodeObj))
pr.retireReader(rname)
}
if tag == pkgbits.ObjStub {
assert(objPkg == nil || objPkg == types.Unsafe)
return objPkg, objName
}
// Ignore local types promoted to global scope (#55110).
if _, suffix := splitVargenSuffix(objName); suffix != "" {
return objPkg, objName
}
if objPkg.Scope().Lookup(objName) == nil {
dict := pr.objDictIdx(idx)
@@ -583,25 +612,28 @@ func (pr *pkgReader) objIdx(idx pkgbits.Index) (*types.Package, string) {
}
func (pr *pkgReader) objDictIdx(idx pkgbits.Index) *readerDict {
r := pr.newReader(pkgbits.RelocObjDict, idx, pkgbits.SyncObject1)
var dict readerDict
if implicits := r.Len(); implicits != 0 {
errorf("unexpected object with %v implicit type parameter(s)", implicits)
}
{
r := pr.tempReader(pkgbits.RelocObjDict, idx, pkgbits.SyncObject1)
if implicits := r.Len(); implicits != 0 {
errorf("unexpected object with %v implicit type parameter(s)", implicits)
}
dict.bounds = make([]typeInfo, r.Len())
for i := range dict.bounds {
dict.bounds[i] = r.typInfo()
}
dict.derived = make([]derivedInfo, r.Len())
dict.derivedTypes = make([]types.Type, len(dict.derived))
for i := range dict.derived {
dict.derived[i] = derivedInfo{r.Reloc(pkgbits.RelocType), r.Bool()}
}
pr.retireReader(r)
}
// function references follow, but reader doesn't need those
return &dict

View File

@@ -7,6 +7,7 @@ package gocommand
import (
"context"
"fmt"
"regexp"
"strings"
)
@@ -56,3 +57,23 @@ func GoVersion(ctx context.Context, inv Invocation, r *Runner) (int, error) {
}
return 0, fmt.Errorf("no parseable ReleaseTags in %v", tags)
}
// GoVersionString reports the go version string as shown in `go version` command output.
// When `go version` outputs in non-standard form, this returns an empty string.
func GoVersionString(ctx context.Context, inv Invocation, r *Runner) (string, error) {
inv.Verb = "version"
goVersion, err := r.Run(ctx, inv)
if err != nil {
return "", err
}
return parseGoVersionOutput(goVersion.Bytes()), nil
}
func parseGoVersionOutput(data []byte) string {
re := regexp.MustCompile(`^go version (go\S+|devel \S+)`)
m := re.FindSubmatch(data)
if len(m) != 2 {
return "" // unrecognized version
}
return string(m[1])
}
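A quick check of the regexp's behavior (my driver, with the function copied from the diff):

```
package main

import (
	"fmt"
	"regexp"
)

func parseGoVersionOutput(data []byte) string {
	re := regexp.MustCompile(`^go version (go\S+|devel \S+)`)
	m := re.FindSubmatch(data)
	if len(m) != 2 {
		return "" // unrecognized version
	}
	return string(m[1])
}

func main() {
	fmt.Println(parseGoVersionOutput([]byte("go version go1.19.4 linux/amd64"))) // go1.19.4
	fmt.Println(parseGoVersionOutput([]byte("go version devel +abc123 linux")))  // devel +abc123
	fmt.Println(parseGoVersionOutput([]byte("unexpected output")))               // (empty)
}
```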

View File

@@ -6,6 +6,7 @@ package pkgbits
import (
"encoding/binary"
"errors"
"fmt"
"go/constant"
"go/token"
@@ -52,6 +53,8 @@ type PkgDecoder struct {
// For example, section K's end positions start at elemEndsEnds[K-1]
// (or 0, if K==0) and end at elemEndsEnds[K].
elemEndsEnds [numRelocs]uint32
scratchRelocEnt []RelocEnt
}
// PkgPath returns the package path for the package
@@ -165,6 +168,21 @@ func (pr *PkgDecoder) NewDecoder(k RelocKind, idx Index, marker SyncMarker) Deco
return r
}
// TempDecoder returns a Decoder for the given (section, index) pair,
// and decodes the given SyncMarker from the element bitstream.
// If possible the Decoder should be RetireDecoder'd when it is no longer
// needed, this will avoid heap allocations.
func (pr *PkgDecoder) TempDecoder(k RelocKind, idx Index, marker SyncMarker) Decoder {
r := pr.TempDecoderRaw(k, idx)
r.Sync(marker)
return r
}
func (pr *PkgDecoder) RetireDecoder(d *Decoder) {
pr.scratchRelocEnt = d.Relocs
d.Relocs = nil
}
// NewDecoderRaw returns a Decoder for the given (section, index) pair.
//
// Most callers should use NewDecoder instead.
@@ -188,6 +206,30 @@ func (pr *PkgDecoder) NewDecoderRaw(k RelocKind, idx Index) Decoder {
return r
}
func (pr *PkgDecoder) TempDecoderRaw(k RelocKind, idx Index) Decoder {
r := Decoder{
common: pr,
k: k,
Idx: idx,
}
r.Data.Reset(pr.DataIdx(k, idx))
r.Sync(SyncRelocs)
l := r.Len()
if cap(pr.scratchRelocEnt) >= l {
r.Relocs = pr.scratchRelocEnt[:l]
pr.scratchRelocEnt = nil
} else {
r.Relocs = make([]RelocEnt, l)
}
for i := range r.Relocs {
r.Sync(SyncReloc)
r.Relocs[i] = RelocEnt{RelocKind(r.Len()), Index(r.Len())}
}
return r
}
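The scratch-slice dance above is easiest to see in miniature; this toy pool (my abstraction, not the pkgbits API) shows the same ownership handoff, where retiring a decoder donates its Relocs backing array to the next temporary decoder:

```
package main

import "fmt"

type ent struct{ kind, idx int }

// pool mimics PkgDecoder's scratchRelocEnt field.
type pool struct{ scratch []ent }

// get hands out a slice, reusing the scratch array when it is big enough.
func (p *pool) get(n int) []ent {
	if cap(p.scratch) >= n {
		s := p.scratch[:n]
		p.scratch = nil // ownership moves to the caller
		return s
	}
	return make([]ent, n)
}

// retire takes the slice back, as RetireDecoder does with d.Relocs.
func (p *pool) retire(s []ent) { p.scratch = s }

func main() {
	var p pool
	a := p.get(4)
	p.retire(a)
	b := p.get(3)               // reuses a's backing array
	fmt.Println(&a[0] == &b[0]) // true: no new allocation
}
```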
// A Decoder provides methods for decoding an individual element's
// bitstream data.
type Decoder struct {
@@ -207,11 +249,39 @@ func (r *Decoder) checkErr(err error) {
}
func (r *Decoder) rawUvarint() uint64 {
x, err := binary.ReadUvarint(&r.Data)
x, err := readUvarint(&r.Data)
r.checkErr(err)
return x
}
// readUvarint is a type-specialized copy of encoding/binary.ReadUvarint.
// This avoids the interface conversion and thus has better escape properties,
// which flows up the stack.
func readUvarint(r *strings.Reader) (uint64, error) {
var x uint64
var s uint
for i := 0; i < binary.MaxVarintLen64; i++ {
b, err := r.ReadByte()
if err != nil {
if i > 0 && err == io.EOF {
err = io.ErrUnexpectedEOF
}
return x, err
}
if b < 0x80 {
if i == binary.MaxVarintLen64-1 && b > 1 {
return x, overflow
}
return x | uint64(b)<<s, nil
}
x |= uint64(b&0x7f) << s
s += 7
}
return x, overflow
}
var overflow = errors.New("pkgbits: readUvarint overflows a 64-bit integer")
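To see the specialization at work (my test, not part of the diff): taking a concrete *strings.Reader instead of an io.ByteReader means the call no longer boxes the reader into an interface, which is what improves the escape analysis; the copy below round-trips a value encoded with encoding/binary:

```
package main

import (
	"encoding/binary"
	"errors"
	"fmt"
	"io"
	"strings"
)

var overflow = errors.New("pkgbits: readUvarint overflows a 64-bit integer")

// readUvarint copied from the diff above.
func readUvarint(r *strings.Reader) (uint64, error) {
	var x uint64
	var s uint
	for i := 0; i < binary.MaxVarintLen64; i++ {
		b, err := r.ReadByte()
		if err != nil {
			if i > 0 && err == io.EOF {
				err = io.ErrUnexpectedEOF
			}
			return x, err
		}
		if b < 0x80 {
			if i == binary.MaxVarintLen64-1 && b > 1 {
				return x, overflow
			}
			return x | uint64(b)<<s, nil
		}
		x |= uint64(b&0x7f) << s
		s += 7
	}
	return x, overflow
}

func main() {
	var buf [binary.MaxVarintLen64]byte
	n := binary.PutUvarint(buf[:], 1<<42+7)
	got, err := readUvarint(strings.NewReader(string(buf[:n])))
	fmt.Println(got == 1<<42+7, err) // true <nil>
}
```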
func (r *Decoder) rawVarint() int64 {
ux := r.rawUvarint()
@@ -410,8 +480,12 @@ func (r *Decoder) bigFloat() *big.Float {
// PeekPkgPath returns the package path for the specified package
// index.
func (pr *PkgDecoder) PeekPkgPath(idx Index) string {
r := pr.NewDecoder(RelocPkg, idx, SyncPkgDef)
path := r.String()
var path string
{
r := pr.TempDecoder(RelocPkg, idx, SyncPkgDef)
path = r.String()
pr.RetireDecoder(&r)
}
if path == "" {
path = pr.pkgPath
}
@@ -421,14 +495,23 @@ func (pr *PkgDecoder) PeekPkgPath(idx Index) string {
// PeekObj returns the package path, object name, and CodeObj for the
// specified object index.
func (pr *PkgDecoder) PeekObj(idx Index) (string, string, CodeObj) {
r := pr.NewDecoder(RelocName, idx, SyncObject1)
r.Sync(SyncSym)
r.Sync(SyncPkg)
path := pr.PeekPkgPath(r.Reloc(RelocPkg))
name := r.String()
var ridx Index
var name string
var rcode int
{
r := pr.TempDecoder(RelocName, idx, SyncObject1)
r.Sync(SyncSym)
r.Sync(SyncPkg)
ridx = r.Reloc(RelocPkg)
name = r.String()
rcode = r.Code(SyncCodeObj)
pr.RetireDecoder(&r)
}
path := pr.PeekPkgPath(ridx)
assert(name != "")
tag := CodeObj(r.Code(SyncCodeObj))
tag := CodeObj(rcode)
return path, name, tag
}
