mirror of https://github.com/containers/podman.git
Merge pull request #26222 from Luap99/vendor
vendor: update c/{buildah,common,image,storage} to main
commit 089547dba2

go.mod (30 changes)
@@ -12,15 +12,15 @@ require (
 	github.com/checkpoint-restore/checkpointctl v1.3.0
 	github.com/checkpoint-restore/go-criu/v7 v7.2.0
 	github.com/containernetworking/plugins v1.7.1
-	github.com/containers/buildah v1.40.0
-	github.com/containers/common v0.63.0
+	github.com/containers/buildah v1.40.1-0.20250523151639-b535d02d0ee1
+	github.com/containers/common v0.63.1-0.20250528122446-1a3b5ecec62f
 	github.com/containers/conmon v2.0.20+incompatible
 	github.com/containers/gvisor-tap-vsock v0.8.6
-	github.com/containers/image/v5 v5.35.0
+	github.com/containers/image/v5 v5.35.1-0.20250526152843-c64593da00e4
 	github.com/containers/libhvee v0.10.0
 	github.com/containers/ocicrypt v1.2.1
 	github.com/containers/psgo v1.9.0
-	github.com/containers/storage v1.58.0
+	github.com/containers/storage v1.58.1-0.20250515004000-78f4258b2bd9
 	github.com/containers/winquit v1.1.0
 	github.com/coreos/go-systemd/v22 v22.5.1-0.20231103132048-7d375ecc2b09
 	github.com/crc-org/vfkit v0.6.1
@@ -82,9 +82,9 @@ require (
 )
 
 require (
-	dario.cat/mergo v1.0.1 // indirect
+	dario.cat/mergo v1.0.2 // indirect
 	github.com/Azure/go-ansiterm v0.0.0-20250102033503-faa5f7b0171c // indirect
-	github.com/Microsoft/hcsshim v0.12.9 // indirect
+	github.com/Microsoft/hcsshim v0.13.0 // indirect
 	github.com/VividCortex/ewma v1.2.0 // indirect
 	github.com/acarl005/stripansi v0.0.0-20180116102854-5a71ef0e047d // indirect
 	github.com/aead/serpent v0.0.0-20160714141033-fba169763ea6 // indirect
@@ -99,8 +99,8 @@ require (
 	github.com/containerd/typeurl/v2 v2.2.3 // indirect
 	github.com/containernetworking/cni v1.3.0 // indirect
 	github.com/containers/libtrust v0.0.0-20230121012942-c1716e8a8d01 // indirect
-	github.com/containers/luksy v0.0.0-20250217190002-40bd943d93b8 // indirect
-	github.com/coreos/go-oidc/v3 v3.13.0 // indirect
+	github.com/containers/luksy v0.0.0-20250408185436-4bb4c3f825be // indirect
+	github.com/coreos/go-oidc/v3 v3.14.1 // indirect
 	github.com/coreos/go-systemd v0.0.0-20190719114852-fd7a80b32e1f // indirect
 	github.com/cyberphone/json-canonicalization v0.0.0-20241213102144-19d51d7fe467 // indirect
 	github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect
@@ -118,7 +118,7 @@ require (
 	github.com/go-ole/go-ole v1.3.0 // indirect
 	github.com/go-openapi/analysis v0.23.0 // indirect
 	github.com/go-openapi/errors v0.22.1 // indirect
-	github.com/go-openapi/jsonpointer v0.21.0 // indirect
+	github.com/go-openapi/jsonpointer v0.21.1 // indirect
 	github.com/go-openapi/jsonreference v0.21.0 // indirect
 	github.com/go-openapi/loads v0.22.0 // indirect
 	github.com/go-openapi/runtime v0.28.0 // indirect
@@ -151,7 +151,7 @@ require (
 	github.com/miekg/pkcs11 v1.1.1 // indirect
 	github.com/mistifyio/go-zfs/v3 v3.0.1 // indirect
 	github.com/mitchellh/mapstructure v1.5.0 // indirect
-	github.com/moby/buildkit v0.21.0 // indirect
+	github.com/moby/buildkit v0.22.0 // indirect
 	github.com/moby/docker-image-spec v1.3.1 // indirect
 	github.com/moby/go-archive v0.1.0 // indirect
 	github.com/moby/patternmatcher v0.6.0 // indirect
@@ -162,7 +162,7 @@ require (
 	github.com/modern-go/reflect2 v1.0.2 // indirect
 	github.com/morikuni/aec v1.0.0 // indirect
 	github.com/oklog/ulid v1.3.1 // indirect
-	github.com/opencontainers/runc v1.2.6 // indirect
+	github.com/opencontainers/runc v1.3.0 // indirect
 	github.com/opentracing/opentracing-go v1.2.0 // indirect
 	github.com/ostreedev/ostree-go v0.0.0-20210805093236-719684c64e4f // indirect
 	github.com/pkg/errors v0.9.1 // indirect
@@ -172,13 +172,13 @@ require (
 	github.com/power-devops/perfstat v0.0.0-20240221224432-82ca36839d55 // indirect
 	github.com/proglottis/gpgme v0.1.4 // indirect
 	github.com/rivo/uniseg v0.4.7 // indirect
-	github.com/seccomp/libseccomp-golang v0.10.0 // indirect
+	github.com/seccomp/libseccomp-golang v0.11.0 // indirect
 	github.com/secure-systems-lab/go-securesystemslib v0.9.0 // indirect
 	github.com/segmentio/ksuid v1.0.4 // indirect
 	github.com/sigstore/fulcio v1.6.6 // indirect
 	github.com/sigstore/protobuf-specs v0.4.1 // indirect
 	github.com/sigstore/rekor v1.3.10 // indirect
-	github.com/sigstore/sigstore v1.9.3 // indirect
+	github.com/sigstore/sigstore v1.9.4 // indirect
 	github.com/skeema/knownhosts v1.3.1 // indirect
 	github.com/skratchdot/open-golang v0.0.0-20200116055534-eef842397966 // indirect
 	github.com/smallstep/pkcs7 v0.1.1 // indirect
@@ -194,7 +194,7 @@ require (
 	github.com/vbatts/tar-split v0.12.1 // indirect
 	github.com/vishvananda/netns v0.0.5 // indirect
 	github.com/yusufpapurcu/wmi v1.2.4 // indirect
-	go.mongodb.org/mongo-driver v1.14.0 // indirect
+	go.mongodb.org/mongo-driver v1.17.3 // indirect
 	go.opencensus.io v0.24.0 // indirect
 	go.opentelemetry.io/auto/sdk v1.1.0 // indirect
 	go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.59.0 // indirect
@@ -203,7 +203,7 @@ require (
 	go.opentelemetry.io/otel/trace v1.34.0 // indirect
 	go.uber.org/automaxprocs v1.6.0 // indirect
 	golang.org/x/mod v0.24.0 // indirect
-	golang.org/x/oauth2 v0.29.0 // indirect
+	golang.org/x/oauth2 v0.30.0 // indirect
 	golang.org/x/time v0.11.0 // indirect
 	golang.org/x/tools v0.32.0 // indirect
 	google.golang.org/genproto/googleapis/api v0.0.0-20250303144028-a0af3efb3deb // indirect
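The four c/* bumps above move from tagged releases to untagged commits on main, so Go records them as pseudo-versions. A pseudo-version is derived mechanically from the last release tag, the commit's UTC timestamp, and the abbreviated commit hash; as a sketch (the annotations are mine, not go.mod syntax), the buildah line decomposes as:

```text
v1.40.1-0.20250523151639-b535d02d0ee1
│        │              └── 12-character commit hash on main
│        └── commit timestamp: 2025-05-23 15:16:39 UTC
└── next patch version after the last release tag, v1.40.0

# a line like this is typically produced by: go get github.com/containers/buildah@main
```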
go.sum (68 changes)
@@ -1,6 +1,6 @@
 cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
-dario.cat/mergo v1.0.1 h1:Ra4+bf83h2ztPIQYNP99R6m+Y7KfnARDfID+a+vLl4s=
-dario.cat/mergo v1.0.1/go.mod h1:uNxQE+84aUszobStD9th8a29P2fMDhsBdgRYvZOxGmk=
+dario.cat/mergo v1.0.2 h1:85+piFYR1tMbRrLcDwR18y4UKJ3aH1Tbzi24VRW1TK8=
+dario.cat/mergo v1.0.2/go.mod h1:E/hbnu0NxMFBjpMIE34DRGLWqDy0g5FuKDhCb31ngxA=
 github.com/14rcole/gopopulate v0.0.0-20180821133914-b175b219e774 h1:SCbEWT58NSt7d2mcFdvxC9uyrdcTfvBbPLThhkDmXzg=
 github.com/14rcole/gopopulate v0.0.0-20180821133914-b175b219e774/go.mod h1:6/0dYRLLXyJjbkIPeeGyoJ/eKOSI0eU6eTlCBYibgd0=
 github.com/AdaLogics/go-fuzz-headers v0.0.0-20240806141605-e8a1dd7889d6 h1:He8afgbRMd7mFxO99hRNu+6tazq8nFF9lIwo9JFroBk=
@@ -12,8 +12,8 @@ github.com/BurntSushi/toml v1.5.0 h1:W5quZX/G/csjUnuI8SUYlsHs9M38FC7znL0lIO+DvMg
 github.com/BurntSushi/toml v1.5.0/go.mod h1:ukJfTF/6rtPPRCnwkur4qwRxa8vTRFBF0uk2lLoLwho=
 github.com/Microsoft/go-winio v0.6.2 h1:F2VQgta7ecxGYO8k3ZZz3RS8fVIXVxONVUPlNERoyfY=
 github.com/Microsoft/go-winio v0.6.2/go.mod h1:yd8OoFMLzJbo9gZq8j5qaps8bJ9aShtEA8Ipt1oGCvU=
-github.com/Microsoft/hcsshim v0.12.9 h1:2zJy5KA+l0loz1HzEGqyNnjd3fyZA31ZBCGKacp6lLg=
-github.com/Microsoft/hcsshim v0.12.9/go.mod h1:fJ0gkFAna6ukt0bLdKB8djt4XIJhF/vEPuoIWYVvZ8Y=
+github.com/Microsoft/hcsshim v0.13.0 h1:/BcXOiS6Qi7N9XqUcv27vkIuVOkBEcWstd2pMlWSeaA=
+github.com/Microsoft/hcsshim v0.13.0/go.mod h1:9KWJ/8DgU+QzYGupX4tzMhRQE8h6w90lH6HAaclpEok=
 github.com/Netflix/go-expect v0.0.0-20220104043353-73e0943537d2 h1:+vx7roKuyA63nhn5WAunQHLTznkw5W8b1Xc0dNjp83s=
 github.com/Netflix/go-expect v0.0.0-20220104043353-73e0943537d2/go.mod h1:HBCaDeC1lPdgDeDbhX8XFpy1jqjK0IBG8W5K+xYqA0w=
 github.com/VividCortex/ewma v1.2.0 h1:f58SaIzcDXrSy3kWaHNvuJgJ3Nmz59Zji6XoJR/q1ow=
@@ -66,32 +66,32 @@ github.com/containernetworking/cni v1.3.0 h1:v6EpN8RznAZj9765HhXQrtXgX+ECGebEYEm
 github.com/containernetworking/cni v1.3.0/go.mod h1:Bs8glZjjFfGPHMw6hQu82RUgEPNGEaBb9KS5KtNMnJ4=
 github.com/containernetworking/plugins v1.7.1 h1:CNAR0jviDj6FS5Vg85NTgKWLDzZPfi/lj+VJfhMDTIs=
 github.com/containernetworking/plugins v1.7.1/go.mod h1:xuMdjuio+a1oVQsHKjr/mgzuZ24leAsqUYRnzGoXHy0=
-github.com/containers/buildah v1.40.0 h1:qCHTKnL/UEutxT6ZS8Zvhy7QUpe719jEIeGMSlcN3j4=
-github.com/containers/buildah v1.40.0/go.mod h1:U6qj0nseq6t97T2kkNpjgo0WBVRYIXASIOlS5eWvlhM=
-github.com/containers/common v0.63.0 h1:ox6vgUYX5TSvt4W+bE36sYBVz/aXMAfRGVAgvknSjBg=
-github.com/containers/common v0.63.0/go.mod h1:+3GCotSqNdIqM3sPs152VvW7m5+Mg8Kk+PExT3G9hZw=
+github.com/containers/buildah v1.40.1-0.20250523151639-b535d02d0ee1 h1:3bNWDmqh9tx0iAXPzBJugj/oC0nTD9yTXCyIu/Mj/LE=
+github.com/containers/buildah v1.40.1-0.20250523151639-b535d02d0ee1/go.mod h1:8BVLrM6nRl/dRMYxZ+TrmoWPXzkCY99rZOYvJoXpIyE=
+github.com/containers/common v0.63.1-0.20250528122446-1a3b5ecec62f h1:308Ex0+3+gBSpDPJrFCQIhALdD8YC7jzaXuxSFZgFiA=
+github.com/containers/common v0.63.1-0.20250528122446-1a3b5ecec62f/go.mod h1:efNRNweihnq5nXALnAPDXTpC7uJtnFV4pNuETTfvI8s=
 github.com/containers/conmon v2.0.20+incompatible h1:YbCVSFSCqFjjVwHTPINGdMX1F6JXHGTUje2ZYobNrkg=
 github.com/containers/conmon v2.0.20+incompatible/go.mod h1:hgwZ2mtuDrppv78a/cOBNiCm6O0UMWGx1mu7P00nu5I=
 github.com/containers/gvisor-tap-vsock v0.8.6 h1:9SeAXK+K2o36CtrgYk6zRXbU3zrayjvkrI8b7/O6u5A=
 github.com/containers/gvisor-tap-vsock v0.8.6/go.mod h1:+0mtKmm4STeSDnZe+DGnIwN4EH2f7AcWir7PwT28Ti0=
-github.com/containers/image/v5 v5.35.0 h1:T1OeyWp3GjObt47bchwD9cqiaAm/u4O4R9hIWdrdrP8=
-github.com/containers/image/v5 v5.35.0/go.mod h1:8vTsgb+1gKcBL7cnjyNOInhJQfTUQjJoO2WWkKDoebM=
+github.com/containers/image/v5 v5.35.1-0.20250526152843-c64593da00e4 h1:7rvPvBNGjNfgjAmRZhlxA7ooBbLalqTTGoHa9DBVnBY=
+github.com/containers/image/v5 v5.35.1-0.20250526152843-c64593da00e4/go.mod h1:JAywiXYidI9NBfCvggVF80nYVAsYrNSRpvHKnalbZG0=
 github.com/containers/libhvee v0.10.0 h1:7VLv8keWZpHuGmWvyY4c1mVH5V1JYb1G78VC+8AlrM0=
 github.com/containers/libhvee v0.10.0/go.mod h1:at0h8lRcK5jCKfQgU/e6Io0Mw12F36zRLjXVOXRoDTM=
 github.com/containers/libtrust v0.0.0-20230121012942-c1716e8a8d01 h1:Qzk5C6cYglewc+UyGf6lc8Mj2UaPTHy/iF2De0/77CA=
 github.com/containers/libtrust v0.0.0-20230121012942-c1716e8a8d01/go.mod h1:9rfv8iPl1ZP7aqh9YA68wnZv2NUDbXdcdPHVz0pFbPY=
-github.com/containers/luksy v0.0.0-20250217190002-40bd943d93b8 h1:hAkmJxAYcNxgv7EsFY9sf1uIYhilYOQqjJ9UzCmYvzY=
-github.com/containers/luksy v0.0.0-20250217190002-40bd943d93b8/go.mod h1:m5Y0Lh2ROHGCbOhGeUDfoOLuUDDtxszrbZc2IsQOGAM=
+github.com/containers/luksy v0.0.0-20250408185436-4bb4c3f825be h1:6E1dD/4g8Kq04jvI5BVqUIx2Z5Nl6+dqYuX9syTVZjI=
+github.com/containers/luksy v0.0.0-20250408185436-4bb4c3f825be/go.mod h1:xY4YWmawqtrpLFV7mYSkHfFDwEO+6Fo0bT18Xnsg7M0=
 github.com/containers/ocicrypt v1.2.1 h1:0qIOTT9DoYwcKmxSt8QJt+VzMY18onl9jUXsxpVhSmM=
 github.com/containers/ocicrypt v1.2.1/go.mod h1:aD0AAqfMp0MtwqWgHM1bUwe1anx0VazI108CRrSKINQ=
 github.com/containers/psgo v1.9.0 h1:eJ74jzSaCHnWt26OlKZROSyUyRcGDf+gYBdXnxrMW4g=
 github.com/containers/psgo v1.9.0/go.mod h1:0YoluUm43Mz2UnBIh1P+6V6NWcbpTL5uRtXyOcH0B5A=
-github.com/containers/storage v1.58.0 h1:Q7SyyCCjqgT3wYNgRNIL8o/wUS92heIj2/cc8Sewvcc=
-github.com/containers/storage v1.58.0/go.mod h1:w7Jl6oG+OpeLGLzlLyOZPkmUso40kjpzgrHUk5tyBlo=
+github.com/containers/storage v1.58.1-0.20250515004000-78f4258b2bd9 h1:7pLGfniIOBvFoe4hzLpV+DWt1hHWaQPiBQR71ftOpCU=
+github.com/containers/storage v1.58.1-0.20250515004000-78f4258b2bd9/go.mod h1:exWY15dYuRjIG2nfv2/Z1TinvEYub582shEGGr6uawY=
 github.com/containers/winquit v1.1.0 h1:jArun04BNDQvt2W0Y78kh9TazN2EIEMG5Im6/JY7+pE=
 github.com/containers/winquit v1.1.0/go.mod h1:PsPeZlnbkmGGIToMPHF1zhWjBUkd8aHjMOr/vFcPxw8=
-github.com/coreos/go-oidc/v3 v3.13.0 h1:M66zd0pcc5VxvBNM4pB331Wrsanby+QomQYjN8HamW8=
-github.com/coreos/go-oidc/v3 v3.13.0/go.mod h1:HaZ3szPaZ0e4r6ebqvsLWlk2Tn+aejfmrfah6hnSYEU=
+github.com/coreos/go-oidc/v3 v3.14.1 h1:9ePWwfdwC4QKRlCXsJGou56adA/owXczOzwKdOumLqk=
+github.com/coreos/go-oidc/v3 v3.14.1/go.mod h1:HaZ3szPaZ0e4r6ebqvsLWlk2Tn+aejfmrfah6hnSYEU=
 github.com/coreos/go-systemd v0.0.0-20190719114852-fd7a80b32e1f h1:JOrtw2xFKzlg+cbHpyrpLDmnN1HqhBfnX7WDiW7eG2c=
 github.com/coreos/go-systemd v0.0.0-20190719114852-fd7a80b32e1f/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4=
 github.com/coreos/go-systemd/v22 v22.5.1-0.20231103132048-7d375ecc2b09 h1:OoRAFlvDGCUqDLampLQjk0yeeSGdF9zzst/3G9IkBbc=
@@ -117,8 +117,8 @@ github.com/disiqueira/gotree/v3 v3.0.2 h1:ik5iuLQQoufZBNPY518dXhiO5056hyNBIK9lWh
 github.com/disiqueira/gotree/v3 v3.0.2/go.mod h1:ZuyjE4+mUQZlbpkI24AmruZKhg3VHEgPLDY8Qk+uUu8=
 github.com/distribution/reference v0.6.0 h1:0IXCQ5g4/QMHHkarYzh5l+u8T3t73zM5QvfrDyIgxBk=
 github.com/distribution/reference v0.6.0/go.mod h1:BbU0aIcezP1/5jX/8MP0YiH4SdvB5Y4f/wlDRiLyi3E=
-github.com/docker/cli v28.0.4+incompatible h1:pBJSJeNd9QeIWPjRcV91RVJihd/TXB77q1ef64XEu4A=
-github.com/docker/cli v28.0.4+incompatible/go.mod h1:JLrzqnKDaYBop7H2jaqPtU4hHvMKP+vjCwu2uszcLI8=
+github.com/docker/cli v28.1.1+incompatible h1:eyUemzeI45DY7eDPuwUcmDyDj1pM98oD5MdSpiItp8k=
+github.com/docker/cli v28.1.1+incompatible/go.mod h1:JLrzqnKDaYBop7H2jaqPtU4hHvMKP+vjCwu2uszcLI8=
 github.com/docker/distribution v2.8.3+incompatible h1:AtKxIZ36LoNK51+Z6RpzLpddBirtxJnzDrHLEKxTAYk=
 github.com/docker/distribution v2.8.3+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w=
 github.com/docker/docker v28.1.1+incompatible h1:49M11BFLsVO1gxY9UX9p/zwkE/rswggs8AdFmXQw51I=
@@ -164,8 +164,8 @@ github.com/go-openapi/analysis v0.23.0 h1:aGday7OWupfMs+LbmLZG4k0MYXIANxcuBTYUC0
 github.com/go-openapi/analysis v0.23.0/go.mod h1:9mz9ZWaSlV8TvjQHLl2mUW2PbZtemkE8yA5v22ohupo=
 github.com/go-openapi/errors v0.22.1 h1:kslMRRnK7NCb/CvR1q1VWuEQCEIsBGn5GgKD9e+HYhU=
 github.com/go-openapi/errors v0.22.1/go.mod h1:+n/5UdIqdVnLIJ6Q9Se8HNGUXYaY6CN8ImWzfi/Gzp0=
-github.com/go-openapi/jsonpointer v0.21.0 h1:YgdVicSA9vH5RiHs9TZW5oyafXZFc6+2Vc1rr/O9oNQ=
-github.com/go-openapi/jsonpointer v0.21.0/go.mod h1:IUyH9l/+uyhIYQ/PXVA41Rexl+kOkAPDdXEYns6fzUY=
+github.com/go-openapi/jsonpointer v0.21.1 h1:whnzv/pNXtK2FbX/W9yJfRmE2gsmkfahjMKB0fZvcic=
+github.com/go-openapi/jsonpointer v0.21.1/go.mod h1:50I1STOfbY1ycR8jGz8DaMeLCdXiI6aDteEdRNNzpdk=
 github.com/go-openapi/jsonreference v0.21.0 h1:Rs+Y7hSXT83Jacb7kFyjn4ijOuVGSvOdF2+tg1TRrwQ=
 github.com/go-openapi/jsonreference v0.21.0/go.mod h1:LmZmgsrTkVg9LG4EaHeY8cBDslNPMo06cago5JNLkm4=
 github.com/go-openapi/loads v0.22.0 h1:ECPGd4jX1U6NApCGG1We+uEozOAvXvJSF4nnwHZ8Aco=
@@ -317,8 +317,8 @@ github.com/mistifyio/go-zfs/v3 v3.0.1 h1:YaoXgBePoMA12+S1u/ddkv+QqxcfiZK4prI6HPn
 github.com/mistifyio/go-zfs/v3 v3.0.1/go.mod h1:CzVgeB0RvF2EGzQnytKVvVSDwmKJXxkOTUGbNrTja/k=
 github.com/mitchellh/mapstructure v1.5.0 h1:jeMsZIYE/09sWLaz43PL7Gy6RuMjD2eJVyuac5Z2hdY=
 github.com/mitchellh/mapstructure v1.5.0/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo=
-github.com/moby/buildkit v0.21.0 h1:+z4vVqgt0spLrOSxi4DLedRbIh2gbNVlZ5q4rsnNp60=
-github.com/moby/buildkit v0.21.0/go.mod h1:mBq0D44uCyz2PdX8T/qym5LBbkBO3GGv0wqgX9ABYYw=
+github.com/moby/buildkit v0.22.0 h1:aWN06w1YGSVN1XfeZbj2ZbgY+zi5xDAjEFI8Cy9fTjA=
+github.com/moby/buildkit v0.22.0/go.mod h1:j4pP5hxiTWcz7xuTK2cyxQislHl/N2WWHzOy43DlLJw=
 github.com/moby/docker-image-spec v1.3.1 h1:jMKff3w6PgbfSa69GfNg+zN/XLhfXJGnEx3Nl2EsFP0=
 github.com/moby/docker-image-spec v1.3.1/go.mod h1:eKmb5VW8vQEh/BAr2yvVNvuiJuY6UIocYsFu/DxxRpo=
 github.com/moby/go-archive v0.1.0 h1:Kk/5rdW/g+H8NHdJW2gsXyZ7UnzvJNOy6VKJqueWdcQ=
@@ -362,8 +362,8 @@ github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8
 github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM=
 github.com/opencontainers/image-spec v1.1.1 h1:y0fUlFfIZhPF1W537XOLg0/fcx6zcHCJwooC2xJA040=
 github.com/opencontainers/image-spec v1.1.1/go.mod h1:qpqAh3Dmcf36wStyyWU+kCeDgrGnAve2nCC8+7h8Q0M=
-github.com/opencontainers/runc v1.2.6 h1:P7Hqg40bsMvQGCS4S7DJYhUZOISMLJOB2iGX5COWiPk=
-github.com/opencontainers/runc v1.2.6/go.mod h1:dOQeFo29xZKBNeRBI0B19mJtfHv68YgCTh1X+YphA+4=
+github.com/opencontainers/runc v1.3.0 h1:cvP7xbEvD0QQAs0nZKLzkVog2OPZhI/V2w3WmTmUSXI=
+github.com/opencontainers/runc v1.3.0/go.mod h1:9wbWt42gV+KRxKRVVugNP6D5+PQciRbenB4fLVsqGPs=
 github.com/opencontainers/runtime-spec v1.2.1 h1:S4k4ryNgEpxW1dzyqffOmhI1BHYcjzU8lpJfSlR0xww=
 github.com/opencontainers/runtime-spec v1.2.1/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0=
 github.com/opencontainers/runtime-tools v0.9.1-0.20250303011046-260e151b8552 h1:CkXngT0nixZqQUPDVfwVs3GiuhfTqCMk0V+OoHpxIvA=
@@ -410,12 +410,12 @@ github.com/rogpeppe/go-internal v1.13.1/go.mod h1:uMEvuHeurkdAXX61udpOXGD/AzZDWN
 github.com/rootless-containers/rootlesskit/v2 v2.3.5 h1:WGY05oHE7xQpSkCGfYP9lMY5z19tCxA8PhWlvP1cKx8=
 github.com/rootless-containers/rootlesskit/v2 v2.3.5/go.mod h1:83EIYLeMX8UeNgLHkR1PefoSV76aKEC+OyI3vzrEfvw=
 github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
-github.com/santhosh-tekuri/jsonschema/v6 v6.0.1 h1:PKK9DyHxif4LZo+uQSgXNqs0jj5+xZwwfKHgph2lxBw=
-github.com/santhosh-tekuri/jsonschema/v6 v6.0.1/go.mod h1:JXeL+ps8p7/KNMjDQk3TCwPpBy0wYklyWTfbkIzdIFU=
+github.com/santhosh-tekuri/jsonschema/v6 v6.0.2 h1:KRzFb2m7YtdldCEkzs6KqmJw4nqEVZGK7IN2kJkjTuQ=
+github.com/santhosh-tekuri/jsonschema/v6 v6.0.2/go.mod h1:JXeL+ps8p7/KNMjDQk3TCwPpBy0wYklyWTfbkIzdIFU=
 github.com/sebdah/goldie/v2 v2.5.5 h1:rx1mwF95RxZ3/83sdS4Yp7t2C5TCokvWP4TBRbAyEWY=
 github.com/sebdah/goldie/v2 v2.5.5/go.mod h1:oZ9fp0+se1eapSRjfYbsV/0Hqhbuu3bJVvKI/NNtssI=
-github.com/seccomp/libseccomp-golang v0.10.0 h1:aA4bp+/Zzi0BnWZ2F1wgNBs5gTpm+na2rWM6M9YjLpY=
-github.com/seccomp/libseccomp-golang v0.10.0/go.mod h1:JA8cRccbGaA1s33RQf7Y1+q9gHmZX1yB/z9WDN1C6fg=
+github.com/seccomp/libseccomp-golang v0.11.0 h1:SDkcBRqGLP+sezmMACkxO1EfgbghxIxnRKfd6mHUEis=
+github.com/seccomp/libseccomp-golang v0.11.0/go.mod h1:5m1Lk8E9OwgZTTVz4bBOer7JuazaBa+xTkM895tDiWc=
 github.com/secure-systems-lab/go-securesystemslib v0.9.0 h1:rf1HIbL64nUpEIZnjLZ3mcNEL9NBPB0iuVjyxvq3LZc=
 github.com/secure-systems-lab/go-securesystemslib v0.9.0/go.mod h1:DVHKMcZ+V4/woA/peqr+L0joiRXbPpQ042GgJckkFgw=
 github.com/segmentio/ksuid v1.0.4 h1:sBo2BdShXjmcugAMwjugoGUdUV0pcxY5mW4xKRn3v4c=
@@ -430,8 +430,8 @@ github.com/sigstore/protobuf-specs v0.4.1 h1:5SsMqZbdkcO/DNHudaxuCUEjj6x29tS2Xby
 github.com/sigstore/protobuf-specs v0.4.1/go.mod h1:+gXR+38nIa2oEupqDdzg4qSBT0Os+sP7oYv6alWewWc=
 github.com/sigstore/rekor v1.3.10 h1:/mSvRo4MZ/59ECIlARhyykAlQlkmeAQpvBPlmJtZOCU=
 github.com/sigstore/rekor v1.3.10/go.mod h1:JvryKJ40O0XA48MdzYUPu0y4fyvqt0C4iSY7ri9iu3A=
-github.com/sigstore/sigstore v1.9.3 h1:y2qlTj+vh+Or3ictKuR3JUFawZPdDxAjrWkeFhon0OQ=
-github.com/sigstore/sigstore v1.9.3/go.mod h1:VwYkiw0G0dRtwL25KSs04hCyVFF6CYMd/qvNeYrl7EQ=
+github.com/sigstore/sigstore v1.9.4 h1:64+OGed80+A4mRlNzRd055vFcgBeDghjZw24rPLZgDU=
+github.com/sigstore/sigstore v1.9.4/go.mod h1:Q7tGTC3gbtK7c3jcxEmGc2MmK4rRpIRzi3bxRFWKvEY=
 github.com/sirupsen/logrus v1.9.3 h1:dueUQJ1C2q9oE3F7wvmSGAaVtTmUizReu6fjN8uqzbQ=
 github.com/sirupsen/logrus v1.9.3/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ=
 github.com/skeema/knownhosts v1.3.1 h1:X2osQ+RAjK76shCbvhHHHVl3ZlgDm8apHEHFqRjnBY8=
@@ -509,8 +509,8 @@ github.com/yusufpapurcu/wmi v1.2.4 h1:zFUKzehAFReQwLys1b/iSMl+JQGSCSjtVqQn9bBrPo
 github.com/yusufpapurcu/wmi v1.2.4/go.mod h1:SBZ9tNy3G9/m5Oi98Zks0QjeHVDvuK0qfxQmPyzfmi0=
 go.etcd.io/bbolt v1.4.0 h1:TU77id3TnN/zKr7CO/uk+fBCwF2jGcMuw2B/FMAzYIk=
 go.etcd.io/bbolt v1.4.0/go.mod h1:AsD+OCi/qPN1giOX1aiLAha3o1U8rAz65bvN4j0sRuk=
-go.mongodb.org/mongo-driver v1.14.0 h1:P98w8egYRjYe3XDjxhYJagTokP/H6HzlsnojRgZRd80=
-go.mongodb.org/mongo-driver v1.14.0/go.mod h1:Vzb0Mk/pa7e6cWw85R4F/endUC3u0U9jGcNU603k65c=
+go.mongodb.org/mongo-driver v1.17.3 h1:TQyXhnsWfWtgAhMtOgtYHMTkZIfBTpMTsMnd9ZBeHxQ=
+go.mongodb.org/mongo-driver v1.17.3/go.mod h1:Hy04i7O2kC4RS06ZrhPRqj/u4DTYkFDAAccj+rVKqgQ=
 go.opencensus.io v0.24.0 h1:y73uSU6J157QMP2kn2r30vwW1A2W2WFwSCGnAVxeaD0=
 go.opencensus.io v0.24.0/go.mod h1:vNK8G9p7aAivkbmorf4v+7Hgx+Zs0yY+0fOtgBfjQKo=
 go.opentelemetry.io/auto/sdk v1.1.0 h1:cH53jehLUN6UFLY71z+NDOiNJqDdPRaXzTel0sJySYA=
@@ -584,8 +584,8 @@ golang.org/x/net v0.25.0/go.mod h1:JkAGAh7GEvH74S6FOH42FLoXpXbE/aqXSrIQjXgsiwM=
 golang.org/x/net v0.40.0 h1:79Xs7wF06Gbdcg4kdCCIQArK11Z1hr5POQ6+fIYHNuY=
 golang.org/x/net v0.40.0/go.mod h1:y0hY0exeL2Pku80/zKK7tpntoX23cqL3Oa6njdgRtds=
 golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
-golang.org/x/oauth2 v0.29.0 h1:WdYw2tdTK1S8olAzWHdgeqfy+Mtm9XNhv/xJsY65d98=
-golang.org/x/oauth2 v0.29.0/go.mod h1:onh5ek6nERTohokkhCD/y2cV4Do3fxFHFuAejCkRWT8=
+golang.org/x/oauth2 v0.30.0 h1:dnDm7JmhM45NNpd8FDDeLhK6FwqbOf4MLCM9zb1BOHI=
+golang.org/x/oauth2 v0.30.0/go.mod h1:B++QgG3ZKulg6sRPGD/mqlHQs5rB3Ml9erfeDY7xKlU=
 golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
 golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
 golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
(new file)
@@ -0,0 +1,7 @@
+{
+  "drips": {
+    "ethereum": {
+      "ownedBy": "0x6160020e7102237aC41bdb156e94401692D76930"
+    }
+  }
+}
@@ -85,7 +85,6 @@ Mergo is used by [thousands](https://deps.dev/go/dario.cat%2Fmergo/v1.0.0/depend
 * [goreleaser/goreleaser](https://github.com/goreleaser/goreleaser)
 * [go-micro/go-micro](https://github.com/go-micro/go-micro)
 * [grafana/loki](https://github.com/grafana/loki)
 * [kubernetes/kubernetes](https://github.com/kubernetes/kubernetes)
-* [masterminds/sprig](github.com/Masterminds/sprig)
 * [moby/moby](https://github.com/moby/moby)
 * [slackhq/nebula](https://github.com/slackhq/nebula)
@@ -191,10 +190,6 @@ func main() {
 }
 ```
 
-Note: if test are failing due missing package, please execute:
-
-    go get gopkg.in/yaml.v3
-
 ### Transformers
 
 Transformers allow to merge specific types differently than in the default behavior. In other words, now you can customize how some types are merged. For example, `time.Time` is a struct; it doesn't have zero value but IsZero can return true because it has fields with zero value. How can we merge a non-zero `time.Time`?
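For context on the Transformers question the README excerpt above poses, a minimal sketch using mergo's documented `WithTransformers` option (the `timeTransformer` type and the `Config` struct are illustrative, not taken from the vendored code):

```go
package main

import (
	"fmt"
	"reflect"
	"time"

	"dario.cat/mergo"
)

// timeTransformer keeps a non-zero time.Time in dst from being overwritten.
type timeTransformer struct{}

func (t timeTransformer) Transformer(typ reflect.Type) func(dst, src reflect.Value) error {
	if typ == reflect.TypeOf(time.Time{}) {
		return func(dst, src reflect.Value) error {
			// Only copy src over dst when dst is the zero time.
			if dst.CanSet() && dst.Interface().(time.Time).IsZero() {
				dst.Set(src)
			}
			return nil
		}
	}
	return nil // fall back to default merging for every other type
}

type Config struct{ Updated time.Time }

func main() {
	dst := Config{Updated: time.Now()}
	src := Config{Updated: time.Now().Add(time.Hour)}
	if err := mergo.Merge(&dst, src, mergo.WithTransformers(timeTransformer{})); err != nil {
		fmt.Println("merge failed:", err)
	}
	fmt.Println(dst.Updated) // unchanged: dst already held a non-zero time
}
```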
@@ -4,8 +4,8 @@
 
 | Version | Supported |
 | ------- | ------------------ |
-| 0.3.x | :white_check_mark: |
-| < 0.3 | :x: |
+| 1.x.x | :white_check_mark: |
+| < 1.0 | :x: |
 
 ## Security contact information
 
(new file)
@@ -0,0 +1,12 @@
+Language: Cpp
+BasedOnStyle: Microsoft
+BreakBeforeBraces: Attach
+PointerAlignment: Left
+AllowShortFunctionsOnASingleLine: All
+# match Go style
+IndentCaseLabels: false
+# don't break comments over line limit (needed for CodeQL exceptions)
+ReflowComments: false
+InsertNewlineAtEOF: true
+KeepEmptyLines:
+  AtEndOfFile: true
@@ -5,9 +5,6 @@ run:
     - admin
     - functional
     - integration
-  skip-dirs:
-    # paths are relative to module root
-    - cri-containerd/test-images
 
 linters:
   enable:
@@ -34,13 +31,15 @@ linters-settings:
       # struct order is often for Win32 compat
      # also, ignore pointer bytes/GC issues for now until performance becomes an issue
       - fieldalignment
-    check-shadowing: true
 
   stylecheck:
     # https://staticcheck.io/docs/checks
     checks: ["all"]
 
 issues:
+  exclude-dirs:
+    # paths are relative to module root
+    - cri-containerd/test-images
   exclude-rules:
     # err is very often shadowed in nested scopes
     - linters:
@@ -70,22 +69,22 @@
     - path: layer.go
       linters:
         - stylecheck
-      Text: "ST1003:"
+      text: "ST1003:"
 
     - path: hcsshim.go
       linters:
         - stylecheck
-      Text: "ST1003:"
+      text: "ST1003:"
 
     - path: cmd\\ncproxy\\nodenetsvc\\
       linters:
         - stylecheck
-      Text: "ST1003:"
+      text: "ST1003:"
 
     - path: cmd\\ncproxy_mock\\
       linters:
         - stylecheck
-      Text: "ST1003:"
+      text: "ST1003:"
 
     - path: internal\\hcs\\schema2\\
       linters:
@@ -95,67 +94,67 @@
     - path: internal\\wclayer\\
       linters:
         - stylecheck
-      Text: "ST1003:"
+      text: "ST1003:"
 
     - path: hcn\\
       linters:
         - stylecheck
-      Text: "ST1003:"
+      text: "ST1003:"
 
     - path: internal\\hcs\\schema1\\
       linters:
         - stylecheck
-      Text: "ST1003:"
+      text: "ST1003:"
 
     - path: internal\\hns\\
       linters:
         - stylecheck
-      Text: "ST1003:"
+      text: "ST1003:"
 
     - path: ext4\\internal\\compactext4\\
       linters:
         - stylecheck
-      Text: "ST1003:"
+      text: "ST1003:"
 
     - path: ext4\\internal\\format\\
       linters:
         - stylecheck
-      Text: "ST1003:"
+      text: "ST1003:"
 
     - path: internal\\guestrequest\\
       linters:
         - stylecheck
-      Text: "ST1003:"
+      text: "ST1003:"
 
     - path: internal\\guest\\prot\\
       linters:
         - stylecheck
-      Text: "ST1003:"
+      text: "ST1003:"
 
     - path: internal\\windevice\\
       linters:
         - stylecheck
-      Text: "ST1003:"
+      text: "ST1003:"
 
     - path: internal\\winapi\\
       linters:
         - stylecheck
-      Text: "ST1003:"
+      text: "ST1003:"
 
     - path: internal\\vmcompute\\
       linters:
         - stylecheck
-      Text: "ST1003:"
+      text: "ST1003:"
 
     - path: internal\\regstate\\
       linters:
         - stylecheck
-      Text: "ST1003:"
+      text: "ST1003:"
 
     - path: internal\\hcserror\\
       linters:
         - stylecheck
-      Text: "ST1003:"
+      text: "ST1003:"
 
     # v0 APIs are deprecated, but still retained for backwards compatability
     - path: cmd\\ncproxy\\
@@ -171,4 +170,4 @@
     - path: internal\\vhdx\\info
       linters:
         - stylecheck
-      Text: "ST1003:"
+      text: "ST1003:"
@@ -1,13 +1,20 @@
-BASE:=base.tar.gz
-DEV_BUILD:=0
+include Makefile.bootfiles
 
 GO:=go
 GO_FLAGS:=-ldflags "-s -w" # strip Go binaries
 CGO_ENABLED:=0
 GOMODVENDOR:=
+KMOD:=0
 
 CFLAGS:=-O2 -Wall
-LDFLAGS:=-static -s # strip C binaries
+LDFLAGS:=-static -s #strip C binaries
+LDLIBS:=
+PREPROCESSORFLAGS:=
+ifeq "$(KMOD)" "1"
+LDFLAGS:= -s
+LDLIBS:= -lkmod
+PREPROCESSORFLAGS:=-DMODULES=1
+endif
 
 GO_FLAGS_EXTRA:=
 ifeq "$(GOMODVENDOR)" "1"
@@ -23,108 +30,14 @@ SRCROOT=$(dir $(abspath $(firstword $(MAKEFILE_LIST))))
 # additional directories to search for rule prerequisites and targets
 VPATH=$(SRCROOT)
 
-DELTA_TARGET=out/delta.tar.gz
-
-ifeq "$(DEV_BUILD)" "1"
-DELTA_TARGET=out/delta-dev.tar.gz
-endif
-
-ifeq "$(SNP_BUILD)" "1"
-DELTA_TARGET=out/delta-snp.tar.gz
-endif
-
 # The link aliases for gcstools
 GCS_TOOLS=\
 	generichook \
 	install-drivers
 
-# Common path prefix.
-PATH_PREFIX:=
-# These have PATH_PREFIX prepended to obtain the full path in recipies e.g. $(PATH_PREFIX)/$(VMGS_TOOL)
-VMGS_TOOL:=
-IGVM_TOOL:=
-KERNEL_PATH:=
-
-.PHONY: all always rootfs test snp simple
-
-.DEFAULT_GOAL := all
-
-all: out/initrd.img out/rootfs.tar.gz
-
-clean:
-	find -name '*.o' -print0 | xargs -0 -r rm
-	rm -rf bin deps rootfs out
-
 test:
 	cd $(SRCROOT) && $(GO) test -v ./internal/guest/...
 
-rootfs: out/rootfs.vhd
-
-snp: out/kernelinitrd.vmgs out/rootfs.hash.vhd out/rootfs.vhd out/v2056.vmgs
-
-simple: out/simple.vmgs snp
-
-%.vmgs: %.bin
-	rm -f $@
-	# du -BM returns the size of the bin file in M, eg 7M. The sed command replaces the M with *1024*1024 and then bc does the math to convert to bytes
-	$(PATH_PREFIX)/$(VMGS_TOOL) create --filepath $@ --filesize `du -BM $< | sed "s/M.*/*1024*1024/" | bc`
-	$(PATH_PREFIX)/$(VMGS_TOOL) write --filepath $@ --datapath $< -i=8
-
-# Simplest debug UVM used to test changes to the linux kernel. No dmverity protection. Boots an initramdisk rather than directly booting a vhd disk.
-out/simple.bin: out/initrd.img $(PATH_PREFIX)/$(KERNEL_PATH) boot/startup_simple.sh
-	rm -f $@
-	python3 $(PATH_PREFIX)/$(IGVM_TOOL) -o $@ -kernel $(PATH_PREFIX)/$(KERNEL_PATH) -append "8250_core.nr_uarts=0 panic=-1 debug loglevel=7 rdinit=/startup_simple.sh" -rdinit out/initrd.img -vtl 0
-
-ROOTFS_DEVICE:=/dev/sda
-VERITY_DEVICE:=/dev/sdb
-# Debug build for use with uvmtester. UVM with dm-verity protected vhd disk mounted directly via the kernel command line. Ignores corruption in dm-verity protected disk. (Use dmesg to see if dm-verity is ignoring data corruption.)
-out/v2056.bin: out/rootfs.vhd out/rootfs.hash.vhd $(PATH_PREFIX)/$(KERNEL_PATH) out/rootfs.hash.datasectors out/rootfs.hash.datablocksize out/rootfs.hash.hashblocksize out/rootfs.hash.datablocks out/rootfs.hash.rootdigest out/rootfs.hash.salt boot/startup_v2056.sh
-	rm -f $@
-	python3 $(PATH_PREFIX)/$(IGVM_TOOL) -o $@ -kernel $(PATH_PREFIX)/$(KERNEL_PATH) -append "8250_core.nr_uarts=0 panic=-1 debug loglevel=7 root=/dev/dm-0 dm-mod.create=\"dmverity,,,ro,0 $(shell cat out/rootfs.hash.datasectors) verity 1 $(ROOTFS_DEVICE) $(VERITY_DEVICE) $(shell cat out/rootfs.hash.datablocksize) $(shell cat out/rootfs.hash.hashblocksize) $(shell cat out/rootfs.hash.datablocks) 0 sha256 $(shell cat out/rootfs.hash.rootdigest) $(shell cat out/rootfs.hash.salt) 1 ignore_corruption\" init=/startup_v2056.sh" -vtl 0
-
-# Full UVM with dm-verity protected vhd disk mounted directly via the kernel command line.
-out/kernelinitrd.bin: out/rootfs.vhd out/rootfs.hash.vhd out/rootfs.hash.datasectors out/rootfs.hash.datablocksize out/rootfs.hash.hashblocksize out/rootfs.hash.datablocks out/rootfs.hash.rootdigest out/rootfs.hash.salt $(PATH_PREFIX)/$(KERNEL_PATH) boot/startup.sh
-	rm -f $@
-	python3 $(PATH_PREFIX)/$(IGVM_TOOL) -o $@ -kernel $(PATH_PREFIX)/$(KERNEL_PATH) -append "8250_core.nr_uarts=0 panic=-1 debug loglevel=7 root=/dev/dm-0 dm-mod.create=\"dmverity,,,ro,0 $(shell cat out/rootfs.hash.datasectors) verity 1 $(ROOTFS_DEVICE) $(VERITY_DEVICE) $(shell cat out/rootfs.hash.datablocksize) $(shell cat out/rootfs.hash.hashblocksize) $(shell cat out/rootfs.hash.datablocks) 0 sha256 $(shell cat out/rootfs.hash.rootdigest) $(shell cat out/rootfs.hash.salt)\" init=/startup.sh" -vtl 0
-
-# Rule to make a vhd from a file. This is used to create the rootfs.hash.vhd from rootfs.hash.
-%.vhd: % bin/cmd/tar2ext4
-	./bin/cmd/tar2ext4 -only-vhd -i $< -o $@
-
-# Rule to make a vhd from an ext4 file. This is used to create the rootfs.vhd from rootfs.ext4.
-%.vhd: %.ext4 bin/cmd/tar2ext4
-	./bin/cmd/tar2ext4 -only-vhd -i $< -o $@
-
-%.hash %.hash.info %.hash.datablocks %.hash.rootdigest %hash.datablocksize %.hash.datasectors %.hash.hashblocksize: %.ext4 %.hash.salt
-	veritysetup format --no-superblock --salt $(shell cat out/rootfs.hash.salt) $< $*.hash > $*.hash.info
-	# Retrieve info required by dm-verity at boot time
-	# Get the blocksize of rootfs
-	cat $*.hash.info | awk '/^Root hash:/{ print $$3 }' > $*.hash.rootdigest
-	cat $*.hash.info | awk '/^Salt:/{ print $$2 }' > $*.hash.salt
-	cat $*.hash.info | awk '/^Data block size:/{ print $$4 }' > $*.hash.datablocksize
-	cat $*.hash.info | awk '/^Hash block size:/{ print $$4 }' > $*.hash.hashblocksize
-	cat $*.hash.info | awk '/^Data blocks:/{ print $$3 }' > $*.hash.datablocks
-	echo $$(( $$(cat $*.hash.datablocks) * $$(cat $*.hash.datablocksize) / 512 )) > $*.hash.datasectors
-
-out/rootfs.hash.salt:
-	hexdump -vn32 -e'8/4 "%08X" 1 "\n"' /dev/random > $@
-
-out/rootfs.ext4: out/rootfs.tar.gz bin/cmd/tar2ext4
-	gzip -f -d ./out/rootfs.tar.gz
-	./bin/cmd/tar2ext4 -i ./out/rootfs.tar -o $@
-
-out/rootfs.tar.gz: out/initrd.img
-	rm -rf rootfs-conv
-	mkdir rootfs-conv
-	gunzip -c out/initrd.img | (cd rootfs-conv && cpio -imd)
-	tar -zcf $@ -C rootfs-conv .
-	rm -rf rootfs-conv
-
-out/initrd.img: $(BASE) $(DELTA_TARGET) $(SRCROOT)/hack/catcpio.sh
-	$(SRCROOT)/hack/catcpio.sh "$(BASE)" $(DELTA_TARGET) > out/initrd.img.uncompressed
-	gzip -c out/initrd.img.uncompressed > $@
-	rm out/initrd.img.uncompressed
-
 # This target includes utilities which may be useful for testing purposes.
 out/delta-dev.tar.gz: out/delta.tar.gz bin/internal/tools/snp-report
 	rm -rf rootfs-dev
@@ -168,10 +81,7 @@ out/delta.tar.gz: bin/init bin/vsockexec bin/cmd/gcs bin/cmd/gcstools bin/cmd/ho
 	tar -zcf $@ -C rootfs .
 	rm -rf rootfs
 
-out/containerd-shim-runhcs-v1.exe:
-	GOOS=windows $(GO_BUILD) -o $@ $(SRCROOT)/cmd/containerd-shim-runhcs-v1
-
-bin/cmd/gcs bin/cmd/gcstools bin/cmd/hooks/wait-paths bin/cmd/tar2ext4 bin/internal/tools/snp-report bin/cmd/dmverity-vhd:
+bin/cmd/gcs bin/cmd/gcstools bin/cmd/hooks/wait-paths bin/cmd/tar2ext4 bin/internal/tools/snp-report:
 	@mkdir -p $(dir $@)
 	GOOS=linux $(GO_BUILD) -o $@ $(SRCROOT)/$(@:bin/%=%)
 
@@ -181,8 +91,8 @@ bin/vsockexec: vsockexec/vsockexec.o vsockexec/vsock.o
 
 bin/init: init/init.o vsockexec/vsock.o
 	@mkdir -p bin
-	$(CC) $(LDFLAGS) -o $@ $^
+	$(CC) $(LDFLAGS) -o $@ $^ $(LDLIBS)
 
 %.o: %.c
 	@mkdir -p $(dir $@)
-	$(CC) $(CFLAGS) $(CPPFLAGS) -c -o $@ $<
+	$(CC) $(PREPROCESSORFLAGS) $(CFLAGS) $(CPPFLAGS) -c -o $@ $<
(new file)
@@ -0,0 +1,197 @@
+BASE:=base.tar.gz
+DEV_BUILD:=0
+
+DELTA_TARGET=out/delta.tar.gz
+
+ifeq "$(DEV_BUILD)" "1"
+DELTA_TARGET=out/delta-dev.tar.gz
+endif
+
+ifeq "$(SNP_BUILD)" "1"
+DELTA_TARGET=out/delta-snp.tar.gz
+endif
+
+SRCROOT=$(dir $(abspath $(firstword $(MAKEFILE_LIST))))
+
+PATH_PREFIX:=
+# These have PATH_PREFIX prepended to obtain the full path in recipies e.g. $(PATH_PREFIX)/$(VMGS_TOOL)
+VMGS_TOOL:=
+IGVM_TOOL:=
+KERNEL_PATH:=
+TAR2EXT4_TOOL:=bin/cmd/tar2ext4
+
+ROOTFS_DEVICE:=/dev/sda
+HASH_DEVICE:=/dev/sdb
+
+.PHONY: all always rootfs test snp simple
+
+.DEFAULT_GOAL := all
+
+all: out/initrd.img out/rootfs.tar.gz
+
+clean:
+	find -name '*.o' -print0 | xargs -0 -r rm
+	rm -rf bin rootfs out
+
+rootfs: out/rootfs.vhd
+
+snp: out/kernel.vmgs out/rootfs-verity.vhd out/v2056.vmgs out/v2056combined.vmgs
+
+simple: out/simple.vmgs snp
+
+%.vmgs: %.bin
+	rm -f $@
+	# du -BM returns the size of the bin file in M, eg 7M. The sed command replaces the M with *1024*1024 and then bc does the math to convert to bytes
+	$(PATH_PREFIX)/$(VMGS_TOOL) create --filepath $@ --filesize `du -BM $< | sed "s/M.*/*1024*1024/" | bc`
+	$(PATH_PREFIX)/$(VMGS_TOOL) write --filepath $@ --datapath $< -i=8
+
+# Simplest debug UVM used to test changes to the linux kernel. No dmverity protection. Boots an initramdisk rather than directly booting a vhd disk.
+out/simple.bin: out/initrd.img $(PATH_PREFIX)/$(KERNEL_PATH) boot/startup_simple.sh
+	rm -f $@
+	python3 $(PATH_PREFIX)/$(IGVM_TOOL) \
+		-o $@ \
+		-kernel $(PATH_PREFIX)/$(KERNEL_PATH) \
+		-append "8250_core.nr_uarts=0 panic=-1 debug loglevel=7 rdinit=/startup_simple.sh" \
+		-rdinit out/initrd.img \
+		-vtl 0
+
+# The boot performance is optimized by supplying rootfs as a SCSI attachment. In this case the kernel boots with
+# dm-verity to ensure the integrity. Similar to layer VHDs the verity Merkle tree is appended to ext4 filesystem.
+# It transpires that the /dev/sd* order is not deterministic wrt the scsi device order. Thus build a single userland
+# fs + merkle tree device and boot that.
+#
+# From https://www.kernel.org/doc/html/latest/admin-guide/device-mapper/dm-init.html
+#
+# dm-mod.create=<name>,<uuid>,<minor>,<flags>,<table>[,<table>+][;<name>,<uuid>,<minor>,<flags>,<table>[,<table>+]+]
+#
+# where:
+# <name>        ::= The device name.
+# <uuid>        ::= xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx | ""
+# <minor>       ::= The device minor number | ""
+# <flags>       ::= "ro" | "rw"
+# <table>       ::= <start_sector> <num_sectors> <target_type> <target_args>
+# <target_type> ::= "verity" | "linear" | ... (see list below)
+#
+# From https://docs.kernel.org/admin-guide/device-mapper/verity.html
+# <version> <dev> <hash_dev>
+# <data_block_size> <hash_block_size>
+# <num_data_blocks> <hash_start_block>
+# <algorithm> <digest> <salt>
+# [<#opt_params> <opt_params>]
+#
+# typical igvm tool line once all the macros are expanded
+# python3 /home/user/igvmfile.py -o out/v2056.bin -kernel /hose/user/bzImage -append "8250_core.nr_uarts=0 panic=-1 debug loglevel=9 ignore_loglevel dev.scsi.logging_level=9411 root=/dev/dm-0 dm-mod.create=\"dmverity,,,ro,0 196744 verity 1 /dev/sda /dev/sdb 4096 4096 24593 0 sha256 6d625a306aafdf73125a84388b7bfdd2c3a154bd8d698955f4adffc736bdfd66 b9065c23231f0d8901cc3a68e1d3b8d624213e76d6f9f6d3ccbcb829f9c710ba 1 ignore_corruption\" init=/startup_v2056.sh" -vtl 0
+#
+# so a kernel command line of:
+# 8250_core.nr_uarts=0 panic=-1 debug loglevel=9 ignore_loglevel dev.scsi.logging_level=9411 root=/dev/dm-0 dm-mod.create=\"dmverity,,,ro,0 196744 verity 1 /dev/sda /dev/sdb 4096 4096 24593 0 sha256 6d625a306aafdf73125a84388b7bfdd2c3a154bd8d698955f4adffc736bdfd66 b9065c23231f0d8901cc3a68e1d3b8d624213e76d6f9f6d3ccbcb829f9c710ba 1 ignore_corruption\" init=/startup_v2056.sh
+#
+# and a dm-mod.create of:
+# dmverity,,,ro,0 196744 verity 1 /dev/sda /dev/sdb 4096 4096 24593 0 sha256 6d625a306aafdf73125a84388b7bfdd2c3a154bd8d698955f4adffc736bdfd66 b9065c23231f0d8901cc3a68e1d3b8d624213e76d6f9f6d3ccbcb829f9c710ba 1 ignore_corruption
+#
+# which breaks down to:
+#
+# name = "dmverity"
+# uuid = ""
+# minor = ""
+# flags = "ro"
+# table = 0 196744 verity "args"
+#   start_sector = 0
+#   num_sectors = 196744
+#   target_type = verity
+#   target_args = 1 /dev/sda /dev/sdb 4096 4096 24593 0 sha256 6d625a306aafdf73125a84388b7bfdd2c3a154bd8d698955f4adffc736bdfd66 b9065c23231f0d8901cc3a68e1d3b8d624213e76d6f9f6d3ccbcb829f9c710ba 1 ignore_corruption
+# args:
+#   version 1
+#   dev /dev/sda
+#   hash_dev /dev/sdb
+#   data_block_size 4096
+#   hash_block_size 4096
+#   num_data_blocks 24593
+#   hash_start_block 0
+#   algorithm sha256
+#   digest 6d625a306aafdf73125a84388b7bfdd2c3a154bd8d698955f4adffc736bdfd66
+#   salt b9065c23231f0d8901cc3a68e1d3b8d624213e76d6f9f6d3ccbcb829f9c710ba
+#   opt_params
+#     count = 1
+#     ignore_corruption
+#
+# combined typical (not bigger count of sectors for the whole device)
+# dmverity,,,ro,0 199672 verity 1 /dev/sda /dev/sda 4096 4096 24959 24959 sha256 4aa6e79866ee946ddbd9cddd6554bc6449272942fcc65934326817785a3bd374 adc4956274489c936395bab046a2d476f21ef436e571ba53da2fdf3aee59bf0a
+#
+# A few notes:
+# - num_sectors is the size of the final (aka target) verity device, i.e. the size of our rootfs excluding the Merkle
+#   tree.
+# - We don't add verity superblock, so the <hash_start_block> will be exactly at the end of ext4 filesystem and equal
+#   to its size. In the case when verity superblock is present an extra block should be added to the offset value,
+#   i.e. 24959 becomes 24960.
+
+
+# Debug build for use with uvmtester. UVM with dm-verity protected vhd disk mounted directly via the kernel command line.
+# Ignores corruption in dm-verity protected disk. (Use dmesg to see if dm-verity is ignoring data corruption.)
+out/v2056.bin: out/rootfs.vhd out/rootfs.hash.vhd $(PATH_PREFIX)/$(KERNEL_PATH) out/rootfs.hash.datasectors out/rootfs.hash.datablocksize out/rootfs.hash.hashblocksize out/rootfs.hash.datablocks out/rootfs.hash.rootdigest out/rootfs.hash.salt boot/startup_v2056.sh
+	rm -f $@
+	python3 $(PATH_PREFIX)/$(IGVM_TOOL) \
+		-o $@ \
+		-kernel $(PATH_PREFIX)/$(KERNEL_PATH) \
+		-append "8250_core.nr_uarts=0 panic=-1 debug loglevel=9 root=/dev/dm-0 dm-mod.create=\"dmverity,,,ro,0 $(shell cat out/rootfs.hash.datasectors) verity 1 $(ROOTFS_DEVICE) $(HASH_DEVICE) $(shell cat out/rootfs.hash.datablocksize) $(shell cat out/rootfs.hash.hashblocksize) $(shell cat out/rootfs.hash.datablocks) $(shell cat out/rootfs.hash.datablocks) sha256 $(shell cat out/rootfs.hash.rootdigest) $(shell cat out/rootfs.hash.salt) 1 ignore_corruption\" init=/startup_v2056.sh" \
+		-vtl 0
+
+out/v2056combined.bin: out/rootfs-verity.vhd $(PATH_PREFIX)/$(KERNEL_PATH) out/rootfs.hash.datablocksize out/rootfs.hash.hashblocksize out/rootfs.hash.datablocks out/rootfs.hash.rootdigest out/rootfs.hash.salt boot/startup_v2056.sh
+	rm -f $@
+	echo root=/dev/dm-0 dm-mod.create=\"dmverity,,,ro,0 $(shell cat out/rootfs.hash.datasectors) verity 1 $(ROOTFS_DEVICE) $(ROOTFS_DEVICE) $(shell cat out/rootfs.hash.datablocksize) $(shell cat out/rootfs.hash.hashblocksize) $(shell cat out/rootfs.hash.datablocks) $(shell cat out/rootfs.hash.datablocks) sha256 $(shell cat out/rootfs.hash.rootdigest) $(shell cat out/rootfs.hash.salt) 1 ignore_corruption\"
+	python3 $(PATH_PREFIX)/$(IGVM_TOOL) \
+		-o $@ \
+		-kernel $(PATH_PREFIX)/$(KERNEL_PATH) \
+		-append "8250_core.nr_uarts=0 panic=-1 debug loglevel=9 ignore_loglevel dev.scsi.logging_level=9411 root=/dev/dm-0 dm-mod.create=\"dmverity,,,ro,0 $(shell cat out/rootfs.hash.datasectors) verity 1 $(ROOTFS_DEVICE) $(ROOTFS_DEVICE) $(shell cat out/rootfs.hash.datablocksize) $(shell cat out/rootfs.hash.hashblocksize) $(shell cat out/rootfs.hash.datablocks) $(shell cat out/rootfs.hash.datablocks) sha256 $(shell cat out/rootfs.hash.rootdigest) $(shell cat out/rootfs.hash.salt) 1 ignore_corruption\" init=/startup_v2056.sh" \
+		-vtl 0
+
+# Full UVM with dm-verity protected vhd disk mounted directly via the kernel command line.
+out/kernel.bin: out/rootfs-verity.vhd $(PATH_PREFIX)/$(KERNEL_PATH) out/rootfs.hash.datasectors out/rootfs.hash.datablocksize out/rootfs.hash.hashblocksize out/rootfs.hash.datablocks out/rootfs.hash.rootdigest out/rootfs.hash.salt boot/startup.sh
+	rm -f $@
+	echo root=/dev/dm-0 dm-mod.create=\"dmverity,,,ro,0 $(shell cat out/rootfs.hash.datasectors) verity 1 $(ROOTFS_DEVICE) $(ROOTFS_DEVICE) $(shell cat out/rootfs.hash.datablocksize) $(shell cat out/rootfs.hash.hashblocksize) $(shell cat out/rootfs.hash.datablocks) $(shell cat out/rootfs.hash.datablocks) sha256 $(shell cat out/rootfs.hash.rootdigest) $(shell cat out/rootfs.hash.salt)\"
+	python3 $(PATH_PREFIX)/$(IGVM_TOOL) \
+		-o $@ \
+		-kernel $(PATH_PREFIX)/$(KERNEL_PATH) \
+		-append "8250_core.nr_uarts=0 panic=-1 debug loglevel=7 root=/dev/dm-0 dm-mod.create=\"dmverity,,,ro,0 $(shell cat out/rootfs.hash.datasectors) verity 1 $(ROOTFS_DEVICE) $(ROOTFS_DEVICE) $(shell cat out/rootfs.hash.datablocksize) $(shell cat out/rootfs.hash.hashblocksize) $(shell cat out/rootfs.hash.datablocks) $(shell cat out/rootfs.hash.datablocks) sha256 $(shell cat out/rootfs.hash.rootdigest) $(shell cat out/rootfs.hash.salt)\" init=/startup.sh" \
		-vtl 0
+
+# Rule to make a vhd from a file. This is used to create the rootfs.hash.vhd from rootfs.hash.
+%.vhd: % $(TAR2EXT4_TOOL)
+	$(TAR2EXT4_TOOL) -only-vhd -i $< -o $@
+
+# Rule to make a vhd from an ext4 file. This is used to create the rootfs.vhd from rootfs.ext4.
+%.vhd: %.ext4 $(TAR2EXT4_TOOL)
+	$(TAR2EXT4_TOOL) -only-vhd -i $< -o $@
+
+%.hash %.hash.info %.hash.datablocks %.hash.rootdigest %hash.datablocksize %.hash.datasectors %.hash.hashblocksize: %.ext4 %.hash.salt
+	veritysetup format --no-superblock --salt $(shell cat out/rootfs.hash.salt) $< $*.hash > $*.hash.info
+	# Retrieve info required by dm-verity at boot time
+	# Get the blocksize of rootfs
+	cat $*.hash.info | awk '/^Root hash:/{ print $$3 }' > $*.hash.rootdigest
+	cat $*.hash.info | awk '/^Salt:/{ print $$2 }' > $*.hash.salt
+	cat $*.hash.info | awk '/^Data block size:/{ print $$4 }' > $*.hash.datablocksize
+	cat $*.hash.info | awk '/^Hash block size:/{ print $$4 }' > $*.hash.hashblocksize
+	cat $*.hash.info | awk '/^Data blocks:/{ print $$3 }' > $*.hash.datablocks
+	echo $$(( $$(cat $*.hash.datablocks) * $$(cat $*.hash.datablocksize) / 512 )) > $*.hash.datasectors
+
+out/rootfs.hash.salt:
+	hexdump -vn32 -e'8/4 "%08X" 1 "\n"' /dev/random > $@
+
+out/rootfs.ext4: out/rootfs.tar.gz $(TAR2EXT4_TOOL)
+	gzip -f -d ./out/rootfs.tar.gz
+	$(TAR2EXT4_TOOL) -i ./out/rootfs.tar -o $@
+
+out/rootfs-verity.ext4: out/rootfs.ext4 out/rootfs.hash
+	cp out/rootfs.ext4 $@
+	cat out/rootfs.hash >> $@
+
+out/rootfs.tar.gz: out/initrd.img
+	rm -rf rootfs-conv
+	mkdir rootfs-conv
+	gunzip -c out/initrd.img | (cd rootfs-conv && cpio -imd)
+	tar -zcf $@ -C rootfs-conv .
+	rm -rf rootfs-conv
+
+out/initrd.img: $(BASE) $(DELTA_TARGET) $(SRCROOT)/hack/catcpio.sh
+	$(SRCROOT)/hack/catcpio.sh "$(BASE)" $(DELTA_TARGET) > out/initrd.img.uncompressed
+	gzip -c out/initrd.img.uncompressed > $@
+	rm out/initrd.img.uncompressed
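The dm-mod.create breakdown in the comments above maps mechanically onto the shell fragments in the new targets. As a cross-check of that arithmetic, here is a small Go sketch (not part of the vendored Makefile; the function name and layout are illustrative) that assembles the same table string from the documented parameters:

```go
package main

import (
	"fmt"
	"strings"
)

// buildDMVerityArg assembles a dm-mod.create= value in the form described by
// the dm-init and dm-verity kernel docs cited in the Makefile comments.
func buildDMVerityArg(dataBlocks, dataBlockSize, hashBlockSize int64,
	dev, hashDev, digest, salt string, optParams []string) string {
	// num_sectors is the size of the verity target in 512-byte sectors:
	// data blocks times data block size, divided by 512.
	numSectors := dataBlocks * dataBlockSize / 512

	// <start_sector> <num_sectors> verity <version> <dev> <hash_dev>
	// <data_block_size> <hash_block_size> <num_data_blocks> <hash_start_block>
	// <algorithm> <digest> <salt> [<#opt_params> <opt_params>]
	// With no verity superblock, hash_start_block sits right after the data blocks.
	table := fmt.Sprintf("0 %d verity 1 %s %s %d %d %d %d sha256 %s %s",
		numSectors, dev, hashDev, dataBlockSize, hashBlockSize,
		dataBlocks, dataBlocks, digest, salt)
	if len(optParams) > 0 {
		table = fmt.Sprintf("%s %d %s", table, len(optParams), strings.Join(optParams, " "))
	}
	return "dmverity,,,ro," + table
}

func main() {
	// Values from the "combined typical" example in the comments above:
	// 24959 blocks of 4096 bytes => 199672 sectors.
	fmt.Println(buildDMVerityArg(24959, 4096, 4096, "/dev/sda", "/dev/sda",
		"4aa6e79866ee946ddbd9cddd6554bc6449272942fcc65934326817785a3bd374",
		"adc4956274489c936395bab046a2d476f21ef436e571ba53da2fdf3aee59bf0a", nil))
}
```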
@@ -44,7 +44,7 @@ delta.tar.gz initrd.img rootfs.tar.gz
 
 ### Containerd Shim
 
-For info on the [Runtime V2 API](https://github.com/containerd/containerd/blob/master/runtime/v2/README.md).
+For info on the [Runtime V2 API](https://github.com/containerd/containerd/blob/main/core/runtime/v2/README.md).
 
 Contrary to the typical Linux architecture of shim -> runc, the runhcs shim is used both to launch and manage the lifetime of containers.
 
@@ -63,10 +63,10 @@ func (process *Process) SystemID() string {
 }
 
 func (process *Process) processSignalResult(ctx context.Context, err error) (bool, error) {
-	switch err { //nolint:errorlint
-	case nil:
+	if err == nil {
 		return true, nil
-	case ErrVmcomputeOperationInvalidState, ErrComputeSystemDoesNotExist, ErrElementNotFound:
+	}
+	if errors.Is(err, ErrVmcomputeOperationInvalidState) || errors.Is(err, ErrComputeSystemDoesNotExist) || errors.Is(err, ErrElementNotFound) {
 		if !process.stopped() {
 			// The process should be gone, but we have not received the notification.
 			// After a second, force unblock the process wait to work around a possible
@@ -82,9 +82,8 @@ func (process *Process) processSignalResult(ctx context.Context, err error) (boo
 			}()
 		}
 		return false, nil
-	default:
-		return false, err
 	}
+	return false, err
 }
 
 // Signal signals the process with `options`.
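The switch-to-errors.Is rewrite above matters once errors arrive wrapped. A minimal, self-contained sketch of the difference (the sentinel here stands in for hcsshim's package-level error values; it is not taken from the vendored code):

```go
package main

import (
	"errors"
	"fmt"
)

var ErrElementNotFound = errors.New("element not found")

func main() {
	// A caller wraps the sentinel with context before returning it.
	err := fmt.Errorf("signal pid 42: %w", ErrElementNotFound)

	// Direct comparison (what `switch err { case ErrElementNotFound: }` does)
	// fails on the wrapped value.
	fmt.Println(err == ErrElementNotFound) // false

	// errors.Is walks the wrap chain, so the rewritten code still matches.
	fmt.Println(errors.Is(err, ErrElementNotFound)) // true
}
```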
@@ -24,4 +24,6 @@ type Chipset struct {
 
 	// LinuxKernelDirect - Added in v2.2 Builds >=181117
 	LinuxKernelDirect *LinuxKernelDirect `json:"LinuxKernelDirect,omitempty"`
+
+	FirmwareFile *FirmwareFile `json:"FirmwareFile,omitempty"`
 }
@@ -9,14 +9,6 @@
 
 package hcsschema
 
-const (
-	CimMountFlagNone         uint32 = 0x0
-	CimMountFlagChildOnly    uint32 = 0x1
-	CimMountFlagEnableDax    uint32 = 0x2
-	CimMountFlagCacheFiles   uint32 = 0x4
-	CimMountFlagCacheRegions uint32 = 0x8
-)
-
 type CimMount struct {
 	ImagePath      string `json:"ImagePath,omitempty"`
 	FileSystemName string `json:"FileSystemName,omitempty"`
vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/firmware.go (generated, vendored, new file, 8 lines)
@@ -0,0 +1,8 @@
+package hcsschema
+
+type FirmwareFile struct {
+	// Parameters is an experimental/pre-release field. The field itself or its
+	// behavior can change in future iterations of the schema. Avoid taking a hard
+	// dependency on this field.
+	Parameters []byte `json:"Parameters,omitempty"`
+}

(deleted file)
@@ -1,49 +0,0 @@
-/*
- * HCS API
- *
- * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)
- *
- * API version: 2.1
- * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git)
- */
-
-package hcsschema
-
-type Memory2 struct {
-	SizeInMB uint64 `json:"SizeInMB,omitempty"`
-
-	AllowOvercommit bool `json:"AllowOvercommit,omitempty"`
-
-	EnableHotHint bool `json:"EnableHotHint,omitempty"`
-
-	EnableColdHint bool `json:"EnableColdHint,omitempty"`
-
-	EnableEpf bool `json:"EnableEpf,omitempty"`
-
-	// EnableDeferredCommit is private in the schema. If regenerated need to add back.
-	EnableDeferredCommit bool `json:"EnableDeferredCommit,omitempty"`
-
-	// EnableColdDiscardHint if enabled, then the memory cold discard hint feature is exposed
-	// to the VM, allowing it to trim non-zeroed pages from the working set (if supported by
-	// the guest operating system).
-	EnableColdDiscardHint bool `json:"EnableColdDiscardHint,omitempty"`
-
-	// LowMmioGapInMB is the low MMIO region allocated below 4GB.
-	//
-	// TODO: This is pre-release support in schema 2.3. Need to add build number
-	// docs when a public build with this is out.
-	LowMMIOGapInMB uint64 `json:"LowMmioGapInMB,omitempty"`
-
-	// HighMmioBaseInMB is the high MMIO region allocated above 4GB (base and
-	// size).
-	//
-	// TODO: This is pre-release support in schema 2.3. Need to add build number
-	// docs when a public build with this is out.
-	HighMMIOBaseInMB uint64 `json:"HighMmioBaseInMB,omitempty"`
-
-	// HighMmioGapInMB is the high MMIO region.
-	//
-	// TODO: This is pre-release support in schema 2.3. Need to add build number
-	// docs when a public build with this is out.
-	HighMMIOGapInMB uint64 `json:"HighMmioGapInMB,omitempty"`
-}
vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/memory_backing_type.go (generated, vendored, new file, 21 lines)
@@ -0,0 +1,21 @@
+// Autogenerated code; DO NOT EDIT.
+
+/*
+ * Schema Open API
+ *
+ * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)
+ *
+ * API version: 2.4
+ * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git)
+ */
+
+package hcsschema
+
+type MemoryBackingType string
+
+// List of MemoryBackingType
+const (
+	MemoryBackingType_PHYSICAL MemoryBackingType = "Physical"
+	MemoryBackingType_VIRTUAL  MemoryBackingType = "Virtual"
+	MemoryBackingType_HYBRID   MemoryBackingType = "Hybrid"
+)
(new file)
@@ -0,0 +1,19 @@
+// Autogenerated code; DO NOT EDIT.
+
+/*
+ * Schema Open API
+ *
+ * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)
+ *
+ * API version: 2.4
+ * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git)
+ */
+
+package hcsschema
+
+type Numa struct {
+	VirtualNodeCount       uint8         `json:"VirtualNodeCount,omitempty"`
+	PreferredPhysicalNodes []int64       `json:"PreferredPhysicalNodes,omitempty"`
+	Settings               []NumaSetting `json:"Settings,omitempty"`
+	MaxSizePerNode         uint64        `json:"MaxSizePerNode,omitempty"`
+}
vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/numa_node.go (generated, vendored, new file, 17 lines)
@@ -0,0 +1,17 @@
+// Autogenerated code; DO NOT EDIT.
+
+/*
+ * Schema Open API
+ *
+ * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)
+ *
+ * API version: 2.4
+ * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git)
+ */
+
+package hcsschema
+
+type NumaNode struct {
+	VirtualNodeIndex  uint32 `json:"VirtualNodeIndex,omitempty"`
+	PhysicalNodeIndex uint32 `json:"PhysicalNodeIndex,omitempty"`
+}
vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/numa_node_memory.go (generated, vendored, new file, 19 lines)
@@ -0,0 +1,19 @@
+// Autogenerated code; DO NOT EDIT.
+
+/*
+ * Schema Open API
+ *
+ * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)
+ *
+ * API version: 2.4
+ * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git)
+ */
+
+package hcsschema
+
+type NumaNodeMemory struct {
+	// Total physical memory on on this physical NUMA node that is consumable by the VMs.
+	TotalConsumableMemoryInPages uint64 `json:"TotalConsumableMemoryInPages,omitempty"`
+	// Currently available physical memory on this physical NUMA node for the VMs.
+	AvailableMemoryInPages uint64 `json:"AvailableMemoryInPages,omitempty"`
+}
vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/numa_node_processor.go (generated, vendored, new file, 17 lines)
@@ -0,0 +1,17 @@
+// Autogenerated code; DO NOT EDIT.
+
+/*
+ * Schema Open API
+ *
+ * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)
+ *
+ * API version: 2.4
+ * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git)
+ */
+
+package hcsschema
+
+type NumaNodeProcessor struct {
+	TotalAssignedProcessors  uint32 `json:"TotalAssignedProcessors,omitempty"`
+	TotalAvailableProcessors uint32 `json:"TotalAvailableProcessors,omitempty"`
+}
vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/numa_processors.go (generated, vendored, new file, 21 lines)
@@ -0,0 +1,21 @@
// Autogenerated code; DO NOT EDIT.

/*
 * Schema Open API
 *
 * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)
 *
 * API version: 2.4
 * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git)
 */

package hcsschema

type NumaProcessors struct {
	CountPerNode  Range  `json:"count_per_node,omitempty"`
	NodePerSocket uint32 `json:"node_per_socket,omitempty"`
}

type Range struct {
	Max uint32 `json:"max,omitempty"`
}
vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/numa_setting.go (generated, vendored, new file, 21 lines)
@@ -0,0 +1,21 @@
// Autogenerated code; DO NOT EDIT.

/*
 * Schema Open API
 *
 * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)
 *
 * API version: 2.4
 * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git)
 */

package hcsschema

type NumaSetting struct {
	VirtualNodeNumber   uint32            `json:"VirtualNodeNumber,omitempty"`
	PhysicalNodeNumber  uint32            `json:"PhysicalNodeNumber,omitempty"`
	VirtualSocketNumber uint32            `json:"VirtualSocketNumber,omitempty"`
	CountOfProcessors   uint32            `json:"CountOfProcessors,omitempty"`
	CountOfMemoryBlocks uint64            `json:"CountOfMemoryBlocks,omitempty"`
	MemoryBackingType   MemoryBackingType `json:"MemoryBackingType,omitempty"`
}
@@ -1,23 +0,0 @@
/*
 * HCS API
 *
 * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)
 *
 * API version: 2.5
 * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git)
 */

package hcsschema

type Processor2 struct {
	Count int32 `json:"Count,omitempty"`

	Limit int32 `json:"Limit,omitempty"`

	Weight int32 `json:"Weight,omitempty"`

	ExposeVirtualizationExtensions bool `json:"ExposeVirtualizationExtensions,omitempty"`

	// An optional object that configures the CPU Group to which a Virtual Machine is going to bind to.
	CpuGroup *CpuGroup `json:"CpuGroup,omitempty"`
}
@@ -26,6 +26,8 @@ type Properties struct {
 	RuntimeId string `json:"RuntimeId,omitempty"`

+	SystemGUID string `json:"SystemGUID,omitempty"`
+
 	RuntimeTemplateId string `json:"RuntimeTemplateId,omitempty"`

 	State string `json:"State,omitempty"`
@@ -23,4 +23,5 @@ const (
 	PTICHeartbeatStatus PropertyType = "ICHeartbeatStatus"
 	PTProcessorTopology PropertyType = "ProcessorTopology"
 	PTCPUGroup          PropertyType = "CpuGroup"
+	PTSystemGUID        PropertyType = "SystemGUID"
 )
@@ -1,16 +1,18 @@
 // Autogenerated code; DO NOT EDIT.

 /*
- * HCS API
+ * Schema Open API
  *
  * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)
  *
- * API version: 2.1
+ * API version: 2.4
  * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git)
  */

 package hcsschema

 type Topology struct {
-	Memory *Memory2 `json:"Memory,omitempty"`
-
-	Processor *Processor2 `json:"Processor,omitempty"`
+	Memory    *VirtualMachineMemory    `json:"Memory,omitempty"`
+	Processor *VirtualMachineProcessor `json:"Processor,omitempty"`
+	Numa      *Numa                    `json:"Numa,omitempty"`
 }
@@ -1,36 +1,29 @@
 // Autogenerated code; DO NOT EDIT.

 /*
- * HCS API
+ * Schema Open API
  *
  * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)
  *
- * API version: 2.1
+ * API version: 2.4
  * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git)
  */

 package hcsschema

+// Configuration of a virtual machine, used during its creation to set up and/or use resources.
 type VirtualMachine struct {
-	// StopOnReset is private in the schema. If regenerated need to put back.
 	Version *Version `json:"Version,omitempty"`
+	// When set to true, the virtual machine will treat a reset as a stop, releasing resources and cleaning up state.
+	StopOnReset bool `json:"StopOnReset,omitempty"`
 	Chipset *Chipset `json:"Chipset,omitempty"`
 	ComputeTopology *Topology `json:"ComputeTopology,omitempty"`
 	Devices *Devices `json:"Devices,omitempty"`
 	GuestState *GuestState `json:"GuestState,omitempty"`
 	RestoreState *RestoreState `json:"RestoreState,omitempty"`
 	RegistryChanges *RegistryChanges `json:"RegistryChanges,omitempty"`
 	StorageQoS *StorageQoS `json:"StorageQoS,omitempty"`
-	GuestConnection *GuestConnection `json:"GuestConnection,omitempty"`
-	SecuritySettings *SecuritySettings `json:"SecuritySettings,omitempty"`
 	DebugOptions *DebugOptions `json:"DebugOptions,omitempty"`
+	GuestConnection *GuestConnection `json:"GuestConnection,omitempty"`
+	SecuritySettings *SecuritySettings `json:"SecuritySettings,omitempty"`
 }
vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/virtual_machine_memory.go (generated, vendored, new file, 33 lines)
@@ -0,0 +1,33 @@
// Autogenerated code; DO NOT EDIT.

/*
 * Schema Open API
 *
 * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)
 *
 * API version: 2.4
 * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git)
 */

package hcsschema

type VirtualMachineMemory struct {
	SizeInMB uint64             `json:"SizeInMB,omitempty"`
	Backing  *MemoryBackingType `json:"Backing,omitempty"`
	// If enabled, then the VM's memory is backed by the Windows pagefile rather than physically backed, statically allocated memory.
	AllowOvercommit bool `json:"AllowOvercommit,omitempty"`
	// If enabled, then the memory hot hint feature is exposed to the VM, allowing it to prefetch pages into its working set. (if supported by the guest operating system).
	EnableHotHint bool `json:"EnableHotHint,omitempty"`
	// If enabled, then the memory cold hint feature is exposed to the VM, allowing it to trim zeroed pages from its working set (if supported by the guest operating system).
	EnableColdHint bool `json:"EnableColdHint,omitempty"`
	// If enabled, then the memory cold discard hint feature is exposed to the VM, allowing it to trim non-zeroed pages from the working set (if supported by the guest operating system).
	EnableColdDiscardHint bool `json:"EnableColdDiscardHint,omitempty"`
	// If enabled, then commit is not charged for each backing page until first access.
	EnableDeferredCommit bool `json:"EnableDeferredCommit,omitempty"`
	// Low MMIO region allocated below 4GB
	LowMMIOGapInMB uint64 `json:"LowMmioGapInMB,omitempty"`
	// High MMIO region allocated above 4GB (base and size)
	HighMMIOBaseInMB uint64           `json:"HighMmioBaseInMB,omitempty"`
	HighMMIOGapInMB  uint64           `json:"HighMmioGapInMB,omitempty"`
	SlitType         *VirtualSlitType `json:"SlitType,omitempty"`
}
vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/virtual_machine_processor.go (generated, vendored, new file, 21 lines)
@@ -0,0 +1,21 @@
// Autogenerated code; DO NOT EDIT.

/*
 * Schema Open API
 *
 * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)
 *
 * API version: 2.4
 * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git)
 */

package hcsschema

type VirtualMachineProcessor struct {
	Count                  uint32          `json:"Count,omitempty"`
	Limit                  uint64          `json:"Limit,omitempty"`
	Weight                 uint64          `json:"Weight,omitempty"`
	Reservation            uint64          `json:"Reservation,omitempty"`
	CpuGroup               *CpuGroup       `json:"CpuGroup,omitempty"`
	NumaProcessorsSettings *NumaProcessors `json:"NumaProcessorsSettings,omitempty"`
}
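The new Numa* and VirtualMachine* schema types above are plain JSON-tagged structs that are serialized into the compute-system document HCS consumes. A minimal, standalone sketch of how the NUMA topology serializes, using trimmed stand-in types rather than the full hcsschema definitions, and showing how omitempty keeps the section out of documents that never set it:

package main

import (
	"encoding/json"
	"fmt"
)

// Trimmed stand-ins for the vendored hcsschema structs above.
type NumaSetting struct {
	VirtualNodeNumber   uint32 `json:"VirtualNodeNumber,omitempty"`
	PhysicalNodeNumber  uint32 `json:"PhysicalNodeNumber,omitempty"`
	CountOfProcessors   uint32 `json:"CountOfProcessors,omitempty"`
	CountOfMemoryBlocks uint64 `json:"CountOfMemoryBlocks,omitempty"`
}

type Numa struct {
	VirtualNodeCount uint8         `json:"VirtualNodeCount,omitempty"`
	Settings         []NumaSetting `json:"Settings,omitempty"`
}

type Topology struct {
	Numa *Numa `json:"Numa,omitempty"`
}

func main() {
	t := Topology{Numa: &Numa{
		VirtualNodeCount: 2,
		Settings: []NumaSetting{
			{VirtualNodeNumber: 0, PhysicalNodeNumber: 0, CountOfProcessors: 2, CountOfMemoryBlocks: 1024},
			{VirtualNodeNumber: 1, PhysicalNodeNumber: 1, CountOfProcessors: 2, CountOfMemoryBlocks: 1024},
		},
	}}
	b, _ := json.MarshalIndent(t, "", "  ")
	fmt.Println(string(b)) // Numa is omitted entirely when the pointer is nil
}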
@@ -9,8 +9,9 @@
 package hcsschema

-// TODO: This is pre-release support in schema 2.3. Need to add build number
+// TODO: PropagateNumaAffinity is pre-release/experimental field in schema 2.11. Need to add build number
 // docs when a public build with this is out.
 type VirtualPciDevice struct {
 	Functions []VirtualPciFunction `json:",omitempty"`
+	PropagateNumaAffinity *bool `json:"PropagateNumaAffinity,omitempty"`
 }
vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/virtual_slit_type.go (generated, vendored, new file, 23 lines)
@@ -0,0 +1,23 @@
// Autogenerated code; DO NOT EDIT.

/*
 * Schema Open API
 *
 * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)
 *
 * API version: 2.4
 * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git)
 */

package hcsschema

// VirtualSlitType : Indicates if a virtual SLIT should ne enabled for a VM and the type of virtual SLIT to be enabled.
type VirtualSlitType string

// List of VirtualSlitType
const (
	VirtualSlitType_NONE                       VirtualSlitType = "None"
	VirtualSlitType_FIRMWARE                   VirtualSlitType = "Firmware"
	VirtualSlitType_MEASURED                   VirtualSlitType = "Measured"
	VirtualSlitType_FIRMWARE_FALLBACK_MEASURED VirtualSlitType = "FirmwareFallbackMeasured"
)
vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/windows_crash_reporting.go (generated, vendored, 2 lines changed)
@@ -13,4 +13,6 @@ type WindowsCrashReporting struct {
 	DumpFileName string `json:"DumpFileName,omitempty"`

 	MaxDumpSize int64 `json:"MaxDumpSize,omitempty"`
+
+	DumpType string `json:"DumpType,omitempty"`
 }
@@ -238,9 +238,10 @@ func (computeSystem *System) Shutdown(ctx context.Context) error {
 	resultJSON, err := vmcompute.HcsShutdownComputeSystem(ctx, computeSystem.handle, "")
 	events := processHcsResult(ctx, resultJSON)
-	switch err { //nolint:errorlint
-	case nil, ErrVmcomputeAlreadyStopped, ErrComputeSystemDoesNotExist, ErrVmcomputeOperationPending:
-	default:
+	if err != nil &&
+		!errors.Is(err, ErrVmcomputeAlreadyStopped) &&
+		!errors.Is(err, ErrComputeSystemDoesNotExist) &&
+		!errors.Is(err, ErrVmcomputeOperationPending) {
 		return makeSystemError(computeSystem, operation, err, events)
 	}
 	return nil
@@ -259,9 +260,10 @@ func (computeSystem *System) Terminate(ctx context.Context) error {
 	resultJSON, err := vmcompute.HcsTerminateComputeSystem(ctx, computeSystem.handle, "")
 	events := processHcsResult(ctx, resultJSON)
-	switch err { //nolint:errorlint
-	case nil, ErrVmcomputeAlreadyStopped, ErrComputeSystemDoesNotExist, ErrVmcomputeOperationPending:
-	default:
+	if err != nil &&
+		!errors.Is(err, ErrVmcomputeAlreadyStopped) &&
+		!errors.Is(err, ErrComputeSystemDoesNotExist) &&
+		!errors.Is(err, ErrVmcomputeOperationPending) {
 		return makeSystemError(computeSystem, operation, err, events)
 	}
 	return nil
@@ -279,14 +281,13 @@ func (computeSystem *System) waitBackground() {
 	span.AddAttributes(trace.StringAttribute("cid", computeSystem.id))

 	err := waitForNotification(ctx, computeSystem.callbackNumber, hcsNotificationSystemExited, nil)
-	switch err { //nolint:errorlint
-	case nil:
+	if err == nil {
 		log.G(ctx).Debug("system exited")
-	case ErrVmcomputeUnexpectedExit:
+	} else if errors.Is(err, ErrVmcomputeUnexpectedExit) {
 		log.G(ctx).Debug("unexpected system exit")
 		computeSystem.exitError = makeSystemError(computeSystem, operation, err, nil)
 		err = nil
-	default:
+	} else {
 		err = makeSystemError(computeSystem, operation, err, nil)
 	}
 	computeSystem.closedWaitOnce.Do(func() {
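Shutdown, Terminate, and waitBackground all replace value comparisons on sentinel errors (switch err { case ErrX: ... }) with errors.Is, which also matches sentinels that arrive wrapped via fmt.Errorf's %w verb; the //nolint:errorlint suppressions become unnecessary. A minimal sketch of the difference, using a hypothetical sentinel rather than hcsshim's real ErrVmcompute* values:

package main

import (
	"errors"
	"fmt"
)

// Hypothetical sentinel standing in for hcsshim's error values.
var errAlreadyStopped = errors.New("already stopped")

func shutdown() error {
	// Lower layers often wrap sentinels with extra context.
	return fmt.Errorf("shutdown failed: %w", errAlreadyStopped)
}

func main() {
	err := shutdown()
	// A value switch (the old pattern) misses the wrapped sentinel:
	switch err {
	case errAlreadyStopped:
		fmt.Println("value switch matched") // not reached
	}
	// errors.Is (the new pattern) unwraps and matches:
	if errors.Is(err, errAlreadyStopped) {
		fmt.Println("errors.Is matched") // printed
	}
}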
@@ -47,7 +47,7 @@ func (nnvManagementMacList *HNSNnvManagementMacList) Set() (*HNSNnvManagementMac
 func GetNnvManagementMacAddressList() (*HNSNnvManagementMacList, error) {
 	operation := "Get"
 	title := "hcsshim::nnvManagementMacList::" + operation
-	logrus.Debugf(title)
+	logrus.Debug(title)
 	return HNSNnvManagementMacRequest("GET", "", "")
 }
@@ -55,6 +55,6 @@ func GetNnvManagementMacAddressList() (*HNSNnvManagementMacList, error) {
 func DeleteNnvManagementMacAddressList() (*HNSNnvManagementMacList, error) {
 	operation := "Delete"
 	title := "hcsshim::nnvManagementMacList::" + operation
-	logrus.Debugf(title)
+	logrus.Debug(title)
 	return HNSNnvManagementMacRequest("DELETE", "", "")
 }
@@ -22,9 +22,8 @@ import (
 // of the job and a mutex for synchronized handle access.
 type JobObject struct {
 	handle windows.Handle
-	// All accesses to this MUST be done atomically except in `Open` as the object
-	// is being created in the function. 1 signifies that this job is currently a silo.
-	silo uint32
+	// silo signifies that this job is currently a silo.
+	silo atomic.Bool
 	mq *queue.MessageQueue
 	handleLock sync.RWMutex
 }
@@ -204,9 +203,7 @@ func Open(ctx context.Context, options *Options) (_ *JobObject, err error) {
 		handle: jobHandle,
 	}

-	if isJobSilo(jobHandle) {
-		job.silo = 1
-	}
+	job.silo.Store(isJobSilo(jobHandle))

 	// If the IOCP we'll be using to receive messages for all jobs hasn't been
 	// created, create it and start polling.
@@ -479,7 +476,7 @@ func (job *JobObject) ApplyFileBinding(root, target string, readOnly bool) error
 		return ErrAlreadyClosed
 	}

-	if !job.isSilo() {
+	if !job.silo.Load() {
 		return ErrNotSilo
 	}
@@ -546,7 +543,7 @@ func (job *JobObject) PromoteToSilo() error {
 		return ErrAlreadyClosed
 	}

-	if job.isSilo() {
+	if job.silo.Load() {
 		return nil
 	}
@@ -569,15 +566,10 @@ func (job *JobObject) PromoteToSilo() error {
 		return fmt.Errorf("failed to promote job to silo: %w", err)
 	}

-	atomic.StoreUint32(&job.silo, 1)
+	job.silo.Store(true)
 	return nil
 }

-// isSilo returns if the job object is a silo.
-func (job *JobObject) isSilo() bool {
-	return atomic.LoadUint32(&job.silo) == 1
-}
-
 // QueryPrivateWorkingSet returns the private working set size for the job. This is calculated by adding up the
 // private working set for every process running in the job.
 func (job *JobObject) QueryPrivateWorkingSet() (uint64, error) {
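The silo field moves from a manually-encoded uint32 flag with atomic.StoreUint32/LoadUint32 to atomic.Bool (added in Go 1.19), which removes the 0/1 convention and makes the standalone isSilo helper unnecessary. A small illustrative sketch of the two styles side by side:

package main

import (
	"fmt"
	"sync/atomic"
)

func main() {
	// Old style: a uint32 with a 0/1 convention and free functions.
	var old uint32
	atomic.StoreUint32(&old, 1)
	fmt.Println(atomic.LoadUint32(&old) == 1) // true

	// New style (Go 1.19+): atomic.Bool carries the same guarantees with
	// no encoding convention, and the value cannot be read or written
	// except through the atomic methods.
	var flag atomic.Bool
	flag.Store(true)
	fmt.Println(flag.Load()) // true
}

The same swap appears again below in the log scrubbing code, where _scrub int32 becomes _scrub atomic.Bool.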
@@ -150,6 +150,7 @@ func (job *JobObject) SetCPUAffinity(affinityBitMask uint64) error {
 		return fmt.Errorf("affinity bitmask (%d) exceeds max allowable value (%d)", affinityBitMask, maxUintptr)
 	}

+	// CodeQL [SM03681] checked against max value above (there is no math.MaxUintPtr ...)
 	info.BasicLimitInformation.Affinity = uintptr(affinityBitMask)
 	return job.setExtendedInformation(info)
 }
@@ -4,7 +4,6 @@ import (
 	"context"

 	"github.com/sirupsen/logrus"
-	"go.opencensus.io/trace"
 )

 type entryContextKeyType int
@@ -20,13 +19,13 @@ var (
 	// Instead, use `L.With*` or `L.Dup()`. Or `G(context.Background())`.
 	L = logrus.NewEntry(logrus.StandardLogger())

-	// G is an alias for GetEntry
+	// G is an alias for GetEntry.
 	G = GetEntry

-	// S is an alias for SetEntry
+	// S is an alias for SetEntry.
 	S = SetEntry

-	// U is an alias for UpdateContext
+	// U is an alias for UpdateContext.
 	U = UpdateContext
 )
@@ -83,7 +82,7 @@ func UpdateContext(ctx context.Context) context.Context {
 // WithContext returns a context that contains the provided log entry.
 // The entry can be extracted with `GetEntry` (`G`)
 //
-// The entry in the context is a copy of `entry` (generated by `entry.WithContext`)
+// The entry in the context is a copy of `entry` (generated by `entry.WithContext`).
 func WithContext(ctx context.Context, entry *logrus.Entry) (context.Context, *logrus.Entry) {
 	// regardless of the order, entry.Context != GetEntry(ctx)
 	// here, the returned entry will reference the supplied context
@@ -93,25 +92,6 @@ func WithContext(ctx context.Context, entry *logrus.Entry) (context.Context, *lo
 	return ctx, entry
 }

-// Copy extracts the tracing Span and logging entry from the src Context, if they
-// exist, and adds them to the dst Context.
-//
-// This is useful to share tracing and logging between contexts, but not the
-// cancellation. For example, if the src Context has been cancelled but cleanup
-// operations triggered by the cancellation require a non-cancelled context to
-// execute.
-func Copy(dst context.Context, src context.Context) context.Context {
-	if s := trace.FromContext(src); s != nil {
-		dst = trace.NewContext(dst, s)
-	}
-
-	if e := fromContext(src); e != nil {
-		dst, _ = WithContext(dst, e)
-	}
-
-	return dst
-}
-
 func fromContext(ctx context.Context) *logrus.Entry {
 	e, _ := ctx.Value(_entryContextKey).(*logrus.Entry)
 	return e
@@ -103,9 +103,7 @@ func encode(v interface{}) (_ []byte, err error) {
 	if jErr := enc.Encode(v); jErr != nil {
 		if err != nil {
-			// TODO (go1.20): use multierror via fmt.Errorf("...: %w; ...: %w", ...)
-			//nolint:errorlint // non-wrapping format verb for fmt.Errorf
-			return nil, fmt.Errorf("protojson encoding: %v; json encoding: %w", err, jErr)
+			return nil, fmt.Errorf("protojson encoding: %w; json encoding: %w", err, jErr)
 		}
 		return nil, fmt.Errorf("json encoding: %w", jErr)
 	}
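The rewritten return resolves the old TODO: since Go 1.20, fmt.Errorf accepts multiple %w verbs and the resulting error wraps every operand, so both the protojson and json failures remain matchable. A small sketch with hypothetical errors:

package main

import (
	"errors"
	"fmt"
)

var (
	errA = errors.New("a failed")
	errB = errors.New("b failed")
)

func main() {
	// Go 1.20+: both operands are wrapped, not just the last one.
	err := fmt.Errorf("first: %w; second: %w", errA, errB)
	fmt.Println(errors.Is(err, errA), errors.Is(err, errB)) // true true
}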
@@ -22,23 +22,14 @@ var (
 	// case sensitive keywords, so "env" is not a substring on "Environment"
 	_scrubKeywords = [][]byte{[]byte("env"), []byte("Environment")}

-	_scrub int32
+	_scrub atomic.Bool
 )

 // SetScrubbing enables scrubbing
-func SetScrubbing(enable bool) {
-	v := int32(0) // cant convert from bool to int32 directly
-	if enable {
-		v = 1
-	}
-	atomic.StoreInt32(&_scrub, v)
-}
+func SetScrubbing(enable bool) { _scrub.Store(enable) }

 // IsScrubbingEnabled checks if scrubbing is enabled
-func IsScrubbingEnabled() bool {
-	v := atomic.LoadInt32(&_scrub)
-	return v != 0
-}
+func IsScrubbingEnabled() bool { return _scrub.Load() }

 // ScrubProcessParameters scrubs HCS Create Process requests with config parameters of
 // type internal/hcs/schema2.ScrubProcessParameters (aka hcsshema.ScrubProcessParameters)
@@ -104,7 +104,7 @@ func execute(ctx gcontext.Context, timeout time.Duration, f func() error) error
 	}()
 	select {
 	case <-ctx.Done():
-		if ctx.Err() == gcontext.DeadlineExceeded { //nolint:errorlint
+		if ctx.Err() == gcontext.DeadlineExceeded {
 			log.G(ctx).WithField(logfields.Timeout, trueTimeout).
 				Warning("Syscall did not complete within operation timeout. This may indicate a platform issue. " +
 					"If it appears to be making no forward progress, obtain the stacks and see if there is a syscall " +
@@ -34,6 +34,7 @@ const (
 	UtilityVMPath       = `UtilityVM`
 	UtilityVMFilesPath  = `UtilityVM\Files`
 	RegFilesPath        = `Files\Windows\System32\config`
+	BootDirRelativePath = `\EFI\Microsoft\Boot`
 	BcdFilePath         = `UtilityVM\Files\EFI\Microsoft\Boot\BCD`
 	BootMgrFilePath     = `UtilityVM\Files\EFI\Microsoft\Boot\bootmgfw.efi`
 	ContainerBaseVhd    = `blank-base.vhdx`
@@ -32,10 +32,16 @@ type CimFsFileMetadata struct {
 	EACount uint32
 }

+type CimFsImagePath struct {
+	ImageDir  *uint16
+	ImageName *uint16
+}
+
 //sys CimMountImage(imagePath string, fsName string, flags uint32, volumeID *g) (hr error) = cimfs.CimMountImage?
 //sys CimDismountImage(volumeID *g) (hr error) = cimfs.CimDismountImage?

 //sys CimCreateImage(imagePath string, oldFSName *uint16, newFSName *uint16, cimFSHandle *FsHandle) (hr error) = cimfs.CimCreateImage?
+//sys CimCreateImage2(imagePath string, flags uint32, oldFSName *uint16, newFSName *uint16, cimFSHandle *FsHandle) (hr error) = cimfs.CimCreateImage2?
 //sys CimCloseImage(cimFSHandle FsHandle) = cimfs.CimCloseImage?
 //sys CimCommitImage(cimFSHandle FsHandle) (hr error) = cimfs.CimCommitImage?
@@ -45,3 +51,8 @@ type CimFsFileMetadata struct {
 //sys CimDeletePath(cimFSHandle FsHandle, path string) (hr error) = cimfs.CimDeletePath?
 //sys CimCreateHardLink(cimFSHandle FsHandle, newPath string, oldPath string) (hr error) = cimfs.CimCreateHardLink?
 //sys CimCreateAlternateStream(cimFSHandle FsHandle, path string, size uint64, cimStreamHandle *StreamHandle) (hr error) = cimfs.CimCreateAlternateStream?
+//sys CimAddFsToMergedImage(cimFSHandle FsHandle, path string) (hr error) = cimfs.CimAddFsToMergedImage?
+//sys CimAddFsToMergedImage2(cimFSHandle FsHandle, path string, flags uint32) (hr error) = cimfs.CimAddFsToMergedImage2?
+//sys CimMergeMountImage(numCimPaths uint32, backingImagePaths *CimFsImagePath, flags uint32, volumeID *g) (hr error) = cimfs.CimMergeMountImage?
+//sys CimTombstoneFile(cimFSHandle FsHandle, path string) (hr error) = cimfs.CimTombstoneFile?
+//sys CimCreateMergeLink(cimFSHandle FsHandle, newPath string, oldPath string) (hr error) = cimfs.CimCreateMergeLink?
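The //sys lines above are not ordinary comments: they are directives consumed by hcsshim's syscall generator (via go generate), which emits the zsyscall_windows.go wrappers that follow in this diff (lazy proc lookup, UTF-16 string conversion, HRESULT-to-errno mapping). The trailing ? marks the export as optional, which is why every generated wrapper calls Find() before invoking the proc. A rough, hand-written sketch of the same lazy-lookup pattern; Windows-only, with the DLL and export name taken from the directives above:

//go:build windows

package main

import (
	"fmt"
	"syscall"
)

var (
	modcimfs     = syscall.NewLazyDLL("cimfs.dll")
	procCimMount = modcimfs.NewProc("CimMountImage")
)

// cimMountAvailable reports whether the optional export can be resolved.
// Find returns an error if cimfs.dll or the export is missing, which is
// why the generated wrappers check it before calling SyscallN.
func cimMountAvailable() bool {
	return procCimMount.Find() == nil
}

func main() {
	fmt.Println("CimMountImage available:", cimMountAvailable())
}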
@@ -53,6 +53,8 @@ var (
 	procCM_Get_Device_ID_ListA      = modcfgmgr32.NewProc("CM_Get_Device_ID_ListA")
 	procCM_Get_Device_ID_List_SizeA = modcfgmgr32.NewProc("CM_Get_Device_ID_List_SizeA")
 	procCM_Locate_DevNodeW          = modcfgmgr32.NewProc("CM_Locate_DevNodeW")
+	procCimAddFsToMergedImage       = modcimfs.NewProc("CimAddFsToMergedImage")
+	procCimAddFsToMergedImage2      = modcimfs.NewProc("CimAddFsToMergedImage2")
 	procCimCloseImage               = modcimfs.NewProc("CimCloseImage")
 	procCimCloseStream              = modcimfs.NewProc("CimCloseStream")
 	procCimCommitImage              = modcimfs.NewProc("CimCommitImage")
@@ -60,9 +62,13 @@ var (
 	procCimCreateFile       = modcimfs.NewProc("CimCreateFile")
 	procCimCreateHardLink   = modcimfs.NewProc("CimCreateHardLink")
 	procCimCreateImage      = modcimfs.NewProc("CimCreateImage")
+	procCimCreateImage2     = modcimfs.NewProc("CimCreateImage2")
+	procCimCreateMergeLink  = modcimfs.NewProc("CimCreateMergeLink")
 	procCimDeletePath       = modcimfs.NewProc("CimDeletePath")
 	procCimDismountImage    = modcimfs.NewProc("CimDismountImage")
+	procCimMergeMountImage  = modcimfs.NewProc("CimMergeMountImage")
 	procCimMountImage       = modcimfs.NewProc("CimMountImage")
+	procCimTombstoneFile    = modcimfs.NewProc("CimTombstoneFile")
 	procCimWriteStream      = modcimfs.NewProc("CimWriteStream")
 	procSetJobCompartmentId = modiphlpapi.NewProc("SetJobCompartmentId")
 	procClosePseudoConsole  = modkernel32.NewProc("ClosePseudoConsole")
@@ -181,6 +187,54 @@ func _CMLocateDevNode(pdnDevInst *uint32, pDeviceID *uint16, uFlags uint32) (hr
 	return
 }

+func CimAddFsToMergedImage(cimFSHandle FsHandle, path string) (hr error) {
+	var _p0 *uint16
+	_p0, hr = syscall.UTF16PtrFromString(path)
+	if hr != nil {
+		return
+	}
+	return _CimAddFsToMergedImage(cimFSHandle, _p0)
+}
+
+func _CimAddFsToMergedImage(cimFSHandle FsHandle, path *uint16) (hr error) {
+	hr = procCimAddFsToMergedImage.Find()
+	if hr != nil {
+		return
+	}
+	r0, _, _ := syscall.SyscallN(procCimAddFsToMergedImage.Addr(), uintptr(cimFSHandle), uintptr(unsafe.Pointer(path)))
+	if int32(r0) < 0 {
+		if r0&0x1fff0000 == 0x00070000 {
+			r0 &= 0xffff
+		}
+		hr = syscall.Errno(r0)
+	}
+	return
+}
+
+func CimAddFsToMergedImage2(cimFSHandle FsHandle, path string, flags uint32) (hr error) {
+	var _p0 *uint16
+	_p0, hr = syscall.UTF16PtrFromString(path)
+	if hr != nil {
+		return
+	}
+	return _CimAddFsToMergedImage2(cimFSHandle, _p0, flags)
+}
+
+func _CimAddFsToMergedImage2(cimFSHandle FsHandle, path *uint16, flags uint32) (hr error) {
+	hr = procCimAddFsToMergedImage2.Find()
+	if hr != nil {
+		return
+	}
+	r0, _, _ := syscall.SyscallN(procCimAddFsToMergedImage2.Addr(), uintptr(cimFSHandle), uintptr(unsafe.Pointer(path)), uintptr(flags))
+	if int32(r0) < 0 {
+		if r0&0x1fff0000 == 0x00070000 {
+			r0 &= 0xffff
+		}
+		hr = syscall.Errno(r0)
+	}
+	return
+}
+
 func CimCloseImage(cimFSHandle FsHandle) (err error) {
 	err = procCimCloseImage.Find()
 	if err != nil {
@@ -321,6 +375,59 @@ func _CimCreateImage(imagePath *uint16, oldFSName *uint16, newFSName *uint16, ci
 	return
 }

+func CimCreateImage2(imagePath string, flags uint32, oldFSName *uint16, newFSName *uint16, cimFSHandle *FsHandle) (hr error) {
+	var _p0 *uint16
+	_p0, hr = syscall.UTF16PtrFromString(imagePath)
+	if hr != nil {
+		return
+	}
+	return _CimCreateImage2(_p0, flags, oldFSName, newFSName, cimFSHandle)
+}
+
+func _CimCreateImage2(imagePath *uint16, flags uint32, oldFSName *uint16, newFSName *uint16, cimFSHandle *FsHandle) (hr error) {
+	hr = procCimCreateImage2.Find()
+	if hr != nil {
+		return
+	}
+	r0, _, _ := syscall.SyscallN(procCimCreateImage2.Addr(), uintptr(unsafe.Pointer(imagePath)), uintptr(flags), uintptr(unsafe.Pointer(oldFSName)), uintptr(unsafe.Pointer(newFSName)), uintptr(unsafe.Pointer(cimFSHandle)))
+	if int32(r0) < 0 {
+		if r0&0x1fff0000 == 0x00070000 {
+			r0 &= 0xffff
+		}
+		hr = syscall.Errno(r0)
+	}
+	return
+}
+
+func CimCreateMergeLink(cimFSHandle FsHandle, newPath string, oldPath string) (hr error) {
+	var _p0 *uint16
+	_p0, hr = syscall.UTF16PtrFromString(newPath)
+	if hr != nil {
+		return
+	}
+	var _p1 *uint16
+	_p1, hr = syscall.UTF16PtrFromString(oldPath)
+	if hr != nil {
+		return
+	}
+	return _CimCreateMergeLink(cimFSHandle, _p0, _p1)
+}
+
+func _CimCreateMergeLink(cimFSHandle FsHandle, newPath *uint16, oldPath *uint16) (hr error) {
+	hr = procCimCreateMergeLink.Find()
+	if hr != nil {
+		return
+	}
+	r0, _, _ := syscall.SyscallN(procCimCreateMergeLink.Addr(), uintptr(cimFSHandle), uintptr(unsafe.Pointer(newPath)), uintptr(unsafe.Pointer(oldPath)))
+	if int32(r0) < 0 {
+		if r0&0x1fff0000 == 0x00070000 {
+			r0 &= 0xffff
+		}
+		hr = syscall.Errno(r0)
+	}
+	return
+}
+
 func CimDeletePath(cimFSHandle FsHandle, path string) (hr error) {
 	var _p0 *uint16
 	_p0, hr = syscall.UTF16PtrFromString(path)
@@ -360,6 +467,21 @@ func CimDismountImage(volumeID *g) (hr error) {
 	return
 }

+func CimMergeMountImage(numCimPaths uint32, backingImagePaths *CimFsImagePath, flags uint32, volumeID *g) (hr error) {
+	hr = procCimMergeMountImage.Find()
+	if hr != nil {
+		return
+	}
+	r0, _, _ := syscall.SyscallN(procCimMergeMountImage.Addr(), uintptr(numCimPaths), uintptr(unsafe.Pointer(backingImagePaths)), uintptr(flags), uintptr(unsafe.Pointer(volumeID)))
+	if int32(r0) < 0 {
+		if r0&0x1fff0000 == 0x00070000 {
+			r0 &= 0xffff
+		}
+		hr = syscall.Errno(r0)
+	}
+	return
+}
+
 func CimMountImage(imagePath string, fsName string, flags uint32, volumeID *g) (hr error) {
 	var _p0 *uint16
 	_p0, hr = syscall.UTF16PtrFromString(imagePath)
@@ -389,6 +511,30 @@ func _CimMountImage(imagePath *uint16, fsName *uint16, flags uint32, volumeID *g
 	return
 }

+func CimTombstoneFile(cimFSHandle FsHandle, path string) (hr error) {
+	var _p0 *uint16
+	_p0, hr = syscall.UTF16PtrFromString(path)
+	if hr != nil {
+		return
+	}
+	return _CimTombstoneFile(cimFSHandle, _p0)
+}
+
+func _CimTombstoneFile(cimFSHandle FsHandle, path *uint16) (hr error) {
+	hr = procCimTombstoneFile.Find()
+	if hr != nil {
+		return
+	}
+	r0, _, _ := syscall.SyscallN(procCimTombstoneFile.Addr(), uintptr(cimFSHandle), uintptr(unsafe.Pointer(path)))
+	if int32(r0) < 0 {
+		if r0&0x1fff0000 == 0x00070000 {
+			r0 &= 0xffff
+		}
+		hr = syscall.Errno(r0)
+	}
+	return
+}
+
 func CimWriteStream(cimStreamHandle StreamHandle, buffer uintptr, bufferSize uint32) (hr error) {
 	hr = procCimWriteStream.Find()
 	if hr != nil {
@@ -3,7 +3,8 @@ package osversion
 // List of stable ABI compliant ltsc releases
 // Note: List must be sorted in ascending order
 var compatLTSCReleases = []uint16{
-	V21H2Server,
+	LTSC2022,
+	LTSC2025,
 }

 // CheckHostAndContainerCompat checks if given host and container
@@ -20,16 +21,25 @@ func CheckHostAndContainerCompat(host, ctr OSVersion) bool {
 	}

 	// If host is < WS 2022, exact version match is required
-	if host.Build < V21H2Server {
+	if host.Build < LTSC2022 {
 		return host.Build == ctr.Build
 	}

-	var supportedLtscRelease uint16
+	// Find the latest LTSC version that is earlier than the host version.
+	// This is the earliest version of container that the host can run.
+	//
+	// If the host version is an LTSC, then it supports compatibility with
+	// everything from the previous LTSC up to itself, so we want supportedLTSCRelease
+	// to be the previous entry.
+	//
+	// If no match is found, then we know that the host is LTSC2022 exactly,
+	// since we already checked that it's not less than LTSC2022.
+	var supportedLTSCRelease uint16 = LTSC2022
 	for i := len(compatLTSCReleases) - 1; i >= 0; i-- {
-		if host.Build >= compatLTSCReleases[i] {
-			supportedLtscRelease = compatLTSCReleases[i]
+		if host.Build > compatLTSCReleases[i] {
+			supportedLTSCRelease = compatLTSCReleases[i]
 			break
 		}
 	}
-	return ctr.Build >= supportedLtscRelease && ctr.Build <= host.Build
+	return supportedLTSCRelease <= ctr.Build && ctr.Build <= host.Build
 }
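The new comment block explains the intent; a standalone worked example makes the boundary behavior concrete. This sketch re-implements the updated loop with the concrete build numbers (the diff below defines LTSC2025 as build 26100; LTSC2022 corresponding to build 20348 is an assumption based on hcsshim's existing V21H2Server constant):

package main

import "fmt"

const (
	ltsc2022 uint16 = 20348 // assumed value of LTSC2022/V21H2Server
	ltsc2025 uint16 = 26100 // defined in the diff below
)

var compatLTSCReleases = []uint16{ltsc2022, ltsc2025}

// checkCompat mirrors the updated CheckHostAndContainerCompat logic.
func checkCompat(host, ctr uint16) bool {
	if host < ltsc2022 {
		return host == ctr // pre-WS2022 hosts need an exact match
	}
	// Strictly-greater comparison: an LTSC host keeps the *previous*
	// LTSC as its floor, so LTSC-to-LTSC upgrades stay compatible.
	supported := ltsc2022
	for i := len(compatLTSCReleases) - 1; i >= 0; i-- {
		if host > compatLTSCReleases[i] {
			supported = compatLTSCReleases[i]
			break
		}
	}
	return supported <= ctr && ctr <= host
}

func main() {
	fmt.Println(checkCompat(26100, 20348)) // true: LTSC2025 host runs LTSC2022 containers
	fmt.Println(checkCompat(20348, 26100)) // false: container newer than the host
}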
@@ -81,4 +81,11 @@ const (
 	// V22H2Win11 corresponds to Windows 11 (2022 Update).
 	V22H2Win11 = 22621
+
+	// V23H2 is the 23H2 release in the Windows Server annual channel.
+	V23H2 = 25398
+
+	// Windows Server 2025 build 26100
+	V25H1Server = 26100
+	LTSC2025    = V25H1Server
 )
@@ -28,12 +28,12 @@ env:
     ####
     # GCE project where images live
     IMAGE_PROJECT: "libpod-218412"
-    FEDORA_NAME: "fedora-41"
-    PRIOR_FEDORA_NAME: "fedora-40"
+    FEDORA_NAME: "fedora-42"
+    PRIOR_FEDORA_NAME: "fedora-41"
     DEBIAN_NAME: "debian-13"

     # Image identifiers
-    IMAGE_SUFFIX: "c20250324t111922z-f41f40d13"
+    IMAGE_SUFFIX: "c20250422t130822z-f42f41d13"
     FEDORA_CACHE_IMAGE_NAME: "fedora-${IMAGE_SUFFIX}"
     PRIOR_FEDORA_CACHE_IMAGE_NAME: "prior-fedora-${IMAGE_SUFFIX}"
     DEBIAN_CACHE_IMAGE_NAME: "debian-${IMAGE_SUFFIX}"
@@ -122,7 +122,7 @@ vendor_task:

     # Runs within Cirrus's "community cluster"
     container:
-        image: docker.io/library/golang:1.23
+        image: docker.io/library/golang:1.23.3
         cpu: 1
         memory: 1
@@ -207,11 +207,10 @@ integration_task:
           DISTRO_NV: "${FEDORA_NAME}"
           IMAGE_NAME: "${FEDORA_CACHE_IMAGE_NAME}"
           STORAGE_DRIVER: 'vfs'
-        # Disabled until we update to f41/42 as f40 does not have go 1.22
-        # - env:
-        #     DISTRO_NV: "${PRIOR_FEDORA_NAME}"
-        #     IMAGE_NAME: "${PRIOR_FEDORA_CACHE_IMAGE_NAME}"
-        #     STORAGE_DRIVER: 'vfs'
+      - env:
+          DISTRO_NV: "${PRIOR_FEDORA_NAME}"
+          IMAGE_NAME: "${PRIOR_FEDORA_CACHE_IMAGE_NAME}"
+          STORAGE_DRIVER: 'vfs'
       - env:
           DISTRO_NV: "${DEBIAN_NAME}"
           IMAGE_NAME: "${DEBIAN_CACHE_IMAGE_NAME}"
@@ -221,11 +220,10 @@ integration_task:
           DISTRO_NV: "${FEDORA_NAME}"
           IMAGE_NAME: "${FEDORA_CACHE_IMAGE_NAME}"
           STORAGE_DRIVER: 'overlay'
-        # Disabled until we update to f41/42 as f40 does not have go 1.22
-        # - env:
-        #     DISTRO_NV: "${PRIOR_FEDORA_NAME}"
-        #     IMAGE_NAME: "${PRIOR_FEDORA_CACHE_IMAGE_NAME}"
-        #     STORAGE_DRIVER: 'overlay'
+      - env:
+          DISTRO_NV: "${PRIOR_FEDORA_NAME}"
+          IMAGE_NAME: "${PRIOR_FEDORA_CACHE_IMAGE_NAME}"
+          STORAGE_DRIVER: 'overlay'
       - env:
           DISTRO_NV: "${DEBIAN_NAME}"
           IMAGE_NAME: "${DEBIAN_CACHE_IMAGE_NAME}"
@@ -268,12 +266,11 @@ integration_rootless_task:
           IMAGE_NAME: "${FEDORA_CACHE_IMAGE_NAME}"
           STORAGE_DRIVER: 'overlay'
           PRIV_NAME: rootless
-        # Disabled until we update to f40/41 as f39 does not have go 1.22
-        # - env:
-        #     DISTRO_NV: "${PRIOR_FEDORA_NAME}"
-        #     IMAGE_NAME: "${PRIOR_FEDORA_CACHE_IMAGE_NAME}"
-        #     STORAGE_DRIVER: 'overlay'
-        #     PRIV_NAME: rootless
+      - env:
+          DISTRO_NV: "${PRIOR_FEDORA_NAME}"
+          IMAGE_NAME: "${PRIOR_FEDORA_CACHE_IMAGE_NAME}"
+          STORAGE_DRIVER: 'overlay'
+          PRIV_NAME: rootless
       - env:
           DISTRO_NV: "${DEBIAN_NAME}"
           IMAGE_NAME: "${DEBIAN_CACHE_IMAGE_NAME}"
@@ -46,17 +46,16 @@ jobs:
     notifications: &copr_build_failure_notification
       failure_comment:
         message: "Ephemeral COPR build failed. @containers/packit-build please check."
+    # Fedora aliases documentation: https://packit.dev/docs/configuration#aliases
+    # python3-fedora-distro-aliases provides `resolve-fedora-aliases` command
     targets: &fedora_copr_targets
-      # f40 ships go 1.22 and we require go 1.23 now. This should be revert to fedora-all
-      # once either f40 is rebased to go 1.23 or f42 is released and f40 EOL.
-      - fedora-latest-stable-x86_64
-      - fedora-latest-stable-aarch64
-      - fedora-development-x86_64
-      - fedora-development-aarch64
+      - fedora-all-x86_64
+      - fedora-all-aarch64
     enable_net: true

+  # Ignore until golang is updated in distro buildroot to 1.23.3+
   - job: copr_build
-    trigger: pull_request
+    trigger: ignore
     packages: [buildah-eln]
     notifications: *copr_build_failure_notification
     targets:
@@ -68,8 +67,9 @@ jobs:
       - "https://kojipkgs.fedoraproject.org/repos/eln-build/latest/aarch64/"
     enable_net: true

+  # Ignore until golang is updated in distro buildroot to 1.23.3+
   - job: copr_build
-    trigger: pull_request
+    trigger: ignore
     packages: [buildah-centos]
     notifications: *copr_build_failure_notification
     targets: &centos_copr_targets
@@ -95,21 +95,20 @@ jobs:
   - job: tests
     trigger: pull_request
     packages: [buildah-fedora]
-    targets: &fedora_copr_test_targets
-      # See the other comment above, this should be reverted to fedora-all when possible.
-      - fedora-latest-stable-x86_64
-      - fedora-development-x86_64
+    targets:
+      - fedora-all-x86_64
     tf_extra_params:
       environments:
         - artifacts:
           - type: repository-file
             id: https://copr.fedorainfracloud.org/coprs/rhcontainerbot/podman-next/repo/fedora-$releasever/rhcontainerbot-podman-next-fedora-$releasever.repo

+  # Ignore until golang is updated in distro buildroot to 1.23.3+
   # Tests on CentOS Stream for main branch PRs
   - job: tests
-    trigger: pull_request
+    trigger: ignore
     packages: [buildah-centos]
-    targets: &centos_copr_test_targets
+    targets:
       - centos-stream-9-x86_64
       - centos-stream-10-x86_64
     tf_extra_params:
@@ -1750,7 +1750,7 @@ func copierHandlerPut(bulkReader io.Reader, req request, idMappings *idtools.IDM
 				return fmt.Errorf("copier: put: error setting owner of %q to %d:%d: %w", path, defaultDirUID, defaultDirGID, err)
 			}
 			// make a conditional note to set this directory's permissions
-			// later, but not if we already had an explictly-provided mode
+			// later, but not if we already had an explicitly-provided mode
 			if _, ok := directoryModes[path]; !ok {
 				directoryModes[path] = defaultDirMode
 			}
@@ -29,7 +29,7 @@ const (
 	// identify working containers.
 	Package = "buildah"
 	// Version for the Package. Also used by .packit.sh for Packit builds.
-	Version = "1.40.0"
+	Version = "1.41.0-dev"

 	// DefaultRuntime if containers.conf fails.
 	DefaultRuntime = "runc"
@@ -148,36 +148,6 @@ func expectedDockerDiffIDs(image docker.V2Image) int {
 	return expected
 }

-// Compute the media types which we need to attach to a layer, given the type of
-// compression that we'll be applying.
-func computeLayerMIMEType(what string, layerCompression archive.Compression) (omediaType, dmediaType string, err error) {
-	omediaType = v1.MediaTypeImageLayer
-	dmediaType = docker.V2S2MediaTypeUncompressedLayer
-	if layerCompression != archive.Uncompressed {
-		switch layerCompression {
-		case archive.Gzip:
-			omediaType = v1.MediaTypeImageLayerGzip
-			dmediaType = manifest.DockerV2Schema2LayerMediaType
-			logrus.Debugf("compressing %s with gzip", what)
-		case archive.Bzip2:
-			// Until the image specs define a media type for bzip2-compressed layers, even if we know
-			// how to decompress them, we can't try to compress layers with bzip2.
-			return "", "", errors.New("media type for bzip2-compressed layers is not defined")
-		case archive.Xz:
-			// Until the image specs define a media type for xz-compressed layers, even if we know
-			// how to decompress them, we can't try to compress layers with xz.
-			return "", "", errors.New("media type for xz-compressed layers is not defined")
-		case archive.Zstd:
-			// Until the image specs define a media type for zstd-compressed layers, even if we know
-			// how to decompress them, we can't try to compress layers with zstd.
-			return "", "", errors.New("media type for zstd-compressed layers is not defined")
-		default:
-			logrus.Debugf("compressing %s with unknown compressor(?)", what)
-		}
-	}
-	return omediaType, dmediaType, nil
-}
-
 // Extract the container's whole filesystem as a filesystem image, wrapped
 // in LUKS-compatible encryption.
 func (i *containerImageRef) extractConfidentialWorkloadFS(options ConfidentialWorkloadOptions) (io.ReadCloser, error) {
@@ -304,34 +274,35 @@ func (i *containerImageRef) extractRootfs(opts ExtractRootfsOptions) (io.ReadClo
 	}), errChan, nil
 }

+type manifestBuilder interface {
+	// addLayer adds notes to the manifest and config about the layer. The layer blobs are
+	// identified by their possibly-compressed blob digests and sizes in the manifest, and by
+	// their uncompressed digests (diffIDs) in the config.
+	addLayer(layerBlobSum digest.Digest, layerBlobSize int64, diffID digest.Digest)
+	computeLayerMIMEType(what string, layerCompression archive.Compression) error
+	buildHistory(extraImageContentDiff string, extraImageContentDiffDigest digest.Digest) error
+	manifestAndConfig() ([]byte, []byte, error)
+}
+
+type dockerSchema2ManifestBuilder struct {
+	i              *containerImageRef
+	layerMediaType string
+	dimage         docker.V2Image
+	dmanifest      docker.V2S2Manifest
+}
+
 // Build fresh copies of the container configuration structures so that we can edit them
-// without making unintended changes to the original Builder.
-func (i *containerImageRef) createConfigsAndManifests() (v1.Image, v1.Manifest, docker.V2Image, docker.V2S2Manifest, error) {
+// without making unintended changes to the original Builder (Docker schema 2).
+func (i *containerImageRef) newDockerSchema2ManifestBuilder() (manifestBuilder, error) {
 	created := time.Now().UTC()
 	if i.created != nil {
 		created = *i.created
 	}

-	// Build an empty image, and then decode over it.
-	oimage := v1.Image{}
-	if err := json.Unmarshal(i.oconfig, &oimage); err != nil {
-		return v1.Image{}, v1.Manifest{}, docker.V2Image{}, docker.V2S2Manifest{}, err
-	}
-	// Always replace this value, since we're newer than our base image.
-	oimage.Created = &created
-	// Clear the list of diffIDs, since we always repopulate it.
-	oimage.RootFS.Type = docker.TypeLayers
-	oimage.RootFS.DiffIDs = []digest.Digest{}
-	// Only clear the history if we're squashing, otherwise leave it be so that we can append
-	// entries to it.
-	if i.confidentialWorkload.Convert || i.squash || i.omitHistory {
-		oimage.History = []v1.History{}
-	}
-
 	// Build an empty image, and then decode over it.
 	dimage := docker.V2Image{}
 	if err := json.Unmarshal(i.dconfig, &dimage); err != nil {
-		return v1.Image{}, v1.Manifest{}, docker.V2Image{}, docker.V2S2Manifest{}, err
+		return nil, err
 	}
 	// Set the parent, but only if we want to be compatible with "classic" docker build.
 	if i.compatSetParent == types.OptionalBoolTrue {
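This refactor replaces createConfigsAndManifests, which returned the Docker and OCI structures together as a four-value tuple, with a manifestBuilder interface and one constructor per output format; the per-format branching moves behind the interface. A pared-down sketch of the shape of this design, using stand-in types rather than buildah's real ones:

package main

import "fmt"

// One builder interface, one implementation per output format, instead of
// a single function that returns both formats' structures at once.
type manifestBuilder interface {
	addLayer(digest string, size int64)
	manifestAndConfig() (manifest, config string)
}

type dockerBuilder struct{ layers []string }

func (b *dockerBuilder) addLayer(d string, s int64) { b.layers = append(b.layers, d) }
func (b *dockerBuilder) manifestAndConfig() (string, string) {
	return fmt.Sprintf("docker manifest: %v", b.layers), "docker config"
}

type ociBuilder struct{ layers []string }

func (b *ociBuilder) addLayer(d string, s int64) { b.layers = append(b.layers, d) }
func (b *ociBuilder) manifestAndConfig() (string, string) {
	return fmt.Sprintf("OCI manifest: %v", b.layers), "OCI config"
}

func main() {
	// Callers pick a builder once and the rest of the pipeline is format-agnostic.
	for _, mb := range []manifestBuilder{&dockerBuilder{}, &ociBuilder{}} {
		mb.addLayer("sha256:abc", 1024)
		m, c := mb.manifestAndConfig()
		fmt.Println(m, "|", c)
	}
}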
@@ -358,31 +329,243 @@ func (i *containerImageRef) newDockerSchema2ManifestBuilder() (manifestBuilder,
 	// If we were supplied with a configuration, copy fields from it to
 	// matching fields in both formats.
-	if err := config.Override(dimage.Config, &oimage.Config, i.overrideChanges, i.overrideConfig); err != nil {
-		return v1.Image{}, v1.Manifest{}, docker.V2Image{}, docker.V2S2Manifest{}, fmt.Errorf("applying changes: %w", err)
+	if err := config.OverrideDocker(dimage.Config, i.overrideChanges, i.overrideConfig); err != nil {
+		return nil, fmt.Errorf("applying changes: %w", err)
 	}

 	// If we're producing a confidential workload, override the command and
 	// assorted other settings that aren't expected to work correctly.
 	if i.confidentialWorkload.Convert {
 		dimage.Config.Entrypoint = []string{"/entrypoint"}
-		oimage.Config.Entrypoint = []string{"/entrypoint"}
 		dimage.Config.Cmd = nil
-		oimage.Config.Cmd = nil
 		dimage.Config.User = ""
-		oimage.Config.User = ""
 		dimage.Config.WorkingDir = ""
-		oimage.Config.WorkingDir = ""
 		dimage.Config.Healthcheck = nil
 		dimage.Config.Shell = nil
 		dimage.Config.Volumes = nil
-		oimage.Config.Volumes = nil
 		dimage.Config.ExposedPorts = nil
 	}

+	// Return partial manifest. The Layers lists will be populated later.
+	return &dockerSchema2ManifestBuilder{
+		i:              i,
+		layerMediaType: docker.V2S2MediaTypeUncompressedLayer,
+		dimage:         dimage,
+		dmanifest: docker.V2S2Manifest{
+			V2Versioned: docker.V2Versioned{
+				SchemaVersion: 2,
+				MediaType:     manifest.DockerV2Schema2MediaType,
+			},
+			Config: docker.V2S2Descriptor{
+				MediaType: manifest.DockerV2Schema2ConfigMediaType,
+			},
+			Layers: []docker.V2S2Descriptor{},
+		},
+	}, nil
+}
+
+func (mb *dockerSchema2ManifestBuilder) addLayer(layerBlobSum digest.Digest, layerBlobSize int64, diffID digest.Digest) {
+	dlayerDescriptor := docker.V2S2Descriptor{
+		MediaType: mb.layerMediaType,
+		Digest:    layerBlobSum,
+		Size:      layerBlobSize,
+	}
+	mb.dmanifest.Layers = append(mb.dmanifest.Layers, dlayerDescriptor)
+	// Note this layer in the list of diffIDs, again using the uncompressed digest.
+	mb.dimage.RootFS.DiffIDs = append(mb.dimage.RootFS.DiffIDs, diffID)
+}
+
+// Compute the media types which we need to attach to a layer, given the type of
+// compression that we'll be applying.
+func (mb *dockerSchema2ManifestBuilder) computeLayerMIMEType(what string, layerCompression archive.Compression) error {
+	dmediaType := docker.V2S2MediaTypeUncompressedLayer
+	if layerCompression != archive.Uncompressed {
+		switch layerCompression {
+		case archive.Gzip:
+			dmediaType = manifest.DockerV2Schema2LayerMediaType
+			logrus.Debugf("compressing %s with gzip", what)
+		case archive.Bzip2:
+			// Until the image specs define a media type for bzip2-compressed layers, even if we know
+			// how to decompress them, we can't try to compress layers with bzip2.
+			return errors.New("media type for bzip2-compressed layers is not defined")
+		case archive.Xz:
+			// Until the image specs define a media type for xz-compressed layers, even if we know
+			// how to decompress them, we can't try to compress layers with xz.
+			return errors.New("media type for xz-compressed layers is not defined")
+		case archive.Zstd:
+			// Until the image specs define a media type for zstd-compressed layers, even if we know
+			// how to decompress them, we can't try to compress layers with zstd.
+			return errors.New("media type for zstd-compressed layers is not defined")
+		default:
+			logrus.Debugf("compressing %s with unknown compressor(?)", what)
+		}
+	}
+	mb.layerMediaType = dmediaType
+	return nil
+}
+
+func (mb *dockerSchema2ManifestBuilder) buildHistory(extraImageContentDiff string, extraImageContentDiffDigest digest.Digest) error {
+	// Build history notes in the image configuration.
+	appendHistory := func(history []v1.History, empty bool) {
+		for i := range history {
+			var created time.Time
+			if history[i].Created != nil {
+				created = *history[i].Created
+			}
+			dnews := docker.V2S2History{
+				Created:    created,
+				CreatedBy:  history[i].CreatedBy,
+				Author:     history[i].Author,
+				Comment:    history[i].Comment,
+				EmptyLayer: empty,
+			}
+			mb.dimage.History = append(mb.dimage.History, dnews)
+		}
+	}
+
+	// Keep track of how many entries the base image's history had
+	// before we started adding to it.
+	baseImageHistoryLen := len(mb.dimage.History)
+
+	// Add history entries for prepended empty layers.
+	appendHistory(mb.i.preEmptyLayers, true)
+	// Add history entries for prepended API-supplied layers.
+	for _, h := range mb.i.preLayers {
+		appendHistory([]v1.History{h.linkedLayer.History}, h.linkedLayer.History.EmptyLayer)
+	}
+	// Add a history entry for this layer, empty or not.
+	created := time.Now().UTC()
+	if mb.i.created != nil {
+		created = (*mb.i.created).UTC()
+	}
+	dnews := docker.V2S2History{
+		Created:    created,
+		CreatedBy:  mb.i.createdBy,
+		Author:     mb.dimage.Author,
+		EmptyLayer: mb.i.emptyLayer,
+		Comment:    mb.i.historyComment,
+	}
+	mb.dimage.History = append(mb.dimage.History, dnews)
+	// Add a history entry for the extra image content if we added a layer for it.
+	// This diff was added to the list of layers before API-supplied layers that
+	// needed to be appended, and we need to keep the order of history entries for
+	// not-empty layers consistent with that.
+	if extraImageContentDiff != "" {
+		createdBy := fmt.Sprintf(`/bin/sh -c #(nop) ADD dir:%s in /",`, extraImageContentDiffDigest.Encoded())
+		dnews := docker.V2S2History{
+			Created:   created,
+			CreatedBy: createdBy,
+		}
+		mb.dimage.History = append(mb.dimage.History, dnews)
+	}
+	// Add history entries for appended empty layers.
+	appendHistory(mb.i.postEmptyLayers, true)
+	// Add history entries for appended API-supplied layers.
+	for _, h := range mb.i.postLayers {
+		appendHistory([]v1.History{h.linkedLayer.History}, h.linkedLayer.History.EmptyLayer)
+	}
+
+	// Assemble a comment indicating which base image was used, if it wasn't
+	// just an image ID, and add it to the first history entry we added.
+	var fromComment string
+	if strings.Contains(mb.i.parent, mb.i.fromImageID) && mb.i.fromImageName != "" && !strings.HasPrefix(mb.i.fromImageID, mb.i.fromImageName) {
+		if mb.dimage.History[baseImageHistoryLen].Comment != "" {
+			fromComment = " "
+		}
+		fromComment += "FROM " + mb.i.fromImageName
+	}
+	mb.dimage.History[baseImageHistoryLen].Comment += fromComment
+
+	// Confidence check that we didn't just create a mismatch between non-empty layers in the
+	// history and the number of diffIDs. Only applicable if the base image (if there was
+	// one) provided us at least one entry to use as a starting point.
+	if baseImageHistoryLen != 0 {
+		expectedDiffIDs := expectedDockerDiffIDs(mb.dimage)
+		if len(mb.dimage.RootFS.DiffIDs) != expectedDiffIDs {
+			return fmt.Errorf("internal error: history lists %d non-empty layers, but we have %d layers on disk", expectedDiffIDs, len(mb.dimage.RootFS.DiffIDs))
+		}
+	}
+	return nil
+}
+
+func (mb *dockerSchema2ManifestBuilder) manifestAndConfig() ([]byte, []byte, error) {
+	// Encode the image configuration blob.
+	dconfig, err := json.Marshal(&mb.dimage)
+	if err != nil {
+		return nil, nil, fmt.Errorf("encoding %#v as json: %w", mb.dimage, err)
+	}
+	logrus.Debugf("Docker v2s2 config = %s", dconfig)
+
+	// Add the configuration blob to the manifest.
+	mb.dmanifest.Config.Digest = digest.Canonical.FromBytes(dconfig)
+	mb.dmanifest.Config.Size = int64(len(dconfig))
+	mb.dmanifest.Config.MediaType = manifest.DockerV2Schema2ConfigMediaType
+
+	// Encode the manifest.
+	dmanifestbytes, err := json.Marshal(&mb.dmanifest)
+	if err != nil {
+		return nil, nil, fmt.Errorf("encoding %#v as json: %w", mb.dmanifest, err)
+	}
+	logrus.Debugf("Docker v2s2 manifest = %s", dmanifestbytes)
+
+	return dmanifestbytes, dconfig, nil
+}
+
+type ociManifestBuilder struct {
+	i              *containerImageRef
+	layerMediaType string
+	oimage         v1.Image
+	omanifest      v1.Manifest
+}
+
+// Build fresh copies of the container configuration structures so that we can edit them
+// without making unintended changes to the original Builder (OCI manifest).
+func (i *containerImageRef) newOCIManifestBuilder() (manifestBuilder, error) {
+	created := time.Now().UTC()
+	if i.created != nil {
+		created = *i.created
+	}
+
+	// Build an empty image, and then decode over it.
+	oimage := v1.Image{}
+	if err := json.Unmarshal(i.oconfig, &oimage); err != nil {
+		return nil, err
+	}
+	// Always replace this value, since we're newer than our base image.
+	oimage.Created = &created
+	// Clear the list of diffIDs, since we always repopulate it.
+	oimage.RootFS.Type = docker.TypeLayers
+	oimage.RootFS.DiffIDs = []digest.Digest{}
+	// Only clear the history if we're squashing, otherwise leave it be so that we can append
+	// entries to it.
+	if i.confidentialWorkload.Convert || i.squash || i.omitHistory {
+		oimage.History = []v1.History{}
+	}
+
+	// If we were supplied with a configuration, copy fields from it to
+	// matching fields in both formats.
+	if err := config.OverrideOCI(&oimage.Config, i.overrideChanges, i.overrideConfig); err != nil {
+		return nil, fmt.Errorf("applying changes: %w", err)
+	}
+
+	// If we're producing a confidential workload, override the command and
+	// assorted other settings that aren't expected to work correctly.
+	if i.confidentialWorkload.Convert {
+		oimage.Config.Entrypoint = []string{"/entrypoint"}
+		oimage.Config.Cmd = nil
+		oimage.Config.User = ""
+		oimage.Config.WorkingDir = ""
+		oimage.Config.Volumes = nil
+		oimage.Config.ExposedPorts = nil
+	}
+
-	// Build empty manifests. The Layers lists will be populated later.
-	omanifest := v1.Manifest{
+	// Return partial manifest. The Layers lists will be populated later.
+	return &ociManifestBuilder{
+		i: i,
+		// The default layer media type assumes no compression.
+		layerMediaType: v1.MediaTypeImageLayer,
+		oimage:         oimage,
+		omanifest: v1.Manifest{
 			Versioned: specs.Versioned{
 				SchemaVersion: 2,
 			},
@ -392,30 +575,159 @@ func (i *containerImageRef) createConfigsAndManifests() (v1.Image, v1.Manifest,
|
|||
},
|
||||
Layers: []v1.Descriptor{},
|
||||
Annotations: i.annotations,
|
||||
},
|
||||
}, nil
|
||||
}
|
||||
|
||||
func (mb *ociManifestBuilder) addLayer(layerBlobSum digest.Digest, layerBlobSize int64, diffID digest.Digest) {
|
||||
olayerDescriptor := v1.Descriptor{
|
||||
MediaType: mb.layerMediaType,
|
||||
Digest: layerBlobSum,
|
||||
Size: layerBlobSize,
|
||||
}
|
||||
mb.omanifest.Layers = append(mb.omanifest.Layers, olayerDescriptor)
|
||||
// Note this layer in the list of diffIDs, again using the uncompressed digest.
|
||||
mb.oimage.RootFS.DiffIDs = append(mb.oimage.RootFS.DiffIDs, diffID)
|
||||
}
|
||||
|
||||
// Compute the media types which we need to attach to a layer, given the type of
|
||||
// compression that we'll be applying.
|
||||
func (mb *ociManifestBuilder) computeLayerMIMEType(what string, layerCompression archive.Compression) error {
|
||||
omediaType := v1.MediaTypeImageLayer
|
||||
if layerCompression != archive.Uncompressed {
|
||||
switch layerCompression {
|
||||
case archive.Gzip:
|
||||
omediaType = v1.MediaTypeImageLayerGzip
|
||||
logrus.Debugf("compressing %s with gzip", what)
|
||||
case archive.Bzip2:
|
||||
// Until the image specs define a media type for bzip2-compressed layers, even if we know
|
||||
// how to decompress them, we can't try to compress layers with bzip2.
|
||||
return errors.New("media type for bzip2-compressed layers is not defined")
|
||||
case archive.Xz:
|
||||
// Until the image specs define a media type for xz-compressed layers, even if we know
|
||||
// how to decompress them, we can't try to compress layers with xz.
|
||||
return errors.New("media type for xz-compressed layers is not defined")
|
||||
case archive.Zstd:
|
||||
// Until the image specs define a media type for zstd-compressed layers, even if we know
|
||||
// how to decompress them, we can't try to compress layers with zstd.
|
||||
return errors.New("media type for zstd-compressed layers is not defined")
|
||||
default:
|
||||
logrus.Debugf("compressing %s with unknown compressor(?)", what)
|
||||
}
|
||||
}
|
||||
mb.layerMediaType = omediaType
|
||||
return nil
|
||||
}
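
The switch above is the entire compression policy: only gzip maps to a defined OCI layer media type in this code path, and bzip2, xz, and zstd are rejected outright. A minimal standalone sketch of the same mapping, runnable outside buildah (the mediaTypeFor helper is illustrative, not a buildah API):

package main

import (
	"fmt"

	"github.com/containers/storage/pkg/archive"
	v1 "github.com/opencontainers/image-spec/specs-go/v1"
)

// mediaTypeFor mirrors computeLayerMIMEType above: gzip is the only
// compressor with a defined OCI layer media type in this code path.
func mediaTypeFor(c archive.Compression) (string, error) {
	switch c {
	case archive.Uncompressed:
		return v1.MediaTypeImageLayer, nil
	case archive.Gzip:
		return v1.MediaTypeImageLayerGzip, nil
	default:
		return "", fmt.Errorf("media type for compression %d is not defined", c)
	}
}

func main() {
	for _, c := range []archive.Compression{archive.Uncompressed, archive.Gzip, archive.Zstd} {
		mt, err := mediaTypeFor(c)
		fmt.Println(mt, err)
	}
}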

func (mb *ociManifestBuilder) buildHistory(extraImageContentDiff string, extraImageContentDiffDigest digest.Digest) error {
	// Build history notes in the image configuration.
	appendHistory := func(history []v1.History, empty bool) {
		for i := range history {
			var created *time.Time
			if history[i].Created != nil {
				copiedTimestamp := *history[i].Created
				created = &copiedTimestamp
			}
			onews := v1.History{
				Created:    created,
				CreatedBy:  history[i].CreatedBy,
				Author:     history[i].Author,
				Comment:    history[i].Comment,
				EmptyLayer: empty,
			}
			mb.oimage.History = append(mb.oimage.History, onews)
		}
	}

	dmanifest := docker.V2S2Manifest{
		V2Versioned: docker.V2Versioned{
			SchemaVersion: 2,
			MediaType:     manifest.DockerV2Schema2MediaType,
		},
		Config: docker.V2S2Descriptor{
			MediaType: manifest.DockerV2Schema2ConfigMediaType,
		},
		Layers: []docker.V2S2Descriptor{},
	// Keep track of how many entries the base image's history had
	// before we started adding to it.
	baseImageHistoryLen := len(mb.oimage.History)

	// Add history entries for prepended empty layers.
	appendHistory(mb.i.preEmptyLayers, true)
	// Add history entries for prepended API-supplied layers.
	for _, h := range mb.i.preLayers {
		appendHistory([]v1.History{h.linkedLayer.History}, h.linkedLayer.History.EmptyLayer)
	}
	// Add a history entry for this layer, empty or not.
	created := time.Now().UTC()
	if mb.i.created != nil {
		created = (*mb.i.created).UTC()
	}
	onews := v1.History{
		Created:    &created,
		CreatedBy:  mb.i.createdBy,
		Author:     mb.oimage.Author,
		EmptyLayer: mb.i.emptyLayer,
		Comment:    mb.i.historyComment,
	}
	mb.oimage.History = append(mb.oimage.History, onews)
	// Add a history entry for the extra image content if we added a layer for it.
	// This diff was added to the list of layers before API-supplied layers that
	// needed to be appended, and we need to keep the order of history entries for
	// not-empty layers consistent with that.
	if extraImageContentDiff != "" {
		createdBy := fmt.Sprintf(`/bin/sh -c #(nop) ADD dir:%s in /",`, extraImageContentDiffDigest.Encoded())
		onews := v1.History{
			Created:   &created,
			CreatedBy: createdBy,
		}
		mb.oimage.History = append(mb.oimage.History, onews)
	}
	// Add history entries for appended empty layers.
	appendHistory(mb.i.postEmptyLayers, true)
	// Add history entries for appended API-supplied layers.
	for _, h := range mb.i.postLayers {
		appendHistory([]v1.History{h.linkedLayer.History}, h.linkedLayer.History.EmptyLayer)
	}

	return oimage, omanifest, dimage, dmanifest, nil
	// Assemble a comment indicating which base image was used, if it wasn't
	// just an image ID, and add it to the first history entry we added.
	var fromComment string
	if strings.Contains(mb.i.parent, mb.i.fromImageID) && mb.i.fromImageName != "" && !strings.HasPrefix(mb.i.fromImageID, mb.i.fromImageName) {
		if mb.oimage.History[baseImageHistoryLen].Comment != "" {
			fromComment = " "
		}
		fromComment += "FROM " + mb.i.fromImageName
	}
	mb.oimage.History[baseImageHistoryLen].Comment += fromComment

	// Confidence check that we didn't just create a mismatch between non-empty layers in the
	// history and the number of diffIDs. Only applicable if the base image (if there was
	// one) provided us at least one entry to use as a starting point.
	if baseImageHistoryLen != 0 {
		expectedDiffIDs := expectedOCIDiffIDs(mb.oimage)
		if len(mb.oimage.RootFS.DiffIDs) != expectedDiffIDs {
			return fmt.Errorf("internal error: history lists %d non-empty layers, but we have %d layers on disk", expectedDiffIDs, len(mb.oimage.RootFS.DiffIDs))
		}
	}
	return nil
}

func (mb *ociManifestBuilder) manifestAndConfig() ([]byte, []byte, error) {
	// Encode the image configuration blob.
	oconfig, err := json.Marshal(&mb.oimage)
	if err != nil {
		return nil, nil, fmt.Errorf("encoding %#v as json: %w", mb.oimage, err)
	}
	logrus.Debugf("OCIv1 config = %s", oconfig)

	// Add the configuration blob to the manifest.
	mb.omanifest.Config.Digest = digest.Canonical.FromBytes(oconfig)
	mb.omanifest.Config.Size = int64(len(oconfig))
	mb.omanifest.Config.MediaType = v1.MediaTypeImageConfig

	// Encode the manifest.
	omanifestbytes, err := json.Marshal(&mb.omanifest)
	if err != nil {
		return nil, nil, fmt.Errorf("encoding %#v as json: %w", mb.omanifest, err)
	}
	logrus.Debugf("OCIv1 manifest = %s", omanifestbytes)

	return omanifestbytes, oconfig, nil
}
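
Taken together, addLayer, computeLayerMIMEType, buildHistory, and manifestAndConfig are the whole surface that NewImageSource drives below. The interface declaration itself is not part of these hunks, so the following is an assumed reconstruction rather than buildah's verbatim definition:

// Assumed shape of the manifestBuilder interface used by NewImageSource;
// the method set matches the implementations in this diff, but the
// declaration itself is not shown in these hunks.
type manifestBuilder interface {
	// addLayer records a layer blob in the manifest and its diffID in the config.
	addLayer(layerBlobSum digest.Digest, layerBlobSize int64, diffID digest.Digest)
	// computeLayerMIMEType picks the media type used for subsequently added layers.
	computeLayerMIMEType(what string, layerCompression archive.Compression) error
	// buildHistory fills in history entries, including one for any extra image content diff.
	buildHistory(extraImageContentDiff string, extraImageContentDiffDigest digest.Digest) error
	// manifestAndConfig returns the encoded manifest and image configuration blobs.
	manifestAndConfig() ([]byte, []byte, error)
}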

func (i *containerImageRef) NewImageSource(_ context.Context, _ *types.SystemContext) (src types.ImageSource, err error) {
	// Decide which type of manifest and configuration output we're going to provide.
	manifestType := i.preferredManifestType
	// If it's not a format we support, return an error.
	if manifestType != v1.MediaTypeImageManifest && manifestType != manifest.DockerV2Schema2MediaType {
		return nil, fmt.Errorf("no supported manifest types (attempted to use %q, only know %q and %q)",
			manifestType, v1.MediaTypeImageManifest, manifest.DockerV2Schema2MediaType)
	}
	// These maps will let us check if a layer ID is part of one group or another.
	parentLayerIDs := make(map[string]bool)
	apiLayerIDs := make(map[string]bool)

@@ -492,10 +804,22 @@ func (i *containerImageRef) NewImageSource(_ context.Context, _ *types.SystemCon

	// Build fresh copies of the configurations and manifest so that we don't mess with any
	// values in the Builder object itself.
	oimage, omanifest, dimage, dmanifest, err := i.createConfigsAndManifests()
	var mb manifestBuilder
	switch i.preferredManifestType {
	case v1.MediaTypeImageManifest:
		mb, err = i.newOCIManifestBuilder()
		if err != nil {
			return nil, err
		}
	case manifest.DockerV2Schema2MediaType:
		mb, err = i.newDockerSchema2ManifestBuilder()
		if err != nil {
			return nil, err
		}
	default:
		return nil, fmt.Errorf("no supported manifest types (attempted to use %q, only know %q and %q)",
			i.preferredManifestType, v1.MediaTypeImageManifest, manifest.DockerV2Schema2MediaType)
	}

	// Extract each layer and compute its digests, both compressed (if requested) and uncompressed.
	var extraImageContentDiff string

@@ -512,9 +836,6 @@ func (i *containerImageRef) NewImageSource(_ context.Context, _ *types.SystemCon
		if apiLayerIDs[layerID] {
			what = layerID
		}
		// The default layer media type assumes no compression.
		omediaType := v1.MediaTypeImageLayer
		dmediaType := docker.V2S2MediaTypeUncompressedLayer
		// Look up this layer.
		var layerUncompressedDigest digest.Digest
		var layerUncompressedSize int64

@@ -552,21 +873,7 @@ func (i *containerImageRef) NewImageSource(_ context.Context, _ *types.SystemCon
			layerBlobSize := layerUncompressedSize
			diffID := layerUncompressedDigest
			// Note this layer in the manifest, using the appropriate blobsum.
			olayerDescriptor := v1.Descriptor{
				MediaType: omediaType,
				Digest:    layerBlobSum,
				Size:      layerBlobSize,
			}
			omanifest.Layers = append(omanifest.Layers, olayerDescriptor)
			dlayerDescriptor := docker.V2S2Descriptor{
				MediaType: dmediaType,
				Digest:    layerBlobSum,
				Size:      layerBlobSize,
			}
			dmanifest.Layers = append(dmanifest.Layers, dlayerDescriptor)
			// Note this layer in the list of diffIDs, again using the uncompressed digest.
			oimage.RootFS.DiffIDs = append(oimage.RootFS.DiffIDs, diffID)
			dimage.RootFS.DiffIDs = append(dimage.RootFS.DiffIDs, diffID)
			mb.addLayer(layerBlobSum, layerBlobSize, diffID)
			blobLayers[diffID] = blobLayerInfo{
				ID:   layerID,
				Size: layerBlobSize,

@@ -574,8 +881,7 @@ func (i *containerImageRef) NewImageSource(_ context.Context, _ *types.SystemCon
			continue
		}
		// Figure out if we need to change the media type, in case we've changed the compression.
		omediaType, dmediaType, err = computeLayerMIMEType(what, i.compression)
		if err != nil {
		if err := mb.computeLayerMIMEType(what, i.compression); err != nil {
			return nil, err
		}
		// Start reading either the layer or the whole container rootfs.

@@ -651,16 +957,13 @@ func (i *containerImageRef) NewImageSource(_ context.Context, _ *types.SystemCon
			return nil, fmt.Errorf("compressing %s: %w", what, err)
		}
		writer := io.MultiWriter(writeCloser, srcHasher.Hash())
		{
		// Tweak the contents of layers we're creating.
		nestedWriteCloser := ioutils.NewWriteCloserWrapper(writer, writeCloser.Close)
		writeCloser = newTarFilterer(nestedWriteCloser, func(hdr *tar.Header) (bool, bool, io.Reader) {
			// Scrub any local user names that might correspond to UIDs or GIDs of
			// files in this layer.
			hdr.Uname, hdr.Gname = "", ""

			// Use specified timestamps in the layer, if we're doing that for history
			// entries.
			if i.created != nil {
		// Tweak the contents of layers we're creating.
		nestedWriteCloser := ioutils.NewWriteCloserWrapper(writer, writeCloser.Close)
		writeCloser = newTarFilterer(nestedWriteCloser, func(hdr *tar.Header) (bool, bool, io.Reader) {
			// Changing a zeroed field to a non-zero field can affect the
			// format that the library uses for writing the header, so only
			// change fields that are already set to avoid changing the

@@ -676,10 +979,8 @@ func (i *containerImageRef) NewImageSource(_ context.Context, _ *types.SystemCon
				hdr.ChangeTime = *i.created
			}
			return false, false, nil
		}
			return false, false, nil
		})
		writer = io.Writer(writeCloser)
		writer = writeCloser
		}
		// Okay, copy from the raw diff through the filter, compressor, and counter and
		// digesters.

@@ -718,189 +1019,19 @@ func (i *containerImageRef) NewImageSource(_ context.Context, _ *types.SystemCon
		if err = os.Rename(filepath.Join(path, "layer"), finalBlobName); err != nil {
			return nil, fmt.Errorf("storing %s to file while renaming %q to %q: %w", what, filepath.Join(path, "layer"), finalBlobName, err)
		}
		// Add a note in the manifest about the layer. The blobs are identified by their possibly-
		// compressed blob digests.
		olayerDescriptor := v1.Descriptor{
			MediaType: omediaType,
			Digest:    destHasher.Digest(),
			Size:      size,
		}
		omanifest.Layers = append(omanifest.Layers, olayerDescriptor)
		dlayerDescriptor := docker.V2S2Descriptor{
			MediaType: dmediaType,
			Digest:    destHasher.Digest(),
			Size:      size,
		}
		dmanifest.Layers = append(dmanifest.Layers, dlayerDescriptor)
		// Add a note about the diffID, which is always the layer's uncompressed digest.
		oimage.RootFS.DiffIDs = append(oimage.RootFS.DiffIDs, srcHasher.Digest())
		dimage.RootFS.DiffIDs = append(dimage.RootFS.DiffIDs, srcHasher.Digest())
	}

	// Build history notes in the image configurations.
	appendHistory := func(history []v1.History, empty bool) {
		for i := range history {
			var created *time.Time
			if history[i].Created != nil {
				copiedTimestamp := *history[i].Created
				created = &copiedTimestamp
			}
			onews := v1.History{
				Created:    created,
				CreatedBy:  history[i].CreatedBy,
				Author:     history[i].Author,
				Comment:    history[i].Comment,
				EmptyLayer: empty,
			}
			oimage.History = append(oimage.History, onews)
			if created == nil {
				created = &time.Time{}
			}
			dnews := docker.V2S2History{
				Created:    *created,
				CreatedBy:  history[i].CreatedBy,
				Author:     history[i].Author,
				Comment:    history[i].Comment,
				EmptyLayer: empty,
			}
			dimage.History = append(dimage.History, dnews)
		}
		mb.addLayer(destHasher.Digest(), size, srcHasher.Digest())
	}

	// Only attempt to append history if history was not disabled explicitly.
	if !i.omitHistory {
		// Keep track of how many entries the base image's history had
		// before we started adding to it.
		baseImageHistoryLen := len(oimage.History)

		// Add history entries for prepended empty layers.
		appendHistory(i.preEmptyLayers, true)
		// Add history entries for prepended API-supplied layers.
		for _, h := range i.preLayers {
			appendHistory([]v1.History{h.linkedLayer.History}, h.linkedLayer.History.EmptyLayer)
		}
		// Add a history entry for this layer, empty or not.
		created := time.Now().UTC()
		if i.created != nil {
			created = (*i.created).UTC()
		}
		onews := v1.History{
			Created:    &created,
			CreatedBy:  i.createdBy,
			Author:     oimage.Author,
			EmptyLayer: i.emptyLayer,
			Comment:    i.historyComment,
		}
		oimage.History = append(oimage.History, onews)
		dnews := docker.V2S2History{
			Created:    created,
			CreatedBy:  i.createdBy,
			Author:     dimage.Author,
			EmptyLayer: i.emptyLayer,
			Comment:    i.historyComment,
		}
		dimage.History = append(dimage.History, dnews)
		// Add a history entry for the extra image content if we added a layer for it.
		// This diff was added to the list of layers before API-supplied layers that
		// needed to be appended, and we need to keep the order of history entries for
		// not-empty layers consistent with that.
		if extraImageContentDiff != "" {
			createdBy := fmt.Sprintf(`/bin/sh -c #(nop) ADD dir:%s in /",`, extraImageContentDiffDigest.Encoded())
			onews := v1.History{
				Created:   &created,
				CreatedBy: createdBy,
			}
			oimage.History = append(oimage.History, onews)
			dnews := docker.V2S2History{
				Created:   created,
				CreatedBy: createdBy,
			}
			dimage.History = append(dimage.History, dnews)
		}
		// Add history entries for appended empty layers.
		appendHistory(i.postEmptyLayers, true)
		// Add history entries for appended API-supplied layers.
		for _, h := range i.postLayers {
			appendHistory([]v1.History{h.linkedLayer.History}, h.linkedLayer.History.EmptyLayer)
		}

		// Assemble a comment indicating which base image was used, if it wasn't
		// just an image ID, and add it to the first history entry we added.
		var fromComment string
		if strings.Contains(i.parent, i.fromImageID) && i.fromImageName != "" && !strings.HasPrefix(i.fromImageID, i.fromImageName) {
			if oimage.History[baseImageHistoryLen].Comment != "" {
				fromComment = " "
			}
			fromComment += "FROM " + i.fromImageName
		}
		oimage.History[baseImageHistoryLen].Comment += fromComment
		dimage.History[baseImageHistoryLen].Comment += fromComment

		// Confidence check that we didn't just create a mismatch between non-empty layers in the
		// history and the number of diffIDs. Only applicable if the base image (if there was
		// one) provided us at least one entry to use as a starting point.
		if baseImageHistoryLen != 0 {
			expectedDiffIDs := expectedOCIDiffIDs(oimage)
			if len(oimage.RootFS.DiffIDs) != expectedDiffIDs {
				return nil, fmt.Errorf("internal error: history lists %d non-empty layers, but we have %d layers on disk", expectedDiffIDs, len(oimage.RootFS.DiffIDs))
			}
			expectedDiffIDs = expectedDockerDiffIDs(dimage)
			if len(dimage.RootFS.DiffIDs) != expectedDiffIDs {
				return nil, fmt.Errorf("internal error: history lists %d non-empty layers, but we have %d layers on disk", expectedDiffIDs, len(dimage.RootFS.DiffIDs))
			}
		if err := mb.buildHistory(extraImageContentDiff, extraImageContentDiffDigest); err != nil {
			return nil, err
		}
	}

	// Encode the image configuration blob.
	oconfig, err := json.Marshal(&oimage)
	imageManifest, config, err := mb.manifestAndConfig()
	if err != nil {
		return nil, fmt.Errorf("encoding %#v as json: %w", oimage, err)
	}
	logrus.Debugf("OCIv1 config = %s", oconfig)

	// Add the configuration blob to the manifest.
	omanifest.Config.Digest = digest.Canonical.FromBytes(oconfig)
	omanifest.Config.Size = int64(len(oconfig))
	omanifest.Config.MediaType = v1.MediaTypeImageConfig

	// Encode the manifest.
	omanifestbytes, err := json.Marshal(&omanifest)
	if err != nil {
		return nil, fmt.Errorf("encoding %#v as json: %w", omanifest, err)
	}
	logrus.Debugf("OCIv1 manifest = %s", omanifestbytes)

	// Encode the image configuration blob.
	dconfig, err := json.Marshal(&dimage)
	if err != nil {
		return nil, fmt.Errorf("encoding %#v as json: %w", dimage, err)
	}
	logrus.Debugf("Docker v2s2 config = %s", dconfig)

	// Add the configuration blob to the manifest.
	dmanifest.Config.Digest = digest.Canonical.FromBytes(dconfig)
	dmanifest.Config.Size = int64(len(dconfig))
	dmanifest.Config.MediaType = manifest.DockerV2Schema2ConfigMediaType

	// Encode the manifest.
	dmanifestbytes, err := json.Marshal(&dmanifest)
	if err != nil {
		return nil, fmt.Errorf("encoding %#v as json: %w", dmanifest, err)
	}
	logrus.Debugf("Docker v2s2 manifest = %s", dmanifestbytes)

	// Decide which manifest and configuration blobs we'll actually output.
	var config []byte
	var imageManifest []byte
	switch manifestType {
	case v1.MediaTypeImageManifest:
		imageManifest = omanifestbytes
		config = oconfig
	case manifest.DockerV2Schema2MediaType:
		imageManifest = dmanifestbytes
		config = dconfig
	default:
		panic("unreachable code: unsupported manifest type")
		return nil, err
	}
	src = &containerImageSource{
		path: path,

@@ -914,7 +1045,7 @@ func (i *containerImageRef) NewImageSource(_ context.Context, _ *types.SystemCon
		config:        config,
		configDigest:  digest.Canonical.FromBytes(config),
		manifest:      imageManifest,
		manifestType:  manifestType,
		manifestType:  i.preferredManifestType,
		blobDirectory: i.blobDirectory,
		blobLayers:    blobLayers,
	}

@@ -2243,9 +2243,11 @@ func (s *StageExecutor) pullCache(ctx context.Context, cacheKey string) (referen
	return nil, "", fmt.Errorf("failed pulling cache from all available sources %q", srcList)
}

// intermediateImageExists returns true if an intermediate image of currNode exists in the image store from a previous build.
// intermediateImageExists returns the image ID if an intermediate image of currNode exists in the image store from a previous build.
// It verifies this by checking the parent of the top layer of the image and the history.
// If more than one image matches as potential candidates then priority is given to the most recently built image.
func (s *StageExecutor) intermediateImageExists(ctx context.Context, currNode *parser.Node, addedContentDigest string, buildAddsLayer bool) (string, error) {
	cacheCandidates := []storage.Image{}
	// Get the list of images available in the image store
	images, err := s.executor.store.Images()
	if err != nil {

@@ -2316,9 +2318,13 @@ func (s *StageExecutor) intermediateImageExists(ctx context.Context, currNode *p
			return "", err
		}
		if foundMatch {
			return image.ID, nil
			cacheCandidates = append(cacheCandidates, image)
		}
	}
	if len(cacheCandidates) > 0 {
		slices.SortFunc(cacheCandidates, func(a, b storage.Image) int { return a.Created.Compare(b.Created) })
		return cacheCandidates[len(cacheCandidates)-1].ID, nil
	}
	return "", nil
}
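
Rather than returning on the first match, the rewritten loop above collects every matching image and picks the newest. A self-contained illustration of that selection, using stand-in types:

package main

import (
	"fmt"
	"slices"
	"time"
)

// image stands in for storage.Image; only the fields used by the sort matter here.
type image struct {
	ID      string
	Created time.Time
}

func main() {
	cands := []image{
		{"older-build", time.Date(2024, 1, 1, 0, 0, 0, 0, time.UTC)},
		{"newer-build", time.Date(2025, 1, 1, 0, 0, 0, 0, time.UTC)},
	}
	// Sort ascending by creation time, then take the last (most recent) element.
	slices.SortFunc(cands, func(a, b image) int { return a.Created.Compare(b.Created) })
	fmt.Println(cands[len(cands)-1].ID) // "newer-build": the most recently built image wins
}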

@@ -65,11 +65,10 @@ func mergeEnv(a, b []string) []string {
	return results
}

// Override takes a buildah docker config and an OCI ImageConfig, and applies a
// mixture of a slice of Dockerfile-style instructions and fields from a config
// blob to them both
func Override(dconfig *docker.Config, oconfig *v1.ImageConfig, overrideChanges []string, overrideConfig *manifest.Schema2Config) error {
	if len(overrideChanges) > 0 {
func parseOverrideChanges(overrideChanges []string, overrideConfig *manifest.Schema2Config) (*manifest.Schema2Config, error) {
	if len(overrideChanges) == 0 {
		return overrideConfig, nil
	}
	if overrideConfig == nil {
		overrideConfig = &manifest.Schema2Config{}
	}

@@ -77,7 +76,7 @@ func Override(dconfig *docker.Config, oconfig *v1.ImageConfig, overrideChanges [
	changes := strings.Join(overrideChanges, "\n")
	parsed, err := imagebuilder.ParseDockerfile(strings.NewReader(changes))
	if err != nil {
		return fmt.Errorf("parsing change set %+v: %w", changes, err)
		return overrideConfig, fmt.Errorf("parsing change set %+v: %w", changes, err)
	}
	// Create a dummy builder object to process configuration-related
	// instructions.

@@ -88,51 +87,98 @@ func Override(dconfig *docker.Config, oconfig *v1.ImageConfig, overrideChanges [
	for _, node := range parsed.Children {
		var step imagebuilder.Step
		if err := step.Resolve(node); err != nil {
			return fmt.Errorf("resolving change %q: %w", node.Original, err)
			return overrideConfig, fmt.Errorf("resolving change %q: %w", node.Original, err)
		}
		if err := subBuilder.Run(&step, &configOnlyExecutor{}, true); err != nil {
			return fmt.Errorf("processing change %q: %w", node.Original, err)
			return overrideConfig, fmt.Errorf("processing change %q: %w", node.Original, err)
		}
	}
	// Pull settings out of the dummy builder's RunConfig.
	overrideConfig = Schema2ConfigFromGoDockerclientConfig(&subBuilder.RunConfig)
	return Schema2ConfigFromGoDockerclientConfig(&subBuilder.RunConfig), nil
}

// OverrideOCI takes an OCI ImageConfig, and applies a mixture of a slice of
// Dockerfile-style instructions and fields from a config blob to it
func OverrideOCI(oconfig *v1.ImageConfig, overrideChanges []string, overrideConfig *manifest.Schema2Config) error {
	overrideConfig, err := parseOverrideChanges(overrideChanges, overrideConfig)
	if err != nil {
		return err
	}

	if overrideConfig != nil {
		// Apply changes from a possibly-provided possibly-changed config struct.
		dconfig.Hostname = firstStringElseSecondString(overrideConfig.Hostname, dconfig.Hostname)
		dconfig.Domainname = firstStringElseSecondString(overrideConfig.Domainname, dconfig.Domainname)
		dconfig.User = firstStringElseSecondString(overrideConfig.User, dconfig.User)
		oconfig.User = firstStringElseSecondString(overrideConfig.User, oconfig.User)
		dconfig.AttachStdin = overrideConfig.AttachStdin
		dconfig.AttachStdout = overrideConfig.AttachStdout
		dconfig.AttachStderr = overrideConfig.AttachStderr
		if len(overrideConfig.ExposedPorts) > 0 {
			dexposedPorts := make(map[docker.Port]struct{})
			oexposedPorts := make(map[string]struct{})
			for port := range dconfig.ExposedPorts {
				dexposedPorts[port] = struct{}{}
			}
			for port := range overrideConfig.ExposedPorts {
				dexposedPorts[docker.Port(port)] = struct{}{}
			}
			for port := range oconfig.ExposedPorts {
				oexposedPorts[port] = struct{}{}
			}
			for port := range overrideConfig.ExposedPorts {
				oexposedPorts[string(port)] = struct{}{}
			}
			dconfig.ExposedPorts = dexposedPorts
			oconfig.ExposedPorts = oexposedPorts
		}
		if len(overrideConfig.Env) > 0 {
			oconfig.Env = mergeEnv(oconfig.Env, overrideConfig.Env)
		}
		oconfig.Entrypoint, oconfig.Cmd = firstSlicePairElseSecondSlicePair(overrideConfig.Entrypoint, overrideConfig.Cmd, oconfig.Entrypoint, oconfig.Cmd)
		if len(overrideConfig.Volumes) > 0 {
			if oconfig.Volumes == nil {
				oconfig.Volumes = make(map[string]struct{})
			}
			for volume := range overrideConfig.Volumes {
				oconfig.Volumes[volume] = struct{}{}
			}
		}
		oconfig.WorkingDir = firstStringElseSecondString(overrideConfig.WorkingDir, oconfig.WorkingDir)
		if len(overrideConfig.Labels) > 0 {
			if oconfig.Labels == nil {
				oconfig.Labels = make(map[string]string)
			}
			for k, v := range overrideConfig.Labels {
				oconfig.Labels[k] = v
			}
		}
		oconfig.StopSignal = overrideConfig.StopSignal
	}
	return nil
}

// OverrideDocker takes a buildah docker Config, and applies a mixture of a
// slice of Dockerfile-style instructions and fields from a config blob to it
func OverrideDocker(dconfig *docker.Config, overrideChanges []string, overrideConfig *manifest.Schema2Config) error {
	overrideConfig, err := parseOverrideChanges(overrideChanges, overrideConfig)
	if err != nil {
		return err
	}

	if overrideConfig != nil {
		// Apply changes from a possibly-provided possibly-changed config struct.
		dconfig.Hostname = firstStringElseSecondString(overrideConfig.Hostname, dconfig.Hostname)
		dconfig.Domainname = firstStringElseSecondString(overrideConfig.Domainname, dconfig.Domainname)
		dconfig.User = firstStringElseSecondString(overrideConfig.User, dconfig.User)
		dconfig.AttachStdin = overrideConfig.AttachStdin
		dconfig.AttachStdout = overrideConfig.AttachStdout
		dconfig.AttachStderr = overrideConfig.AttachStderr
		if len(overrideConfig.ExposedPorts) > 0 {
			dexposedPorts := make(map[docker.Port]struct{})
			for port := range dconfig.ExposedPorts {
				dexposedPorts[port] = struct{}{}
			}
			for port := range overrideConfig.ExposedPorts {
				dexposedPorts[docker.Port(port)] = struct{}{}
			}
			dconfig.ExposedPorts = dexposedPorts
		}
		dconfig.Tty = overrideConfig.Tty
		dconfig.OpenStdin = overrideConfig.OpenStdin
		dconfig.StdinOnce = overrideConfig.StdinOnce
		if len(overrideConfig.Env) > 0 {
			dconfig.Env = mergeEnv(dconfig.Env, overrideConfig.Env)
			oconfig.Env = mergeEnv(oconfig.Env, overrideConfig.Env)
		}
		dconfig.Entrypoint, dconfig.Cmd = firstSlicePairElseSecondSlicePair(overrideConfig.Entrypoint, overrideConfig.Cmd, dconfig.Entrypoint, dconfig.Cmd)
		oconfig.Entrypoint, oconfig.Cmd = firstSlicePairElseSecondSlicePair(overrideConfig.Entrypoint, overrideConfig.Cmd, oconfig.Entrypoint, oconfig.Cmd)
		if overrideConfig.Healthcheck != nil {
			dconfig.Healthcheck = &docker.HealthConfig{
				Test: slices.Clone(overrideConfig.Healthcheck.Test),

@@ -148,16 +194,11 @@ func Override(dconfig *docker.Config, oconfig *v1.ImageConfig, overrideChanges [
			if dconfig.Volumes == nil {
				dconfig.Volumes = make(map[string]struct{})
			}
			if oconfig.Volumes == nil {
				oconfig.Volumes = make(map[string]struct{})
			}
			for volume := range overrideConfig.Volumes {
				dconfig.Volumes[volume] = struct{}{}
				oconfig.Volumes[volume] = struct{}{}
			}
		}
		dconfig.WorkingDir = firstStringElseSecondString(overrideConfig.WorkingDir, dconfig.WorkingDir)
		oconfig.WorkingDir = firstStringElseSecondString(overrideConfig.WorkingDir, oconfig.WorkingDir)
		dconfig.NetworkDisabled = overrideConfig.NetworkDisabled
		dconfig.MacAddress = overrideConfig.MacAddress
		dconfig.OnBuild = overrideConfig.OnBuild

@@ -165,16 +206,11 @@ func Override(dconfig *docker.Config, oconfig *v1.ImageConfig, overrideChanges [
			if dconfig.Labels == nil {
				dconfig.Labels = make(map[string]string)
			}
			if oconfig.Labels == nil {
				oconfig.Labels = make(map[string]string)
			}
			for k, v := range overrideConfig.Labels {
				dconfig.Labels[k] = v
				oconfig.Labels[k] = v
			}
		}
		dconfig.StopSignal = overrideConfig.StopSignal
		oconfig.StopSignal = overrideConfig.StopSignal
		dconfig.StopTimeout = overrideConfig.StopTimeout
		dconfig.Shell = firstSliceElseSecondSlice(overrideConfig.Shell, dconfig.Shell)
	}

@@ -98,7 +98,7 @@ func ExportFromReader(input io.Reader, opts define.BuildOutputOption) error {
	return nil
}

func SetHas(m map[string]struct{}, k string) bool {
func SetHas[K comparable, V any](m map[K]V, k K) bool {
	_, ok := m[k]
	return ok
}
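
Because struct{} satisfies the V any parameter, the generic signature keeps existing map[string]struct{} call sites compiling unchanged while accepting any map. A quick self-contained sketch:

package main

import "fmt"

// Local copy of the generalized helper shown above.
func SetHas[K comparable, V any](m map[K]V, k K) bool {
	_, ok := m[k]
	return ok
}

func main() {
	set := map[string]struct{}{"a": {}}
	counts := map[int]int{1: 10}
	fmt.Println(SetHas(set, "a"))  // true: the old map[string]struct{} callers still work
	fmt.Println(SetHas(counts, 2)) // false: any comparable key and any value type now accepted
}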

@@ -35,6 +35,7 @@ import (
	securejoin "github.com/cyphar/filepath-securejoin"
	units "github.com/docker/go-units"
	specs "github.com/opencontainers/runtime-spec/specs-go"
	"github.com/opencontainers/selinux/go-selinux"
	"github.com/openshift/imagebuilder"
	"github.com/sirupsen/logrus"
	"github.com/spf13/cobra"

@@ -81,6 +82,25 @@ func CommonBuildOptions(c *cobra.Command) (*define.CommonBuildOptions, error) {
	return CommonBuildOptionsFromFlagSet(c.Flags(), c.Flag)
}

// If the user selected to run with the current SELinux label, append the current user and role.
func currentLabelOpts() ([]string, error) {
	label, err := selinux.CurrentLabel()
	if err != nil {
		return nil, err
	}
	if label == "" {
		return nil, nil
	}
	con, err := selinux.NewContext(label)
	if err != nil {
		return nil, err
	}
	return []string{
		fmt.Sprintf("label=user:%s", con["user"]),
		fmt.Sprintf("label=role:%s", con["role"]),
	}, nil
}
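
currentLabelOpts translates the current process's SELinux context into default --security-opt values. A sketch of that transformation; the context string below is an assumed example, not a value taken from this diff:

package main

import (
	"fmt"

	"github.com/opencontainers/selinux/go-selinux"
)

func main() {
	// Assumed example label; on a real system this would come from selinux.CurrentLabel().
	con, err := selinux.NewContext("unconfined_u:unconfined_r:unconfined_t:s0-s0:c0.c1023")
	if err != nil {
		panic(err)
	}
	fmt.Println("label=user:" + con["user"]) // label=user:unconfined_u
	fmt.Println("label=role:" + con["role"]) // label=role:unconfined_r
}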

// CommonBuildOptionsFromFlagSet parses the build options from the bud cli
func CommonBuildOptionsFromFlagSet(flags *pflag.FlagSet, findFlagFunc func(name string) *pflag.Flag) (*define.CommonBuildOptions, error) {
	var (

@@ -201,6 +221,18 @@ func CommonBuildOptionsFromFlagSet(flags *pflag.FlagSet, findFlagFunc func(name
		OCIHooksDir: ociHooks,
	}
	securityOpts, _ := flags.GetStringArray("security-opt")
	defConfig, err := config.Default()
	if err != nil {
		return nil, fmt.Errorf("failed to get container config: %w", err)
	}
	if defConfig.Containers.EnableLabeledUsers {
		defSecurityOpts, err := currentLabelOpts()
		if err != nil {
			return nil, err
		}

		securityOpts = append(defSecurityOpts, securityOpts...)
	}
	if err := parseSecurityOpts(securityOpts, commonOpts); err != nil {
		return nil, err
	}

@@ -8,6 +8,7 @@ import (
	"path/filepath"

	"github.com/containers/buildah/define"
	"github.com/opencontainers/cgroups/devices/config"
	"github.com/opencontainers/runc/libcontainer/devices"
)

@@ -47,7 +48,7 @@ func DeviceFromPath(device string) (define.ContainerDevices, error) {
	}
	for _, d := range srcDevices {
		d.Path = filepath.Join(dst, filepath.Base(d.Path))
		d.Permissions = devices.Permissions(permissions)
		d.Permissions = config.Permissions(permissions)
		device := define.BuildahDevice{Device: *d, Source: src, Destination: dst}
		devs = append(devs, device)
	}

vendor/github.com/containers/common/libnetwork/internal/rootlessnetns/netns_linux.go (generated, vendored; 9 lines changed)

@@ -369,9 +369,14 @@ func (n *Netns) setupMounts() error {

	// Ensure we mount private in our mountns to prevent accidentally
	// overwriting the host mounts in case the default propagation is shared.
	err = unix.Mount("", "/", "", unix.MS_PRIVATE|unix.MS_REC, "")
	// However using private propagation is not what we want. New mounts/umounts
	// would not be propagated into our namespace. This is a problem because we
	// may hold mount points open that were unmounted on the host, confusing users
	// as to why the underlying device is still busy when they no longer see the mount:
	// https://github.com/containers/podman/issues/25994
	err = unix.Mount("", "/", "", unix.MS_SLAVE|unix.MS_REC, "")
	if err != nil {
		return wrapError("make tree private in new mount namespace", err)
		return wrapError("set mount propagation to slave in new mount namespace", err)
	}

	xdgRuntimeDir, err := homedir.GetRuntimeDir()
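
The substance of this hunk is swapping MS_PRIVATE for MS_SLAVE: slave propagation still prevents the namespace's own mounts from leaking back to the host, but lets host-side mount and unmount events propagate in, so the namespace no longer pins mount points the host has already removed. A minimal sketch of the call, assuming it runs inside a freshly unshared mount namespace with sufficient privileges:

package main

import "golang.org/x/sys/unix"

func main() {
	// MS_SLAVE|MS_REC on "/" marks the whole tree as a recursive slave mount:
	// host mount/umount events flow in, our own mounts do not flow back out.
	if err := unix.Mount("", "/", "", unix.MS_SLAVE|unix.MS_REC, ""); err != nil {
		panic(err) // requires CAP_SYS_ADMIN in the current mount namespace
	}
}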

@@ -170,7 +170,7 @@ func (n *netavarkNetwork) networkCreate(newNetwork *types.Network, defaultNet bo
	case types.BridgeNetworkDriver:
		internalutil.MapDockerBridgeDriverOptions(newNetwork)

		var vlan int
		checkBridgeConflict := true
		// validate the given options,
		for key, value := range newNetwork.Options {
			switch key {

@@ -181,10 +181,19 @@
			}

			case types.VLANOption:
				vlan, err = internalutil.ParseVlan(value)
				_, err = internalutil.ParseVlan(value)
				if err != nil {
					return nil, err
				}
				// Unset used networks here to ensure that when using vlan networks
				// we do not error if the subnet is already in use on the host.
				// https://github.com/containers/podman/issues/25736
				usedNetworks = nil
				// If there is no vlan there should be no other config with the same bridge.
				// However with vlan we want to allow that so that you can have different
				// configs on the same bridge but different vlans
				// https://github.com/containers/common/issues/2095
				checkBridgeConflict = false

			case types.IsolateOption:
				val, err := internalutil.ParseIsolate(value)

@@ -209,7 +218,16 @@
					return nil, errors.New("invalid vrf name")
				}
			case types.ModeOption:
				if !slices.Contains(types.ValidBridgeModes, value) {
				switch value {
				case types.BridgeModeManaged:
				case types.BridgeModeUnmanaged:
					// Unset used networks here to ensure that when using unmanaged networks
					// we do not error if the subnet is already in use on the host.
					// https://github.com/containers/common/issues/2322
					usedNetworks = nil
					// Also make sure we don't error if the bridge name is already used as well.
					checkBridgeConflict = false
				default:
					return nil, fmt.Errorf("unknown bridge mode %q", value)
				}
			default:

@@ -217,11 +235,6 @@
			}
		}

		// If there is no vlan there should be no other config with the same bridge.
		// However with vlan we want to allow that so that you can have different
		// configs on the same bridge but different vlans
		// https://github.com/containers/common/issues/2095
		checkBridgeConflict := vlan == 0
		err = internalutil.CreateBridge(n, newNetwork, usedNetworks, n.defaultsubnetPools, checkBridgeConflict)
		if err != nil {
			return nil, err

@@ -361,11 +361,6 @@ func defaultEngineConfig() (*EngineConfig, error) {
	c.ComposeProviders.Set(getDefaultComposeProviders()) // may vary across supported platforms
	c.ComposeWarningLogs = true

	if path, ok := os.LookupEnv("CONTAINERS_STORAGE_CONF"); ok {
		if err := types.SetDefaultConfigFilePath(path); err != nil {
			return nil, err
		}
	}
	storeOpts, err := types.DefaultStoreOptions()
	if err != nil {
		return nil, err

@@ -8,10 +8,11 @@ import (
	"path/filepath"

	"github.com/containers/storage/pkg/unshare"
	"github.com/opencontainers/cgroups/devices/config"
	"github.com/opencontainers/runc/libcontainer/devices"
)

func DeviceFromPath(device string) ([]devices.Device, error) {
func DeviceFromPath(device string) ([]config.Device, error) {
	src, dst, permissions, err := Device(device)
	if err != nil {
		return nil, err

@@ -25,7 +26,7 @@ func DeviceFromPath(device string) ([]devices.Device, error) {
	}

	if !srcInfo.IsDir() {
		devs := make([]devices.Device, 0, 1)
		devs := make([]config.Device, 0, 1)
		dev, err := devices.DeviceFromPath(src, permissions)
		if err != nil {
			return nil, fmt.Errorf("%s is not a valid device: %w", src, err)

@@ -40,10 +41,10 @@ func DeviceFromPath(device string) ([]devices.Device, error) {
	if err != nil {
		return nil, fmt.Errorf("getting source devices from directory %s: %w", src, err)
	}
	devs := make([]devices.Device, 0, len(srcDevices))
	devs := make([]config.Device, 0, len(srcDevices))
	for _, d := range srcDevices {
		d.Path = filepath.Join(dst, filepath.Base(d.Path))
		d.Permissions = devices.Permissions(permissions)
		d.Permissions = config.Permissions(permissions)
		devs = append(devs, *d)
	}
	return devs, nil

@@ -4,6 +4,7 @@ import (
	"context"
	"io"
	"math"
	"math/rand/v2"
	"net"
	"net/http"
	"net/url"

@@ -47,6 +48,8 @@ func IfNecessary(ctx context.Context, operation func() error, options *Options)
		delay = options.Delay
	}
	logrus.Warnf("Failed, retrying in %s ... (%d/%d). Error: %v", delay, attempt+1, options.MaxRetry, err)
	delay += rand.N(delay / 10) // 10 % jitter so that a failure blip doesn’t cause a deterministic stampede
	logrus.Debugf("Retry delay with added jitter: %s", delay)
	select {
	case <-time.After(delay):
		// Do nothing.
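
rand.N from math/rand/v2 is generic over integer types, and time.Duration is one, so the added line draws a uniform jitter in [0, delay/10). A worked example: a 2s base delay becomes a sleep somewhere in [2s, 2.2s):

package main

import (
	"fmt"
	"math/rand/v2"
	"time"
)

func main() {
	delay := 2 * time.Second
	// rand.N accepts time.Duration directly because it is an integer type;
	// the argument must be positive, which delay/10 = 200ms satisfies here.
	delay += rand.N(delay / 10)
	fmt.Println(delay) // uniformly distributed in [2s, 2.2s)
}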

@@ -1,4 +1,4 @@
package version

// Version is the version of the build.
const Version = "0.63.0"
const Version = "0.64.0-dev"

@@ -421,10 +421,11 @@ func (s *storageImageDestination) PutBlobPartial(ctx context.Context, chunkAcces
		}
	}()

	differ, err := chunked.GetDiffer(ctx, s.imageRef.transport.store, srcInfo.Digest, srcInfo.Size, srcInfo.Annotations, &fetcher)
	differ, err := chunked.NewDiffer(ctx, s.imageRef.transport.store, srcInfo.Digest, srcInfo.Size, srcInfo.Annotations, &fetcher)
	if err != nil {
		return private.UploadedBlob{}, err
	}
	defer differ.Close()

	out, err := s.imageRef.transport.store.PrepareStagedLayer(nil, differ)
	if err != nil {

@@ -6,12 +6,12 @@ const (
	// VersionMajor is for an API incompatible changes
	VersionMajor = 5
	// VersionMinor is for functionality in a backwards-compatible manner
	VersionMinor = 35
	VersionMinor = 36
	// VersionPatch is for backwards-compatible bug fixes
	VersionPatch = 0

	// VersionDev indicates development branch. Releases will be empty string.
	VersionDev = ""
	VersionDev = "-dev"
)

// Version is the specification version that the package types support.

@@ -4,9 +4,11 @@ docker_builder:
    HOME: /root
    DEBIAN_FRONTEND: noninteractive
    CIRRUS_LOG_TIMESTAMP: true
    GOVERSION: 1.21
    PATH: /usr/lib/go-1.21/bin:$PATH
  setup_script: |
    apt-get -q update
    apt-get -q install -y bats cryptsetup golang
    apt-get -q install -y bats cryptsetup golang-${GOVERSION}
    go version
    make
  unit_test_script: |

@@ -17,13 +17,13 @@ env:
    ####
    #### Cache-image names to test with (double-quotes around names are critical)
    ###
    FEDORA_NAME: "fedora-41"
    FEDORA_NAME: "fedora-42"
    DEBIAN_NAME: "debian-13"

    # GCE project where images live
    IMAGE_PROJECT: "libpod-218412"
    # VM Image built in containers/automation_images
    IMAGE_SUFFIX: "c20250324t111922z-f41f40d13"
    IMAGE_SUFFIX: "c20250422t130822z-f42f41d13"
    FEDORA_CACHE_IMAGE_NAME: "fedora-${IMAGE_SUFFIX}"
    DEBIAN_CACHE_IMAGE_NAME: "debian-${IMAGE_SUFFIX}"

@@ -35,7 +35,7 @@ TESTFLAGS := $(shell $(GO) test -race $(BUILDFLAGS) ./pkg/stringutils 2>&1 > /de
# N/B: This value is managed by Renovate, manual changes are
# possible, as long as they don't disturb the formatting
# (i.e. DO NOT ADD A 'v' prefix!)
GOLANGCI_LINT_VERSION := 2.0.2
GOLANGCI_LINT_VERSION := 2.1.6

default all: local-binary docs local-validate local-cross ## validate all checks, build and cross-build\nbinaries and docs

@@ -1 +1 @@
1.58.0
1.59.0-dev

@@ -207,7 +207,6 @@ type LayerStore interface {
	Mounted(id string) (int, error)
	ParentOwners(id string) (uids, gids []int, err error)
	ApplyDiff(to string, diff io.Reader) (int64, error)
	ApplyDiffWithDiffer(to string, options *drivers.ApplyDiffOpts, differ drivers.Differ) (*drivers.DriverWithDifferOutput, error)
	DifferTarget(id string) (string, error)
	LoadLocked() error
	PutAdditionalLayer(id string, parentLayer *Layer, names []string, aLayer drivers.AdditionalLayer) (layer *Layer, err error)

@@ -216,7 +216,9 @@ type DriverWithDifferOutput struct {
	CompressedDigest digest.Digest
	Metadata         string
	BigData          map[string][]byte
	TarSplit []byte // nil if not available
	// TarSplit is owned by the [DriverWithDifferOutput], and must be closed by calling one of
	// [Store.ApplyStagedLayer]/[Store.CleanupStagedLayer]. It is nil if not available.
	TarSplit  *os.File
	TOCDigest digest.Digest
	// RootDirMode is the mode of the root directory of the layer, if specified.
	RootDirMode *os.FileMode

@@ -267,6 +269,7 @@ type DifferOptions struct {
// This API is experimental and can be changed without bumping the major version number.
type Differ interface {
	ApplyDiff(dest string, options *archive.TarOptions, differOpts *DifferOptions) (DriverWithDifferOutput, error)
	Close() error
}

// DriverWithDiffer is the interface for direct diff access.

@@ -2550,10 +2550,14 @@ func (r *layerStore) applyDiffFromStagingDirectory(id string, diffOutput *driver
	if err != nil {
		compressor = pgzip.NewWriter(&tsdata)
	}
	if _, err := diffOutput.TarSplit.Seek(0, io.SeekStart); err != nil {
		return err
	}

	if err := compressor.SetConcurrency(1024*1024, 1); err != nil { // 1024*1024 is the hard-coded default; we're not changing that
		logrus.Infof("setting compression concurrency threads to 1: %v; ignoring", err)
	}
	if _, err := compressor.Write(diffOutput.TarSplit); err != nil {
	if _, err := diffOutput.TarSplit.WriteTo(compressor); err != nil {
		compressor.Close()
		return err
	}

@@ -528,11 +528,29 @@ func canonicalTarName(name string, isDir bool) (string, error) {
	return name, nil
}

// addFile adds a file from `path` as `name` to the tar archive.
func (ta *tarWriter) addFile(path, name string) error {
type addFileData struct {
	// The path from which to read contents.
	path string

	// os.Stat for the above.
	fi os.FileInfo

	// The file header of the above.
	hdr *tar.Header

	// if present, an extra whiteout entry to write after the header.
	extraWhiteout *tar.Header
}

// prepareAddFile generates the tar file header(s) for adding a file
// from path as name to the tar archive, without writing to the
// tar stream. Thus, any error may be ignored without corrupting the
// tar file. A (nil, nil) return means that the file should be
// ignored for non-error reasons.
func (ta *tarWriter) prepareAddFile(path, name string) (*addFileData, error) {
	fi, err := os.Lstat(path)
	if err != nil {
		return err
		return nil, err
	}

	var link string

@@ -540,26 +558,26 @@ func (ta *tarWriter) addFile(path, name string) error {
		var err error
		link, err = os.Readlink(path)
		if err != nil {
			return err
			return nil, err
		}
	}
	if fi.Mode()&os.ModeSocket != 0 {
		logrus.Infof("archive: skipping %q since it is a socket", path)
		return nil
		return nil, nil
	}

	hdr, err := FileInfoHeader(name, fi, link)
	if err != nil {
		return err
		return nil, err
	}
	if err := readSecurityXattrToTarHeader(path, hdr); err != nil {
		return err
		return nil, err
	}
	if err := readUserXattrToTarHeader(path, hdr); err != nil {
		return err
		return nil, err
	}
	if err := ReadFileFlagsToTarHeader(path, hdr); err != nil {
		return err
		return nil, err
	}
	if ta.CopyPass {
		copyPassHeader(hdr)

@@ -568,18 +586,13 @@ func (ta *tarWriter) addFile(path, name string) error {
	// if it's not a directory and has more than 1 link,
	// it's hard linked, so set the type flag accordingly
	if !fi.IsDir() && hasHardlinks(fi) {
		inode, err := getInodeFromStat(fi.Sys())
		if err != nil {
			return err
		}
		inode := getInodeFromStat(fi.Sys())
		// a link should have a name that it links to
		// and that linked name should be first in the tar archive
		if oldpath, ok := ta.SeenFiles[inode]; ok {
			hdr.Typeflag = tar.TypeLink
			hdr.Linkname = oldpath
			hdr.Size = 0 // This Must be here for the writer math to add up!
		} else {
			ta.SeenFiles[inode] = name
			hdr.Size = 0 // This must be here for the writer math to add up!
		}
	}

@@ -589,11 +602,11 @@ func (ta *tarWriter) addFile(path, name string) error {
	if !strings.HasPrefix(filepath.Base(hdr.Name), WhiteoutPrefix) && !ta.IDMappings.Empty() {
		fileIDPair, err := getFileUIDGID(fi.Sys())
		if err != nil {
			return err
			return nil, err
		}
		hdr.Uid, hdr.Gid, err = ta.IDMappings.ToContainer(fileIDPair)
		if err != nil {
			return err
			return nil, err
		}
	}

@@ -616,26 +629,48 @@ func (ta *tarWriter) addFile(path, name string) error {

	maybeTruncateHeaderModTime(hdr)

	result := &addFileData{
		path: path,
		hdr:  hdr,
		fi:   fi,
	}
	if ta.WhiteoutConverter != nil {
		wo, err := ta.WhiteoutConverter.ConvertWrite(hdr, path, fi)
		// The WhiteoutConverter suggests a generic mechanism,
		// but this code is only used to convert between
		// overlayfs (on-disk) and AUFS (in the tar file)
		// whiteouts, and is initiated because the overlayfs
		// storage driver returns OverlayWhiteoutFormat from
		// Driver.getWhiteoutFormat().
		//
		// For AUFS, a directory with all its contents deleted
		// should be represented as a directory containing a
		// magic whiteout empty regular file, hence the
		// extraWhiteout header returned here.
		result.extraWhiteout, err = ta.WhiteoutConverter.ConvertWrite(hdr, path, fi)
		if err != nil {
			return err
			return nil, err
		}
	}

	// If a new whiteout file exists, write original hdr, then
	// replace hdr with wo to be written after. Whiteouts should
	// always be written after the original. Note the original
	// hdr may have been updated to be a whiteout with returning
	// a whiteout header
	if wo != nil {
	return result, nil
}

// addFile performs the write. An error here corrupts the tar file.
func (ta *tarWriter) addFile(headers *addFileData) error {
	hdr := headers.hdr
	if headers.extraWhiteout != nil {
		if hdr.Typeflag == tar.TypeReg && hdr.Size > 0 {
			// If we write hdr with hdr.Size > 0, we have
			// to write the body before we can write the
			// extraWhiteout header. This can only happen
			// if the contract for WhiteoutConverter is
			// not honored, so bail out.
			return fmt.Errorf("tar: cannot use extra whiteout with non-empty file %s", hdr.Name)
		}
		if err := ta.TarWriter.WriteHeader(hdr); err != nil {
			return err
		}
		if hdr.Typeflag == tar.TypeReg && hdr.Size > 0 {
			return fmt.Errorf("tar: cannot use whiteout for non-empty file")
		}
		hdr = wo
	}
		hdr = headers.extraWhiteout
	}

	if err := ta.TarWriter.WriteHeader(hdr); err != nil {

@@ -643,7 +678,7 @@ func (ta *tarWriter) addFile(path, name string) error {
	}

	if hdr.Typeflag == tar.TypeReg && hdr.Size > 0 {
		file, err := os.Open(path)
		file, err := os.Open(headers.path)
		if err != nil {
			return err
		}

@@ -661,6 +696,10 @@ func (ta *tarWriter) addFile(path, name string) error {
		}
	}

	if !headers.fi.IsDir() && hasHardlinks(headers.fi) {
		ta.SeenFiles[getInodeFromStat(headers.fi.Sys())] = headers.hdr.Name
	}

	return nil
}
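
Note that the SeenFiles hardlink table is now updated here in addFile, after the header is actually written, so a file whose preparation fails can no longer leave a dangling entry behind. A standalone sketch of the inode-to-first-name mechanism that drives tar.TypeLink emission:

package main

import (
	"archive/tar"
	"fmt"
)

func main() {
	seen := map[uint64]string{} // inode -> first archived name, like tarWriter.SeenFiles
	files := []struct {
		name  string
		inode uint64
	}{{"a.txt", 42}, {"b.txt", 42}} // two paths sharing one inode: a hard link pair

	for _, f := range files {
		hdr := &tar.Header{Name: f.name, Typeflag: tar.TypeReg}
		if old, ok := seen[f.inode]; ok {
			// Inode already archived: emit a hard-link entry pointing at it.
			hdr.Typeflag = tar.TypeLink
			hdr.Linkname = old
		} else {
			seen[f.inode] = f.name // first sighting: archive in full, remember the name
		}
		fmt.Println(hdr.Name, hdr.Typeflag == tar.TypeLink, hdr.Linkname)
	}
}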

@@ -853,15 +892,12 @@ func extractTarFileEntry(path, extractDir string, hdr *tar.Header, reader io.Rea
}

// Tar creates an archive from the directory at `path`, and returns it as a
// stream of bytes.
// stream of bytes. This is a convenience wrapper for [TarWithOptions].
func Tar(path string, compression Compression) (io.ReadCloser, error) {
	return TarWithOptions(path, &TarOptions{Compression: compression})
}

// TarWithOptions creates an archive from the directory at `path`, only including files whose relative
// paths are included in `options.IncludeFiles` (if non-nil) or not in `options.ExcludePatterns`.
func TarWithOptions(srcPath string, options *TarOptions) (io.ReadCloser, error) {
	tarWithOptionsTo := func(dest io.WriteCloser, srcPath string, options *TarOptions) (result error) {
func tarWithOptionsTo(dest io.WriteCloser, srcPath string, options *TarOptions) (result error) {
	// Fix the source path to work with long path names. This is a no-op
	// on platforms other than Windows.
	srcPath = fixVolumePathPrefix(srcPath)

@@ -1016,10 +1052,11 @@ func TarWithOptions(srcPath string, options *TarOptions) (io.ReadCloser, error)
			relFilePath = strings.Replace(relFilePath, include, replacement, 1)
		}

		if err := ta.addFile(filePath, relFilePath); err != nil {
			logrus.Errorf("Can't add file %s to tar: %s", filePath, err)
			// if pipe is broken, stop writing tar stream to it
			if err == io.ErrClosedPipe {
		headers, err := ta.prepareAddFile(filePath, relFilePath)
		if err != nil {
			logrus.Errorf("Can't add file %s to tar: %s; skipping", filePath, err)
		} else if headers != nil {
			if err := ta.addFile(headers); err != nil {
				return err
			}
		}

@@ -1029,8 +1066,15 @@ func TarWithOptions(srcPath string, options *TarOptions) (io.ReadCloser, error)
		}
	}
	return ta.TarWriter.Close()
}
}

// TarWithOptions creates an archive from the directory at `path`, only including files whose relative
// paths are included in `options.IncludeFiles` (if non-nil) or not in `options.ExcludePatterns`.
//
// If used on a file system being modified concurrently,
// TarWithOptions will create a valid tar archive, but may leave out
// some files.
func TarWithOptions(srcPath string, options *TarOptions) (io.ReadCloser, error) {
	pipeReader, pipeWriter := io.Pipe()
	go func() {
		err := tarWithOptionsTo(pipeWriter, srcPath, options)

@@ -1446,7 +1490,7 @@ func NewTempArchive(src io.Reader, dir string) (*TempArchive, error) {
	if _, err := io.Copy(f, src); err != nil {
		return nil, err
	}
	if _, err := f.Seek(0, 0); err != nil {
	if _, err := f.Seek(0, io.SeekStart); err != nil {
		return nil, err
	}
	st, err := f.Stat()

@@ -82,7 +82,7 @@ func setHeaderForSpecialDevice(hdr *tar.Header, name string, stat any) (err erro
	return
}

func getInodeFromStat(stat any) (inode uint64, err error) {
func getInodeFromStat(stat any) (inode uint64) {
	s, ok := stat.(*syscall.Stat_t)

	if ok {

@@ -57,7 +57,7 @@ func setHeaderForSpecialDevice(hdr *tar.Header, name string, stat interface{}) (
	return
}

func getInodeFromStat(stat interface{}) (inode uint64, err error) {
func getInodeFromStat(stat interface{}) (inode uint64) {
	// do nothing. no notion of Inode in stat on Windows
	return
}

@@ -481,8 +481,14 @@ func ExportChanges(dir string, changes []Change, uidMaps, gidMaps []idtools.IDMa
			}
		} else {
			path := filepath.Join(dir, change.Path)
			if err := ta.addFile(path, change.Path[1:]); err != nil {
			headers, err := ta.prepareAddFile(path, change.Path[1:])
			if err != nil {
				logrus.Debugf("Can't add file %s to tar: %s", path, err)
			} else if headers != nil {
				if err := ta.addFile(headers); err != nil {
					writer.CloseWithError(err)
					return
				}
			}
		}
	}

@@ -7,6 +7,7 @@ import (
	"fmt"
	"io"
	"maps"
	"os"
	"slices"
	"strconv"
	"time"

@@ -18,6 +19,7 @@ import (
	"github.com/vbatts/tar-split/archive/tar"
	"github.com/vbatts/tar-split/tar/asm"
	"github.com/vbatts/tar-split/tar/storage"
	"golang.org/x/sys/unix"
)

const (

@@ -157,10 +159,33 @@ func readEstargzChunkedManifest(blobStream ImageSourceSeekable, blobSize int64,
	return manifestUncompressed, tocOffset, nil
}

func openTmpFile(tmpDir string) (*os.File, error) {
	file, err := os.OpenFile(tmpDir, unix.O_TMPFILE|unix.O_RDWR|unix.O_CLOEXEC|unix.O_EXCL, 0o600)
	if err == nil {
		return file, nil
	}
	return openTmpFileNoTmpFile(tmpDir)
}

// openTmpFileNoTmpFile is a fallback used by openTmpFile when the underlying file system does not
// support O_TMPFILE.
func openTmpFileNoTmpFile(tmpDir string) (*os.File, error) {
	file, err := os.CreateTemp(tmpDir, ".tmpfile")
	if err != nil {
		return nil, err
	}
	// Unlink the file immediately so that only the open fd refers to it.
	_ = os.Remove(file.Name())
	return file, nil
}
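
openTmpFile prefers Linux's O_TMPFILE, which yields a descriptor with no name at all, and falls back to create-then-unlink; either way closing the descriptor is the only cleanup required. A usage sketch under those assumptions (useScratchFile is our illustration, relying on the surrounding file's io and os imports):

// useScratchFile shows the expected call pattern: write, rewind, read, close.
func useScratchFile(tmpDir string) error {
	f, err := openTmpFile(tmpDir)
	if err != nil {
		return err
	}
	defer f.Close() // the file is nameless (or already unlinked), so Close frees it

	if _, err := f.Write([]byte("scratch data")); err != nil {
		return err
	}
	// Callers must rewind before reading; openTmpFile makes no position guarantee.
	_, err = f.Seek(0, io.SeekStart)
	return err
}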

// readZstdChunkedManifest reads the zstd:chunked manifest from the seekable stream blobStream.
// Returns (manifest blob, parsed manifest, tar-split blob or nil, manifest offset).
// tmpDir is a directory where the tar-split temporary file is written to. The file is opened with
// O_TMPFILE so that it is automatically removed when it is closed.
// Returns (manifest blob, parsed manifest, tar-split file or nil, manifest offset).
// The opened tar-split file’s position is unspecified.
// It may return an error matching ErrFallbackToOrdinaryLayerDownload / errFallbackCanConvert.
func readZstdChunkedManifest(blobStream ImageSourceSeekable, tocDigest digest.Digest, annotations map[string]string) (_ []byte, _ *minimal.TOC, _ []byte, _ int64, retErr error) {
func readZstdChunkedManifest(tmpDir string, blobStream ImageSourceSeekable, tocDigest digest.Digest, annotations map[string]string) (_ []byte, _ *minimal.TOC, _ *os.File, _ int64, retErr error) {
	offsetMetadata := annotations[minimal.ManifestInfoKey]
	if offsetMetadata == "" {
		return nil, nil, nil, 0, fmt.Errorf("%q annotation missing", minimal.ManifestInfoKey)

@@ -245,7 +270,7 @@ func readZstdChunkedManifest(blobStream ImageSourceSeekable, tocDigest digest.Di
		return nil, nil, nil, 0, fmt.Errorf("unmarshaling TOC: %w", err)
	}

	var decodedTarSplit []byte = nil
	var decodedTarSplit *os.File
	if toc.TarSplitDigest != "" {
		if tarSplitChunk.Offset <= 0 {
			return nil, nil, nil, 0, fmt.Errorf("TOC requires a tar-split, but the %s annotation does not describe a position", minimal.TarSplitInfoKey)

@@ -254,8 +279,16 @@ func readZstdChunkedManifest(blobStream ImageSourceSeekable, tocDigest digest.Di
		if err != nil {
			return nil, nil, nil, 0, err
		}
		decodedTarSplit, err = decodeAndValidateBlob(tarSplit, tarSplitLengthUncompressed, toc.TarSplitDigest.String())
		decodedTarSplit, err = openTmpFile(tmpDir)
		if err != nil {
			return nil, nil, nil, 0, err
		}
		defer func() {
			if retErr != nil {
				decodedTarSplit.Close()
			}
		}()
		if err := decodeAndValidateBlobToStream(tarSplit, decodedTarSplit, toc.TarSplitDigest.String()); err != nil {
			return nil, nil, nil, 0, fmt.Errorf("validating and decompressing tar-split: %w", err)
		}
		// We use the TOC for creating on-disk files, but the tar-split for creating metadata

@@ -274,11 +307,11 @@ func readZstdChunkedManifest(blobStream ImageSourceSeekable, tocDigest digest.Di
			return nil, nil, nil, 0, err
		}
	}
	return decodedBlob, toc, decodedTarSplit, int64(manifestChunk.Offset), err
	return decodedBlob, toc, decodedTarSplit, int64(manifestChunk.Offset), nil
}
|
||||
|
||||
// ensureTOCMatchesTarSplit validates that toc and tarSplit contain _exactly_ the same entries.
|
||||
func ensureTOCMatchesTarSplit(toc *minimal.TOC, tarSplit []byte) error {
|
||||
func ensureTOCMatchesTarSplit(toc *minimal.TOC, tarSplit *os.File) error {
|
||||
pendingFiles := map[string]*minimal.FileMetadata{} // Name -> an entry in toc.Entries
|
||||
for i := range toc.Entries {
|
||||
e := &toc.Entries[i]
|
||||
|
@ -290,7 +323,11 @@ func ensureTOCMatchesTarSplit(toc *minimal.TOC, tarSplit []byte) error {
|
|||
}
|
||||
}
|
||||
|
||||
unpacker := storage.NewJSONUnpacker(bytes.NewReader(tarSplit))
|
||||
if _, err := tarSplit.Seek(0, io.SeekStart); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
unpacker := storage.NewJSONUnpacker(tarSplit)
|
||||
if err := asm.IterateHeaders(unpacker, func(hdr *tar.Header) error {
|
||||
e, ok := pendingFiles[hdr.Name]
|
||||
if !ok {
|
||||
|
@ -320,10 +357,10 @@ func ensureTOCMatchesTarSplit(toc *minimal.TOC, tarSplit []byte) error {
|
|||
}
|
||||
|
||||
// tarSizeFromTarSplit computes the total tarball size, using only the tarSplit metadata
|
||||
func tarSizeFromTarSplit(tarSplit []byte) (int64, error) {
|
||||
func tarSizeFromTarSplit(tarSplit io.Reader) (int64, error) {
|
||||
var res int64 = 0
|
||||
|
||||
unpacker := storage.NewJSONUnpacker(bytes.NewReader(tarSplit))
|
||||
unpacker := storage.NewJSONUnpacker(tarSplit)
|
||||
for {
|
||||
entry, err := unpacker.Next()
|
||||
if err != nil {
|
||||
|
@ -433,22 +470,29 @@ func ensureFileMetadataAttributesMatch(a, b *minimal.FileMetadata) error {
|
|||
return nil
|
||||
}
|
||||
|
||||
func decodeAndValidateBlob(blob []byte, lengthUncompressed uint64, expectedCompressedChecksum string) ([]byte, error) {
|
||||
func validateBlob(blob []byte, expectedCompressedChecksum string) error {
|
||||
d, err := digest.Parse(expectedCompressedChecksum)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("invalid digest %q: %w", expectedCompressedChecksum, err)
|
||||
return fmt.Errorf("invalid digest %q: %w", expectedCompressedChecksum, err)
|
||||
}
|
||||
|
||||
blobDigester := d.Algorithm().Digester()
|
||||
blobChecksum := blobDigester.Hash()
|
||||
if _, err := blobChecksum.Write(blob); err != nil {
|
||||
return nil, err
|
||||
return err
|
||||
}
|
||||
if blobDigester.Digest() != d {
|
||||
return nil, fmt.Errorf("invalid blob checksum, expected checksum %s, got %s", d, blobDigester.Digest())
|
||||
return fmt.Errorf("invalid blob checksum, expected checksum %s, got %s", d, blobDigester.Digest())
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func decodeAndValidateBlob(blob []byte, lengthUncompressed uint64, expectedCompressedChecksum string) ([]byte, error) {
|
||||
if err := validateBlob(blob, expectedCompressedChecksum); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
decoder, err := zstd.NewReader(nil) //nolint:contextcheck
|
||||
decoder, err := zstd.NewReader(nil)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
@ -457,3 +501,18 @@ func decodeAndValidateBlob(blob []byte, lengthUncompressed uint64, expectedCompr
|
|||
b := make([]byte, 0, lengthUncompressed)
|
||||
return decoder.DecodeAll(blob, b)
|
||||
}
|
||||
|
||||
func decodeAndValidateBlobToStream(blob []byte, w *os.File, expectedCompressedChecksum string) error {
|
||||
if err := validateBlob(blob, expectedCompressedChecksum); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
decoder, err := zstd.NewReader(bytes.NewReader(blob))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer decoder.Close()
|
||||
|
||||
_, err = decoder.WriteTo(w)
|
||||
return err
|
||||
}
|
||||
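The new validateBlob helper is the standard opencontainers/go-digest verification pattern: parse the expected digest, hash the bytes with the same algorithm, compare. A self-contained sketch of the same idea (package layout and sample data are illustrative, not from the diff):

    // Minimal checksum validation with go-digest, mirroring validateBlob above.
    package main

    import (
        "fmt"

        "github.com/opencontainers/go-digest"
    )

    func validate(blob []byte, expected string) error {
        d, err := digest.Parse(expected) // e.g. "sha256:..."
        if err != nil {
            return fmt.Errorf("invalid digest %q: %w", expected, err)
        }
        digester := d.Algorithm().Digester()
        if _, err := digester.Hash().Write(blob); err != nil {
            return err
        }
        if digester.Digest() != d {
            return fmt.Errorf("checksum mismatch: want %s, got %s", d, digester.Digest())
        }
        return nil
    }

    func main() {
        blob := []byte("hello")
        fmt.Println(validate(blob, digest.FromBytes(blob).String())) // <nil>
    }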
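openTmpFile relies on Linux's O_TMPFILE: opening a directory with this flag yields an anonymous file on that filesystem which disappears when the last descriptor is closed, so the decompressed tar-split never has to be unlinked. A Linux-only standalone sketch ("/tmp" is an illustrative directory, not taken from the diff):

    // Sketch of the O_TMPFILE pattern used by the vendored openTmpFile helper.
    package main

    import (
        "fmt"
        "os"

        "golang.org/x/sys/unix"
    )

    func main() {
        fd, err := unix.Open("/tmp", unix.O_TMPFILE|unix.O_RDWR|unix.O_CLOEXEC, 0o600)
        if err != nil {
            // Not every filesystem supports O_TMPFILE; real code (like the
            // vendored helper) falls back to os.CreateTemp plus os.Remove.
            fmt.Println("O_TMPFILE unsupported here:", err)
            return
        }
        f := os.NewFile(uintptr(fd), "/tmp")
        defer f.Close() // the anonymous file vanishes here

        f.WriteString("scratch data") // never visible in a directory listing
        fmt.Println("anonymous temp file ready")
    }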
@@ -2,7 +2,6 @@ package chunked

import (
    archivetar "archive/tar"
    "bytes"
    "context"
    "encoding/base64"
    "errors"

@@ -81,7 +80,7 @@ type chunkedDiffer struct {
    convertToZstdChunked bool

    // Chunked metadata
-   // This is usually set in GetDiffer, but if convertToZstdChunked, it is only computed in chunkedDiffer.ApplyDiff
+   // This is usually set in NewDiffer, but if convertToZstdChunked, it is only computed in chunkedDiffer.ApplyDiff
    // ==========
    // tocDigest is the digest of the TOC document when the layer
    // is partially pulled, or "" if not relevant to consumers.

@@ -89,14 +88,14 @@ type chunkedDiffer struct {
    tocOffset int64
    manifest  []byte
    toc       *minimal.TOC // The parsed contents of manifest, or nil if not yet available
-   tarSplit  []byte
+   tarSplit  *os.File
    uncompressedTarSize int64 // -1 if unknown
    // skipValidation is set to true if the individual files in
    // the layer are trusted and should not be validated.
    skipValidation bool

    // Long-term caches
-   // This is set in GetDiffer, when the caller must not hold any storage locks, and later consumed in .ApplyDiff()
+   // This is set in NewDiffer, when the caller must not hold any storage locks, and later consumed in .ApplyDiff()
    // ==========
    layersCache *layersCache
    copyBuffer  []byte

@@ -109,6 +108,7 @@ type chunkedDiffer struct {
    zstdReader  *zstd.Decoder
    rawReader   io.Reader
    useFsVerity graphdriver.DifferFsVerity
+   used        bool // the differ object was already used and cannot be used again for .ApplyDiff
}

var xattrsToIgnore = map[string]any{
@@ -164,13 +164,11 @@ func (c *chunkedDiffer) convertTarToZstdChunked(destDirectory string, payload *o

    defer diff.Close()

-   fd, err := unix.Open(destDirectory, unix.O_TMPFILE|unix.O_RDWR|unix.O_CLOEXEC, 0o600)
+   f, err := openTmpFile(destDirectory)
    if err != nil {
-       return 0, nil, "", nil, &fs.PathError{Op: "open", Path: destDirectory, Err: err}
+       return 0, nil, "", nil, err
    }

-   f := os.NewFile(uintptr(fd), destDirectory)
-
    newAnnotations := make(map[string]string)
    level := 1
    chunked, err := compressor.ZstdCompressor(f, newAnnotations, &level)

@@ -193,10 +191,20 @@ func (c *chunkedDiffer) convertTarToZstdChunked(destDirectory string, payload *o
    return copied, newSeekableFile(f), convertedOutputDigester.Digest(), newAnnotations, nil
}

-// GetDiffer returns a differ that can be used with ApplyDiffWithDiffer.
+func (c *chunkedDiffer) Close() error {
+   if c.tarSplit != nil {
+       err := c.tarSplit.Close()
+       c.tarSplit = nil
+       return err
+   }
+   return nil
+}
+
+// NewDiffer returns a differ that can be used with [Store.PrepareStagedLayer].
 // If it returns an error that matches ErrFallbackToOrdinaryLayerDownload, the caller can
 // retry the operation with a different method.
-func GetDiffer(ctx context.Context, store storage.Store, blobDigest digest.Digest, blobSize int64, annotations map[string]string, iss ImageSourceSeekable) (graphdriver.Differ, error) {
+// The caller must call Close() on the returned Differ.
+func NewDiffer(ctx context.Context, store storage.Store, blobDigest digest.Digest, blobSize int64, annotations map[string]string, iss ImageSourceSeekable) (graphdriver.Differ, error) {
    pullOptions := parsePullOptions(store)

    if !pullOptions.enablePartialImages {
@@ -259,7 +267,7 @@ func (e errFallbackCanConvert) Unwrap() error {
    return e.err
}

-// getProperDiffer is an implementation detail of GetDiffer.
+// getProperDiffer is an implementation detail of NewDiffer.
// It returns a “proper” differ (not a convert_images one) if possible.
// May return an error matching ErrFallbackToOrdinaryLayerDownload if a fallback to an alternative
// (either makeConvertFromRawDiffer, or a non-partial pull) is permissible.

@@ -332,14 +340,22 @@ func makeConvertFromRawDiffer(store storage.Store, blobDigest digest.Digest, blo

// makeZstdChunkedDiffer sets up a chunkedDiffer for a zstd:chunked layer.
// It may return an error matching ErrFallbackToOrdinaryLayerDownload / errFallbackCanConvert.
-func makeZstdChunkedDiffer(store storage.Store, blobSize int64, tocDigest digest.Digest, annotations map[string]string, iss ImageSourceSeekable, pullOptions pullOptions) (*chunkedDiffer, error) {
-   manifest, toc, tarSplit, tocOffset, err := readZstdChunkedManifest(iss, tocDigest, annotations)
+func makeZstdChunkedDiffer(store storage.Store, blobSize int64, tocDigest digest.Digest, annotations map[string]string, iss ImageSourceSeekable, pullOptions pullOptions) (_ *chunkedDiffer, retErr error) {
+   manifest, toc, tarSplit, tocOffset, err := readZstdChunkedManifest(store.RunRoot(), iss, tocDigest, annotations)
    if err != nil { // May be ErrFallbackToOrdinaryLayerDownload / errFallbackCanConvert
        return nil, fmt.Errorf("read zstd:chunked manifest: %w", err)
    }
+   defer func() {
+       if tarSplit != nil && retErr != nil {
+           tarSplit.Close()
+       }
+   }()

    var uncompressedTarSize int64 = -1
    if tarSplit != nil {
+       if _, err := tarSplit.Seek(0, io.SeekStart); err != nil {
+           return nil, err
+       }
        uncompressedTarSize, err = tarSizeFromTarSplit(tarSplit)
        if err != nil {
            return nil, fmt.Errorf("computing size from tar-split: %w", err)
@@ -643,7 +659,7 @@ func (o *originFile) OpenFile() (io.ReadCloser, error) {
        return nil, err
    }

-   if _, err := srcFile.Seek(o.Offset, 0); err != nil {
+   if _, err := srcFile.Seek(o.Offset, io.SeekStart); err != nil {
        srcFile.Close()
        return nil, err
    }
@@ -1374,6 +1390,11 @@ func typeToOsMode(typ string) (os.FileMode, error) {
}

func (c *chunkedDiffer) ApplyDiff(dest string, options *archive.TarOptions, differOpts *graphdriver.DifferOptions) (graphdriver.DriverWithDifferOutput, error) {
+   if c.used {
+       return graphdriver.DriverWithDifferOutput{}, fmt.Errorf("internal error: chunked differ already used")
+   }
+   c.used = true
+
    defer c.layersCache.release()
    defer func() {
        if c.zstdReader != nil {

@@ -1435,7 +1456,7 @@ func (c *chunkedDiffer) ApplyDiff(dest string, options *archive.TarOptions, diff
        if tocDigest == nil {
            return graphdriver.DriverWithDifferOutput{}, fmt.Errorf("internal error: just-created zstd:chunked missing TOC digest")
        }
-       manifest, toc, tarSplit, tocOffset, err := readZstdChunkedManifest(fileSource, *tocDigest, annotations)
+       manifest, toc, tarSplit, tocOffset, err := readZstdChunkedManifest(dest, fileSource, *tocDigest, annotations)
        if err != nil {
            return graphdriver.DriverWithDifferOutput{}, fmt.Errorf("read zstd:chunked manifest: %w", err)
        }
@@ -1842,7 +1863,10 @@ func (c *chunkedDiffer) ApplyDiff(dest string, options *archive.TarOptions, diff
    case c.pullOptions.insecureAllowUnpredictableImageContents:
        // Oh well. Skip the costly digest computation.
    case output.TarSplit != nil:
-       metadata := tsStorage.NewJSONUnpacker(bytes.NewReader(output.TarSplit))
+       if _, err := output.TarSplit.Seek(0, io.SeekStart); err != nil {
+           return output, err
+       }
+       metadata := tsStorage.NewJSONUnpacker(output.TarSplit)
        fg := newStagedFileGetter(dirFile, flatPathNameMap)
        digester := digest.Canonical.Digester()
        if err := asm.WriteOutputTarStream(fg, metadata, digester.Hash()); err != nil {

@@ -1850,7 +1874,7 @@ func (c *chunkedDiffer) ApplyDiff(dest string, options *archive.TarOptions, diff
        }
        output.UncompressedDigest = digester.Digest()
    default:
-       // We are checking for this earlier in GetDiffer, so this should not be reachable.
+       // We are checking for this earlier in NewDiffer, so this should not be reachable.
        return output, fmt.Errorf(`internal error: layer's UncompressedDigest is unknown and "insecure_allow_unpredictable_image_contents" is not set`)
    }
}
@@ -1861,6 +1885,9 @@ func (c *chunkedDiffer) ApplyDiff(dest string, options *archive.TarOptions, diff

    output.Artifacts[fsVerityDigestsKey] = c.fsVerityDigests

+   // on success steal the reference to the tarSplit file
+   c.tarSplit = nil
+
    return output, nil
}
@@ -1962,7 +1989,7 @@ func validateChunkChecksum(chunk *minimal.FileMetadata, root, path string, offse
    }
    defer fd.Close()

-   if _, err := unix.Seek(int(fd.Fd()), offset, 0); err != nil {
+   if _, err := fd.Seek(offset, io.SeekStart); err != nil {
        return false
    }
@@ -11,7 +11,8 @@ import (
    digest "github.com/opencontainers/go-digest"
)

-// GetDiffer returns a differ that can be used with ApplyDiffWithDiffer.
-func GetDiffer(ctx context.Context, store storage.Store, blobDigest digest.Digest, blobSize int64, annotations map[string]string, iss ImageSourceSeekable) (graphdriver.Differ, error) {
+// NewDiffer returns a differ that can be used with [Store.PrepareStagedLayer].
+// The caller must call Close() on the returned Differ.
+func NewDiffer(ctx context.Context, store storage.Store, blobDigest digest.Digest, blobSize int64, annotations map[string]string, iss ImageSourceSeekable) (graphdriver.Differ, error) {
    return nil, newErrFallbackToOrdinaryLayerDownload(errors.New("format not supported on this system"))
}
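The renamed constructor changes the caller contract: NewDiffer may now hold an open tar-split file, so the differ must be closed even when ApplyDiff is never reached. A hypothetical caller sketch, assuming (as the new doc comment implies) that the returned Differ exposes Close; the function and variable names are illustrative, not from the diff:

    // Hypothetical consumer of chunked.NewDiffer and the new Close contract.
    package main

    import (
        "context"

        storage "github.com/containers/storage"
        "github.com/containers/storage/pkg/chunked"
        digest "github.com/opencontainers/go-digest"
    )

    func applyPartialLayer(store storage.Store, blobDigest digest.Digest, blobSize int64,
        annotations map[string]string, iss chunked.ImageSourceSeekable) error {
        differ, err := chunked.NewDiffer(context.Background(), store, blobDigest, blobSize, annotations, iss)
        if err != nil {
            return err // may match chunked.ErrFallbackToOrdinaryLayerDownload
        }
        defer differ.Close() // releases the O_TMPFILE-backed tar-split file

        out, err := store.PrepareStagedLayer(nil, differ)
        if err != nil {
            return err
        }
        // The caller must eventually hand `out` to ApplyStagedLayer or
        // CleanupStagedLayer; both now also close out.TarSplit.
        _ = out
        return nil
    }

    func main() {}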
@@ -362,15 +362,11 @@ type Store interface {
    // }
    ApplyDiff(to string, diff io.Reader) (int64, error)

-   // ApplyDiffWithDiffer applies a diff to a layer.
-   // It is the caller responsibility to clean the staging directory if it is not
-   // successfully applied with ApplyStagedLayer.
-   // Deprecated: Use PrepareStagedLayer instead. ApplyDiffWithDiffer is going to be removed in a future release
-   ApplyDiffWithDiffer(to string, options *drivers.ApplyDiffWithDifferOpts, differ drivers.Differ) (*drivers.DriverWithDifferOutput, error)
-
    // PrepareStagedLayer applies a diff to a layer.
    // It is the caller's responsibility to clean the staging directory if it is not
    // successfully applied with ApplyStagedLayer.
+   // The caller must ensure [Store.ApplyStagedLayer] or [Store.CleanupStagedLayer] is called eventually
+   // with the returned [drivers.DriverWithDifferOutput] object.
    PrepareStagedLayer(options *drivers.ApplyDiffWithDifferOpts, differ drivers.Differ) (*drivers.DriverWithDifferOutput, error)

    // ApplyStagedLayer combines the functions of creating a layer and using the staging
@@ -3132,6 +3128,12 @@ func (s *store) Diff(from, to string, options *DiffOptions) (io.ReadCloser, erro
}

func (s *store) ApplyStagedLayer(args ApplyStagedLayerOptions) (*Layer, error) {
+   defer func() {
+       if args.DiffOutput.TarSplit != nil {
+           args.DiffOutput.TarSplit.Close()
+           args.DiffOutput.TarSplit = nil
+       }
+   }()
    rlstore, rlstores, err := s.bothLayerStoreKinds()
    if err != nil {
        return nil, err

@@ -3163,6 +3165,10 @@ func (s *store) ApplyStagedLayer(args ApplyStagedLayerOptions) (*Layer, error) {
}

func (s *store) CleanupStagedLayer(diffOutput *drivers.DriverWithDifferOutput) error {
+   if diffOutput.TarSplit != nil {
+       diffOutput.TarSplit.Close()
+       diffOutput.TarSplit = nil
+   }
    _, err := writeToLayerStore(s, func(rlstore rwLayerStore) (struct{}, error) {
        return struct{}{}, rlstore.CleanupStagingDirectory(diffOutput.Target)
    })

@@ -3177,13 +3183,6 @@ func (s *store) PrepareStagedLayer(options *drivers.ApplyDiffWithDifferOpts, dif
    return rlstore.applyDiffWithDifferNoLock(options, differ)
}

-func (s *store) ApplyDiffWithDiffer(to string, options *drivers.ApplyDiffWithDifferOpts, differ drivers.Differ) (*drivers.DriverWithDifferOutput, error) {
-   if to != "" {
-       return nil, fmt.Errorf("ApplyDiffWithDiffer does not support non-empty 'layer' parameter")
-   }
-   return s.PrepareStagedLayer(options, differ)
-}
-
func (s *store) DifferTarget(id string) (string, error) {
    return writeToLayerStore(s, func(rlstore rwLayerStore) (string, error) {
        if rlstore.Exists(id) {
@@ -160,7 +160,6 @@ func loadStoreOptionsFromConfFile(storageConf string) (StoreOptions, error) {
        defaultRootlessGraphRoot = storageOpts.GraphRoot
        storageOpts = StoreOptions{}
        reloadConfigurationFileIfNeeded(storageConf, &storageOpts)
        if usePerUserStorage() {
            // If the file did not specify a graphroot or runroot,
            // set sane defaults so we don't try and use root-owned
            // directories

@@ -175,7 +174,6 @@ func loadStoreOptionsFromConfFile(storageConf string) (StoreOptions, error) {
            }
        }
    }
}
    if storageOpts.RunRoot == "" {
        return storageOpts, fmt.Errorf("runroot must be set")
    }
@@ -1,12 +1,6 @@
 linters-settings:
-  govet:
-    check-shadowing: true
-  golint:
-    min-confidence: 0
   gocyclo:
     min-complexity: 45
-  maligned:
-    suggest-new: true
   dupl:
     threshold: 200
   goconst:

@@ -16,7 +10,7 @@ linters-settings:
 linters:
   enable-all: true
   disable:
-    - maligned
+    - recvcheck
     - unparam
     - lll
     - gochecknoinits

@@ -29,9 +23,6 @@ linters:
     - wrapcheck
     - testpackage
     - nlreturn
-    - gomnd
-    - exhaustivestruct
-    - goerr113
     - errorlint
     - nestif
     - godot

@@ -39,7 +30,6 @@ linters:
     - paralleltest
     - tparallel
     - thelper
-    - ifshort
     - exhaustruct
     - varnamelen
     - gci

@@ -52,10 +42,15 @@ linters:
     - forcetypeassert
     - cyclop
     # deprecated linters
-    - deadcode
-    - interfacer
-    - scopelint
-    - varcheck
-    - structcheck
-    - golint
-    - nosnakecase
+    #- deadcode
+    #- interfacer
+    #- scopelint
+    #- varcheck
+    #- structcheck
+    #- golint
+    #- nosnakecase
+    #- maligned
+    #- goerr113
+    #- ifshort
+    #- gomnd
+    #- exhaustivestruct
@@ -0,0 +1,18 @@
+package jsonpointer
+
+type pointerError string
+
+func (e pointerError) Error() string {
+   return string(e)
+}
+
+const (
+   // ErrPointer is an error raised by the jsonpointer package
+   ErrPointer pointerError = "JSON pointer error"
+
+   // ErrInvalidStart states that a JSON pointer must start with a separator ("/")
+   ErrInvalidStart pointerError = `JSON pointer must be empty or start with a "` + pointerSeparator
+
+   // ErrUnsupportedValueType indicates that a value of the wrong type is being set
+   ErrUnsupportedValueType pointerError = "only structs, pointers, maps and slices are supported for setting values"
+)
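Because every error in the package now wraps ErrPointer via %w or errors.Join, callers can classify failures without string matching. A small consumer-side sketch (the document value is illustrative):

    // Illustrative consumer of the new go-openapi/jsonpointer sentinel errors.
    package main

    import (
        "errors"
        "fmt"

        "github.com/go-openapi/jsonpointer"
    )

    func main() {
        doc := map[string]any{"a": 1}

        p, err := jsonpointer.New("/missing")
        if err != nil {
            panic(err)
        }
        if _, _, err := p.Get(doc); err != nil {
            // Matches any error produced by the package, regardless of detail.
            fmt.Println(errors.Is(err, jsonpointer.ErrPointer)) // true
        }
    }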
@@ -39,9 +39,6 @@ import (
const (
    emptyPointer     = ``
    pointerSeparator = `/`
-
-   invalidStart = `JSON pointer must be empty or start with a "` + pointerSeparator
-   notFound     = `Can't find the pointer in the document`
)

var jsonPointableType = reflect.TypeOf(new(JSONPointable)).Elem()

@@ -80,7 +77,7 @@ func (p *Pointer) parse(jsonPointerString string) error {

    if jsonPointerString != emptyPointer {
        if !strings.HasPrefix(jsonPointerString, pointerSeparator) {
-           err = errors.New(invalidStart)
+           err = errors.Join(ErrInvalidStart, ErrPointer)
        } else {
            referenceTokens := strings.Split(jsonPointerString, pointerSeparator)
            p.referenceTokens = append(p.referenceTokens, referenceTokens[1:]...)

@@ -128,7 +125,7 @@ func getSingleImpl(node any, decodedToken string, nameProvider *swag.NameProvide
    rValue := reflect.Indirect(reflect.ValueOf(node))
    kind := rValue.Kind()
    if isNil(node) {
-       return nil, kind, fmt.Errorf("nil value has not field %q", decodedToken)
+       return nil, kind, fmt.Errorf("nil value has no field %q: %w", decodedToken, ErrPointer)
    }

    switch typed := node.(type) {

@@ -146,7 +143,7 @@ func getSingleImpl(node any, decodedToken string, nameProvider *swag.NameProvide
    case reflect.Struct:
        nm, ok := nameProvider.GetGoNameForType(rValue.Type(), decodedToken)
        if !ok {
-           return nil, kind, fmt.Errorf("object has no field %q", decodedToken)
+           return nil, kind, fmt.Errorf("object has no field %q: %w", decodedToken, ErrPointer)
        }
        fld := rValue.FieldByName(nm)
        return fld.Interface(), kind, nil

@@ -158,7 +155,7 @@ func getSingleImpl(node any, decodedToken string, nameProvider *swag.NameProvide
        if mv.IsValid() {
            return mv.Interface(), kind, nil
        }
-       return nil, kind, fmt.Errorf("object has no key %q", decodedToken)
+       return nil, kind, fmt.Errorf("object has no key %q: %w", decodedToken, ErrPointer)

    case reflect.Slice:
        tokenIndex, err := strconv.Atoi(decodedToken)

@@ -167,14 +164,14 @@ func getSingleImpl(node any, decodedToken string, nameProvider *swag.NameProvide
        }
        sLength := rValue.Len()
        if tokenIndex < 0 || tokenIndex >= sLength {
-           return nil, kind, fmt.Errorf("index out of bounds array[0,%d] index '%d'", sLength-1, tokenIndex)
+           return nil, kind, fmt.Errorf("index out of bounds array[0,%d] index '%d': %w", sLength-1, tokenIndex, ErrPointer)
        }

        elem := rValue.Index(tokenIndex)
        return elem.Interface(), kind, nil

    default:
-       return nil, kind, fmt.Errorf("invalid token reference %q", decodedToken)
+       return nil, kind, fmt.Errorf("invalid token reference %q: %w", decodedToken, ErrPointer)
    }

}

@@ -194,7 +191,7 @@ func setSingleImpl(node, data any, decodedToken string, nameProvider *swag.NameP
    case reflect.Struct:
        nm, ok := nameProvider.GetGoNameForType(rValue.Type(), decodedToken)
        if !ok {
-           return fmt.Errorf("object has no field %q", decodedToken)
+           return fmt.Errorf("object has no field %q: %w", decodedToken, ErrPointer)
        }
        fld := rValue.FieldByName(nm)
        if fld.IsValid() {

@@ -214,18 +211,18 @@ func setSingleImpl(node, data any, decodedToken string, nameProvider *swag.NameP
        }
        sLength := rValue.Len()
        if tokenIndex < 0 || tokenIndex >= sLength {
-           return fmt.Errorf("index out of bounds array[0,%d] index '%d'", sLength, tokenIndex)
+           return fmt.Errorf("index out of bounds array[0,%d] index '%d': %w", sLength, tokenIndex, ErrPointer)
        }

        elem := rValue.Index(tokenIndex)
        if !elem.CanSet() {
-           return fmt.Errorf("can't set slice index %s to %v", decodedToken, data)
+           return fmt.Errorf("can't set slice index %s to %v: %w", decodedToken, data, ErrPointer)
        }
        elem.Set(reflect.ValueOf(data))
        return nil

    default:
-       return fmt.Errorf("invalid token reference %q", decodedToken)
+       return fmt.Errorf("invalid token reference %q: %w", decodedToken, ErrPointer)
    }

}

@@ -244,7 +241,6 @@ func (p *Pointer) get(node any, nameProvider *swag.NameProvider) (any, reflect.K
    }

    for _, token := range p.referenceTokens {
-
        decodedToken := Unescape(token)

        r, knd, err := getSingleImpl(node, decodedToken, nameProvider)

@@ -264,7 +260,10 @@ func (p *Pointer) set(node, data any, nameProvider *swag.NameProvider) error {
    knd := reflect.ValueOf(node).Kind()

    if knd != reflect.Ptr && knd != reflect.Struct && knd != reflect.Map && knd != reflect.Slice && knd != reflect.Array {
-       return errors.New("only structs, pointers, maps and slices are supported for setting values")
+       return errors.Join(
+           ErrUnsupportedValueType,
+           ErrPointer,
+       )
    }

    if nameProvider == nil {
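errors.Join lets one returned error satisfy errors.Is for several sentinels at once, which is how the rewritten set path reports both the specific failure and the package-wide ErrPointer. A tiny standalone demonstration:

    // A joined error matches each of its components under errors.Is.
    package main

    import (
        "errors"
        "fmt"
    )

    var (
        errSpecific = errors.New("specific failure")
        errPackage  = errors.New("package-wide failure")
    )

    func main() {
        err := errors.Join(errSpecific, errPackage)
        fmt.Println(errors.Is(err, errSpecific)) // true
        fmt.Println(errors.Is(err, errPackage))  // true
    }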
@@ -307,7 +306,7 @@ func (p *Pointer) set(node, data any, nameProvider *swag.NameProvider) error {
    case reflect.Struct:
        nm, ok := nameProvider.GetGoNameForType(rValue.Type(), decodedToken)
        if !ok {
-           return fmt.Errorf("object has no field %q", decodedToken)
+           return fmt.Errorf("object has no field %q: %w", decodedToken, ErrPointer)
        }
        fld := rValue.FieldByName(nm)
        if fld.CanAddr() && fld.Kind() != reflect.Interface && fld.Kind() != reflect.Map && fld.Kind() != reflect.Slice && fld.Kind() != reflect.Ptr {

@@ -321,7 +320,7 @@ func (p *Pointer) set(node, data any, nameProvider *swag.NameProvider) error {
        mv := rValue.MapIndex(kv)

        if !mv.IsValid() {
-           return fmt.Errorf("object has no key %q", decodedToken)
+           return fmt.Errorf("object has no key %q: %w", decodedToken, ErrPointer)
        }
        if mv.CanAddr() && mv.Kind() != reflect.Interface && mv.Kind() != reflect.Map && mv.Kind() != reflect.Slice && mv.Kind() != reflect.Ptr {
            node = mv.Addr().Interface()

@@ -336,7 +335,7 @@ func (p *Pointer) set(node, data any, nameProvider *swag.NameProvider) error {
        }
        sLength := rValue.Len()
        if tokenIndex < 0 || tokenIndex >= sLength {
-           return fmt.Errorf("index out of bounds array[0,%d] index '%d'", sLength, tokenIndex)
+           return fmt.Errorf("index out of bounds array[0,%d] index '%d': %w", sLength, tokenIndex, ErrPointer)
        }

        elem := rValue.Index(tokenIndex)

@@ -347,7 +346,7 @@ func (p *Pointer) set(node, data any, nameProvider *swag.NameProvider) error {
        node = elem.Interface()

    default:
-       return fmt.Errorf("invalid token reference %q", decodedToken)
+       return fmt.Errorf("invalid token reference %q: %w", decodedToken, ErrPointer)
    }

}

@@ -404,10 +403,10 @@ func (p *Pointer) Offset(document string) (int64, error) {
                return 0, err
            }
        default:
-           return 0, fmt.Errorf("invalid token %#v", tk)
+           return 0, fmt.Errorf("invalid token %#v: %w", tk, ErrPointer)
        }
    default:
-       return 0, fmt.Errorf("invalid token %#v", tk)
+       return 0, fmt.Errorf("invalid token %#v: %w", tk, ErrPointer)
    }
    }
    return offset, nil

@@ -437,16 +436,16 @@ func offsetSingleObject(dec *json.Decoder, decodedToken string) (int64, error) {
                return offset, nil
            }
        default:
-           return 0, fmt.Errorf("invalid token %#v", tk)
+           return 0, fmt.Errorf("invalid token %#v: %w", tk, ErrPointer)
        }
    }
-   return 0, fmt.Errorf("token reference %q not found", decodedToken)
+   return 0, fmt.Errorf("token reference %q not found: %w", decodedToken, ErrPointer)
}

func offsetSingleArray(dec *json.Decoder, decodedToken string) (int64, error) {
    idx, err := strconv.Atoi(decodedToken)
    if err != nil {
-       return 0, fmt.Errorf("token reference %q is not a number: %v", decodedToken, err)
+       return 0, fmt.Errorf("token reference %q is not a number: %v: %w", decodedToken, err, ErrPointer)
    }
    var i int
    for i = 0; i < idx && dec.More(); i++ {

@@ -470,7 +469,7 @@ func offsetSingleArray(dec *json.Decoder, decodedToken string) (int64, error) {
    }

    if !dec.More() {
-       return 0, fmt.Errorf("token reference %q not found", decodedToken)
+       return 0, fmt.Errorf("token reference %q not found: %w", decodedToken, ErrPointer)
    }
    return dec.InputOffset(), nil
}
@@ -5,14 +5,14 @@ import (
    "github.com/pkg/errors"
)

-// ErrorLocation gives a location in source code that caused the error
-type ErrorLocation struct {
+// LocationError gives a location in source code that caused the error
+type LocationError struct {
    Locations [][]Range
    error
}

// Unwrap unwraps to the next error
-func (e *ErrorLocation) Unwrap() error {
+func (e *LocationError) Unwrap() error {
    return e.error
}

@@ -45,7 +45,7 @@ func setLocation(err error, location []Range, add bool) error {
    if err == nil {
        return nil
    }
-   var el *ErrorLocation
+   var el *LocationError
    if errors.As(err, &el) {
        if add {
            el.Locations = append(el.Locations, location)

@@ -54,7 +54,7 @@ func setLocation(err error, location []Range, add bool) error {
        }
        return err
    }
-   return stack.Enable(&ErrorLocation{
+   return stack.Enable(&LocationError{
        error:     err,
        Locations: [][]Range{location},
    })
@@ -318,7 +318,7 @@ func parseMaybeJSON(rest string, d *directives) (*Node, map[string]bool, error)
    if err == nil {
        return node, attrs, nil
    }
-   if err == errDockerfileNotStringArray {
+   if errors.Is(err, errDockerfileNotStringArray) {
        return nil, nil, err
    }

@@ -336,7 +336,7 @@ func parseMaybeJSONToList(rest string, d *directives) (*Node, map[string]bool, e
    if err == nil {
        return node, attrs, nil
    }
-   if err == errDockerfileNotStringArray {
+   if errors.Is(err, errDockerfileNotStringArray) {
        return nil, nil, err
    }
@@ -114,7 +114,7 @@ type Heredoc struct {
var (
    dispatch      map[string]func(string, *directives) (*Node, map[string]bool, error)
    reWhitespace  = regexp.MustCompile(`[\t\v\f\r ]+`)
-   reHeredoc     = regexp.MustCompile(`^(\d*)<<(-?)([^<]*)$`)
+   reHeredoc     = regexp.MustCompile(`^(\d*)<<(-?)\s*([^<]*)$`)
    reLeadingTabs = regexp.MustCompile(`(?m)^\t+`)
)
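The added \s* lets the heredoc opener tolerate whitespace between <<- and the delimiter word, so the delimiter is captured without a leading space. A quick check of the two patterns (the test string is illustrative):

    // Old vs. new heredoc opener patterns from the hunk above.
    package main

    import (
        "fmt"
        "regexp"
    )

    func main() {
        oldRe := regexp.MustCompile(`^(\d*)<<(-?)([^<]*)$`)
        newRe := regexp.MustCompile(`^(\d*)<<(-?)\s*([^<]*)$`)

        input := "<<- EOF" // note the space before the delimiter
        fmt.Println(oldRe.FindStringSubmatch(input)) // delimiter captured as " EOF"
        fmt.Println(newRe.FindStringSubmatch(input)) // delimiter captured as "EOF"
    }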
@@ -556,8 +556,8 @@ func scanLines(data []byte, atEOF bool) (advance int, token []byte, err error) {
}

func handleScannerError(err error) error {
-   switch err {
-   case bufio.ErrTooLong:
+   switch {
+   case errors.Is(err, bufio.ErrTooLong):
        return errors.Errorf("dockerfile line greater than max allowed size of %d", bufio.MaxScanTokenSize-1)
    default:
        return err
@@ -177,6 +177,7 @@ func (sw *shellWord) processStopOn(stopChar rune, rawEscapes bool) (string, []st
    // no need to initialize all the time
    var charFuncMapping = map[rune]func() (string, error){
        '$': sw.processDollar,
+       '<': sw.processPossibleHeredoc,
    }
    if !sw.SkipProcessQuotes {
        charFuncMapping['\''] = sw.processSingleQuote

@@ -512,6 +513,25 @@ func (sw *shellWord) processName() string {
    return name.String()
}

+func (sw *shellWord) processPossibleHeredoc() (string, error) {
+   sw.scanner.Next()
+   if sw.scanner.Peek() != '<' {
+       return "<", nil // not a heredoc
+   }
+   sw.scanner.Next()
+
+   // heredoc might have whitespace between << and word terminator
+   var space bytes.Buffer
+   nextCh := sw.scanner.Peek()
+   for isWhitespace(nextCh) {
+       space.WriteRune(nextCh)
+       sw.scanner.Next()
+       nextCh = sw.scanner.Peek()
+   }
+   result := "<<" + space.String()
+   return result, nil
+}
+
// isSpecialParam checks if the provided character is a special parameter,
// as defined in http://pubs.opengroup.org/onlinepubs/009695399/utilities/xcu_chap02.html#tag_02_05_02
func isSpecialParam(char rune) bool {

@@ -677,3 +697,11 @@ func trimSuffix(pattern, word string, greedy bool) (string, error) {
    }
    return reverseString(str), nil
}
+
+func isWhitespace(r rune) bool {
+   switch r {
+   case '\t', '\r', ' ':
+       return true
+   }
+   return false
+}
@@ -25,7 +25,7 @@ loop0:
        }
        // full match, potentially skip all
        if idx == len(st.Frames)-1 {
-           if st.Pid == prev.Pid && st.Version == prev.Version && slices.Compare(st.Cmdline, st.Cmdline) == 0 {
+           if st.Pid == prev.Pid && st.Version == prev.Version && slices.Equal(st.Cmdline, prev.Cmdline) {
                continue loop0
            }
        }
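The old line compared st.Cmdline against itself, so the condition was always satisfied regardless of prev; slices.Equal against prev.Cmdline is the comparison that was intended, and it reads more directly than Compare(...) == 0. The bug class in isolation:

    // Self-comparison bug vs. the intended slice equality check.
    package main

    import (
        "fmt"
        "slices"
    )

    func main() {
        a := []string{"buildctl", "build"}
        b := []string{"buildctl", "prune"}

        fmt.Println(slices.Compare(a, a) == 0) // always true: compares a to itself
        fmt.Println(slices.Equal(a, b))        // false: the intended comparison
    }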
@@ -50,7 +50,7 @@ func Traces(err error) []*Stack {
func traces(err error) []*Stack {
    var st []*Stack

-   switch e := err.(type) {
+   switch e := err.(type) { //nolint:errorlint
    case interface{ Unwrap() error }:
        st = Traces(e.Unwrap())
    case interface{ Unwrap() []error }:

@@ -63,7 +63,7 @@ func traces(err error) []*Stack {
        }
    }

-   switch ste := err.(type) {
+   switch ste := err.(type) { //nolint:errorlint
    case interface{ StackTrace() errors.StackTrace }:
        st = append(st, convertStack(ste.StackTrace()))
    case interface{ StackTrace() *Stack }:

@@ -85,7 +85,7 @@ func Enable(err error) error {
}

func Wrap(err error, s *Stack) error {
-   return &withStack{stack: s, error: err}
+   return &withStackError{stack: s, error: err}
}

func hasLocalStackTrace(err error) bool {

@@ -173,15 +173,15 @@ func convertStack(s errors.StackTrace) *Stack {
    return &out
}

-type withStack struct {
+type withStackError struct {
    stack *Stack
    error
}

-func (e *withStack) Unwrap() error {
+func (e *withStackError) Unwrap() error {
    return e.error
}

-func (e *withStack) StackTrace() *Stack {
+func (e *withStackError) StackTrace() *Stack {
    return e.stack
}
@@ -1,174 +0,0 @@
-package devices
-
-import (
-    "fmt"
-    "os"
-    "strconv"
-)
-
-const (
-    Wildcard = -1
-)
-
-type Device struct {
-    Rule
-
-    // Path to the device.
-    Path string `json:"path"`
-
-    // FileMode permission bits for the device.
-    FileMode os.FileMode `json:"file_mode"`
-
-    // Uid of the device.
-    Uid uint32 `json:"uid"`
-
-    // Gid of the device.
-    Gid uint32 `json:"gid"`
-}
-
-// Permissions is a cgroupv1-style string to represent device access. It
-// has to be a string for backward compatibility reasons, hence why it has
-// methods to do set operations.
-type Permissions string
-
-const (
-    deviceRead uint = (1 << iota)
-    deviceWrite
-    deviceMknod
-)
-
-func (p Permissions) toSet() uint {
-    var set uint
-    for _, perm := range p {
-        switch perm {
-        case 'r':
-            set |= deviceRead
-        case 'w':
-            set |= deviceWrite
-        case 'm':
-            set |= deviceMknod
-        }
-    }
-    return set
-}
-
-func fromSet(set uint) Permissions {
-    var perm string
-    if set&deviceRead == deviceRead {
-        perm += "r"
-    }
-    if set&deviceWrite == deviceWrite {
-        perm += "w"
-    }
-    if set&deviceMknod == deviceMknod {
-        perm += "m"
-    }
-    return Permissions(perm)
-}
-
-// Union returns the union of the two sets of Permissions.
-func (p Permissions) Union(o Permissions) Permissions {
-    lhs := p.toSet()
-    rhs := o.toSet()
-    return fromSet(lhs | rhs)
-}
-
-// Difference returns the set difference of the two sets of Permissions.
-// In set notation, A.Difference(B) gives you A\B.
-func (p Permissions) Difference(o Permissions) Permissions {
-    lhs := p.toSet()
-    rhs := o.toSet()
-    return fromSet(lhs &^ rhs)
-}
-
-// Intersection computes the intersection of the two sets of Permissions.
-func (p Permissions) Intersection(o Permissions) Permissions {
-    lhs := p.toSet()
-    rhs := o.toSet()
-    return fromSet(lhs & rhs)
-}
-
-// IsEmpty returns whether the set of permissions in a Permissions is
-// empty.
-func (p Permissions) IsEmpty() bool {
-    return p == Permissions("")
-}
-
-// IsValid returns whether the set of permissions is a subset of valid
-// permissions (namely, {r,w,m}).
-func (p Permissions) IsValid() bool {
-    return p == fromSet(p.toSet())
-}
-
-type Type rune
-
-const (
-    WildcardDevice Type = 'a'
-    BlockDevice    Type = 'b'
-    CharDevice     Type = 'c' // or 'u'
-    FifoDevice     Type = 'p'
-)
-
-func (t Type) IsValid() bool {
-    switch t {
-    case WildcardDevice, BlockDevice, CharDevice, FifoDevice:
-        return true
-    default:
-        return false
-    }
-}
-
-func (t Type) CanMknod() bool {
-    switch t {
-    case BlockDevice, CharDevice, FifoDevice:
-        return true
-    default:
-        return false
-    }
-}
-
-func (t Type) CanCgroup() bool {
-    switch t {
-    case WildcardDevice, BlockDevice, CharDevice:
-        return true
-    default:
-        return false
-    }
-}
-
-type Rule struct {
-    // Type of device ('c' for char, 'b' for block). If set to 'a', this rule
-    // acts as a wildcard and all fields other than Allow are ignored.
-    Type Type `json:"type"`
-
-    // Major is the device's major number.
-    Major int64 `json:"major"`
-
-    // Minor is the device's minor number.
-    Minor int64 `json:"minor"`
-
-    // Permissions is the set of permissions that this rule applies to (in the
-    // cgroupv1 format -- any combination of "rwm").
-    Permissions Permissions `json:"permissions"`
-
-    // Allow specifies whether this rule is allowed.
-    Allow bool `json:"allow"`
-}
-
-func (d *Rule) CgroupString() string {
-    var (
-        major = strconv.FormatInt(d.Major, 10)
-        minor = strconv.FormatInt(d.Minor, 10)
-    )
-    if d.Major == Wildcard {
-        major = "*"
-    }
-    if d.Minor == Wildcard {
-        minor = "*"
-    }
-    return fmt.Sprintf("%c %s:%s %s", d.Type, major, minor, d.Permissions)
-}
-
-func (d *Rule) Mkdev() (uint64, error) {
-    return mkDev(d)
-}
vendor/github.com/opencontainers/runc/libcontainer/devices/device_deprecated.go (generated, vendored, new file, 20 lines)

@@ -0,0 +1,20 @@
+package devices
+
+import "github.com/opencontainers/cgroups/devices/config"
+
+// Deprecated: use [github.com/opencontainers/cgroups/devices/config].
+const (
+   Wildcard       = config.Wildcard
+   WildcardDevice = config.WildcardDevice
+   BlockDevice    = config.BlockDevice
+   CharDevice     = config.CharDevice
+   FifoDevice     = config.FifoDevice
+)
+
+// Deprecated: use [github.com/opencontainers/cgroups/devices/config].
+type (
+   Device      = config.Device
+   Permissions = config.Permissions
+   Type        = config.Type
+   Rule        = config.Rule
+)
@@ -19,13 +19,6 @@ var (
    osReadDir = os.ReadDir
)

-func mkDev(d *Rule) (uint64, error) {
-   if d.Major == Wildcard || d.Minor == Wildcard {
-       return 0, errors.New("cannot mkdev() device with wildcards")
-   }
-   return unix.Mkdev(uint32(d.Major), uint32(d.Minor)), nil
-}
-
// DeviceFromPath takes the path to a device and its cgroup_permissions (which
// cannot be easily queried) to look up the information about a linux device
// and returns that information as a Device struct.
@@ -50,19 +50,19 @@ func CleanPath(path string) string {

    // Ensure that all paths are cleaned (especially problematic ones like
    // "/../../../../../" which can cause lots of issues).
-   path = filepath.Clean(path)
+   if filepath.IsAbs(path) {
+       return filepath.Clean(path)
+   }

    // If the path isn't absolute, we need to do more processing to fix paths
    // such as "../../../../<etc>/some/path". We also shouldn't convert absolute
    // paths to relative ones.
-   if !filepath.IsAbs(path) {
        path = filepath.Clean(string(os.PathSeparator) + path)
        // This can't fail, as (by definition) all paths are relative to root.
        path, _ = filepath.Rel(string(os.PathSeparator), path)
-   }

-   // Clean the path again for good measure.
-   return filepath.Clean(path)
+   return path
}

// stripRoot returns the passed path, stripping the root path if it was
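The restructured CleanPath keeps the same observable behavior: absolute paths are simply cleaned, while relative paths are anchored at "/" first so ".." sequences cannot climb out, then made relative again. A standalone sketch of that behavior (the empty-string guard is an assumption about the unshown part of the function):

    // Behavior sketch for the CleanPath restructure above.
    package main

    import (
        "fmt"
        "os"
        "path/filepath"
    )

    func cleanPath(path string) string {
        if path == "" {
            return ""
        }
        if filepath.IsAbs(path) {
            return filepath.Clean(path)
        }
        // Anchor relative paths at the root so ".." cannot escape,
        // then make the result relative again.
        path = filepath.Clean(string(os.PathSeparator) + path)
        path, _ = filepath.Rel(string(os.PathSeparator), path)
        return path
    }

    func main() {
        fmt.Println(cleanPath("/../../etc//passwd")) // /etc/passwd
        fmt.Println(cleanPath("../../etc//passwd"))  // etc/passwd
    }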
@@ -77,7 +77,7 @@ func stripRoot(root, path string) string {
        path = "/"
    case root == "/":
        // do nothing
-   case strings.HasPrefix(path, root+"/"):
+   default:
        path = strings.TrimPrefix(path, root+"/")
    }
    return CleanPath("/" + path)
@@ -88,8 +88,8 @@ func stripRoot(root, path string) string {
func SearchLabels(labels []string, key string) (string, bool) {
    key += "="
    for _, s := range labels {
-       if strings.HasPrefix(s, key) {
-           return s[len(key):], true
+       if val, ok := strings.CutPrefix(s, key); ok {
+           return val, true
        }
    }
    return "", false
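strings.CutPrefix (Go 1.20+) folds the HasPrefix test and the slicing into one call, removing the index arithmetic. Minimal before/after (the label value is illustrative):

    // strings.CutPrefix vs. manual HasPrefix plus slicing.
    package main

    import (
        "fmt"
        "strings"
    )

    func main() {
        s, key := "app=podman", "app="

        // Old style: two steps, easy to get the slice bounds wrong.
        if strings.HasPrefix(s, key) {
            fmt.Println(s[len(key):]) // podman
        }

        // New style: one call returns the remainder and a found flag.
        if val, ok := strings.CutPrefix(s, key); ok {
            fmt.Println(val) // podman
        }
    }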
@@ -102,8 +102,14 @@ func fdRangeFrom(minFd int, fn fdFunc) error {
func CloseExecFrom(minFd int) error {
    // Use close_range(CLOSE_RANGE_CLOEXEC) if possible.
    if haveCloseRangeCloexec() {
-       err := unix.CloseRange(uint(minFd), math.MaxUint, unix.CLOSE_RANGE_CLOEXEC)
-       return os.NewSyscallError("close_range", err)
+       err := unix.CloseRange(uint(minFd), math.MaxInt32, unix.CLOSE_RANGE_CLOEXEC)
+       if err == nil {
+           return nil
+       }
+
+       logrus.Debugf("close_range failed, closing range one at a time (error: %v)", err)
+       // If close_range fails, we fall back to the standard loop.
    }
    // Otherwise, fall back to the standard loop.
    return fdRangeFrom(minFd, unix.CloseOnExec)
@@ -1,4 +1,12 @@
# For documentation, see https://golangci-lint.run/usage/configuration/
linters:
version: "2"

formatters:
  enable:
    - gofumpt

linters:
  exclusions:
    generated: disable
  presets:
    - std-error-handling
@@ -2,6 +2,18 @@ libseccomp-golang: Releases
===============================================================================
https://github.com/seccomp/libseccomp-golang

+* Version 0.11.0 - April 23, 2025
+- Add new architectures (LOONGARCH64, M68K, SH, SHEB)
+- Add support for SCMP_FLTATR_CTL_WAITKILL (GetWaitKill, SetWaitKill)
+- Add support for filter precompute (Precompute)
+- Add support for transactions (Transaction{Start,Commit,Reject})
+- Add ExportBPFMem
+- Improve documentation for struct fields
+- Fix TestRuleAddAndLoad for ppc architecture
+- Fix TestRuleAddAndLoad to not use magic number
+- Remove unused get_*_version implementation
+- Test against latest libseccomp and Go versions
+
* Version 0.10.0 - June 9, 2022
- Minimum supported version of libseccomp bumped to v2.3.1
- Add seccomp userspace notification API (ActNotify, filter.*Notif*)
@@ -17,8 +17,28 @@ import (
    "unsafe"
)

-// #include <stdlib.h>
-// #include <seccomp.h>
+/*
+#include <errno.h>
+#include <stdlib.h>
+#include <seccomp.h>
+
+// The following functions were added in libseccomp v2.6.0.
+#if SCMP_VER_MAJOR == 2 && SCMP_VER_MINOR < 6
+int seccomp_precompute(scmp_filter_ctx ctx) {
+    return -EOPNOTSUPP;
+}
+int seccomp_export_bpf_mem(const scmp_filter_ctx ctx, void *buf, size_t *len) {
+    return -EOPNOTSUPP;
+}
+int seccomp_transaction_start(const scmp_filter_ctx ctx) {
+    return -EOPNOTSUPP;
+}
+int seccomp_transaction_commit(const scmp_filter_ctx ctx) {
+    return -EOPNOTSUPP;
+}
+void seccomp_transaction_reject(const scmp_filter_ctx ctx) {}
+#endif
+*/
import "C"

// Exported types

@@ -33,8 +53,9 @@ type VersionError struct {

func init() {
    // This forces the cgo libseccomp to initialize its internal API support state,
-   // which is necessary on older versions of libseccomp in order to work
+   // which is necessary on older versions of libseccomp (< 2.5.0) in order to work
    // correctly.
+   // TODO: remove once libseccomp < v2.5.0 is not supported.
    _, _ = getAPI()
}
@@ -78,49 +99,44 @@ type ScmpSyscall int32
type ScmpFd int32

// ScmpNotifData describes the system call context that triggered a notification.
-//
-// Syscall:      the syscall number
-// Arch:         the filter architecture
-// InstrPointer: address of the instruction that triggered a notification
-// Args:         arguments (up to 6) for the syscall
-//
type ScmpNotifData struct {
+   // Syscall is the syscall number.
    Syscall ScmpSyscall `json:"syscall,omitempty"`
+   // Arch is the filter architecture.
    Arch ScmpArch `json:"arch,omitempty"`
+   // InstrPointer is the address of the instruction that triggered a notification.
    InstrPointer uint64 `json:"instr_pointer,omitempty"`
+   // Args are the arguments (up to 6) for the syscall.
    Args []uint64 `json:"args,omitempty"`
}

// ScmpNotifReq represents a seccomp userspace notification. See NotifReceive() for
// info on how to pull such a notification.
-//
-// ID:    notification ID
-// Pid:   process that triggered the notification event
-// Flags: filter flags (see seccomp(2))
-// Data:  system call context that triggered the notification
-//
type ScmpNotifReq struct {
+   // ID is the notification ID.
    ID uint64 `json:"id,omitempty"`
+   // Pid is the process that triggered the notification event.
    Pid uint32 `json:"pid,omitempty"`
+   // Flags is filter flags (see seccomp(2)).
    Flags uint32 `json:"flags,omitempty"`
+   // Data is system call context that triggered the notification.
    Data ScmpNotifData `json:"data,omitempty"`
}

// ScmpNotifResp represents a seccomp userspace notification response. See NotifRespond()
// for info on how to push such a response.
-//
-// ID:    notification ID (must match the corresponding ScmpNotifReq ID)
-// Error: must be 0 if no error occurred, or an error constant from package
-//        syscall (e.g., syscall.EPERM, etc). In the latter case, it's used
-//        as an error return from the syscall that created the notification.
-// Val:   return value for the syscall that created the notification. Only
-//        relevant if Error is 0.
-// Flags: userspace notification response flag (e.g., NotifRespFlagContinue)
-//
type ScmpNotifResp struct {
+   // ID is the notification ID (must match the corresponding ScmpNotifReq ID).
    ID uint64 `json:"id,omitempty"`
+   // Error must be 0 if no error occurred, or an error constant from
+   // package syscall (e.g., syscall.EPERM, etc). In the latter case, it
+   // is used as an error return from the syscall that created the
+   // notification.
    Error int32 `json:"error,omitempty"`
+   // Val is a return value for the syscall that created the notification.
+   // Only relevant if Error is 0.
    Val uint64 `json:"val,omitempty"`
+   // Flags is userspace notification response flag (e.g., NotifRespFlagContinue).
    Flags uint32 `json:"flags,omitempty"`
}
|
|||
ArchPARISC64
|
||||
// ArchRISCV64 represents RISCV64
|
||||
ArchRISCV64
|
||||
// ArchLOONGARCH64 represents 64-bit LoongArch.
|
||||
ArchLOONGARCH64
|
||||
// ArchM68K represents 32-bit Motorola 68000.
|
||||
ArchM68K
|
||||
// ArchSH represents SuperH.
|
||||
ArchSH
|
||||
// ArchSHEB represents Big-endian SuperH.
|
||||
ArchSHEB
|
||||
)
|
||||
|
||||
const (
|
||||
|
@ -306,6 +330,14 @@ func GetArchFromString(arch string) (ScmpArch, error) {
|
|||
return ArchPARISC64, nil
|
||||
case "riscv64":
|
||||
return ArchRISCV64, nil
|
||||
case "loongarch64":
|
||||
return ArchLOONGARCH64, nil
|
||||
case "m68k":
|
||||
return ArchM68K, nil
|
||||
case "sh":
|
||||
return ArchSH, nil
|
||||
case "sheb":
|
||||
return ArchSHEB, nil
|
||||
default:
|
||||
return ArchInvalid, fmt.Errorf("cannot convert unrecognized string %q", arch)
|
||||
}
|
||||
|
@ -352,6 +384,14 @@ func (a ScmpArch) String() string {
|
|||
return "parisc64"
|
||||
case ArchRISCV64:
|
||||
return "riscv64"
|
||||
case ArchLOONGARCH64:
|
||||
return "loong64"
|
||||
case ArchM68K:
|
||||
return "m68k"
|
||||
case ArchSH:
|
||||
return "sh"
|
||||
case ArchSHEB:
|
||||
return "sheb"
|
||||
case ArchNative:
|
||||
return "native"
|
||||
case ArchInvalid:
|
||||
|
@@ -798,6 +838,26 @@ func (f *ScmpFilter) RemoveArch(arch ScmpArch) error {
    return nil
}

+// Precompute precomputes the seccomp filter for later use by [Load] and
+// similar functions. Not only does this improve performance of [Load],
+// it also ensures that the seccomp filter can be loaded in an
+// async-signal-safe manner if no changes have been made to the filter
+// since it was precomputed.
+func (f *ScmpFilter) Precompute() error {
+   f.lock.Lock()
+   defer f.lock.Unlock()
+
+   if !f.valid {
+       return errBadFilter
+   }
+
+   if retCode := C.seccomp_precompute(f.filterCtx); retCode != 0 {
+       return errRc(retCode)
+   }
+
+   return nil
+}
+
// Load loads a filter context into the kernel.
// Returns an error if the filter context is invalid or the syscall failed.
func (f *ScmpFilter) Load() error {
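Precompute pairs naturally with Load: compile the BPF program once up front, then load it later (possibly in an async-signal-safe context) without paying the compilation cost there. A hedged usage sketch; the filter rules are illustrative, not taken from the diff:

    // Hypothetical use of Precompute before Load (libseccomp >= 2.6.0;
    // the compatibility stub above reports EOPNOTSUPP on older versions).
    package main

    import (
        seccomp "github.com/seccomp/libseccomp-golang"
    )

    func main() {
        filter, err := seccomp.NewFilter(seccomp.ActAllow)
        if err != nil {
            panic(err)
        }
        defer filter.Release()

        // Deny one syscall as an example rule.
        getpid, err := seccomp.GetSyscallFromName("getpid")
        if err != nil {
            panic(err)
        }
        if err := filter.AddRule(getpid, seccomp.ActErrno); err != nil {
            panic(err)
        }

        // Compile the BPF program now...
        if err := filter.Precompute(); err != nil {
            panic(err)
        }
        // ...so that Load takes the fast, already-compiled path.
        if err := filter.Load(); err != nil {
            panic(err)
        }
    }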
@@ -941,6 +1001,25 @@ func (f *ScmpFilter) GetRawRC() (bool, error) {
    return true, nil
}

+// GetWaitKill returns the current state of the WaitKill flag,
+// or an error if an issue was encountered retrieving the value.
+// See SetWaitKill for more details.
+func (f *ScmpFilter) GetWaitKill() (bool, error) {
+   val, err := f.getFilterAttr(filterAttrWaitKill)
+   if err != nil {
+       if e := checkAPI("GetWaitKill", 7, 2, 6, 0); e != nil {
+           err = e
+       }
+       return false, err
+   }
+   if val == 0 {
+       return false, nil
+   }
+   return true, nil
+}
+
// SetBadArchAction sets the default action taken on a syscall for an
// architecture not in the filter, or an error if an issue was encountered
// setting the value.
@@ -1053,6 +1132,25 @@ func (f *ScmpFilter) SetRawRC(state bool) error {
    return err
}

+// SetWaitKill sets whether libseccomp should request wait killable semantics
+// when possible. Defaults to false.
+func (f *ScmpFilter) SetWaitKill(state bool) error {
+   var toSet C.uint32_t = 0x0
+
+   if state {
+       toSet = 0x1
+   }
+
+   err := f.setFilterAttr(filterAttrWaitKill, toSet)
+   if err != nil {
+       if e := checkAPI("SetWaitKill", 7, 2, 6, 0); e != nil {
+           err = e
+       }
+   }
+
+   return err
+}
+
// SetSyscallPriority sets a syscall's priority.
// This provides a hint to the filter generator in libseccomp about the
// importance of this syscall. High-priority syscalls are placed
@@ -1154,6 +1252,30 @@ func (f *ScmpFilter) ExportBPF(file *os.File) error {
    return nil
}

+// ExportBPFMem is similar to [ExportBPF], except the data is written into
+// memory and returned as []byte.
+func (f *ScmpFilter) ExportBPFMem() ([]byte, error) {
+   f.lock.Lock()
+   defer f.lock.Unlock()
+
+   if !f.valid {
+       return nil, errBadFilter
+   }
+
+   var len C.size_t
+   // Get the size required.
+   if retCode := C.seccomp_export_bpf_mem(f.filterCtx, unsafe.Pointer(nil), &len); retCode < 0 {
+       return nil, errRc(retCode)
+   }
+   // Get the data.
+   buf := make([]byte, int(len))
+   if retCode := C.seccomp_export_bpf_mem(f.filterCtx, unsafe.Pointer(&buf[0]), &len); retCode < 0 {
+       return nil, errRc(retCode)
+   }
+
+   return buf, nil
+}
+
// Userspace Notification API

// GetNotifFd returns the userspace notification file descriptor associated with the given
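ExportBPFMem uses the common two-call C pattern: first query the required size with a nil buffer, then fill a buffer of exactly that size. A hypothetical caller:

    // Hypothetical caller that inspects a filter's compiled BPF program.
    package main

    import (
        "fmt"

        seccomp "github.com/seccomp/libseccomp-golang"
    )

    func main() {
        filter, err := seccomp.NewFilter(seccomp.ActAllow)
        if err != nil {
            panic(err)
        }
        defer filter.Release()

        // Requires libseccomp >= 2.6.0; the compatibility stub in the cgo
        // preamble returns EOPNOTSUPP on older versions.
        prog, err := filter.ExportBPFMem()
        if err != nil {
            panic(err)
        }
        fmt.Printf("BPF program: %d bytes\n", len(prog))
    }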
@ -1186,3 +1308,53 @@ func NotifRespond(fd ScmpFd, scmpResp *ScmpNotifResp) error {
|
|||
func NotifIDValid(fd ScmpFd, id uint64) error {
	return notifIDValid(fd, id)
}
+
+// TransactionStart starts a new seccomp filter transaction that the caller can
+// use to perform any number of filter modifications which can then be
+// committed to the filter using [TransactionCommit] or rejected using
+// [TransactionReject]. It is important to note that transactions only affect
+// the seccomp filter state while it is being managed by libseccomp; seccomp
+// filters which have been loaded into the kernel cannot be modified, only new
+// seccomp filters can be added on top of the existing loaded filter stack.
+func (f *ScmpFilter) TransactionStart() error {
+	f.lock.Lock()
+	defer f.lock.Unlock()
+
+	if !f.valid {
+		return errBadFilter
+	}
+
+	if retCode := C.seccomp_transaction_start(f.filterCtx); retCode < 0 {
+		return errRc(retCode)
+	}
+
+	return nil
+}
+
+// TransactionReject rejects a transaction started by [TransactionStart].
+func (f *ScmpFilter) TransactionReject() {
+	f.lock.Lock()
+	defer f.lock.Unlock()
+
+	if !f.valid {
+		return
+	}
+
+	C.seccomp_transaction_reject(f.filterCtx)
+}
+
+// TransactionCommit commits a transaction started by [TransactionStart].
+func (f *ScmpFilter) TransactionCommit() error {
+	f.lock.Lock()
+	defer f.lock.Unlock()
+
+	if !f.valid {
+		return errBadFilter
+	}
+
+	if retCode := C.seccomp_transaction_commit(f.filterCtx); retCode < 0 {
+		return errRc(retCode)
+	}
+
+	return nil
+}
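A sketch of the intended calling pattern, not part of this diff; it assumes filter is an existing *ScmpFilter, and AddRule, Load, and GetSyscallFromName are existing libseccomp-golang APIs.

	sc, err := seccomp.GetSyscallFromName("openat")
	if err != nil {
		return err
	}
	if err := filter.TransactionStart(); err != nil {
		return err
	}
	// Stage modifications against the in-memory filter only.
	if err := filter.AddRule(sc, seccomp.ActErrno); err != nil {
		filter.TransactionReject() // roll back everything staged so far
		return err
	}
	if err := filter.TransactionCommit(); err != nil {
		return err
	}
	// Per the doc comment above, only Load() pushes the committed state into the kernel.
	return filter.Load()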

vendor/github.com/seccomp/libseccomp-golang/seccomp_internal.go

@@ -68,6 +68,22 @@ const uint32_t C_ARCH_BAD = ARCH_BAD;
#define SCMP_ARCH_RISCV64 ARCH_BAD
#endif
+
+#ifndef SCMP_ARCH_LOONGARCH64
+#define SCMP_ARCH_LOONGARCH64 ARCH_BAD
+#endif
+
+#ifndef SCMP_ARCH_M68K
+#define SCMP_ARCH_M68K ARCH_BAD
+#endif
+
+#ifndef SCMP_ARCH_SH
+#define SCMP_ARCH_SH ARCH_BAD
+#endif
+
+#ifndef SCMP_ARCH_SHEB
+#define SCMP_ARCH_SHEB ARCH_BAD
+#endif

const uint32_t C_ARCH_NATIVE = SCMP_ARCH_NATIVE;
const uint32_t C_ARCH_X86 = SCMP_ARCH_X86;
const uint32_t C_ARCH_X86_64 = SCMP_ARCH_X86_64;

@@ -88,6 +104,10 @@ const uint32_t C_ARCH_S390X = SCMP_ARCH_S390X;
const uint32_t C_ARCH_PARISC = SCMP_ARCH_PARISC;
const uint32_t C_ARCH_PARISC64 = SCMP_ARCH_PARISC64;
const uint32_t C_ARCH_RISCV64 = SCMP_ARCH_RISCV64;
+const uint32_t C_ARCH_LOONGARCH64 = SCMP_ARCH_LOONGARCH64;
+const uint32_t C_ARCH_M68K = SCMP_ARCH_M68K;
+const uint32_t C_ARCH_SH = SCMP_ARCH_SH;
+const uint32_t C_ARCH_SHEB = SCMP_ARCH_SHEB;

#ifndef SCMP_ACT_LOG
#define SCMP_ACT_LOG 0x7ffc0000U

@@ -128,6 +148,11 @@ const uint32_t C_ACT_NOTIFY = SCMP_ACT_NOTIFY;
#define SCMP_FLTATR_API_SYSRAWRC _SCMP_FLTATR_MIN
#endif
+
+// Added in libseccomp v2.6.0.
+#if SCMP_VER_MAJOR == 2 && SCMP_VER_MINOR < 6
+#define SCMP_FLTATR_CTL_WAITKILL _SCMP_FLTATR_MIN
+#endif

const uint32_t C_ATTRIBUTE_DEFAULT = (uint32_t)SCMP_FLTATR_ACT_DEFAULT;
const uint32_t C_ATTRIBUTE_BADARCH = (uint32_t)SCMP_FLTATR_ACT_BADARCH;
const uint32_t C_ATTRIBUTE_NNP = (uint32_t)SCMP_FLTATR_CTL_NNP;

@@ -136,6 +161,7 @@ const uint32_t C_ATTRIBUTE_LOG = (uint32_t)SCMP_FLTATR_CTL_LOG;
const uint32_t C_ATTRIBUTE_SSB = (uint32_t)SCMP_FLTATR_CTL_SSB;
const uint32_t C_ATTRIBUTE_OPTIMIZE = (uint32_t)SCMP_FLTATR_CTL_OPTIMIZE;
const uint32_t C_ATTRIBUTE_SYSRAWRC = (uint32_t)SCMP_FLTATR_API_SYSRAWRC;
+const uint32_t C_ATTRIBUTE_WAITKILL = (uint32_t)SCMP_FLTATR_CTL_WAITKILL;

const int C_CMP_NE = (int)SCMP_CMP_NE;
const int C_CMP_LT = (int)SCMP_CMP_LT;

@@ -145,11 +171,6 @@ const int C_CMP_GE = (int)SCMP_CMP_GE;
const int C_CMP_GT = (int)SCMP_CMP_GT;
const int C_CMP_MASKED_EQ = (int)SCMP_CMP_MASKED_EQ;

-const int C_VERSION_MAJOR = SCMP_VER_MAJOR;
-const int C_VERSION_MINOR = SCMP_VER_MINOR;
-const int C_VERSION_MICRO = SCMP_VER_MICRO;
-
-#if SCMP_VER_MAJOR == 2 && SCMP_VER_MINOR >= 3
unsigned int get_major_version()
{
	return seccomp_version()->major;

@@ -164,22 +185,6 @@ unsigned int get_micro_version()
{
	return seccomp_version()->micro;
}
-#else
-unsigned int get_major_version()
-{
-	return (unsigned int)C_VERSION_MAJOR;
-}
-
-unsigned int get_minor_version()
-{
-	return (unsigned int)C_VERSION_MINOR;
-}
-
-unsigned int get_micro_version()
-{
-	return (unsigned int)C_VERSION_MICRO;
-}
-#endif

// The libseccomp API level functions were added in v2.4.0
#if SCMP_VER_MAJOR == 2 && SCMP_VER_MINOR < 4

@@ -284,6 +289,7 @@ const (
	filterAttrSSB
	filterAttrOptimize
	filterAttrRawRC
+	filterAttrWaitKill
)

const (

@@ -291,7 +297,7 @@ const (
	scmpError C.int = -1
	// Comparison boundaries to check for architecture validity
	archStart ScmpArch = ArchNative
-	archEnd   ScmpArch = ArchRISCV64
+	archEnd   ScmpArch = ArchSHEB
	// Comparison boundaries to check for action validity
	actionStart ScmpAction = ActKillThread
	actionEnd   ScmpAction = ActKillProcess

@@ -552,6 +558,14 @@ func archFromNative(a C.uint32_t) (ScmpArch, error) {
		return ArchPARISC64, nil
	case C.C_ARCH_RISCV64:
		return ArchRISCV64, nil
+	case C.C_ARCH_LOONGARCH64:
+		return ArchLOONGARCH64, nil
+	case C.C_ARCH_M68K:
+		return ArchM68K, nil
+	case C.C_ARCH_SH:
+		return ArchSH, nil
+	case C.C_ARCH_SHEB:
+		return ArchSHEB, nil
	default:
		return 0x0, fmt.Errorf("unrecognized architecture %#x", uint32(a))
	}

@@ -598,6 +612,14 @@ func (a ScmpArch) toNative() C.uint32_t {
		return C.C_ARCH_PARISC64
	case ArchRISCV64:
		return C.C_ARCH_RISCV64
+	case ArchLOONGARCH64:
+		return C.C_ARCH_LOONGARCH64
+	case ArchM68K:
+		return C.C_ARCH_M68K
+	case ArchSH:
+		return C.C_ARCH_SH
+	case ArchSHEB:
+		return C.C_ARCH_SHEB
	case ArchNative:
		return C.C_ARCH_NATIVE
	default:

@@ -694,6 +716,8 @@ func (a scmpFilterAttr) toNative() uint32 {
		return uint32(C.C_ATTRIBUTE_OPTIMIZE)
	case filterAttrRawRC:
		return uint32(C.C_ATTRIBUTE_SYSRAWRC)
+	case filterAttrWaitKill:
+		return uint32(C.C_ATTRIBUTE_WAITKILL)
	default:
		return 0x0
	}

@@ -794,10 +818,7 @@ func notifReceive(fd ScmpFd) (*ScmpNotifReq, error) {
	if retCode := C.seccomp_notify_alloc(&req, &resp); retCode != 0 {
		return nil, errRc(retCode)
	}
-
-	defer func() {
-		C.seccomp_notify_free(req, resp)
-	}()
+	defer C.seccomp_notify_free(req, resp)

	for {
		retCode, errno := C.seccomp_notify_receive(C.int(fd), req)

@@ -831,10 +852,7 @@ func notifRespond(fd ScmpFd, scmpResp *ScmpNotifResp) error {
	if retCode := C.seccomp_notify_alloc(&req, &resp); retCode != 0 {
		return errRc(retCode)
	}
-
-	defer func() {
-		C.seccomp_notify_free(req, resp)
-	}()
+	defer C.seccomp_notify_free(req, resp)

	scmpResp.toNative(resp)
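A side note on the simplification in both hunks above (illustration, not part of this diff): a deferred call evaluates its arguments immediately, so the one-line form frees the same req/resp pointers the closure captured, minus one closure allocation.

	package main

	import "fmt"

	func main() {
		x := 1
		defer fmt.Println("deferred saw:", x) // argument evaluated at defer time: prints 1
		x = 2
		fmt.Println("x is now:", x) // runs first: prints 2
	}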

vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/default_value_decoders.go

@@ -330,7 +330,7 @@ func (DefaultValueDecoders) intDecodeType(dc DecodeContext, vr bsonrw.ValueReade
	case reflect.Int64:
		return reflect.ValueOf(i64), nil
	case reflect.Int:
-		if int64(int(i64)) != i64 { // Can we fit this inside of an int
+		if i64 > math.MaxInt { // Can we fit this inside of an int
			return emptyValue, fmt.Errorf("%d overflows int", i64)
		}

@@ -434,7 +434,7 @@ func (dvd DefaultValueDecoders) UintDecodeValue(dc DecodeContext, vr bsonrw.Valu
			return fmt.Errorf("%d overflows uint64", i64)
		}
	case reflect.Uint:
-		if i64 < 0 || int64(uint(i64)) != i64 { // Can we fit this inside of an uint
+		if i64 < 0 || uint64(i64) > uint64(math.MaxUint) { // Can we fit this inside of an uint
			return fmt.Errorf("%d overflows uint", i64)
		}
	default:
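Both hunks above replace the convert-and-compare-back trick with explicit limit checks against Go 1.17's math.MaxInt and math.MaxUint constants, which adapt to the platform's int width. A small illustration using the 32-bit limits (hypothetical values, not part of this diff):

	big := int64(math.MaxInt32) + 1
	// Old style: detect truncation by round-tripping through the narrow type.
	fmt.Println(int64(int32(big)) != big) // true: big does not fit in 32 bits
	// New style: compare against the limit directly; same verdict, clearer intent.
	fmt.Println(big > int64(math.MaxInt32)) // true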
@@ -1521,6 +1521,18 @@ func (dvd DefaultValueDecoders) ValueUnmarshalerDecodeValue(_ DecodeContext, vr
		return ValueDecoderError{Name: "ValueUnmarshalerDecodeValue", Types: []reflect.Type{tValueUnmarshaler}, Received: val}
	}

+	// If the BSON value is null and the Go value is a pointer, then don't call
+	// UnmarshalBSONValue. Even if the Go pointer is already initialized (i.e.,
+	// non-nil), encountering null in BSON will result in the pointer being
+	// directly set to nil here. Since the pointer is being replaced with nil,
+	// there is no opportunity (or reason) for the custom UnmarshalBSONValue logic
+	// to be called.
+	if vr.Type() == bsontype.Null && val.Kind() == reflect.Ptr {
+		val.Set(reflect.Zero(val.Type()))
+
+		return vr.ReadNull()
+	}
+
	if val.Kind() == reflect.Ptr && val.IsNil() {
		if !val.CanSet() {
			return ValueDecoderError{Name: "ValueUnmarshalerDecodeValue", Types: []reflect.Type{tValueUnmarshaler}, Received: val}
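A sketch of the behavior the added block guarantees (hypothetical type, not part of this diff): decoding a BSON null into a pointer field nils the pointer without ever invoking the custom unmarshaler.

	type wrapped struct{ raw []byte }

	// UnmarshalBSONValue implements bson.ValueUnmarshaler.
	func (w *wrapped) UnmarshalBSONValue(t bsontype.Type, data []byte) error {
		w.raw = append([]byte(nil), data...)
		return nil
	}

	// Decoding {"w": null} into struct{ W *wrapped } sets W to nil and calls
	// vr.ReadNull(); UnmarshalBSONValue is never reached, even if W was non-nil.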

vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/uint_codec.go

@@ -164,11 +164,15 @@ func (uic *UIntCodec) decodeType(dc DecodeContext, vr bsonrw.ValueReader, t refl

		return reflect.ValueOf(uint64(i64)), nil
	case reflect.Uint:
-		if i64 < 0 || int64(uint(i64)) != i64 { // Can we fit this inside of an uint
+		if i64 < 0 {
			return emptyValue, fmt.Errorf("%d overflows uint", i64)
		}
+		v := uint64(i64)
+		if v > math.MaxUint { // Can we fit this inside of an uint
+			return emptyValue, fmt.Errorf("%d overflows uint", i64)
+		}

-		return reflect.ValueOf(uint(i64)), nil
+		return reflect.ValueOf(uint(v)), nil
	default:
		return emptyValue, ValueDecoderError{
			Name: "UintDecodeValue",
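Splitting the check matters because converting a negative int64 to uint64 wraps around, so a bare uint64(i64) > math.MaxUint comparison could not be relied on by itself; the explicit i64 < 0 guard has to run first. Illustration (not part of this diff):

	fmt.Println(uint64(int64(-1))) // 18446744073709551615: wraps around instead of staying negative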

vendor/go.mongodb.org/mongo-driver/bson/bsonrw/extjson_parser.go

@@ -305,7 +305,7 @@ func (ejp *extJSONParser) readValue(t bsontype.Type) (*extJSONValue, error) {
	}

	// remove hyphens
-	uuidNoHyphens := strings.Replace(uuid, "-", "", -1)
+	uuidNoHyphens := strings.ReplaceAll(uuid, "-", "")
	if len(uuidNoHyphens) != 32 {
		return nil, fmt.Errorf("$uuid value does not follow RFC 4122 format regarding length and hyphens")
	}
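The two lines are behaviorally identical: the standard library defines strings.ReplaceAll(s, old, new) as strings.Replace(s, old, new, -1), so the new spelling only states the intent. Illustration (not part of this diff):

	uuid := "b5f18a29-ea8c-4f3e-9a9c-3bfd7b3bd6b5"
	fmt.Println(len(strings.ReplaceAll(uuid, "-", ""))) // 32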