Merge pull request #2333 from mtrmac/c-image-for-docker

Update c/image to main

Commit: 8e47f514cd
common/go.mod

@@ -11,15 +11,14 @@ require (
 github.com/containerd/platforms v0.2.1
 github.com/containernetworking/cni v1.2.3
 github.com/containernetworking/plugins v1.5.1
-github.com/containers/image/v5 v5.34.1
+github.com/containers/image/v5 v5.34.2-0.20250304184223-c43e62eed23a
 github.com/containers/ocicrypt v1.2.1
-github.com/containers/storage v1.57.1
+github.com/containers/storage v1.57.2-0.20250228100055-700b765b2111
 github.com/coreos/go-systemd/v22 v22.5.0
 github.com/cyphar/filepath-securejoin v0.4.1
 github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc
 github.com/disiqueira/gotree/v3 v3.0.2
 github.com/docker/distribution v2.8.3+incompatible
-github.com/docker/docker v27.5.1+incompatible
 github.com/docker/go-units v0.5.0
 github.com/fsnotify/fsnotify v1.8.0
 github.com/godbus/dbus/v5 v5.1.0

@@ -62,16 +61,17 @@ require (
 github.com/acarl005/stripansi v0.0.0-20180116102854-5a71ef0e047d // indirect
 github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2 // indirect
 github.com/chzyer/readline v1.5.1 // indirect
-github.com/containerd/cgroups/v3 v3.0.3 // indirect
-github.com/containerd/errdefs v0.3.0 // indirect
+github.com/containerd/cgroups/v3 v3.0.5 // indirect
+github.com/containerd/errdefs v1.0.0 // indirect
 github.com/containerd/errdefs/pkg v0.3.0 // indirect
 github.com/containerd/log v0.1.0 // indirect
 github.com/containerd/stargz-snapshotter/estargz v0.16.3 // indirect
 github.com/containerd/typeurl/v2 v2.2.3 // indirect
 github.com/containers/libtrust v0.0.0-20230121012942-c1716e8a8d01 // indirect
-github.com/cyberphone/json-canonicalization v0.0.0-20231217050601-ba74d44ecf5f // indirect
+github.com/cyberphone/json-canonicalization v0.0.0-20241213102144-19d51d7fe467 // indirect
 github.com/distribution/reference v0.6.0 // indirect
-github.com/docker/docker-credential-helpers v0.8.2 // indirect
+github.com/docker/docker v28.0.1+incompatible // indirect
+github.com/docker/docker-credential-helpers v0.9.1 // indirect
 github.com/docker/go-connections v0.5.0 // indirect
 github.com/felixge/httpsnoop v1.0.4 // indirect
 github.com/go-jose/go-jose/v4 v4.0.5 // indirect

@@ -100,7 +100,7 @@ require (
 github.com/hashicorp/errwrap v1.1.0 // indirect
 github.com/inconshreveable/mousetrap v1.1.0 // indirect
 github.com/josharian/intern v1.0.0 // indirect
-github.com/klauspost/compress v1.17.11 // indirect
+github.com/klauspost/compress v1.18.0 // indirect
 github.com/klauspost/pgzip v1.2.6 // indirect
 github.com/kr/fs v0.1.0 // indirect
 github.com/letsencrypt/boulder v0.0.0-20240620165639-de9c06129bec // indirect

@@ -124,8 +124,9 @@ require (
 github.com/rivo/uniseg v0.4.7 // indirect
 github.com/secure-systems-lab/go-securesystemslib v0.9.0 // indirect
 github.com/sigstore/fulcio v1.6.4 // indirect
-github.com/sigstore/rekor v1.3.8 // indirect
-github.com/sigstore/sigstore v1.8.12 // indirect
+github.com/sigstore/protobuf-specs v0.4.0 // indirect
+github.com/sigstore/rekor v1.3.9 // indirect
+github.com/sigstore/sigstore v1.8.15 // indirect
 github.com/smallstep/pkcs7 v0.1.1 // indirect
 github.com/stefanberger/go-pkcs11uri v0.0.0-20230803200340-78284954bff6 // indirect
 github.com/sylabs/sif/v2 v2.20.2 // indirect

@@ -133,21 +134,22 @@ require (
 github.com/tchap/go-patricia/v2 v2.3.2 // indirect
 github.com/titanous/rocacheck v0.0.0-20171023193734-afe73141d399 // indirect
 github.com/ulikunitz/xz v0.5.12 // indirect
-github.com/vbatts/tar-split v0.11.7 // indirect
-github.com/vbauerster/mpb/v8 v8.9.1 // indirect
+github.com/vbatts/tar-split v0.12.1 // indirect
+github.com/vbauerster/mpb/v8 v8.9.3 // indirect
 github.com/vishvananda/netns v0.0.4 // indirect
 go.mongodb.org/mongo-driver v1.14.0 // indirect
 go.opencensus.io v0.24.0 // indirect
-go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.54.0 // indirect
-go.opentelemetry.io/otel v1.31.0 // indirect
-go.opentelemetry.io/otel/metric v1.31.0 // indirect
-go.opentelemetry.io/otel/trace v1.31.0 // indirect
+go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.56.0 // indirect
+go.opentelemetry.io/otel v1.32.0 // indirect
+go.opentelemetry.io/otel/metric v1.32.0 // indirect
+go.opentelemetry.io/otel/trace v1.32.0 // indirect
 golang.org/x/net v0.34.0 // indirect
 golang.org/x/text v0.22.0 // indirect
 golang.org/x/tools v0.28.0 // indirect
-google.golang.org/genproto/googleapis/rpc v0.0.0-20250102185135-69823020774d // indirect
-google.golang.org/grpc v1.69.4 // indirect
-google.golang.org/protobuf v1.36.2 // indirect
+google.golang.org/genproto/googleapis/api v0.0.0-20241209162323-e6fa225c2576 // indirect
+google.golang.org/genproto/googleapis/rpc v0.0.0-20250115164207-1a7da9e5054f // indirect
+google.golang.org/grpc v1.70.0 // indirect
+google.golang.org/protobuf v1.36.4 // indirect
 gopkg.in/yaml.v3 v3.0.1 // indirect
 )
common/go.sum (107 changed lines)
@ -43,10 +43,10 @@ github.com/chzyer/test v1.0.0 h1:p3BQDXSxOhOG0P9z6/hGnII4LGiEPOYBhs8asl/fC04=
|
|||
github.com/chzyer/test v1.0.0/go.mod h1:2JlltgoNkt4TW/z9V/IzDdFaMTM2JPIi26O1pF38GC8=
|
||||
github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw=
|
||||
github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc=
|
||||
github.com/containerd/cgroups/v3 v3.0.3 h1:S5ByHZ/h9PMe5IOQoN7E+nMc2UcLEM/V48DGDJ9kip0=
|
||||
github.com/containerd/cgroups/v3 v3.0.3/go.mod h1:8HBe7V3aWGLFPd/k03swSIsGjZhHI2WzJmticMgVuz0=
|
||||
github.com/containerd/errdefs v0.3.0 h1:FSZgGOeK4yuT/+DnF07/Olde/q4KBoMsaamhXxIMDp4=
|
||||
github.com/containerd/errdefs v0.3.0/go.mod h1:+YBYIdtsnF4Iw6nWZhJcqGSg/dwvV7tyJ/kCkyJ2k+M=
|
||||
github.com/containerd/cgroups/v3 v3.0.5 h1:44na7Ud+VwyE7LIoJ8JTNQOa549a8543BmzaJHo6Bzo=
|
||||
github.com/containerd/cgroups/v3 v3.0.5/go.mod h1:SA5DLYnXO8pTGYiAHXz94qvLQTKfVM5GEVisn4jpins=
|
||||
github.com/containerd/errdefs v1.0.0 h1:tg5yIfIlQIrxYtu9ajqY42W3lpS19XqdxRQeEwYG8PI=
|
||||
github.com/containerd/errdefs v1.0.0/go.mod h1:+YBYIdtsnF4Iw6nWZhJcqGSg/dwvV7tyJ/kCkyJ2k+M=
|
||||
github.com/containerd/errdefs/pkg v0.3.0 h1:9IKJ06FvyNlexW690DXuQNx2KA2cUJXx151Xdx3ZPPE=
|
||||
github.com/containerd/errdefs/pkg v0.3.0/go.mod h1:NJw6s9HwNuRhnjJhM7pylWwMyAkmCQvQ4GpJHEqRLVk=
|
||||
github.com/containerd/log v0.1.0 h1:TCJt7ioM2cr/tfR8GPbGf9/VRAX8D2B4PjzCpfX540I=
|
||||
|
@ -61,19 +61,19 @@ github.com/containernetworking/cni v1.2.3 h1:hhOcjNVUQTnzdRJ6alC5XF+wd9mfGIUaj8F
|
|||
github.com/containernetworking/cni v1.2.3/go.mod h1:DuLgF+aPd3DzcTQTtp/Nvl1Kim23oFKdm2okJzBQA5M=
|
||||
github.com/containernetworking/plugins v1.5.1 h1:T5ji+LPYjjgW0QM+KyrigZbLsZ8jaX+E5J/EcKOE4gQ=
|
||||
github.com/containernetworking/plugins v1.5.1/go.mod h1:MIQfgMayGuHYs0XdNudf31cLLAC+i242hNm6KuDGqCM=
|
||||
github.com/containers/image/v5 v5.34.1 h1:/m2bkFnuedTyNkzma8s7cFLjeefPIb4trjyafWhIlwM=
|
||||
github.com/containers/image/v5 v5.34.1/go.mod h1:/WnvUSEfdqC/ahMRd4YJDBLrpYWkGl018rB77iB3FDo=
|
||||
github.com/containers/image/v5 v5.34.2-0.20250304184223-c43e62eed23a h1:DNcvb32znlfRQw9MaYzzgGY7QL2i8DkUpmVnex2GPN0=
|
||||
github.com/containers/image/v5 v5.34.2-0.20250304184223-c43e62eed23a/go.mod h1:VwUFnOWM6LCYc+7qEeAiUF4Kdmhfr8VptENQQRRafQw=
|
||||
github.com/containers/libtrust v0.0.0-20230121012942-c1716e8a8d01 h1:Qzk5C6cYglewc+UyGf6lc8Mj2UaPTHy/iF2De0/77CA=
|
||||
github.com/containers/libtrust v0.0.0-20230121012942-c1716e8a8d01/go.mod h1:9rfv8iPl1ZP7aqh9YA68wnZv2NUDbXdcdPHVz0pFbPY=
|
||||
github.com/containers/ocicrypt v1.2.1 h1:0qIOTT9DoYwcKmxSt8QJt+VzMY18onl9jUXsxpVhSmM=
|
||||
github.com/containers/ocicrypt v1.2.1/go.mod h1:aD0AAqfMp0MtwqWgHM1bUwe1anx0VazI108CRrSKINQ=
|
||||
github.com/containers/storage v1.57.1 h1:hKPoFsuBcB3qTzBxa4IFpZMRzUuL5Xhv/BE44W0XHx8=
|
||||
github.com/containers/storage v1.57.1/go.mod h1:i/Hb4lu7YgFr9G0K6BMjqW0BLJO1sFsnWQwj2UoWCUM=
|
||||
github.com/containers/storage v1.57.2-0.20250228100055-700b765b2111 h1:NMmaECeWzq2cWAXfPnsl7oFc2jyb/YRcPbzYT8jpQUA=
|
||||
github.com/containers/storage v1.57.2-0.20250228100055-700b765b2111/go.mod h1:egC90qMy0fTpGjkaHj667syy1Cbr3XPZEVX/qkUPrdM=
|
||||
github.com/coreos/go-systemd/v22 v22.5.0 h1:RrqgGjYQKalulkV8NGVIfkXQf6YYmOyiJKk8iXXhfZs=
|
||||
github.com/coreos/go-systemd/v22 v22.5.0/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc=
|
||||
github.com/cpuguy83/go-md2man/v2 v2.0.6/go.mod h1:oOW0eioCTA6cOiMLiUPZOpcVxMig6NIQQ7OS05n1F4g=
|
||||
github.com/cyberphone/json-canonicalization v0.0.0-20231217050601-ba74d44ecf5f h1:eHnXnuK47UlSTOQexbzxAZfekVz6i+LKRdj1CU5DPaM=
|
||||
github.com/cyberphone/json-canonicalization v0.0.0-20231217050601-ba74d44ecf5f/go.mod h1:uzvlm1mxhHkdfqitSA92i7Se+S9ksOn3a3qmv/kyOCw=
|
||||
github.com/cyberphone/json-canonicalization v0.0.0-20241213102144-19d51d7fe467 h1:uX1JmpONuD549D73r6cgnxyUu18Zb7yHAy5AYU0Pm4Q=
|
||||
github.com/cyberphone/json-canonicalization v0.0.0-20241213102144-19d51d7fe467/go.mod h1:uzvlm1mxhHkdfqitSA92i7Se+S9ksOn3a3qmv/kyOCw=
|
||||
github.com/cyphar/filepath-securejoin v0.4.1 h1:JyxxyPEaktOD+GAnqIqTf9A8tHyAG22rowi7HkoSU1s=
|
||||
github.com/cyphar/filepath-securejoin v0.4.1/go.mod h1:Sdj7gXlvMcPZsbhwhQ33GguGLDGQL7h7bg04C/+u9jI=
|
||||
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
||||
|
@ -84,14 +84,14 @@ github.com/disiqueira/gotree/v3 v3.0.2 h1:ik5iuLQQoufZBNPY518dXhiO5056hyNBIK9lWh
|
|||
github.com/disiqueira/gotree/v3 v3.0.2/go.mod h1:ZuyjE4+mUQZlbpkI24AmruZKhg3VHEgPLDY8Qk+uUu8=
|
||||
github.com/distribution/reference v0.6.0 h1:0IXCQ5g4/QMHHkarYzh5l+u8T3t73zM5QvfrDyIgxBk=
|
||||
github.com/distribution/reference v0.6.0/go.mod h1:BbU0aIcezP1/5jX/8MP0YiH4SdvB5Y4f/wlDRiLyi3E=
|
||||
github.com/docker/cli v27.5.1+incompatible h1:JB9cieUT9YNiMITtIsguaN55PLOHhBSz3LKVc6cqWaY=
|
||||
github.com/docker/cli v27.5.1+incompatible/go.mod h1:JLrzqnKDaYBop7H2jaqPtU4hHvMKP+vjCwu2uszcLI8=
|
||||
github.com/docker/cli v28.0.1+incompatible h1:g0h5NQNda3/CxIsaZfH4Tyf6vpxFth7PYl3hgCPOKzs=
|
||||
github.com/docker/cli v28.0.1+incompatible/go.mod h1:JLrzqnKDaYBop7H2jaqPtU4hHvMKP+vjCwu2uszcLI8=
|
||||
github.com/docker/distribution v2.8.3+incompatible h1:AtKxIZ36LoNK51+Z6RpzLpddBirtxJnzDrHLEKxTAYk=
|
||||
github.com/docker/distribution v2.8.3+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w=
|
||||
github.com/docker/docker v27.5.1+incompatible h1:4PYU5dnBYqRQi0294d1FBECqT9ECWeQAIfE8q4YnPY8=
|
||||
github.com/docker/docker v27.5.1+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
|
||||
github.com/docker/docker-credential-helpers v0.8.2 h1:bX3YxiGzFP5sOXWc3bTPEXdEaZSeVMrFgOr3T+zrFAo=
|
||||
github.com/docker/docker-credential-helpers v0.8.2/go.mod h1:P3ci7E3lwkZg6XiHdRKft1KckHiO9a2rNtyFbZ/ry9M=
|
||||
github.com/docker/docker v28.0.1+incompatible h1:FCHjSRdXhNRFjlHMTv4jUNlIBbTeRjrWfeFuJp7jpo0=
|
||||
github.com/docker/docker v28.0.1+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
|
||||
github.com/docker/docker-credential-helpers v0.9.1 h1:xrekBBXJprfiFFWKrKxsFBjMl+5cG1GaNN1kZTcjXD0=
|
||||
github.com/docker/docker-credential-helpers v0.9.1/go.mod h1:T4QzRTwtu1hvhXKzeQkJm6GNMJT3bJ0S6V4LU/SZV9A=
|
||||
github.com/docker/go-connections v0.5.0 h1:USnMq7hx7gwdVZq1L49hLXaFtUdTADjXGp+uj1Br63c=
|
||||
github.com/docker/go-connections v0.5.0/go.mod h1:ov60Kzw0kKElRwhNs9UlUHAE/F9Fe6GLaXnqyDdmEXc=
|
||||
github.com/docker/go-metrics v0.0.1 h1:AgB/0SvBxihN0X8OR4SjsblXkbMvalQ8cjmtKQ2rQV8=
|
||||
|
@ -199,8 +199,8 @@ github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnr
|
|||
github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo=
|
||||
github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8=
|
||||
github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
|
||||
github.com/klauspost/compress v1.17.11 h1:In6xLpyWOi1+C7tXUUWv2ot1QvBjxevKAaI6IXrJmUc=
|
||||
github.com/klauspost/compress v1.17.11/go.mod h1:pMDklpSncoRMuLFrf1W9Ss9KT+0rH90U12bZKk7uwG0=
|
||||
github.com/klauspost/compress v1.18.0 h1:c/Cqfb0r+Yi+JtIEq73FWXVkRonBlf0CRNYc8Zttxdo=
|
||||
github.com/klauspost/compress v1.18.0/go.mod h1:2Pp+KzxcywXVXMr50+X0Q/Lsb43OQHYWRCY2AiWywWQ=
|
||||
github.com/klauspost/pgzip v1.2.6 h1:8RXeL5crjEUFnR2/Sn6GJNWtSQ3Dk8pq4CL3jvdDyjU=
|
||||
github.com/klauspost/pgzip v1.2.6/go.mod h1:Ch1tH69qFZu15pkjo5kYi6mth2Zzwzt50oCQKQE9RUs=
|
||||
github.com/kr/fs v0.1.0 h1:Jskdu9ieNAYnjxsi0LbQp1ulIKZV1LAFgK1tWhpZgl8=
|
||||
|
@ -235,8 +235,8 @@ github.com/moby/sys/user v0.3.0 h1:9ni5DlcW5an3SvRSx4MouotOygvzaXbaSrc/wGDFWPo=
|
|||
github.com/moby/sys/user v0.3.0/go.mod h1:bG+tYYYJgaMtRKgEmuueC0hJEAZWwtIbZTB+85uoHjs=
|
||||
github.com/moby/sys/userns v0.1.0 h1:tVLXkFOxVu9A64/yh59slHVv9ahO9UIev4JZusOLG/g=
|
||||
github.com/moby/sys/userns v0.1.0/go.mod h1:IHUYgu/kao6N8YZlp9Cf444ySSvCmDlmzUcYfDHOl28=
|
||||
github.com/moby/term v0.5.0 h1:xt8Q1nalod/v7BqbG21f8mQPqH+xAaC9C3N3wfWbVP0=
|
||||
github.com/moby/term v0.5.0/go.mod h1:8FzsFHVUBGZdbDsJw/ot+X+d5HLUbvklYLJ9uGfcI3Y=
|
||||
github.com/moby/term v0.5.2 h1:6qk3FJAFDs6i/q3W/pQ97SX192qKfZgGjCQqfCJkgzQ=
|
||||
github.com/moby/term v0.5.2/go.mod h1:d3djjFCrjnB+fl8NJux+EJzu0msscUP+f8it8hPkFLc=
|
||||
github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
|
||||
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg=
|
||||
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
|
||||
|
@ -287,8 +287,8 @@ github.com/prometheus/procfs v0.15.1/go.mod h1:fB45yRUv8NstnjriLhBQLuOUt+WW4BsoG
|
|||
github.com/rivo/uniseg v0.2.0/go.mod h1:J6wj4VEh+S6ZtnVlnTBMWIodfgj8LQOQFoIToxlJtxc=
|
||||
github.com/rivo/uniseg v0.4.7 h1:WUdvkW8uEhrYfLC4ZzdpI2ztxP1I582+49Oc5Mq64VQ=
|
||||
github.com/rivo/uniseg v0.4.7/go.mod h1:FN3SvrM+Zdj16jyLfmOkMNblXMcoc8DfTHruCPUcx88=
|
||||
github.com/rogpeppe/go-internal v1.12.0 h1:exVL4IDcn6na9z1rAb56Vxr+CgyK3nn3O+epU5NdKM8=
|
||||
github.com/rogpeppe/go-internal v1.12.0/go.mod h1:E+RYuTGaKKdloAfM02xzb0FW3Paa99yedzYV+kq4uf4=
|
||||
github.com/rogpeppe/go-internal v1.13.1 h1:KvO1DLK/DRN07sQ1LQKScxyZJuNnedQ5/wKSR38lUII=
|
||||
github.com/rogpeppe/go-internal v1.13.1/go.mod h1:uMEvuHeurkdAXX61udpOXGD/AzZDWNMNyH2VO9fmH0o=
|
||||
github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
|
||||
github.com/sebdah/goldie/v2 v2.5.5 h1:rx1mwF95RxZ3/83sdS4Yp7t2C5TCokvWP4TBRbAyEWY=
|
||||
github.com/sebdah/goldie/v2 v2.5.5/go.mod h1:oZ9fp0+se1eapSRjfYbsV/0Hqhbuu3bJVvKI/NNtssI=
|
||||
|
@ -300,10 +300,12 @@ github.com/sergi/go-diff v1.3.1 h1:xkr+Oxo4BOQKmkn/B9eMK0g5Kg/983T9DqqPHwYqD+8=
|
|||
github.com/sergi/go-diff v1.3.1/go.mod h1:aMJSSKb2lpPvRNec0+w3fl7LP9IOFzdc9Pa4NFbPK1I=
|
||||
github.com/sigstore/fulcio v1.6.4 h1:d86obfxUAG3Y6CYwOx1pdwCZwKmROB6w6927pKOVIRY=
|
||||
github.com/sigstore/fulcio v1.6.4/go.mod h1:Y6bn3i3KGhXpaHsAtYP3Z4Np0+VzCo1fLv8Ci6mbPDs=
|
||||
github.com/sigstore/rekor v1.3.8 h1:B8kJI8mpSIXova4Jxa6vXdJyysRxFGsEsLKBDl0rRjA=
|
||||
github.com/sigstore/rekor v1.3.8/go.mod h1:/dHFYKSuxEygfDRnEwyJ+ZD6qoVYNXQdi1mJrKvKWsI=
|
||||
github.com/sigstore/sigstore v1.8.12 h1:S8xMVZbE2z9ZBuQUEG737pxdLjnbOIcFi5v9UFfkJFc=
|
||||
github.com/sigstore/sigstore v1.8.12/go.mod h1:+PYQAa8rfw0QdPpBcT+Gl3egKD9c+TUgAlF12H3Nmjo=
|
||||
github.com/sigstore/protobuf-specs v0.4.0 h1:yoZbdh0kZYKOSiVbYyA8J3f2wLh5aUk2SQB7LgAfIdU=
|
||||
github.com/sigstore/protobuf-specs v0.4.0/go.mod h1:FKW5NYhnnFQ/Vb9RKtQk91iYd0MKJ9AxyqInEwU6+OI=
|
||||
github.com/sigstore/rekor v1.3.9 h1:sUjRpKVh/hhgqGMs0t+TubgYsksArZ6poLEC3MsGAzU=
|
||||
github.com/sigstore/rekor v1.3.9/go.mod h1:xThNUhm6eNEmkJ/SiU/FVU7pLY2f380fSDZFsdDWlcM=
|
||||
github.com/sigstore/sigstore v1.8.15 h1:9HHnZmxjPQSTPXTCZc25HDxxSTWwsGMh/ZhWZZ39maU=
|
||||
github.com/sigstore/sigstore v1.8.15/go.mod h1:+Wa5mrG6A+Gss516YC9owy10q3IazqIRe0y1EoQRHHM=
|
||||
github.com/sirupsen/logrus v1.9.3 h1:dueUQJ1C2q9oE3F7wvmSGAaVtTmUizReu6fjN8uqzbQ=
|
||||
github.com/sirupsen/logrus v1.9.3/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ=
|
||||
github.com/skeema/knownhosts v1.3.1 h1:X2osQ+RAjK76shCbvhHHHVl3ZlgDm8apHEHFqRjnBY8=
|
||||
|
@ -336,10 +338,10 @@ github.com/titanous/rocacheck v0.0.0-20171023193734-afe73141d399 h1:e/5i7d4oYZ+C
|
|||
github.com/titanous/rocacheck v0.0.0-20171023193734-afe73141d399/go.mod h1:LdwHTNJT99C5fTAzDz0ud328OgXz+gierycbcIx2fRs=
|
||||
github.com/ulikunitz/xz v0.5.12 h1:37Nm15o69RwBkXM0J6A5OlE67RZTfzUxTj8fB3dfcsc=
|
||||
github.com/ulikunitz/xz v0.5.12/go.mod h1:nbz6k7qbPmH4IRqmfOplQw/tblSgqTqBwxkY0oWt/14=
|
||||
github.com/vbatts/tar-split v0.11.7 h1:ixZ93pO/GmvaZw4Vq9OwmfZK/kc2zKdPfu0B+gYqs3U=
|
||||
github.com/vbatts/tar-split v0.11.7/go.mod h1:eF6B6i6ftWQcDqEn3/iGFRFRo8cBIMSJVOpnNdfTMFA=
|
||||
github.com/vbauerster/mpb/v8 v8.9.1 h1:LH5R3lXPfE2e3lIGxN7WNWv3Hl5nWO6LRi2B0L0ERHw=
|
||||
github.com/vbauerster/mpb/v8 v8.9.1/go.mod h1:4XMvznPh8nfe2NpnDo1QTPvW9MVkUhbG90mPWvmOzcQ=
|
||||
github.com/vbatts/tar-split v0.12.1 h1:CqKoORW7BUWBe7UL/iqTVvkTBOF8UvOMKOIZykxnnbo=
|
||||
github.com/vbatts/tar-split v0.12.1/go.mod h1:eF6B6i6ftWQcDqEn3/iGFRFRo8cBIMSJVOpnNdfTMFA=
|
||||
github.com/vbauerster/mpb/v8 v8.9.3 h1:PnMeF+sMvYv9u23l6DO6Q3+Mdj408mjLRXIzmUmU2Z8=
|
||||
github.com/vbauerster/mpb/v8 v8.9.3/go.mod h1:hxS8Hz4C6ijnppDSIX6LjG8FYJSoPo9iIOcE53Zik0c=
|
||||
github.com/vishvananda/netlink v1.3.0 h1:X7l42GfcV4S6E4vHTsw48qbrV+9PVojNfIhZcwQdrZk=
|
||||
github.com/vishvananda/netlink v1.3.0/go.mod h1:i6NetklAujEcC6fK0JPjT8qSwWyO0HLn4UKG+hGqeJs=
|
||||
github.com/vishvananda/netns v0.0.4 h1:Oeaw1EM2JMxD51g9uhtC0D7erkIjgmj8+JZc26m1YX8=
|
||||
|
@ -359,22 +361,22 @@ go.mongodb.org/mongo-driver v1.14.0 h1:P98w8egYRjYe3XDjxhYJagTokP/H6HzlsnojRgZRd
|
|||
go.mongodb.org/mongo-driver v1.14.0/go.mod h1:Vzb0Mk/pa7e6cWw85R4F/endUC3u0U9jGcNU603k65c=
|
||||
go.opencensus.io v0.24.0 h1:y73uSU6J157QMP2kn2r30vwW1A2W2WFwSCGnAVxeaD0=
|
||||
go.opencensus.io v0.24.0/go.mod h1:vNK8G9p7aAivkbmorf4v+7Hgx+Zs0yY+0fOtgBfjQKo=
|
||||
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.54.0 h1:TT4fX+nBOA/+LUkobKGW1ydGcn+G3vRw9+g5HwCphpk=
|
||||
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.54.0/go.mod h1:L7UH0GbB0p47T4Rri3uHjbpCFYrVrwc1I25QhNPiGK8=
|
||||
go.opentelemetry.io/otel v1.31.0 h1:NsJcKPIW0D0H3NgzPDHmo0WW6SptzPdqg/L1zsIm2hY=
|
||||
go.opentelemetry.io/otel v1.31.0/go.mod h1:O0C14Yl9FgkjqcCZAsE053C13OaddMYr/hz6clDkEJE=
|
||||
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.56.0 h1:UP6IpuHFkUgOQL9FFQFrZ+5LiwhhYRbi7VZSIx6Nj5s=
|
||||
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.56.0/go.mod h1:qxuZLtbq5QDtdeSHsS7bcf6EH6uO6jUAgk764zd3rhM=
|
||||
go.opentelemetry.io/otel v1.32.0 h1:WnBN+Xjcteh0zdk01SVqV55d/m62NJLJdIyb4y/WO5U=
|
||||
go.opentelemetry.io/otel v1.32.0/go.mod h1:00DCVSB0RQcnzlwyTfqtxSm+DRr9hpYrHjNGiBHVQIg=
|
||||
go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.27.0 h1:R9DE4kQ4k+YtfLI2ULwX82VtNQ2J8yZmA7ZIF/D+7Mc=
|
||||
go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.27.0/go.mod h1:OQFyQVrDlbe+R7xrEyDr/2Wr67Ol0hRUgsfA+V5A95s=
|
||||
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.28.0 h1:j9+03ymgYhPKmeXGk5Zu+cIZOlVzd9Zv7QIiyItjFBU=
|
||||
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.28.0/go.mod h1:Y5+XiUG4Emn1hTfciPzGPJaSI+RpDts6BnCIir0SLqk=
|
||||
go.opentelemetry.io/otel/metric v1.31.0 h1:FSErL0ATQAmYHUIzSezZibnyVlft1ybhy4ozRPcF2fE=
|
||||
go.opentelemetry.io/otel/metric v1.31.0/go.mod h1:C3dEloVbLuYoX41KpmAhOqNriGbA+qqH6PQ5E5mUfnY=
|
||||
go.opentelemetry.io/otel/sdk v1.31.0 h1:xLY3abVHYZ5HSfOg3l2E5LUj2Cwva5Y7yGxnSW9H5Gk=
|
||||
go.opentelemetry.io/otel/sdk v1.31.0/go.mod h1:TfRbMdhvxIIr/B2N2LQW2S5v9m3gOQ/08KsbbO5BPT0=
|
||||
go.opentelemetry.io/otel/sdk/metric v1.31.0 h1:i9hxxLJF/9kkvfHppyLL55aW7iIJz4JjxTeYusH7zMc=
|
||||
go.opentelemetry.io/otel/sdk/metric v1.31.0/go.mod h1:CRInTMVvNhUKgSAMbKyTMxqOBC0zgyxzW55lZzX43Y8=
|
||||
go.opentelemetry.io/otel/trace v1.31.0 h1:ffjsj1aRouKewfr85U2aGagJ46+MvodynlQ1HYdmJys=
|
||||
go.opentelemetry.io/otel/trace v1.31.0/go.mod h1:TXZkRk7SM2ZQLtR6eoAWQFIHPvzQ06FJAsO1tJg480A=
|
||||
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.31.0 h1:lUsI2TYsQw2r1IASwoROaCnjdj2cvC2+Jbxvk6nHnWU=
|
||||
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.31.0/go.mod h1:2HpZxxQurfGxJlJDblybejHB6RX6pmExPNe517hREw4=
|
||||
go.opentelemetry.io/otel/metric v1.32.0 h1:xV2umtmNcThh2/a/aCP+h64Xx5wsj8qqnkYZktzNa0M=
|
||||
go.opentelemetry.io/otel/metric v1.32.0/go.mod h1:jH7CIbbK6SH2V2wE16W05BHCtIDzauciCRLoc/SyMv8=
|
||||
go.opentelemetry.io/otel/sdk v1.32.0 h1:RNxepc9vK59A8XsgZQouW8ue8Gkb4jpWtJm9ge5lEG4=
|
||||
go.opentelemetry.io/otel/sdk v1.32.0/go.mod h1:LqgegDBjKMmb2GC6/PrTnteJG39I8/vJCAP9LlJXEjU=
|
||||
go.opentelemetry.io/otel/sdk/metric v1.32.0 h1:rZvFnvmvawYb0alrYkjraqJq0Z4ZUJAiyYCU9snn1CU=
|
||||
go.opentelemetry.io/otel/sdk/metric v1.32.0/go.mod h1:PWeZlq0zt9YkYAp3gjKZ0eicRYvOh1Gd+X99x6GHpCQ=
|
||||
go.opentelemetry.io/otel/trace v1.32.0 h1:WIC9mYrXf8TmY/EXuULKc8hR17vE+Hjv2cssQDe03fM=
|
||||
go.opentelemetry.io/otel/trace v1.32.0/go.mod h1:+i4rkvCraA+tG6AzwloGaCtkx53Fa+L+V8e9a7YvhT8=
|
||||
go.opentelemetry.io/proto/otlp v1.2.0 h1:pVeZGk7nXDC9O2hncA6nHldxEjm6LByfA2aN8IOkz94=
|
||||
go.opentelemetry.io/proto/otlp v1.2.0/go.mod h1:gGpR8txAl5M03pDhMC79G6SdqNV26naRm/KDsgaHD8A=
|
||||
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
|
||||
|
@ -503,18 +505,17 @@ google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7
|
|||
google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc=
|
||||
google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc=
|
||||
google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo=
|
||||
google.golang.org/genproto v0.0.0-20241118233622-e639e219e697 h1:ToEetK57OidYuqD4Q5w+vfEnPvPpuTwedCNVohYJfNk=
|
||||
google.golang.org/genproto/googleapis/api v0.0.0-20241209162323-e6fa225c2576 h1:CkkIfIt50+lT6NHAVoRYEyAvQGFM7xEwXUUywFvEb3Q=
|
||||
google.golang.org/genproto/googleapis/api v0.0.0-20241209162323-e6fa225c2576/go.mod h1:1R3kvZ1dtP3+4p4d3G8uJ8rFk/fWlScl38vanWACI08=
|
||||
google.golang.org/genproto/googleapis/rpc v0.0.0-20250102185135-69823020774d h1:xJJRGY7TJcvIlpSrN3K6LAWgNFUILlO+OMAqtg9aqnw=
|
||||
google.golang.org/genproto/googleapis/rpc v0.0.0-20250102185135-69823020774d/go.mod h1:3ENsm/5D1mzDyhpzeRi1NR784I0BcofWBoSc5QqqMK4=
|
||||
google.golang.org/genproto/googleapis/rpc v0.0.0-20250115164207-1a7da9e5054f h1:OxYkA3wjPsZyBylwymxSHa7ViiW1Sml4ToBrncvFehI=
|
||||
google.golang.org/genproto/googleapis/rpc v0.0.0-20250115164207-1a7da9e5054f/go.mod h1:+2Yz8+CLJbIfL9z73EW45avw8Lmge3xVElCP9zEKi50=
|
||||
google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
|
||||
google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg=
|
||||
google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY=
|
||||
google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk=
|
||||
google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc=
|
||||
google.golang.org/grpc v1.69.4 h1:MF5TftSMkd8GLw/m0KM6V8CMOCY6NZ1NQDPGFgbTt4A=
|
||||
google.golang.org/grpc v1.69.4/go.mod h1:vyjdE6jLBI76dgpDojsFGNaHlxdjXN9ghpnd2o7JGZ4=
|
||||
google.golang.org/grpc v1.70.0 h1:pWFv03aZoHzlRKHWicjsZytKAiYCtNS0dHbXnIdq7jQ=
|
||||
google.golang.org/grpc v1.70.0/go.mod h1:ofIJqVKDXx/JiXrwr2IG4/zwdH9txy3IlF40RmcJSQw=
|
||||
google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8=
|
||||
google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0=
|
||||
google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM=
|
||||
|
@ -524,8 +525,8 @@ google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2
|
|||
google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
|
||||
google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
|
||||
google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c=
|
||||
google.golang.org/protobuf v1.36.2 h1:R8FeyR1/eLmkutZOM5CWghmo5itiG9z0ktFlTVLuTmU=
|
||||
google.golang.org/protobuf v1.36.2/go.mod h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE=
|
||||
google.golang.org/protobuf v1.36.4 h1:6A3ZDJHn/eNqc1i+IdefRzy/9PokBTPvcqMySR7NNIM=
|
||||
google.golang.org/protobuf v1.36.4/go.mod h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE=
|
||||
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
|
||||
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk=
|
||||
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q=
|
||||
|
@ -534,8 +535,8 @@ gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
|
|||
gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
|
||||
gotest.tools v2.2.0+incompatible h1:VsBPFP1AI068pPrMxtb/S8Zkgf9xEmTLJjfM+P5UIEo=
|
||||
gotest.tools v2.2.0+incompatible/go.mod h1:DsYFclhRJ6vuDpmuTbkuFWG+y2sxOXAzmJt81HFBacw=
|
||||
gotest.tools/v3 v3.5.1 h1:EENdUnS3pdur5nybKYIh2Vfgc8IUNBjxDPSjtiJcOzU=
|
||||
gotest.tools/v3 v3.5.1/go.mod h1:isy3WKz7GK6uNw/sbHzfKBLvlvXwUyV06n6brMxxopU=
|
||||
gotest.tools/v3 v3.5.2 h1:7koQfIKdy+I8UTetycgUqXWSDwpgv193Ka+qRsmBY8Q=
|
||||
gotest.tools/v3 v3.5.2/go.mod h1:LtdLGcnqToBH83WByAAi/wiwSFCArdFIUV/xxN4pcjA=
|
||||
honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
|
||||
honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
|
||||
sigs.k8s.io/yaml v1.4.0 h1:Mk1wCc2gy/F0THH0TAp1QYyJNzRm2KCLy3o5ASXVI5E=
|
||||
|
|
|
@@ -1,6 +1,6 @@
 package sysinfo
 
-import "github.com/docker/docker/pkg/parsers"
+import "github.com/containers/storage/pkg/parsers"
 
 // SysInfo stores information about which features a kernel supports.
 // TODO Windows: Factor out platform specific capabilities.
|
||||
|
|
|
@@ -148,6 +148,13 @@ type Options struct {
 // so that storage.ResolveReference returns exactly the created image.
 // WARNING: It is unspecified whether the reference also contains a reference.Named element.
 ReportResolvedReference *types.ImageReference
+
+// DestinationTimestamp, if set, will force timestamps of content created in the destination to this value.
+// Most transports don't support this.
+//
+// In oci-archive: destinations, this will set the create/mod/access timestamps in each tar entry
+// (but not a timestamp of the created archive file).
+DestinationTimestamp *time.Time
 }
 
 // OptionCompressionVariant allows to supply information about
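The new DestinationTimestamp field is only honored by transports that can rewrite content timestamps (per the comment above, oci-archive: is the main one). A minimal hedged sketch of a caller pinning tar-entry timestamps for a reproducible archive; the image references and the accept-anything policy are illustrative only, not part of this change:

    package main

    import (
        "context"
        "time"

        "github.com/containers/image/v5/copy"
        "github.com/containers/image/v5/signature"
        "github.com/containers/image/v5/transports/alltransports"
    )

    func main() {
        ctx := context.Background()

        srcRef, err := alltransports.ParseImageName("docker://quay.io/example/app:latest") // hypothetical source
        if err != nil {
            panic(err)
        }
        destRef, err := alltransports.ParseImageName("oci-archive:/tmp/app.tar") // hypothetical destination
        if err != nil {
            panic(err)
        }

        // Accept-anything policy for illustration only; real callers should load the system policy.
        policy := &signature.Policy{Default: signature.PolicyRequirements{signature.NewPRInsecureAcceptAnything()}}
        policyCtx, err := signature.NewPolicyContext(policy)
        if err != nil {
            panic(err)
        }
        defer policyCtx.Destroy()

        epoch := time.Unix(0, 0) // force all tar entry timestamps in the archive to the epoch
        if _, err := copy.Image(ctx, policyCtx, destRef, srcRef, &copy.Options{
            DestinationTimestamp: &epoch,
        }); err != nil {
            panic(err)
        }
    }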
|
||||
|
@@ -354,6 +361,7 @@ func Image(ctx context.Context, policyContext *signature.PolicyContext, destRef,
 if err := c.dest.CommitWithOptions(ctx, private.CommitOptions{
 UnparsedToplevel: c.unparsedToplevel,
 ReportResolvedReference: options.ReportResolvedReference,
+Timestamp: options.DestinationTimestamp,
 }); err != nil {
 return nil, fmt.Errorf("committing the finished image: %w", err)
 }
|
||||
|
|
|
@@ -328,19 +328,16 @@ func prepareImageConfigForDest(ctx context.Context, sys *types.SystemContext, sr
 }
 wantedPlatforms := platform.WantedPlatforms(sys)
 
-options := newOrderedSet()
-match := false
-for _, wantedPlatform := range wantedPlatforms {
+if !slices.ContainsFunc(wantedPlatforms, func(wantedPlatform imgspecv1.Platform) bool {
 // For a transitional period, this might trigger warnings because the Variant
 // field was added to OCI config only recently. If this turns out to be too noisy,
 // revert this check to only look for (OS, Architecture).
-if platform.MatchesPlatform(ociConfig.Platform, wantedPlatform) {
-match = true
-break
+return platform.MatchesPlatform(ociConfig.Platform, wantedPlatform)
+}) {
+options := newOrderedSet()
+for _, p := range wantedPlatforms {
+options.append(fmt.Sprintf("%s+%s+%q", p.OS, p.Architecture, p.Variant))
 }
-options.append(fmt.Sprintf("%s+%s+%q", wantedPlatform.OS, wantedPlatform.Architecture, wantedPlatform.Variant))
-}
-if !match {
 logrus.Infof("Image operating system mismatch: image uses OS %q+architecture %q+%q, expecting one of %q",
 ociConfig.OS, ociConfig.Architecture, ociConfig.Variant, strings.Join(options.list, ", "))
 }
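The rewrite above replaces a manual loop-and-flag with slices.ContainsFunc from the standard library (Go 1.21+). The same idiom in a standalone sketch, with a made-up platform type standing in for imgspecv1.Platform:

    package main

    import (
        "fmt"
        "slices"
    )

    type platform struct{ OS, Arch, Variant string }

    func main() {
        wanted := []platform{
            {OS: "linux", Arch: "arm64", Variant: "v8"},
            {OS: "linux", Arch: "amd64"},
        }
        image := platform{OS: "linux", Arch: "amd64"}

        // slices.ContainsFunc reports whether any element satisfies the predicate,
        // replacing the explicit "match := false; for ... { match = true; break }" pattern.
        if !slices.ContainsFunc(wanted, func(p platform) bool {
            return p.OS == image.OS && p.Arch == image.Arch && p.Variant == image.Variant
        }) {
            fmt.Println("image platform does not match any wanted platform")
        }
    }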
|
||||
|
|
|
@@ -92,7 +92,7 @@ func imageLoadGoroutine(ctx context.Context, c *client.Client, reader *io.PipeRe
 
 // imageLoad accepts tar stream on reader and sends it to c
 func imageLoad(ctx context.Context, c *client.Client, reader *io.PipeReader) error {
-resp, err := c.ImageLoad(ctx, reader, true)
+resp, err := c.ImageLoad(ctx, reader, client.ImageLoadWithQuiet(true))
 if err != nil {
 return fmt.Errorf("starting a load operation in docker engine: %w", err)
 }
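This tracks the docker client API in v28, which replaced the boolean "quiet" parameter with functional options. A hedged sketch of calling the new form directly; the tarball path is invented, and the client constructor options are the commonly used ones rather than anything mandated by this diff:

    package main

    import (
        "context"
        "io"
        "os"

        "github.com/docker/docker/client"
    )

    func main() {
        ctx := context.Background()
        c, err := client.NewClientWithOpts(client.FromEnv, client.WithAPIVersionNegotiation())
        if err != nil {
            panic(err)
        }
        defer c.Close()

        tar, err := os.Open("/tmp/image.tar") // hypothetical docker-archive tarball
        if err != nil {
            panic(err)
        }
        defer tar.Close()

        // v28: options are passed as functional arguments instead of a bool.
        resp, err := c.ImageLoad(ctx, tar, client.ImageLoadWithQuiet(true))
        if err != nil {
            panic(err)
        }
        defer resp.Body.Close()
        _, _ = io.Copy(io.Discard, resp.Body) // drain the engine's progress output
    }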
|
||||
|
|
|
@@ -3,6 +3,7 @@ package private
 import (
 "context"
 "io"
+"time"
 
 "github.com/containers/image/v5/docker/reference"
 "github.com/containers/image/v5/internal/blobinfocache"
|
||||
|
@@ -170,6 +171,12 @@ type CommitOptions struct {
 // What “resolved” means is transport-specific.
 // Transports which don’t support reporting resolved references can ignore the field; the generic copy code writes "nil" into the value.
 ReportResolvedReference *types.ImageReference
+// Timestamp, if set, will force timestamps of content created in the destination to this value.
+// Most transports don't support this.
+//
+// In oci-archive: destinations, this will set the create/mod/access timestamps in each tar entry
+// (but not a timestamp of the created archive file).
+Timestamp *time.Time
 }
 
 // ImageSourceChunk is a portion of a blob.
|
||||
|
|
common/vendor/github.com/containers/image/v5/internal/reflink/reflink_linux.go (generated, vendored; 22 changed lines)
@@ -1,22 +0,0 @@
-//go:build linux
-
-package reflink
-
-import (
-"io"
-"os"
-
-"golang.org/x/sys/unix"
-)
-
-// LinkOrCopy attempts to reflink the source to the destination fd.
-// If reflinking fails or is unsupported, it falls back to io.Copy().
-func LinkOrCopy(src, dst *os.File) error {
-_, _, errno := unix.Syscall(unix.SYS_IOCTL, dst.Fd(), unix.FICLONE, src.Fd())
-if errno == 0 {
-return nil
-}
-
-_, err := io.Copy(dst, src)
-return err
-}
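The helper is dropped because c/storage now ships an equivalent, fileutils.ReflinkOrCopy, which the oci/layout destination switches to further down in this diff. A minimal hedged usage sketch, with invented file paths; it is intended to behave like the removed LinkOrCopy (FICLONE reflink first, byte copy as fallback):

    package main

    import (
        "os"

        "github.com/containers/storage/pkg/fileutils"
    )

    func main() {
        src, err := os.Open("/tmp/blob.src") // hypothetical input
        if err != nil {
            panic(err)
        }
        defer src.Close()

        dst, err := os.Create("/tmp/blob.dst") // hypothetical output
        if err != nil {
            panic(err)
        }
        defer dst.Close()

        // Try a reflink, fall back to copying bytes when the filesystem can't do it.
        if err := fileutils.ReflinkOrCopy(src, dst); err != nil {
            panic(err)
        }
    }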
|
|
@@ -166,10 +166,11 @@ func (m *OCI1) UpdateLayerInfos(layerInfos []types.BlobInfo) error {
 // getEncryptedMediaType will return the mediatype to its encrypted counterpart and return
 // an error if the mediatype does not support encryption
 func getEncryptedMediaType(mediatype string) (string, error) {
-if slices.Contains(strings.Split(mediatype, "+")[1:], "encrypted") {
+parts := strings.Split(mediatype, "+")
+if slices.Contains(parts[1:], "encrypted") {
 return "", fmt.Errorf("unsupported mediaType: %q already encrypted", mediatype)
 }
-unsuffixedMediatype := strings.Split(mediatype, "+")[0]
+unsuffixedMediatype := parts[0]
 switch unsuffixedMediatype {
 case DockerV2Schema2LayerMediaType, imgspecv1.MediaTypeImageLayer,
 imgspecv1.MediaTypeImageLayerNonDistributable: //nolint:staticcheck // NonDistributable layers are deprecated, but we want to continue to support manipulating pre-existing images.
|
||||
|
|
|
@ -5,6 +5,7 @@ import (
|
|||
"fmt"
|
||||
"io"
|
||||
"os"
|
||||
"time"
|
||||
|
||||
"github.com/containers/image/v5/internal/imagedestination"
|
||||
"github.com/containers/image/v5/internal/imagedestination/impl"
|
||||
|
@ -172,16 +173,19 @@ func (d *ociArchiveImageDestination) CommitWithOptions(ctx context.Context, opti
|
|||
src := d.tempDirRef.tempDirectory
|
||||
// path to save tarred up file
|
||||
dst := d.ref.resolvedFile
|
||||
return tarDirectory(src, dst)
|
||||
return tarDirectory(src, dst, options.Timestamp)
|
||||
}
|
||||
|
||||
// tar converts the directory at src and saves it to dst
|
||||
func tarDirectory(src, dst string) error {
|
||||
// if contentModTimes is non-nil, tar header entries times are set to this
|
||||
func tarDirectory(src, dst string, contentModTimes *time.Time) (retErr error) {
|
||||
// input is a stream of bytes from the archive of the directory at path
|
||||
input, err := archive.TarWithOptions(src, &archive.TarOptions{
|
||||
Compression: archive.Uncompressed,
|
||||
// Don’t include the data about the user account this code is running under.
|
||||
ChownOpts: &idtools.IDPair{UID: 0, GID: 0},
|
||||
// override tar header timestamps
|
||||
Timestamp: contentModTimes,
|
||||
})
|
||||
if err != nil {
|
||||
return fmt.Errorf("retrieving stream of bytes from %q: %w", src, err)
|
||||
|
@ -193,7 +197,14 @@ func tarDirectory(src, dst string) error {
|
|||
if err != nil {
|
||||
return fmt.Errorf("creating tar file %q: %w", dst, err)
|
||||
}
|
||||
defer outFile.Close()
|
||||
|
||||
// since we are writing to this file, make sure we handle errors
|
||||
defer func() {
|
||||
closeErr := outFile.Close()
|
||||
if retErr == nil {
|
||||
retErr = closeErr
|
||||
}
|
||||
}()
|
||||
|
||||
// copies the contents of the directory to the tar file
|
||||
// TODO: This can take quite some time, and should ideally be cancellable using a context.Context.
|
||||
|
|
|
@ -159,7 +159,7 @@ func (ref ociReference) deleteReferenceFromIndex(referenceIndex int) error {
|
|||
return saveJSON(ref.indexPath(), index)
|
||||
}
|
||||
|
||||
func saveJSON(path string, content any) error {
|
||||
func saveJSON(path string, content any) (retErr error) {
|
||||
// If the file already exists, get its mode to preserve it
|
||||
var mode fs.FileMode
|
||||
existingfi, err := os.Stat(path)
|
||||
|
@ -177,7 +177,13 @@ func saveJSON(path string, content any) error {
|
|||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer file.Close()
|
||||
// since we are writing to this file, make sure we handle errors
|
||||
defer func() {
|
||||
closeErr := file.Close()
|
||||
if retErr == nil {
|
||||
retErr = closeErr
|
||||
}
|
||||
}()
|
||||
|
||||
return json.NewEncoder(file).Encode(content)
|
||||
}
|
||||
|
|
|
@@ -17,7 +17,6 @@ import (
 "github.com/containers/image/v5/internal/manifest"
 "github.com/containers/image/v5/internal/private"
 "github.com/containers/image/v5/internal/putblobdigest"
-"github.com/containers/image/v5/internal/reflink"
 "github.com/containers/image/v5/types"
 "github.com/containers/storage/pkg/fileutils"
 digest "github.com/opencontainers/go-digest"
|
||||
|
@@ -116,7 +115,7 @@ func (d *ociImageDestination) Close() error {
 // WARNING: The contents of stream are being verified on the fly. Until stream.Read() returns io.EOF, the contents of the data SHOULD NOT be available
 // to any other readers for download using the supplied digest.
 // If stream.Read() at any time, ESPECIALLY at end of input, returns an error, PutBlobWithOptions MUST 1) fail, and 2) delete any data stored so far.
-func (d *ociImageDestination) PutBlobWithOptions(ctx context.Context, stream io.Reader, inputInfo types.BlobInfo, options private.PutBlobOptions) (private.UploadedBlob, error) {
+func (d *ociImageDestination) PutBlobWithOptions(ctx context.Context, stream io.Reader, inputInfo types.BlobInfo, options private.PutBlobOptions) (_ private.UploadedBlob, retErr error) {
 blobFile, err := os.CreateTemp(d.ref.dir, "oci-put-blob")
 if err != nil {
 return private.UploadedBlob{}, err
|
||||
|
@@ -125,7 +124,10 @@ func (d *ociImageDestination) PutBlobWithOptions(ctx context.Context, stream io.
 explicitClosed := false
 defer func() {
 if !explicitClosed {
-blobFile.Close()
+closeErr := blobFile.Close()
+if retErr == nil {
+retErr = closeErr
+}
 }
 if !succeeded {
 os.Remove(blobFile.Name())
|
||||
|
@@ -177,7 +179,10 @@ func (d *ociImageDestination) blobFileSyncAndRename(blobFile *os.File, blobDiges
 }
 
 // need to explicitly close the file, since a rename won't otherwise work on Windows
-blobFile.Close()
+err = blobFile.Close()
+if err != nil {
+return err
+}
 *closed = true
 
 if err := os.Rename(blobFile.Name(), blobPath); err != nil {
|
||||
|
@@ -324,10 +329,10 @@ type PutBlobFromLocalFileOption struct{}
 // It computes, and returns, the digest and size of the used file.
 //
 // This function can be used instead of dest.PutBlob() where the ImageDestination requires PutBlob() to be called.
-func PutBlobFromLocalFile(ctx context.Context, dest types.ImageDestination, file string, options ...PutBlobFromLocalFileOption) (digest.Digest, int64, error) {
+func PutBlobFromLocalFile(ctx context.Context, dest types.ImageDestination, file string, options ...PutBlobFromLocalFileOption) (_ digest.Digest, _ int64, retErr error) {
 d, ok := dest.(*ociImageDestination)
 if !ok {
-return "", -1, errors.New("internal error: PutBlobFromLocalFile called with a non-oci: destination")
+return "", -1, errors.New("caller error: PutBlobFromLocalFile called with a non-oci: destination")
 }
 
 succeeded := false
|
||||
|
@@ -338,7 +343,10 @@ func PutBlobFromLocalFile(ctx context.Context, dest types.ImageDestination, file
 }
 defer func() {
 if !blobFileClosed {
-blobFile.Close()
+closeErr := blobFile.Close()
+if retErr == nil {
+retErr = closeErr
+}
 }
 if !succeeded {
 os.Remove(blobFile.Name())
|
||||
|
@@ -351,7 +359,7 @@ func PutBlobFromLocalFile(ctx context.Context, dest types.ImageDestination, file
 }
 defer srcFile.Close()
 
-err = reflink.LinkOrCopy(srcFile, blobFile)
+err = fileutils.ReflinkOrCopy(srcFile, blobFile)
 if err != nil {
 return "", -1, err
 }
|
||||
|
|
|
@ -16,6 +16,7 @@ import (
|
|||
"github.com/containers/image/v5/internal/private"
|
||||
"github.com/containers/image/v5/pkg/tlsclientconfig"
|
||||
"github.com/containers/image/v5/types"
|
||||
"github.com/containers/storage/pkg/fileutils"
|
||||
"github.com/docker/go-connections/tlsconfig"
|
||||
"github.com/opencontainers/go-digest"
|
||||
imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1"
|
||||
|
@@ -214,3 +215,26 @@ func getBlobSize(resp *http.Response) int64 {
 }
 return size
 }
+
+// GetLocalBlobPath returns the local path to the blob file with the given digest.
+// The returned path is checked for existence so when a non existing digest is
+// given an error will be returned.
+//
+// Important: The returned path must be treated as read only, writing the file will
+// corrupt the oci layout as the digest no longer matches.
+func GetLocalBlobPath(ctx context.Context, src types.ImageSource, digest digest.Digest) (string, error) {
+s, ok := src.(*ociImageSource)
+if !ok {
+return "", errors.New("caller error: GetLocalBlobPath called with a non-oci: source")
+}
+
+path, err := s.ref.blobPath(digest, s.sharedBlobDir)
+if err != nil {
+return "", err
+}
+if err := fileutils.Exists(path); err != nil {
+return "", err
+}
+
+return path, nil
+}
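GetLocalBlobPath is newly vendored oci/layout API here. A hedged sketch of resolving a blob to its on-disk path; the layout path, tag, and digest are placeholders, and the source must come from the oci: transport or the call returns the "caller error" above:

    package main

    import (
        "context"
        "fmt"

        "github.com/containers/image/v5/oci/layout"
        "github.com/opencontainers/go-digest"
    )

    func main() {
        ctx := context.Background()

        ref, err := layout.ParseReference("/var/lib/oci-layout:app") // hypothetical layout:tag
        if err != nil {
            panic(err)
        }
        src, err := ref.NewImageSource(ctx, nil)
        if err != nil {
            panic(err)
        }
        defer src.Close()

        dgst := digest.Digest("sha256:0000000000000000000000000000000000000000000000000000000000000000") // placeholder
        path, err := layout.GetLocalBlobPath(ctx, src, dgst)
        if err != nil {
            panic(err)
        }
        fmt.Println("blob lives at (treat as read-only):", path)
    }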
|
||||
|
|
|
@ -12,6 +12,7 @@ import (
|
|||
"github.com/containers/image/v5/directory/explicitfilepath"
|
||||
"github.com/containers/image/v5/docker/reference"
|
||||
"github.com/containers/image/v5/internal/image"
|
||||
"github.com/containers/image/v5/internal/manifest"
|
||||
"github.com/containers/image/v5/oci/internal"
|
||||
"github.com/containers/image/v5/transports"
|
||||
"github.com/containers/image/v5/types"
|
||||
|
@@ -234,7 +235,7 @@ func (ref ociReference) getManifestDescriptor() (imgspecv1.Descriptor, int, erro
 var unsupportedMIMETypes []string
 for i, md := range index.Manifests {
 if refName, ok := md.Annotations[imgspecv1.AnnotationRefName]; ok && refName == ref.image {
-if md.MediaType == imgspecv1.MediaTypeImageManifest || md.MediaType == imgspecv1.MediaTypeImageIndex {
+if md.MediaType == imgspecv1.MediaTypeImageManifest || md.MediaType == imgspecv1.MediaTypeImageIndex || md.MediaType == manifest.DockerV2Schema2MediaType || md.MediaType == manifest.DockerV2ListMediaType {
 return md, i, nil
 }
 unsupportedMIMETypes = append(unsupportedMIMETypes, md.MediaType)
|
||||
|
|
|
@ -143,16 +143,24 @@ func (d *ostreeImageDestination) PutBlobWithOptions(ctx context.Context, stream
|
|||
return private.UploadedBlob{}, err
|
||||
}
|
||||
|
||||
digester, stream := putblobdigest.DigestIfCanonicalUnknown(stream, inputInfo)
|
||||
|
||||
blobPath := filepath.Join(tmpDir, "content")
|
||||
blobFile, err := os.Create(blobPath)
|
||||
if err != nil {
|
||||
return private.UploadedBlob{}, err
|
||||
}
|
||||
defer blobFile.Close()
|
||||
|
||||
digester, stream := putblobdigest.DigestIfCanonicalUnknown(stream, inputInfo)
|
||||
// TODO: This can take quite some time, and should ideally be cancellable using ctx.Done().
|
||||
size, err := io.Copy(blobFile, stream)
|
||||
size, err := func() (_ int64, retErr error) { // A scope for defer
|
||||
// since we are writing to this file, make sure we handle errors
|
||||
defer func() {
|
||||
closeErr := blobFile.Close()
|
||||
if retErr == nil {
|
||||
retErr = closeErr
|
||||
}
|
||||
}()
|
||||
// TODO: This can take quite some time, and should ideally be cancellable using ctx.Done().
|
||||
return io.Copy(blobFile, stream)
|
||||
}()
|
||||
if err != nil {
|
||||
return private.UploadedBlob{}, err
|
||||
}
|
||||
|
@ -247,9 +255,15 @@ func (d *ostreeImageDestination) ostreeCommit(repo *otbuiltin.Repo, branch strin
|
|||
return err
|
||||
}
|
||||
|
||||
func generateTarSplitMetadata(output *bytes.Buffer, file string) (digest.Digest, int64, error) {
|
||||
func generateTarSplitMetadata(output *bytes.Buffer, file string) (_ digest.Digest, _ int64, retErr error) {
|
||||
mfz := pgzip.NewWriter(output)
|
||||
defer mfz.Close()
|
||||
// since we are writing to this, make sure we handle errors
|
||||
defer func() {
|
||||
closeErr := mfz.Close()
|
||||
if retErr == nil {
|
||||
retErr = closeErr
|
||||
}
|
||||
}()
|
||||
metaPacker := storage.NewJSONPacker(mfz)
|
||||
|
||||
stream, err := os.OpenFile(file, os.O_RDONLY, 0)
|
||||
|
|
|
@ -250,9 +250,7 @@ func newOSTreePathFileGetter(repo *C.struct_OstreeRepo, commit string) (*ostreeP
|
|||
|
||||
func (o ostreePathFileGetter) Get(filename string) (io.ReadCloser, error) {
|
||||
var file *C.GFile
|
||||
if strings.HasPrefix(filename, "./") {
|
||||
filename = filename[2:]
|
||||
}
|
||||
filename, _ = strings.CutPrefix(filename, "./")
|
||||
cfilename := C.CString(filename)
|
||||
defer C.free(unsafe.Pointer(cfilename))
|
||||
|
||||
|
|
common/vendor/github.com/containers/image/v5/pkg/blobinfocache/sqlite/sqlite.go (generated, vendored; 33 changed lines)
@@ -87,14 +87,20 @@ func new2(path string) (*cache, error) {
 if err != nil {
 return nil, fmt.Errorf("initializing blob info cache at %q: %w", path, err)
 }
-defer db.Close()
-
-// We don’t check the schema before every operation, because that would be costly
-// and because we assume schema changes will be handled by using a different path.
-if err := ensureDBHasCurrentSchema(db); err != nil {
+err = func() (retErr error) { // A scope for defer
+defer func() {
+closeErr := db.Close()
+if retErr == nil {
+retErr = closeErr
+}
+}()
+// We don’t check the schema before every operation, because that would be costly
+// and because we assume schema changes will be handled by using a different path.
+return ensureDBHasCurrentSchema(db)
+}()
+if err != nil {
 return nil, err
 }
-
 return &cache{
 path: path,
 refCount: 0,
|
||||
|
@@ -147,25 +153,30 @@ func (sqc *cache) Close() {
 type void struct{} // So that we don’t have to write struct{}{} all over the place
 
 // transaction calls fn within a read-write transaction in sqc.
-func transaction[T any](sqc *cache, fn func(tx *sql.Tx) (T, error)) (T, error) {
-db, closeDB, err := func() (*sql.DB, func(), error) { // A scope for defer
+func transaction[T any](sqc *cache, fn func(tx *sql.Tx) (T, error)) (_ T, retErr error) {
+db, closeDB, err := func() (*sql.DB, func() error, error) { // A scope for defer
 sqc.lock.Lock()
 defer sqc.lock.Unlock()
 
 if sqc.db != nil {
-return sqc.db, func() {}, nil
+return sqc.db, func() error { return nil }, nil
 }
 db, err := rawOpen(sqc.path)
 if err != nil {
 return nil, nil, fmt.Errorf("opening blob info cache at %q: %w", sqc.path, err)
 }
-return db, func() { db.Close() }, nil
+return db, db.Close, nil
 }()
 if err != nil {
 var zeroRes T // A zero value of T
 return zeroRes, err
 }
-defer closeDB()
+defer func() {
+closeErr := closeDB()
+if retErr == nil {
+retErr = closeErr
+}
+}()
 
 return dbTransaction(db, fn)
 }
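The generic transaction helper above now surfaces the database Close() error as well. A standalone sketch of the same shape — run a callback inside a transaction, roll back on error, commit on success — using only database/sql; the dbTransaction internals are not shown in this diff, so this is an analogue rather than the actual implementation:

    package main

    import (
        "database/sql"
        "fmt"
    )

    // withTx is a simplified analogue of the transaction[T] helper above.
    func withTx[T any](db *sql.DB, fn func(tx *sql.Tx) (T, error)) (_ T, retErr error) {
        var zero T
        tx, err := db.Begin()
        if err != nil {
            return zero, err
        }
        defer func() {
            if retErr != nil {
                _ = tx.Rollback() // best effort; the original error wins
            }
        }()

        res, err := fn(tx)
        if err != nil {
            return zero, err
        }
        if err := tx.Commit(); err != nil {
            return zero, err
        }
        return res, nil
    }

    func main() {
        fmt.Println("withTx wires Begin/Commit/Rollback around a callback, in the spirit of dbTransaction above")
    }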
|
||||
|
|
common/vendor/github.com/containers/image/v5/pkg/sysregistriesv2/shortnames.go (generated, vendored; 10 changed lines)
@@ -134,7 +134,7 @@ func ResolveShortNameAlias(ctx *types.SystemContext, name string) (reference.Nam
 // editShortNameAlias loads the aliases.conf file and changes it. If value is
 // set, it adds the name-value pair as a new alias. Otherwise, it will remove
 // name from the config.
-func editShortNameAlias(ctx *types.SystemContext, name string, value *string) error {
+func editShortNameAlias(ctx *types.SystemContext, name string, value *string) (retErr error) {
 if err := validateShortName(name); err != nil {
 return err
 }
|
||||
|
@@ -178,7 +178,13 @@ func editShortNameAlias(ctx *types.SystemContext, name string, value *string) er
 if err != nil {
 return err
 }
-defer f.Close()
+// since we are writing to this file, make sure we handle err on Close()
+defer func() {
+closeErr := f.Close()
+if retErr == nil {
+retErr = closeErr
+}
+}()
 
 encoder := toml.NewEncoder(f)
 return encoder.Encode(conf)
|
||||
|
|
|
@@ -430,7 +430,8 @@ func (config *V2RegistriesConf) postProcessRegistries() error {
 return fmt.Errorf("pull-from-mirror must not be set for a non-mirror registry %q", reg.Prefix)
 }
 // make sure mirrors are valid
-for _, mir := range reg.Mirrors {
+for j := range reg.Mirrors {
+mir := &reg.Mirrors[j]
 mir.Location, err = parseLocation(mir.Location)
 if err != nil {
 return err
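The mirror loop now indexes into the slice because `for _, mir := range reg.Mirrors` iterates over copies, so assigning to mir.Location never updated the stored configuration. The same gotcha in isolation, with a made-up normalize helper standing in for parseLocation:

    package main

    import (
        "fmt"
        "strings"
    )

    type Mirror struct{ Location string }

    func normalize(s string) string {
        // stand-in for parseLocation: lower-case the location (illustrative only)
        return strings.ToLower(s)
    }

    func main() {
        mirrors := []Mirror{{"Example.COM/one"}, {"MIRROR.example.com/two"}}

        // Buggy: m is a copy, the slice is never modified.
        for _, m := range mirrors {
            m.Location = normalize(m.Location)
        }
        fmt.Println(mirrors[0].Location) // still "Example.COM/one"

        // Fixed: take a pointer to the element, as the patch above does.
        for i := range mirrors {
            m := &mirrors[i]
            m.Location = normalize(m.Location)
        }
        fmt.Println(mirrors[0].Location) // "example.com/one"
    }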
|
||||
|
|
|
@ -186,12 +186,18 @@ func convertSIFToElements(ctx context.Context, sifImage *sif.FileImage, tempDir
|
|||
// has an -o option that allows extracting a squashfs from the SIF file directly,
|
||||
// but that version is not currently available in RHEL 8.
|
||||
logrus.Debugf("Creating a temporary squashfs image %s ...", squashFSPath)
|
||||
if err := func() error { // A scope for defer
|
||||
if err := func() (retErr error) { // A scope for defer
|
||||
f, err := os.Create(squashFSPath)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer f.Close()
|
||||
// since we are writing to this file, make sure we handle err on Close()
|
||||
defer func() {
|
||||
closeErr := f.Close()
|
||||
if retErr == nil {
|
||||
retErr = closeErr
|
||||
}
|
||||
}()
|
||||
// TODO: This can take quite some time, and should ideally be cancellable using ctx.Done().
|
||||
if _, err := io.CopyN(f, rootFS.GetReader(), rootFS.Size()); err != nil {
|
||||
return err
|
||||
|
|
|
@@ -180,18 +180,18 @@ type PRSigstoreSignedPKI interface {
 // prSigstoreSignedPKI contains non-fulcio certificate PKI configuration options for prSigstoreSigned
 type prSigstoreSignedPKI struct {
 // CARootsPath a path to a file containing accepted CA root certificates, in PEM format. Exactly one of CARootsPath and CARootsData must be specified.
-CARootsPath string `json:"caRootsPath"`
+CARootsPath string `json:"caRootsPath,omitempty"`
 // CARootsData contains accepted CA root certificates in PEM format, all of that base64-encoded. Exactly one of CARootsPath and CARootsData must be specified.
-CARootsData []byte `json:"caRootsData"`
+CARootsData []byte `json:"caRootsData,omitempty"`
 // CAIntermediatesPath a path to a file containing accepted CA intermediate certificates, in PEM format. Only one of CAIntermediatesPath or CAIntermediatesData can be specified, not both.
-CAIntermediatesPath string `json:"caIntermediatesPath"`
+CAIntermediatesPath string `json:"caIntermediatesPath,omitempty"`
 // CAIntermediatesData contains accepted CA intermediate certificates in PEM format, all of that base64-encoded. Only one of CAIntermediatesPath or CAIntermediatesData can be specified, not both.
-CAIntermediatesData []byte `json:"caIntermediatesData"`
+CAIntermediatesData []byte `json:"caIntermediatesData,omitempty"`
 
 // SubjectEmail specifies the expected email address imposed on the subject to which the certificate was issued. At least one of SubjectEmail and SubjectHostname must be specified.
-SubjectEmail string `json:"subjectEmail"`
+SubjectEmail string `json:"subjectEmail,omitempty"`
 // SubjectHostname specifies the expected hostname imposed on the subject to which the certificate was issued. At least one of SubjectEmail and SubjectHostname must be specified.
-SubjectHostname string `json:"subjectHostname"`
+SubjectHostname string `json:"subjectHostname,omitempty"`
 }
 
 // PolicyReferenceMatch specifies a set of image identities accepted in PolicyRequirement.
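Adding `omitempty` keeps unset PKI fields out of the serialized policy instead of emitting empty strings and nulls. A small demonstration with encoding/json, using a cut-down struct modeled on the fields above:

    package main

    import (
        "encoding/json"
        "fmt"
    )

    type pki struct {
        CARootsPath  string `json:"caRootsPath,omitempty"`
        CARootsData  []byte `json:"caRootsData,omitempty"`
        SubjectEmail string `json:"subjectEmail,omitempty"`
    }

    func main() {
        withOmit, _ := json.Marshal(pki{SubjectEmail: "release@example.com"}) // hypothetical value
        fmt.Println(string(withOmit)) // {"subjectEmail":"release@example.com"} — empty fields are dropped

        type verbose struct {
            CARootsPath string `json:"caRootsPath"`
            CARootsData []byte `json:"caRootsData"`
        }
        noOmit, _ := json.Marshal(verbose{})
        fmt.Println(string(noOmit)) // {"caRootsPath":"","caRootsData":null} — what the old tags produced
    }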
|
||||
|
|
|
@ -272,43 +272,56 @@ func (s *storageImageDestination) putBlobToPendingFile(stream io.Reader, blobinf
|
|||
if err != nil {
|
||||
return private.UploadedBlob{}, fmt.Errorf("creating temporary file %q: %w", filename, err)
|
||||
}
|
||||
defer file.Close()
|
||||
counter := ioutils.NewWriteCounter(file)
|
||||
stream = io.TeeReader(stream, counter)
|
||||
digester, stream := putblobdigest.DigestIfUnknown(stream, blobinfo)
|
||||
decompressed, err := archive.DecompressStream(stream)
|
||||
if err != nil {
|
||||
return private.UploadedBlob{}, fmt.Errorf("setting up to decompress blob: %w", err)
|
||||
}
|
||||
blobDigest, diffID, count, err := func() (_, _ digest.Digest, _ int64, retErr error) { // A scope for defer
|
||||
// since we are writing to this file, make sure we handle err on Close()
|
||||
defer func() {
|
||||
closeErr := file.Close()
|
||||
if retErr == nil {
|
||||
retErr = closeErr
|
||||
}
|
||||
}()
|
||||
counter := ioutils.NewWriteCounter(file)
|
||||
stream = io.TeeReader(stream, counter)
|
||||
digester, stream := putblobdigest.DigestIfUnknown(stream, blobinfo)
|
||||
decompressed, err := archive.DecompressStream(stream)
|
||||
if err != nil {
|
||||
return "", "", 0, fmt.Errorf("setting up to decompress blob: %w", err)
|
||||
|
||||
diffID := digest.Canonical.Digester()
|
||||
// Copy the data to the file.
|
||||
// TODO: This can take quite some time, and should ideally be cancellable using context.Context.
|
||||
_, err = io.Copy(diffID.Hash(), decompressed)
|
||||
decompressed.Close()
|
||||
}
|
||||
defer decompressed.Close()
|
||||
|
||||
diffID := digest.Canonical.Digester()
|
||||
// Copy the data to the file.
|
||||
// TODO: This can take quite some time, and should ideally be cancellable using context.Context.
|
||||
_, err = io.Copy(diffID.Hash(), decompressed)
|
||||
if err != nil {
|
||||
return "", "", 0, fmt.Errorf("storing blob to file %q: %w", filename, err)
|
||||
}
|
||||
|
||||
return digester.Digest(), diffID.Digest(), counter.Count, nil
|
||||
}()
|
||||
if err != nil {
|
||||
return private.UploadedBlob{}, fmt.Errorf("storing blob to file %q: %w", filename, err)
|
||||
return private.UploadedBlob{}, err
|
||||
}
|
||||
|
||||
// Determine blob properties, and fail if information that we were given about the blob
|
||||
// is known to be incorrect.
|
||||
blobDigest := digester.Digest()
|
||||
blobSize := blobinfo.Size
|
||||
if blobSize < 0 {
|
||||
blobSize = counter.Count
|
||||
} else if blobinfo.Size != counter.Count {
|
||||
blobSize = count
|
||||
} else if blobinfo.Size != count {
|
||||
return private.UploadedBlob{}, ErrBlobSizeMismatch
|
||||
}
|
||||
|
||||
// Record information about the blob.
|
||||
s.lock.Lock()
|
||||
s.lockProtected.blobDiffIDs[blobDigest] = diffID.Digest()
|
||||
s.lockProtected.fileSizes[blobDigest] = counter.Count
|
||||
s.lockProtected.blobDiffIDs[blobDigest] = diffID
|
||||
s.lockProtected.fileSizes[blobDigest] = count
|
||||
s.lockProtected.filenames[blobDigest] = filename
|
||||
s.lock.Unlock()
|
||||
// This is safe because we have just computed diffID, and blobDigest was either computed
|
||||
// by us, or validated by the caller (usually copy.digestingReader).
|
||||
options.Cache.RecordDigestUncompressedPair(blobDigest, diffID.Digest())
|
||||
options.Cache.RecordDigestUncompressedPair(blobDigest, diffID)
|
||||
return private.UploadedBlob{
|
||||
Digest: blobDigest,
|
||||
Size: blobSize,
|
||||
|
|
|
@@ -362,15 +362,14 @@ func (s storageTransport) ValidatePolicyConfigurationScope(scope string) error {
 }
 storeSpec := scope[1:closeIndex]
 scope = scope[closeIndex+1:]
-storeInfo := strings.SplitN(storeSpec, "@", 2)
-if len(storeInfo) == 1 && storeInfo[0] != "" {
-// One component: the graph root.
-if !filepath.IsAbs(storeInfo[0]) {
+if a, b, ok := strings.Cut(storeSpec, "@"); ok && a != "" && b != "" {
+// Two components: the driver type and the graph root.
+if !filepath.IsAbs(b) {
 return ErrPathNotAbsolute
 }
-} else if len(storeInfo) == 2 && storeInfo[0] != "" && storeInfo[1] != "" {
-// Two components: the driver type and the graph root.
-if !filepath.IsAbs(storeInfo[1]) {
+} else if !ok && a != "" {
+// One component: the graph root.
+if !filepath.IsAbs(storeSpec) {
 return ErrPathNotAbsolute
 }
 } else {
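strings.Cut (Go 1.18+) splits on the first separator and reports whether it was found, which reads more directly than SplitN plus length checks. The scope parsing above in miniature, with invented store specifications:

    package main

    import (
        "fmt"
        "strings"
    )

    func describe(storeSpec string) string {
        if driver, root, ok := strings.Cut(storeSpec, "@"); ok && driver != "" && root != "" {
            return fmt.Sprintf("driver %q with graph root %q", driver, root)
        } else if !ok && storeSpec != "" {
            return fmt.Sprintf("graph root %q", storeSpec)
        }
        return "invalid store specification"
    }

    func main() {
        fmt.Println(describe("overlay@/var/lib/containers/storage"))
        fmt.Println(describe("/var/lib/containers/storage"))
    }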
|
||||
|
|
|
@@ -6,12 +6,12 @@ const (
// VersionMajor is for an API incompatible changes
VersionMajor = 5
// VersionMinor is for functionality in a backwards-compatible manner
VersionMinor = 34
VersionMinor = 35
// VersionPatch is for backwards-compatible bug fixes
VersionPatch = 1
VersionPatch = 0

// VersionDev indicates development branch. Releases will be empty string.
VersionDev = ""
VersionDev = "-dev"
)

// Version is the specification version that the package types support.

@@ -23,7 +23,7 @@ env:
# GCE project where images live
IMAGE_PROJECT: "libpod-218412"
# VM Image built in containers/automation_images
IMAGE_SUFFIX: "c20250107t132430z-f41f40d13"
IMAGE_SUFFIX: "c20250131t121915z-f41f40d13"
FEDORA_CACHE_IMAGE_NAME: "fedora-${IMAGE_SUFFIX}"
DEBIAN_CACHE_IMAGE_NAME: "debian-${IMAGE_SUFFIX}"

@@ -35,7 +35,7 @@ TESTFLAGS := $(shell $(GO) test -race $(BUILDFLAGS) ./pkg/stringutils 2>&1 > /de
# N/B: This value is managed by Renovate, manual changes are
# possible, as long as they don't disturb the formatting
# (i.e. DO NOT ADD A 'v' prefix!)
GOLANGCI_LINT_VERSION := 1.63.4
GOLANGCI_LINT_VERSION := 1.64.5

default all: local-binary docs local-validate local-cross ## validate all checks, build and cross-build\nbinaries and docs

@@ -1 +1 @@
1.57.1
1.58.0-dev

@ -36,7 +36,6 @@ import (
|
|||
"github.com/containers/storage/pkg/system"
|
||||
"github.com/containers/storage/pkg/unshare"
|
||||
units "github.com/docker/go-units"
|
||||
"github.com/hashicorp/go-multierror"
|
||||
digest "github.com/opencontainers/go-digest"
|
||||
"github.com/opencontainers/selinux/go-selinux"
|
||||
"github.com/opencontainers/selinux/go-selinux/label"
|
||||
|
@ -419,7 +418,7 @@ func Init(home string, options graphdriver.Options) (graphdriver.Driver, error)
|
|||
|
||||
if !opts.skipMountHome {
|
||||
if err := mount.MakePrivate(home); err != nil {
|
||||
return nil, err
|
||||
return nil, fmt.Errorf("overlay: failed to make mount private: %w", err)
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -1343,7 +1342,7 @@ func (d *Driver) recreateSymlinks() error {
|
|||
return err
|
||||
}
|
||||
// Keep looping as long as we take some corrective action in each iteration
|
||||
var errs *multierror.Error
|
||||
var errs error
|
||||
madeProgress := true
|
||||
iterations := 0
|
||||
for madeProgress {
|
||||
|
@ -1359,7 +1358,7 @@ func (d *Driver) recreateSymlinks() error {
|
|||
// Read the "link" file under each layer to get the name of the symlink
|
||||
data, err := os.ReadFile(path.Join(d.dir(dir.Name()), "link"))
|
||||
if err != nil {
|
||||
errs = multierror.Append(errs, fmt.Errorf("reading name of symlink for %q: %w", dir.Name(), err))
|
||||
errs = errors.Join(errs, fmt.Errorf("reading name of symlink for %q: %w", dir.Name(), err))
|
||||
continue
|
||||
}
|
||||
linkPath := path.Join(d.home, linkDir, strings.Trim(string(data), "\n"))
|
||||
|
@ -1368,12 +1367,12 @@ func (d *Driver) recreateSymlinks() error {
|
|||
err = fileutils.Lexists(linkPath)
|
||||
if err != nil && os.IsNotExist(err) {
|
||||
if err := os.Symlink(path.Join("..", dir.Name(), "diff"), linkPath); err != nil {
|
||||
errs = multierror.Append(errs, err)
|
||||
errs = errors.Join(errs, err)
|
||||
continue
|
||||
}
|
||||
madeProgress = true
|
||||
} else if err != nil {
|
||||
errs = multierror.Append(errs, err)
|
||||
errs = errors.Join(errs, err)
|
||||
continue
|
||||
}
|
||||
}
|
||||
|
@ -1384,7 +1383,7 @@ func (d *Driver) recreateSymlinks() error {
|
|||
// that each symlink we have corresponds to one.
|
||||
links, err := os.ReadDir(linkDirFullPath)
|
||||
if err != nil {
|
||||
errs = multierror.Append(errs, err)
|
||||
errs = errors.Join(errs, err)
|
||||
continue
|
||||
}
|
||||
// Go through all of the symlinks in the "l" directory
|
||||
|
@ -1392,16 +1391,16 @@ func (d *Driver) recreateSymlinks() error {
|
|||
// Read the symlink's target, which should be "../$layer/diff"
|
||||
target, err := os.Readlink(filepath.Join(linkDirFullPath, link.Name()))
|
||||
if err != nil {
|
||||
errs = multierror.Append(errs, err)
|
||||
errs = errors.Join(errs, err)
|
||||
continue
|
||||
}
|
||||
targetComponents := strings.Split(target, string(os.PathSeparator))
|
||||
if len(targetComponents) != 3 || targetComponents[0] != ".." || targetComponents[2] != "diff" {
|
||||
errs = multierror.Append(errs, fmt.Errorf("link target of %q looks weird: %q", link, target))
|
||||
errs = errors.Join(errs, fmt.Errorf("link target of %q looks weird: %q", link, target))
|
||||
// force the link to be recreated on the next pass
|
||||
if err := os.Remove(filepath.Join(linkDirFullPath, link.Name())); err != nil {
|
||||
if !os.IsNotExist(err) {
|
||||
errs = multierror.Append(errs, fmt.Errorf("removing link %q: %w", link, err))
|
||||
errs = errors.Join(errs, fmt.Errorf("removing link %q: %w", link, err))
|
||||
} // else don’t report any error, but also don’t set madeProgress.
|
||||
continue
|
||||
}
|
||||
|
@ -1417,7 +1416,7 @@ func (d *Driver) recreateSymlinks() error {
|
|||
// NOTE: If two or more links point to the same target, we will update linkFile
|
||||
// with every value of link.Name(), and set madeProgress = true every time.
|
||||
if err := os.WriteFile(linkFile, []byte(link.Name()), 0o644); err != nil {
|
||||
errs = multierror.Append(errs, fmt.Errorf("correcting link for layer %s: %w", targetID, err))
|
||||
errs = errors.Join(errs, fmt.Errorf("correcting link for layer %s: %w", targetID, err))
|
||||
continue
|
||||
}
|
||||
madeProgress = true
|
||||
|
@ -1425,14 +1424,11 @@ func (d *Driver) recreateSymlinks() error {
|
|||
}
|
||||
iterations++
|
||||
if iterations >= maxIterations {
|
||||
errs = multierror.Append(errs, fmt.Errorf("reached %d iterations in overlay graph driver’s recreateSymlink, giving up", iterations))
|
||||
errs = errors.Join(errs, fmt.Errorf("reached %d iterations in overlay graph driver’s recreateSymlink, giving up", iterations))
|
||||
break
|
||||
}
|
||||
}
|
||||
if errs != nil {
|
||||
return errs.ErrorOrNil()
|
||||
}
|
||||
return nil
|
||||
return errs
|
||||
}
|
||||
|
||||
// Get creates and mounts the required file system for the given id and returns the mount path.
|
||||
|
@ -2103,17 +2099,16 @@ func (g *overlayFileGetter) Get(path string) (io.ReadCloser, error) {
|
|||
return nil, fmt.Errorf("%s: %w", path, os.ErrNotExist)
|
||||
}
|
||||
|
||||
func (g *overlayFileGetter) Close() error {
|
||||
var errs *multierror.Error
|
||||
func (g *overlayFileGetter) Close() (errs error) {
|
||||
for _, f := range g.composefsMounts {
|
||||
if err := f.Close(); err != nil {
|
||||
errs = multierror.Append(errs, err)
|
||||
errs = errors.Join(errs, err)
|
||||
}
|
||||
if err := unix.Rmdir(f.Name()); err != nil {
|
||||
errs = multierror.Append(errs, err)
|
||||
errs = errors.Join(errs, err)
|
||||
}
|
||||
}
|
||||
return errs.ErrorOrNil()
|
||||
return errs
|
||||
}
|
||||
|
||||
// newStagingDir creates a new staging directory and returns the path to it.
|
||||
|
|
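The overlay driver hunks above replace github.com/hashicorp/go-multierror with the standard library's errors.Join (Go 1.20+). A minimal sketch of the aggregation pattern, with illustrative closers:

package main

import (
	"errors"
	"fmt"
	"io"
	"strings"
)

// closeAll mirrors the rewritten Close methods: errors.Join ignores nil
// arguments and returns nil when every close succeeded, so no ErrorOrNil()
// unwrapping is needed.
func closeAll(closers ...io.Closer) error {
	var errs error
	for _, c := range closers {
		if err := c.Close(); err != nil {
			errs = errors.Join(errs, err)
		}
	}
	return errs
}

func main() {
	ok := io.NopCloser(strings.NewReader("data"))
	fmt.Println(closeAll(ok)) // <nil>
}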
common/vendor/github.com/containers/storage/drivers/quota/projectquota_supported.go (generated, vendored)
|
@ -190,7 +190,8 @@ func NewControl(basePath string) (*Control, error) {
|
|||
}
|
||||
|
||||
// SetQuota - assign a unique project id to directory and set the quota limits
|
||||
// for that project id
|
||||
// for that project id.
|
||||
// targetPath must exist, must be a directory, and must be empty.
|
||||
func (q *Control) SetQuota(targetPath string, quota Quota) error {
|
||||
var projectID uint32
|
||||
value, ok := q.quotas.Load(targetPath)
|
||||
|
@ -200,10 +201,20 @@ func (q *Control) SetQuota(targetPath string, quota Quota) error {
|
|||
if !ok {
|
||||
projectID = q.nextProjectID
|
||||
|
||||
// The directory we are setting an ID on must be empty, as
|
||||
// the ID will not be propagated to pre-existing subdirectories.
|
||||
dents, err := os.ReadDir(targetPath)
|
||||
if err != nil {
|
||||
return fmt.Errorf("reading directory %s: %w", targetPath, err)
|
||||
}
|
||||
if len(dents) > 0 {
|
||||
return fmt.Errorf("can only set project ID on empty directories, %s is not empty", targetPath)
|
||||
}
|
||||
|
||||
//
|
||||
// assign project id to new container directory
|
||||
//
|
||||
err := setProjectID(targetPath, projectID)
|
||||
err = setProjectID(targetPath, projectID)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
|
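The SetQuota hunk above adds a guard so a project ID is only assigned to an empty directory, because the ID would not propagate to pre-existing entries. A hedged sketch of that guard; setProjectID here is a caller-supplied stand-in for the real ioctl-based call:

package quotasketch

import (
	"fmt"
	"os"
)

// ensureEmptyThenAssign performs the same emptiness check before delegating
// to setProjectID (illustrative; the real package sets the project ID via a
// filesystem ioctl).
func ensureEmptyThenAssign(targetPath string, projectID uint32, setProjectID func(string, uint32) error) error {
	dents, err := os.ReadDir(targetPath)
	if err != nil {
		return fmt.Errorf("reading directory %s: %w", targetPath, err)
	}
	if len(dents) > 0 {
		return fmt.Errorf("can only set project ID on empty directories, %s is not empty", targetPath)
	}
	return setProjectID(targetPath, projectID)
}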
|
@ -6,6 +6,7 @@ import (
|
|||
"fmt"
|
||||
"io"
|
||||
"maps"
|
||||
"math/bits"
|
||||
"os"
|
||||
"path"
|
||||
"path/filepath"
|
||||
|
@ -26,7 +27,6 @@ import (
|
|||
"github.com/containers/storage/pkg/system"
|
||||
"github.com/containers/storage/pkg/tarlog"
|
||||
"github.com/containers/storage/pkg/truncindex"
|
||||
multierror "github.com/hashicorp/go-multierror"
|
||||
"github.com/klauspost/pgzip"
|
||||
digest "github.com/opencontainers/go-digest"
|
||||
"github.com/opencontainers/selinux/go-selinux"
|
||||
|
@ -47,11 +47,13 @@ const (
|
|||
|
||||
type layerLocations uint8
|
||||
|
||||
// The backing store is split in two json files, one (the volatile)
|
||||
// that is written without fsync() meaning it isn't as robust to
|
||||
// unclean shutdown
|
||||
// The backing store is split in three json files.
|
||||
// The volatile store is written without fsync() meaning it isn't as robust to unclean shutdown.
|
||||
// Optionally, an image store can be configured to store RO layers.
|
||||
// The stable store is used for the remaining layers that don't go into the other stores.
|
||||
const (
|
||||
stableLayerLocation layerLocations = 1 << iota
|
||||
imageStoreLayerLocation
|
||||
volatileLayerLocation
|
||||
|
||||
numLayerLocationIndex = iota
|
||||
|
@ -61,6 +63,10 @@ func layerLocationFromIndex(index int) layerLocations {
|
|||
return 1 << index
|
||||
}
|
||||
|
||||
func indexFromLayerLocation(location layerLocations) int {
|
||||
return bits.TrailingZeros(uint(location))
|
||||
}
|
||||
|
||||
// A Layer is a record of a copy-on-write layer that's stored by the lower
|
||||
// level graph driver.
|
||||
type Layer struct {
|
||||
|
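The layerLocations values above are single-bit flags, so a location can be converted to an index into the per-location jsonPath array and back. A self-contained restatement of that round trip (same shape as the hunk, standalone package name):

package layersketch

import "math/bits"

type layerLocations uint8

// Each location is one bit; the trailing iota gives the number of locations.
const (
	stableLayerLocation layerLocations = 1 << iota
	imageStoreLayerLocation
	volatileLayerLocation

	numLayerLocationIndex = iota // == 3
)

func layerLocationFromIndex(index int) layerLocations {
	return 1 << index
}

// indexFromLayerLocation inverts layerLocationFromIndex: 0b001->0, 0b010->1, 0b100->2.
func indexFromLayerLocation(location layerLocations) int {
	return bits.TrailingZeros(uint(location))
}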
@ -165,8 +171,8 @@ type Layer struct {
|
|||
// ReadOnly is true if this layer resides in a read-only layer store.
|
||||
ReadOnly bool `json:"-"`
|
||||
|
||||
// volatileStore is true if the container is from the volatile json file
|
||||
volatileStore bool `json:"-"`
|
||||
// location is the location of the store where the layer is present.
|
||||
location layerLocations `json:"-"`
|
||||
|
||||
// BigDataNames is a list of names of data items that we keep for the
|
||||
// convenience of the caller. They can be large, and are only in
|
||||
|
@ -431,14 +437,6 @@ type layerStore struct {
|
|||
driver drivers.Driver
|
||||
}
|
||||
|
||||
// The caller must hold r.inProcessLock for reading.
|
||||
func layerLocation(l *Layer) layerLocations {
|
||||
if l.volatileStore {
|
||||
return volatileLayerLocation
|
||||
}
|
||||
return stableLayerLocation
|
||||
}
|
||||
|
||||
func copyLayer(l *Layer) *Layer {
|
||||
return &Layer{
|
||||
ID: l.ID,
|
||||
|
@ -456,7 +454,7 @@ func copyLayer(l *Layer) *Layer {
|
|||
TOCDigest: l.TOCDigest,
|
||||
CompressionType: l.CompressionType,
|
||||
ReadOnly: l.ReadOnly,
|
||||
volatileStore: l.volatileStore,
|
||||
location: l.location,
|
||||
BigDataNames: copySlicePreferringNil(l.BigDataNames),
|
||||
Flags: copyMapPreferringNil(l.Flags),
|
||||
UIDMap: copySlicePreferringNil(l.UIDMap),
|
||||
|
@ -659,7 +657,11 @@ func (r *layerStore) layersModified() (lockfile.LastWrite, bool, error) {
|
|||
// modified manually, then we have to reload the storage in
|
||||
// any case.
|
||||
for locationIndex := 0; locationIndex < numLayerLocationIndex; locationIndex++ {
|
||||
info, err := os.Stat(r.jsonPath[locationIndex])
|
||||
rpath := r.jsonPath[locationIndex]
|
||||
if rpath == "" {
|
||||
continue
|
||||
}
|
||||
info, err := os.Stat(rpath)
|
||||
if err != nil && !os.IsNotExist(err) {
|
||||
return lockfile.LastWrite{}, false, fmt.Errorf("stat layers file: %w", err)
|
||||
}
|
||||
|
@ -795,6 +797,9 @@ func (r *layerStore) load(lockedForWriting bool) (bool, error) {
|
|||
for locationIndex := 0; locationIndex < numLayerLocationIndex; locationIndex++ {
|
||||
location := layerLocationFromIndex(locationIndex)
|
||||
rpath := r.jsonPath[locationIndex]
|
||||
if rpath == "" {
|
||||
continue
|
||||
}
|
||||
info, err := os.Stat(rpath)
|
||||
if err != nil {
|
||||
if !os.IsNotExist(err) {
|
||||
|
@ -821,9 +826,7 @@ func (r *layerStore) load(lockedForWriting bool) (bool, error) {
|
|||
continue // skip invalid duplicated layer
|
||||
}
|
||||
// Remember where the layer came from
|
||||
if location == volatileLayerLocation {
|
||||
layer.volatileStore = true
|
||||
}
|
||||
layer.location = location
|
||||
layers = append(layers, layer)
|
||||
ids[layer.ID] = layer
|
||||
}
|
||||
|
@ -844,7 +847,7 @@ func (r *layerStore) load(lockedForWriting bool) (bool, error) {
|
|||
if conflict, ok := names[name]; ok {
|
||||
r.removeName(conflict, name)
|
||||
errorToResolveBySaving = ErrDuplicateLayerNames
|
||||
modifiedLocations |= layerLocation(conflict)
|
||||
modifiedLocations |= conflict.location
|
||||
}
|
||||
names[name] = layers[n]
|
||||
}
|
||||
|
@ -937,10 +940,10 @@ func (r *layerStore) load(lockedForWriting bool) (bool, error) {
|
|||
// Don't return the error immediately, because deleteInternal does not saveLayers();
|
||||
// Even if deleting one incomplete layer fails, call saveLayers() so that other possible successfully
|
||||
// deleted incomplete layers have their metadata correctly removed.
|
||||
incompleteDeletionErrors = multierror.Append(incompleteDeletionErrors,
|
||||
incompleteDeletionErrors = errors.Join(incompleteDeletionErrors,
|
||||
fmt.Errorf("deleting layer %#v: %w", layer.ID, err))
|
||||
}
|
||||
modifiedLocations |= layerLocation(layer)
|
||||
modifiedLocations |= layer.location
|
||||
}
|
||||
if err := r.saveLayers(modifiedLocations); err != nil {
|
||||
return false, err
|
||||
|
@ -1009,7 +1012,7 @@ func (r *layerStore) save(saveLocations layerLocations) error {
|
|||
// The caller must hold r.lockfile locked for writing.
|
||||
// The caller must hold r.inProcessLock for WRITING.
|
||||
func (r *layerStore) saveFor(modifiedLayer *Layer) error {
|
||||
return r.save(layerLocation(modifiedLayer))
|
||||
return r.save(modifiedLayer.location)
|
||||
}
|
||||
|
||||
// The caller must hold r.lockfile locked for writing.
|
||||
|
@ -1034,12 +1037,15 @@ func (r *layerStore) saveLayers(saveLocations layerLocations) error {
|
|||
continue
|
||||
}
|
||||
rpath := r.jsonPath[locationIndex]
|
||||
if rpath == "" {
|
||||
return fmt.Errorf("internal error: no path for location %v", location)
|
||||
}
|
||||
if err := os.MkdirAll(filepath.Dir(rpath), 0o700); err != nil {
|
||||
return err
|
||||
}
|
||||
subsetLayers := make([]*Layer, 0, len(r.layers))
|
||||
for _, layer := range r.layers {
|
||||
if layerLocation(layer) == location {
|
||||
if layer.location == location {
|
||||
subsetLayers = append(subsetLayers, layer)
|
||||
}
|
||||
}
|
||||
|
@ -1139,12 +1145,17 @@ func (s *store) newLayerStore(rundir, layerdir, imagedir string, driver drivers.
|
|||
if transient {
|
||||
volatileDir = rundir
|
||||
}
|
||||
layersImageDir := ""
|
||||
if imagedir != "" {
|
||||
layersImageDir = filepath.Join(imagedir, "layers.json")
|
||||
}
|
||||
rlstore := layerStore{
|
||||
lockfile: newMultipleLockFile(lockFiles...),
|
||||
mountsLockfile: mountsLockfile,
|
||||
rundir: rundir,
|
||||
jsonPath: [numLayerLocationIndex]string{
|
||||
filepath.Join(layerdir, "layers.json"),
|
||||
layersImageDir,
|
||||
filepath.Join(volatileDir, "volatile-layers.json"),
|
||||
},
|
||||
layerdir: layerdir,
|
||||
|
@ -1182,6 +1193,7 @@ func newROLayerStore(rundir string, layerdir string, driver drivers.Driver) (roL
|
|||
rundir: rundir,
|
||||
jsonPath: [numLayerLocationIndex]string{
|
||||
filepath.Join(layerdir, "layers.json"),
|
||||
"",
|
||||
filepath.Join(layerdir, "volatile-layers.json"),
|
||||
},
|
||||
layerdir: layerdir,
|
||||
|
@ -1330,6 +1342,17 @@ func (r *layerStore) PutAdditionalLayer(id string, parentLayer *Layer, names []s
|
|||
return copyLayer(layer), nil
|
||||
}
|
||||
|
||||
func (r *layerStore) pickStoreLocation(volatile, writeable bool) layerLocations {
|
||||
switch {
|
||||
case volatile:
|
||||
return volatileLayerLocation
|
||||
case !writeable && r.jsonPath[indexFromLayerLocation(imageStoreLayerLocation)] != "":
|
||||
return imageStoreLayerLocation
|
||||
default:
|
||||
return stableLayerLocation
|
||||
}
|
||||
}
|
||||
|
||||
// Requires startWriting.
|
||||
func (r *layerStore) create(id string, parentLayer *Layer, names []string, mountLabel string, options map[string]string, moreOptions *LayerOptions, writeable bool, diff io.Reader, slo *stagedLayerOptions) (layer *Layer, size int64, err error) {
|
||||
if moreOptions == nil {
|
||||
|
@ -1422,7 +1445,7 @@ func (r *layerStore) create(id string, parentLayer *Layer, names []string, mount
|
|||
UIDMap: copySlicePreferringNil(moreOptions.UIDMap),
|
||||
GIDMap: copySlicePreferringNil(moreOptions.GIDMap),
|
||||
BigDataNames: []string{},
|
||||
volatileStore: moreOptions.Volatile,
|
||||
location: r.pickStoreLocation(moreOptions.Volatile, writeable),
|
||||
}
|
||||
layer.Flags[incompleteFlag] = true
|
||||
|
||||
|
@ -2256,33 +2279,33 @@ func (r *layerStore) Diff(from, to string, options *DiffOptions) (io.ReadCloser,
|
|||
// but they modify in-memory state.
|
||||
fgetter, err := r.newFileGetter(to)
|
||||
if err != nil {
|
||||
errs := multierror.Append(nil, fmt.Errorf("creating file-getter: %w", err))
|
||||
errs := fmt.Errorf("creating file-getter: %w", err)
|
||||
if err := decompressor.Close(); err != nil {
|
||||
errs = multierror.Append(errs, fmt.Errorf("closing decompressor: %w", err))
|
||||
errs = errors.Join(errs, fmt.Errorf("closing decompressor: %w", err))
|
||||
}
|
||||
if err := tsfile.Close(); err != nil {
|
||||
errs = multierror.Append(errs, fmt.Errorf("closing tarstream headers: %w", err))
|
||||
errs = errors.Join(errs, fmt.Errorf("closing tarstream headers: %w", err))
|
||||
}
|
||||
return nil, errs.ErrorOrNil()
|
||||
return nil, errs
|
||||
}
|
||||
|
||||
tarstream := asm.NewOutputTarStream(fgetter, metadata)
|
||||
rc := ioutils.NewReadCloserWrapper(tarstream, func() error {
|
||||
var errs *multierror.Error
|
||||
var errs error
|
||||
if err := decompressor.Close(); err != nil {
|
||||
errs = multierror.Append(errs, fmt.Errorf("closing decompressor: %w", err))
|
||||
errs = errors.Join(errs, fmt.Errorf("closing decompressor: %w", err))
|
||||
}
|
||||
if err := tsfile.Close(); err != nil {
|
||||
errs = multierror.Append(errs, fmt.Errorf("closing tarstream headers: %w", err))
|
||||
errs = errors.Join(errs, fmt.Errorf("closing tarstream headers: %w", err))
|
||||
}
|
||||
if err := tarstream.Close(); err != nil {
|
||||
errs = multierror.Append(errs, fmt.Errorf("closing reconstructed tarstream: %w", err))
|
||||
errs = errors.Join(errs, fmt.Errorf("closing reconstructed tarstream: %w", err))
|
||||
}
|
||||
if err := fgetter.Close(); err != nil {
|
||||
errs = multierror.Append(errs, fmt.Errorf("closing file-getter: %w", err))
|
||||
errs = errors.Join(errs, fmt.Errorf("closing file-getter: %w", err))
|
||||
}
|
||||
if errs != nil {
|
||||
return errs.ErrorOrNil()
|
||||
return errs
|
||||
}
|
||||
return nil
|
||||
})
|
||||
|
|
|
@ -16,6 +16,7 @@ import (
|
|||
"strings"
|
||||
"sync"
|
||||
"syscall"
|
||||
"time"
|
||||
|
||||
"github.com/containers/storage/pkg/fileutils"
|
||||
"github.com/containers/storage/pkg/idtools"
|
||||
|
@ -67,6 +68,8 @@ type (
|
|||
CopyPass bool
|
||||
// ForceMask, if set, indicates the permission mask used for created files.
|
||||
ForceMask *os.FileMode
|
||||
// Timestamp, if set, will be set in each header as create/mod/access time
|
||||
Timestamp *time.Time
|
||||
}
|
||||
)
|
||||
|
||||
|
@ -78,7 +81,6 @@ const (
|
|||
windows = "windows"
|
||||
darwin = "darwin"
|
||||
freebsd = "freebsd"
|
||||
linux = "linux"
|
||||
)
|
||||
|
||||
var xattrsToIgnore = map[string]interface{}{
|
||||
|
@ -179,6 +181,7 @@ func DecompressStream(archive io.Reader) (_ io.ReadCloser, Err error) {
|
|||
|
||||
defer func() {
|
||||
if Err != nil {
|
||||
// In the normal case, the buffer is embedded in the ReadCloser return.
|
||||
p.Put(buf)
|
||||
}
|
||||
}()
|
||||
|
@ -475,7 +478,7 @@ type TarWhiteoutConverter interface {
|
|||
ConvertReadWithHandler(*tar.Header, string, TarWhiteoutHandler) (bool, error)
|
||||
}
|
||||
|
||||
type tarAppender struct {
|
||||
type tarWriter struct {
|
||||
TarWriter *tar.Writer
|
||||
Buffer *bufio.Writer
|
||||
|
||||
|
@ -494,15 +497,19 @@ type tarAppender struct {
|
|||
// from the traditional behavior/format to get features like subsecond
|
||||
// precision in timestamps.
|
||||
CopyPass bool
|
||||
|
||||
// Timestamp, if set, will be set in each header as create/mod/access time
|
||||
Timestamp *time.Time
|
||||
}
|
||||
|
||||
func newTarAppender(idMapping *idtools.IDMappings, writer io.Writer, chownOpts *idtools.IDPair) *tarAppender {
|
||||
return &tarAppender{
|
||||
func newTarWriter(idMapping *idtools.IDMappings, writer io.Writer, chownOpts *idtools.IDPair, timestamp *time.Time) *tarWriter {
|
||||
return &tarWriter{
|
||||
SeenFiles: make(map[uint64]string),
|
||||
TarWriter: tar.NewWriter(writer),
|
||||
Buffer: pools.BufioWriter32KPool.Get(nil),
|
||||
IDMappings: idMapping,
|
||||
ChownOpts: chownOpts,
|
||||
Timestamp: timestamp,
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -521,8 +528,8 @@ func canonicalTarName(name string, isDir bool) (string, error) {
|
|||
return name, nil
|
||||
}
|
||||
|
||||
// addTarFile adds to the tar archive a file from `path` as `name`
|
||||
func (ta *tarAppender) addTarFile(path, name string) error {
|
||||
// addFile adds a file from `path` as `name` to the tar archive.
|
||||
func (ta *tarWriter) addFile(path, name string) error {
|
||||
fi, err := os.Lstat(path)
|
||||
if err != nil {
|
||||
return err
|
||||
|
@ -600,6 +607,13 @@ func (ta *tarAppender) addTarFile(path, name string) error {
|
|||
hdr.Gname = ""
|
||||
}
|
||||
|
||||
// if override timestamp set, replace all times with this
|
||||
if ta.Timestamp != nil {
|
||||
hdr.ModTime = *ta.Timestamp
|
||||
hdr.AccessTime = *ta.Timestamp
|
||||
hdr.ChangeTime = *ta.Timestamp
|
||||
}
|
||||
|
||||
maybeTruncateHeaderModTime(hdr)
|
||||
|
||||
if ta.WhiteoutConverter != nil {
|
||||
|
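The new Timestamp option above overrides every header time so archives can be made reproducible. A minimal sketch of that substitution on an archive/tar header, outside the tarWriter plumbing:

package tarsketch

import (
	"archive/tar"
	"time"
)

// applyOverrideTimestamp sets mod/access/change time to a single value when
// an override is provided, matching the addFile behaviour above.
func applyOverrideTimestamp(hdr *tar.Header, ts *time.Time) {
	if ts == nil {
		return
	}
	hdr.ModTime = *ts
	hdr.AccessTime = *ts
	hdr.ChangeTime = *ts
}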
@ -650,7 +664,7 @@ func (ta *tarAppender) addTarFile(path, name string) error {
|
|||
return nil
|
||||
}
|
||||
|
||||
func createTarFile(path, extractDir string, hdr *tar.Header, reader io.Reader, Lchown bool, chownOpts *idtools.IDPair, inUserns, ignoreChownErrors bool, forceMask *os.FileMode, buffer []byte) error {
|
||||
func extractTarFileEntry(path, extractDir string, hdr *tar.Header, reader io.Reader, Lchown bool, chownOpts *idtools.IDPair, inUserns, ignoreChownErrors bool, forceMask *os.FileMode, buffer []byte) error {
|
||||
// hdr.Mode is in linux format, which we can use for sycalls,
|
||||
// but for os.Foo() calls we need the mode converted to os.FileMode,
|
||||
// so use hdrInfo.Mode() (they differ for e.g. setuid bits)
|
||||
|
@ -862,10 +876,11 @@ func TarWithOptions(srcPath string, options *TarOptions) (io.ReadCloser, error)
|
|||
}
|
||||
|
||||
go func() {
|
||||
ta := newTarAppender(
|
||||
ta := newTarWriter(
|
||||
idtools.NewIDMappingsFromMaps(options.UIDMaps, options.GIDMaps),
|
||||
compressWriter,
|
||||
options.ChownOpts,
|
||||
options.Timestamp,
|
||||
)
|
||||
ta.WhiteoutConverter = GetWhiteoutConverter(options.WhiteoutFormat, options.WhiteoutData)
|
||||
ta.CopyPass = options.CopyPass
|
||||
|
@ -1002,7 +1017,7 @@ func TarWithOptions(srcPath string, options *TarOptions) (io.ReadCloser, error)
|
|||
relFilePath = strings.Replace(relFilePath, include, replacement, 1)
|
||||
}
|
||||
|
||||
if err := ta.addTarFile(filePath, relFilePath); err != nil {
|
||||
if err := ta.addFile(filePath, relFilePath); err != nil {
|
||||
logrus.Errorf("Can't add file %s to tar: %s", filePath, err)
|
||||
// if pipe is broken, stop writing tar stream to it
|
||||
if err == io.ErrClosedPipe {
|
||||
|
@ -1137,7 +1152,7 @@ loop:
|
|||
chownOpts = &idtools.IDPair{UID: hdr.Uid, GID: hdr.Gid}
|
||||
}
|
||||
|
||||
if err = createTarFile(path, dest, hdr, trBuf, doChown, chownOpts, options.InUserNS, options.IgnoreChownErrors, options.ForceMask, buffer); err != nil {
|
||||
if err = extractTarFileEntry(path, dest, hdr, trBuf, doChown, chownOpts, options.InUserNS, options.IgnoreChownErrors, options.ForceMask, buffer); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
|
|
|
@ -452,7 +452,7 @@ func ChangesSize(newDir string, changes []Change) int64 {
|
|||
func ExportChanges(dir string, changes []Change, uidMaps, gidMaps []idtools.IDMap) (io.ReadCloser, error) {
|
||||
reader, writer := io.Pipe()
|
||||
go func() {
|
||||
ta := newTarAppender(idtools.NewIDMappingsFromMaps(uidMaps, gidMaps), writer, nil)
|
||||
ta := newTarWriter(idtools.NewIDMappingsFromMaps(uidMaps, gidMaps), writer, nil, nil)
|
||||
|
||||
// this buffer is needed for the duration of this piped stream
|
||||
defer pools.BufioWriter32KPool.Put(ta.Buffer)
|
||||
|
@ -481,7 +481,7 @@ func ExportChanges(dir string, changes []Change, uidMaps, gidMaps []idtools.IDMa
|
|||
}
|
||||
} else {
|
||||
path := filepath.Join(dir, change.Path)
|
||||
if err := ta.addTarFile(path, change.Path[1:]); err != nil {
|
||||
if err := ta.addFile(path, change.Path[1:]); err != nil {
|
||||
logrus.Debugf("Can't add file %s to tar: %s", path, err)
|
||||
}
|
||||
}
|
||||
|
|
|
@ -107,7 +107,7 @@ func UnpackLayer(dest string, layer io.Reader, options *TarOptions) (size int64,
|
|||
}
|
||||
defer os.RemoveAll(aufsTempdir)
|
||||
}
|
||||
if err := createTarFile(filepath.Join(aufsTempdir, basename), dest, hdr, tr, true, nil, options.InUserNS, options.IgnoreChownErrors, options.ForceMask, buffer); err != nil {
|
||||
if err := extractTarFileEntry(filepath.Join(aufsTempdir, basename), dest, hdr, tr, true, nil, options.InUserNS, options.IgnoreChownErrors, options.ForceMask, buffer); err != nil {
|
||||
return 0, err
|
||||
}
|
||||
}
|
||||
|
@ -176,7 +176,7 @@ func UnpackLayer(dest string, layer io.Reader, options *TarOptions) (size int64,
|
|||
// We always reset the immutable flag (if present) to allow metadata
|
||||
// changes and to allow directory modification. The flag will be
|
||||
// re-applied based on the contents of hdr either at the end for
|
||||
// directories or in createTarFile otherwise.
|
||||
// directories or in extractTarFileEntry otherwise.
|
||||
if fi, err := os.Lstat(path); err == nil {
|
||||
if err := resetImmutable(path, &fi); err != nil {
|
||||
return 0, err
|
||||
|
@ -212,7 +212,7 @@ func UnpackLayer(dest string, layer io.Reader, options *TarOptions) (size int64,
|
|||
return 0, err
|
||||
}
|
||||
|
||||
if err := createTarFile(path, dest, srcHdr, srcData, true, nil, options.InUserNS, options.IgnoreChownErrors, options.ForceMask, buffer); err != nil {
|
||||
if err := extractTarFileEntry(path, dest, srcHdr, srcData, true, nil, options.InUserNS, options.IgnoreChownErrors, options.ForceMask, buffer); err != nil {
|
||||
return 0, err
|
||||
}
|
||||
|
||||
|
|
common/vendor/github.com/containers/storage/pkg/fileutils/reflink_linux.go (generated, vendored, new file)
|
@ -0,0 +1,20 @@
|
|||
package fileutils
|
||||
|
||||
import (
|
||||
"io"
|
||||
"os"
|
||||
|
||||
"golang.org/x/sys/unix"
|
||||
)
|
||||
|
||||
// ReflinkOrCopy attempts to reflink the source to the destination fd.
|
||||
// If reflinking fails or is unsupported, it falls back to io.Copy().
|
||||
func ReflinkOrCopy(src, dst *os.File) error {
|
||||
err := unix.IoctlFileClone(int(dst.Fd()), int(src.Fd()))
|
||||
if err == nil {
|
||||
return nil
|
||||
}
|
||||
|
||||
_, err = io.Copy(dst, src)
|
||||
return err
|
||||
}
|
|
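ReflinkOrCopy above tries an ioctl-based clone on Linux and falls back to a plain copy elsewhere. A hedged usage sketch; the file names are examples and the import path assumes the vendored location shown in the file header:

package main

import (
	"log"
	"os"

	"github.com/containers/storage/pkg/fileutils"
)

func main() {
	src, err := os.Open("layer.tar") // example input
	if err != nil {
		log.Fatal(err)
	}
	defer src.Close()

	dst, err := os.Create("layer-copy.tar") // example output
	if err != nil {
		log.Fatal(err)
	}
	defer dst.Close()

	// Reflinks when the filesystem supports it (e.g. btrfs, XFS with reflink),
	// otherwise degrades to a byte-for-byte io.Copy.
	if err := fileutils.ReflinkOrCopy(src, dst); err != nil {
		log.Fatal(err)
	}
}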
@ -1,15 +1,15 @@
|
|||
//go:build !linux
|
||||
|
||||
package reflink
|
||||
package fileutils
|
||||
|
||||
import (
|
||||
"io"
|
||||
"os"
|
||||
)
|
||||
|
||||
// LinkOrCopy attempts to reflink the source to the destination fd.
|
||||
// ReflinkOrCopy attempts to reflink the source to the destination fd.
|
||||
// If reflinking fails or is unsupported, it falls back to io.Copy().
|
||||
func LinkOrCopy(src, dst *os.File) error {
|
||||
func ReflinkOrCopy(src, dst *os.File) error {
|
||||
_, err := io.Copy(dst, src)
|
||||
return err
|
||||
}
|
|
@ -98,7 +98,7 @@ func IsSetID(path string, modeid os.FileMode, capid capability.Cap) (bool, error
|
|||
return cap.Get(capability.EFFECTIVE, capid), nil
|
||||
}
|
||||
|
||||
func (c *Cmd) Start() error {
|
||||
func (c *Cmd) Start() (retErr error) {
|
||||
runtime.LockOSThread()
|
||||
defer runtime.UnlockOSThread()
|
||||
|
||||
|
@ -167,6 +167,15 @@ func (c *Cmd) Start() error {
|
|||
return err
|
||||
}
|
||||
|
||||
// If the function fails from here, we need to make sure the
|
||||
// child process is killed and properly cleaned up.
|
||||
defer func() {
|
||||
if retErr != nil {
|
||||
_ = c.Cmd.Process.Kill()
|
||||
_ = c.Cmd.Wait()
|
||||
}
|
||||
}()
|
||||
|
||||
// Close the ends of the pipes that the parent doesn't need.
|
||||
continueRead.Close()
|
||||
continueRead = nil
|
||||
|
|
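The unshare hunk above switches Start to a named error return so a deferred block can kill and reap the child whenever a later step fails. A generic sketch of the pattern, with a placeholder configure step:

package unsharesketch

import (
	"fmt"
	"os/exec"
)

// startThenConfigure starts cmd and, if any later step fails, kills and waits
// on the child so it is not leaked as a zombie.
func startThenConfigure(cmd *exec.Cmd, configure func() error) (retErr error) {
	if err := cmd.Start(); err != nil {
		return err
	}
	defer func() {
		if retErr != nil {
			_ = cmd.Process.Kill()
			_ = cmd.Wait()
		}
	}()
	if err := configure(); err != nil {
		return fmt.Errorf("configuring child: %w", err)
	}
	return nil
}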
|
@ -18,6 +18,7 @@ import (
|
|||
|
||||
// register all of the built-in drivers
|
||||
_ "github.com/containers/storage/drivers/register"
|
||||
"golang.org/x/sync/errgroup"
|
||||
|
||||
drivers "github.com/containers/storage/drivers"
|
||||
"github.com/containers/storage/internal/dedup"
|
||||
|
@ -30,7 +31,6 @@ import (
|
|||
"github.com/containers/storage/pkg/stringutils"
|
||||
"github.com/containers/storage/pkg/system"
|
||||
"github.com/containers/storage/types"
|
||||
"github.com/hashicorp/go-multierror"
|
||||
digest "github.com/opencontainers/go-digest"
|
||||
"github.com/opencontainers/selinux/go-selinux/label"
|
||||
"github.com/sirupsen/logrus"
|
||||
|
@ -2744,7 +2744,7 @@ func (s *store) DeleteContainer(id string) error {
|
|||
}
|
||||
}
|
||||
|
||||
var wg multierror.Group
|
||||
var wg errgroup.Group
|
||||
|
||||
middleDir := s.graphDriverName + "-containers"
|
||||
|
||||
|
@ -2759,7 +2759,7 @@ func (s *store) DeleteContainer(id string) error {
|
|||
})
|
||||
|
||||
if multierr := wg.Wait(); multierr != nil {
|
||||
return multierr.ErrorOrNil()
|
||||
return multierr
|
||||
}
|
||||
return s.containerStore.Delete(id)
|
||||
})
|
||||
|
|
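DeleteContainer above moves from multierror.Group to errgroup.Group; Wait now returns the first non-nil error instead of an aggregate needing ErrorOrNil. A minimal sketch with illustrative cleanup tasks:

package main

import (
	"fmt"
	"os"

	"golang.org/x/sync/errgroup"
)

func main() {
	var wg errgroup.Group
	for _, dir := range []string{"/tmp/example-a", "/tmp/example-b"} {
		dir := dir // capture for Go versions before 1.22
		wg.Go(func() error {
			return os.RemoveAll(dir)
		})
	}
	// Wait blocks until all goroutines finish and returns the first error.
	if err := wg.Wait(); err != nil {
		fmt.Println("cleanup failed:", err)
	}
}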
|
@ -15,27 +15,30 @@ type Program interface {
|
|||
// ProgramFunc is a type of function that initializes programs based on arguments.
|
||||
type ProgramFunc func(args ...string) Program
|
||||
|
||||
// NewShellProgramFunc creates programs that are executed in a Shell.
|
||||
func NewShellProgramFunc(name string) ProgramFunc {
|
||||
return NewShellProgramFuncWithEnv(name, nil)
|
||||
}
|
||||
|
||||
// NewShellProgramFuncWithEnv creates programs that are executed in a Shell with environment variables
|
||||
func NewShellProgramFuncWithEnv(name string, env *map[string]string) ProgramFunc {
|
||||
// NewShellProgramFunc creates a [ProgramFunc] to run command in a [Shell].
|
||||
func NewShellProgramFunc(command string) ProgramFunc {
|
||||
return func(args ...string) Program {
|
||||
return &Shell{cmd: createProgramCmdRedirectErr(name, args, env)}
|
||||
return createProgramCmdRedirectErr(command, args, nil)
|
||||
}
|
||||
}
|
||||
|
||||
func createProgramCmdRedirectErr(commandName string, args []string, env *map[string]string) *exec.Cmd {
|
||||
programCmd := exec.Command(commandName, args...)
|
||||
// NewShellProgramFuncWithEnv creates a [ProgramFunc] to run command
|
||||
|
||||
func NewShellProgramFuncWithEnv(command string, env *map[string]string) ProgramFunc {
|
||||
return func(args ...string) Program {
|
||||
return createProgramCmdRedirectErr(command, args, env)
|
||||
}
|
||||
}
|
||||
|
||||
func createProgramCmdRedirectErr(command string, args []string, env *map[string]string) *Shell {
|
||||
ec := exec.Command(command, args...)
|
||||
if env != nil {
|
||||
for k, v := range *env {
|
||||
programCmd.Env = append(programCmd.Environ(), k+"="+v)
|
||||
ec.Env = append(ec.Environ(), k+"="+v)
|
||||
}
|
||||
}
|
||||
programCmd.Stderr = os.Stderr
|
||||
return programCmd
|
||||
ec.Stderr = os.Stderr
|
||||
return &Shell{cmd: ec}
|
||||
}
|
||||
|
||||
// Shell invokes shell commands to talk with a remote credentials-helper.
|
||||
|
|
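With the change above, NewShellProgramFunc takes the helper command directly and the Shell wrapping happens inside createProgramCmdRedirectErr. A hedged usage sketch, assuming the usual github.com/docker/docker-credential-helpers/client import path and an example helper name and registry URL:

package main

import (
	"fmt"
	"log"

	"github.com/docker/docker-credential-helpers/client"
)

func main() {
	// Invokes "docker-credential-secretservice get" with the server URL on stdin.
	p := client.NewShellProgramFunc("docker-credential-secretservice")
	creds, err := client.Get(p, "https://index.docker.io/v1/")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("username:", creds.Username)
}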
|
@ -2,7 +2,9 @@
|
|||
# This file lists all contributors to the repository.
|
||||
# See hack/generate-authors.sh to make modifications.
|
||||
|
||||
7sunarni <710720732@qq.com>
|
||||
Aanand Prasad <aanand.prasad@gmail.com>
|
||||
Aarni Koskela <akx@iki.fi>
|
||||
Aaron Davidson <aaron@databricks.com>
|
||||
Aaron Feng <aaron.feng@gmail.com>
|
||||
Aaron Hnatiw <aaron@griddio.com>
|
||||
|
@ -11,6 +13,7 @@ Aaron L. Xu <liker.xu@foxmail.com>
|
|||
Aaron Lehmann <alehmann@netflix.com>
|
||||
Aaron Welch <welch@packet.net>
|
||||
Aaron Yoshitake <airandfingers@gmail.com>
|
||||
Abdur Rehman <abdur_rehman@mentor.com>
|
||||
Abel Muiño <amuino@gmail.com>
|
||||
Abhijeet Kasurde <akasurde@redhat.com>
|
||||
Abhinandan Prativadi <aprativadi@gmail.com>
|
||||
|
@ -24,9 +27,11 @@ Adam Avilla <aavilla@yp.com>
|
|||
Adam Dobrawy <naczelnik@jawnosc.tk>
|
||||
Adam Eijdenberg <adam.eijdenberg@gmail.com>
|
||||
Adam Kunk <adam.kunk@tiaa-cref.org>
|
||||
Adam Lamers <adam.lamers@wmsdev.pl>
|
||||
Adam Miller <admiller@redhat.com>
|
||||
Adam Mills <adam@armills.info>
|
||||
Adam Pointer <adam.pointer@skybettingandgaming.com>
|
||||
Adam Simon <adamsimon85100@gmail.com>
|
||||
Adam Singer <financeCoding@gmail.com>
|
||||
Adam Thornton <adam.thornton@maryville.com>
|
||||
Adam Walz <adam@adamwalz.net>
|
||||
|
@ -119,6 +124,7 @@ amangoel <amangoel@gmail.com>
|
|||
Amen Belayneh <amenbelayneh@gmail.com>
|
||||
Ameya Gawde <agawde@mirantis.com>
|
||||
Amir Goldstein <amir73il@aquasec.com>
|
||||
AmirBuddy <badinlu.amirhossein@gmail.com>
|
||||
Amit Bakshi <ambakshi@gmail.com>
|
||||
Amit Krishnan <amit.krishnan@oracle.com>
|
||||
Amit Shukla <amit.shukla@docker.com>
|
||||
|
@ -168,6 +174,7 @@ Andrey Kolomentsev <andrey.kolomentsev@docker.com>
|
|||
Andrey Petrov <andrey.petrov@shazow.net>
|
||||
Andrey Stolbovsky <andrey.stolbovsky@gmail.com>
|
||||
André Martins <aanm90@gmail.com>
|
||||
Andrés Maldonado <maldonado@codelutin.com>
|
||||
Andy Chambers <anchambers@paypal.com>
|
||||
andy diller <dillera@gmail.com>
|
||||
Andy Goldstein <agoldste@redhat.com>
|
||||
|
@ -219,6 +226,7 @@ Artur Meyster <arthurfbi@yahoo.com>
|
|||
Arun Gupta <arun.gupta@gmail.com>
|
||||
Asad Saeeduddin <masaeedu@gmail.com>
|
||||
Asbjørn Enge <asbjorn@hanafjedle.net>
|
||||
Ashly Mathew <ashly.mathew@sap.com>
|
||||
Austin Vazquez <macedonv@amazon.com>
|
||||
averagehuman <averagehuman@users.noreply.github.com>
|
||||
Avi Das <andas222@gmail.com>
|
||||
|
@ -345,6 +353,7 @@ Chance Zibolski <chance.zibolski@gmail.com>
|
|||
Chander Govindarajan <chandergovind@gmail.com>
|
||||
Chanhun Jeong <keyolk@gmail.com>
|
||||
Chao Wang <wangchao.fnst@cn.fujitsu.com>
|
||||
Charity Kathure <ckathure@microsoft.com>
|
||||
Charles Chan <charleswhchan@users.noreply.github.com>
|
||||
Charles Hooper <charles.hooper@dotcloud.com>
|
||||
Charles Law <claw@conduce.com>
|
||||
|
@ -480,6 +489,7 @@ Daniel Farrell <dfarrell@redhat.com>
|
|||
Daniel Garcia <daniel@danielgarcia.info>
|
||||
Daniel Gasienica <daniel@gasienica.ch>
|
||||
Daniel Grunwell <mwgrunny@gmail.com>
|
||||
Daniel Guns <danbguns@gmail.com>
|
||||
Daniel Helfand <helfand.4@gmail.com>
|
||||
Daniel Hiltgen <daniel.hiltgen@docker.com>
|
||||
Daniel J Walsh <dwalsh@redhat.com>
|
||||
|
@ -763,6 +773,7 @@ Frank Macreery <frank@macreery.com>
|
|||
Frank Rosquin <frank.rosquin+github@gmail.com>
|
||||
Frank Villaro-Dixon <frank.villarodixon@merkle.com>
|
||||
Frank Yang <yyb196@gmail.com>
|
||||
François Scala <github@arcenik.net>
|
||||
Fred Lifton <fred.lifton@docker.com>
|
||||
Frederick F. Kautz IV <fkautz@redhat.com>
|
||||
Frederico F. de Oliveira <FreddieOliveira@users.noreply.github.com>
|
||||
|
@ -798,6 +809,7 @@ GennadySpb <lipenkov@gmail.com>
|
|||
Geoff Levand <geoff@infradead.org>
|
||||
Geoffrey Bachelet <grosfrais@gmail.com>
|
||||
Geon Kim <geon0250@gmail.com>
|
||||
George Adams <georgeadams1995@gmail.com>
|
||||
George Kontridze <george@bugsnag.com>
|
||||
George Ma <mayangang@outlook.com>
|
||||
George MacRorie <gmacr31@gmail.com>
|
||||
|
@ -826,6 +838,7 @@ Gopikannan Venugopalsamy <gopikannan.venugopalsamy@gmail.com>
|
|||
Gosuke Miyashita <gosukenator@gmail.com>
|
||||
Gou Rao <gou@portworx.com>
|
||||
Govinda Fichtner <govinda.fichtner@googlemail.com>
|
||||
Grace Choi <grace.54109@gmail.com>
|
||||
Grant Millar <rid@cylo.io>
|
||||
Grant Reaber <grant.reaber@gmail.com>
|
||||
Graydon Hoare <graydon@pobox.com>
|
||||
|
@ -966,6 +979,7 @@ James Nugent <james@jen20.com>
|
|||
James Sanders <james3sanders@gmail.com>
|
||||
James Turnbull <james@lovedthanlost.net>
|
||||
James Watkins-Harvey <jwatkins@progi-media.com>
|
||||
Jameson Hyde <jameson.hyde@docker.com>
|
||||
Jamie Hannaford <jamie@limetree.org>
|
||||
Jamshid Afshar <jafshar@yahoo.com>
|
||||
Jan Breig <git@pygos.space>
|
||||
|
@ -1064,13 +1078,16 @@ Jim Perrin <jperrin@centos.org>
|
|||
Jimmy Cuadra <jimmy@jimmycuadra.com>
|
||||
Jimmy Puckett <jimmy.puckett@spinen.com>
|
||||
Jimmy Song <rootsongjc@gmail.com>
|
||||
jinjiadu <jinjiadu@aliyun.com>
|
||||
Jinsoo Park <cellpjs@gmail.com>
|
||||
Jintao Zhang <zhangjintao9020@gmail.com>
|
||||
Jiri Appl <jiria@microsoft.com>
|
||||
Jiri Popelka <jpopelka@redhat.com>
|
||||
Jiuyue Ma <majiuyue@huawei.com>
|
||||
Jiří Župka <jzupka@redhat.com>
|
||||
jjimbo137 <115816493+jjimbo137@users.noreply.github.com>
|
||||
Joakim Roubert <joakim.roubert@axis.com>
|
||||
Joan Grau <grautxo.dev@proton.me>
|
||||
Joao Fernandes <joao.fernandes@docker.com>
|
||||
Joao Trindade <trindade.joao@gmail.com>
|
||||
Joe Beda <joe.github@bedafamily.com>
|
||||
|
@ -1155,6 +1172,7 @@ Josiah Kiehl <jkiehl@riotgames.com>
|
|||
José Tomás Albornoz <jojo@eljojo.net>
|
||||
Joyce Jang <mail@joycejang.com>
|
||||
JP <jpellerin@leapfrogonline.com>
|
||||
JSchltggr <jschltggr@gmail.com>
|
||||
Julian Taylor <jtaylor.debian@googlemail.com>
|
||||
Julien Barbier <write0@gmail.com>
|
||||
Julien Bisconti <veggiemonk@users.noreply.github.com>
|
||||
|
@ -1289,6 +1307,7 @@ Laura Brehm <laurabrehm@hey.com>
|
|||
Laura Frank <ljfrank@gmail.com>
|
||||
Laurent Bernaille <laurent.bernaille@datadoghq.com>
|
||||
Laurent Erignoux <lerignoux@gmail.com>
|
||||
Laurent Goderre <laurent.goderre@docker.com>
|
||||
Laurie Voss <github@seldo.com>
|
||||
Leandro Motta Barros <lmb@stackedboxes.org>
|
||||
Leandro Siqueira <leandro.siqueira@gmail.com>
|
||||
|
@ -1369,6 +1388,7 @@ Madhan Raj Mookkandy <MadhanRaj.Mookkandy@microsoft.com>
|
|||
Madhav Puri <madhav.puri@gmail.com>
|
||||
Madhu Venugopal <mavenugo@gmail.com>
|
||||
Mageee <fangpuyi@foxmail.com>
|
||||
maggie44 <64841595+maggie44@users.noreply.github.com>
|
||||
Mahesh Tiyyagura <tmahesh@gmail.com>
|
||||
malnick <malnick@gmail..com>
|
||||
Malte Janduda <mail@janduda.net>
|
||||
|
@ -1579,6 +1599,7 @@ Muayyad Alsadi <alsadi@gmail.com>
|
|||
Muhammad Zohaib Aslam <zohaibse011@gmail.com>
|
||||
Mustafa Akın <mustafa91@gmail.com>
|
||||
Muthukumar R <muthur@gmail.com>
|
||||
Myeongjoon Kim <kimmj8409@gmail.com>
|
||||
Máximo Cuadros <mcuadros@gmail.com>
|
||||
Médi-Rémi Hashim <medimatrix@users.noreply.github.com>
|
||||
Nace Oroz <orkica@gmail.com>
|
||||
|
@ -1593,6 +1614,7 @@ Natasha Jarus <linuxmercedes@gmail.com>
|
|||
Nate Brennand <nate.brennand@clever.com>
|
||||
Nate Eagleson <nate@nateeag.com>
|
||||
Nate Jones <nate@endot.org>
|
||||
Nathan Baulch <nathan.baulch@gmail.com>
|
||||
Nathan Carlson <carl4403@umn.edu>
|
||||
Nathan Herald <me@nathanherald.com>
|
||||
Nathan Hsieh <hsieh.nathan@gmail.com>
|
||||
|
@ -1655,6 +1677,7 @@ Nuutti Kotivuori <naked@iki.fi>
|
|||
nzwsch <hi@nzwsch.com>
|
||||
O.S. Tezer <ostezer@gmail.com>
|
||||
objectified <objectified@gmail.com>
|
||||
Octol1ttle <l1ttleofficial@outlook.com>
|
||||
Odin Ugedal <odin@ugedal.com>
|
||||
Oguz Bilgic <fisyonet@gmail.com>
|
||||
Oh Jinkyun <tintypemolly@gmail.com>
|
||||
|
@ -1763,6 +1786,7 @@ Pierre Carrier <pierre@meteor.com>
|
|||
Pierre Dal-Pra <dalpra.pierre@gmail.com>
|
||||
Pierre Wacrenier <pierre.wacrenier@gmail.com>
|
||||
Pierre-Alain RIVIERE <pariviere@ippon.fr>
|
||||
pinglanlu <pinglanlu@outlook.com>
|
||||
Piotr Bogdan <ppbogdan@gmail.com>
|
||||
Piotr Karbowski <piotr.karbowski@protonmail.ch>
|
||||
Porjo <porjo38@yahoo.com.au>
|
||||
|
@ -1790,6 +1814,7 @@ Quentin Tayssier <qtayssier@gmail.com>
|
|||
r0n22 <cameron.regan@gmail.com>
|
||||
Rachit Sharma <rachitsharma613@gmail.com>
|
||||
Radostin Stoyanov <rstoyanov1@gmail.com>
|
||||
Rafael Fernández López <ereslibre@ereslibre.es>
|
||||
Rafal Jeczalik <rjeczalik@gmail.com>
|
||||
Rafe Colton <rafael.colton@gmail.com>
|
||||
Raghavendra K T <raghavendra.kt@linux.vnet.ibm.com>
|
||||
|
@ -1856,7 +1881,7 @@ Robin Speekenbrink <robin@kingsquare.nl>
|
|||
Robin Thoni <robin@rthoni.com>
|
||||
robpc <rpcann@gmail.com>
|
||||
Rodolfo Carvalho <rhcarvalho@gmail.com>
|
||||
Rodrigo Campos <rodrigo@kinvolk.io>
|
||||
Rodrigo Campos <rodrigoca@microsoft.com>
|
||||
Rodrigo Vaz <rodrigo.vaz@gmail.com>
|
||||
Roel Van Nyen <roel.vannyen@gmail.com>
|
||||
Roger Peppe <rogpeppe@gmail.com>
|
||||
|
@ -1995,6 +2020,7 @@ Sevki Hasirci <s@sevki.org>
|
|||
Shane Canon <scanon@lbl.gov>
|
||||
Shane da Silva <shane@dasilva.io>
|
||||
Shaun Kaasten <shaunk@gmail.com>
|
||||
Shaun Thompson <shaun.thompson@docker.com>
|
||||
shaunol <shaunol@gmail.com>
|
||||
Shawn Landden <shawn@churchofgit.com>
|
||||
Shawn Siefkas <shawn.siefkas@meredith.com>
|
||||
|
@ -2013,6 +2039,7 @@ Shijun Qin <qinshijun16@mails.ucas.ac.cn>
|
|||
Shishir Mahajan <shishir.mahajan@redhat.com>
|
||||
Shoubhik Bose <sbose78@gmail.com>
|
||||
Shourya Sarcar <shourya.sarcar@gmail.com>
|
||||
Shreenidhi Shedi <shreenidhi.shedi@broadcom.com>
|
||||
Shu-Wai Chow <shu-wai.chow@seattlechildrens.org>
|
||||
shuai-z <zs.broccoli@gmail.com>
|
||||
Shukui Yang <yangshukui@huawei.com>
|
||||
|
@ -2100,6 +2127,7 @@ Sébastien Stormacq <sebsto@users.noreply.github.com>
|
|||
Sören Tempel <soeren+git@soeren-tempel.net>
|
||||
Tabakhase <mail@tabakhase.com>
|
||||
Tadej Janež <tadej.j@nez.si>
|
||||
Tadeusz Dudkiewicz <tadeusz.dudkiewicz@rtbhouse.com>
|
||||
Takuto Sato <tockn.jp@gmail.com>
|
||||
tang0th <tang0th@gmx.com>
|
||||
Tangi Colin <tangicolin@gmail.com>
|
||||
|
@ -2107,6 +2135,7 @@ Tatsuki Sugiura <sugi@nemui.org>
|
|||
Tatsushi Inagaki <e29253@jp.ibm.com>
|
||||
Taylan Isikdemir <taylani@google.com>
|
||||
Taylor Jones <monitorjbl@gmail.com>
|
||||
tcpdumppy <847462026@qq.com>
|
||||
Ted M. Young <tedyoung@gmail.com>
|
||||
Tehmasp Chaudhri <tehmasp@gmail.com>
|
||||
Tejaswini Duggaraju <naduggar@microsoft.com>
|
||||
|
@ -2391,6 +2420,7 @@ You-Sheng Yang (楊有勝) <vicamo@gmail.com>
|
|||
youcai <omegacoleman@gmail.com>
|
||||
Youcef YEKHLEF <yyekhlef@gmail.com>
|
||||
Youfu Zhang <zhangyoufu@gmail.com>
|
||||
YR Chen <stevapple@icloud.com>
|
||||
Yu Changchun <yuchangchun1@huawei.com>
|
||||
Yu Chengxia <yuchengxia@huawei.com>
|
||||
Yu Peng <yu.peng36@zte.com.cn>
|
||||
|
|
|
@ -3,7 +3,7 @@ package api // import "github.com/docker/docker/api"
|
|||
// Common constants for daemon and client.
|
||||
const (
|
||||
// DefaultVersion of the current REST API.
|
||||
DefaultVersion = "1.47"
|
||||
DefaultVersion = "1.48"
|
||||
|
||||
// MinSupportedAPIVersion is the minimum API version that can be supported
|
||||
// by the API server, specified as "major.minor". Note that the daemon
|
||||
|
|
File diff suppressed because it is too large
|
@ -11,7 +11,7 @@ import (
|
|||
"github.com/docker/docker/api/types/registry"
|
||||
)
|
||||
|
||||
// NewHijackedResponse intializes a HijackedResponse type
|
||||
// NewHijackedResponse initializes a [HijackedResponse] type.
|
||||
func NewHijackedResponse(conn net.Conn, mediaType string) HijackedResponse {
|
||||
return HijackedResponse{Conn: conn, Reader: bufio.NewReader(conn), mediaType: mediaType}
|
||||
}
|
||||
|
@ -129,14 +129,6 @@ type ImageBuildResponse struct {
|
|||
OSType string
|
||||
}
|
||||
|
||||
// RequestPrivilegeFunc is a function interface that
|
||||
// clients can supply to retry operations after
|
||||
// getting an authorization error.
|
||||
// This function returns the registry authentication
|
||||
// header value in base 64 format, or an error
|
||||
// if the privilege request fails.
|
||||
type RequestPrivilegeFunc func(context.Context) (string, error)
|
||||
|
||||
// NodeListOptions holds parameters to list nodes with.
|
||||
type NodeListOptions struct {
|
||||
Filters filters.Args
|
||||
|
@ -235,11 +227,18 @@ type PluginDisableOptions struct {
|
|||
|
||||
// PluginInstallOptions holds parameters to install a plugin.
|
||||
type PluginInstallOptions struct {
|
||||
Disabled bool
|
||||
AcceptAllPermissions bool
|
||||
RegistryAuth string // RegistryAuth is the base64 encoded credentials for the registry
|
||||
RemoteRef string // RemoteRef is the plugin name on the registry
|
||||
PrivilegeFunc RequestPrivilegeFunc
|
||||
Disabled bool
|
||||
AcceptAllPermissions bool
|
||||
RegistryAuth string // RegistryAuth is the base64 encoded credentials for the registry
|
||||
RemoteRef string // RemoteRef is the plugin name on the registry
|
||||
|
||||
// PrivilegeFunc is a function that clients can supply to retry operations
|
||||
// after getting an authorization error. This function returns the registry
|
||||
// authentication header value in base64 encoded format, or an error if the
|
||||
// privilege request fails.
|
||||
//
|
||||
// For details, refer to [github.com/docker/docker/api/types/registry.RequestAuthConfig].
|
||||
PrivilegeFunc func(context.Context) (string, error)
|
||||
AcceptPermissionsFunc func(context.Context, PluginPrivileges) (bool, error)
|
||||
Args []string
|
||||
}
|
||||
|
|
|
@ -1,10 +1,10 @@
|
|||
package types
|
||||
package common
|
||||
|
||||
// This file was generated by the swagger tool.
|
||||
// Editing this file might prove futile when you re-run the swagger generate command
|
||||
|
||||
// IDResponse Response to an API call that returns just an Id
|
||||
// swagger:model IdResponse
|
||||
// swagger:model IDResponse
|
||||
type IDResponse struct {
|
||||
|
||||
// The id of the newly created object.
|
common/vendor/github.com/docker/docker/api/types/container/commit.go (generated, vendored, new file)
|
@ -0,0 +1,7 @@
|
|||
package container
|
||||
|
||||
import "github.com/docker/docker/api/types/common"
|
||||
|
||||
// CommitResponse response for the commit API call, containing the ID of the
|
||||
// image that was produced.
|
||||
type CommitResponse = common.IDResponse
|
|
@ -4,8 +4,22 @@ import (
|
|||
"io"
|
||||
"os"
|
||||
"time"
|
||||
|
||||
"github.com/docker/docker/api/types/mount"
|
||||
"github.com/docker/docker/api/types/storage"
|
||||
ocispec "github.com/opencontainers/image-spec/specs-go/v1"
|
||||
)
|
||||
|
||||
// ContainerUpdateOKBody OK response to ContainerUpdate operation
|
||||
//
|
||||
// Deprecated: use [UpdateResponse]. This alias will be removed in the next release.
|
||||
type ContainerUpdateOKBody = UpdateResponse
|
||||
|
||||
// ContainerTopOKBody OK response to ContainerTop operation
|
||||
//
|
||||
// Deprecated: use [TopResponse]. This alias will be removed in the next release.
|
||||
type ContainerTopOKBody = TopResponse
|
||||
|
||||
// PruneReport contains the response for Engine API:
|
||||
// POST "/containers/prune"
|
||||
type PruneReport struct {
|
||||
|
@ -42,3 +56,133 @@ type StatsResponseReader struct {
|
|||
Body io.ReadCloser `json:"body"`
|
||||
OSType string `json:"ostype"`
|
||||
}
|
||||
|
||||
// MountPoint represents a mount point configuration inside the container.
|
||||
// This is used for reporting the mountpoints in use by a container.
|
||||
type MountPoint struct {
|
||||
// Type is the type of mount, see `Type<foo>` definitions in
|
||||
// github.com/docker/docker/api/types/mount.Type
|
||||
Type mount.Type `json:",omitempty"`
|
||||
|
||||
// Name is the name reference to the underlying data defined by `Source`
|
||||
// e.g., the volume name.
|
||||
Name string `json:",omitempty"`
|
||||
|
||||
// Source is the source location of the mount.
|
||||
//
|
||||
// For volumes, this contains the storage location of the volume (within
|
||||
// `/var/lib/docker/volumes/`). For bind-mounts, and `npipe`, this contains
|
||||
// the source (host) part of the bind-mount. For `tmpfs` mount points, this
|
||||
// field is empty.
|
||||
Source string
|
||||
|
||||
// Destination is the path relative to the container root (`/`) where the
|
||||
// Source is mounted inside the container.
|
||||
Destination string
|
||||
|
||||
// Driver is the volume driver used to create the volume (if it is a volume).
|
||||
Driver string `json:",omitempty"`
|
||||
|
||||
// Mode is a comma separated list of options supplied by the user when
|
||||
// creating the bind/volume mount.
|
||||
//
|
||||
// The default is platform-specific (`"z"` on Linux, empty on Windows).
|
||||
Mode string
|
||||
|
||||
// RW indicates whether the mount is mounted writable (read-write).
|
||||
RW bool
|
||||
|
||||
// Propagation describes how mounts are propagated from the host into the
|
||||
// mount point, and vice-versa. Refer to the Linux kernel documentation
|
||||
// for details:
|
||||
// https://www.kernel.org/doc/Documentation/filesystems/sharedsubtree.txt
|
||||
//
|
||||
// This field is not used on Windows.
|
||||
Propagation mount.Propagation
|
||||
}
|
||||
|
||||
// State stores container's running state
|
||||
// it's part of ContainerJSONBase and returned by "inspect" command
|
||||
type State struct {
|
||||
Status string // String representation of the container state. Can be one of "created", "running", "paused", "restarting", "removing", "exited", or "dead"
|
||||
Running bool
|
||||
Paused bool
|
||||
Restarting bool
|
||||
OOMKilled bool
|
||||
Dead bool
|
||||
Pid int
|
||||
ExitCode int
|
||||
Error string
|
||||
StartedAt string
|
||||
FinishedAt string
|
||||
Health *Health `json:",omitempty"`
|
||||
}
|
||||
|
||||
// Summary contains response of Engine API:
|
||||
// GET "/containers/json"
|
||||
type Summary struct {
|
||||
ID string `json:"Id"`
|
||||
Names []string
|
||||
Image string
|
||||
ImageID string
|
||||
ImageManifestDescriptor *ocispec.Descriptor `json:"ImageManifestDescriptor,omitempty"`
|
||||
Command string
|
||||
Created int64
|
||||
Ports []Port
|
||||
SizeRw int64 `json:",omitempty"`
|
||||
SizeRootFs int64 `json:",omitempty"`
|
||||
Labels map[string]string
|
||||
State string
|
||||
Status string
|
||||
HostConfig struct {
|
||||
NetworkMode string `json:",omitempty"`
|
||||
Annotations map[string]string `json:",omitempty"`
|
||||
}
|
||||
NetworkSettings *NetworkSettingsSummary
|
||||
Mounts []MountPoint
|
||||
}
|
||||
|
||||
// ContainerJSONBase contains response of Engine API GET "/containers/{name:.*}/json"
|
||||
// for API version 1.18 and older.
|
||||
//
|
||||
// TODO(thaJeztah): combine ContainerJSONBase and InspectResponse into a single struct.
|
||||
// The split between ContainerJSONBase (ContainerJSONBase) and InspectResponse (InspectResponse)
|
||||
// was done in commit 6deaa58ba5f051039643cedceee97c8695e2af74 (https://github.com/moby/moby/pull/13675).
|
||||
// ContainerJSONBase contained all fields for API < 1.19, and InspectResponse
|
||||
// held fields that were added in API 1.19 and up. Given that the minimum
|
||||
// supported API version is now 1.24, we no longer use the separate type.
|
||||
type ContainerJSONBase struct {
|
||||
ID string `json:"Id"`
|
||||
Created string
|
||||
Path string
|
||||
Args []string
|
||||
State *State
|
||||
Image string
|
||||
ResolvConfPath string
|
||||
HostnamePath string
|
||||
HostsPath string
|
||||
LogPath string
|
||||
Name string
|
||||
RestartCount int
|
||||
Driver string
|
||||
Platform string
|
||||
MountLabel string
|
||||
ProcessLabel string
|
||||
AppArmorProfile string
|
||||
ExecIDs []string
|
||||
HostConfig *HostConfig
|
||||
GraphDriver storage.DriverData
|
||||
SizeRw *int64 `json:",omitempty"`
|
||||
SizeRootFs *int64 `json:",omitempty"`
|
||||
}
|
||||
|
||||
// InspectResponse is the response for the GET "/containers/{name:.*}/json"
|
||||
// endpoint.
|
||||
type InspectResponse struct {
|
||||
*ContainerJSONBase
|
||||
Mounts []MountPoint
|
||||
Config *Config
|
||||
NetworkSettings *NetworkSettings
|
||||
// ImageManifestDescriptor is the descriptor of a platform-specific manifest of the image used to create the container.
|
||||
ImageManifestDescriptor *ocispec.Descriptor `json:"ImageManifestDescriptor,omitempty"`
|
||||
}
|
||||
|
|
|
@ -1,22 +0,0 @@
|
|||
package container // import "github.com/docker/docker/api/types/container"
|
||||
|
||||
// ----------------------------------------------------------------------------
|
||||
// Code generated by `swagger generate operation`. DO NOT EDIT.
|
||||
//
|
||||
// See hack/generate-swagger-api.sh
|
||||
// ----------------------------------------------------------------------------
|
||||
|
||||
// ContainerTopOKBody OK response to ContainerTop operation
|
||||
// swagger:model ContainerTopOKBody
|
||||
type ContainerTopOKBody struct {
|
||||
|
||||
// Each process running in the container, where each is process
|
||||
// is an array of values corresponding to the titles.
|
||||
//
|
||||
// Required: true
|
||||
Processes [][]string `json:"Processes"`
|
||||
|
||||
// The ps column titles
|
||||
// Required: true
|
||||
Titles []string `json:"Titles"`
|
||||
}
|
common/vendor/github.com/docker/docker/api/types/container/container_update.go (generated, vendored)
|
@ -1,16 +0,0 @@
|
|||
package container // import "github.com/docker/docker/api/types/container"
|
||||
|
||||
// ----------------------------------------------------------------------------
|
||||
// Code generated by `swagger generate operation`. DO NOT EDIT.
|
||||
//
|
||||
// See hack/generate-swagger-api.sh
|
||||
// ----------------------------------------------------------------------------
|
||||
|
||||
// ContainerUpdateOKBody OK response to ContainerUpdate operation
|
||||
// swagger:model ContainerUpdateOKBody
|
||||
type ContainerUpdateOKBody struct {
|
||||
|
||||
// warnings
|
||||
// Required: true
|
||||
Warnings []string `json:"Warnings"`
|
||||
}
|
|
@ -1,5 +1,13 @@
|
|||
package container
|
||||
|
||||
import "github.com/docker/docker/api/types/common"
|
||||
|
||||
// ExecCreateResponse is the response for a successful exec-create request.
|
||||
// It holds the ID of the exec that was created.
|
||||
//
|
||||
// TODO(thaJeztah): make this a distinct type.
|
||||
type ExecCreateResponse = common.IDResponse
|
||||
|
||||
// ExecOptions is a small subset of the Config struct that holds the configuration
|
||||
// for the exec feature of docker.
|
||||
type ExecOptions struct {
|
||||
|
|
common/vendor/github.com/docker/docker/api/types/container/health.go (generated, vendored, new file)
@@ -0,0 +1,26 @@
package container

import "time"

// Health states
const (
	NoHealthcheck = "none"      // Indicates there is no healthcheck
	Starting      = "starting"  // Starting indicates that the container is not yet ready
	Healthy       = "healthy"   // Healthy indicates that the container is running correctly
	Unhealthy     = "unhealthy" // Unhealthy indicates that the container has a problem
)

// Health stores information about the container's healthcheck results
type Health struct {
	Status        string               // Status is one of [Starting], [Healthy] or [Unhealthy].
	FailingStreak int                  // FailingStreak is the number of consecutive failures
	Log           []*HealthcheckResult // Log contains the last few results (oldest first)
}

// HealthcheckResult stores information about a single run of a healthcheck probe
type HealthcheckResult struct {
	Start    time.Time // Start is the time this check started
	End      time.Time // End is the time this check ended
	ExitCode int       // ExitCode meanings: 0=healthy, 1=unhealthy, 2=reserved (considered unhealthy), else=error running probe
	Output   string    // Output from last check
}
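The health states and the `Health`/`HealthcheckResult` structs above are plain data types, so they can be exercised without a daemon. Example (not part of the diff) — a minimal sketch that decodes a made-up `State.Health` fragment and switches on the status constants:

package main

import (
	"encoding/json"
	"fmt"

	"github.com/docker/docker/api/types/container"
)

func main() {
	// Hypothetical payload, shaped like the State.Health object of a
	// container-inspect response; all values are invented.
	payload := []byte(`{
		"Status": "unhealthy",
		"FailingStreak": 3,
		"Log": [{"Start": "2025-03-01T10:00:00Z", "End": "2025-03-01T10:00:01Z", "ExitCode": 1, "Output": "connection refused"}]
	}`)

	var h container.Health
	if err := json.Unmarshal(payload, &h); err != nil {
		panic(err)
	}

	switch h.Status {
	case container.Healthy:
		fmt.Println("container is healthy")
	case container.Starting:
		fmt.Println("healthcheck still starting")
	case container.Unhealthy:
		last := ""
		if n := len(h.Log); n > 0 {
			last = h.Log[n-1].Output
		}
		fmt.Printf("unhealthy for %d consecutive checks: %s\n", h.FailingStreak, last)
	case container.NoHealthcheck:
		fmt.Println("no healthcheck configured")
	}
}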
56 common/vendor/github.com/docker/docker/api/types/container/network_settings.go (generated, vendored, new file)
|
@ -0,0 +1,56 @@
|
|||
package container
|
||||
|
||||
import (
|
||||
"github.com/docker/docker/api/types/network"
|
||||
"github.com/docker/go-connections/nat"
|
||||
)
|
||||
|
||||
// NetworkSettings exposes the network settings in the api
|
||||
type NetworkSettings struct {
|
||||
NetworkSettingsBase
|
||||
DefaultNetworkSettings
|
||||
Networks map[string]*network.EndpointSettings
|
||||
}
|
||||
|
||||
// NetworkSettingsBase holds networking state for a container when inspecting it.
|
||||
type NetworkSettingsBase struct {
|
||||
Bridge string // Bridge contains the name of the default bridge interface iff it was set through the daemon --bridge flag.
|
||||
SandboxID string // SandboxID uniquely represents a container's network stack
|
||||
SandboxKey string // SandboxKey identifies the sandbox
|
||||
Ports nat.PortMap // Ports is a collection of PortBinding indexed by Port
|
||||
|
||||
// HairpinMode specifies if hairpin NAT should be enabled on the virtual interface
|
||||
//
|
||||
// Deprecated: This field is never set and will be removed in a future release.
|
||||
HairpinMode bool
|
||||
// LinkLocalIPv6Address is an IPv6 unicast address using the link-local prefix
|
||||
//
|
||||
// Deprecated: This field is never set and will be removed in a future release.
|
||||
LinkLocalIPv6Address string
|
||||
// LinkLocalIPv6PrefixLen is the prefix length of an IPv6 unicast address
|
||||
//
|
||||
// Deprecated: This field is never set and will be removed in a future release.
|
||||
LinkLocalIPv6PrefixLen int
|
||||
SecondaryIPAddresses []network.Address // Deprecated: This field is never set and will be removed in a future release.
|
||||
SecondaryIPv6Addresses []network.Address // Deprecated: This field is never set and will be removed in a future release.
|
||||
}
|
||||
|
||||
// DefaultNetworkSettings holds network information
|
||||
// during the 2 release deprecation period.
|
||||
// It will be removed in Docker 1.11.
|
||||
type DefaultNetworkSettings struct {
|
||||
EndpointID string // EndpointID uniquely represents a service endpoint in a Sandbox
|
||||
Gateway string // Gateway holds the gateway address for the network
|
||||
GlobalIPv6Address string // GlobalIPv6Address holds network's global IPv6 address
|
||||
GlobalIPv6PrefixLen int // GlobalIPv6PrefixLen represents mask length of network's global IPv6 address
|
||||
IPAddress string // IPAddress holds the IPv4 address for the network
|
||||
IPPrefixLen int // IPPrefixLen represents mask length of network's IPv4 address
|
||||
IPv6Gateway string // IPv6Gateway holds gateway address specific for IPv6
|
||||
MacAddress string // MacAddress holds the MAC address for the network
|
||||
}
|
||||
|
||||
// NetworkSettingsSummary provides a summary of container's networks
|
||||
// in /containers/json
|
||||
type NetworkSettingsSummary struct {
|
||||
Networks map[string]*network.EndpointSettings
|
||||
}
|
|
@@ -1,4 +1,4 @@
package types
package container

// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command
@ -148,7 +148,15 @@ type PidsStats struct {
|
|||
}
|
||||
|
||||
// Stats is Ultimate struct aggregating all types of stats of one container
|
||||
type Stats struct {
|
||||
//
|
||||
// Deprecated: use [StatsResponse] instead. This type will be removed in the next release.
|
||||
type Stats = StatsResponse
|
||||
|
||||
// StatsResponse aggregates all types of stats of one container.
|
||||
type StatsResponse struct {
|
||||
Name string `json:"name,omitempty"`
|
||||
ID string `json:"id,omitempty"`
|
||||
|
||||
// Common stats
|
||||
Read time.Time `json:"read"`
|
||||
PreRead time.Time `json:"preread"`
|
||||
|
@ -162,20 +170,8 @@ type Stats struct {
|
|||
StorageStats StorageStats `json:"storage_stats,omitempty"`
|
||||
|
||||
// Shared stats
|
||||
CPUStats CPUStats `json:"cpu_stats,omitempty"`
|
||||
PreCPUStats CPUStats `json:"precpu_stats,omitempty"` // "Pre"="Previous"
|
||||
MemoryStats MemoryStats `json:"memory_stats,omitempty"`
|
||||
}
|
||||
|
||||
// StatsResponse is newly used Networks.
|
||||
//
|
||||
// TODO(thaJeztah): unify with [Stats]. This wrapper was to account for pre-api v1.21 changes, see https://github.com/moby/moby/commit/d3379946ec96fb6163cb8c4517d7d5a067045801
|
||||
type StatsResponse struct {
|
||||
Stats
|
||||
|
||||
Name string `json:"name,omitempty"`
|
||||
ID string `json:"id,omitempty"`
|
||||
|
||||
// Networks request version >=1.21
|
||||
Networks map[string]NetworkStats `json:"networks,omitempty"`
|
||||
CPUStats CPUStats `json:"cpu_stats,omitempty"`
|
||||
PreCPUStats CPUStats `json:"precpu_stats,omitempty"` // "Pre"="Previous"
|
||||
MemoryStats MemoryStats `json:"memory_stats,omitempty"`
|
||||
Networks map[string]NetworkStats `json:"networks,omitempty"`
|
||||
}
|
||||
|
|
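The hunk above folds the old wrapper arrangement into a single `StatsResponse` struct and keeps `Stats` only as a deprecated alias. Example (not part of the diff) — a sketch that decodes a made-up stats payload using only the fields visible in this hunk:

package main

import (
	"encoding/json"
	"fmt"

	"github.com/docker/docker/api/types/container"
)

func main() {
	// Hypothetical one-shot stats payload, as returned by the
	// /containers/{id}/stats endpoint with stream=false.
	payload := []byte(`{"name":"/web","id":"abc123","read":"2025-03-01T10:00:00Z","preread":"2025-03-01T09:59:59Z"}`)

	var s container.StatsResponse
	if err := json.Unmarshal(payload, &s); err != nil {
		panic(err)
	}
	fmt.Printf("stats for %s (%s), read at %s\n", s.Name, s.ID, s.Read)

	// The old name still compiles because Stats is now an alias.
	var legacy container.Stats = s
	_ = legacy
}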
18 common/vendor/github.com/docker/docker/api/types/container/top_response.go (generated, vendored, new file)
@@ -0,0 +1,18 @@
package container

// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command

// TopResponse ContainerTopResponse
//
// Container "top" response.
// swagger:model TopResponse
type TopResponse struct {

	// Each process running in the container, where each process
	// is an array of values corresponding to the titles.
	Processes [][]string `json:"Processes"`

	// The ps column titles
	Titles []string `json:"Titles"`
}
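`TopResponse` replaces the `ContainerTopOKBody` shape shown earlier in this diff. Example (not part of the diff) — printing a made-up response as a table:

package main

import (
	"fmt"
	"strings"

	"github.com/docker/docker/api/types/container"
)

func main() {
	// Hypothetical "docker top" result; all values are invented.
	top := container.TopResponse{
		Titles: []string{"PID", "USER", "COMMAND"},
		Processes: [][]string{
			{"1", "root", "nginx: master process"},
			{"27", "nginx", "nginx: worker process"},
		},
	}

	fmt.Println(strings.Join(top.Titles, "\t"))
	for _, proc := range top.Processes {
		fmt.Println(strings.Join(proc, "\t"))
	}
}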
14 common/vendor/github.com/docker/docker/api/types/container/update_response.go (generated, vendored, new file)
@@ -0,0 +1,14 @@
package container

// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command

// UpdateResponse ContainerUpdateResponse
//
// Response for a successful container-update.
// swagger:model UpdateResponse
type UpdateResponse struct {

	// Warnings encountered when updating the container.
	Warnings []string `json:"Warnings"`
}
@@ -22,16 +22,3 @@ func (e invalidFilter) Error() string {

// InvalidParameter marks this error as ErrInvalidParameter
func (e invalidFilter) InvalidParameter() {}

// unreachableCode is an error indicating that the code path was not expected to be reached.
type unreachableCode struct {
	Filter string
	Value  []string
}

// System marks this error as ErrSystem
func (e unreachableCode) System() {}

func (e unreachableCode) Error() string {
	return fmt.Sprintf("unreachable code reached for filter: %q with values: %s", e.Filter, e.Value)
}
@@ -200,7 +200,6 @@ func (args Args) Match(field, source string) bool {
// Error is not nil only if the filter values are not valid boolean or are conflicting.
func (args Args) GetBoolOrDefault(key string, defaultValue bool) (bool, error) {
	fieldValues, ok := args.fields[key]

	if !ok {
		return defaultValue, nil
	}

@@ -211,20 +210,11 @@ func (args Args) GetBoolOrDefault(key string, defaultValue bool) (bool, error) {

	isFalse := fieldValues["0"] || fieldValues["false"]
	isTrue := fieldValues["1"] || fieldValues["true"]

	conflicting := isFalse && isTrue
	invalid := !isFalse && !isTrue

	if conflicting || invalid {
	if isFalse == isTrue {
		// Either no or conflicting truthy/falsy value were provided
		return defaultValue, &invalidFilter{key, args.Get(key)}
	} else if isFalse {
		return false, nil
	} else if isTrue {
		return true, nil
	}

	// This code shouldn't be reached.
	return defaultValue, &unreachableCode{Filter: key, Value: args.Get(key)}
	return isTrue, nil
}

// ExactMatch returns true if the source matches exactly one of the values.
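The rewrite above collapses the old `conflicting`/`invalid` bookkeeping into a single `isFalse == isTrue` check and drops the unreachable-code fallback. Example (not part of the diff) — usage of the public API with an arbitrary filter key:

package main

import (
	"fmt"

	"github.com/docker/docker/api/types/filters"
)

func main() {
	args := filters.NewArgs(filters.Arg("dangling", "true"))

	// "dangling" was provided and is truthy, so this returns true, nil.
	dangling, err := args.GetBoolOrDefault("dangling", false)
	fmt.Println(dangling, err)

	// An absent key falls back to the supplied default.
	all, err := args.GetBoolOrDefault("all", true)
	fmt.Println(all, err)

	// Conflicting values for the same key still return the default
	// together with an invalid-filter error.
	bad := filters.NewArgs(filters.Arg("dangling", "true"), filters.Arg("dangling", "false"))
	_, err = bad.GetBoolOrDefault("dangling", false)
	fmt.Println(err != nil)
}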
140 common/vendor/github.com/docker/docker/api/types/image/image_inspect.go (generated, vendored, new file)
|
@ -0,0 +1,140 @@
|
|||
package image
|
||||
|
||||
import (
|
||||
"github.com/docker/docker/api/types/container"
|
||||
"github.com/docker/docker/api/types/storage"
|
||||
ocispec "github.com/opencontainers/image-spec/specs-go/v1"
|
||||
)
|
||||
|
||||
// RootFS returns Image's RootFS description including the layer IDs.
|
||||
type RootFS struct {
|
||||
Type string `json:",omitempty"`
|
||||
Layers []string `json:",omitempty"`
|
||||
}
|
||||
|
||||
// InspectResponse contains response of Engine API:
|
||||
// GET "/images/{name:.*}/json"
|
||||
type InspectResponse struct {
|
||||
// ID is the content-addressable ID of an image.
|
||||
//
|
||||
// This identifier is a content-addressable digest calculated from the
|
||||
// image's configuration (which includes the digests of layers used by
|
||||
// the image).
|
||||
//
|
||||
// Note that this digest differs from the `RepoDigests` below, which
|
||||
// holds digests of image manifests that reference the image.
|
||||
ID string `json:"Id"`
|
||||
|
||||
// RepoTags is a list of image names/tags in the local image cache that
|
||||
// reference this image.
|
||||
//
|
||||
// Multiple image tags can refer to the same image, and this list may be
|
||||
// empty if no tags reference the image, in which case the image is
|
||||
// "untagged", in which case it can still be referenced by its ID.
|
||||
RepoTags []string
|
||||
|
||||
// RepoDigests is a list of content-addressable digests of locally available
|
||||
// image manifests that the image is referenced from. Multiple manifests can
|
||||
// refer to the same image.
|
||||
//
|
||||
// These digests are usually only available if the image was either pulled
|
||||
// from a registry, or if the image was pushed to a registry, which is when
|
||||
// the manifest is generated and its digest calculated.
|
||||
RepoDigests []string
|
||||
|
||||
// Parent is the ID of the parent image.
|
||||
//
|
||||
// Depending on how the image was created, this field may be empty and
|
||||
// is only set for images that were built/created locally. This field
|
||||
// is empty if the image was pulled from an image registry.
|
||||
Parent string
|
||||
|
||||
// Comment is an optional message that can be set when committing or
|
||||
// importing the image.
|
||||
Comment string
|
||||
|
||||
// Created is the date and time at which the image was created, formatted in
|
||||
// RFC 3339 nano-seconds (time.RFC3339Nano).
|
||||
//
|
||||
// This information is only available if present in the image,
|
||||
// and omitted otherwise.
|
||||
Created string `json:",omitempty"`
|
||||
|
||||
// Container is the ID of the container that was used to create the image.
|
||||
//
|
||||
// Depending on how the image was created, this field may be empty.
|
||||
//
|
||||
// Deprecated: this field is omitted in API v1.45, but kept for backward compatibility.
|
||||
Container string `json:",omitempty"`
|
||||
|
||||
// ContainerConfig is an optional field containing the configuration of the
|
||||
// container that was last committed when creating the image.
|
||||
//
|
||||
// Previous versions of Docker builder used this field to store build cache,
|
||||
// and it is not in active use anymore.
|
||||
//
|
||||
// Deprecated: this field is omitted in API v1.45, but kept for backward compatibility.
|
||||
ContainerConfig *container.Config `json:",omitempty"`
|
||||
|
||||
// DockerVersion is the version of Docker that was used to build the image.
|
||||
//
|
||||
// Depending on how the image was created, this field may be empty.
|
||||
DockerVersion string
|
||||
|
||||
// Author is the name of the author that was specified when committing the
|
||||
// image, or as specified through MAINTAINER (deprecated) in the Dockerfile.
|
||||
Author string
|
||||
Config *container.Config
|
||||
|
||||
// Architecture is the hardware CPU architecture that the image runs on.
|
||||
Architecture string
|
||||
|
||||
// Variant is the CPU architecture variant (presently ARM-only).
|
||||
Variant string `json:",omitempty"`
|
||||
|
||||
// OS is the Operating System the image is built to run on.
|
||||
Os string
|
||||
|
||||
// OsVersion is the version of the Operating System the image is built to
|
||||
// run on (especially for Windows).
|
||||
OsVersion string `json:",omitempty"`
|
||||
|
||||
// Size is the total size of the image including all layers it is composed of.
|
||||
Size int64
|
||||
|
||||
// VirtualSize is the total size of the image including all layers it is
|
||||
// composed of.
|
||||
//
|
||||
// Deprecated: this field is omitted in API v1.44, but kept for backward compatibility. Use Size instead.
|
||||
VirtualSize int64 `json:"VirtualSize,omitempty"`
|
||||
|
||||
// GraphDriver holds information about the storage driver used to store the
|
||||
// container's and image's filesystem.
|
||||
GraphDriver storage.DriverData
|
||||
|
||||
// RootFS contains information about the image's RootFS, including the
|
||||
// layer IDs.
|
||||
RootFS RootFS
|
||||
|
||||
// Metadata of the image in the local cache.
|
||||
//
|
||||
// This information is local to the daemon, and not part of the image itself.
|
||||
Metadata Metadata
|
||||
|
||||
// Descriptor is the OCI descriptor of the image target.
|
||||
// It's only set if the daemon provides a multi-platform image store.
|
||||
//
|
||||
// WARNING: This is experimental and may change at any time without any backward
|
||||
// compatibility.
|
||||
Descriptor *ocispec.Descriptor `json:"Descriptor,omitempty"`
|
||||
|
||||
// Manifests is a list of image manifests available in this image. It
|
||||
// provides a more detailed view of the platform-specific image manifests or
|
||||
// other image-attached data like build attestations.
|
||||
//
|
||||
// Only available if the daemon provides a multi-platform image store.
|
||||
//
|
||||
// WARNING: This is experimental and may change at any time without any backward
|
||||
// compatibility.
|
||||
Manifests []ManifestSummary `json:"Manifests,omitempty"`
|
||||
}
|
|
@ -38,7 +38,7 @@ type PullOptions struct {
|
|||
// authentication header value in base64 encoded format, or an error if the
|
||||
// privilege request fails.
|
||||
//
|
||||
// Also see [github.com/docker/docker/api/types.RequestPrivilegeFunc].
|
||||
// For details, refer to [github.com/docker/docker/api/types/registry.RequestAuthConfig].
|
||||
PrivilegeFunc func(context.Context) (string, error)
|
||||
Platform string
|
||||
}
|
||||
|
@ -53,7 +53,7 @@ type PushOptions struct {
|
|||
// authentication header value in base64 encoded format, or an error if the
|
||||
// privilege request fails.
|
||||
//
|
||||
// Also see [github.com/docker/docker/api/types.RequestPrivilegeFunc].
|
||||
// For details, refer to [github.com/docker/docker/api/types/registry.RequestAuthConfig].
|
||||
PrivilegeFunc func(context.Context) (string, error)
|
||||
|
||||
// Platform is an optional field that selects a specific platform to push
|
||||
|
@ -86,3 +86,31 @@ type RemoveOptions struct {
|
|||
Force bool
|
||||
PruneChildren bool
|
||||
}
|
||||
|
||||
// HistoryOptions holds parameters to get image history.
|
||||
type HistoryOptions struct {
|
||||
// Platform from the manifest list to use for history.
|
||||
Platform *ocispec.Platform
|
||||
}
|
||||
|
||||
// LoadOptions holds parameters to load images.
|
||||
type LoadOptions struct {
|
||||
// Quiet suppresses progress output
|
||||
Quiet bool
|
||||
|
||||
// Platforms selects the platforms to load if the image is a
|
||||
// multi-platform image and has multiple variants.
|
||||
Platforms []ocispec.Platform
|
||||
}
|
||||
|
||||
type InspectOptions struct {
|
||||
// Manifests returns the image manifests.
|
||||
Manifests bool
|
||||
}
|
||||
|
||||
// SaveOptions holds parameters to save images.
|
||||
type SaveOptions struct {
|
||||
// Platforms selects the platforms to save if the image is a
|
||||
// multi-platform image and has multiple variants.
|
||||
Platforms []ocispec.Platform
|
||||
}
|
||||
|
|
|
@ -1,5 +1,7 @@
|
|||
package image
|
||||
|
||||
import ocispec "github.com/opencontainers/image-spec/specs-go/v1"
|
||||
|
||||
type Summary struct {
|
||||
|
||||
// Number of containers using this image. Includes both stopped and running
|
||||
|
@ -42,6 +44,13 @@ type Summary struct {
|
|||
// Required: true
|
||||
ParentID string `json:"ParentId"`
|
||||
|
||||
// Descriptor is the OCI descriptor of the image target.
|
||||
// It's only set if the daemon provides a multi-platform image store.
|
||||
//
|
||||
// WARNING: This is experimental and may change at any time without any backward
|
||||
// compatibility.
|
||||
Descriptor *ocispec.Descriptor `json:"Descriptor,omitempty"`
|
||||
|
||||
// Manifests is a list of image manifests available in this image. It
|
||||
// provides a more detailed view of the platform-specific image manifests or
|
||||
// other image-attached data like build attestations.
|
||||
|
|
|
@ -19,6 +19,8 @@ const (
|
|||
TypeNamedPipe Type = "npipe"
|
||||
// TypeCluster is the type for Swarm Cluster Volumes.
|
||||
TypeCluster Type = "cluster"
|
||||
// TypeImage is the type for mounting another image's filesystem
|
||||
TypeImage Type = "image"
|
||||
)
|
||||
|
||||
// Mount represents a mount (volume).
|
||||
|
@ -34,6 +36,7 @@ type Mount struct {
|
|||
|
||||
BindOptions *BindOptions `json:",omitempty"`
|
||||
VolumeOptions *VolumeOptions `json:",omitempty"`
|
||||
ImageOptions *ImageOptions `json:",omitempty"`
|
||||
TmpfsOptions *TmpfsOptions `json:",omitempty"`
|
||||
ClusterOptions *ClusterOptions `json:",omitempty"`
|
||||
}
|
||||
|
@ -100,6 +103,10 @@ type VolumeOptions struct {
|
|||
DriverConfig *Driver `json:",omitempty"`
|
||||
}
|
||||
|
||||
type ImageOptions struct {
|
||||
Subpath string `json:",omitempty"`
|
||||
}
|
||||
|
||||
// Driver represents a volume driver.
|
||||
type Driver struct {
|
||||
Name string `json:",omitempty"`
|
||||
|
|
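The mount hunks above add the `TypeImage` mount type and an `ImageOptions.Subpath` field. Example (not part of the diff) — a hedged sketch of what such a mount entry might look like; the image reference and target path are placeholders, and the remaining `mount.Mount` fields are assumed unchanged:

package main

import (
	"fmt"

	"github.com/docker/docker/api/types/mount"
)

func main() {
	// Mount another image's filesystem into a container, limited to a
	// sub-path of that image (both values are illustrative only).
	m := mount.Mount{
		Type:   mount.TypeImage,
		Source: "docker.io/library/alpine:latest",
		Target: "/mnt/alpine-etc",
		ImageOptions: &mount.ImageOptions{
			Subpath: "etc",
		},
	}
	fmt.Printf("%+v\n", m)
}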
|
@ -19,6 +19,12 @@ type EndpointSettings struct {
|
|||
// generated address).
|
||||
MacAddress string
|
||||
DriverOpts map[string]string
|
||||
|
||||
// GwPriority determines which endpoint will provide the default gateway
|
||||
// for the container. The endpoint with the highest priority will be used.
|
||||
// If multiple endpoints have the same priority, they are lexicographically
|
||||
// sorted based on their network name, and the one that sorts first is picked.
|
||||
GwPriority int
|
||||
// Operational data
|
||||
NetworkID string
|
||||
EndpointID string
|
||||
|
|
|
@ -33,6 +33,7 @@ type CreateRequest struct {
|
|||
type CreateOptions struct {
|
||||
Driver string // Driver is the driver-name used to create the network (e.g. `bridge`, `overlay`)
|
||||
Scope string // Scope describes the level at which the network exists (e.g. `swarm` for cluster-wide or `local` for machine level).
|
||||
EnableIPv4 *bool `json:",omitempty"` // EnableIPv4 represents whether to enable IPv4.
|
||||
EnableIPv6 *bool `json:",omitempty"` // EnableIPv6 represents whether to enable IPv6.
|
||||
IPAM *IPAM // IPAM is the network's IP Address Management.
|
||||
Internal bool // Internal represents if the network is used internal only.
|
||||
|
@ -76,7 +77,8 @@ type Inspect struct {
|
|||
Created time.Time // Created is the time the network created
|
||||
Scope string // Scope describes the level at which the network exists (e.g. `swarm` for cluster-wide or `local` for machine level)
|
||||
Driver string // Driver is the Driver name used to create the network (e.g. `bridge`, `overlay`)
|
||||
EnableIPv6 bool // EnableIPv6 represents whether to enable IPv6
|
||||
EnableIPv4 bool // EnableIPv4 represents whether IPv4 is enabled
|
||||
EnableIPv6 bool // EnableIPv6 represents whether IPv6 is enabled
|
||||
IPAM IPAM // IPAM is the network's IP Address Management
|
||||
Internal bool // Internal represents if the network is used internal only
|
||||
Attachable bool // Attachable represents if the global scope is manually attachable by regular containers from workers in swarm mode.
|
||||
|
|
|
@ -1,17 +1,29 @@
|
|||
package registry // import "github.com/docker/docker/api/types/registry"
|
||||
import (
|
||||
"context"
|
||||
"encoding/base64"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"io"
|
||||
"strings"
|
||||
|
||||
"github.com/pkg/errors"
|
||||
)
|
||||
|
||||
// AuthHeader is the name of the header used to send encoded registry
|
||||
// authorization credentials for registry operations (push/pull).
|
||||
const AuthHeader = "X-Registry-Auth"
|
||||
|
||||
// RequestAuthConfig is a function interface that clients can supply
|
||||
// to retry operations after getting an authorization error.
|
||||
//
|
||||
// The function must return the [AuthHeader] value ([AuthConfig]), encoded
|
||||
// in base64url format ([RFC4648, section 5]), which can be decoded by
|
||||
// [DecodeAuthConfig].
|
||||
//
|
||||
// It must return an error if the privilege request fails.
|
||||
//
|
||||
// [RFC4648, section 5]: https://tools.ietf.org/html/rfc4648#section-5
|
||||
type RequestAuthConfig func(context.Context) (string, error)
|
||||
|
||||
// AuthConfig contains authorization information for connecting to a Registry.
|
||||
type AuthConfig struct {
|
||||
Username string `json:"username,omitempty"`
|
||||
|
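The hunk above adds the `AuthHeader` constant and the `RequestAuthConfig` callback type that the pull/push/search options now reference. Example (not part of the diff) — a hedged sketch of supplying such a callback; it assumes `registry.EncodeAuthConfig` exists as the counterpart of the `DecodeAuthConfig` mentioned in the comment, and the credentials are placeholders:

package main

import (
	"context"
	"fmt"

	"github.com/docker/docker/api/types/registry"
)

// privilegeFunc matches registry.RequestAuthConfig: it is called after an
// authorization error and must return the base64url-encoded AuthConfig.
func privilegeFunc(ctx context.Context) (string, error) {
	auth := registry.AuthConfig{
		Username: "example-user",  // placeholder credentials
		Password: "example-token",
	}
	return registry.EncodeAuthConfig(auth)
}

func main() {
	var fn registry.RequestAuthConfig = privilegeFunc
	encoded, err := fn(context.Background())
	if err != nil {
		panic(err)
	}
	fmt.Println("value for the", registry.AuthHeader, "header:", encoded)
}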
@ -85,7 +97,7 @@ func decodeAuthConfigFromReader(rdr io.Reader) (*AuthConfig, error) {
|
|||
}
|
||||
|
||||
func invalid(err error) error {
|
||||
return errInvalidParameter{errors.Wrap(err, "invalid X-Registry-Auth header")}
|
||||
return errInvalidParameter{fmt.Errorf("invalid X-Registry-Auth header: %w", err)}
|
||||
}
|
||||
|
||||
type errInvalidParameter struct{ error }
|
||||
|
|
|
@ -9,11 +9,29 @@ import (
|
|||
|
||||
// ServiceConfig stores daemon registry services configuration.
|
||||
type ServiceConfig struct {
|
||||
AllowNondistributableArtifactsCIDRs []*NetIPNet
|
||||
AllowNondistributableArtifactsHostnames []string
|
||||
InsecureRegistryCIDRs []*NetIPNet `json:"InsecureRegistryCIDRs"`
|
||||
IndexConfigs map[string]*IndexInfo `json:"IndexConfigs"`
|
||||
Mirrors []string
|
||||
AllowNondistributableArtifactsCIDRs []*NetIPNet `json:"AllowNondistributableArtifactsCIDRs,omitempty"` // Deprecated: non-distributable artifacts are deprecated and enabled by default. This field will be removed in the next release.
|
||||
AllowNondistributableArtifactsHostnames []string `json:"AllowNondistributableArtifactsHostnames,omitempty"` // Deprecated: non-distributable artifacts are deprecated and enabled by default. This field will be removed in the next release.
|
||||
|
||||
InsecureRegistryCIDRs []*NetIPNet `json:"InsecureRegistryCIDRs"`
|
||||
IndexConfigs map[string]*IndexInfo `json:"IndexConfigs"`
|
||||
Mirrors []string
|
||||
}
|
||||
|
||||
// MarshalJSON implements a custom marshaler to include legacy fields
|
||||
// in API responses.
|
||||
func (sc ServiceConfig) MarshalJSON() ([]byte, error) {
|
||||
tmp := map[string]interface{}{
|
||||
"InsecureRegistryCIDRs": sc.InsecureRegistryCIDRs,
|
||||
"IndexConfigs": sc.IndexConfigs,
|
||||
"Mirrors": sc.Mirrors,
|
||||
}
|
||||
if sc.AllowNondistributableArtifactsCIDRs != nil {
|
||||
tmp["AllowNondistributableArtifactsCIDRs"] = nil
|
||||
}
|
||||
if sc.AllowNondistributableArtifactsHostnames != nil {
|
||||
tmp["AllowNondistributableArtifactsHostnames"] = nil
|
||||
}
|
||||
return json.Marshal(tmp)
|
||||
}
|
||||
|
||||
// NetIPNet is the net.IPNet type, which can be marshalled and
|
||||
|
|
|
@ -10,11 +10,12 @@ import (
|
|||
type SearchOptions struct {
|
||||
RegistryAuth string
|
||||
|
||||
// PrivilegeFunc is a [types.RequestPrivilegeFunc] the client can
|
||||
// supply to retry operations after getting an authorization error.
|
||||
// PrivilegeFunc is a function that clients can supply to retry operations
|
||||
// after getting an authorization error. This function returns the registry
|
||||
// authentication header value in base64 encoded format, or an error if the
|
||||
// privilege request fails.
|
||||
//
|
||||
// It must return the registry authentication header value in base64
|
||||
// format, or an error if the privilege request fails.
|
||||
// For details, refer to [github.com/docker/docker/api/types/registry.RequestAuthConfig].
|
||||
PrivilegeFunc func(context.Context) (string, error)
|
||||
Filters filters.Args
|
||||
Limit int
|
||||
|
|
|
@@ -1,13 +1,13 @@
package types
package storage

// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command

// GraphDriverData Information about the storage driver used to store the container's and
// DriverData Information about the storage driver used to store the container's and
// image's filesystem.
//
// swagger:model GraphDriverData
type GraphDriverData struct {
// swagger:model DriverData
type DriverData struct {

	// Low-level storage metadata, provided as key/value pairs.
	//
@ -29,8 +29,8 @@ type Info struct {
|
|||
CPUSet bool
|
||||
PidsLimit bool
|
||||
IPv4Forwarding bool
|
||||
BridgeNfIptables bool
|
||||
BridgeNfIP6tables bool `json:"BridgeNfIp6tables"`
|
||||
BridgeNfIptables bool `json:"BridgeNfIptables"` // Deprecated: netfilter module is now loaded on-demand and no longer during daemon startup, making this field obsolete. This field is always false and will be removed in the next release.
|
||||
BridgeNfIP6tables bool `json:"BridgeNfIp6tables"` // Deprecated: netfilter module is now loaded on-demand and no longer during daemon startup, making this field obsolete. This field is always false and will be removed in the next release.
|
||||
Debug bool
|
||||
NFd int
|
||||
OomKillDisable bool
|
||||
|
@@ -137,8 +137,13 @@ type PluginsInfo struct {
// Commit holds the Git-commit (SHA1) that a binary was built from, as reported
// in the version-string of external tools, such as containerd, or runC.
type Commit struct {
	ID string       // ID is the actual commit ID of external tool.
	Expected string // Expected is the commit ID of external tool expected by dockerd as set at build time.
	// ID is the actual commit ID or version of external tool.
	ID string

	// Expected is the commit ID of external tool expected by dockerd as set at build time.
	//
	// Deprecated: this field is no longer used in API v1.49, but kept for backward-compatibility with older API versions.
	Expected string
}

// NetworkAddressPool is a temp struct used by [Info] struct.
|
|
@ -6,11 +6,8 @@ import (
|
|||
"github.com/docker/docker/api/types/container"
|
||||
"github.com/docker/docker/api/types/filters"
|
||||
"github.com/docker/docker/api/types/image"
|
||||
"github.com/docker/docker/api/types/mount"
|
||||
"github.com/docker/docker/api/types/network"
|
||||
"github.com/docker/docker/api/types/swarm"
|
||||
"github.com/docker/docker/api/types/volume"
|
||||
"github.com/docker/go-connections/nat"
|
||||
)
|
||||
|
||||
const (
|
||||
|
@ -21,145 +18,6 @@ const (
|
|||
MediaTypeMultiplexedStream = "application/vnd.docker.multiplexed-stream"
|
||||
)
|
||||
|
||||
// RootFS returns Image's RootFS description including the layer IDs.
|
||||
type RootFS struct {
|
||||
Type string `json:",omitempty"`
|
||||
Layers []string `json:",omitempty"`
|
||||
}
|
||||
|
||||
// ImageInspect contains response of Engine API:
|
||||
// GET "/images/{name:.*}/json"
|
||||
type ImageInspect struct {
|
||||
// ID is the content-addressable ID of an image.
|
||||
//
|
||||
// This identifier is a content-addressable digest calculated from the
|
||||
// image's configuration (which includes the digests of layers used by
|
||||
// the image).
|
||||
//
|
||||
// Note that this digest differs from the `RepoDigests` below, which
|
||||
// holds digests of image manifests that reference the image.
|
||||
ID string `json:"Id"`
|
||||
|
||||
// RepoTags is a list of image names/tags in the local image cache that
|
||||
// reference this image.
|
||||
//
|
||||
// Multiple image tags can refer to the same image, and this list may be
|
||||
// empty if no tags reference the image, in which case the image is
|
||||
// "untagged", in which case it can still be referenced by its ID.
|
||||
RepoTags []string
|
||||
|
||||
// RepoDigests is a list of content-addressable digests of locally available
|
||||
// image manifests that the image is referenced from. Multiple manifests can
|
||||
// refer to the same image.
|
||||
//
|
||||
// These digests are usually only available if the image was either pulled
|
||||
// from a registry, or if the image was pushed to a registry, which is when
|
||||
// the manifest is generated and its digest calculated.
|
||||
RepoDigests []string
|
||||
|
||||
// Parent is the ID of the parent image.
|
||||
//
|
||||
// Depending on how the image was created, this field may be empty and
|
||||
// is only set for images that were built/created locally. This field
|
||||
// is empty if the image was pulled from an image registry.
|
||||
Parent string
|
||||
|
||||
// Comment is an optional message that can be set when committing or
|
||||
// importing the image.
|
||||
Comment string
|
||||
|
||||
// Created is the date and time at which the image was created, formatted in
|
||||
// RFC 3339 nano-seconds (time.RFC3339Nano).
|
||||
//
|
||||
// This information is only available if present in the image,
|
||||
// and omitted otherwise.
|
||||
Created string `json:",omitempty"`
|
||||
|
||||
// Container is the ID of the container that was used to create the image.
|
||||
//
|
||||
// Depending on how the image was created, this field may be empty.
|
||||
//
|
||||
// Deprecated: this field is omitted in API v1.45, but kept for backward compatibility.
|
||||
Container string `json:",omitempty"`
|
||||
|
||||
// ContainerConfig is an optional field containing the configuration of the
|
||||
// container that was last committed when creating the image.
|
||||
//
|
||||
// Previous versions of Docker builder used this field to store build cache,
|
||||
// and it is not in active use anymore.
|
||||
//
|
||||
// Deprecated: this field is omitted in API v1.45, but kept for backward compatibility.
|
||||
ContainerConfig *container.Config `json:",omitempty"`
|
||||
|
||||
// DockerVersion is the version of Docker that was used to build the image.
|
||||
//
|
||||
// Depending on how the image was created, this field may be empty.
|
||||
DockerVersion string
|
||||
|
||||
// Author is the name of the author that was specified when committing the
|
||||
// image, or as specified through MAINTAINER (deprecated) in the Dockerfile.
|
||||
Author string
|
||||
Config *container.Config
|
||||
|
||||
// Architecture is the hardware CPU architecture that the image runs on.
|
||||
Architecture string
|
||||
|
||||
// Variant is the CPU architecture variant (presently ARM-only).
|
||||
Variant string `json:",omitempty"`
|
||||
|
||||
// OS is the Operating System the image is built to run on.
|
||||
Os string
|
||||
|
||||
// OsVersion is the version of the Operating System the image is built to
|
||||
// run on (especially for Windows).
|
||||
OsVersion string `json:",omitempty"`
|
||||
|
||||
// Size is the total size of the image including all layers it is composed of.
|
||||
Size int64
|
||||
|
||||
// VirtualSize is the total size of the image including all layers it is
|
||||
// composed of.
|
||||
//
|
||||
// Deprecated: this field is omitted in API v1.44, but kept for backward compatibility. Use Size instead.
|
||||
VirtualSize int64 `json:"VirtualSize,omitempty"`
|
||||
|
||||
// GraphDriver holds information about the storage driver used to store the
|
||||
// container's and image's filesystem.
|
||||
GraphDriver GraphDriverData
|
||||
|
||||
// RootFS contains information about the image's RootFS, including the
|
||||
// layer IDs.
|
||||
RootFS RootFS
|
||||
|
||||
// Metadata of the image in the local cache.
|
||||
//
|
||||
// This information is local to the daemon, and not part of the image itself.
|
||||
Metadata image.Metadata
|
||||
}
|
||||
|
||||
// Container contains response of Engine API:
|
||||
// GET "/containers/json"
|
||||
type Container struct {
|
||||
ID string `json:"Id"`
|
||||
Names []string
|
||||
Image string
|
||||
ImageID string
|
||||
Command string
|
||||
Created int64
|
||||
Ports []Port
|
||||
SizeRw int64 `json:",omitempty"`
|
||||
SizeRootFs int64 `json:",omitempty"`
|
||||
Labels map[string]string
|
||||
State string
|
||||
Status string
|
||||
HostConfig struct {
|
||||
NetworkMode string `json:",omitempty"`
|
||||
Annotations map[string]string `json:",omitempty"`
|
||||
}
|
||||
NetworkSettings *SummaryNetworkSettings
|
||||
Mounts []MountPoint
|
||||
}
|
||||
|
||||
// Ping contains response of Engine API:
|
||||
// GET "/_ping"
|
||||
type Ping struct {
|
||||
|
@ -205,176 +63,6 @@ type Version struct {
|
|||
BuildTime string `json:",omitempty"`
|
||||
}
|
||||
|
||||
// HealthcheckResult stores information about a single run of a healthcheck probe
|
||||
type HealthcheckResult struct {
|
||||
Start time.Time // Start is the time this check started
|
||||
End time.Time // End is the time this check ended
|
||||
ExitCode int // ExitCode meanings: 0=healthy, 1=unhealthy, 2=reserved (considered unhealthy), else=error running probe
|
||||
Output string // Output from last check
|
||||
}
|
||||
|
||||
// Health states
|
||||
const (
|
||||
NoHealthcheck = "none" // Indicates there is no healthcheck
|
||||
Starting = "starting" // Starting indicates that the container is not yet ready
|
||||
Healthy = "healthy" // Healthy indicates that the container is running correctly
|
||||
Unhealthy = "unhealthy" // Unhealthy indicates that the container has a problem
|
||||
)
|
||||
|
||||
// Health stores information about the container's healthcheck results
|
||||
type Health struct {
|
||||
Status string // Status is one of Starting, Healthy or Unhealthy
|
||||
FailingStreak int // FailingStreak is the number of consecutive failures
|
||||
Log []*HealthcheckResult // Log contains the last few results (oldest first)
|
||||
}
|
||||
|
||||
// ContainerState stores container's running state
|
||||
// it's part of ContainerJSONBase and will return by "inspect" command
|
||||
type ContainerState struct {
|
||||
Status string // String representation of the container state. Can be one of "created", "running", "paused", "restarting", "removing", "exited", or "dead"
|
||||
Running bool
|
||||
Paused bool
|
||||
Restarting bool
|
||||
OOMKilled bool
|
||||
Dead bool
|
||||
Pid int
|
||||
ExitCode int
|
||||
Error string
|
||||
StartedAt string
|
||||
FinishedAt string
|
||||
Health *Health `json:",omitempty"`
|
||||
}
|
||||
|
||||
// ContainerJSONBase contains response of Engine API:
|
||||
// GET "/containers/{name:.*}/json"
|
||||
type ContainerJSONBase struct {
|
||||
ID string `json:"Id"`
|
||||
Created string
|
||||
Path string
|
||||
Args []string
|
||||
State *ContainerState
|
||||
Image string
|
||||
ResolvConfPath string
|
||||
HostnamePath string
|
||||
HostsPath string
|
||||
LogPath string
|
||||
Node *ContainerNode `json:",omitempty"` // Deprecated: Node was only propagated by Docker Swarm standalone API. It sill be removed in the next release.
|
||||
Name string
|
||||
RestartCount int
|
||||
Driver string
|
||||
Platform string
|
||||
MountLabel string
|
||||
ProcessLabel string
|
||||
AppArmorProfile string
|
||||
ExecIDs []string
|
||||
HostConfig *container.HostConfig
|
||||
GraphDriver GraphDriverData
|
||||
SizeRw *int64 `json:",omitempty"`
|
||||
SizeRootFs *int64 `json:",omitempty"`
|
||||
}
|
||||
|
||||
// ContainerJSON is newly used struct along with MountPoint
|
||||
type ContainerJSON struct {
|
||||
*ContainerJSONBase
|
||||
Mounts []MountPoint
|
||||
Config *container.Config
|
||||
NetworkSettings *NetworkSettings
|
||||
}
|
||||
|
||||
// NetworkSettings exposes the network settings in the api
|
||||
type NetworkSettings struct {
|
||||
NetworkSettingsBase
|
||||
DefaultNetworkSettings
|
||||
Networks map[string]*network.EndpointSettings
|
||||
}
|
||||
|
||||
// SummaryNetworkSettings provides a summary of container's networks
|
||||
// in /containers/json
|
||||
type SummaryNetworkSettings struct {
|
||||
Networks map[string]*network.EndpointSettings
|
||||
}
|
||||
|
||||
// NetworkSettingsBase holds networking state for a container when inspecting it.
|
||||
type NetworkSettingsBase struct {
|
||||
Bridge string // Bridge contains the name of the default bridge interface iff it was set through the daemon --bridge flag.
|
||||
SandboxID string // SandboxID uniquely represents a container's network stack
|
||||
SandboxKey string // SandboxKey identifies the sandbox
|
||||
Ports nat.PortMap // Ports is a collection of PortBinding indexed by Port
|
||||
|
||||
// HairpinMode specifies if hairpin NAT should be enabled on the virtual interface
|
||||
//
|
||||
// Deprecated: This field is never set and will be removed in a future release.
|
||||
HairpinMode bool
|
||||
// LinkLocalIPv6Address is an IPv6 unicast address using the link-local prefix
|
||||
//
|
||||
// Deprecated: This field is never set and will be removed in a future release.
|
||||
LinkLocalIPv6Address string
|
||||
// LinkLocalIPv6PrefixLen is the prefix length of an IPv6 unicast address
|
||||
//
|
||||
// Deprecated: This field is never set and will be removed in a future release.
|
||||
LinkLocalIPv6PrefixLen int
|
||||
SecondaryIPAddresses []network.Address // Deprecated: This field is never set and will be removed in a future release.
|
||||
SecondaryIPv6Addresses []network.Address // Deprecated: This field is never set and will be removed in a future release.
|
||||
}
|
||||
|
||||
// DefaultNetworkSettings holds network information
|
||||
// during the 2 release deprecation period.
|
||||
// It will be removed in Docker 1.11.
|
||||
type DefaultNetworkSettings struct {
|
||||
EndpointID string // EndpointID uniquely represents a service endpoint in a Sandbox
|
||||
Gateway string // Gateway holds the gateway address for the network
|
||||
GlobalIPv6Address string // GlobalIPv6Address holds network's global IPv6 address
|
||||
GlobalIPv6PrefixLen int // GlobalIPv6PrefixLen represents mask length of network's global IPv6 address
|
||||
IPAddress string // IPAddress holds the IPv4 address for the network
|
||||
IPPrefixLen int // IPPrefixLen represents mask length of network's IPv4 address
|
||||
IPv6Gateway string // IPv6Gateway holds gateway address specific for IPv6
|
||||
MacAddress string // MacAddress holds the MAC address for the network
|
||||
}
|
||||
|
||||
// MountPoint represents a mount point configuration inside the container.
|
||||
// This is used for reporting the mountpoints in use by a container.
|
||||
type MountPoint struct {
|
||||
// Type is the type of mount, see `Type<foo>` definitions in
|
||||
// github.com/docker/docker/api/types/mount.Type
|
||||
Type mount.Type `json:",omitempty"`
|
||||
|
||||
// Name is the name reference to the underlying data defined by `Source`
|
||||
// e.g., the volume name.
|
||||
Name string `json:",omitempty"`
|
||||
|
||||
// Source is the source location of the mount.
|
||||
//
|
||||
// For volumes, this contains the storage location of the volume (within
|
||||
// `/var/lib/docker/volumes/`). For bind-mounts, and `npipe`, this contains
|
||||
// the source (host) part of the bind-mount. For `tmpfs` mount points, this
|
||||
// field is empty.
|
||||
Source string
|
||||
|
||||
// Destination is the path relative to the container root (`/`) where the
|
||||
// Source is mounted inside the container.
|
||||
Destination string
|
||||
|
||||
// Driver is the volume driver used to create the volume (if it is a volume).
|
||||
Driver string `json:",omitempty"`
|
||||
|
||||
// Mode is a comma separated list of options supplied by the user when
|
||||
// creating the bind/volume mount.
|
||||
//
|
||||
// The default is platform-specific (`"z"` on Linux, empty on Windows).
|
||||
Mode string
|
||||
|
||||
// RW indicates whether the mount is mounted writable (read-write).
|
||||
RW bool
|
||||
|
||||
// Propagation describes how mounts are propagated from the host into the
|
||||
// mount point, and vice-versa. Refer to the Linux kernel documentation
|
||||
// for details:
|
||||
// https://www.kernel.org/doc/Documentation/filesystems/sharedsubtree.txt
|
||||
//
|
||||
// This field is not used on Windows.
|
||||
Propagation mount.Propagation
|
||||
}
|
||||
|
||||
// DiskUsageObject represents an object type used for disk usage query filtering.
|
||||
type DiskUsageObject string
|
||||
|
||||
|
@ -401,7 +89,7 @@ type DiskUsageOptions struct {
|
|||
type DiskUsage struct {
|
||||
LayersSize int64
|
||||
Images []*image.Summary
|
||||
Containers []*Container
|
||||
Containers []*container.Summary
|
||||
Volumes []*volume.Volume
|
||||
BuildCache []*BuildCache
|
||||
BuilderSize int64 `json:",omitempty"` // Deprecated: deprecated in API 1.38, and no longer used since API 1.40.
|
||||
|
@ -481,9 +169,11 @@ type BuildCache struct {
|
|||
|
||||
// BuildCachePruneOptions hold parameters to prune the build cache
|
||||
type BuildCachePruneOptions struct {
|
||||
All bool
|
||||
KeepStorage int64
|
||||
Filters filters.Args
|
||||
All bool
|
||||
ReservedSpace int64
|
||||
MaxUsedSpace int64
|
||||
MinFreeSpace int64
|
||||
Filters filters.Args
|
||||
|
||||
// FIXME(thaJeztah): add new options; see https://github.com/moby/moby/issues/48639
|
||||
KeepStorage int64 // Deprecated: deprecated in API 1.48.
|
||||
}
|
||||
|
|
|
@ -1,210 +1,115 @@
|
|||
package types
|
||||
|
||||
import (
|
||||
"context"
|
||||
|
||||
"github.com/docker/docker/api/types/common"
|
||||
"github.com/docker/docker/api/types/container"
|
||||
"github.com/docker/docker/api/types/events"
|
||||
"github.com/docker/docker/api/types/image"
|
||||
"github.com/docker/docker/api/types/network"
|
||||
"github.com/docker/docker/api/types/registry"
|
||||
"github.com/docker/docker/api/types/volume"
|
||||
"github.com/docker/docker/api/types/storage"
|
||||
)
|
||||
|
||||
// ImagesPruneReport contains the response for Engine API:
|
||||
// POST "/images/prune"
|
||||
// IDResponse Response to an API call that returns just an Id.
|
||||
//
|
||||
// Deprecated: use [image.PruneReport].
|
||||
type ImagesPruneReport = image.PruneReport
|
||||
// Deprecated: use either [container.CommitResponse] or [container.ExecCreateResponse]. It will be removed in the next release.
|
||||
type IDResponse = common.IDResponse
|
||||
|
||||
// VolumesPruneReport contains the response for Engine API:
|
||||
// POST "/volumes/prune".
|
||||
// ContainerJSONBase contains response of Engine API GET "/containers/{name:.*}/json"
|
||||
// for API version 1.18 and older.
|
||||
//
|
||||
// Deprecated: use [volume.PruneReport].
|
||||
type VolumesPruneReport = volume.PruneReport
|
||||
// Deprecated: use [container.InspectResponse] or [container.ContainerJSONBase]. It will be removed in the next release.
|
||||
type ContainerJSONBase = container.ContainerJSONBase
|
||||
|
||||
// NetworkCreateRequest is the request message sent to the server for network create call.
|
||||
// ContainerJSON is the response for the GET "/containers/{name:.*}/json"
|
||||
// endpoint.
|
||||
//
|
||||
// Deprecated: use [network.CreateRequest].
|
||||
type NetworkCreateRequest = network.CreateRequest
|
||||
// Deprecated: use [container.InspectResponse]. It will be removed in the next release.
|
||||
type ContainerJSON = container.InspectResponse
|
||||
|
||||
// NetworkCreate is the expected body of the "create network" http request message
|
||||
// Container contains response of Engine API:
|
||||
// GET "/containers/json"
|
||||
//
|
||||
// Deprecated: use [network.CreateOptions].
|
||||
type NetworkCreate = network.CreateOptions
|
||||
// Deprecated: use [container.Summary].
|
||||
type Container = container.Summary
|
||||
|
||||
// NetworkListOptions holds parameters to filter the list of networks with.
|
||||
// ContainerState stores container's running state
|
||||
//
|
||||
// Deprecated: use [network.ListOptions].
|
||||
type NetworkListOptions = network.ListOptions
|
||||
// Deprecated: use [container.State].
|
||||
type ContainerState = container.State
|
||||
|
||||
// NetworkCreateResponse is the response message sent by the server for network create call.
|
||||
// NetworkSettings exposes the network settings in the api.
|
||||
//
|
||||
// Deprecated: use [network.CreateResponse].
|
||||
type NetworkCreateResponse = network.CreateResponse
|
||||
// Deprecated: use [container.NetworkSettings].
|
||||
type NetworkSettings = container.NetworkSettings
|
||||
|
||||
// NetworkInspectOptions holds parameters to inspect network.
|
||||
// NetworkSettingsBase holds networking state for a container when inspecting it.
|
||||
//
|
||||
// Deprecated: use [network.InspectOptions].
|
||||
type NetworkInspectOptions = network.InspectOptions
|
||||
// Deprecated: use [container.NetworkSettingsBase].
|
||||
type NetworkSettingsBase = container.NetworkSettingsBase
|
||||
|
||||
// NetworkConnect represents the data to be used to connect a container to the network
|
||||
// DefaultNetworkSettings holds network information
|
||||
// during the 2 release deprecation period.
|
||||
// It will be removed in Docker 1.11.
|
||||
//
|
||||
// Deprecated: use [network.ConnectOptions].
|
||||
type NetworkConnect = network.ConnectOptions
|
||||
// Deprecated: use [container.DefaultNetworkSettings].
|
||||
type DefaultNetworkSettings = container.DefaultNetworkSettings
|
||||
|
||||
// NetworkDisconnect represents the data to be used to disconnect a container from the network
|
||||
// SummaryNetworkSettings provides a summary of container's networks
|
||||
// in /containers/json.
|
||||
//
|
||||
// Deprecated: use [network.DisconnectOptions].
|
||||
type NetworkDisconnect = network.DisconnectOptions
|
||||
// Deprecated: use [container.NetworkSettingsSummary].
|
||||
type SummaryNetworkSettings = container.NetworkSettingsSummary
|
||||
|
||||
// EndpointResource contains network resources allocated and used for a container in a network.
|
||||
//
|
||||
// Deprecated: use [network.EndpointResource].
|
||||
type EndpointResource = network.EndpointResource
|
||||
// Health states
|
||||
const (
|
||||
NoHealthcheck = container.NoHealthcheck // Deprecated: use [container.NoHealthcheck].
|
||||
Starting = container.Starting // Deprecated: use [container.Starting].
|
||||
Healthy = container.Healthy // Deprecated: use [container.Healthy].
|
||||
Unhealthy = container.Unhealthy // Deprecated: use [container.Unhealthy].
|
||||
)
|
||||
|
||||
// NetworkResource is the body of the "get network" http response message/
|
||||
// Health stores information about the container's healthcheck results.
|
||||
//
|
||||
// Deprecated: use [network.Inspect] or [network.Summary] (for list operations).
|
||||
type NetworkResource = network.Inspect
|
||||
// Deprecated: use [container.Health].
|
||||
type Health = container.Health
|
||||
|
||||
// NetworksPruneReport contains the response for Engine API:
|
||||
// POST "/networks/prune"
|
||||
// HealthcheckResult stores information about a single run of a healthcheck probe.
|
||||
//
|
||||
// Deprecated: use [network.PruneReport].
|
||||
type NetworksPruneReport = network.PruneReport
|
||||
// Deprecated: use [container.HealthcheckResult].
|
||||
type HealthcheckResult = container.HealthcheckResult
|
||||
|
||||
// ExecConfig is a small subset of the Config struct that holds the configuration
|
||||
// for the exec feature of docker.
|
||||
// MountPoint represents a mount point configuration inside the container.
|
||||
// This is used for reporting the mountpoints in use by a container.
|
||||
//
|
||||
// Deprecated: use [container.ExecOptions].
|
||||
type ExecConfig = container.ExecOptions
|
||||
// Deprecated: use [container.MountPoint].
|
||||
type MountPoint = container.MountPoint
|
||||
|
||||
// ExecStartCheck is a temp struct used by execStart
|
||||
// Config fields is part of ExecConfig in runconfig package
|
||||
// Port An open port on a container
|
||||
//
|
||||
// Deprecated: use [container.ExecStartOptions] or [container.ExecAttachOptions].
|
||||
type ExecStartCheck = container.ExecStartOptions
|
||||
// Deprecated: use [container.Port].
|
||||
type Port = container.Port
|
||||
|
||||
// ContainerExecInspect holds information returned by exec inspect.
|
||||
// GraphDriverData Information about the storage driver used to store the container's and
|
||||
// image's filesystem.
|
||||
//
|
||||
// Deprecated: use [container.ExecInspect].
|
||||
type ContainerExecInspect = container.ExecInspect
|
||||
// Deprecated: use [storage.DriverData].
|
||||
type GraphDriverData = storage.DriverData
|
||||
|
||||
// ContainersPruneReport contains the response for Engine API:
|
||||
// POST "/containers/prune"
|
||||
// RootFS returns Image's RootFS description including the layer IDs.
|
||||
//
|
||||
// Deprecated: use [container.PruneReport].
|
||||
type ContainersPruneReport = container.PruneReport
|
||||
// Deprecated: use [image.RootFS].
|
||||
type RootFS = image.RootFS
|
||||
|
||||
// ContainerPathStat is used to encode the header from
|
||||
// GET "/containers/{name:.*}/archive"
|
||||
// "Name" is the file or directory name.
|
||||
// ImageInspect contains response of Engine API:
|
||||
// GET "/images/{name:.*}/json"
|
||||
//
|
||||
// Deprecated: use [container.PathStat].
|
||||
type ContainerPathStat = container.PathStat
|
||||
// Deprecated: use [image.InspectResponse].
|
||||
type ImageInspect = image.InspectResponse
|
||||
|
||||
// CopyToContainerOptions holds information
|
||||
// about files to copy into a container.
|
||||
// RequestPrivilegeFunc is a function interface that clients can supply to
|
||||
// retry operations after getting an authorization error.
|
||||
// This function returns the registry authentication header value in base64
|
||||
// format, or an error if the privilege request fails.
|
||||
//
|
||||
// Deprecated: use [container.CopyToContainerOptions],
|
||||
type CopyToContainerOptions = container.CopyToContainerOptions
|
||||
|
||||
// ContainerStats contains response of Engine API:
|
||||
// GET "/stats"
|
||||
//
|
||||
// Deprecated: use [container.StatsResponseReader].
|
||||
type ContainerStats = container.StatsResponseReader
|
||||
|
||||
// ThrottlingData stores CPU throttling stats of one running container.
|
||||
// Not used on Windows.
|
||||
//
|
||||
// Deprecated: use [container.ThrottlingData].
|
||||
type ThrottlingData = container.ThrottlingData
|
||||
|
||||
// CPUUsage stores All CPU stats aggregated since container inception.
|
||||
//
|
||||
// Deprecated: use [container.CPUUsage].
|
||||
type CPUUsage = container.CPUUsage
|
||||
|
||||
// CPUStats aggregates and wraps all CPU related info of container
|
||||
//
|
||||
// Deprecated: use [container.CPUStats].
|
||||
type CPUStats = container.CPUStats
|
||||
|
||||
// MemoryStats aggregates all memory stats since container inception on Linux.
|
||||
// Windows returns stats for commit and private working set only.
|
||||
//
|
||||
// Deprecated: use [container.MemoryStats].
|
||||
type MemoryStats = container.MemoryStats
|
||||
|
||||
// BlkioStatEntry is one small entity to store a piece of Blkio stats
|
||||
// Not used on Windows.
|
||||
//
|
||||
// Deprecated: use [container.BlkioStatEntry].
|
||||
type BlkioStatEntry = container.BlkioStatEntry
|
||||
|
||||
// BlkioStats stores All IO service stats for data read and write.
|
||||
// This is a Linux specific structure as the differences between expressing
|
||||
// block I/O on Windows and Linux are sufficiently significant to make
|
||||
// little sense attempting to morph into a combined structure.
|
||||
//
|
||||
// Deprecated: use [container.BlkioStats].
|
||||
type BlkioStats = container.BlkioStats
|
||||
|
||||
// StorageStats is the disk I/O stats for read/write on Windows.
|
||||
//
|
||||
// Deprecated: use [container.StorageStats].
|
||||
type StorageStats = container.StorageStats
|
||||
|
||||
// NetworkStats aggregates the network stats of one container
|
||||
//
|
||||
// Deprecated: use [container.NetworkStats].
|
||||
type NetworkStats = container.NetworkStats
|
||||
|
||||
// PidsStats contains the stats of a container's pids
|
||||
//
|
||||
// Deprecated: use [container.PidsStats].
|
||||
type PidsStats = container.PidsStats
|
||||
|
||||
// Stats is Ultimate struct aggregating all types of stats of one container
|
||||
//
|
||||
// Deprecated: use [container.Stats].
|
||||
type Stats = container.Stats
|
||||
|
||||
// StatsJSON is newly used Networks
|
||||
//
|
||||
// Deprecated: use [container.StatsResponse].
|
||||
type StatsJSON = container.StatsResponse
|
||||
|
||||
// EventsOptions holds parameters to filter events with.
|
||||
//
|
||||
// Deprecated: use [events.ListOptions].
|
||||
type EventsOptions = events.ListOptions
|
||||
|
||||
// ImageSearchOptions holds parameters to search images with.
|
||||
//
|
||||
// Deprecated: use [registry.SearchOptions].
|
||||
type ImageSearchOptions = registry.SearchOptions
|
||||
|
||||
// ImageImportSource holds source information for ImageImport
|
||||
//
|
||||
// Deprecated: use [image.ImportSource].
|
||||
type ImageImportSource image.ImportSource
|
||||
|
||||
// ImageLoadResponse returns information to the client about a load process.
|
||||
//
|
||||
// Deprecated: use [image.LoadResponse].
|
||||
type ImageLoadResponse = image.LoadResponse
|
||||
|
||||
// ContainerNode stores information about the node that a container
|
||||
// is running on. It's only used by the Docker Swarm standalone API.
|
||||
//
|
||||
// Deprecated: ContainerNode was used for the classic Docker Swarm standalone API. It will be removed in the next release.
|
||||
type ContainerNode struct {
|
||||
ID string
|
||||
IPAddress string `json:"IP"`
|
||||
Addr string
|
||||
Name string
|
||||
Cpus int
|
||||
Memory int64
|
||||
Labels map[string]string
|
||||
}
|
||||
// Deprecated: moved to [github.com/docker/docker/api/types/registry.RequestAuthConfig].
|
||||
type RequestPrivilegeFunc func(context.Context) (string, error)
|
||||
|
|
|
@@ -10,7 +10,7 @@ func (cli *Client) BuildCancel(ctx context.Context, id string) error {
	query := url.Values{}
	query.Set("id", id)

	serverResp, err := cli.post(ctx, "/build/cancel", query, nil, nil)
	ensureReaderClosed(serverResp)
	resp, err := cli.post(ctx, "/build/cancel", query, nil, nil)
	ensureReaderClosed(resp)
	return err
}
|
|
@ -17,27 +17,38 @@ func (cli *Client) BuildCachePrune(ctx context.Context, opts types.BuildCachePru
|
|||
return nil, err
|
||||
}
|
||||
|
||||
report := types.BuildCachePruneReport{}
|
||||
|
||||
query := url.Values{}
|
||||
if opts.All {
|
||||
query.Set("all", "1")
|
||||
}
|
||||
query.Set("keep-storage", strconv.Itoa(int(opts.KeepStorage)))
|
||||
|
||||
if opts.KeepStorage != 0 {
|
||||
query.Set("keep-storage", strconv.Itoa(int(opts.KeepStorage)))
|
||||
}
|
||||
if opts.ReservedSpace != 0 {
|
||||
query.Set("reserved-space", strconv.Itoa(int(opts.ReservedSpace)))
|
||||
}
|
||||
if opts.MaxUsedSpace != 0 {
|
||||
query.Set("max-used-space", strconv.Itoa(int(opts.MaxUsedSpace)))
|
||||
}
|
||||
if opts.MinFreeSpace != 0 {
|
||||
query.Set("min-free-space", strconv.Itoa(int(opts.MinFreeSpace)))
|
||||
}
|
||||
f, err := filters.ToJSON(opts.Filters)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "prune could not marshal filters option")
|
||||
}
|
||||
query.Set("filters", f)
|
||||
|
||||
serverResp, err := cli.post(ctx, "/build/prune", query, nil, nil)
|
||||
defer ensureReaderClosed(serverResp)
|
||||
resp, err := cli.post(ctx, "/build/prune", query, nil, nil)
|
||||
defer ensureReaderClosed(resp)
|
||||
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if err := json.NewDecoder(serverResp.body).Decode(&report); err != nil {
|
||||
report := types.BuildCachePruneReport{}
|
||||
if err := json.NewDecoder(resp.Body).Decode(&report); err != nil {
|
||||
return nil, errors.Wrap(err, "error retrieving disk usage")
|
||||
}
|
||||
|
||||
|
|
|
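The prune options grow from a single keep-storage knob to ReservedSpace, MaxUsedSpace and MinFreeSpace, each sent only when non-zero. A hedged usage sketch (the numeric value is illustrative):

    package example

    import (
        "context"

        "github.com/docker/docker/api/types"
        "github.com/docker/docker/client"
    )

    // pruneBuildCache prunes the build cache, keeping roughly 1 GiB reserved.
    // KeepStorage is the older field; ReservedSpace is its replacement, both
    // of which appear in the diff above.
    func pruneBuildCache(ctx context.Context, cli *client.Client) (*types.BuildCachePruneReport, error) {
        return cli.BuildCachePrune(ctx, types.BuildCachePruneOptions{
            All:           true,
            ReservedSpace: 1 << 30, // bytes; illustrative value
        })
    }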
@@ -6,11 +6,11 @@ import (
"github.com/docker/docker/api/types/checkpoint"
)

type apiClientExperimental interface {
CheckpointAPIClient
}

// CheckpointAPIClient defines API client methods for the checkpoints
// CheckpointAPIClient defines API client methods for the checkpoints.
//
// Experimental: checkpoint and restore is still an experimental feature,
// and only available if the daemon is running with experimental features
// enabled.
type CheckpointAPIClient interface {
CheckpointCreate(ctx context.Context, container string, options checkpoint.CreateOptions) error
CheckpointDelete(ctx context.Context, container string, options checkpoint.DeleteOptions) error

@@ -7,8 +7,13 @@ import (
)

// CheckpointCreate creates a checkpoint from the given container with the given name
func (cli *Client) CheckpointCreate(ctx context.Context, container string, options checkpoint.CreateOptions) error {
resp, err := cli.post(ctx, "/containers/"+container+"/checkpoints", nil, options, nil)
func (cli *Client) CheckpointCreate(ctx context.Context, containerID string, options checkpoint.CreateOptions) error {
containerID, err := trimID("container", containerID)
if err != nil {
return err
}

resp, err := cli.post(ctx, "/containers/"+containerID+"/checkpoints", nil, options, nil)
ensureReaderClosed(resp)
return err
}
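Many methods in this diff now funnel their ID argument through trimID before building the request path. The helper itself is not shown here; a minimal sketch of what such a helper presumably does (name, behavior and error text are assumptions, not the vendored implementation):

    package example

    import (
        "fmt"
        "strings"
    )

    // trimIDSketch mirrors the pattern visible above: reject empty IDs early
    // instead of sending a request with an empty path segment.
    func trimIDSketch(objectType, id string) (string, error) {
        id = strings.TrimSpace(id)
        if id == "" {
            return "", fmt.Errorf("%s ID must not be empty", objectType)
        }
        return id, nil
    }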
@@ -9,6 +9,11 @@ import (

// CheckpointDelete deletes the checkpoint with the given name from the given container
func (cli *Client) CheckpointDelete(ctx context.Context, containerID string, options checkpoint.DeleteOptions) error {
containerID, err := trimID("container", containerID)
if err != nil {
return err
}

query := url.Values{}
if options.CheckpointDir != "" {
query.Set("dir", options.CheckpointDir)

@@ -23,6 +23,6 @@ func (cli *Client) CheckpointList(ctx context.Context, container string, options
return checkpoints, err
}

err = json.NewDecoder(resp.body).Decode(&checkpoints)
err = json.NewDecoder(resp.Body).Decode(&checkpoints)
return checkpoints, err
}

@@ -59,7 +59,6 @@ import (
"github.com/docker/go-connections/sockets"
"github.com/pkg/errors"
"go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp"
"go.opentelemetry.io/otel/trace"
)

// DummyHost is a hostname used for local communication.

@@ -99,6 +98,9 @@ const DummyHost = "api.moby.localhost"
// recent version before negotiation was introduced.
const fallbackAPIVersion = "1.24"

// Ensure that Client always implements APIClient.
var _ APIClient = &Client{}

// Client is the API client that performs all operations
// against a docker server.
type Client struct {

@@ -138,7 +140,7 @@ type Client struct {
// negotiateLock is used to single-flight the version negotiation process
negotiateLock sync.Mutex

tp trace.TracerProvider
traceOpts []otelhttp.Option

// When the client transport is an *http.Transport (default) we need to do some extra things (like closing idle connections).
// Store the original transport as the http.Client transport will be wrapped with tracing libs.

@@ -200,6 +202,12 @@ func NewClientWithOpts(ops ...Opt) (*Client, error) {
client: client,
proto: hostURL.Scheme,
addr: hostURL.Host,

traceOpts: []otelhttp.Option{
otelhttp.WithSpanNameFormatter(func(_ string, req *http.Request) string {
return req.Method + " " + req.URL.Path
}),
},
}

for _, op := range ops {

@@ -227,13 +235,7 @@ func NewClientWithOpts(ops ...Opt) (*Client, error) {
}
}

c.client.Transport = otelhttp.NewTransport(
c.client.Transport,
otelhttp.WithTracerProvider(c.tp),
otelhttp.WithSpanNameFormatter(func(_ string, req *http.Request) string {
return req.Method + " " + req.URL.Path
}),
)
c.client.Transport = otelhttp.NewTransport(c.client.Transport, c.traceOpts...)

return c, nil
}
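The span-name formatter now lives in Client.traceOpts, populated in NewClientWithOpts and applied once when the transport is wrapped; construction on the caller side is unchanged. A typical setup using the standard option helpers:

    package example

    import "github.com/docker/docker/client"

    // newDockerClient builds a client from the usual DOCKER_* environment
    // variables and negotiates the API version with the daemon; the OTel
    // transport wrapping shown above happens inside NewClientWithOpts.
    func newDockerClient() (*client.Client, error) {
        return client.NewClientWithOpts(
            client.FromEnv,
            client.WithAPIVersionNegotiation(),
        )
    }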
@@ -304,8 +306,7 @@ func (cli *Client) getAPIPath(ctx context.Context, p string, query url.Values) s
var apiPath string
_ = cli.checkVersion(ctx)
if cli.version != "" {
v := strings.TrimPrefix(cli.version, "v")
apiPath = path.Join(cli.basePath, "/v"+v, p)
apiPath = path.Join(cli.basePath, "/v"+strings.TrimPrefix(cli.version, "v"), p)
} else {
apiPath = path.Join(cli.basePath, p)
}

@@ -450,6 +451,10 @@ func (cli *Client) dialerFromTransport() func(context.Context, string, string) (
//
// ["docker dial-stdio"]: https://github.com/docker/cli/pull/1014
func (cli *Client) Dialer() func(context.Context) (net.Conn, error) {
return cli.dialer()
}

func (cli *Client) dialer() func(context.Context) (net.Conn, error) {
return func(ctx context.Context) (net.Conn, error) {
if dialFn := cli.dialerFromTransport(); dialFn != nil {
return dialFn(ctx, cli.proto, cli.addr)

@@ -20,17 +20,23 @@ import (
)

// CommonAPIClient is the common methods between stable and experimental versions of APIClient.
type CommonAPIClient interface {
//
// Deprecated: use [APIClient] instead. This type will be an alias for [APIClient] in the next release, and removed after.
type CommonAPIClient = stableAPIClient

// APIClient is an interface that clients that talk with a docker server must implement.
type APIClient interface {
stableAPIClient
CheckpointAPIClient // CheckpointAPIClient is still experimental.
}

type stableAPIClient interface {
ConfigAPIClient
ContainerAPIClient
DistributionAPIClient
ImageAPIClient
NodeAPIClient
NetworkAPIClient
PluginAPIClient
ServiceAPIClient
SwarmAPIClient
SecretAPIClient
SystemAPIClient
VolumeAPIClient
ClientVersion() string

@@ -39,27 +45,43 @@ type CommonAPIClient interface {
ServerVersion(ctx context.Context) (types.Version, error)
NegotiateAPIVersion(ctx context.Context)
NegotiateAPIVersionPing(types.Ping)
DialHijack(ctx context.Context, url, proto string, meta map[string][]string) (net.Conn, error)
HijackDialer
Dialer() func(context.Context) (net.Conn, error)
Close() error
SwarmManagementAPIClient
}

// SwarmManagementAPIClient defines all methods for managing Swarm-specific
// objects.
type SwarmManagementAPIClient interface {
SwarmAPIClient
NodeAPIClient
ServiceAPIClient
SecretAPIClient
ConfigAPIClient
}

// HijackDialer defines methods for a hijack dialer.
type HijackDialer interface {
DialHijack(ctx context.Context, url, proto string, meta map[string][]string) (net.Conn, error)
}

// ContainerAPIClient defines API client methods for the containers
type ContainerAPIClient interface {
ContainerAttach(ctx context.Context, container string, options container.AttachOptions) (types.HijackedResponse, error)
ContainerCommit(ctx context.Context, container string, options container.CommitOptions) (types.IDResponse, error)
ContainerCommit(ctx context.Context, container string, options container.CommitOptions) (container.CommitResponse, error)
ContainerCreate(ctx context.Context, config *container.Config, hostConfig *container.HostConfig, networkingConfig *network.NetworkingConfig, platform *ocispec.Platform, containerName string) (container.CreateResponse, error)
ContainerDiff(ctx context.Context, container string) ([]container.FilesystemChange, error)
ContainerExecAttach(ctx context.Context, execID string, options container.ExecAttachOptions) (types.HijackedResponse, error)
ContainerExecCreate(ctx context.Context, container string, options container.ExecOptions) (types.IDResponse, error)
ContainerExecCreate(ctx context.Context, container string, options container.ExecOptions) (container.ExecCreateResponse, error)
ContainerExecInspect(ctx context.Context, execID string) (container.ExecInspect, error)
ContainerExecResize(ctx context.Context, execID string, options container.ResizeOptions) error
ContainerExecStart(ctx context.Context, execID string, options container.ExecStartOptions) error
ContainerExport(ctx context.Context, container string) (io.ReadCloser, error)
ContainerInspect(ctx context.Context, container string) (types.ContainerJSON, error)
ContainerInspectWithRaw(ctx context.Context, container string, getSize bool) (types.ContainerJSON, []byte, error)
ContainerInspect(ctx context.Context, container string) (container.InspectResponse, error)
ContainerInspectWithRaw(ctx context.Context, container string, getSize bool) (container.InspectResponse, []byte, error)
ContainerKill(ctx context.Context, container, signal string) error
ContainerList(ctx context.Context, options container.ListOptions) ([]types.Container, error)
ContainerList(ctx context.Context, options container.ListOptions) ([]container.Summary, error)
ContainerLogs(ctx context.Context, container string, options container.LogsOptions) (io.ReadCloser, error)
ContainerPause(ctx context.Context, container string) error
ContainerRemove(ctx context.Context, container string, options container.RemoveOptions) error

@@ -71,9 +93,9 @@ type ContainerAPIClient interface {
ContainerStatsOneShot(ctx context.Context, container string) (container.StatsResponseReader, error)
ContainerStart(ctx context.Context, container string, options container.StartOptions) error
ContainerStop(ctx context.Context, container string, options container.StopOptions) error
ContainerTop(ctx context.Context, container string, arguments []string) (container.ContainerTopOKBody, error)
ContainerTop(ctx context.Context, container string, arguments []string) (container.TopResponse, error)
ContainerUnpause(ctx context.Context, container string) error
ContainerUpdate(ctx context.Context, container string, updateConfig container.UpdateConfig) (container.ContainerUpdateOKBody, error)
ContainerUpdate(ctx context.Context, container string, updateConfig container.UpdateConfig) (container.UpdateResponse, error)
ContainerWait(ctx context.Context, container string, condition container.WaitCondition) (<-chan container.WaitResponse, <-chan error)
CopyFromContainer(ctx context.Context, container, srcPath string) (io.ReadCloser, container.PathStat, error)
CopyToContainer(ctx context.Context, container, path string, content io.Reader, options container.CopyToContainerOptions) error
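Because CommonAPIClient becomes an alias for the stable interface and APIClient now embeds it together with the experimental checkpoint methods, consumers can depend on client.APIClient (or a narrower interface) rather than *client.Client. A small consumer sketch using the updated ContainerList signature (helper name is made up):

    package example

    import (
        "context"

        "github.com/docker/docker/api/types/container"
        "github.com/docker/docker/client"
    )

    // runningContainerNames works against any client.APIClient implementation,
    // which keeps it testable with a fake client. ContainerList now returns
    // []container.Summary instead of []types.Container.
    func runningContainerNames(ctx context.Context, cli client.APIClient) ([]string, error) {
        summaries, err := cli.ContainerList(ctx, container.ListOptions{})
        if err != nil {
            return nil, err
        }
        var names []string
        for _, c := range summaries {
            names = append(names, c.Names...)
        }
        return names, nil
    }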
@@ -91,18 +113,30 @@ type ImageAPIClient interface {
BuildCachePrune(ctx context.Context, opts types.BuildCachePruneOptions) (*types.BuildCachePruneReport, error)
BuildCancel(ctx context.Context, id string) error
ImageCreate(ctx context.Context, parentReference string, options image.CreateOptions) (io.ReadCloser, error)
ImageHistory(ctx context.Context, image string) ([]image.HistoryResponseItem, error)
ImageImport(ctx context.Context, source image.ImportSource, ref string, options image.ImportOptions) (io.ReadCloser, error)
ImageInspectWithRaw(ctx context.Context, image string) (types.ImageInspect, []byte, error)

ImageList(ctx context.Context, options image.ListOptions) ([]image.Summary, error)
ImageLoad(ctx context.Context, input io.Reader, quiet bool) (image.LoadResponse, error)
ImagePull(ctx context.Context, ref string, options image.PullOptions) (io.ReadCloser, error)
ImagePush(ctx context.Context, ref string, options image.PushOptions) (io.ReadCloser, error)
ImageRemove(ctx context.Context, image string, options image.RemoveOptions) ([]image.DeleteResponse, error)
ImageSearch(ctx context.Context, term string, options registry.SearchOptions) ([]registry.SearchResult, error)
ImageSave(ctx context.Context, images []string) (io.ReadCloser, error)
ImageTag(ctx context.Context, image, ref string) error
ImagesPrune(ctx context.Context, pruneFilter filters.Args) (image.PruneReport, error)

ImageInspect(ctx context.Context, image string, _ ...ImageInspectOption) (image.InspectResponse, error)
ImageHistory(ctx context.Context, image string, _ ...ImageHistoryOption) ([]image.HistoryResponseItem, error)
ImageLoad(ctx context.Context, input io.Reader, _ ...ImageLoadOption) (image.LoadResponse, error)
ImageSave(ctx context.Context, images []string, _ ...ImageSaveOption) (io.ReadCloser, error)

ImageAPIClientDeprecated
}

// ImageAPIClientDeprecated defines deprecated methods of the ImageAPIClient.
type ImageAPIClientDeprecated interface {
// ImageInspectWithRaw returns the image information and its raw representation.
//
// Deprecated: Use [Client.ImageInspect] instead. Raw response can be obtained using the [ImageInspectWithRawResponse] option.
ImageInspectWithRaw(ctx context.Context, image string) (image.InspectResponse, []byte, error)
}

// NetworkAPIClient defines API client methods for the networks
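ImageInspectWithRaw moves into a deprecated sub-interface, while ImageInspect, ImageHistory, ImageLoad and ImageSave gain variadic options; per the comment above, the raw body can be captured via the ImageInspectWithRawResponse option. A hedged call sketch with no options passed:

    package example

    import (
        "context"

        "github.com/docker/docker/api/types/image"
        "github.com/docker/docker/client"
    )

    // inspectImage uses the new option-style ImageInspect instead of the
    // deprecated ImageInspectWithRaw.
    func inspectImage(ctx context.Context, cli *client.Client, ref string) (image.InspectResponse, error) {
        return cli.ImageInspect(ctx, ref)
    }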
@@ -20,6 +20,6 @@ func (cli *Client) ConfigCreate(ctx context.Context, config swarm.ConfigSpec) (t
return response, err
}

err = json.NewDecoder(resp.body).Decode(&response)
err = json.NewDecoder(resp.Body).Decode(&response)
return response, err
}
@@ -11,8 +11,9 @@ import (

// ConfigInspectWithRaw returns the config information with raw data
func (cli *Client) ConfigInspectWithRaw(ctx context.Context, id string) (swarm.Config, []byte, error) {
if id == "" {
return swarm.Config{}, nil, objectNotFoundError{object: "config", id: id}
id, err := trimID("config", id)
if err != nil {
return swarm.Config{}, nil, err
}
if err := cli.NewVersionError(ctx, "1.30", "config inspect"); err != nil {
return swarm.Config{}, nil, err
@@ -23,7 +24,7 @@ func (cli *Client) ConfigInspectWithRaw(ctx context.Context, id string) (swarm.C
return swarm.Config{}, nil, err
}

body, err := io.ReadAll(resp.body)
body, err := io.ReadAll(resp.Body)
if err != nil {
return swarm.Config{}, nil, err
}

@@ -33,6 +33,6 @@ func (cli *Client) ConfigList(ctx context.Context, options types.ConfigListOptio
}

var configs []swarm.Config
err = json.NewDecoder(resp.body).Decode(&configs)
err = json.NewDecoder(resp.Body).Decode(&configs)
return configs, err
}

@@ -4,6 +4,10 @@ import "context"

// ConfigRemove removes a config.
func (cli *Client) ConfigRemove(ctx context.Context, id string) error {
id, err := trimID("config", id)
if err != nil {
return err
}
if err := cli.NewVersionError(ctx, "1.30", "config remove"); err != nil {
return err
}

@@ -9,6 +9,10 @@ import (

// ConfigUpdate attempts to update a config
func (cli *Client) ConfigUpdate(ctx context.Context, id string, version swarm.Version, config swarm.ConfigSpec) error {
id, err := trimID("config", id)
if err != nil {
return err
}
if err := cli.NewVersionError(ctx, "1.30", "config update"); err != nil {
return err
}
@@ -33,7 +33,12 @@ import (
//
// You can use github.com/docker/docker/pkg/stdcopy.StdCopy to demultiplex this
// stream.
func (cli *Client) ContainerAttach(ctx context.Context, container string, options container.AttachOptions) (types.HijackedResponse, error) {
func (cli *Client) ContainerAttach(ctx context.Context, containerID string, options container.AttachOptions) (types.HijackedResponse, error) {
containerID, err := trimID("container", containerID)
if err != nil {
return types.HijackedResponse{}, err
}

query := url.Values{}
if options.Stream {
query.Set("stream", "1")

@@ -54,7 +59,7 @@ func (cli *Client) ContainerAttach(ctx context.Context, container string, option
query.Set("logs", "1")
}

return cli.postHijacked(ctx, "/containers/"+container+"/attach", query, nil, http.Header{
return cli.postHijacked(ctx, "/containers/"+containerID+"/attach", query, nil, http.Header{
"Content-Type": {"text/plain"},
})
}

@@ -7,21 +7,25 @@ import (
"net/url"

"github.com/distribution/reference"
"github.com/docker/docker/api/types"
"github.com/docker/docker/api/types/container"
)

// ContainerCommit applies changes to a container and creates a new tagged image.
func (cli *Client) ContainerCommit(ctx context.Context, container string, options container.CommitOptions) (types.IDResponse, error) {
func (cli *Client) ContainerCommit(ctx context.Context, containerID string, options container.CommitOptions) (container.CommitResponse, error) {
containerID, err := trimID("container", containerID)
if err != nil {
return container.CommitResponse{}, err
}

var repository, tag string
if options.Reference != "" {
ref, err := reference.ParseNormalizedNamed(options.Reference)
if err != nil {
return types.IDResponse{}, err
return container.CommitResponse{}, err
}

if _, isCanonical := ref.(reference.Canonical); isCanonical {
return types.IDResponse{}, errors.New("refusing to create a tag with a digest reference")
return container.CommitResponse{}, errors.New("refusing to create a tag with a digest reference")
}
ref = reference.TagNameOnly(ref)

@@ -32,7 +36,7 @@ func (cli *Client) ContainerCommit(ctx context.Context, container string, option
}

query := url.Values{}
query.Set("container", container)
query.Set("container", containerID)
query.Set("repo", repository)
query.Set("tag", tag)
query.Set("comment", options.Comment)

@@ -44,13 +48,13 @@ func (cli *Client) ContainerCommit(ctx context.Context, container string, option
query.Set("pause", "0")
}

var response types.IDResponse
var response container.CommitResponse
resp, err := cli.post(ctx, "/commit", query, options.Config, nil)
defer ensureReaderClosed(resp)
if err != nil {
return response, err
}

err = json.NewDecoder(resp.body).Decode(&response)
err = json.NewDecoder(resp.Body).Decode(&response)
return response, err
}
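ContainerCommit now returns container.CommitResponse rather than types.IDResponse; the field of interest is still the created image ID. A short usage sketch (the tag reference is illustrative):

    package example

    import (
        "context"

        "github.com/docker/docker/api/types/container"
        "github.com/docker/docker/client"
    )

    // commitContainer commits a container to an image tag and returns the new
    // image ID from the container.CommitResponse.
    func commitContainer(ctx context.Context, cli *client.Client, containerID string) (string, error) {
        resp, err := cli.ContainerCommit(ctx, containerID, container.CommitOptions{
            Reference: "example.com/snapshots/demo:latest", // illustrative tag
        })
        if err != nil {
            return "", err
        }
        return resp.ID, nil
    }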
@@ -16,21 +16,30 @@ import (

// ContainerStatPath returns stat information about a path inside the container filesystem.
func (cli *Client) ContainerStatPath(ctx context.Context, containerID, path string) (container.PathStat, error) {
query := url.Values{}
query.Set("path", filepath.ToSlash(path)) // Normalize the paths used in the API.

urlStr := "/containers/" + containerID + "/archive"
response, err := cli.head(ctx, urlStr, query, nil)
defer ensureReaderClosed(response)
containerID, err := trimID("container", containerID)
if err != nil {
return container.PathStat{}, err
}
return getContainerPathStatFromHeader(response.header)

query := url.Values{}
query.Set("path", filepath.ToSlash(path)) // Normalize the paths used in the API.

resp, err := cli.head(ctx, "/containers/"+containerID+"/archive", query, nil)
defer ensureReaderClosed(resp)
if err != nil {
return container.PathStat{}, err
}
return getContainerPathStatFromHeader(resp.Header)
}

// CopyToContainer copies content into the container filesystem.
// Note that `content` must be a Reader for a TAR archive
func (cli *Client) CopyToContainer(ctx context.Context, containerID, dstPath string, content io.Reader, options container.CopyToContainerOptions) error {
containerID, err := trimID("container", containerID)
if err != nil {
return err
}

query := url.Values{}
query.Set("path", filepath.ToSlash(dstPath)) // Normalize the paths used in the API.
// Do not allow for an existing directory to be overwritten by a non-directory and vice versa.

@@ -42,9 +51,7 @@ func (cli *Client) CopyToContainer(ctx context.Context, containerID, dstPath str
query.Set("copyUIDGID", "true")
}

apiPath := "/containers/" + containerID + "/archive"

response, err := cli.putRaw(ctx, apiPath, query, content, nil)
response, err := cli.putRaw(ctx, "/containers/"+containerID+"/archive", query, content, nil)
defer ensureReaderClosed(response)
if err != nil {
return err

@@ -56,11 +63,15 @@ func (cli *Client) CopyToContainer(ctx context.Context, containerID, dstPath str
// CopyFromContainer gets the content from the container and returns it as a Reader
// for a TAR archive to manipulate it in the host. It's up to the caller to close the reader.
func (cli *Client) CopyFromContainer(ctx context.Context, containerID, srcPath string) (io.ReadCloser, container.PathStat, error) {
containerID, err := trimID("container", containerID)
if err != nil {
return nil, container.PathStat{}, err
}

query := make(url.Values, 1)
query.Set("path", filepath.ToSlash(srcPath)) // Normalize the paths used in the API.

apiPath := "/containers/" + containerID + "/archive"
response, err := cli.get(ctx, apiPath, query, nil)
resp, err := cli.get(ctx, "/containers/"+containerID+"/archive", query, nil)
if err != nil {
return nil, container.PathStat{}, err
}

@@ -71,11 +82,11 @@ func (cli *Client) CopyFromContainer(ctx context.Context, containerID, srcPath s
// copy it locally. Along with the stat info about the local destination,
// we have everything we need to handle the multiple possibilities there
// can be when copying a file/dir from one location to another file/dir.
stat, err := getContainerPathStatFromHeader(response.header)
stat, err := getContainerPathStatFromHeader(resp.Header)
if err != nil {
return nil, stat, fmt.Errorf("unable to get resource stat from response: %s", err)
}
return response.body, stat, err
return resp.Body, stat, err
}

func getContainerPathStatFromHeader(header http.Header) (container.PathStat, error) {
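CopyFromContainer hands back the path stat (parsed from the response header, as above) plus a TAR stream that the caller must read and close. A minimal sketch that lists the entry names in that archive:

    package example

    import (
        "archive/tar"
        "context"
        "io"

        "github.com/docker/docker/client"
    )

    // listArchiveEntries copies srcPath out of the container and returns the
    // names of the entries in the returned TAR stream.
    func listArchiveEntries(ctx context.Context, cli *client.Client, containerID, srcPath string) ([]string, error) {
        rc, _, err := cli.CopyFromContainer(ctx, containerID, srcPath)
        if err != nil {
            return nil, err
        }
        defer rc.Close()

        var names []string
        tr := tar.NewReader(rc)
        for {
            hdr, err := tr.Next()
            if err == io.EOF {
                return names, nil
            }
            if err != nil {
                return nil, err
            }
            names = append(names, hdr.Name)
        }
    }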
@@ -5,6 +5,8 @@ import (
"encoding/json"
"net/url"
"path"
"sort"
"strings"

"github.com/docker/docker/api/types/container"
"github.com/docker/docker/api/types/network"

@@ -12,12 +14,6 @@ import (
ocispec "github.com/opencontainers/image-spec/specs-go/v1"
)

type configWrapper struct {
*container.Config
HostConfig *container.HostConfig
NetworkingConfig *network.NetworkingConfig
}

// ContainerCreate creates a new container based on the given configuration.
// It can be associated with a name, but it's not mandatory.
func (cli *Client) ContainerCreate(ctx context.Context, config *container.Config, hostConfig *container.HostConfig, networkingConfig *network.NetworkingConfig, platform *ocispec.Platform, containerName string) (container.CreateResponse, error) {

@@ -58,6 +54,9 @@ func (cli *Client) ContainerCreate(ctx context.Context, config *container.Config
// When using API under 1.42, the Linux daemon doesn't respect the ConsoleSize
hostConfig.ConsoleSize = [2]uint{0, 0}
}

hostConfig.CapAdd = normalizeCapabilities(hostConfig.CapAdd)
hostConfig.CapDrop = normalizeCapabilities(hostConfig.CapDrop)
}

// Since API 1.44, the container-wide MacAddress is deprecated and will trigger a WARNING if it's specified.

@@ -74,19 +73,19 @@ func (cli *Client) ContainerCreate(ctx context.Context, config *container.Config
query.Set("name", containerName)
}

body := configWrapper{
body := container.CreateRequest{
Config: config,
HostConfig: hostConfig,
NetworkingConfig: networkingConfig,
}

serverResp, err := cli.post(ctx, "/containers/create", query, body, nil)
defer ensureReaderClosed(serverResp)
resp, err := cli.post(ctx, "/containers/create", query, body, nil)
defer ensureReaderClosed(resp)
if err != nil {
return response, err
}

err = json.NewDecoder(serverResp.body).Decode(&response)
err = json.NewDecoder(resp.Body).Decode(&response)
return response, err
}

@@ -114,3 +113,42 @@ func hasEndpointSpecificMacAddress(networkingConfig *network.NetworkingConfig) b
}
return false
}

// allCapabilities is a magic value for "all capabilities"
const allCapabilities = "ALL"

// normalizeCapabilities normalizes capabilities to their canonical form,
// removes duplicates, and sorts the results.
//
// It is similar to [github.com/docker/docker/oci/caps.NormalizeLegacyCapabilities],
// but performs no validation based on supported capabilities.
func normalizeCapabilities(caps []string) []string {
var normalized []string

unique := make(map[string]struct{})
for _, c := range caps {
c = normalizeCap(c)
if _, ok := unique[c]; ok {
continue
}
unique[c] = struct{}{}
normalized = append(normalized, c)
}

sort.Strings(normalized)
return normalized
}

// normalizeCap normalizes a capability to its canonical format by upper-casing
// and adding a "CAP_" prefix (if not yet present). It also accepts the "ALL"
// magic-value.
func normalizeCap(cap string) string {
cap = strings.ToUpper(cap)
if cap == allCapabilities {
return cap
}
if !strings.HasPrefix(cap, "CAP_") {
cap = "CAP_" + cap
}
return cap
}
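normalizeCapabilities and normalizeCap are unexported helpers, so callers never invoke them directly; the effect is that mixed-case, unprefixed and duplicate entries in HostConfig.CapAdd and CapDrop are canonicalized before the create request is sent. A worked example (input values assumed):

    package example

    // Illustrative input/output of the normalization shown above.
    var (
        capAddIn  = []string{"chown", "NET_ADMIN", "CAP_CHOWN", "all"}
        capAddOut = []string{"ALL", "CAP_CHOWN", "CAP_NET_ADMIN"} // upper-cased, CAP_-prefixed, de-duplicated, sorted
    )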
@@ -10,14 +10,21 @@ import (

// ContainerDiff shows differences in a container filesystem since it was started.
func (cli *Client) ContainerDiff(ctx context.Context, containerID string) ([]container.FilesystemChange, error) {
var changes []container.FilesystemChange

serverResp, err := cli.get(ctx, "/containers/"+containerID+"/changes", url.Values{}, nil)
defer ensureReaderClosed(serverResp)
containerID, err := trimID("container", containerID)
if err != nil {
return changes, err
return nil, err
}

err = json.NewDecoder(serverResp.body).Decode(&changes)
resp, err := cli.get(ctx, "/containers/"+containerID+"/changes", url.Values{}, nil)
defer ensureReaderClosed(resp)
if err != nil {
return nil, err
}

var changes []container.FilesystemChange
err = json.NewDecoder(resp.Body).Decode(&changes)
if err != nil {
return nil, err
}
return changes, err
}

@@ -11,8 +11,11 @@ import (
)

// ContainerExecCreate creates a new exec configuration to run an exec process.
func (cli *Client) ContainerExecCreate(ctx context.Context, container string, options container.ExecOptions) (types.IDResponse, error) {
var response types.IDResponse
func (cli *Client) ContainerExecCreate(ctx context.Context, containerID string, options container.ExecOptions) (container.ExecCreateResponse, error) {
containerID, err := trimID("container", containerID)
if err != nil {
return container.ExecCreateResponse{}, err
}

// Make sure we negotiated (if the client is configured to do so),
// as code below contains API-version specific handling of options.

@@ -20,22 +23,24 @@ func (cli *Client) ContainerExecCreate(ctx context.Context, container string, op
// Normally, version-negotiation (if enabled) would not happen until
// the API request is made.
if err := cli.checkVersion(ctx); err != nil {
return response, err
return container.ExecCreateResponse{}, err
}

if err := cli.NewVersionError(ctx, "1.25", "env"); len(options.Env) != 0 && err != nil {
return response, err
return container.ExecCreateResponse{}, err
}
if versions.LessThan(cli.ClientVersion(), "1.42") {
options.ConsoleSize = nil
}

resp, err := cli.post(ctx, "/containers/"+container+"/exec", nil, options, nil)
resp, err := cli.post(ctx, "/containers/"+containerID+"/exec", nil, options, nil)
defer ensureReaderClosed(resp)
if err != nil {
return response, err
return container.ExecCreateResponse{}, err
}
err = json.NewDecoder(resp.body).Decode(&response)

var response container.ExecCreateResponse
err = json.NewDecoder(resp.Body).Decode(&response)
return response, err
}

@@ -70,7 +75,7 @@ func (cli *Client) ContainerExecInspect(ctx context.Context, execID string) (con
return response, err
}

err = json.NewDecoder(resp.body).Decode(&response)
err = json.NewDecoder(resp.Body).Decode(&response)
ensureReaderClosed(resp)
return response, err
}

@@ -10,10 +10,15 @@ import (
// and returns them as an io.ReadCloser. It's up to the caller
// to close the stream.
func (cli *Client) ContainerExport(ctx context.Context, containerID string) (io.ReadCloser, error) {
serverResp, err := cli.get(ctx, "/containers/"+containerID+"/export", url.Values{}, nil)
containerID, err := trimID("container", containerID)
if err != nil {
return nil, err
}

return serverResp.body, nil
resp, err := cli.get(ctx, "/containers/"+containerID+"/export", url.Values{}, nil)
if err != nil {
return nil, err
}

return resp.Body, nil
}
@@ -7,46 +7,50 @@ import (
"io"
"net/url"

"github.com/docker/docker/api/types"
"github.com/docker/docker/api/types/container"
)

// ContainerInspect returns the container information.
func (cli *Client) ContainerInspect(ctx context.Context, containerID string) (types.ContainerJSON, error) {
if containerID == "" {
return types.ContainerJSON{}, objectNotFoundError{object: "container", id: containerID}
}
serverResp, err := cli.get(ctx, "/containers/"+containerID+"/json", nil, nil)
defer ensureReaderClosed(serverResp)
func (cli *Client) ContainerInspect(ctx context.Context, containerID string) (container.InspectResponse, error) {
containerID, err := trimID("container", containerID)
if err != nil {
return types.ContainerJSON{}, err
return container.InspectResponse{}, err
}

var response types.ContainerJSON
err = json.NewDecoder(serverResp.body).Decode(&response)
resp, err := cli.get(ctx, "/containers/"+containerID+"/json", nil, nil)
defer ensureReaderClosed(resp)
if err != nil {
return container.InspectResponse{}, err
}

var response container.InspectResponse
err = json.NewDecoder(resp.Body).Decode(&response)
return response, err
}

// ContainerInspectWithRaw returns the container information and its raw representation.
func (cli *Client) ContainerInspectWithRaw(ctx context.Context, containerID string, getSize bool) (types.ContainerJSON, []byte, error) {
if containerID == "" {
return types.ContainerJSON{}, nil, objectNotFoundError{object: "container", id: containerID}
func (cli *Client) ContainerInspectWithRaw(ctx context.Context, containerID string, getSize bool) (container.InspectResponse, []byte, error) {
containerID, err := trimID("container", containerID)
if err != nil {
return container.InspectResponse{}, nil, err
}

query := url.Values{}
if getSize {
query.Set("size", "1")
}
serverResp, err := cli.get(ctx, "/containers/"+containerID+"/json", query, nil)
defer ensureReaderClosed(serverResp)
resp, err := cli.get(ctx, "/containers/"+containerID+"/json", query, nil)
defer ensureReaderClosed(resp)
if err != nil {
return types.ContainerJSON{}, nil, err
return container.InspectResponse{}, nil, err
}

body, err := io.ReadAll(serverResp.body)
body, err := io.ReadAll(resp.Body)
if err != nil {
return types.ContainerJSON{}, nil, err
return container.InspectResponse{}, nil, err
}

var response types.ContainerJSON
var response container.InspectResponse
rdr := bytes.NewReader(body)
err = json.NewDecoder(rdr).Decode(&response)
return response, body, err

@@ -7,6 +7,11 @@ import (

// ContainerKill terminates the container process but does not remove the container from the docker host.
func (cli *Client) ContainerKill(ctx context.Context, containerID, signal string) error {
containerID, err := trimID("container", containerID)
if err != nil {
return err
}

query := url.Values{}
if signal != "" {
query.Set("signal", signal)

@@ -6,13 +6,12 @@ import (
"net/url"
"strconv"

"github.com/docker/docker/api/types"
"github.com/docker/docker/api/types/container"
"github.com/docker/docker/api/types/filters"
)

// ContainerList returns the list of containers in the docker host.
func (cli *Client) ContainerList(ctx context.Context, options container.ListOptions) ([]types.Container, error) {
func (cli *Client) ContainerList(ctx context.Context, options container.ListOptions) ([]container.Summary, error) {
query := url.Values{}

if options.All {

@@ -51,7 +50,7 @@ func (cli *Client) ContainerList(ctx context.Context, options container.ListOpti
return nil, err
}

var containers []types.Container
err = json.NewDecoder(resp.body).Decode(&containers)
var containers []container.Summary
err = json.NewDecoder(resp.Body).Decode(&containers)
return containers, err
}

@@ -33,7 +33,12 @@ import (
//
// You can use github.com/docker/docker/pkg/stdcopy.StdCopy to demultiplex this
// stream.
func (cli *Client) ContainerLogs(ctx context.Context, container string, options container.LogsOptions) (io.ReadCloser, error) {
func (cli *Client) ContainerLogs(ctx context.Context, containerID string, options container.LogsOptions) (io.ReadCloser, error) {
containerID, err := trimID("container", containerID)
if err != nil {
return nil, err
}

query := url.Values{}
if options.ShowStdout {
query.Set("stdout", "1")

@@ -72,9 +77,9 @@ func (cli *Client) ContainerLogs(ctx context.Context, container string, options
}
query.Set("tail", options.Tail)

resp, err := cli.get(ctx, "/containers/"+container+"/logs", query, nil)
resp, err := cli.get(ctx, "/containers/"+containerID+"/logs", query, nil)
if err != nil {
return nil, err
}
return resp.body, nil
return resp.Body, nil
}
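As the comment notes, when the container was created without a TTY the log stream multiplexes stdout and stderr and needs stdcopy.StdCopy to split it. A minimal sketch:

    package example

    import (
        "context"
        "os"

        "github.com/docker/docker/api/types/container"
        "github.com/docker/docker/client"
        "github.com/docker/docker/pkg/stdcopy"
    )

    // streamLogs copies a container's stdout/stderr to the local process,
    // demultiplexing the stream as suggested in the comment above.
    func streamLogs(ctx context.Context, cli *client.Client, containerID string) error {
        rc, err := cli.ContainerLogs(ctx, containerID, container.LogsOptions{
            ShowStdout: true,
            ShowStderr: true,
        })
        if err != nil {
            return err
        }
        defer rc.Close()
        _, err = stdcopy.StdCopy(os.Stdout, os.Stderr, rc)
        return err
    }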
@@ -4,6 +4,11 @@ import "context"

// ContainerPause pauses the main process of a given container without terminating it.
func (cli *Client) ContainerPause(ctx context.Context, containerID string) error {
containerID, err := trimID("container", containerID)
if err != nil {
return err
}

resp, err := cli.post(ctx, "/containers/"+containerID+"/pause", nil, nil, nil)
ensureReaderClosed(resp)
return err

@@ -11,25 +11,24 @@ import (

// ContainersPrune requests the daemon to delete unused data
func (cli *Client) ContainersPrune(ctx context.Context, pruneFilters filters.Args) (container.PruneReport, error) {
var report container.PruneReport

if err := cli.NewVersionError(ctx, "1.25", "container prune"); err != nil {
return report, err
return container.PruneReport{}, err
}

query, err := getFiltersQuery(pruneFilters)
if err != nil {
return report, err
return container.PruneReport{}, err
}

serverResp, err := cli.post(ctx, "/containers/prune", query, nil, nil)
defer ensureReaderClosed(serverResp)
resp, err := cli.post(ctx, "/containers/prune", query, nil, nil)
defer ensureReaderClosed(resp)
if err != nil {
return report, err
return container.PruneReport{}, err
}

if err := json.NewDecoder(serverResp.body).Decode(&report); err != nil {
return report, fmt.Errorf("Error retrieving disk usage: %v", err)
var report container.PruneReport
if err := json.NewDecoder(resp.Body).Decode(&report); err != nil {
return container.PruneReport{}, fmt.Errorf("Error retrieving disk usage: %v", err)
}

return report, nil
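ContainersPrune still takes a filters.Args; the change above only renames the response variable and decodes into container.PruneReport. A usage sketch pruning stopped containers older than 24 hours (the filter value is illustrative):

    package example

    import (
        "context"

        "github.com/docker/docker/api/types/container"
        "github.com/docker/docker/api/types/filters"
        "github.com/docker/docker/client"
    )

    // pruneOldContainers removes stopped containers that exited more than 24h
    // ago and reports how much space was reclaimed.
    func pruneOldContainers(ctx context.Context, cli *client.Client) (container.PruneReport, error) {
        return cli.ContainersPrune(ctx, filters.NewArgs(filters.Arg("until", "24h")))
    }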
@@ -9,6 +9,11 @@ import (

// ContainerRemove kills and removes a container from the docker host.
func (cli *Client) ContainerRemove(ctx context.Context, containerID string, options container.RemoveOptions) error {
containerID, err := trimID("container", containerID)
if err != nil {
return err
}

query := url.Values{}
if options.RemoveVolumes {
query.Set("v", "1")

@@ -7,6 +7,11 @@ import (

// ContainerRename changes the name of a given container.
func (cli *Client) ContainerRename(ctx context.Context, containerID, newContainerName string) error {
containerID, err := trimID("container", containerID)
if err != nil {
return err
}

query := url.Values{}
query.Set("name", newContainerName)
resp, err := cli.post(ctx, "/containers/"+containerID+"/rename", query, nil, nil)

@@ -10,18 +10,27 @@ import (

// ContainerResize changes the size of the tty for a container.
func (cli *Client) ContainerResize(ctx context.Context, containerID string, options container.ResizeOptions) error {
containerID, err := trimID("container", containerID)
if err != nil {
return err
}
return cli.resize(ctx, "/containers/"+containerID, options.Height, options.Width)
}

// ContainerExecResize changes the size of the tty for an exec process running inside a container.
func (cli *Client) ContainerExecResize(ctx context.Context, execID string, options container.ResizeOptions) error {
execID, err := trimID("exec", execID)
if err != nil {
return err
}
return cli.resize(ctx, "/exec/"+execID, options.Height, options.Width)
}

func (cli *Client) resize(ctx context.Context, basePath string, height, width uint) error {
// FIXME(thaJeztah): the API / backend accepts uint32, but container.ResizeOptions uses uint.
query := url.Values{}
query.Set("h", strconv.Itoa(int(height)))
query.Set("w", strconv.Itoa(int(width)))
query.Set("h", strconv.FormatUint(uint64(height), 10))
query.Set("w", strconv.FormatUint(uint64(width), 10))

resp, err := cli.post(ctx, basePath+"/resize", query, nil, nil)
ensureReaderClosed(resp)
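The switch from strconv.Itoa(int(height)) to strconv.FormatUint(uint64(height), 10) avoids the detour through a signed int, which could render a very large uint as a negative number on 32-bit platforms; for realistic tty sizes the two encodings are identical. For example:

    package example

    import "strconv"

    // Equivalent encodings for a realistic tty size; FormatUint simply avoids
    // the signed conversion used before.
    var (
        oldEncoding = strconv.Itoa(int(uint(120)))              // "120"
        newEncoding = strconv.FormatUint(uint64(uint(120)), 10) // "120"
    )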
@@ -13,6 +13,11 @@ import (
// It makes the daemon wait for the container to be up again for
// a specific amount of time, given the timeout.
func (cli *Client) ContainerRestart(ctx context.Context, containerID string, options container.StopOptions) error {
containerID, err := trimID("container", containerID)
if err != nil {
return err
}

query := url.Values{}
if options.Timeout != nil {
query.Set("t", strconv.Itoa(*options.Timeout))

@@ -9,6 +9,11 @@ import (

// ContainerStart sends a request to the docker daemon to start a container.
func (cli *Client) ContainerStart(ctx context.Context, containerID string, options container.StartOptions) error {
containerID, err := trimID("container", containerID)
if err != nil {
return err
}

query := url.Values{}
if len(options.CheckpointID) != 0 {
query.Set("checkpoint", options.CheckpointID)
Some files were not shown because too many files have changed in this diff.