diff --git a/go.mod b/go.mod
index 827bde2e48..0d8f1d37b0 100644
--- a/go.mod
+++ b/go.mod
@@ -51,11 +51,12 @@ require (
github.com/blang/semver/v4 v4.0.0
github.com/denverdino/aliyungo v0.0.0-20210425065611-55bee4942cba
github.com/digitalocean/godo v1.60.0
- github.com/docker/docker v20.10.6+incompatible
+ github.com/docker/docker v20.10.6+incompatible // indirect
github.com/go-ini/ini v1.62.0
github.com/go-logr/logr v0.4.0
github.com/gogo/protobuf v1.3.2
github.com/google/go-cmp v0.5.5
+ github.com/google/go-containerregistry v0.5.1
github.com/google/uuid v1.2.0
github.com/gophercloud/gophercloud v0.18.0
github.com/hashicorp/hcl/v2 v2.10.0
diff --git a/go.sum b/go.sum
index 1fd2a3a2ca..ec7318f6af 100644
--- a/go.sum
+++ b/go.sum
@@ -109,6 +109,7 @@ github.com/Masterminds/sprig/v3 v3.2.2/go.mod h1:UoaO7Yp8KlPnJIYWTFkMaqPUYKTfGFP
github.com/Masterminds/squirrel v1.5.0/go.mod h1:NNaOrjSoIDfDA40n7sr2tPNZRfjzjA400rg+riTZj10=
github.com/Masterminds/vcs v1.13.1/go.mod h1:N09YCmOQr6RLxC6UNHzuVwAdodYbbnycGHSmwVJjcKA=
github.com/MichaelTJones/walk v0.0.0-20161122175330-4748e29d5718/go.mod h1:VVwKsx9Dc8rNG55BWqogoJzGubjKnRoXdUvpGbWqeCc=
+github.com/Microsoft/go-winio v0.4.14/go.mod h1:qXqCSQ3Xa7+6tgxaGTIe4Kpcdsi+P8jBhyzoq1bpyYA=
github.com/Microsoft/go-winio v0.4.15-0.20190919025122-fc70bd9a86b5/go.mod h1:tTuCMEN+UleMWgg9dVx4Hu52b1bJo+59jBh3ajtinzw=
github.com/Microsoft/go-winio v0.4.15 h1:qkLXKzb1QoVatRyd/YlXZ/Kg0m5K3SPuoD82jjSOaBc=
github.com/Microsoft/go-winio v0.4.15/go.mod h1:tTuCMEN+UleMWgg9dVx4Hu52b1bJo+59jBh3ajtinzw=
@@ -240,6 +241,7 @@ github.com/containerd/cgroups v0.0.0-20200531161412-0dbf7f05ba59/go.mod h1:pA0z1
github.com/containerd/console v0.0.0-20180822173158-c12b1e7919c1/go.mod h1:Tj/on1eG8kiEhd0+fhSDzsPAFESxzBBvdyEgyryXffw=
github.com/containerd/console v1.0.1/go.mod h1:XUsP6YE/mKtz6bxc+I8UiKKTP04qjQL4qcS3XoQ5xkw=
github.com/containerd/containerd v1.3.0-beta.2.0.20190828155532-0293cbd26c69/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA=
+github.com/containerd/containerd v1.3.0/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA=
github.com/containerd/containerd v1.3.2/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA=
github.com/containerd/containerd v1.3.4/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA=
github.com/containerd/containerd v1.4.4 h1:rtRG4N6Ct7GNssATwgpvMGfnjnwfjnu/Zs9W3Ikzq+M=
@@ -249,6 +251,8 @@ github.com/containerd/continuity v0.0.0-20200107194136-26c1120b8d41 h1:kIFnQBO7r
github.com/containerd/continuity v0.0.0-20200107194136-26c1120b8d41/go.mod h1:Dq467ZllaHgAtVp4p1xUQWBrFXR9s/wyoTpG8zOJGkY=
github.com/containerd/fifo v0.0.0-20190226154929-a9fb20d87448/go.mod h1:ODA38xgv3Kuk8dQz2ZQXpnv/UZZUHUCL7pnLehbXgQI=
github.com/containerd/go-runc v0.0.0-20180907222934-5a6d9f37cfa3/go.mod h1:IV7qH3hrUgRmyYrtgEeGWJfWbgcHL9CSRruz2Vqcph0=
+github.com/containerd/stargz-snapshotter/estargz v0.4.1 h1:5e7heayhB7CcgdTkqfZqrNaNv15gABwr3Q2jBTbLlt4=
+github.com/containerd/stargz-snapshotter/estargz v0.4.1/go.mod h1:x7Q9dg9QYb4+ELgxmo4gBUeJB0tl5dqH1Sdz0nJU1QM=
github.com/containerd/ttrpc v0.0.0-20190828154514-0e0f228740de/go.mod h1:PvCDdDGpgqzQIzDW1TphrGLssLDZp2GuS+X5DkEJB8o=
github.com/containerd/ttrpc v1.0.2/go.mod h1:UAxOpgT9ziI0gJrmKvgcZivgxOp8iFPSk8httJEt98Y=
github.com/containerd/typeurl v0.0.0-20180627222232-a93fcdb778cd/go.mod h1:Cm3kwCdlkCfMSHURc+r6fwoGH6/F1hH3S4sg0rLFWPc=
@@ -302,12 +306,14 @@ github.com/dimchansky/utfbom v1.1.0/go.mod h1:rO41eb7gLfo8SF1jd9F8HplJm1Fewwi4mQ
github.com/dimchansky/utfbom v1.1.1 h1:vV6w1AhK4VMnhBno/TPVCoK9U/LP0PkLCS9tbxHdi/U=
github.com/dimchansky/utfbom v1.1.1/go.mod h1:SxdoEBH5qIqFocHMyGOXVAybYJdr71b1Q/j0mACtrfE=
github.com/dnaeon/go-vcr v1.0.1/go.mod h1:aBB1+wY4s93YsC3HHjMBMrwTj2R9FHDzUr9KyGc8n1E=
+github.com/docker/cli v0.0.0-20191017083524-a8ff7f821017/go.mod h1:JLrzqnKDaYBop7H2jaqPtU4hHvMKP+vjCwu2uszcLI8=
github.com/docker/cli v0.0.0-20200130152716-5d0cf8839492 h1:FwssHbCDJD025h+BchanCwE1Q8fyMgqDr2mOQAWOLGw=
github.com/docker/cli v0.0.0-20200130152716-5d0cf8839492/go.mod h1:JLrzqnKDaYBop7H2jaqPtU4hHvMKP+vjCwu2uszcLI8=
github.com/docker/distribution v0.0.0-20191216044856-a8371794149d/go.mod h1:0+TTO4EOBfRPhZXAeF1Vu+W3hHZ8eLp8PgKVZlcvtFY=
github.com/docker/distribution v2.7.1-0.20190205005809-0d3efadf0154+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w=
github.com/docker/distribution v2.7.1+incompatible h1:a5mlkVzth6W5A4fOsS3D2EO5BUmsJpcB+cRlLU7cSug=
github.com/docker/distribution v2.7.1+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w=
+github.com/docker/docker v1.4.2-0.20190924003213-a8608b5b67c7/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
github.com/docker/docker v1.4.2-0.20200203170920-46ec8731fbce/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
github.com/docker/docker v20.10.2+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
github.com/docker/docker v20.10.6+incompatible h1:oXI3Vas8TI8Eu/EjH4srKHJBVqraSzJybhxY7Om9faQ=
@@ -539,6 +545,8 @@ github.com/google/go-cmp v0.5.3/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/
github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.5 h1:Khx7svrCpmxxtHBq5j2mp/xVjsi8hQMfNLvJFAlrGgU=
github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
+github.com/google/go-containerregistry v0.5.1 h1:/+mFTs4AlwsJ/mJe8NDtKb7BxLtbZFpcn8vDsneEkwQ=
+github.com/google/go-containerregistry v0.5.1/go.mod h1:Ct15B4yir3PLOP5jsy0GNeYVaIZs/MK/Jz5any1wFW0=
github.com/google/go-querystring v1.0.0 h1:Xkwi/a1rcvNg1PPYe5vI8GbeBY/jrVuDX5ASuANWTrk=
github.com/google/go-querystring v1.0.0/go.mod h1:odCYkC5MyYFN7vkCjXpyrEuKhc/BUO6wN/zVPAxq5ck=
github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
@@ -706,6 +714,7 @@ github.com/jmespath/go-jmespath v0.4.0/go.mod h1:T8mJZnbsbmF+m6zOOFylbeCJqk5+pHW
github.com/jmespath/go-jmespath/internal/testify v1.5.1 h1:shLQSRRSCCPj3f2gpwzGwWFoC7ycTf1rcQZHOlsJ6N8=
github.com/jmespath/go-jmespath/internal/testify v1.5.1/go.mod h1:L3OGu8Wl2/fWfCI6z80xFu9LTZmf1ZRjMHUOPmWr69U=
github.com/jmoiron/sqlx v1.2.0/go.mod h1:1FEQNm3xlJgrMD+FBdI9+xvCksHtbpVBBw5dYhBSsks=
+github.com/joefitzgerald/rainbow-reporter v0.1.0/go.mod h1:481CNgqmVHQZzdIbN52CupLJyoVwB10FQ/IQlF1pdL8=
github.com/joho/godotenv v1.3.0/go.mod h1:7hK45KPybAkOC6peb+G5yklZfMxEjkZhHbwpqxOKXbg=
github.com/jonboulle/clockwork v0.1.0/go.mod h1:Ii8DK3G1RaLaWxj9trq07+26W01tbo22gdxWY5EU2bo=
github.com/jpillora/backoff v1.0.0/go.mod h1:J/6gKK9jxlEcS3zixgDgUAsiuZ7yrSoa/FX5e0EB2j4=
@@ -803,6 +812,7 @@ github.com/mattn/go-sqlite3 v1.12.0/go.mod h1:FPy6KqzDD04eiIsT53CuJW3U88zkxoIYsO
github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0=
github.com/matttproud/golang_protobuf_extensions v1.0.2-0.20181231171920-c182affec369 h1:I0XW9+e1XWDxdcEniV4rQAIOPUGDq67JSCiRCgGCZLI=
github.com/matttproud/golang_protobuf_extensions v1.0.2-0.20181231171920-c182affec369/go.mod h1:BSXmuO+STAnVfrANrmjBb36TMTDstsz7MSK+HVaYKv4=
+github.com/maxbrunsfeld/counterfeiter/v6 v6.2.2/go.mod h1:eD9eIE7cdwcMi9rYluz88Jz2VyhSmden33/aXg4oVIY=
github.com/mgutz/str v1.2.0/go.mod h1:w1v0ofgLaJdoD0HpQ3fycxKD1WtxpjSo151pK/31q6w=
github.com/mholt/certmagic v0.6.2-0.20190624175158-6a42ef9fe8c2/go.mod h1:g4cOPxcjV0oFq3qwpjSA30LReKD8AoIfwAY9VvG35NY=
github.com/miekg/dns v1.0.14/go.mod h1:W1PPwlIAgtquWBMBEV9nkV9Cazfe8ScdGz/Lj7v3Nrg=
@@ -1029,6 +1039,7 @@ github.com/ryanuber/go-glob v1.0.0 h1:iQh3xXAumdQ+4Ufa5b25cRpC5TYKlno6hsv6Cb3pkB
github.com/ryanuber/go-glob v1.0.0/go.mod h1:807d1WSdnB0XRJzKNil9Om6lcp/3a0v4qIHxIXzX/Yc=
github.com/samuel/go-zookeeper v0.0.0-20190923202752-2cc03de413da/go.mod h1:gi+0XIa01GRL2eRQVjQkKGqKF3SF9vZR/HnPullcV2E=
github.com/satori/go.uuid v1.2.0/go.mod h1:dA0hQrYB0VpLJoorglMZABFdXlWrHn1NEOzdhQKdks0=
+github.com/sclevine/spec v1.2.0/go.mod h1:W4J29eT/Kzv7/b9IWLB055Z+qvVC9vt0Arko24q7p+U=
github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529 h1:nn5Wsu0esKSJiIVhscUtVbo7ada43DJhG55ua/hjS5I=
github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529/go.mod h1:DxrIzT+xaE7yg65j358z/aeFdxmN0P9QXhEzd20vsDc=
github.com/seccomp/libseccomp-golang v0.9.1/go.mod h1:GbW5+tmTXfcxTToHLXlScSlAvWlF4P2Ca7zGrPiEpWo=
@@ -1395,6 +1406,7 @@ golang.org/x/sys v0.0.0-20190606165138-5da285871e9c/go.mod h1:h1NjWce9XRLGQEsW7w
golang.org/x/sys v0.0.0-20190606203320-7fc4e5ec1444/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190616124812-15dcb6c0061f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20190626221950-04f50cda93cb/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190726091711-fc99dfbffb4e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190826190057-c7b8b68b1456/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190904154756-749cb33beabd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
@@ -1504,6 +1516,7 @@ golang.org/x/tools v0.0.0-20190617190820-da514acc4774/go.mod h1:/rFqwRUd4F7ZHNgw
golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
golang.org/x/tools v0.0.0-20190624222133-a101b041ded4/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
golang.org/x/tools v0.0.0-20190628153133-6cdbf07be9d0/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
+golang.org/x/tools v0.0.0-20190706070813-72ffa07ba3db/go.mod h1:jcCCGcm9btYwXyDqrUWc6MKQKKGJCWEQ3AfLSRIbEuI=
golang.org/x/tools v0.0.0-20190816200558-6889da9d5479/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20190911174233-4f2ddba30aff/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20190920225731-5eefd052ad72/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
@@ -1544,6 +1557,7 @@ golang.org/x/tools v0.0.0-20200729194436-6467de6f59a7/go.mod h1:njjCfa9FT2d7l9Bc
golang.org/x/tools v0.0.0-20200804011535-6c149bb5ef0d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA=
golang.org/x/tools v0.0.0-20200825202427-b303f430e36d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA=
golang.org/x/tools v0.0.0-20200904185747-39188db58858/go.mod h1:Cj7w3i3Rnn0Xh82ur9kSqwfTHTeVxaDqrfMjpcNT6bE=
+golang.org/x/tools v0.0.0-20200916195026-c9a70fc28ce3/go.mod h1:z6u4i615ZeAfBE4XtMziQW1fSVJXACjjbWkB/mvPzlU=
golang.org/x/tools v0.0.0-20201110124207-079ba7bd75cd/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
golang.org/x/tools v0.0.0-20201201161351-ac6f37ff4c2a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
golang.org/x/tools v0.0.0-20201208233053-a543418bbed2/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
@@ -1634,6 +1648,7 @@ google.golang.org/genproto v0.0.0-20200511104702-f5ebc3bea380/go.mod h1:55QSHmfG
google.golang.org/genproto v0.0.0-20200513103714-09dca8ec2884/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
google.golang.org/genproto v0.0.0-20200515170657-fc4c6c6a6587/go.mod h1:YsZOwe1myG/8QRHRsmBRE1LrgQY60beZKjly0O1fX9U=
google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo=
+google.golang.org/genproto v0.0.0-20200527145253-8367513e4ece/go.mod h1:jDfRM7FcilCzHH/e9qn6dsT145K34l5v+OpcnNgKAAA=
google.golang.org/genproto v0.0.0-20200618031413-b414f8b61790/go.mod h1:jDfRM7FcilCzHH/e9qn6dsT145K34l5v+OpcnNgKAAA=
google.golang.org/genproto v0.0.0-20200729003335-053ba62fc06f/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
google.golang.org/genproto v0.0.0-20200804131852-c06518451d9c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
@@ -1793,6 +1808,7 @@ k8s.io/cri-api v0.21.1/go.mod h1:nJbXlTpXwYCYuGMR7v3PQb1Du4WOGj2I9085xMVjr3I=
k8s.io/csi-translation-lib v0.21.1 h1:yzAIfaMKv+j5pPtMKwGgAOLaIVhPBv+UjMRyusq+im4=
k8s.io/csi-translation-lib v0.21.1/go.mod h1:y6NwcsxV1IsoqOm07G4hqANvCLXNDMC1t00lf+CqKRA=
k8s.io/gengo v0.0.0-20200413195148-3a45101e95ac/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0=
+k8s.io/gengo v0.0.0-20201113003025-83324d819ded/go.mod h1:FiNAH4ZV3gBg2Kwh89tzAEV2be7d5xI0vBa/VySYy3E=
k8s.io/gengo v0.0.0-20201214224949-b6c5ce23f027/go.mod h1:FiNAH4ZV3gBg2Kwh89tzAEV2be7d5xI0vBa/VySYy3E=
k8s.io/gengo v0.0.0-20210203185629-de9496dff47b h1:bAU8IlrMA6KbP0dIg/sVSJn95pDCUHDZx0DpTGrf2v4=
k8s.io/gengo v0.0.0-20210203185629-de9496dff47b/go.mod h1:FiNAH4ZV3gBg2Kwh89tzAEV2be7d5xI0vBa/VySYy3E=
diff --git a/pkg/assets/BUILD.bazel b/pkg/assets/BUILD.bazel
index aaca334527..91714d06f7 100644
--- a/pkg/assets/BUILD.bazel
+++ b/pkg/assets/BUILD.bazel
@@ -7,8 +7,6 @@ go_library(
"copy.go",
"copyfile.go",
"copyimage.go",
- "docker_api.go",
- "docker_cli.go",
],
importpath = "k8s.io/kops/pkg/assets",
visibility = ["//visibility:public"],
@@ -22,9 +20,10 @@ go_library(
"//util/pkg/mirrors:go_default_library",
"//util/pkg/vfs:go_default_library",
"//vendor/github.com/blang/semver/v4:go_default_library",
- "//vendor/github.com/docker/docker/api/types:go_default_library",
- "//vendor/github.com/docker/docker/api/types/filters:go_default_library",
- "//vendor/github.com/docker/docker/client:go_default_library",
+ "//vendor/github.com/google/go-containerregistry/pkg/authn:go_default_library",
+ "//vendor/github.com/google/go-containerregistry/pkg/name:go_default_library",
+ "//vendor/github.com/google/go-containerregistry/pkg/v1/remote:go_default_library",
+ "//vendor/github.com/google/go-containerregistry/pkg/v1/types:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/wait:go_default_library",
"//vendor/k8s.io/klog/v2:go_default_library",
],
diff --git a/pkg/assets/copyimage.go b/pkg/assets/copyimage.go
index 5328b778b0..80236eff71 100644
--- a/pkg/assets/copyimage.go
+++ b/pkg/assets/copyimage.go
@@ -1,5 +1,5 @@
/*
-Copyright 2017 The Kubernetes Authors.
+Copyright 2021 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
@@ -19,6 +19,10 @@ package assets
import (
"fmt"
+ "github.com/google/go-containerregistry/pkg/authn"
+ "github.com/google/go-containerregistry/pkg/name"
+ "github.com/google/go-containerregistry/pkg/v1/remote"
+ "github.com/google/go-containerregistry/pkg/v1/types"
"k8s.io/klog/v2"
)
@@ -31,43 +35,64 @@ type CopyImage struct {
}
func (e *CopyImage) Run() error {
- api, err := newDockerAPI()
- if err != nil {
- return err
- }
-
- cli, err := newDockerCLI()
- if err != nil {
- return err
-
- }
-
source := e.SourceImage
target := e.TargetImage
- klog.Infof("copying docker image from %q to %q", source, target)
-
- err = cli.pullImage(source)
+ sourceRef, err := name.ParseReference(source)
if err != nil {
- return fmt.Errorf("error pulling image %q: %v", source, err)
- }
- sourceImage, err := api.findImage(source)
- if err != nil {
- return fmt.Errorf("error finding image %q: %v", source, err)
- }
- if sourceImage == nil {
- return fmt.Errorf("source image %q not found", source)
+ return fmt.Errorf("parsing reference %q: %v", source, err)
}
- err = api.tagImage(sourceImage.ID, target)
+ targetRef, err := name.ParseReference(target)
if err != nil {
- return fmt.Errorf("error tagging image %q: %v", source, err)
+ return fmt.Errorf("parsing reference for %q: %v", target, err)
}
- err = cli.pushImage(target)
+ options := []remote.Option{remote.WithAuthFromKeychain(authn.DefaultKeychain)}
+
+ desc, err := remote.Get(sourceRef, options...)
if err != nil {
- return fmt.Errorf("error pushing image %q: %v", target, err)
+ return fmt.Errorf("fetching %q: %v", source, err)
+ }
+
+ targetDesc, err := remote.Get(targetRef, options...)
+ if err == nil && desc.Digest.String() == targetDesc.Digest.String() {
+ klog.Infof("no need to copy image from %v to %v", sourceRef, targetRef)
+ return nil
+ }
+
+ switch desc.MediaType {
+ case types.OCIImageIndex, types.DockerManifestList:
+ // Handle indexes separately.
+ if err := copyIndex(desc, sourceRef, targetRef, options...); err != nil {
+ return fmt.Errorf("failed to copy index: %v", err)
+ }
+ default:
+ // Assume anything else is an image, since some registries don't set mediaTypes properly.
+ if err := copyImage(desc, sourceRef, targetRef, options...); err != nil {
+ return fmt.Errorf("failed to copy image: %v", err)
+ }
}
return nil
}
+
+func copyImage(desc *remote.Descriptor, sourceRef name.Reference, targetRef name.Reference, options ...remote.Option) error {
+ klog.Infof("copying image from %v to %v", sourceRef, targetRef)
+
+ img, err := desc.Image()
+ if err != nil {
+ return err
+ }
+ return remote.Write(targetRef, img, options...)
+}
+
+func copyIndex(desc *remote.Descriptor, sourceRef name.Reference, targetRef name.Reference, options ...remote.Option) error {
+ klog.Infof("copying image index from %v to %v", sourceRef, targetRef)
+
+ idx, err := desc.ImageIndex()
+ if err != nil {
+ return err
+ }
+ return remote.WriteIndex(targetRef, idx, options...)
+}
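
For reference, the whole copy flow added above rests on a handful of go-containerregistry calls. The following standalone sketch shows the same remote.Get / remote.Write pattern outside of the kops CopyImage task; it assumes go-containerregistry v0.5.1, and the two image references are placeholders rather than anything this change touches.

package main

import (
	"fmt"
	"log"

	"github.com/google/go-containerregistry/pkg/authn"
	"github.com/google/go-containerregistry/pkg/name"
	"github.com/google/go-containerregistry/pkg/v1/remote"
	"github.com/google/go-containerregistry/pkg/v1/types"
)

func main() {
	// Placeholder references; substitute real source and target images.
	src, err := name.ParseReference("registry.example.com/library/busybox:latest")
	if err != nil {
		log.Fatal(err)
	}
	dst, err := name.ParseReference("registry.example.com/mirror/busybox:latest")
	if err != nil {
		log.Fatal(err)
	}
	opts := []remote.Option{remote.WithAuthFromKeychain(authn.DefaultKeychain)}

	// A single GET returns the manifest (or manifest list) plus its digest and media type.
	desc, err := remote.Get(src, opts...)
	if err != nil {
		log.Fatal(err)
	}

	switch desc.MediaType {
	case types.OCIImageIndex, types.DockerManifestList:
		// Multi-arch reference: copy the whole index so every platform manifest survives.
		idx, err := desc.ImageIndex()
		if err != nil {
			log.Fatal(err)
		}
		if err := remote.WriteIndex(dst, idx, opts...); err != nil {
			log.Fatal(err)
		}
	default:
		// Anything else is treated as a single image, as in CopyImage.Run above.
		img, err := desc.Image()
		if err != nil {
			log.Fatal(err)
		}
		if err := remote.Write(dst, img, opts...); err != nil {
			log.Fatal(err)
		}
	}
	fmt.Printf("copied %s -> %s (digest %s)\n", src, dst, desc.Digest)
}
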
diff --git a/pkg/assets/docker_api.go b/pkg/assets/docker_api.go
deleted file mode 100644
index 20a75fc966..0000000000
--- a/pkg/assets/docker_api.go
+++ /dev/null
@@ -1,95 +0,0 @@
-/*
-Copyright 2017 The Kubernetes Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package assets
-
-import (
- "context"
- "fmt"
- "strings"
-
- "github.com/docker/docker/api/types"
- "github.com/docker/docker/api/types/filters"
- "github.com/docker/docker/client"
- "k8s.io/klog/v2"
-)
-
-// dockerAPI encapsulates access to docker via the API
-type dockerAPI struct {
- client *client.Client
-}
-
-// newDockerAPI builds a dockerAPI object, for talking to docker via the API
-func newDockerAPI() (*dockerAPI, error) {
- klog.V(4).Infof("docker creating api client")
- c, err := client.NewClientWithOpts(client.FromEnv)
- if err != nil {
- return nil, fmt.Errorf("error building docker client: %v", err)
- }
-
- if c == nil {
- return nil, fmt.Errorf("error building docker client, client returned is nil")
- }
-
- // Test the client
- ctx := context.Background()
- _, err = c.Info(ctx)
- if err != nil {
- // TODO check if /var/run/docker.sock exists and create a connection using that
- klog.Errorf("Unable to create docker client please set DOCKER_HOST to unix socket or tcp socket")
- klog.Errorf("Standard DOCKER_HOST values can be %q and defaults to %q", "unix:///var/run/docker.sock", client.DefaultDockerHost)
- return nil, fmt.Errorf("error building docker client, unable to make info call: %v", err)
- }
-
- return &dockerAPI{
- client: c,
- }, nil
-}
-
-// findImage does a `docker images` via the API, and finds the specified image
-func (d *dockerAPI) findImage(name string) (*types.ImageSummary, error) {
- name = strings.TrimPrefix(name, "docker.io/")
- klog.V(4).Infof("docker query for image %q", name)
- filter := filters.NewArgs(filters.KeyValuePair{Key: "reference", Value: name})
- options := types.ImageListOptions{
- Filters: filter,
- }
- ctx := context.Background()
- images, err := d.client.ImageList(ctx, options)
- if err != nil {
- return nil, fmt.Errorf("error listing images: %v", err)
- }
- for i := range images {
- for _, repoTag := range images[i].RepoTags {
- if repoTag == name {
- return &images[i], nil
- }
- }
- }
- return nil, nil
-}
-
-// tagImage does a `docker tag`, via the API
-func (d *dockerAPI) tagImage(imageID string, ref string) error {
- klog.V(4).Infof("docker tag for image %q, tag %q", imageID, ref)
-
- ctx := context.Background()
- err := d.client.ImageTag(ctx, imageID, ref)
- if err != nil {
- return fmt.Errorf("error tagging image %q with tag %q: %v", imageID, ref, err)
- }
- return nil
-}
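
The findImage and tagImage helpers removed above required a reachable Docker daemon (DOCKER_HOST) to list and tag local images, whereas the new CopyImage talks to registries directly. If a daemon-side lookup were ever needed again, go-containerregistry's pkg/v1/daemon package provides one; the sketch below is illustrative only (assumes v0.5.1, placeholder reference) and is not part of this change.

package main

import (
	"fmt"
	"log"

	"github.com/google/go-containerregistry/pkg/name"
	"github.com/google/go-containerregistry/pkg/v1/daemon"
)

func main() {
	// Placeholder reference; findImage previously matched names like this against RepoTags.
	ref, err := name.ParseReference("busybox:latest")
	if err != nil {
		log.Fatal(err)
	}
	// Read the image from the local Docker daemon instead of a remote registry.
	img, err := daemon.Image(ref)
	if err != nil {
		log.Fatal(err)
	}
	digest, err := img.Digest()
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("%s resolves locally to %s\n", ref, digest)
}
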
diff --git a/pkg/assets/docker_cli.go b/pkg/assets/docker_cli.go
deleted file mode 100644
index ec52fd59b4..0000000000
--- a/pkg/assets/docker_cli.go
+++ /dev/null
@@ -1,59 +0,0 @@
-/*
-Copyright 2017 The Kubernetes Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package assets
-
-import (
- "fmt"
- "os/exec"
-
- "k8s.io/klog/v2"
-)
-
-// dockerCLI encapsulates access to docker via the CLI
-type dockerCLI struct {
-}
-
-// newDockerCLI builds a dockerCLI object, for talking to docker via the CLI
-func newDockerCLI() (*dockerCLI, error) {
- return &dockerCLI{}, nil
-}
-
-// pullImage does a `docker pull`, shelling out to the CLI
-func (d *dockerCLI) pullImage(name string) error {
- klog.V(4).Infof("docker pull for image %q", name)
-
- cmd := exec.Command("docker", "pull", name)
- err := cmd.Run()
- if err != nil {
- return fmt.Errorf("error pulling image %q: %v", name, err)
- }
-
- return nil
-}
-
-// pushImage does a docker push, shelling out to the CLI
-func (d *dockerCLI) pushImage(name string) error {
- klog.V(4).Infof("docker push for image %q", name)
-
- cmd := exec.Command("docker", "push", name)
- err := cmd.Run()
- if err != nil {
- return fmt.Errorf("error pushing image %q: %v", name, err)
- }
-
- return nil
-}
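
The pullImage and pushImage helpers removed above shelled out to `docker pull` and `docker push`, so image promotion required both a local Docker CLI and a running daemon. With go-containerregistry the equivalent registry-to-registry copy is a single library call via the crane package; a minimal sketch (assuming v0.5.1, placeholder references, not part of this change):

package main

import (
	"log"

	"github.com/google/go-containerregistry/pkg/crane"
)

func main() {
	// crane.Copy streams the image (or index) from source to target registry
	// without a local daemon; credentials come from the default keychain
	// (the local Docker config) unless overridden with crane options.
	src := "registry.example.com/library/busybox:latest" // placeholder
	dst := "registry.example.com/mirror/busybox:latest"  // placeholder
	if err := crane.Copy(src, dst); err != nil {
		log.Fatal(err)
	}
}
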
diff --git a/tests/e2e/go.sum b/tests/e2e/go.sum
index 83f854b79e..10609cac4e 100644
--- a/tests/e2e/go.sum
+++ b/tests/e2e/go.sum
@@ -321,6 +321,7 @@ github.com/containerd/continuity v0.0.0-20200107194136-26c1120b8d41/go.mod h1:Dq
github.com/containerd/fifo v0.0.0-20190226154929-a9fb20d87448/go.mod h1:ODA38xgv3Kuk8dQz2ZQXpnv/UZZUHUCL7pnLehbXgQI=
github.com/containerd/go-runc v0.0.0-20180907222934-5a6d9f37cfa3/go.mod h1:IV7qH3hrUgRmyYrtgEeGWJfWbgcHL9CSRruz2Vqcph0=
github.com/containerd/stargz-snapshotter/estargz v0.0.0-20201217071531-2b97b583765b/go.mod h1:E9uVkkBKf0EaC39j2JVW9EzdNhYvpz6eQIjILHebruk=
+github.com/containerd/stargz-snapshotter/estargz v0.4.1/go.mod h1:x7Q9dg9QYb4+ELgxmo4gBUeJB0tl5dqH1Sdz0nJU1QM=
github.com/containerd/ttrpc v0.0.0-20190828154514-0e0f228740de/go.mod h1:PvCDdDGpgqzQIzDW1TphrGLssLDZp2GuS+X5DkEJB8o=
github.com/containerd/ttrpc v1.0.2/go.mod h1:UAxOpgT9ziI0gJrmKvgcZivgxOp8iFPSk8httJEt98Y=
github.com/containerd/typeurl v0.0.0-20180627222232-a93fcdb778cd/go.mod h1:Cm3kwCdlkCfMSHURc+r6fwoGH6/F1hH3S4sg0rLFWPc=
@@ -681,6 +682,7 @@ github.com/google/go-cmp v0.5.5 h1:Khx7svrCpmxxtHBq5j2mp/xVjsi8hQMfNLvJFAlrGgU=
github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-containerregistry v0.0.0-20200115214256-379933c9c22b/go.mod h1:Wtl/v6YdQxv397EREtzwgd9+Ud7Q5D8XMbi3Zazgkrs=
github.com/google/go-containerregistry v0.3.0/go.mod h1:BJ7VxR1hAhdiZBGGnvGETHEmFs1hzXc4VM1xjOPO9wA=
+github.com/google/go-containerregistry v0.5.1/go.mod h1:Ct15B4yir3PLOP5jsy0GNeYVaIZs/MK/Jz5any1wFW0=
github.com/google/go-github v17.0.0+incompatible h1:N0LgJ1j65A7kfXrZnUDaYCs/Sf4rEjNlfyDHW9dolSY=
github.com/google/go-github v17.0.0+incompatible/go.mod h1:zLgOLi98H3fifZn+44m+umXrS52loVEgC2AApnigrVQ=
github.com/google/go-github/v29 v29.0.3/go.mod h1:CHKiKKPHJ0REzfwc14QMklvtHwCveD0PxlMjLlzAM5E=
@@ -2178,6 +2180,7 @@ k8s.io/csi-translation-lib v0.21.0/go.mod h1:edq+UMpgqEx3roTuGF/03uIuSOsI986jtu6
k8s.io/gengo v0.0.0-20190128074634-0689ccc1d7d6/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0=
k8s.io/gengo v0.0.0-20191108084044-e500ee069b5c/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0=
k8s.io/gengo v0.0.0-20200413195148-3a45101e95ac/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0=
+k8s.io/gengo v0.0.0-20201113003025-83324d819ded/go.mod h1:FiNAH4ZV3gBg2Kwh89tzAEV2be7d5xI0vBa/VySYy3E=
k8s.io/gengo v0.0.0-20201214224949-b6c5ce23f027/go.mod h1:FiNAH4ZV3gBg2Kwh89tzAEV2be7d5xI0vBa/VySYy3E=
k8s.io/gengo v0.0.0-20210203185629-de9496dff47b/go.mod h1:FiNAH4ZV3gBg2Kwh89tzAEV2be7d5xI0vBa/VySYy3E=
k8s.io/heapster v1.2.0-beta.1/go.mod h1:h1uhptVXMwC8xtZBYsPXKVi8fpdlYkTs6k949KozGrM=
diff --git a/vendor/github.com/docker/docker/api/BUILD.bazel b/vendor/github.com/docker/docker/api/BUILD.bazel
deleted file mode 100644
index b43021f9a8..0000000000
--- a/vendor/github.com/docker/docker/api/BUILD.bazel
+++ /dev/null
@@ -1,13 +0,0 @@
-load("@io_bazel_rules_go//go:def.bzl", "go_library")
-
-go_library(
- name = "go_default_library",
- srcs = [
- "common.go",
- "common_unix.go",
- "common_windows.go",
- ],
- importmap = "k8s.io/kops/vendor/github.com/docker/docker/api",
- importpath = "github.com/docker/docker/api",
- visibility = ["//visibility:public"],
-)
diff --git a/vendor/github.com/docker/docker/api/README.md b/vendor/github.com/docker/docker/api/README.md
deleted file mode 100644
index f136c3433a..0000000000
--- a/vendor/github.com/docker/docker/api/README.md
+++ /dev/null
@@ -1,42 +0,0 @@
-# Working on the Engine API
-
-The Engine API is an HTTP API used by the command-line client to communicate with the daemon. It can also be used by third-party software to control the daemon.
-
-It consists of various components in this repository:
-
-- `api/swagger.yaml` A Swagger definition of the API.
-- `api/types/` Types shared by both the client and server, representing various objects, options, responses, etc. Most are written manually, but some are automatically generated from the Swagger definition. See [#27919](https://github.com/docker/docker/issues/27919) for progress on this.
-- `cli/` The command-line client.
-- `client/` The Go client used by the command-line client. It can also be used by third-party Go programs.
-- `daemon/` The daemon, which serves the API.
-
-## Swagger definition
-
-The API is defined by the [Swagger](http://swagger.io/specification/) definition in `api/swagger.yaml`. This definition can be used to:
-
-1. Automatically generate documentation.
-2. Automatically generate the Go server and client. (A work-in-progress.)
-3. Provide a machine readable version of the API for introspecting what it can do, automatically generating clients for other languages, etc.
-
-## Updating the API documentation
-
-The API documentation is generated entirely from `api/swagger.yaml`. If you make updates to the API, edit this file to represent the change in the documentation.
-
-The file is split into two main sections:
-
-- `definitions`, which defines re-usable objects used in requests and responses
-- `paths`, which defines the API endpoints (and some inline objects which don't need to be reusable)
-
-To make an edit, first look for the endpoint you want to edit under `paths`, then make the required edits. Endpoints may reference reusable objects with `$ref`, which can be found in the `definitions` section.
-
-There is hopefully enough example material in the file for you to copy a similar pattern from elsewhere in the file (e.g. adding new fields or endpoints), but for the full reference, see the [Swagger specification](http://swagger.io/specification/).
-
-`swagger.yaml` is validated by `hack/validate/swagger` to ensure it is a valid Swagger definition. This is useful when making edits to ensure you are doing the right thing.
-
-## Viewing the API documentation
-
-When you make edits to `swagger.yaml`, you may want to check the generated API documentation to ensure it renders correctly.
-
-Run `make swagger-docs` and a preview will be running at `http://localhost`. Some of the styling may be incorrect, but you'll be able to ensure that it is generating the correct documentation.
-
-The production documentation is generated by vendoring `swagger.yaml` into [docker/docker.github.io](https://github.com/docker/docker.github.io).
diff --git a/vendor/github.com/docker/docker/api/common.go b/vendor/github.com/docker/docker/api/common.go
deleted file mode 100644
index 1565e2af64..0000000000
--- a/vendor/github.com/docker/docker/api/common.go
+++ /dev/null
@@ -1,11 +0,0 @@
-package api // import "github.com/docker/docker/api"
-
-// Common constants for daemon and client.
-const (
- // DefaultVersion of Current REST API
- DefaultVersion = "1.41"
-
- // NoBaseImageSpecifier is the symbol used by the FROM
- // command to specify that no base image is to be used.
- NoBaseImageSpecifier = "scratch"
-)
diff --git a/vendor/github.com/docker/docker/api/common_unix.go b/vendor/github.com/docker/docker/api/common_unix.go
deleted file mode 100644
index 504b0c90d7..0000000000
--- a/vendor/github.com/docker/docker/api/common_unix.go
+++ /dev/null
@@ -1,6 +0,0 @@
-// +build !windows
-
-package api // import "github.com/docker/docker/api"
-
-// MinVersion represents Minimum REST API version supported
-const MinVersion = "1.12"
diff --git a/vendor/github.com/docker/docker/api/common_windows.go b/vendor/github.com/docker/docker/api/common_windows.go
deleted file mode 100644
index 590ba5479b..0000000000
--- a/vendor/github.com/docker/docker/api/common_windows.go
+++ /dev/null
@@ -1,8 +0,0 @@
-package api // import "github.com/docker/docker/api"
-
-// MinVersion represents Minimum REST API version supported
-// Technically the first daemon API version released on Windows is v1.25 in
-// engine version 1.13. However, some clients are explicitly using downlevel
-// APIs (e.g. docker-compose v2.1 file format) and that is just too restrictive.
-// Hence also allowing 1.24 on Windows.
-const MinVersion string = "1.24"
diff --git a/vendor/github.com/docker/docker/api/swagger-gen.yaml b/vendor/github.com/docker/docker/api/swagger-gen.yaml
deleted file mode 100644
index f07a02737f..0000000000
--- a/vendor/github.com/docker/docker/api/swagger-gen.yaml
+++ /dev/null
@@ -1,12 +0,0 @@
-
-layout:
- models:
- - name: definition
- source: asset:model
- target: "{{ joinFilePath .Target .ModelPackage }}"
- file_name: "{{ (snakize (pascalize .Name)) }}.go"
- operations:
- - name: handler
- source: asset:serverOperation
- target: "{{ joinFilePath .Target .APIPackage .Package }}"
- file_name: "{{ (snakize (pascalize .Name)) }}.go"
diff --git a/vendor/github.com/docker/docker/api/swagger.yaml b/vendor/github.com/docker/docker/api/swagger.yaml
deleted file mode 100644
index 1294e5a22c..0000000000
--- a/vendor/github.com/docker/docker/api/swagger.yaml
+++ /dev/null
@@ -1,11425 +0,0 @@
-# A Swagger 2.0 (a.k.a. OpenAPI) definition of the Engine API.
-#
-# This is used for generating API documentation and the types used by the
-# client/server. See api/README.md for more information.
-#
-# Some style notes:
-# - This file is used by ReDoc, which allows GitHub Flavored Markdown in
-# descriptions.
-# - There is no maximum line length, for ease of editing and pretty diffs.
-# - operationIds are in the format "NounVerb", with a singular noun.
-
-swagger: "2.0"
-schemes:
- - "http"
- - "https"
-produces:
- - "application/json"
- - "text/plain"
-consumes:
- - "application/json"
- - "text/plain"
-basePath: "/v1.41"
-info:
- title: "Docker Engine API"
- version: "1.41"
- x-logo:
- url: "https://docs.docker.com/images/logo-docker-main.png"
- description: |
- The Engine API is an HTTP API served by Docker Engine. It is the API the
- Docker client uses to communicate with the Engine, so everything the Docker
- client can do can be done with the API.
-
- Most of the client's commands map directly to API endpoints (e.g. `docker ps`
- is `GET /containers/json`). The notable exception is running containers,
- which consists of several API calls.
-
- # Errors
-
- The API uses standard HTTP status codes to indicate the success or failure
- of the API call. The body of the response will be JSON in the following
- format:
-
- ```
- {
- "message": "page not found"
- }
- ```
-
- # Versioning
-
- The API is usually changed in each release, so API calls are versioned to
- ensure that clients don't break. To lock to a specific version of the API,
- you prefix the URL with its version, for example, call `/v1.30/info` to use
- the v1.30 version of the `/info` endpoint. If the API version specified in
- the URL is not supported by the daemon, a HTTP `400 Bad Request` error message
- is returned.
-
- If you omit the version-prefix, the current version of the API (v1.41) is used.
- For example, calling `/info` is the same as calling `/v1.41/info`. Using the
- API without a version-prefix is deprecated and will be removed in a future release.
-
- Engine releases in the near future should support this version of the API,
- so your client will continue to work even if it is talking to a newer Engine.
-
- The API uses an open schema model, which means server may add extra properties
- to responses. Likewise, the server will ignore any extra query parameters and
- request body properties. When you write clients, you need to ignore additional
- properties in responses to ensure they do not break when talking to newer
- daemons.
-
-
- # Authentication
-
- Authentication for registries is handled client side. The client has to send
- authentication details to various endpoints that need to communicate with
- registries, such as `POST /images/(name)/push`. These are sent as
- `X-Registry-Auth` header as a [base64url encoded](https://tools.ietf.org/html/rfc4648#section-5)
- (JSON) string with the following structure:
-
- ```
- {
- "username": "string",
- "password": "string",
- "email": "string",
- "serveraddress": "string"
- }
- ```
-
- The `serveraddress` is a domain/IP without a protocol. Throughout this
- structure, double quotes are required.
-
- If you have already got an identity token from the [`/auth` endpoint](#operation/SystemAuth),
- you can just pass this instead of credentials:
-
- ```
- {
- "identitytoken": "9cbaf023786cd7..."
- }
- ```
-
-# The tags on paths define the menu sections in the ReDoc documentation, so
-# the usage of tags must make sense for that:
-# - They should be singular, not plural.
-# - There should not be too many tags, or the menu becomes unwieldy. For
-# example, it is preferable to add a path to the "System" tag instead of
-# creating a tag with a single path in it.
-# - The order of tags in this list defines the order in the menu.
-tags:
- # Primary objects
- - name: "Container"
- x-displayName: "Containers"
- description: |
- Create and manage containers.
- - name: "Image"
- x-displayName: "Images"
- - name: "Network"
- x-displayName: "Networks"
- description: |
- Networks are user-defined networks that containers can be attached to.
- See the [networking documentation](https://docs.docker.com/network/)
- for more information.
- - name: "Volume"
- x-displayName: "Volumes"
- description: |
- Create and manage persistent storage that can be attached to containers.
- - name: "Exec"
- x-displayName: "Exec"
- description: |
- Run new commands inside running containers. Refer to the
- [command-line reference](https://docs.docker.com/engine/reference/commandline/exec/)
- for more information.
-
- To exec a command in a container, you first need to create an exec instance,
- then start it. These two API endpoints are wrapped up in a single command-line
- command, `docker exec`.
-
- # Swarm things
- - name: "Swarm"
- x-displayName: "Swarm"
- description: |
- Engines can be clustered together in a swarm. Refer to the
- [swarm mode documentation](https://docs.docker.com/engine/swarm/)
- for more information.
- - name: "Node"
- x-displayName: "Nodes"
- description: |
- Nodes are instances of the Engine participating in a swarm. Swarm mode
- must be enabled for these endpoints to work.
- - name: "Service"
- x-displayName: "Services"
- description: |
- Services are the definitions of tasks to run on a swarm. Swarm mode must
- be enabled for these endpoints to work.
- - name: "Task"
- x-displayName: "Tasks"
- description: |
- A task is a container running on a swarm. It is the atomic scheduling unit
- of swarm. Swarm mode must be enabled for these endpoints to work.
- - name: "Secret"
- x-displayName: "Secrets"
- description: |
- Secrets are sensitive data that can be used by services. Swarm mode must
- be enabled for these endpoints to work.
- - name: "Config"
- x-displayName: "Configs"
- description: |
- Configs are application configurations that can be used by services. Swarm
- mode must be enabled for these endpoints to work.
- # System things
- - name: "Plugin"
- x-displayName: "Plugins"
- - name: "System"
- x-displayName: "System"
-
-definitions:
- Port:
- type: "object"
- description: "An open port on a container"
- required: [PrivatePort, Type]
- properties:
- IP:
- type: "string"
- format: "ip-address"
- description: "Host IP address that the container's port is mapped to"
- PrivatePort:
- type: "integer"
- format: "uint16"
- x-nullable: false
- description: "Port on the container"
- PublicPort:
- type: "integer"
- format: "uint16"
- description: "Port exposed on the host"
- Type:
- type: "string"
- x-nullable: false
- enum: ["tcp", "udp", "sctp"]
- example:
- PrivatePort: 8080
- PublicPort: 80
- Type: "tcp"
-
- MountPoint:
- type: "object"
- description: "A mount point inside a container"
- properties:
- Type:
- type: "string"
- Name:
- type: "string"
- Source:
- type: "string"
- Destination:
- type: "string"
- Driver:
- type: "string"
- Mode:
- type: "string"
- RW:
- type: "boolean"
- Propagation:
- type: "string"
-
- DeviceMapping:
- type: "object"
- description: "A device mapping between the host and container"
- properties:
- PathOnHost:
- type: "string"
- PathInContainer:
- type: "string"
- CgroupPermissions:
- type: "string"
- example:
- PathOnHost: "/dev/deviceName"
- PathInContainer: "/dev/deviceName"
- CgroupPermissions: "mrw"
-
- DeviceRequest:
- type: "object"
- description: "A request for devices to be sent to device drivers"
- properties:
- Driver:
- type: "string"
- example: "nvidia"
- Count:
- type: "integer"
- example: -1
- DeviceIDs:
- type: "array"
- items:
- type: "string"
- example:
- - "0"
- - "1"
- - "GPU-fef8089b-4820-abfc-e83e-94318197576e"
- Capabilities:
- description: |
- A list of capabilities; an OR list of AND lists of capabilities.
- type: "array"
- items:
- type: "array"
- items:
- type: "string"
- example:
- # gpu AND nvidia AND compute
- - ["gpu", "nvidia", "compute"]
- Options:
- description: |
- Driver-specific options, specified as a key/value pairs. These options
- are passed directly to the driver.
- type: "object"
- additionalProperties:
- type: "string"
-
- ThrottleDevice:
- type: "object"
- properties:
- Path:
- description: "Device path"
- type: "string"
- Rate:
- description: "Rate"
- type: "integer"
- format: "int64"
- minimum: 0
-
- Mount:
- type: "object"
- properties:
- Target:
- description: "Container path."
- type: "string"
- Source:
- description: "Mount source (e.g. a volume name, a host path)."
- type: "string"
- Type:
- description: |
- The mount type. Available types:
-
- - `bind` Mounts a file or directory from the host into the container. Must exist prior to creating the container.
- - `volume` Creates a volume with the given name and options (or uses a pre-existing volume with the same name and options). These are **not** removed when the container is removed.
- - `tmpfs` Create a tmpfs with the given options. The mount source cannot be specified for tmpfs.
- - `npipe` Mounts a named pipe from the host into the container. Must exist prior to creating the container.
- type: "string"
- enum:
- - "bind"
- - "volume"
- - "tmpfs"
- - "npipe"
- ReadOnly:
- description: "Whether the mount should be read-only."
- type: "boolean"
- Consistency:
- description: "The consistency requirement for the mount: `default`, `consistent`, `cached`, or `delegated`."
- type: "string"
- BindOptions:
- description: "Optional configuration for the `bind` type."
- type: "object"
- properties:
- Propagation:
- description: "A propagation mode with the value `[r]private`, `[r]shared`, or `[r]slave`."
- type: "string"
- enum:
- - "private"
- - "rprivate"
- - "shared"
- - "rshared"
- - "slave"
- - "rslave"
- NonRecursive:
- description: "Disable recursive bind mount."
- type: "boolean"
- default: false
- VolumeOptions:
- description: "Optional configuration for the `volume` type."
- type: "object"
- properties:
- NoCopy:
- description: "Populate volume with data from the target."
- type: "boolean"
- default: false
- Labels:
- description: "User-defined key/value metadata."
- type: "object"
- additionalProperties:
- type: "string"
- DriverConfig:
- description: "Map of driver specific options"
- type: "object"
- properties:
- Name:
- description: "Name of the driver to use to create the volume."
- type: "string"
- Options:
- description: "key/value map of driver specific options."
- type: "object"
- additionalProperties:
- type: "string"
- TmpfsOptions:
- description: "Optional configuration for the `tmpfs` type."
- type: "object"
- properties:
- SizeBytes:
- description: "The size for the tmpfs mount in bytes."
- type: "integer"
- format: "int64"
- Mode:
- description: "The permission mode for the tmpfs mount in an integer."
- type: "integer"
-
- RestartPolicy:
- description: |
- The behavior to apply when the container exits. The default is not to
- restart.
-
- An ever increasing delay (double the previous delay, starting at 100ms) is
- added before each restart to prevent flooding the server.
- type: "object"
- properties:
- Name:
- type: "string"
- description: |
- - Empty string means not to restart
- - `always` Always restart
- - `unless-stopped` Restart always except when the user has manually stopped the container
- - `on-failure` Restart only when the container exit code is non-zero
- enum:
- - ""
- - "always"
- - "unless-stopped"
- - "on-failure"
- MaximumRetryCount:
- type: "integer"
- description: |
- If `on-failure` is used, the number of times to retry before giving up.
-
- Resources:
- description: "A container's resources (cgroups config, ulimits, etc)"
- type: "object"
- properties:
- # Applicable to all platforms
- CpuShares:
- description: |
- An integer value representing this container's relative CPU weight
- versus other containers.
- type: "integer"
- Memory:
- description: "Memory limit in bytes."
- type: "integer"
- format: "int64"
- default: 0
- # Applicable to UNIX platforms
- CgroupParent:
- description: |
- Path to `cgroups` under which the container's `cgroup` is created. If
- the path is not absolute, the path is considered to be relative to the
- `cgroups` path of the init process. Cgroups are created if they do not
- already exist.
- type: "string"
- BlkioWeight:
- description: "Block IO weight (relative weight)."
- type: "integer"
- minimum: 0
- maximum: 1000
- BlkioWeightDevice:
- description: |
- Block IO weight (relative device weight) in the form:
-
- ```
- [{"Path": "device_path", "Weight": weight}]
- ```
- type: "array"
- items:
- type: "object"
- properties:
- Path:
- type: "string"
- Weight:
- type: "integer"
- minimum: 0
- BlkioDeviceReadBps:
- description: |
- Limit read rate (bytes per second) from a device, in the form:
-
- ```
- [{"Path": "device_path", "Rate": rate}]
- ```
- type: "array"
- items:
- $ref: "#/definitions/ThrottleDevice"
- BlkioDeviceWriteBps:
- description: |
- Limit write rate (bytes per second) to a device, in the form:
-
- ```
- [{"Path": "device_path", "Rate": rate}]
- ```
- type: "array"
- items:
- $ref: "#/definitions/ThrottleDevice"
- BlkioDeviceReadIOps:
- description: |
- Limit read rate (IO per second) from a device, in the form:
-
- ```
- [{"Path": "device_path", "Rate": rate}]
- ```
- type: "array"
- items:
- $ref: "#/definitions/ThrottleDevice"
- BlkioDeviceWriteIOps:
- description: |
- Limit write rate (IO per second) to a device, in the form:
-
- ```
- [{"Path": "device_path", "Rate": rate}]
- ```
- type: "array"
- items:
- $ref: "#/definitions/ThrottleDevice"
- CpuPeriod:
- description: "The length of a CPU period in microseconds."
- type: "integer"
- format: "int64"
- CpuQuota:
- description: |
- Microseconds of CPU time that the container can get in a CPU period.
- type: "integer"
- format: "int64"
- CpuRealtimePeriod:
- description: |
- The length of a CPU real-time period in microseconds. Set to 0 to
- allocate no time allocated to real-time tasks.
- type: "integer"
- format: "int64"
- CpuRealtimeRuntime:
- description: |
- The length of a CPU real-time runtime in microseconds. Set to 0 to
- allocate no time allocated to real-time tasks.
- type: "integer"
- format: "int64"
- CpusetCpus:
- description: |
- CPUs in which to allow execution (e.g., `0-3`, `0,1`).
- type: "string"
- example: "0-3"
- CpusetMems:
- description: |
- Memory nodes (MEMs) in which to allow execution (0-3, 0,1). Only
- effective on NUMA systems.
- type: "string"
- Devices:
- description: "A list of devices to add to the container."
- type: "array"
- items:
- $ref: "#/definitions/DeviceMapping"
- DeviceCgroupRules:
- description: "a list of cgroup rules to apply to the container"
- type: "array"
- items:
- type: "string"
- example: "c 13:* rwm"
- DeviceRequests:
- description: |
- A list of requests for devices to be sent to device drivers.
- type: "array"
- items:
- $ref: "#/definitions/DeviceRequest"
- KernelMemory:
- description: |
- Kernel memory limit in bytes.
-
-
-
- > **Deprecated**: This field is deprecated as the kernel 5.4 deprecated
- > `kmem.limit_in_bytes`.
- type: "integer"
- format: "int64"
- example: 209715200
- KernelMemoryTCP:
- description: "Hard limit for kernel TCP buffer memory (in bytes)."
- type: "integer"
- format: "int64"
- MemoryReservation:
- description: "Memory soft limit in bytes."
- type: "integer"
- format: "int64"
- MemorySwap:
- description: |
- Total memory limit (memory + swap). Set as `-1` to enable unlimited
- swap.
- type: "integer"
- format: "int64"
- MemorySwappiness:
- description: |
- Tune a container's memory swappiness behavior. Accepts an integer
- between 0 and 100.
- type: "integer"
- format: "int64"
- minimum: 0
- maximum: 100
- NanoCpus:
- description: "CPU quota in units of 10-9 CPUs."
- type: "integer"
- format: "int64"
- OomKillDisable:
- description: "Disable OOM Killer for the container."
- type: "boolean"
- Init:
- description: |
- Run an init inside the container that forwards signals and reaps
- processes. This field is omitted if empty, and the default (as
- configured on the daemon) is used.
- type: "boolean"
- x-nullable: true
- PidsLimit:
- description: |
- Tune a container's PIDs limit. Set `0` or `-1` for unlimited, or `null`
- to not change.
- type: "integer"
- format: "int64"
- x-nullable: true
- Ulimits:
- description: |
- A list of resource limits to set in the container. For example:
-
- ```
- {"Name": "nofile", "Soft": 1024, "Hard": 2048}
- ```
- type: "array"
- items:
- type: "object"
- properties:
- Name:
- description: "Name of ulimit"
- type: "string"
- Soft:
- description: "Soft limit"
- type: "integer"
- Hard:
- description: "Hard limit"
- type: "integer"
- # Applicable to Windows
- CpuCount:
- description: |
- The number of usable CPUs (Windows only).
-
- On Windows Server containers, the processor resource controls are
- mutually exclusive. The order of precedence is `CPUCount` first, then
- `CPUShares`, and `CPUPercent` last.
- type: "integer"
- format: "int64"
- CpuPercent:
- description: |
- The usable percentage of the available CPUs (Windows only).
-
- On Windows Server containers, the processor resource controls are
- mutually exclusive. The order of precedence is `CPUCount` first, then
- `CPUShares`, and `CPUPercent` last.
- type: "integer"
- format: "int64"
- IOMaximumIOps:
- description: "Maximum IOps for the container system drive (Windows only)"
- type: "integer"
- format: "int64"
- IOMaximumBandwidth:
- description: |
- Maximum IO in bytes per second for the container system drive
- (Windows only).
- type: "integer"
- format: "int64"
-
- Limit:
- description: |
- An object describing a limit on resources which can be requested by a task.
- type: "object"
- properties:
- NanoCPUs:
- type: "integer"
- format: "int64"
- example: 4000000000
- MemoryBytes:
- type: "integer"
- format: "int64"
- example: 8272408576
- Pids:
- description: |
- Limits the maximum number of PIDs in the container. Set `0` for unlimited.
- type: "integer"
- format: "int64"
- default: 0
- example: 100
-
- ResourceObject:
- description: |
- An object describing the resources which can be advertised by a node and
- requested by a task.
- type: "object"
- properties:
- NanoCPUs:
- type: "integer"
- format: "int64"
- example: 4000000000
- MemoryBytes:
- type: "integer"
- format: "int64"
- example: 8272408576
- GenericResources:
- $ref: "#/definitions/GenericResources"
-
- GenericResources:
- description: |
- User-defined resources can be either Integer resources (e.g, `SSD=3`) or
- String resources (e.g, `GPU=UUID1`).
- type: "array"
- items:
- type: "object"
- properties:
- NamedResourceSpec:
- type: "object"
- properties:
- Kind:
- type: "string"
- Value:
- type: "string"
- DiscreteResourceSpec:
- type: "object"
- properties:
- Kind:
- type: "string"
- Value:
- type: "integer"
- format: "int64"
- example:
- - DiscreteResourceSpec:
- Kind: "SSD"
- Value: 3
- - NamedResourceSpec:
- Kind: "GPU"
- Value: "UUID1"
- - NamedResourceSpec:
- Kind: "GPU"
- Value: "UUID2"
-
- HealthConfig:
- description: "A test to perform to check that the container is healthy."
- type: "object"
- properties:
- Test:
- description: |
- The test to perform. Possible values are:
-
- - `[]` inherit healthcheck from image or parent image
- - `["NONE"]` disable healthcheck
- - `["CMD", args...]` exec arguments directly
- - `["CMD-SHELL", command]` run command with system's default shell
- type: "array"
- items:
- type: "string"
- Interval:
- description: |
- The time to wait between checks in nanoseconds. It should be 0 or at
- least 1000000 (1 ms). 0 means inherit.
- type: "integer"
- Timeout:
- description: |
- The time to wait before considering the check to have hung. It should
- be 0 or at least 1000000 (1 ms). 0 means inherit.
- type: "integer"
- Retries:
- description: |
- The number of consecutive failures needed to consider a container as
- unhealthy. 0 means inherit.
- type: "integer"
- StartPeriod:
- description: |
- Start period for the container to initialize before starting
- health-retries countdown in nanoseconds. It should be 0 or at least
- 1000000 (1 ms). 0 means inherit.
- type: "integer"
-
- Health:
- description: |
- Health stores information about the container's healthcheck results.
- type: "object"
- properties:
- Status:
- description: |
- Status is one of `none`, `starting`, `healthy` or `unhealthy`
-
- - "none" Indicates there is no healthcheck
- - "starting" Starting indicates that the container is not yet ready
- - "healthy" Healthy indicates that the container is running correctly
- - "unhealthy" Unhealthy indicates that the container has a problem
- type: "string"
- enum:
- - "none"
- - "starting"
- - "healthy"
- - "unhealthy"
- example: "healthy"
- FailingStreak:
- description: "FailingStreak is the number of consecutive failures"
- type: "integer"
- example: 0
- Log:
- type: "array"
- description: |
- Log contains the last few results (oldest first)
- items:
- x-nullable: true
- $ref: "#/definitions/HealthcheckResult"
-
- HealthcheckResult:
- description: |
- HealthcheckResult stores information about a single run of a healthcheck probe
- type: "object"
- properties:
- Start:
- description: |
- Date and time at which this check started in
- [RFC 3339](https://www.ietf.org/rfc/rfc3339.txt) format with nano-seconds.
- type: "string"
- format: "date-time"
- example: "2020-01-04T10:44:24.496525531Z"
- End:
- description: |
- Date and time at which this check ended in
- [RFC 3339](https://www.ietf.org/rfc/rfc3339.txt) format with nano-seconds.
- type: "string"
- format: "dateTime"
- example: "2020-01-04T10:45:21.364524523Z"
- ExitCode:
- description: |
- ExitCode meanings:
-
- - `0` healthy
- - `1` unhealthy
- - `2` reserved (considered unhealthy)
- - other values: error running probe
- type: "integer"
- example: 0
- Output:
- description: "Output from last check"
- type: "string"
-
- HostConfig:
- description: "Container configuration that depends on the host we are running on"
- allOf:
- - $ref: "#/definitions/Resources"
- - type: "object"
- properties:
- # Applicable to all platforms
- Binds:
- type: "array"
- description: |
- A list of volume bindings for this container. Each volume binding
- is a string in one of these forms:
-
- - `host-src:container-dest[:options]` to bind-mount a host path
- into the container. Both `host-src`, and `container-dest` must
- be an _absolute_ path.
- - `volume-name:container-dest[:options]` to bind-mount a volume
- managed by a volume driver into the container. `container-dest`
- must be an _absolute_ path.
-
- `options` is an optional, comma-delimited list of:
-
- - `nocopy` disables automatic copying of data from the container
- path to the volume. The `nocopy` flag only applies to named volumes.
- - `[ro|rw]` mounts a volume read-only or read-write, respectively.
- If omitted or set to `rw`, volumes are mounted read-write.
- - `[z|Z]` applies SELinux labels to allow or deny multiple containers
- to read and write to the same volume.
- - `z`: a _shared_ content label is applied to the content. This
- label indicates that multiple containers can share the volume
- content, for both reading and writing.
- - `Z`: a _private unshared_ label is applied to the content.
- This label indicates that only the current container can use
- a private volume. Labeling systems such as SELinux require
- proper labels to be placed on volume content that is mounted
- into a container. Without a label, the security system can
- prevent a container's processes from using the content. By
- default, the labels set by the host operating system are not
- modified.
- - `[[r]shared|[r]slave|[r]private]` specifies mount
- [propagation behavior](https://www.kernel.org/doc/Documentation/filesystems/sharedsubtree.txt).
- This only applies to bind-mounted volumes, not internal volumes
- or named volumes. Mount propagation requires the source mount
- point (the location where the source directory is mounted in the
- host operating system) to have the correct propagation properties.
- For shared volumes, the source mount point must be set to `shared`.
- For slave volumes, the mount must be set to either `shared` or
- `slave`.
- items:
- type: "string"
- ContainerIDFile:
- type: "string"
- description: "Path to a file where the container ID is written"
- LogConfig:
- type: "object"
- description: "The logging configuration for this container"
- properties:
- Type:
- type: "string"
- enum:
- - "json-file"
- - "syslog"
- - "journald"
- - "gelf"
- - "fluentd"
- - "awslogs"
- - "splunk"
- - "etwlogs"
- - "none"
- Config:
- type: "object"
- additionalProperties:
- type: "string"
- NetworkMode:
- type: "string"
- description: |
- Network mode to use for this container. Supported standard values
- are: `bridge`, `host`, `none`, and `container:<name|id>`. Any
- other value is taken as a custom network's name to which this
- container should connect to.
- PortBindings:
- $ref: "#/definitions/PortMap"
- RestartPolicy:
- $ref: "#/definitions/RestartPolicy"
- AutoRemove:
- type: "boolean"
- description: |
- Automatically remove the container when the container's process
- exits. This has no effect if `RestartPolicy` is set.
- VolumeDriver:
- type: "string"
- description: "Driver that this container uses to mount volumes."
- VolumesFrom:
- type: "array"
- description: |
- A list of volumes to inherit from another container, specified in
- the form `<container name>[:<ro|rw>]`.
- items:
- type: "string"
- Mounts:
- description: |
- Specification for mounts to be added to the container.
- type: "array"
- items:
- $ref: "#/definitions/Mount"
-
- # Applicable to UNIX platforms
- CapAdd:
- type: "array"
- description: |
- A list of kernel capabilities to add to the container. Conflicts
- with option 'Capabilities'.
- items:
- type: "string"
- CapDrop:
- type: "array"
- description: |
- A list of kernel capabilities to drop from the container. Conflicts
- with option 'Capabilities'.
- items:
- type: "string"
- CgroupnsMode:
- type: "string"
- enum:
- - "private"
- - "host"
- description: |
- cgroup namespace mode for the container. Possible values are:
-
- - `"private"`: the container runs in its own private cgroup namespace
- - `"host"`: use the host system's cgroup namespace
-
- If not specified, the daemon default is used, which can either be `"private"`
- or `"host"`, depending on daemon version, kernel support and configuration.
- Dns:
- type: "array"
- description: "A list of DNS servers for the container to use."
- items:
- type: "string"
- DnsOptions:
- type: "array"
- description: "A list of DNS options."
- items:
- type: "string"
- DnsSearch:
- type: "array"
- description: "A list of DNS search domains."
- items:
- type: "string"
- ExtraHosts:
- type: "array"
- description: |
- A list of hostnames/IP mappings to add to the container's `/etc/hosts`
- file. Specified in the form `["hostname:IP"]`.
- items:
- type: "string"
- GroupAdd:
- type: "array"
- description: |
- A list of additional groups that the container process will run as.
- items:
- type: "string"
- IpcMode:
- type: "string"
- description: |
- IPC sharing mode for the container. Possible values are:
-
- - `"none"`: own private IPC namespace, with /dev/shm not mounted
- - `"private"`: own private IPC namespace
- - `"shareable"`: own private IPC namespace, with a possibility to share it with other containers
- - `"container:"`: join another (shareable) container's IPC namespace
- - `"host"`: use the host system's IPC namespace
-
- If not specified, daemon default is used, which can either be `"private"`
- or `"shareable"`, depending on daemon version and configuration.
- Cgroup:
- type: "string"
- description: "Cgroup to use for the container."
- Links:
- type: "array"
- description: |
- A list of links for the container in the form `container_name:alias`.
- items:
- type: "string"
- OomScoreAdj:
- type: "integer"
- description: |
- An integer value containing the score given to the container in
- order to tune OOM killer preferences.
- example: 500
- PidMode:
- type: "string"
- description: |
- Set the PID (Process) Namespace mode for the container. It can be
- either:
-
- - `"container:"`: joins another container's PID namespace
- - `"host"`: use the host's PID namespace inside the container
- Privileged:
- type: "boolean"
- description: "Gives the container full access to the host."
- PublishAllPorts:
- type: "boolean"
- description: |
- Allocates an ephemeral host port for all of a container's
- exposed ports.
-
- Ports are de-allocated when the container stops and allocated when
- the container starts. The allocated port might be changed when
- restarting the container.
-
- The port is selected from the ephemeral port range that depends on
- the kernel. For example, on Linux the range is defined by
- `/proc/sys/net/ipv4/ip_local_port_range`.
- ReadonlyRootfs:
- type: "boolean"
- description: "Mount the container's root filesystem as read only."
- SecurityOpt:
- type: "array"
- description: "A list of string values to customize labels for MLS
- systems, such as SELinux."
- items:
- type: "string"
- StorageOpt:
- type: "object"
- description: |
- Storage driver options for this container, in the form `{"size": "120G"}`.
- additionalProperties:
- type: "string"
- Tmpfs:
- type: "object"
- description: |
- A map of container directories which should be replaced by tmpfs
- mounts, and their corresponding mount options. For example:
-
- ```
- { "/run": "rw,noexec,nosuid,size=65536k" }
- ```
- additionalProperties:
- type: "string"
- UTSMode:
- type: "string"
- description: "UTS namespace to use for the container."
- UsernsMode:
- type: "string"
- description: |
- Sets the user namespace mode for the container when the user
- namespace remapping option is enabled.
- ShmSize:
- type: "integer"
- description: |
- Size of `/dev/shm` in bytes. If omitted, the system uses 64MB.
- minimum: 0
- Sysctls:
- type: "object"
- description: |
- A list of kernel parameters (sysctls) to set in the container.
- For example:
-
- ```
- {"net.ipv4.ip_forward": "1"}
- ```
- additionalProperties:
- type: "string"
- Runtime:
- type: "string"
- description: "Runtime to use with this container."
- # Applicable to Windows
- ConsoleSize:
- type: "array"
- description: |
- Initial console size, as an `[height, width]` array. (Windows only)
- minItems: 2
- maxItems: 2
- items:
- type: "integer"
- minimum: 0
- Isolation:
- type: "string"
- description: |
- Isolation technology of the container. (Windows only)
- enum:
- - "default"
- - "process"
- - "hyperv"
- MaskedPaths:
- type: "array"
- description: |
- The list of paths to be masked inside the container (this overrides
- the default set of paths).
- items:
- type: "string"
- ReadonlyPaths:
- type: "array"
- description: |
- The list of paths to be set as read-only inside the container
- (this overrides the default set of paths).
- items:
- type: "string"
-
- ContainerConfig:
- description: "Configuration for a container that is portable between hosts"
- type: "object"
- properties:
- Hostname:
- description: "The hostname to use for the container, as a valid RFC 1123 hostname."
- type: "string"
- Domainname:
- description: "The domain name to use for the container."
- type: "string"
- User:
- description: "The user that commands are run as inside the container."
- type: "string"
- AttachStdin:
- description: "Whether to attach to `stdin`."
- type: "boolean"
- default: false
- AttachStdout:
- description: "Whether to attach to `stdout`."
- type: "boolean"
- default: true
- AttachStderr:
- description: "Whether to attach to `stderr`."
- type: "boolean"
- default: true
- ExposedPorts:
- description: |
- An object mapping ports to an empty object in the form:
-
- `{"/": {}}`
- type: "object"
- additionalProperties:
- type: "object"
- enum:
- - {}
- default: {}
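- # Illustrative sketch (not part of the schema): exposing TCP port 80
- # and UDP port 53 could look like:
- #
- #   "ExposedPorts": {"80/tcp": {}, "53/udp": {}}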
- Tty:
- description: |
- Attach standard streams to a TTY, including `stdin` if it is not closed.
- type: "boolean"
- default: false
- OpenStdin:
- description: "Open `stdin`"
- type: "boolean"
- default: false
- StdinOnce:
- description: "Close `stdin` after one attached client disconnects"
- type: "boolean"
- default: false
- Env:
- description: |
- A list of environment variables to set inside the container in the
- form `["VAR=value", ...]`. A variable without `=` is removed from the
- environment, rather than having an empty value.
- type: "array"
- items:
- type: "string"
- Cmd:
- description: |
- Command to run specified as a string or an array of strings.
- type: "array"
- items:
- type: "string"
- Healthcheck:
- $ref: "#/definitions/HealthConfig"
- ArgsEscaped:
- description: "Command is already escaped (Windows only)"
- type: "boolean"
- Image:
- description: |
- The name of the image to use when creating the container.
- type: "string"
- Volumes:
- description: |
- An object mapping mount point paths inside the container to empty
- objects.
- type: "object"
- additionalProperties:
- type: "object"
- enum:
- - {}
- default: {}
- WorkingDir:
- description: "The working directory for commands to run in."
- type: "string"
- Entrypoint:
- description: |
- The entry point for the container as a string or an array of strings.
-
- If the array consists of exactly one empty string (`[""]`) then the
- entry point is reset to system default (i.e., the entry point used by
- docker when there is no `ENTRYPOINT` instruction in the `Dockerfile`).
- type: "array"
- items:
- type: "string"
- NetworkDisabled:
- description: "Disable networking for the container."
- type: "boolean"
- MacAddress:
- description: "MAC address of the container."
- type: "string"
- OnBuild:
- description: |
- `ONBUILD` metadata that were defined in the image's `Dockerfile`.
- type: "array"
- items:
- type: "string"
- Labels:
- description: "User-defined key/value metadata."
- type: "object"
- additionalProperties:
- type: "string"
- StopSignal:
- description: |
- Signal to stop a container as a string or unsigned integer.
- type: "string"
- default: "SIGTERM"
- StopTimeout:
- description: "Timeout to stop a container in seconds."
- type: "integer"
- default: 10
- Shell:
- description: |
- Shell for when `RUN`, `CMD`, and `ENTRYPOINT` use a shell.
- type: "array"
- items:
- type: "string"
-
- NetworkingConfig:
- description: |
- NetworkingConfig represents the container's networking configuration for
- each of its interfaces.
- It is used for the networking configs specified in the `docker create`
- and `docker network connect` commands.
- type: "object"
- properties:
- EndpointsConfig:
- description: |
- A mapping of network name to endpoint configuration for that network.
- type: "object"
- additionalProperties:
- $ref: "#/definitions/EndpointSettings"
- example:
- # putting an example here, instead of using the example values from
- # /definitions/EndpointSettings, because containers/create currently
- # does not support attaching to multiple networks, so the example request
- # would be confusing if it showed that multiple networks can be contained
- # in the EndpointsConfig.
- # TODO remove once we support multiple networks on container create (see https://github.com/moby/moby/blob/07e6b843594e061f82baa5fa23c2ff7d536c2a05/daemon/create.go#L323)
- EndpointsConfig:
- isolated_nw:
- IPAMConfig:
- IPv4Address: "172.20.30.33"
- IPv6Address: "2001:db8:abcd::3033"
- LinkLocalIPs:
- - "169.254.34.68"
- - "fe80::3468"
- Links:
- - "container_1"
- - "container_2"
- Aliases:
- - "server_x"
- - "server_y"
-
- NetworkSettings:
- description: "NetworkSettings exposes the network settings in the API"
- type: "object"
- properties:
- Bridge:
- description: Name of the network's bridge (for example, `docker0`).
- type: "string"
- example: "docker0"
- SandboxID:
- description: SandboxID uniquely represents a container's network stack.
- type: "string"
- example: "9d12daf2c33f5959c8bf90aa513e4f65b561738661003029ec84830cd503a0c3"
- HairpinMode:
- description: |
- Indicates if hairpin NAT should be enabled on the virtual interface.
- type: "boolean"
- example: false
- LinkLocalIPv6Address:
- description: IPv6 unicast address using the link-local prefix.
- type: "string"
- example: "fe80::42:acff:fe11:1"
- LinkLocalIPv6PrefixLen:
- description: Prefix length of the IPv6 unicast address.
- type: "integer"
- example: "64"
- Ports:
- $ref: "#/definitions/PortMap"
- SandboxKey:
- description: SandboxKey identifies the sandbox
- type: "string"
- example: "/var/run/docker/netns/8ab54b426c38"
-
- # TODO is SecondaryIPAddresses actually used?
- SecondaryIPAddresses:
- description: ""
- type: "array"
- items:
- $ref: "#/definitions/Address"
- x-nullable: true
-
- # TODO is SecondaryIPv6Addresses actually used?
- SecondaryIPv6Addresses:
- description: ""
- type: "array"
- items:
- $ref: "#/definitions/Address"
- x-nullable: true
-
- # TODO properties below are part of DefaultNetworkSettings, which is
- # marked as deprecated since Docker 1.9 and to be removed in Docker v17.12
- EndpointID:
- description: |
- EndpointID uniquely represents a service endpoint in a Sandbox.
-
-
-
- > **Deprecated**: This field is only propagated when attached to the
- > default "bridge" network. Use the information from the "bridge"
- > network inside the `Networks` map instead, which contains the same
- > information. This field was deprecated in Docker 1.9 and is scheduled
- > to be removed in Docker 17.12.0
- type: "string"
- example: "b88f5b905aabf2893f3cbc4ee42d1ea7980bbc0a92e2c8922b1e1795298afb0b"
- Gateway:
- description: |
- Gateway address for the default "bridge" network.
-
-
-
- > **Deprecated**: This field is only propagated when attached to the
- > default "bridge" network. Use the information from the "bridge"
- > network inside the `Networks` map instead, which contains the same
- > information. This field was deprecated in Docker 1.9 and is scheduled
- > to be removed in Docker 17.12.0
- type: "string"
- example: "172.17.0.1"
- GlobalIPv6Address:
- description: |
- Global IPv6 address for the default "bridge" network.
-
-
-
- > **Deprecated**: This field is only propagated when attached to the
- > default "bridge" network. Use the information from the "bridge"
- > network inside the `Networks` map instead, which contains the same
- > information. This field was deprecated in Docker 1.9 and is scheduled
- > to be removed in Docker 17.12.0
- type: "string"
- example: "2001:db8::5689"
- GlobalIPv6PrefixLen:
- description: |
- Mask length of the global IPv6 address.
-
-
-
- > **Deprecated**: This field is only propagated when attached to the
- > default "bridge" network. Use the information from the "bridge"
- > network inside the `Networks` map instead, which contains the same
- > information. This field was deprecated in Docker 1.9 and is scheduled
- > to be removed in Docker 17.12.0
- type: "integer"
- example: 64
- IPAddress:
- description: |
- IPv4 address for the default "bridge" network.
-
-
-
- > **Deprecated**: This field is only propagated when attached to the
- > default "bridge" network. Use the information from the "bridge"
- > network inside the `Networks` map instead, which contains the same
- > information. This field was deprecated in Docker 1.9 and is scheduled
- > to be removed in Docker 17.12.0
- type: "string"
- example: "172.17.0.4"
- IPPrefixLen:
- description: |
- Mask length of the IPv4 address.
-
-
-
- > **Deprecated**: This field is only propagated when attached to the
- > default "bridge" network. Use the information from the "bridge"
- > network inside the `Networks` map instead, which contains the same
- > information. This field was deprecated in Docker 1.9 and is scheduled
- > to be removed in Docker 17.12.0
- type: "integer"
- example: 16
- IPv6Gateway:
- description: |
- IPv6 gateway address for this network.
-
-
-
- > **Deprecated**: This field is only propagated when attached to the
- > default "bridge" network. Use the information from the "bridge"
- > network inside the `Networks` map instead, which contains the same
- > information. This field was deprecated in Docker 1.9 and is scheduled
- > to be removed in Docker 17.12.0
- type: "string"
- example: "2001:db8:2::100"
- MacAddress:
- description: |
- MAC address for the container on the default "bridge" network.
-
-
-
- > **Deprecated**: This field is only propagated when attached to the
- > default "bridge" network. Use the information from the "bridge"
- > network inside the `Networks` map instead, which contains the same
- > information. This field was deprecated in Docker 1.9 and is scheduled
- > to be removed in Docker 17.12.0
- type: "string"
- example: "02:42:ac:11:00:04"
- Networks:
- description: |
- Information about all networks that the container is connected to.
- type: "object"
- additionalProperties:
- $ref: "#/definitions/EndpointSettings"
-
- Address:
- description: Address represents an IPv4 or IPv6 IP address.
- type: "object"
- properties:
- Addr:
- description: IP address.
- type: "string"
- PrefixLen:
- description: Mask length of the IP address.
- type: "integer"
-
- PortMap:
- description: |
- PortMap describes the mapping of container ports to host ports, using the
- container's port-number and protocol as key in the format `<port>/<protocol>`,
- for example, `80/udp`.
-
- If a container's port is mapped for multiple protocols, separate entries
- are added to the mapping table.
- type: "object"
- additionalProperties:
- type: "array"
- x-nullable: true
- items:
- $ref: "#/definitions/PortBinding"
- example:
- "443/tcp":
- - HostIp: "127.0.0.1"
- HostPort: "4443"
- "80/tcp":
- - HostIp: "0.0.0.0"
- HostPort: "80"
- - HostIp: "0.0.0.0"
- HostPort: "8080"
- "80/udp":
- - HostIp: "0.0.0.0"
- HostPort: "80"
- "53/udp":
- - HostIp: "0.0.0.0"
- HostPort: "53"
- "2377/tcp": null
-
- PortBinding:
- description: |
- PortBinding represents a binding between a host IP address and a host
- port.
- type: "object"
- properties:
- HostIp:
- description: "Host IP address that the container's port is mapped to."
- type: "string"
- example: "127.0.0.1"
- HostPort:
- description: "Host port number that the container's port is mapped to."
- type: "string"
- example: "4443"
-
- GraphDriverData:
- description: "Information about a container's graph driver."
- type: "object"
- required: [Name, Data]
- properties:
- Name:
- type: "string"
- x-nullable: false
- Data:
- type: "object"
- x-nullable: false
- additionalProperties:
- type: "string"
-
- Image:
- type: "object"
- required:
- - Id
- - Parent
- - Comment
- - Created
- - Container
- - DockerVersion
- - Author
- - Architecture
- - Os
- - Size
- - VirtualSize
- - GraphDriver
- - RootFS
- properties:
- Id:
- type: "string"
- x-nullable: false
- RepoTags:
- type: "array"
- items:
- type: "string"
- RepoDigests:
- type: "array"
- items:
- type: "string"
- Parent:
- type: "string"
- x-nullable: false
- Comment:
- type: "string"
- x-nullable: false
- Created:
- type: "string"
- x-nullable: false
- Container:
- type: "string"
- x-nullable: false
- ContainerConfig:
- $ref: "#/definitions/ContainerConfig"
- DockerVersion:
- type: "string"
- x-nullable: false
- Author:
- type: "string"
- x-nullable: false
- Config:
- $ref: "#/definitions/ContainerConfig"
- Architecture:
- type: "string"
- x-nullable: false
- Os:
- type: "string"
- x-nullable: false
- OsVersion:
- type: "string"
- Size:
- type: "integer"
- format: "int64"
- x-nullable: false
- VirtualSize:
- type: "integer"
- format: "int64"
- x-nullable: false
- GraphDriver:
- $ref: "#/definitions/GraphDriverData"
- RootFS:
- type: "object"
- required: [Type]
- properties:
- Type:
- type: "string"
- x-nullable: false
- Layers:
- type: "array"
- items:
- type: "string"
- BaseLayer:
- type: "string"
- Metadata:
- type: "object"
- properties:
- LastTagTime:
- type: "string"
- format: "dateTime"
-
- ImageSummary:
- type: "object"
- required:
- - Id
- - ParentId
- - RepoTags
- - RepoDigests
- - Created
- - Size
- - SharedSize
- - VirtualSize
- - Labels
- - Containers
- properties:
- Id:
- type: "string"
- x-nullable: false
- ParentId:
- type: "string"
- x-nullable: false
- RepoTags:
- type: "array"
- x-nullable: false
- items:
- type: "string"
- RepoDigests:
- type: "array"
- x-nullable: false
- items:
- type: "string"
- Created:
- type: "integer"
- x-nullable: false
- Size:
- type: "integer"
- x-nullable: false
- SharedSize:
- type: "integer"
- x-nullable: false
- VirtualSize:
- type: "integer"
- x-nullable: false
- Labels:
- type: "object"
- x-nullable: false
- additionalProperties:
- type: "string"
- Containers:
- x-nullable: false
- type: "integer"
-
- AuthConfig:
- type: "object"
- properties:
- username:
- type: "string"
- password:
- type: "string"
- email:
- type: "string"
- serveraddress:
- type: "string"
- example:
- username: "hannibal"
- password: "xxxx"
- serveraddress: "https://index.docker.io/v1/"
-
- ProcessConfig:
- type: "object"
- properties:
- privileged:
- type: "boolean"
- user:
- type: "string"
- tty:
- type: "boolean"
- entrypoint:
- type: "string"
- arguments:
- type: "array"
- items:
- type: "string"
-
- Volume:
- type: "object"
- required: [Name, Driver, Mountpoint, Labels, Scope, Options]
- properties:
- Name:
- type: "string"
- description: "Name of the volume."
- x-nullable: false
- Driver:
- type: "string"
- description: "Name of the volume driver used by the volume."
- x-nullable: false
- Mountpoint:
- type: "string"
- description: "Mount path of the volume on the host."
- x-nullable: false
- CreatedAt:
- type: "string"
- format: "dateTime"
- description: "Date/Time the volume was created."
- Status:
- type: "object"
- description: |
- Low-level details about the volume, provided by the volume driver.
- Details are returned as a map with key/value pairs:
- `{"key":"value","key2":"value2"}`.
-
- The `Status` field is optional, and is omitted if the volume driver
- does not support this feature.
- additionalProperties:
- type: "object"
- Labels:
- type: "object"
- description: "User-defined key/value metadata."
- x-nullable: false
- additionalProperties:
- type: "string"
- Scope:
- type: "string"
- description: |
- The level at which the volume exists. Either `global` for cluster-wide,
- or `local` for machine level.
- default: "local"
- x-nullable: false
- enum: ["local", "global"]
- Options:
- type: "object"
- description: |
- The driver specific options used when creating the volume.
- additionalProperties:
- type: "string"
- UsageData:
- type: "object"
- x-nullable: true
- required: [Size, RefCount]
- description: |
- Usage details about the volume. This information is used by the
- `GET /system/df` endpoint, and omitted in other endpoints.
- properties:
- Size:
- type: "integer"
- default: -1
- description: |
- Amount of disk space used by the volume (in bytes). This information
- is only available for volumes created with the `"local"` volume
- driver. For volumes created with other volume drivers, this field
- is set to `-1` ("not available")
- x-nullable: false
- RefCount:
- type: "integer"
- default: -1
- description: |
- The number of containers referencing this volume. This field
- is set to `-1` if the reference-count is not available.
- x-nullable: false
-
- example:
- Name: "tardis"
- Driver: "custom"
- Mountpoint: "/var/lib/docker/volumes/tardis"
- Status:
- hello: "world"
- Labels:
- com.example.some-label: "some-value"
- com.example.some-other-label: "some-other-value"
- Scope: "local"
- CreatedAt: "2016-06-07T20:31:11.853781916Z"
-
- Network:
- type: "object"
- properties:
- Name:
- type: "string"
- Id:
- type: "string"
- Created:
- type: "string"
- format: "dateTime"
- Scope:
- type: "string"
- Driver:
- type: "string"
- EnableIPv6:
- type: "boolean"
- IPAM:
- $ref: "#/definitions/IPAM"
- Internal:
- type: "boolean"
- Attachable:
- type: "boolean"
- Ingress:
- type: "boolean"
- Containers:
- type: "object"
- additionalProperties:
- $ref: "#/definitions/NetworkContainer"
- Options:
- type: "object"
- additionalProperties:
- type: "string"
- Labels:
- type: "object"
- additionalProperties:
- type: "string"
- example:
- Name: "net01"
- Id: "7d86d31b1478e7cca9ebed7e73aa0fdeec46c5ca29497431d3007d2d9e15ed99"
- Created: "2016-10-19T04:33:30.360899459Z"
- Scope: "local"
- Driver: "bridge"
- EnableIPv6: false
- IPAM:
- Driver: "default"
- Config:
- - Subnet: "172.19.0.0/16"
- Gateway: "172.19.0.1"
- Options:
- foo: "bar"
- Internal: false
- Attachable: false
- Ingress: false
- Containers:
- 19a4d5d687db25203351ed79d478946f861258f018fe384f229f2efa4b23513c:
- Name: "test"
- EndpointID: "628cadb8bcb92de107b2a1e516cbffe463e321f548feb37697cce00ad694f21a"
- MacAddress: "02:42:ac:13:00:02"
- IPv4Address: "172.19.0.2/16"
- IPv6Address: ""
- Options:
- com.docker.network.bridge.default_bridge: "true"
- com.docker.network.bridge.enable_icc: "true"
- com.docker.network.bridge.enable_ip_masquerade: "true"
- com.docker.network.bridge.host_binding_ipv4: "0.0.0.0"
- com.docker.network.bridge.name: "docker0"
- com.docker.network.driver.mtu: "1500"
- Labels:
- com.example.some-label: "some-value"
- com.example.some-other-label: "some-other-value"
- IPAM:
- type: "object"
- properties:
- Driver:
- description: "Name of the IPAM driver to use."
- type: "string"
- default: "default"
- Config:
- description: |
- List of IPAM configuration options, specified as a map:
-
- ```
- {"Subnet": , "IPRange": , "Gateway": , "AuxAddress": }
- ```
- type: "array"
- items:
- type: "object"
- additionalProperties:
- type: "string"
- Options:
- description: "Driver-specific options, specified as a map."
- type: "object"
- additionalProperties:
- type: "string"
-
- NetworkContainer:
- type: "object"
- properties:
- Name:
- type: "string"
- EndpointID:
- type: "string"
- MacAddress:
- type: "string"
- IPv4Address:
- type: "string"
- IPv6Address:
- type: "string"
-
- BuildInfo:
- type: "object"
- properties:
- id:
- type: "string"
- stream:
- type: "string"
- error:
- type: "string"
- errorDetail:
- $ref: "#/definitions/ErrorDetail"
- status:
- type: "string"
- progress:
- type: "string"
- progressDetail:
- $ref: "#/definitions/ProgressDetail"
- aux:
- $ref: "#/definitions/ImageID"
-
- BuildCache:
- type: "object"
- properties:
- ID:
- type: "string"
- Parent:
- type: "string"
- Type:
- type: "string"
- Description:
- type: "string"
- InUse:
- type: "boolean"
- Shared:
- type: "boolean"
- Size:
- description: |
- Amount of disk space used by the build cache (in bytes).
- type: "integer"
- CreatedAt:
- description: |
- Date and time at which the build cache was created in
- [RFC 3339](https://www.ietf.org/rfc/rfc3339.txt) format with nano-seconds.
- type: "string"
- format: "dateTime"
- example: "2016-08-18T10:44:24.496525531Z"
- LastUsedAt:
- description: |
- Date and time at which the build cache was last used in
- [RFC 3339](https://www.ietf.org/rfc/rfc3339.txt) format with nano-seconds.
- type: "string"
- format: "dateTime"
- x-nullable: true
- example: "2017-08-09T07:09:37.632105588Z"
- UsageCount:
- type: "integer"
-
- ImageID:
- type: "object"
- description: "Image ID or Digest"
- properties:
- ID:
- type: "string"
- example:
- ID: "sha256:85f05633ddc1c50679be2b16a0479ab6f7637f8884e0cfe0f4d20e1ebb3d6e7c"
-
- CreateImageInfo:
- type: "object"
- properties:
- id:
- type: "string"
- error:
- type: "string"
- status:
- type: "string"
- progress:
- type: "string"
- progressDetail:
- $ref: "#/definitions/ProgressDetail"
-
- PushImageInfo:
- type: "object"
- properties:
- error:
- type: "string"
- status:
- type: "string"
- progress:
- type: "string"
- progressDetail:
- $ref: "#/definitions/ProgressDetail"
-
- ErrorDetail:
- type: "object"
- properties:
- code:
- type: "integer"
- message:
- type: "string"
-
- ProgressDetail:
- type: "object"
- properties:
- current:
- type: "integer"
- total:
- type: "integer"
-
- ErrorResponse:
- description: "Represents an error."
- type: "object"
- required: ["message"]
- properties:
- message:
- description: "The error message."
- type: "string"
- x-nullable: false
- example:
- message: "Something went wrong."
-
- IdResponse:
- description: "Response to an API call that returns just an Id"
- type: "object"
- required: ["Id"]
- properties:
- Id:
- description: "The id of the newly created object."
- type: "string"
- x-nullable: false
-
- EndpointSettings:
- description: "Configuration for a network endpoint."
- type: "object"
- properties:
- # Configurations
- IPAMConfig:
- $ref: "#/definitions/EndpointIPAMConfig"
- Links:
- type: "array"
- items:
- type: "string"
- example:
- - "container_1"
- - "container_2"
- Aliases:
- type: "array"
- items:
- type: "string"
- example:
- - "server_x"
- - "server_y"
-
- # Operational data
- NetworkID:
- description: |
- Unique ID of the network.
- type: "string"
- example: "08754567f1f40222263eab4102e1c733ae697e8e354aa9cd6e18d7402835292a"
- EndpointID:
- description: |
- Unique ID for the service endpoint in a Sandbox.
- type: "string"
- example: "b88f5b905aabf2893f3cbc4ee42d1ea7980bbc0a92e2c8922b1e1795298afb0b"
- Gateway:
- description: |
- Gateway address for this network.
- type: "string"
- example: "172.17.0.1"
- IPAddress:
- description: |
- IPv4 address.
- type: "string"
- example: "172.17.0.4"
- IPPrefixLen:
- description: |
- Mask length of the IPv4 address.
- type: "integer"
- example: 16
- IPv6Gateway:
- description: |
- IPv6 gateway address.
- type: "string"
- example: "2001:db8:2::100"
- GlobalIPv6Address:
- description: |
- Global IPv6 address.
- type: "string"
- example: "2001:db8::5689"
- GlobalIPv6PrefixLen:
- description: |
- Mask length of the global IPv6 address.
- type: "integer"
- format: "int64"
- example: 64
- MacAddress:
- description: |
- MAC address for the endpoint on this network.
- type: "string"
- example: "02:42:ac:11:00:04"
- DriverOpts:
- description: |
- DriverOpts is a mapping of driver options and values. These options
- are passed directly to the driver and are driver specific.
- type: "object"
- x-nullable: true
- additionalProperties:
- type: "string"
- example:
- com.example.some-label: "some-value"
- com.example.some-other-label: "some-other-value"
-
- EndpointIPAMConfig:
- description: |
- EndpointIPAMConfig represents an endpoint's IPAM configuration.
- type: "object"
- x-nullable: true
- properties:
- IPv4Address:
- type: "string"
- example: "172.20.30.33"
- IPv6Address:
- type: "string"
- example: "2001:db8:abcd::3033"
- LinkLocalIPs:
- type: "array"
- items:
- type: "string"
- example:
- - "169.254.34.68"
- - "fe80::3468"
-
- PluginMount:
- type: "object"
- x-nullable: false
- required: [Name, Description, Settable, Source, Destination, Type, Options]
- properties:
- Name:
- type: "string"
- x-nullable: false
- example: "some-mount"
- Description:
- type: "string"
- x-nullable: false
- example: "This is a mount that's used by the plugin."
- Settable:
- type: "array"
- items:
- type: "string"
- Source:
- type: "string"
- example: "/var/lib/docker/plugins/"
- Destination:
- type: "string"
- x-nullable: false
- example: "/mnt/state"
- Type:
- type: "string"
- x-nullable: false
- example: "bind"
- Options:
- type: "array"
- items:
- type: "string"
- example:
- - "rbind"
- - "rw"
-
- PluginDevice:
- type: "object"
- required: [Name, Description, Settable, Path]
- x-nullable: false
- properties:
- Name:
- type: "string"
- x-nullable: false
- Description:
- type: "string"
- x-nullable: false
- Settable:
- type: "array"
- items:
- type: "string"
- Path:
- type: "string"
- example: "/dev/fuse"
-
- PluginEnv:
- type: "object"
- x-nullable: false
- required: [Name, Description, Settable, Value]
- properties:
- Name:
- x-nullable: false
- type: "string"
- Description:
- x-nullable: false
- type: "string"
- Settable:
- type: "array"
- items:
- type: "string"
- Value:
- type: "string"
-
- PluginInterfaceType:
- type: "object"
- x-nullable: false
- required: [Prefix, Capability, Version]
- properties:
- Prefix:
- type: "string"
- x-nullable: false
- Capability:
- type: "string"
- x-nullable: false
- Version:
- type: "string"
- x-nullable: false
-
- Plugin:
- description: "A plugin for the Engine API"
- type: "object"
- required: [Settings, Enabled, Config, Name]
- properties:
- Id:
- type: "string"
- example: "5724e2c8652da337ab2eedd19fc6fc0ec908e4bd907c7421bf6a8dfc70c4c078"
- Name:
- type: "string"
- x-nullable: false
- example: "tiborvass/sample-volume-plugin"
- Enabled:
- description:
- True if the plugin is running. False if the plugin is not running,
- only installed.
- type: "boolean"
- x-nullable: false
- example: true
- Settings:
- description: "Settings that can be modified by users."
- type: "object"
- x-nullable: false
- required: [Args, Devices, Env, Mounts]
- properties:
- Mounts:
- type: "array"
- items:
- $ref: "#/definitions/PluginMount"
- Env:
- type: "array"
- items:
- type: "string"
- example:
- - "DEBUG=0"
- Args:
- type: "array"
- items:
- type: "string"
- Devices:
- type: "array"
- items:
- $ref: "#/definitions/PluginDevice"
- PluginReference:
- description: "plugin remote reference used to push/pull the plugin"
- type: "string"
- x-nullable: false
- example: "localhost:5000/tiborvass/sample-volume-plugin:latest"
- Config:
- description: "The config of a plugin."
- type: "object"
- x-nullable: false
- required:
- - Description
- - Documentation
- - Interface
- - Entrypoint
- - WorkDir
- - Network
- - Linux
- - PidHost
- - PropagatedMount
- - IpcHost
- - Mounts
- - Env
- - Args
- properties:
- DockerVersion:
- description: "Docker Version used to create the plugin"
- type: "string"
- x-nullable: false
- example: "17.06.0-ce"
- Description:
- type: "string"
- x-nullable: false
- example: "A sample volume plugin for Docker"
- Documentation:
- type: "string"
- x-nullable: false
- example: "https://docs.docker.com/engine/extend/plugins/"
- Interface:
- description: "The interface between Docker and the plugin"
- x-nullable: false
- type: "object"
- required: [Types, Socket]
- properties:
- Types:
- type: "array"
- items:
- $ref: "#/definitions/PluginInterfaceType"
- example:
- - "docker.volumedriver/1.0"
- Socket:
- type: "string"
- x-nullable: false
- example: "plugins.sock"
- ProtocolScheme:
- type: "string"
- example: "some.protocol/v1.0"
- description: "Protocol to use for clients connecting to the plugin."
- enum:
- - ""
- - "moby.plugins.http/v1"
- Entrypoint:
- type: "array"
- items:
- type: "string"
- example:
- - "/usr/bin/sample-volume-plugin"
- - "/data"
- WorkDir:
- type: "string"
- x-nullable: false
- example: "/bin/"
- User:
- type: "object"
- x-nullable: false
- properties:
- UID:
- type: "integer"
- format: "uint32"
- example: 1000
- GID:
- type: "integer"
- format: "uint32"
- example: 1000
- Network:
- type: "object"
- x-nullable: false
- required: [Type]
- properties:
- Type:
- x-nullable: false
- type: "string"
- example: "host"
- Linux:
- type: "object"
- x-nullable: false
- required: [Capabilities, AllowAllDevices, Devices]
- properties:
- Capabilities:
- type: "array"
- items:
- type: "string"
- example:
- - "CAP_SYS_ADMIN"
- - "CAP_SYSLOG"
- AllowAllDevices:
- type: "boolean"
- x-nullable: false
- example: false
- Devices:
- type: "array"
- items:
- $ref: "#/definitions/PluginDevice"
- PropagatedMount:
- type: "string"
- x-nullable: false
- example: "/mnt/volumes"
- IpcHost:
- type: "boolean"
- x-nullable: false
- example: false
- PidHost:
- type: "boolean"
- x-nullable: false
- example: false
- Mounts:
- type: "array"
- items:
- $ref: "#/definitions/PluginMount"
- Env:
- type: "array"
- items:
- $ref: "#/definitions/PluginEnv"
- example:
- - Name: "DEBUG"
- Description: "If set, prints debug messages"
- Settable: null
- Value: "0"
- Args:
- type: "object"
- x-nullable: false
- required: [Name, Description, Settable, Value]
- properties:
- Name:
- x-nullable: false
- type: "string"
- example: "args"
- Description:
- x-nullable: false
- type: "string"
- example: "command line arguments"
- Settable:
- type: "array"
- items:
- type: "string"
- Value:
- type: "array"
- items:
- type: "string"
- rootfs:
- type: "object"
- properties:
- type:
- type: "string"
- example: "layers"
- diff_ids:
- type: "array"
- items:
- type: "string"
- example:
- - "sha256:675532206fbf3030b8458f88d6e26d4eb1577688a25efec97154c94e8b6b4887"
- - "sha256:e216a057b1cb1efc11f8a268f37ef62083e70b1b38323ba252e25ac88904a7e8"
-
- ObjectVersion:
- description: |
- The version number of the object such as node, service, etc. This is needed
- to avoid conflicting writes. The client must send the version number along
- with the modified specification when updating these objects.
-
- This approach ensures safe concurrency and determinism in that the change
- on the object may not be applied if the version number has changed from the
- last read. In other words, if two update requests specify the same base
- version, only one of the requests can succeed. As a result, two separate
- update requests that happen at the same time will not unintentionally
- overwrite each other.
- type: "object"
- properties:
- Index:
- type: "integer"
- format: "uint64"
- example: 373531
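- # Illustrative sketch (not part of the schema): a client that read an
- # object at `"Version": {"Index": 373531}` sends that same Index back
- # with its update; if another write has already bumped the Index, the
- # stale update is rejected instead of overwriting it.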
-
- NodeSpec:
- type: "object"
- properties:
- Name:
- description: "Name for the node."
- type: "string"
- example: "my-node"
- Labels:
- description: "User-defined key/value metadata."
- type: "object"
- additionalProperties:
- type: "string"
- Role:
- description: "Role of the node."
- type: "string"
- enum:
- - "worker"
- - "manager"
- example: "manager"
- Availability:
- description: "Availability of the node."
- type: "string"
- enum:
- - "active"
- - "pause"
- - "drain"
- example: "active"
- example:
- Availability: "active"
- Name: "node-name"
- Role: "manager"
- Labels:
- foo: "bar"
-
- Node:
- type: "object"
- properties:
- ID:
- type: "string"
- example: "24ifsmvkjbyhk"
- Version:
- $ref: "#/definitions/ObjectVersion"
- CreatedAt:
- description: |
- Date and time at which the node was added to the swarm in
- [RFC 3339](https://www.ietf.org/rfc/rfc3339.txt) format with nano-seconds.
- type: "string"
- format: "dateTime"
- example: "2016-08-18T10:44:24.496525531Z"
- UpdatedAt:
- description: |
- Date and time at which the node was last updated in
- [RFC 3339](https://www.ietf.org/rfc/rfc3339.txt) format with nano-seconds.
- type: "string"
- format: "dateTime"
- example: "2017-08-09T07:09:37.632105588Z"
- Spec:
- $ref: "#/definitions/NodeSpec"
- Description:
- $ref: "#/definitions/NodeDescription"
- Status:
- $ref: "#/definitions/NodeStatus"
- ManagerStatus:
- $ref: "#/definitions/ManagerStatus"
-
- NodeDescription:
- description: |
- NodeDescription encapsulates the properties of the Node as reported by the
- agent.
- type: "object"
- properties:
- Hostname:
- type: "string"
- example: "bf3067039e47"
- Platform:
- $ref: "#/definitions/Platform"
- Resources:
- $ref: "#/definitions/ResourceObject"
- Engine:
- $ref: "#/definitions/EngineDescription"
- TLSInfo:
- $ref: "#/definitions/TLSInfo"
-
- Platform:
- description: |
- Platform represents the platform (Arch/OS).
- type: "object"
- properties:
- Architecture:
- description: |
- Architecture represents the hardware architecture (for example,
- `x86_64`).
- type: "string"
- example: "x86_64"
- OS:
- description: |
- OS represents the Operating System (for example, `linux` or `windows`).
- type: "string"
- example: "linux"
-
- EngineDescription:
- description: "EngineDescription provides information about an engine."
- type: "object"
- properties:
- EngineVersion:
- type: "string"
- example: "17.06.0"
- Labels:
- type: "object"
- additionalProperties:
- type: "string"
- example:
- foo: "bar"
- Plugins:
- type: "array"
- items:
- type: "object"
- properties:
- Type:
- type: "string"
- Name:
- type: "string"
- example:
- - Type: "Log"
- Name: "awslogs"
- - Type: "Log"
- Name: "fluentd"
- - Type: "Log"
- Name: "gcplogs"
- - Type: "Log"
- Name: "gelf"
- - Type: "Log"
- Name: "journald"
- - Type: "Log"
- Name: "json-file"
- - Type: "Log"
- Name: "logentries"
- - Type: "Log"
- Name: "splunk"
- - Type: "Log"
- Name: "syslog"
- - Type: "Network"
- Name: "bridge"
- - Type: "Network"
- Name: "host"
- - Type: "Network"
- Name: "ipvlan"
- - Type: "Network"
- Name: "macvlan"
- - Type: "Network"
- Name: "null"
- - Type: "Network"
- Name: "overlay"
- - Type: "Volume"
- Name: "local"
- - Type: "Volume"
- Name: "localhost:5000/vieux/sshfs:latest"
- - Type: "Volume"
- Name: "vieux/sshfs:latest"
-
- TLSInfo:
- description: |
- Information about the issuer of leaf TLS certificates and the trusted root
- CA certificate.
- type: "object"
- properties:
- TrustRoot:
- description: |
- The root CA certificate(s) that are used to validate leaf TLS
- certificates.
- type: "string"
- CertIssuerSubject:
- description:
- The base64-url-safe-encoded raw subject bytes of the issuer.
- type: "string"
- CertIssuerPublicKey:
- description: |
- The base64-url-safe-encoded raw public key bytes of the issuer.
- type: "string"
- example:
- TrustRoot: |
- -----BEGIN CERTIFICATE-----
- MIIBajCCARCgAwIBAgIUbYqrLSOSQHoxD8CwG6Bi2PJi9c8wCgYIKoZIzj0EAwIw
- EzERMA8GA1UEAxMIc3dhcm0tY2EwHhcNMTcwNDI0MjE0MzAwWhcNMzcwNDE5MjE0
- MzAwWjATMREwDwYDVQQDEwhzd2FybS1jYTBZMBMGByqGSM49AgEGCCqGSM49AwEH
- A0IABJk/VyMPYdaqDXJb/VXh5n/1Yuv7iNrxV3Qb3l06XD46seovcDWs3IZNV1lf
- 3Skyr0ofcchipoiHkXBODojJydSjQjBAMA4GA1UdDwEB/wQEAwIBBjAPBgNVHRMB
- Af8EBTADAQH/MB0GA1UdDgQWBBRUXxuRcnFjDfR/RIAUQab8ZV/n4jAKBggqhkjO
- PQQDAgNIADBFAiAy+JTe6Uc3KyLCMiqGl2GyWGQqQDEcO3/YG36x7om65AIhAJvz
- pxv6zFeVEkAEEkqIYi0omA9+CjanB/6Bz4n1uw8H
- -----END CERTIFICATE-----
- CertIssuerSubject: "MBMxETAPBgNVBAMTCHN3YXJtLWNh"
- CertIssuerPublicKey: "MFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAEmT9XIw9h1qoNclv9VeHmf/Vi6/uI2vFXdBveXTpcPjqx6i9wNazchk1XWV/dKTKvSh9xyGKmiIeRcE4OiMnJ1A=="
-
- NodeStatus:
- description: |
- NodeStatus represents the status of a node.
-
- It provides the current status of the node, as seen by the manager.
- type: "object"
- properties:
- State:
- $ref: "#/definitions/NodeState"
- Message:
- type: "string"
- example: ""
- Addr:
- description: "IP address of the node."
- type: "string"
- example: "172.17.0.2"
-
- NodeState:
- description: "NodeState represents the state of a node."
- type: "string"
- enum:
- - "unknown"
- - "down"
- - "ready"
- - "disconnected"
- example: "ready"
-
- ManagerStatus:
- description: |
- ManagerStatus represents the status of a manager.
-
- It provides the current status of a node's manager component, if the node
- is a manager.
- x-nullable: true
- type: "object"
- properties:
- Leader:
- type: "boolean"
- default: false
- example: true
- Reachability:
- $ref: "#/definitions/Reachability"
- Addr:
- description: |
- The IP address and port at which the manager is reachable.
- type: "string"
- example: "10.0.0.46:2377"
-
- Reachability:
- description: "Reachability represents the reachability of a node."
- type: "string"
- enum:
- - "unknown"
- - "unreachable"
- - "reachable"
- example: "reachable"
-
- SwarmSpec:
- description: "User modifiable swarm configuration."
- type: "object"
- properties:
- Name:
- description: "Name of the swarm."
- type: "string"
- example: "default"
- Labels:
- description: "User-defined key/value metadata."
- type: "object"
- additionalProperties:
- type: "string"
- example:
- com.example.corp.type: "production"
- com.example.corp.department: "engineering"
- Orchestration:
- description: "Orchestration configuration."
- type: "object"
- x-nullable: true
- properties:
- TaskHistoryRetentionLimit:
- description: |
- The number of historic tasks to keep per instance or node. If
- negative, never remove completed or failed tasks.
- type: "integer"
- format: "int64"
- example: 10
- Raft:
- description: "Raft configuration."
- type: "object"
- properties:
- SnapshotInterval:
- description: "The number of log entries between snapshots."
- type: "integer"
- format: "uint64"
- example: 10000
- KeepOldSnapshots:
- description: |
- The number of snapshots to keep beyond the current snapshot.
- type: "integer"
- format: "uint64"
- LogEntriesForSlowFollowers:
- description: |
- The number of log entries to keep around to sync up slow followers
- after a snapshot is created.
- type: "integer"
- format: "uint64"
- example: 500
- ElectionTick:
- description: |
- The number of ticks that a follower will wait for a message from
- the leader before becoming a candidate and starting an election.
- `ElectionTick` must be greater than `HeartbeatTick`.
-
- A tick currently defaults to one second, so these translate
- directly to seconds currently, but this is NOT guaranteed.
- type: "integer"
- example: 3
- HeartbeatTick:
- description: |
- The number of ticks between heartbeats. Every HeartbeatTick ticks,
- the leader will send a heartbeat to the followers.
-
- A tick currently defaults to one second, so these translate
- directly to seconds currently, but this is NOT guaranteed.
- type: "integer"
- example: 1
- Dispatcher:
- description: "Dispatcher configuration."
- type: "object"
- x-nullable: true
- properties:
- HeartbeatPeriod:
- description: |
- The delay for an agent to send a heartbeat to the dispatcher.
- type: "integer"
- format: "int64"
- example: 5000000000
- CAConfig:
- description: "CA configuration."
- type: "object"
- x-nullable: true
- properties:
- NodeCertExpiry:
- description: "The duration node certificates are issued for."
- type: "integer"
- format: "int64"
- example: 7776000000000000
- ExternalCAs:
- description: |
- Configuration for forwarding signing requests to an external
- certificate authority.
- type: "array"
- items:
- type: "object"
- properties:
- Protocol:
- description: |
- Protocol for communication with the external CA (currently
- only `cfssl` is supported).
- type: "string"
- enum:
- - "cfssl"
- default: "cfssl"
- URL:
- description: |
- URL where certificate signing requests should be sent.
- type: "string"
- Options:
- description: |
- An object with key/value pairs that are interpreted as
- protocol-specific options for the external CA driver.
- type: "object"
- additionalProperties:
- type: "string"
- CACert:
- description: |
- The root CA certificate (in PEM format) this external CA uses
- to issue TLS certificates (assumed to be to the current swarm
- root CA certificate if not provided).
- type: "string"
- SigningCACert:
- description: |
- The desired signing CA certificate for all swarm node TLS leaf
- certificates, in PEM format.
- type: "string"
- SigningCAKey:
- description: |
- The desired signing CA key for all swarm node TLS leaf certificates,
- in PEM format.
- type: "string"
- ForceRotate:
- description: |
- An integer whose purpose is to force swarm to generate a new
- signing CA certificate and key, if none have been specified in
- `SigningCACert` and `SigningCAKey`.
- format: "uint64"
- type: "integer"
- EncryptionConfig:
- description: "Parameters related to encryption-at-rest."
- type: "object"
- properties:
- AutoLockManagers:
- description: |
- If set, generate a key and use it to lock data stored on the
- managers.
- type: "boolean"
- example: false
- TaskDefaults:
- description: "Defaults for creating tasks in this cluster."
- type: "object"
- properties:
- LogDriver:
- description: |
- The log driver to use for tasks created in the orchestrator if
- unspecified by a service.
-
- Updating this value only affects new tasks. Existing tasks continue
- to use their previously configured log driver until recreated.
- type: "object"
- properties:
- Name:
- description: |
- The log driver to use as a default for new tasks.
- type: "string"
- example: "json-file"
- Options:
- description: |
- Driver-specific options for the selected log driver, specified
- as key/value pairs.
- type: "object"
- additionalProperties:
- type: "string"
- example:
- "max-file": "10"
- "max-size": "100m"
-
- # The Swarm information for `GET /info`. It is the same as `GET /swarm`, but
- # without `JoinTokens`.
- ClusterInfo:
- description: |
- ClusterInfo represents information about the swarm as is returned by the
- "/info" endpoint. Join-tokens are not included.
- x-nullable: true
- type: "object"
- properties:
- ID:
- description: "The ID of the swarm."
- type: "string"
- example: "abajmipo7b4xz5ip2nrla6b11"
- Version:
- $ref: "#/definitions/ObjectVersion"
- CreatedAt:
- description: |
- Date and time at which the swarm was initialised in
- [RFC 3339](https://www.ietf.org/rfc/rfc3339.txt) format with nano-seconds.
- type: "string"
- format: "dateTime"
- example: "2016-08-18T10:44:24.496525531Z"
- UpdatedAt:
- description: |
- Date and time at which the swarm was last updated in
- [RFC 3339](https://www.ietf.org/rfc/rfc3339.txt) format with nano-seconds.
- type: "string"
- format: "dateTime"
- example: "2017-08-09T07:09:37.632105588Z"
- Spec:
- $ref: "#/definitions/SwarmSpec"
- TLSInfo:
- $ref: "#/definitions/TLSInfo"
- RootRotationInProgress:
- description: |
- Whether there is currently a root CA rotation in progress for the swarm
- type: "boolean"
- example: false
- DataPathPort:
- description: |
- DataPathPort specifies the data path port number for data traffic.
- Acceptable port range is 1024 to 49151.
- If no port is set or is set to 0, the default port (4789) is used.
- type: "integer"
- format: "uint32"
- default: 4789
- example: 4789
- DefaultAddrPool:
- description: |
- Default Address Pool specifies default subnet pools for global scope
- networks.
- type: "array"
- items:
- type: "string"
- format: "CIDR"
- example: ["10.10.0.0/16", "20.20.0.0/16"]
- SubnetSize:
- description: |
- SubnetSize specifies the subnet size of the networks created from the
- default subnet pool.
- type: "integer"
- format: "uint32"
- maximum: 29
- default: 24
- example: 24
-
- JoinTokens:
- description: |
- JoinTokens contains the tokens workers and managers need to join the swarm.
- type: "object"
- properties:
- Worker:
- description: |
- The token workers can use to join the swarm.
- type: "string"
- example: "SWMTKN-1-3pu6hszjas19xyp7ghgosyx9k8atbfcr8p2is99znpy26u2lkl-1awxwuwd3z9j1z3puu7rcgdbx"
- Manager:
- description: |
- The token managers can use to join the swarm.
- type: "string"
- example: "SWMTKN-1-3pu6hszjas19xyp7ghgosyx9k8atbfcr8p2is99znpy26u2lkl-7p73s1dx5in4tatdymyhg9hu2"
-
- Swarm:
- type: "object"
- allOf:
- - $ref: "#/definitions/ClusterInfo"
- - type: "object"
- properties:
- JoinTokens:
- $ref: "#/definitions/JoinTokens"
-
- TaskSpec:
- description: "User modifiable task configuration."
- type: "object"
- properties:
- PluginSpec:
- type: "object"
- description: |
- Plugin spec for the service. *(Experimental release only.)*
-
-
-
- > **Note**: ContainerSpec, NetworkAttachmentSpec, and PluginSpec are
- > mutually exclusive. PluginSpec is only used when the Runtime field
- > is set to `plugin`. NetworkAttachmentSpec is used when the Runtime
- > field is set to `attachment`.
- properties:
- Name:
- description: "The name or 'alias' to use for the plugin."
- type: "string"
- Remote:
- description: "The plugin image reference to use."
- type: "string"
- Disabled:
- description: "Disable the plugin once scheduled."
- type: "boolean"
- PluginPrivilege:
- type: "array"
- items:
- description: |
- Describes a permission accepted by the user upon installing the
- plugin.
- type: "object"
- properties:
- Name:
- type: "string"
- Description:
- type: "string"
- Value:
- type: "array"
- items:
- type: "string"
- ContainerSpec:
- type: "object"
- description: |
- Container spec for the service.
-
-
-
- > **Note**: ContainerSpec, NetworkAttachmentSpec, and PluginSpec are
- > mutually exclusive. PluginSpec is only used when the Runtime field
- > is set to `plugin`. NetworkAttachmentSpec is used when the Runtime
- > field is set to `attachment`.
- properties:
- Image:
- description: "The image name to use for the container"
- type: "string"
- Labels:
- description: "User-defined key/value data."
- type: "object"
- additionalProperties:
- type: "string"
- Command:
- description: "The command to be run in the image."
- type: "array"
- items:
- type: "string"
- Args:
- description: "Arguments to the command."
- type: "array"
- items:
- type: "string"
- Hostname:
- description: |
- The hostname to use for the container, as a valid
- [RFC 1123](https://tools.ietf.org/html/rfc1123) hostname.
- type: "string"
- Env:
- description: |
- A list of environment variables in the form `VAR=value`.
- type: "array"
- items:
- type: "string"
- Dir:
- description: "The working directory for commands to run in."
- type: "string"
- User:
- description: "The user inside the container."
- type: "string"
- Groups:
- type: "array"
- description: |
- A list of additional groups that the container process will run as.
- items:
- type: "string"
- Privileges:
- type: "object"
- description: "Security options for the container"
- properties:
- CredentialSpec:
- type: "object"
- description: "CredentialSpec for managed service account (Windows only)"
- properties:
- Config:
- type: "string"
- example: "0bt9dmxjvjiqermk6xrop3ekq"
- description: |
- Load credential spec from a Swarm Config with the given ID.
- The specified config must also be present in the Configs
- field with the Runtime property set.
-
-
-
-
- > **Note**: `CredentialSpec.File`, `CredentialSpec.Registry`,
- > and `CredentialSpec.Config` are mutually exclusive.
- File:
- type: "string"
- example: "spec.json"
- description: |
- Load credential spec from this file. The file is read by
- the daemon, and must be present in the `CredentialSpecs`
- subdirectory in the docker data directory, which defaults
- to `C:\ProgramData\Docker\` on Windows.
-
- For example, specifying `spec.json` loads
- `C:\ProgramData\Docker\CredentialSpecs\spec.json`.
-
-
-
- > **Note**: `CredentialSpec.File`, `CredentialSpec.Registry`,
- > and `CredentialSpec.Config` are mutually exclusive.
- Registry:
- type: "string"
- description: |
- Load credential spec from this value in the Windows
- registry. The specified registry value must be located in:
-
- `HKLM\SOFTWARE\Microsoft\Windows NT\CurrentVersion\Virtualization\Containers\CredentialSpecs`
-
-
-
-
- > **Note**: `CredentialSpec.File`, `CredentialSpec.Registry`,
- > and `CredentialSpec.Config` are mutually exclusive.
- SELinuxContext:
- type: "object"
- description: "SELinux labels of the container"
- properties:
- Disable:
- type: "boolean"
- description: "Disable SELinux"
- User:
- type: "string"
- description: "SELinux user label"
- Role:
- type: "string"
- description: "SELinux role label"
- Type:
- type: "string"
- description: "SELinux type label"
- Level:
- type: "string"
- description: "SELinux level label"
- TTY:
- description: "Whether a pseudo-TTY should be allocated."
- type: "boolean"
- OpenStdin:
- description: "Open `stdin`"
- type: "boolean"
- ReadOnly:
- description: "Mount the container's root filesystem as read only."
- type: "boolean"
- Mounts:
- description: |
- Specification for mounts to be added to containers created as part
- of the service.
- type: "array"
- items:
- $ref: "#/definitions/Mount"
- StopSignal:
- description: "Signal to stop the container."
- type: "string"
- StopGracePeriod:
- description: |
- Amount of time to wait for the container to terminate before
- forcefully killing it.
- type: "integer"
- format: "int64"
- HealthCheck:
- $ref: "#/definitions/HealthConfig"
- Hosts:
- type: "array"
- description: |
- A list of hostname/IP mappings to add to the container's `hosts`
- file. The format of extra hosts is specified in the
- [hosts(5)](http://man7.org/linux/man-pages/man5/hosts.5.html)
- man page:
-
- IP_address canonical_hostname [aliases...]
- items:
- type: "string"
- DNSConfig:
- description: |
- Specification for DNS related configurations in resolver configuration
- file (`resolv.conf`).
- type: "object"
- properties:
- Nameservers:
- description: "The IP addresses of the name servers."
- type: "array"
- items:
- type: "string"
- Search:
- description: "A search list for host-name lookup."
- type: "array"
- items:
- type: "string"
- Options:
- description: |
- A list of internal resolver variables to be modified (e.g.,
- `debug`, `ndots:3`, etc.).
- type: "array"
- items:
- type: "string"
- Secrets:
- description: |
- Secrets contains references to zero or more secrets that will be
- exposed to the service.
- type: "array"
- items:
- type: "object"
- properties:
- File:
- description: |
- File represents a specific target that is backed by a file.
- type: "object"
- properties:
- Name:
- description: |
- Name represents the final filename in the filesystem.
- type: "string"
- UID:
- description: "UID represents the file UID."
- type: "string"
- GID:
- description: "GID represents the file GID."
- type: "string"
- Mode:
- description: "Mode represents the FileMode of the file."
- type: "integer"
- format: "uint32"
- SecretID:
- description: |
- SecretID represents the ID of the specific secret that we're
- referencing.
- type: "string"
- SecretName:
- description: |
- SecretName is the name of the secret that this references,
- but this is just provided for lookup/display purposes. The
- secret in the reference will be identified by its ID.
- type: "string"
- Configs:
- description: |
- Configs contains references to zero or more configs that will be
- exposed to the service.
- type: "array"
- items:
- type: "object"
- properties:
- File:
- description: |
- File represents a specific target that is backed by a file.
-
-
-
- > **Note**: `Configs.File` and `Configs.Runtime` are mutually exclusive
- type: "object"
- properties:
- Name:
- description: |
- Name represents the final filename in the filesystem.
- type: "string"
- UID:
- description: "UID represents the file UID."
- type: "string"
- GID:
- description: "GID represents the file GID."
- type: "string"
- Mode:
- description: "Mode represents the FileMode of the file."
- type: "integer"
- format: "uint32"
- Runtime:
- description: |
- Runtime represents a target that is not mounted into the
- container but is used by the task
-
-
-
- > **Note**: `Configs.File` and `Configs.Runtime` are mutually
- > exclusive
- type: "object"
- ConfigID:
- description: |
- ConfigID represents the ID of the specific config that we're
- referencing.
- type: "string"
- ConfigName:
- description: |
- ConfigName is the name of the config that this references,
- but this is just provided for lookup/display purposes. The
- config in the reference will be identified by its ID.
- type: "string"
- Isolation:
- type: "string"
- description: |
- Isolation technology of the containers running the service.
- (Windows only)
- enum:
- - "default"
- - "process"
- - "hyperv"
- Init:
- description: |
- Run an init inside the container that forwards signals and reaps
- processes. This field is omitted if empty, and the default (as
- configured on the daemon) is used.
- type: "boolean"
- x-nullable: true
- Sysctls:
- description: |
- Set kernel namespaced parameters (sysctls) in the container.
- The Sysctls option on services accepts the same sysctls as are
- supported on containers. Note that while the same sysctls are
- supported, no guarantees or checks are made about their
- suitability for a clustered environment, and it's up to the user
- to determine whether a given sysctl will work properly in a
- Service.
- type: "object"
- additionalProperties:
- type: "string"
- # This option is not used by Windows containers
- CapabilityAdd:
- type: "array"
- description: |
- A list of kernel capabilities to add to the default set
- for the container.
- items:
- type: "string"
- example:
- - "CAP_NET_RAW"
- - "CAP_SYS_ADMIN"
- - "CAP_SYS_CHROOT"
- - "CAP_SYSLOG"
- CapabilityDrop:
- type: "array"
- description: |
- A list of kernel capabilities to drop from the default set
- for the container.
- items:
- type: "string"
- example:
- - "CAP_NET_RAW"
- Ulimits:
- description: |
- A list of resource limits to set in the container. For example: `{"Name": "nofile", "Soft": 1024, "Hard": 2048}`
- type: "array"
- items:
- type: "object"
- properties:
- Name:
- description: "Name of ulimit"
- type: "string"
- Soft:
- description: "Soft limit"
- type: "integer"
- Hard:
- description: "Hard limit"
- type: "integer"
- NetworkAttachmentSpec:
- description: |
- Read-only spec type for non-swarm containers attached to swarm overlay
- networks.
-
-
-
- > **Note**: ContainerSpec, NetworkAttachmentSpec, and PluginSpec are
- > mutually exclusive. PluginSpec is only used when the Runtime field
- > is set to `plugin`. NetworkAttachmentSpec is used when the Runtime
- > field is set to `attachment`.
- type: "object"
- properties:
- ContainerID:
- description: "ID of the container represented by this task"
- type: "string"
- Resources:
- description: |
- Resource requirements which apply to each individual container created
- as part of the service.
- type: "object"
- properties:
- Limits:
- description: "Define resources limits."
- $ref: "#/definitions/Limit"
- Reservation:
- description: "Define resources reservation."
- $ref: "#/definitions/ResourceObject"
- RestartPolicy:
- description: |
- Specification for the restart policy which applies to containers
- created as part of this service.
- type: "object"
- properties:
- Condition:
- description: "Condition for restart."
- type: "string"
- enum:
- - "none"
- - "on-failure"
- - "any"
- Delay:
- description: "Delay between restart attempts."
- type: "integer"
- format: "int64"
- MaxAttempts:
- description: |
- Maximum attempts to restart a given container before giving up
- (default value is 0, which is ignored).
- type: "integer"
- format: "int64"
- default: 0
- Window:
- description: |
- Window is the time window used to evaluate the restart policy
- (default value is 0, which is unbounded).
- type: "integer"
- format: "int64"
- default: 0
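- # Illustrative sketch (not part of the schema): Delay and Window are expressed
- # in nanoseconds, so a 5s delay with a 2-minute evaluation window would be:
- #   RestartPolicy:
- #     Condition: "on-failure"
- #     Delay: 5000000000
- #     MaxAttempts: 3
- #     Window: 120000000000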
- Placement:
- type: "object"
- properties:
- Constraints:
- description: |
- An array of constraint expressions to limit the set of nodes where
- a task can be scheduled. Constraint expressions can either use a
- _match_ (`==`) or _exclude_ (`!=`) rule. Multiple constraints find
- nodes that satisfy every expression (AND match). Constraints can
- match node or Docker Engine labels as follows:
-
- node attribute | matches | example
- ---------------------|--------------------------------|-----------------------------------------------
- `node.id` | Node ID | `node.id==2ivku8v2gvtg4`
- `node.hostname` | Node hostname | `node.hostname!=node-2`
- `node.role` | Node role (`manager`/`worker`) | `node.role==manager`
- `node.platform.os` | Node operating system | `node.platform.os==windows`
- `node.platform.arch` | Node architecture | `node.platform.arch==x86_64`
- `node.labels` | User-defined node labels | `node.labels.security==high`
- `engine.labels` | Docker Engine's labels | `engine.labels.operatingsystem==ubuntu-14.04`
-
- `engine.labels` apply to Docker Engine labels like operating system,
- drivers, etc. Swarm administrators add `node.labels` for operational
- purposes by using the [`node update endpoint`](#operation/NodeUpdate).
-
- type: "array"
- items:
- type: "string"
- example:
- - "node.hostname!=node3.corp.example.com"
- - "node.role!=manager"
- - "node.labels.type==production"
- - "node.platform.os==linux"
- - "node.platform.arch==x86_64"
- Preferences:
- description: |
- Preferences provide a way to make the scheduler aware of factors
- such as topology. They are provided in order from highest to
- lowest precedence.
- type: "array"
- items:
- type: "object"
- properties:
- Spread:
- type: "object"
- properties:
- SpreadDescriptor:
- description: |
- label descriptor, such as `engine.labels.az`.
- type: "string"
- example:
- - Spread:
- SpreadDescriptor: "node.labels.datacenter"
- - Spread:
- SpreadDescriptor: "node.labels.rack"
- MaxReplicas:
- description: |
- Maximum number of replicas per node (default value is 0, which
- is unlimited).
- type: "integer"
- format: "int64"
- default: 0
- Platforms:
- description: |
- Platforms stores all the platforms that the service's image can
- run on. This field is used in the platform filter for scheduling.
- If empty, then the platform filter is off, meaning there are no
- scheduling restrictions.
- type: "array"
- items:
- $ref: "#/definitions/Platform"
- ForceUpdate:
- description: |
- A counter that triggers an update even if no relevant parameters have
- been changed.
- type: "integer"
- Runtime:
- description: |
- Runtime is the type of runtime specified for the task executor.
- type: "string"
- Networks:
- description: "Specifies which networks the service should attach to."
- type: "array"
- items:
- $ref: "#/definitions/NetworkAttachmentConfig"
- LogDriver:
- description: |
- Specifies the log driver to use for tasks created from this spec. If
- not present, the default one for the swarm will be used, finally
- falling back to the engine default if not specified.
- type: "object"
- properties:
- Name:
- type: "string"
- Options:
- type: "object"
- additionalProperties:
- type: "string"
-
- TaskState:
- type: "string"
- enum:
- - "new"
- - "allocated"
- - "pending"
- - "assigned"
- - "accepted"
- - "preparing"
- - "ready"
- - "starting"
- - "running"
- - "complete"
- - "shutdown"
- - "failed"
- - "rejected"
- - "remove"
- - "orphaned"
-
- Task:
- type: "object"
- properties:
- ID:
- description: "The ID of the task."
- type: "string"
- Version:
- $ref: "#/definitions/ObjectVersion"
- CreatedAt:
- type: "string"
- format: "dateTime"
- UpdatedAt:
- type: "string"
- format: "dateTime"
- Name:
- description: "Name of the task."
- type: "string"
- Labels:
- description: "User-defined key/value metadata."
- type: "object"
- additionalProperties:
- type: "string"
- Spec:
- $ref: "#/definitions/TaskSpec"
- ServiceID:
- description: "The ID of the service this task is part of."
- type: "string"
- Slot:
- type: "integer"
- NodeID:
- description: "The ID of the node that this task is on."
- type: "string"
- AssignedGenericResources:
- $ref: "#/definitions/GenericResources"
- Status:
- type: "object"
- properties:
- Timestamp:
- type: "string"
- format: "dateTime"
- State:
- $ref: "#/definitions/TaskState"
- Message:
- type: "string"
- Err:
- type: "string"
- ContainerStatus:
- type: "object"
- properties:
- ContainerID:
- type: "string"
- PID:
- type: "integer"
- ExitCode:
- type: "integer"
- DesiredState:
- $ref: "#/definitions/TaskState"
- JobIteration:
- description: |
- If the Service this Task belongs to is a job-mode service, contains
- the JobIteration of the Service this Task was created for. Absent if
- the Task was created for a Replicated or Global Service.
- $ref: "#/definitions/ObjectVersion"
- example:
- ID: "0kzzo1i0y4jz6027t0k7aezc7"
- Version:
- Index: 71
- CreatedAt: "2016-06-07T21:07:31.171892745Z"
- UpdatedAt: "2016-06-07T21:07:31.376370513Z"
- Spec:
- ContainerSpec:
- Image: "redis"
- Resources:
- Limits: {}
- Reservations: {}
- RestartPolicy:
- Condition: "any"
- MaxAttempts: 0
- Placement: {}
- ServiceID: "9mnpnzenvg8p8tdbtq4wvbkcz"
- Slot: 1
- NodeID: "60gvrl6tm78dmak4yl7srz94v"
- Status:
- Timestamp: "2016-06-07T21:07:31.290032978Z"
- State: "running"
- Message: "started"
- ContainerStatus:
- ContainerID: "e5d62702a1b48d01c3e02ca1e0212a250801fa8d67caca0b6f35919ebc12f035"
- PID: 677
- DesiredState: "running"
- NetworksAttachments:
- - Network:
- ID: "4qvuz4ko70xaltuqbt8956gd1"
- Version:
- Index: 18
- CreatedAt: "2016-06-07T20:31:11.912919752Z"
- UpdatedAt: "2016-06-07T21:07:29.955277358Z"
- Spec:
- Name: "ingress"
- Labels:
- com.docker.swarm.internal: "true"
- DriverConfiguration: {}
- IPAMOptions:
- Driver: {}
- Configs:
- - Subnet: "10.255.0.0/16"
- Gateway: "10.255.0.1"
- DriverState:
- Name: "overlay"
- Options:
- com.docker.network.driver.overlay.vxlanid_list: "256"
- IPAMOptions:
- Driver:
- Name: "default"
- Configs:
- - Subnet: "10.255.0.0/16"
- Gateway: "10.255.0.1"
- Addresses:
- - "10.255.0.10/16"
- AssignedGenericResources:
- - DiscreteResourceSpec:
- Kind: "SSD"
- Value: 3
- - NamedResourceSpec:
- Kind: "GPU"
- Value: "UUID1"
- - NamedResourceSpec:
- Kind: "GPU"
- Value: "UUID2"
-
- ServiceSpec:
- description: "User modifiable configuration for a service."
- properties:
- Name:
- description: "Name of the service."
- type: "string"
- Labels:
- description: "User-defined key/value metadata."
- type: "object"
- additionalProperties:
- type: "string"
- TaskTemplate:
- $ref: "#/definitions/TaskSpec"
- Mode:
- description: "Scheduling mode for the service."
- type: "object"
- properties:
- Replicated:
- type: "object"
- properties:
- Replicas:
- type: "integer"
- format: "int64"
- Global:
- type: "object"
- ReplicatedJob:
- description: |
- The mode used for services with a finite number of tasks that run
- to a completed state.
- type: "object"
- properties:
- MaxConcurrent:
- description: |
- The maximum number of replicas to run simultaneously.
- type: "integer"
- format: "int64"
- default: 1
- TotalCompletions:
- description: |
- The total number of replicas desired to reach the Completed
- state. If unset, will default to the value of `MaxConcurrent`
- type: "integer"
- format: "int64"
- GlobalJob:
- description: |
- The mode used for services which run a task to the completed state
- on each valid node.
- type: "object"
- UpdateConfig:
- description: "Specification for the update strategy of the service."
- type: "object"
- properties:
- Parallelism:
- description: |
- Maximum number of tasks to be updated in one iteration (0 means
- unlimited parallelism).
- type: "integer"
- format: "int64"
- Delay:
- description: "Amount of time between updates, in nanoseconds."
- type: "integer"
- format: "int64"
- FailureAction:
- description: |
- Action to take if an updated task fails to run, or stops running
- during the update.
- type: "string"
- enum:
- - "continue"
- - "pause"
- - "rollback"
- Monitor:
- description: |
- Amount of time to monitor each updated task for failures, in
- nanoseconds.
- type: "integer"
- format: "int64"
- MaxFailureRatio:
- description: |
- The fraction of tasks that may fail during an update before the
- failure action is invoked, specified as a floating point number
- between 0 and 1.
- type: "number"
- default: 0
- Order:
- description: |
- The order of operations when rolling out an updated task. Either
- the old task is shut down before the new task is started, or the
- new task is started before the old task is shut down.
- type: "string"
- enum:
- - "stop-first"
- - "start-first"
- RollbackConfig:
- description: "Specification for the rollback strategy of the service."
- type: "object"
- properties:
- Parallelism:
- description: |
- Maximum number of tasks to be rolled back in one iteration (0 means
- unlimited parallelism).
- type: "integer"
- format: "int64"
- Delay:
- description: |
- Amount of time between rollback iterations, in nanoseconds.
- type: "integer"
- format: "int64"
- FailureAction:
- description: |
- Action to take if a rolled back task fails to run, or stops
- running during the rollback.
- type: "string"
- enum:
- - "continue"
- - "pause"
- Monitor:
- description: |
- Amount of time to monitor each rolled back task for failures, in
- nanoseconds.
- type: "integer"
- format: "int64"
- MaxFailureRatio:
- description: |
- The fraction of tasks that may fail during a rollback before the
- failure action is invoked, specified as a floating point number
- between 0 and 1.
- type: "number"
- default: 0
- Order:
- description: |
- The order of operations when rolling back a task. Either the old
- task is shut down before the new task is started, or the new task
- is started before the old task is shut down.
- type: "string"
- enum:
- - "stop-first"
- - "start-first"
- Networks:
- description: "Specifies which networks the service should attach to."
- type: "array"
- items:
- $ref: "#/definitions/NetworkAttachmentConfig"
-
- EndpointSpec:
- $ref: "#/definitions/EndpointSpec"
-
- EndpointPortConfig:
- type: "object"
- properties:
- Name:
- type: "string"
- Protocol:
- type: "string"
- enum:
- - "tcp"
- - "udp"
- - "sctp"
- TargetPort:
- description: "The port inside the container."
- type: "integer"
- PublishedPort:
- description: "The port on the swarm hosts."
- type: "integer"
- PublishMode:
- description: |
- The mode in which port is published.
-
-
-
- - "ingress" makes the target port accessible on every node,
- regardless of whether there is a task for the service running on
- that node or not.
- - "host" bypasses the routing mesh and publish the port directly on
- the swarm node where that service is running.
-
- type: "string"
- enum:
- - "ingress"
- - "host"
- default: "ingress"
- example: "ingress"
-
- EndpointSpec:
- description: "Properties that can be configured to access and load balance a service."
- type: "object"
- properties:
- Mode:
- description: |
- The mode of resolution to use for internal load balancing between tasks.
- type: "string"
- enum:
- - "vip"
- - "dnsrr"
- default: "vip"
- Ports:
- description: |
- List of exposed ports that this service is accessible on from the
- outside. Ports can only be provided if `vip` resolution mode is used.
- type: "array"
- items:
- $ref: "#/definitions/EndpointPortConfig"
-
- Service:
- type: "object"
- properties:
- ID:
- type: "string"
- Version:
- $ref: "#/definitions/ObjectVersion"
- CreatedAt:
- type: "string"
- format: "dateTime"
- UpdatedAt:
- type: "string"
- format: "dateTime"
- Spec:
- $ref: "#/definitions/ServiceSpec"
- Endpoint:
- type: "object"
- properties:
- Spec:
- $ref: "#/definitions/EndpointSpec"
- Ports:
- type: "array"
- items:
- $ref: "#/definitions/EndpointPortConfig"
- VirtualIPs:
- type: "array"
- items:
- type: "object"
- properties:
- NetworkID:
- type: "string"
- Addr:
- type: "string"
- UpdateStatus:
- description: "The status of a service update."
- type: "object"
- properties:
- State:
- type: "string"
- enum:
- - "updating"
- - "paused"
- - "completed"
- StartedAt:
- type: "string"
- format: "dateTime"
- CompletedAt:
- type: "string"
- format: "dateTime"
- Message:
- type: "string"
- ServiceStatus:
- description: |
- The status of the service's tasks. Provided only when requested as
- part of a ServiceList operation.
- type: "object"
- properties:
- RunningTasks:
- description: |
- The number of tasks for the service currently in the Running state.
- type: "integer"
- format: "uint64"
- example: 7
- DesiredTasks:
- description: |
- The number of tasks for the service desired to be running.
- For replicated services, this is the replica count from the
- service spec. For global services, this is computed by taking
- count of all tasks for the service with a Desired State other
- than Shutdown.
- type: "integer"
- format: "uint64"
- example: 10
- CompletedTasks:
- description: |
- The number of tasks for a job that are in the Completed state.
- This field must be cross-referenced with the service type, as the
- value of 0 may mean the service is not in a job mode, or it may
- mean the job-mode service has no tasks yet Completed.
- type: "integer"
- format: "uint64"
- JobStatus:
- description: |
- The status of the service when it is in one of ReplicatedJob or
- GlobalJob modes. Absent on Replicated and Global mode services. The
- JobIteration is an ObjectVersion, but unlike the Service's version,
- does not need to be sent with an update request.
- type: "object"
- properties:
- JobIteration:
- description: |
- JobIteration is a value increased each time a Job is executed,
- successfully or otherwise. "Executed", in this case, means the
- job as a whole has been started, not that an individual Task has
- been launched. A job is "Executed" when its ServiceSpec is
- updated. JobIteration can be used to disambiguate Tasks belonging
- to different executions of a job. Though JobIteration will
- increase with each subsequent execution, it may not necessarily
- increase by 1, and so JobIteration should not be used to
- $ref: "#/definitions/ObjectVersion"
- LastExecution:
- description: |
- The last time, as observed by the server, that this job was
- started.
- type: "string"
- format: "dateTime"
- example:
- ID: "9mnpnzenvg8p8tdbtq4wvbkcz"
- Version:
- Index: 19
- CreatedAt: "2016-06-07T21:05:51.880065305Z"
- UpdatedAt: "2016-06-07T21:07:29.962229872Z"
- Spec:
- Name: "hopeful_cori"
- TaskTemplate:
- ContainerSpec:
- Image: "redis"
- Resources:
- Limits: {}
- Reservations: {}
- RestartPolicy:
- Condition: "any"
- MaxAttempts: 0
- Placement: {}
- ForceUpdate: 0
- Mode:
- Replicated:
- Replicas: 1
- UpdateConfig:
- Parallelism: 1
- Delay: 1000000000
- FailureAction: "pause"
- Monitor: 15000000000
- MaxFailureRatio: 0.15
- RollbackConfig:
- Parallelism: 1
- Delay: 1000000000
- FailureAction: "pause"
- Monitor: 15000000000
- MaxFailureRatio: 0.15
- EndpointSpec:
- Mode: "vip"
- Ports:
- -
- Protocol: "tcp"
- TargetPort: 6379
- PublishedPort: 30001
- Endpoint:
- Spec:
- Mode: "vip"
- Ports:
- -
- Protocol: "tcp"
- TargetPort: 6379
- PublishedPort: 30001
- Ports:
- -
- Protocol: "tcp"
- TargetPort: 6379
- PublishedPort: 30001
- VirtualIPs:
- -
- NetworkID: "4qvuz4ko70xaltuqbt8956gd1"
- Addr: "10.255.0.2/16"
- -
- NetworkID: "4qvuz4ko70xaltuqbt8956gd1"
- Addr: "10.255.0.3/16"
-
- ImageDeleteResponseItem:
- type: "object"
- properties:
- Untagged:
- description: "The image ID of an image that was untagged"
- type: "string"
- Deleted:
- description: "The image ID of an image that was deleted"
- type: "string"
-
- ServiceUpdateResponse:
- type: "object"
- properties:
- Warnings:
- description: "Optional warning messages"
- type: "array"
- items:
- type: "string"
- example:
- Warning: "unable to pin image doesnotexist:latest to digest: image library/doesnotexist:latest not found"
-
- ContainerSummary:
- type: "array"
- items:
- type: "object"
- properties:
- Id:
- description: "The ID of this container"
- type: "string"
- x-go-name: "ID"
- Names:
- description: "The names that this container has been given"
- type: "array"
- items:
- type: "string"
- Image:
- description: "The name of the image used when creating this container"
- type: "string"
- ImageID:
- description: "The ID of the image that this container was created from"
- type: "string"
- Command:
- description: "Command to run when starting the container"
- type: "string"
- Created:
- description: "When the container was created"
- type: "integer"
- format: "int64"
- Ports:
- description: "The ports exposed by this container"
- type: "array"
- items:
- $ref: "#/definitions/Port"
- SizeRw:
- description: "The size of files that have been created or changed by this container"
- type: "integer"
- format: "int64"
- SizeRootFs:
- description: "The total size of all the files in this container"
- type: "integer"
- format: "int64"
- Labels:
- description: "User-defined key/value metadata."
- type: "object"
- additionalProperties:
- type: "string"
- State:
- description: "The state of this container (e.g. `Exited`)"
- type: "string"
- Status:
- description: "Additional human-readable status of this container (e.g. `Exit 0`)"
- type: "string"
- HostConfig:
- type: "object"
- properties:
- NetworkMode:
- type: "string"
- NetworkSettings:
- description: "A summary of the container's network settings"
- type: "object"
- properties:
- Networks:
- type: "object"
- additionalProperties:
- $ref: "#/definitions/EndpointSettings"
- Mounts:
- type: "array"
- items:
- $ref: "#/definitions/Mount"
-
- Driver:
- description: "Driver represents a driver (network, logging, secrets)."
- type: "object"
- required: [Name]
- properties:
- Name:
- description: "Name of the driver."
- type: "string"
- x-nullable: false
- example: "some-driver"
- Options:
- description: "Key/value map of driver-specific options."
- type: "object"
- x-nullable: false
- additionalProperties:
- type: "string"
- example:
- OptionA: "value for driver-specific option A"
- OptionB: "value for driver-specific option B"
-
- SecretSpec:
- type: "object"
- properties:
- Name:
- description: "User-defined name of the secret."
- type: "string"
- Labels:
- description: "User-defined key/value metadata."
- type: "object"
- additionalProperties:
- type: "string"
- example:
- com.example.some-label: "some-value"
- com.example.some-other-label: "some-other-value"
- Data:
- description: |
- Base64-url-safe-encoded ([RFC 4648](https://tools.ietf.org/html/rfc4648#section-5))
- data to store as secret.
-
- This field is only used to _create_ a secret, and is not returned by
- other endpoints.
- type: "string"
- example: ""
- Driver:
- description: |
- Name of the secrets driver used to fetch the secret's value from an
- external secret store.
- $ref: "#/definitions/Driver"
- Templating:
- description: |
- Templating driver, if applicable
-
- Templating controls whether and how to evaluate the config payload as
- a template. If no driver is set, no templating is used.
- $ref: "#/definitions/Driver"
-
- Secret:
- type: "object"
- properties:
- ID:
- type: "string"
- example: "blt1owaxmitz71s9v5zh81zun"
- Version:
- $ref: "#/definitions/ObjectVersion"
- CreatedAt:
- type: "string"
- format: "dateTime"
- example: "2017-07-20T13:55:28.678958722Z"
- UpdatedAt:
- type: "string"
- format: "dateTime"
- example: "2017-07-20T13:55:28.678958722Z"
- Spec:
- $ref: "#/definitions/SecretSpec"
-
- ConfigSpec:
- type: "object"
- properties:
- Name:
- description: "User-defined name of the config."
- type: "string"
- Labels:
- description: "User-defined key/value metadata."
- type: "object"
- additionalProperties:
- type: "string"
- Data:
- description: |
- Base64-url-safe-encoded ([RFC 4648](https://tools.ietf.org/html/rfc4648#section-5))
- config data.
- type: "string"
- Templating:
- description: |
- Templating driver, if applicable
-
- Templating controls whether and how to evaluate the config payload as
- a template. If no driver is set, no templating is used.
- $ref: "#/definitions/Driver"
-
- Config:
- type: "object"
- properties:
- ID:
- type: "string"
- Version:
- $ref: "#/definitions/ObjectVersion"
- CreatedAt:
- type: "string"
- format: "dateTime"
- UpdatedAt:
- type: "string"
- format: "dateTime"
- Spec:
- $ref: "#/definitions/ConfigSpec"
-
- ContainerState:
- description: |
- ContainerState stores container's running state. It's part of ContainerJSONBase
- and will be returned by the "inspect" command.
- type: "object"
- properties:
- Status:
- description: |
- String representation of the container state. Can be one of "created",
- "running", "paused", "restarting", "removing", "exited", or "dead".
- type: "string"
- enum: ["created", "running", "paused", "restarting", "removing", "exited", "dead"]
- example: "running"
- Running:
- description: |
- Whether this container is running.
-
- Note that a running container can be _paused_. The `Running` and `Paused`
- booleans are not mutually exclusive:
-
- When pausing a container (on Linux), the freezer cgroup is used to suspend
- all processes in the container. Freezing the process requires the process to
- be running. As a result, paused containers are both `Running` _and_ `Paused`.
-
- Use the `Status` field instead to determine if a container's state is "running".
- type: "boolean"
- example: true
- Paused:
- description: "Whether this container is paused."
- type: "boolean"
- example: false
- Restarting:
- description: "Whether this container is restarting."
- type: "boolean"
- example: false
- OOMKilled:
- description: |
- Whether this container has been killed because it ran out of memory.
- type: "boolean"
- example: false
- Dead:
- type: "boolean"
- example: false
- Pid:
- description: "The process ID of this container"
- type: "integer"
- example: 1234
- ExitCode:
- description: "The last exit code of this container"
- type: "integer"
- example: 0
- Error:
- type: "string"
- StartedAt:
- description: "The time when this container was last started."
- type: "string"
- example: "2020-01-06T09:06:59.461876391Z"
- FinishedAt:
- description: "The time when this container last exited."
- type: "string"
- example: "2020-01-06T09:07:59.461876391Z"
- Health:
- x-nullable: true
- $ref: "#/definitions/Health"
-
- SystemVersion:
- type: "object"
- description: |
- Response of Engine API: GET "/version"
- properties:
- Platform:
- type: "object"
- required: [Name]
- properties:
- Name:
- type: "string"
- Components:
- type: "array"
- description: |
- Information about system components
- items:
- type: "object"
- x-go-name: ComponentVersion
- required: [Name, Version]
- properties:
- Name:
- description: |
- Name of the component
- type: "string"
- example: "Engine"
- Version:
- description: |
- Version of the component
- type: "string"
- x-nullable: false
- example: "19.03.12"
- Details:
- description: |
- Key/value pairs of strings with additional information about the
- component. These values are intended for informational purposes
- only, and their content is not defined, and not part of the API
- specification.
-
- These messages can be printed by the client as information to the user.
- type: "object"
- x-nullable: true
- Version:
- description: "The version of the daemon"
- type: "string"
- example: "19.03.12"
- ApiVersion:
- description: |
- The default (and highest) API version that is supported by the daemon
- type: "string"
- example: "1.40"
- MinAPIVersion:
- description: |
- The minimum API version that is supported by the daemon
- type: "string"
- example: "1.12"
- GitCommit:
- description: |
- The Git commit of the source code that was used to build the daemon
- type: "string"
- example: "48a66213fe"
- GoVersion:
- description: |
- The version of Go used to compile the daemon, and the version of the Go
- runtime in use.
- type: "string"
- example: "go1.13.14"
- Os:
- description: |
- The operating system that the daemon is running on ("linux" or "windows")
- type: "string"
- example: "linux"
- Arch:
- description: |
- The architecture that the daemon is running on
- type: "string"
- example: "amd64"
- KernelVersion:
- description: |
- The kernel version (`uname -r`) that the daemon is running on.
-
- This field is omitted when empty.
- type: "string"
- example: "4.19.76-linuxkit"
- Experimental:
- description: |
- Indicates if the daemon is started with experimental features enabled.
-
- This field is omitted when empty / false.
- type: "boolean"
- example: true
- BuildTime:
- description: |
- The date and time that the daemon was compiled.
- type: "string"
- example: "2020-06-22T15:49:27.000000000+00:00"
-
-
- SystemInfo:
- type: "object"
- properties:
- ID:
- description: |
- Unique identifier of the daemon.
-
-
-
- > **Note**: The format of the ID itself is not part of the API, and
- > should not be considered stable.
- type: "string"
- example: "7TRN:IPZB:QYBB:VPBQ:UMPP:KARE:6ZNR:XE6T:7EWV:PKF4:ZOJD:TPYS"
- Containers:
- description: "Total number of containers on the host."
- type: "integer"
- example: 14
- ContainersRunning:
- description: |
- Number of containers with status `"running"`.
- type: "integer"
- example: 3
- ContainersPaused:
- description: |
- Number of containers with status `"paused"`.
- type: "integer"
- example: 1
- ContainersStopped:
- description: |
- Number of containers with status `"stopped"`.
- type: "integer"
- example: 10
- Images:
- description: |
- Total number of images on the host.
-
- Both _tagged_ and _untagged_ (dangling) images are counted.
- type: "integer"
- example: 508
- Driver:
- description: "Name of the storage driver in use."
- type: "string"
- example: "overlay2"
- DriverStatus:
- description: |
- Information specific to the storage driver, provided as
- "label" / "value" pairs.
-
- This information is provided by the storage driver, and formatted
- in a way consistent with the output of `docker info` on the command
- line.
-
-
-
- > **Note**: The information returned in this field, including the
- > formatting of values and labels, should not be considered stable,
- > and may change without notice.
- type: "array"
- items:
- type: "array"
- items:
- type: "string"
- example:
- - ["Backing Filesystem", "extfs"]
- - ["Supports d_type", "true"]
- - ["Native Overlay Diff", "true"]
- DockerRootDir:
- description: |
- Root directory of persistent Docker state.
-
- Defaults to `/var/lib/docker` on Linux, and `C:\ProgramData\docker`
- on Windows.
- type: "string"
- example: "/var/lib/docker"
- Plugins:
- $ref: "#/definitions/PluginsInfo"
- MemoryLimit:
- description: "Indicates if the host has memory limit support enabled."
- type: "boolean"
- example: true
- SwapLimit:
- description: "Indicates if the host has memory swap limit support enabled."
- type: "boolean"
- example: true
- KernelMemory:
- description: |
- Indicates if the host has kernel memory limit support enabled.
-
-
-
- > **Deprecated**: This field is deprecated as the kernel 5.4 deprecated
- > `kmem.limit_in_bytes`.
- type: "boolean"
- example: true
- CpuCfsPeriod:
- description: |
- Indicates if CPU CFS (Completely Fair Scheduler) period is supported by
- the host.
- type: "boolean"
- example: true
- CpuCfsQuota:
- description: |
- Indicates if CPU CFS (Completely Fair Scheduler) quota is supported by
- the host.
- type: "boolean"
- example: true
- CPUShares:
- description: |
- Indicates if CPU Shares limiting is supported by the host.
- type: "boolean"
- example: true
- CPUSet:
- description: |
- Indicates if CPUsets (cpuset.cpus, cpuset.mems) are supported by the host.
-
- See [cpuset(7)](https://www.kernel.org/doc/Documentation/cgroup-v1/cpusets.txt)
- type: "boolean"
- example: true
- PidsLimit:
- description: "Indicates if the host kernel has PID limit support enabled."
- type: "boolean"
- example: true
- OomKillDisable:
- description: "Indicates if OOM killer disable is supported on the host."
- type: "boolean"
- IPv4Forwarding:
- description: "Indicates IPv4 forwarding is enabled."
- type: "boolean"
- example: true
- BridgeNfIptables:
- description: "Indicates if `bridge-nf-call-iptables` is available on the host."
- type: "boolean"
- example: true
- BridgeNfIp6tables:
- description: "Indicates if `bridge-nf-call-ip6tables` is available on the host."
- type: "boolean"
- example: true
- Debug:
- description: |
- Indicates if the daemon is running in debug-mode / with debug-level
- logging enabled.
- type: "boolean"
- example: true
- NFd:
- description: |
- The total number of file descriptors in use by the daemon process.
-
- This information is only returned if debug-mode is enabled.
- type: "integer"
- example: 64
- NGoroutines:
- description: |
- The number of goroutines that currently exist.
-
- This information is only returned if debug-mode is enabled.
- type: "integer"
- example: 174
- SystemTime:
- description: |
- Current system-time in [RFC 3339](https://www.ietf.org/rfc/rfc3339.txt)
- format with nano-seconds.
- type: "string"
- example: "2017-08-08T20:28:29.06202363Z"
- LoggingDriver:
- description: |
- The logging driver to use as a default for new containers.
- type: "string"
- CgroupDriver:
- description: |
- The driver to use for managing cgroups.
- type: "string"
- enum: ["cgroupfs", "systemd", "none"]
- default: "cgroupfs"
- example: "cgroupfs"
- CgroupVersion:
- description: |
- The version of the cgroup.
- type: "string"
- enum: ["1", "2"]
- default: "1"
- example: "1"
- NEventsListener:
- description: "Number of event listeners subscribed."
- type: "integer"
- example: 30
- KernelVersion:
- description: |
- Kernel version of the host.
-
- On Linux, this information is obtained from `uname`. On Windows this
- information is queried from the HKEY_LOCAL_MACHINE\\SOFTWARE\\Microsoft\\Windows NT\\CurrentVersion\\
- registry value, for example _"10.0 14393 (14393.1198.amd64fre.rs1_release_sec.170427-1353)"_.
- type: "string"
- example: "4.9.38-moby"
- OperatingSystem:
- description: |
- Name of the host's operating system, for example: "Ubuntu 16.04.2 LTS"
- or "Windows Server 2016 Datacenter"
- type: "string"
- example: "Alpine Linux v3.5"
- OSVersion:
- description: |
- Version of the host's operating system
-
-
-
- > **Note**: The information returned in this field, including its
- > very existence, and the formatting of values, should not be considered
- > stable, and may change without notice.
- type: "string"
- example: "16.04"
- OSType:
- description: |
- Generic type of the operating system of the host, as returned by the
- Go runtime (`GOOS`).
-
- Currently returned values are "linux" and "windows". A full list of
- possible values can be found in the [Go documentation](https://golang.org/doc/install/source#environment).
- type: "string"
- example: "linux"
- Architecture:
- description: |
- Hardware architecture of the host, as returned by the Go runtime
- (`GOARCH`).
-
- A full list of possible values can be found in the [Go documentation](https://golang.org/doc/install/source#environment).
- type: "string"
- example: "x86_64"
- NCPU:
- description: |
- The number of logical CPUs usable by the daemon.
-
- The number of available CPUs is checked by querying the operating
- system when the daemon starts. Changes to operating system CPU
- allocation after the daemon is started are not reflected.
- type: "integer"
- example: 4
- MemTotal:
- description: |
- Total amount of physical memory available on the host, in bytes.
- type: "integer"
- format: "int64"
- example: 2095882240
-
- IndexServerAddress:
- description: |
- Address / URL of the index server that is used for image search,
- and as a default for user authentication for Docker Hub and Docker Cloud.
- default: "https://index.docker.io/v1/"
- type: "string"
- example: "https://index.docker.io/v1/"
- RegistryConfig:
- $ref: "#/definitions/RegistryServiceConfig"
- GenericResources:
- $ref: "#/definitions/GenericResources"
- HttpProxy:
- description: |
- HTTP-proxy configured for the daemon. This value is obtained from the
- [`HTTP_PROXY`](https://www.gnu.org/software/wget/manual/html_node/Proxies.html) environment variable.
- Credentials ([user info component](https://tools.ietf.org/html/rfc3986#section-3.2.1)) in the proxy URL
- are masked in the API response.
-
- Containers do not automatically inherit this configuration.
- type: "string"
- example: "http://xxxxx:xxxxx@proxy.corp.example.com:8080"
- HttpsProxy:
- description: |
- HTTPS-proxy configured for the daemon. This value is obtained from the
- [`HTTPS_PROXY`](https://www.gnu.org/software/wget/manual/html_node/Proxies.html) environment variable.
- Credentials ([user info component](https://tools.ietf.org/html/rfc3986#section-3.2.1)) in the proxy URL
- are masked in the API response.
-
- Containers do not automatically inherit this configuration.
- type: "string"
- example: "https://xxxxx:xxxxx@proxy.corp.example.com:4443"
- NoProxy:
- description: |
- Comma-separated list of domain extensions for which no proxy should be
- used. This value is obtained from the [`NO_PROXY`](https://www.gnu.org/software/wget/manual/html_node/Proxies.html)
- environment variable.
-
- Containers do not automatically inherit this configuration.
- type: "string"
- example: "*.local, 169.254/16"
- Name:
- description: "Hostname of the host."
- type: "string"
- example: "node5.corp.example.com"
- Labels:
- description: |
- User-defined labels (key/value metadata) as set on the daemon.
-
-
-
- > **Note**: When part of a Swarm, nodes can both have _daemon_ labels,
- > set through the daemon configuration, and _node_ labels, set from a
- > manager node in the Swarm. Node labels are not included in this
- > field. Node labels can be retrieved using the `/nodes/(id)` endpoint
- > on a manager node in the Swarm.
- type: "array"
- items:
- type: "string"
- example: ["storage=ssd", "production"]
- ExperimentalBuild:
- description: |
- Indicates if experimental features are enabled on the daemon.
- type: "boolean"
- example: true
- ServerVersion:
- description: |
- Version string of the daemon.
-
- > **Note**: the [standalone Swarm API](https://docs.docker.com/swarm/swarm-api/)
- > returns the Swarm version instead of the daemon version, for example
- > `swarm/1.2.8`.
- type: "string"
- example: "17.06.0-ce"
- ClusterStore:
- description: |
- URL of the distributed storage backend.
-
-
- The storage backend is used for multihost networking (to store
- network and endpoint information) and by the node discovery mechanism.
-
-
-
- > **Deprecated**: This field is only propagated when using standalone Swarm
- > mode, and overlay networking using an external k/v store. Overlay
- > networks with Swarm mode enabled use the built-in raft store, and
- > this field will be empty.
- type: "string"
- example: "consul://consul.corp.example.com:8600/some/path"
- ClusterAdvertise:
- description: |
- The network endpoint that the Engine advertises for the purpose of
- node discovery. ClusterAdvertise is a `host:port` combination on which
- the daemon is reachable by other hosts.
-
-
-
- > **Deprecated**: This field is only propagated when using standalone Swarm
- > mode, and overlay networking using an external k/v store. Overlay
- > networks with Swarm mode enabled use the built-in raft store, and
- > this field will be empty.
- type: "string"
- example: "node5.corp.example.com:8000"
- Runtimes:
- description: |
- List of [OCI compliant](https://github.com/opencontainers/runtime-spec)
- runtimes configured on the daemon. Keys hold the "name" used to
- reference the runtime.
-
- The Docker daemon relies on an OCI compliant runtime (invoked via the
- `containerd` daemon) as its interface to the Linux kernel namespaces,
- cgroups, and SELinux.
-
- The default runtime is `runc`, and is automatically configured. Additional
- runtimes can be configured by the user and will be listed here.
- type: "object"
- additionalProperties:
- $ref: "#/definitions/Runtime"
- default:
- runc:
- path: "runc"
- example:
- runc:
- path: "runc"
- runc-master:
- path: "/go/bin/runc"
- custom:
- path: "/usr/local/bin/my-oci-runtime"
- runtimeArgs: ["--debug", "--systemd-cgroup=false"]
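- # Illustrative daemon.json snippet (an assumption, not part of this schema)
- # that would register the "custom" runtime shown in the example above:
- #   {
- #     "runtimes": {
- #       "custom": {
- #         "path": "/usr/local/bin/my-oci-runtime",
- #         "runtimeArgs": ["--debug"]
- #       }
- #     }
- #   }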
- DefaultRuntime:
- description: |
- Name of the default OCI runtime that is used when starting containers.
-
- The default can be overridden per-container at create time.
- type: "string"
- default: "runc"
- example: "runc"
- Swarm:
- $ref: "#/definitions/SwarmInfo"
- LiveRestoreEnabled:
- description: |
- Indicates if live restore is enabled.
-
- If enabled, containers are kept running when the daemon is shut down
- or upon daemon start if running containers are detected.
- type: "boolean"
- default: false
- example: false
- Isolation:
- description: |
- Represents the isolation technology to use as a default for containers.
- The supported values are platform-specific.
-
- If no isolation value is specified on daemon start, on Windows client,
- the default is `hyperv`, and on Windows server, the default is `process`.
-
- This option is currently not used on other platforms.
- default: "default"
- type: "string"
- enum:
- - "default"
- - "hyperv"
- - "process"
- InitBinary:
- description: |
- Name and, optionally, path of the `docker-init` binary.
-
- If the path is omitted, the daemon searches the host's `$PATH` for the
- binary and uses the first result.
- type: "string"
- example: "docker-init"
- ContainerdCommit:
- $ref: "#/definitions/Commit"
- RuncCommit:
- $ref: "#/definitions/Commit"
- InitCommit:
- $ref: "#/definitions/Commit"
- SecurityOptions:
- description: |
- List of security features that are enabled on the daemon, such as
- apparmor, seccomp, SELinux, user-namespaces (userns), and rootless.
-
- Additional configuration options for each security feature may
- be present, and are included as a comma-separated list of key/value
- pairs.
- type: "array"
- items:
- type: "string"
- example:
- - "name=apparmor"
- - "name=seccomp,profile=default"
- - "name=selinux"
- - "name=userns"
- - "name=rootless"
- ProductLicense:
- description: |
- Reports a summary of the product license on the daemon.
-
- If a commercial license has been applied to the daemon, information
- such as number of nodes, and expiration are included.
- type: "string"
- example: "Community Engine"
- DefaultAddressPools:
- description: |
- List of custom default address pools for local networks, which can be
- specified in the daemon.json file or dockerd option.
-
- Example: a Base "10.10.0.0/16" with Size 24 will define the set of 256
- 10.10.[0-255].0/24 address pools.
- type: "array"
- items:
- type: "object"
- properties:
- Base:
- description: "The network address in CIDR format"
- type: "string"
- example: "10.10.0.0/16"
- Size:
- description: "The network pool size"
- type: "integer"
- example: "24"
- Warnings:
- description: |
- List of warnings / informational messages about missing features, or
- issues related to the daemon configuration.
-
- These messages can be printed by the client as information to the user.
- type: "array"
- items:
- type: "string"
- example:
- - "WARNING: No memory limit support"
- - "WARNING: bridge-nf-call-iptables is disabled"
- - "WARNING: bridge-nf-call-ip6tables is disabled"
-
-
- # PluginsInfo is a temp struct holding Plugins name
- # registered with docker daemon. It is used by Info struct
- PluginsInfo:
- description: |
- Available plugins per type.
-
-
-
- > **Note**: Only unmanaged (V1) plugins are included in this list.
- > V1 plugins are "lazily" loaded, and are not returned in this list
- > if there is no resource using the plugin.
- type: "object"
- properties:
- Volume:
- description: "Names of available volume-drivers, and network-driver plugins."
- type: "array"
- items:
- type: "string"
- example: ["local"]
- Network:
- description: "Names of available network-drivers, and network-driver plugins."
- type: "array"
- items:
- type: "string"
- example: ["bridge", "host", "ipvlan", "macvlan", "null", "overlay"]
- Authorization:
- description: "Names of available authorization plugins."
- type: "array"
- items:
- type: "string"
- example: ["img-authz-plugin", "hbm"]
- Log:
- description: "Names of available logging-drivers, and logging-driver plugins."
- type: "array"
- items:
- type: "string"
- example: ["awslogs", "fluentd", "gcplogs", "gelf", "journald", "json-file", "logentries", "splunk", "syslog"]
-
-
- RegistryServiceConfig:
- description: |
- RegistryServiceConfig stores daemon registry services configuration.
- type: "object"
- x-nullable: true
- properties:
- AllowNondistributableArtifactsCIDRs:
- description: |
- List of IP ranges to which nondistributable artifacts can be pushed,
- using the CIDR syntax [RFC 4632](https://tools.ietf.org/html/rfc4632).
-
- Some images (for example, Windows base images) contain artifacts
- whose distribution is restricted by license. When these images are
- pushed to a registry, restricted artifacts are not included.
-
- This configuration overrides this behavior, and enables the daemon to
- push nondistributable artifacts to all registries whose resolved IP
- address is within the subnet described by the CIDR syntax.
-
- This option is useful when pushing images containing
- nondistributable artifacts to a registry on an air-gapped network so
- hosts on that network can pull the images without connecting to
- another server.
-
- > **Warning**: Nondistributable artifacts typically have restrictions
- > on how and where they can be distributed and shared. Only use this
- > feature to push artifacts to private registries and ensure that you
- > are in compliance with any terms that cover redistributing
- > nondistributable artifacts.
-
- type: "array"
- items:
- type: "string"
- example: ["::1/128", "127.0.0.0/8"]
- AllowNondistributableArtifactsHostnames:
- description: |
- List of registry hostnames to which nondistributable artifacts can be
- pushed, using the format `<hostname>[:<port>]` or `<IP address>[:<port>]`.
-
- Some images (for example, Windows base images) contain artifacts
- whose distribution is restricted by license. When these images are
- pushed to a registry, restricted artifacts are not included.
-
- This configuration overrides this behavior for the specified
- registries.
-
- This option is useful when pushing images containing
- nondistributable artifacts to a registry on an air-gapped network so
- hosts on that network can pull the images without connecting to
- another server.
-
- > **Warning**: Nondistributable artifacts typically have restrictions
- > on how and where they can be distributed and shared. Only use this
- > feature to push artifacts to private registries and ensure that you
- > are in compliance with any terms that cover redistributing
- > nondistributable artifacts.
- type: "array"
- items:
- type: "string"
- example: ["registry.internal.corp.example.com:3000", "[2001:db8:a0b:12f0::1]:443"]
- InsecureRegistryCIDRs:
- description: |
- List of IP ranges of insecure registries, using the CIDR syntax
- ([RFC 4632](https://tools.ietf.org/html/rfc4632)). Insecure registries
- accept un-encrypted (HTTP) and/or untrusted (HTTPS with certificates
- from unknown CAs) communication.
-
- By default, local registries (`127.0.0.0/8`) are configured as
- insecure. All other registries are secure. Communicating with an
- insecure registry is not possible if the daemon assumes that registry
- is secure.
-
- This configuration overrides this behavior, and enables insecure
- communication with registries whose resolved IP address is within the
- subnet described by the CIDR syntax.
-
- Registries can also be marked insecure by hostname. Those registries
- are listed under `IndexConfigs` and have their `Secure` field set to
- `false`.
-
- > **Warning**: Using this option can be useful when running a local
- > registry, but introduces security vulnerabilities. This option
- > should therefore ONLY be used for testing purposes. For increased
- > security, users should add their CA to their system's list of trusted
- > CAs instead of enabling this option.
- type: "array"
- items:
- type: "string"
- example: ["::1/128", "127.0.0.0/8"]
- IndexConfigs:
- type: "object"
- additionalProperties:
- $ref: "#/definitions/IndexInfo"
- example:
- "127.0.0.1:5000":
- "Name": "127.0.0.1:5000"
- "Mirrors": []
- "Secure": false
- "Official": false
- "[2001:db8:a0b:12f0::1]:80":
- "Name": "[2001:db8:a0b:12f0::1]:80"
- "Mirrors": []
- "Secure": false
- "Official": false
- "docker.io":
- Name: "docker.io"
- Mirrors: ["https://hub-mirror.corp.example.com:5000/"]
- Secure: true
- Official: true
- "registry.internal.corp.example.com:3000":
- Name: "registry.internal.corp.example.com:3000"
- Mirrors: []
- Secure: false
- Official: false
- Mirrors:
- description: |
- List of registry URLs that act as a mirror for the official
- (`docker.io`) registry.
-
- type: "array"
- items:
- type: "string"
- example:
- - "https://hub-mirror.corp.example.com:5000/"
- - "https://[2001:db8:a0b:12f0::1]/"
-
- IndexInfo:
- description:
- IndexInfo contains information about a registry.
- type: "object"
- x-nullable: true
- properties:
- Name:
- description: |
- Name of the registry, such as "docker.io".
- type: "string"
- example: "docker.io"
- Mirrors:
- description: |
- List of mirrors, expressed as URIs.
- type: "array"
- items:
- type: "string"
- example:
- - "https://hub-mirror.corp.example.com:5000/"
- - "https://registry-2.docker.io/"
- - "https://registry-3.docker.io/"
- Secure:
- description: |
- Indicates if the registry is part of the list of insecure
- registries.
-
- If `false`, the registry is insecure. Insecure registries accept
- un-encrypted (HTTP) and/or untrusted (HTTPS with certificates from
- unknown CAs) communication.
-
- > **Warning**: Insecure registries can be useful when running a local
- > registry. However, because its use creates security vulnerabilities
- > it should ONLY be enabled for testing purposes. For increased
- > security, users should add their CA to their system's list of
- > trusted CAs instead of enabling this option.
- type: "boolean"
- example: true
- Official:
- description: |
- Indicates whether this is an official registry (i.e., Docker Hub / docker.io)
- type: "boolean"
- example: true
-
- Runtime:
- description: |
- Runtime describes an [OCI compliant](https://github.com/opencontainers/runtime-spec)
- runtime.
-
- The runtime is invoked by the daemon via the `containerd` daemon. OCI
- runtimes act as an interface to the Linux kernel namespaces, cgroups,
- and SELinux.
- type: "object"
- properties:
- path:
- description: |
- Name and, optionally, path of the OCI executable binary.
-
- If the path is omitted, the daemon searches the host's `$PATH` for the
- binary and uses the first result.
- type: "string"
- example: "/usr/local/bin/my-oci-runtime"
- runtimeArgs:
- description: |
- List of command-line arguments to pass to the runtime when invoked.
- type: "array"
- x-nullable: true
- items:
- type: "string"
- example: ["--debug", "--systemd-cgroup=false"]
-
- Commit:
- description: |
- Commit holds the Git-commit (SHA1) that a binary was built from, as
- reported in the version-string of external tools, such as `containerd`,
- or `runC`.
- type: "object"
- properties:
- ID:
- description: "Actual commit ID of external tool."
- type: "string"
- example: "cfb82a876ecc11b5ca0977d1733adbe58599088a"
- Expected:
- description: |
- Commit ID of external tool expected by dockerd as set at build time.
- type: "string"
- example: "2d41c047c83e09a6d61d464906feb2a2f3c52aa4"
-
- SwarmInfo:
- description: |
- Represents generic information about swarm.
- type: "object"
- properties:
- NodeID:
- description: "Unique identifier of for this node in the swarm."
- type: "string"
- default: ""
- example: "k67qz4598weg5unwwffg6z1m1"
- NodeAddr:
- description: |
- IP address at which this node can be reached by other nodes in the
- swarm.
- type: "string"
- default: ""
- example: "10.0.0.46"
- LocalNodeState:
- $ref: "#/definitions/LocalNodeState"
- ControlAvailable:
- type: "boolean"
- default: false
- example: true
- Error:
- type: "string"
- default: ""
- RemoteManagers:
- description: |
- List of IDs and addresses of other managers in the swarm.
- type: "array"
- default: null
- x-nullable: true
- items:
- $ref: "#/definitions/PeerNode"
- example:
- - NodeID: "71izy0goik036k48jg985xnds"
- Addr: "10.0.0.158:2377"
- - NodeID: "79y6h1o4gv8n120drcprv5nmc"
- Addr: "10.0.0.159:2377"
- - NodeID: "k67qz4598weg5unwwffg6z1m1"
- Addr: "10.0.0.46:2377"
- Nodes:
- description: "Total number of nodes in the swarm."
- type: "integer"
- x-nullable: true
- example: 4
- Managers:
- description: "Total number of managers in the swarm."
- type: "integer"
- x-nullable: true
- example: 3
- Cluster:
- $ref: "#/definitions/ClusterInfo"
-
- LocalNodeState:
- description: "Current local status of this node."
- type: "string"
- default: ""
- enum:
- - ""
- - "inactive"
- - "pending"
- - "active"
- - "error"
- - "locked"
- example: "active"
-
- PeerNode:
- description: "Represents a peer-node in the swarm"
- properties:
- NodeID:
- description: "Unique identifier of for this node in the swarm."
- type: "string"
- Addr:
- description: |
- IP address and ports at which this node can be reached.
- type: "string"
-
- NetworkAttachmentConfig:
- description: |
- Specifies how a service should be attached to a particular network.
- type: "object"
- properties:
- Target:
- description: |
- The target network for attachment. Must be a network name or ID.
- type: "string"
- Aliases:
- description: |
- Discoverable alternate names for the service on this network.
- type: "array"
- items:
- type: "string"
- DriverOpts:
- description: |
- Driver attachment options for the network target.
- type: "object"
- additionalProperties:
- type: "string"
-
-paths:
- /containers/json:
- get:
- summary: "List containers"
- description: |
- Returns a list of containers. For details on the format, see the
- [inspect endpoint](#operation/ContainerInspect).
-
- Note that it uses a different, smaller representation of a container
- than inspecting a single container. For example, the list of linked
- containers is not propagated.
- operationId: "ContainerList"
- produces:
- - "application/json"
- parameters:
- - name: "all"
- in: "query"
- description: |
- Return all containers. By default, only running containers are shown.
- type: "boolean"
- default: false
- - name: "limit"
- in: "query"
- description: |
- Return this number of most recently created containers, including
- non-running ones.
- type: "integer"
- - name: "size"
- in: "query"
- description: |
- Return the size of container as fields `SizeRw` and `SizeRootFs`.
- type: "boolean"
- default: false
- - name: "filters"
- in: "query"
- description: |
- Filters to process on the container list, encoded as JSON (a
- `map[string][]string`). For example, `{"status": ["paused"]}` will
- only return paused containers.
-
- Available filters:
-
- - `ancestor`=(`<image-name>[:<tag>]`, `<image id>`, or `<image@digest>`)
- - `before`=(`<container id>` or `<container name>`)
- - `expose`=(`<port>[/<proto>]`|`<startport-endport>/[<proto>]`)
- - `exited=<int>` containers with exit code of `<int>`
- - `health`=(`starting`|`healthy`|`unhealthy`|`none`)
- - `id=<ID>` a container's ID
- - `isolation=`(`default`|`process`|`hyperv`) (Windows daemon only)
- - `is-task=`(`true`|`false`)
- - `label=key` or `label="key=value"` of a container label
- - `name=<name>` a container's name
- - `network`=(`<network id>` or `<network name>`)
- - `publish`=(`<port>[/<proto>]`|`<startport-endport>/[<proto>]`)
- - `since`=(`<container id>` or `<container name>`)
- - `status=`(`created`|`restarting`|`running`|`removing`|`paused`|`exited`|`dead`)
- - `volume`=(`<volume name>` or `<mount point destination>`)
- type: "string"
- responses:
- 200:
- description: "no error"
- schema:
- $ref: "#/definitions/ContainerSummary"
- examples:
- application/json:
- - Id: "8dfafdbc3a40"
- Names:
- - "/boring_feynman"
- Image: "ubuntu:latest"
- ImageID: "d74508fb6632491cea586a1fd7d748dfc5274cd6fdfedee309ecdcbc2bf5cb82"
- Command: "echo 1"
- Created: 1367854155
- State: "Exited"
- Status: "Exit 0"
- Ports:
- - PrivatePort: 2222
- PublicPort: 3333
- Type: "tcp"
- Labels:
- com.example.vendor: "Acme"
- com.example.license: "GPL"
- com.example.version: "1.0"
- SizeRw: 12288
- SizeRootFs: 0
- HostConfig:
- NetworkMode: "default"
- NetworkSettings:
- Networks:
- bridge:
- NetworkID: "7ea29fc1412292a2d7bba362f9253545fecdfa8ce9a6e37dd10ba8bee7129812"
- EndpointID: "2cdc4edb1ded3631c81f57966563e5c8525b81121bb3706a9a9a3ae102711f3f"
- Gateway: "172.17.0.1"
- IPAddress: "172.17.0.2"
- IPPrefixLen: 16
- IPv6Gateway: ""
- GlobalIPv6Address: ""
- GlobalIPv6PrefixLen: 0
- MacAddress: "02:42:ac:11:00:02"
- Mounts:
- - Name: "fac362...80535"
- Source: "/data"
- Destination: "/data"
- Driver: "local"
- Mode: "ro,Z"
- RW: false
- Propagation: ""
- - Id: "9cd87474be90"
- Names:
- - "/coolName"
- Image: "ubuntu:latest"
- ImageID: "d74508fb6632491cea586a1fd7d748dfc5274cd6fdfedee309ecdcbc2bf5cb82"
- Command: "echo 222222"
- Created: 1367854155
- State: "Exited"
- Status: "Exit 0"
- Ports: []
- Labels: {}
- SizeRw: 12288
- SizeRootFs: 0
- HostConfig:
- NetworkMode: "default"
- NetworkSettings:
- Networks:
- bridge:
- NetworkID: "7ea29fc1412292a2d7bba362f9253545fecdfa8ce9a6e37dd10ba8bee7129812"
- EndpointID: "88eaed7b37b38c2a3f0c4bc796494fdf51b270c2d22656412a2ca5d559a64d7a"
- Gateway: "172.17.0.1"
- IPAddress: "172.17.0.8"
- IPPrefixLen: 16
- IPv6Gateway: ""
- GlobalIPv6Address: ""
- GlobalIPv6PrefixLen: 0
- MacAddress: "02:42:ac:11:00:08"
- Mounts: []
- - Id: "3176a2479c92"
- Names:
- - "/sleepy_dog"
- Image: "ubuntu:latest"
- ImageID: "d74508fb6632491cea586a1fd7d748dfc5274cd6fdfedee309ecdcbc2bf5cb82"
- Command: "echo 3333333333333333"
- Created: 1367854154
- State: "Exited"
- Status: "Exit 0"
- Ports: []
- Labels: {}
- SizeRw: 12288
- SizeRootFs: 0
- HostConfig:
- NetworkMode: "default"
- NetworkSettings:
- Networks:
- bridge:
- NetworkID: "7ea29fc1412292a2d7bba362f9253545fecdfa8ce9a6e37dd10ba8bee7129812"
- EndpointID: "8b27c041c30326d59cd6e6f510d4f8d1d570a228466f956edf7815508f78e30d"
- Gateway: "172.17.0.1"
- IPAddress: "172.17.0.6"
- IPPrefixLen: 16
- IPv6Gateway: ""
- GlobalIPv6Address: ""
- GlobalIPv6PrefixLen: 0
- MacAddress: "02:42:ac:11:00:06"
- Mounts: []
- - Id: "4cb07b47f9fb"
- Names:
- - "/running_cat"
- Image: "ubuntu:latest"
- ImageID: "d74508fb6632491cea586a1fd7d748dfc5274cd6fdfedee309ecdcbc2bf5cb82"
- Command: "echo 444444444444444444444444444444444"
- Created: 1367854152
- State: "Exited"
- Status: "Exit 0"
- Ports: []
- Labels: {}
- SizeRw: 12288
- SizeRootFs: 0
- HostConfig:
- NetworkMode: "default"
- NetworkSettings:
- Networks:
- bridge:
- NetworkID: "7ea29fc1412292a2d7bba362f9253545fecdfa8ce9a6e37dd10ba8bee7129812"
- EndpointID: "d91c7b2f0644403d7ef3095985ea0e2370325cd2332ff3a3225c4247328e66e9"
- Gateway: "172.17.0.1"
- IPAddress: "172.17.0.5"
- IPPrefixLen: 16
- IPv6Gateway: ""
- GlobalIPv6Address: ""
- GlobalIPv6PrefixLen: 0
- MacAddress: "02:42:ac:11:00:05"
- Mounts: []
- 400:
- description: "bad parameter"
- schema:
- $ref: "#/definitions/ErrorResponse"
- 500:
- description: "server error"
- schema:
- $ref: "#/definitions/ErrorResponse"
- tags: ["Container"]
- /containers/create:
- post:
- summary: "Create a container"
- operationId: "ContainerCreate"
- consumes:
- - "application/json"
- - "application/octet-stream"
- produces:
- - "application/json"
- parameters:
- - name: "name"
- in: "query"
- description: |
- Assign the specified name to the container. Must match
- `/?[a-zA-Z0-9][a-zA-Z0-9_.-]+`.
- type: "string"
- pattern: "^/?[a-zA-Z0-9][a-zA-Z0-9_.-]+$"
- - name: "body"
- in: "body"
- description: "Container to create"
- schema:
- allOf:
- - $ref: "#/definitions/ContainerConfig"
- - type: "object"
- properties:
- HostConfig:
- $ref: "#/definitions/HostConfig"
- NetworkingConfig:
- $ref: "#/definitions/NetworkingConfig"
- example:
- Hostname: ""
- Domainname: ""
- User: ""
- AttachStdin: false
- AttachStdout: true
- AttachStderr: true
- Tty: false
- OpenStdin: false
- StdinOnce: false
- Env:
- - "FOO=bar"
- - "BAZ=quux"
- Cmd:
- - "date"
- Entrypoint: ""
- Image: "ubuntu"
- Labels:
- com.example.vendor: "Acme"
- com.example.license: "GPL"
- com.example.version: "1.0"
- Volumes:
- /volumes/data: {}
- WorkingDir: ""
- NetworkDisabled: false
- MacAddress: "12:34:56:78:9a:bc"
- ExposedPorts:
- 22/tcp: {}
- StopSignal: "SIGTERM"
- StopTimeout: 10
- HostConfig:
- Binds:
- - "/tmp:/tmp"
- Links:
- - "redis3:redis"
- Memory: 0
- MemorySwap: 0
- MemoryReservation: 0
- KernelMemory: 0
- NanoCpus: 500000
- CpuPercent: 80
- CpuShares: 512
- CpuPeriod: 100000
- CpuRealtimePeriod: 1000000
- CpuRealtimeRuntime: 10000
- CpuQuota: 50000
- CpusetCpus: "0,1"
- CpusetMems: "0,1"
- MaximumIOps: 0
- MaximumIOBps: 0
- BlkioWeight: 300
- BlkioWeightDevice:
- - {}
- BlkioDeviceReadBps:
- - {}
- BlkioDeviceReadIOps:
- - {}
- BlkioDeviceWriteBps:
- - {}
- BlkioDeviceWriteIOps:
- - {}
- DeviceRequests:
- - Driver: "nvidia"
- Count: -1
- DeviceIDs": ["0", "1", "GPU-fef8089b-4820-abfc-e83e-94318197576e"]
- Capabilities: [["gpu", "nvidia", "compute"]]
- Options:
- property1: "string"
- property2: "string"
- MemorySwappiness: 60
- OomKillDisable: false
- OomScoreAdj: 500
- PidMode: ""
- PidsLimit: 0
- PortBindings:
- 22/tcp:
- - HostPort: "11022"
- PublishAllPorts: false
- Privileged: false
- ReadonlyRootfs: false
- Dns:
- - "8.8.8.8"
- DnsOptions:
- - ""
- DnsSearch:
- - ""
- VolumesFrom:
- - "parent"
- - "other:ro"
- CapAdd:
- - "NET_ADMIN"
- CapDrop:
- - "MKNOD"
- GroupAdd:
- - "newgroup"
- RestartPolicy:
- Name: ""
- MaximumRetryCount: 0
- AutoRemove: true
- NetworkMode: "bridge"
- Devices: []
- Ulimits:
- - {}
- LogConfig:
- Type: "json-file"
- Config: {}
- SecurityOpt: []
- StorageOpt: {}
- CgroupParent: ""
- VolumeDriver: ""
- ShmSize: 67108864
- NetworkingConfig:
- EndpointsConfig:
- isolated_nw:
- IPAMConfig:
- IPv4Address: "172.20.30.33"
- IPv6Address: "2001:db8:abcd::3033"
- LinkLocalIPs:
- - "169.254.34.68"
- - "fe80::3468"
- Links:
- - "container_1"
- - "container_2"
- Aliases:
- - "server_x"
- - "server_y"
-
- required: true
- responses:
- 201:
- description: "Container created successfully"
- schema:
- type: "object"
- title: "ContainerCreateResponse"
- description: "OK response to ContainerCreate operation"
- required: [Id, Warnings]
- properties:
- Id:
- description: "The ID of the created container"
- type: "string"
- x-nullable: false
- Warnings:
- description: "Warnings encountered when creating the container"
- type: "array"
- x-nullable: false
- items:
- type: "string"
- examples:
- application/json:
- Id: "e90e34656806"
- Warnings: []
- 400:
- description: "bad parameter"
- schema:
- $ref: "#/definitions/ErrorResponse"
- 404:
- description: "no such container"
- schema:
- $ref: "#/definitions/ErrorResponse"
- examples:
- application/json:
- message: "No such container: c2ada9df5af8"
- 409:
- description: "conflict"
- schema:
- $ref: "#/definitions/ErrorResponse"
- 500:
- description: "server error"
- schema:
- $ref: "#/definitions/ErrorResponse"
- tags: ["Container"]
- /containers/{id}/json:
- get:
- summary: "Inspect a container"
- description: "Return low-level information about a container."
- operationId: "ContainerInspect"
- produces:
- - "application/json"
- responses:
- 200:
- description: "no error"
- schema:
- type: "object"
- title: "ContainerInspectResponse"
- properties:
- Id:
- description: "The ID of the container"
- type: "string"
- Created:
- description: "The time the container was created"
- type: "string"
- Path:
- description: "The path to the command being run"
- type: "string"
- Args:
- description: "The arguments to the command being run"
- type: "array"
- items:
- type: "string"
- State:
- x-nullable: true
- $ref: "#/definitions/ContainerState"
- Image:
- description: "The container's image ID"
- type: "string"
- ResolvConfPath:
- type: "string"
- HostnamePath:
- type: "string"
- HostsPath:
- type: "string"
- LogPath:
- type: "string"
- Name:
- type: "string"
- RestartCount:
- type: "integer"
- Driver:
- type: "string"
- Platform:
- type: "string"
- MountLabel:
- type: "string"
- ProcessLabel:
- type: "string"
- AppArmorProfile:
- type: "string"
- ExecIDs:
- description: "IDs of exec instances that are running in the container."
- type: "array"
- items:
- type: "string"
- x-nullable: true
- HostConfig:
- $ref: "#/definitions/HostConfig"
- GraphDriver:
- $ref: "#/definitions/GraphDriverData"
- SizeRw:
- description: |
- The size of files that have been created or changed by this
- container.
- type: "integer"
- format: "int64"
- SizeRootFs:
- description: "The total size of all the files in this container."
- type: "integer"
- format: "int64"
- Mounts:
- type: "array"
- items:
- $ref: "#/definitions/MountPoint"
- Config:
- $ref: "#/definitions/ContainerConfig"
- NetworkSettings:
- $ref: "#/definitions/NetworkSettings"
- examples:
- application/json:
- AppArmorProfile: ""
- Args:
- - "-c"
- - "exit 9"
- Config:
- AttachStderr: true
- AttachStdin: false
- AttachStdout: true
- Cmd:
- - "/bin/sh"
- - "-c"
- - "exit 9"
- Domainname: ""
- Env:
- - "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin"
- Healthcheck:
- Test: ["CMD-SHELL", "exit 0"]
- Hostname: "ba033ac44011"
- Image: "ubuntu"
- Labels:
- com.example.vendor: "Acme"
- com.example.license: "GPL"
- com.example.version: "1.0"
- MacAddress: ""
- NetworkDisabled: false
- OpenStdin: false
- StdinOnce: false
- Tty: false
- User: ""
- Volumes:
- /volumes/data: {}
- WorkingDir: ""
- StopSignal: "SIGTERM"
- StopTimeout: 10
- Created: "2015-01-06T15:47:31.485331387Z"
- Driver: "devicemapper"
- ExecIDs:
- - "b35395de42bc8abd327f9dd65d913b9ba28c74d2f0734eeeae84fa1c616a0fca"
- - "3fc1232e5cd20c8de182ed81178503dc6437f4e7ef12b52cc5e8de020652f1c4"
- HostConfig:
- MaximumIOps: 0
- MaximumIOBps: 0
- BlkioWeight: 0
- BlkioWeightDevice:
- - {}
- BlkioDeviceReadBps:
- - {}
- BlkioDeviceWriteBps:
- - {}
- BlkioDeviceReadIOps:
- - {}
- BlkioDeviceWriteIOps:
- - {}
- ContainerIDFile: ""
- CpusetCpus: ""
- CpusetMems: ""
- CpuPercent: 80
- CpuShares: 0
- CpuPeriod: 100000
- CpuRealtimePeriod: 1000000
- CpuRealtimeRuntime: 10000
- Devices: []
- DeviceRequests:
- - Driver: "nvidia"
- Count: -1
- DeviceIDs": ["0", "1", "GPU-fef8089b-4820-abfc-e83e-94318197576e"]
- Capabilities: [["gpu", "nvidia", "compute"]]
- Options:
- property1: "string"
- property2: "string"
- IpcMode: ""
- LxcConf: []
- Memory: 0
- MemorySwap: 0
- MemoryReservation: 0
- KernelMemory: 0
- OomKillDisable: false
- OomScoreAdj: 500
- NetworkMode: "bridge"
- PidMode: ""
- PortBindings: {}
- Privileged: false
- ReadonlyRootfs: false
- PublishAllPorts: false
- RestartPolicy:
- MaximumRetryCount: 2
- Name: "on-failure"
- LogConfig:
- Type: "json-file"
- Sysctls:
- net.ipv4.ip_forward: "1"
- Ulimits:
- - {}
- VolumeDriver: ""
- ShmSize: 67108864
- HostnamePath: "/var/lib/docker/containers/ba033ac4401106a3b513bc9d639eee123ad78ca3616b921167cd74b20e25ed39/hostname"
- HostsPath: "/var/lib/docker/containers/ba033ac4401106a3b513bc9d639eee123ad78ca3616b921167cd74b20e25ed39/hosts"
- LogPath: "/var/lib/docker/containers/1eb5fabf5a03807136561b3c00adcd2992b535d624d5e18b6cdc6a6844d9767b/1eb5fabf5a03807136561b3c00adcd2992b535d624d5e18b6cdc6a6844d9767b-json.log"
- Id: "ba033ac4401106a3b513bc9d639eee123ad78ca3616b921167cd74b20e25ed39"
- Image: "04c5d3b7b0656168630d3ba35d8889bd0e9caafcaeb3004d2bfbc47e7c5d35d2"
- MountLabel: ""
- Name: "/boring_euclid"
- NetworkSettings:
- Bridge: ""
- SandboxID: ""
- HairpinMode: false
- LinkLocalIPv6Address: ""
- LinkLocalIPv6PrefixLen: 0
- SandboxKey: ""
- EndpointID: ""
- Gateway: ""
- GlobalIPv6Address: ""
- GlobalIPv6PrefixLen: 0
- IPAddress: ""
- IPPrefixLen: 0
- IPv6Gateway: ""
- MacAddress: ""
- Networks:
- bridge:
- NetworkID: "7ea29fc1412292a2d7bba362f9253545fecdfa8ce9a6e37dd10ba8bee7129812"
- EndpointID: "7587b82f0dada3656fda26588aee72630c6fab1536d36e394b2bfbcf898c971d"
- Gateway: "172.17.0.1"
- IPAddress: "172.17.0.2"
- IPPrefixLen: 16
- IPv6Gateway: ""
- GlobalIPv6Address: ""
- GlobalIPv6PrefixLen: 0
- MacAddress: "02:42:ac:12:00:02"
- Path: "/bin/sh"
- ProcessLabel: ""
- ResolvConfPath: "/var/lib/docker/containers/ba033ac4401106a3b513bc9d639eee123ad78ca3616b921167cd74b20e25ed39/resolv.conf"
- RestartCount: 1
- State:
- Error: ""
- ExitCode: 9
- FinishedAt: "2015-01-06T15:47:32.080254511Z"
- Health:
- Status: "healthy"
- FailingStreak: 0
- Log:
- - Start: "2019-12-22T10:59:05.6385933Z"
- End: "2019-12-22T10:59:05.8078452Z"
- ExitCode: 0
- Output: ""
- OOMKilled: false
- Dead: false
- Paused: false
- Pid: 0
- Restarting: false
- Running: true
- StartedAt: "2015-01-06T15:47:32.072697474Z"
- Status: "running"
- Mounts:
- - Name: "fac362...80535"
- Source: "/data"
- Destination: "/data"
- Driver: "local"
- Mode: "ro,Z"
- RW: false
- Propagation: ""
- 404:
- description: "no such container"
- schema:
- $ref: "#/definitions/ErrorResponse"
- examples:
- application/json:
- message: "No such container: c2ada9df5af8"
- 500:
- description: "server error"
- schema:
- $ref: "#/definitions/ErrorResponse"
- parameters:
- - name: "id"
- in: "path"
- required: true
- description: "ID or name of the container"
- type: "string"
- - name: "size"
- in: "query"
- type: "boolean"
- default: false
- description: "Return the size of container as fields `SizeRw` and `SizeRootFs`"
- tags: ["Container"]
- /containers/{id}/top:
- get:
- summary: "List processes running inside a container"
- description: |
- On Unix systems, this is done by running the `ps` command. This endpoint
- is not supported on Windows.
- operationId: "ContainerTop"
- responses:
- 200:
- description: "no error"
- schema:
- type: "object"
- title: "ContainerTopResponse"
- description: "OK response to ContainerTop operation"
- properties:
- Titles:
- description: "The ps column titles"
- type: "array"
- items:
- type: "string"
- Processes:
- description: |
- Each process running in the container, where each process
- is an array of values corresponding to the titles.
- type: "array"
- items:
- type: "array"
- items:
- type: "string"
- examples:
- application/json:
- Titles:
- - "UID"
- - "PID"
- - "PPID"
- - "C"
- - "STIME"
- - "TTY"
- - "TIME"
- - "CMD"
- Processes:
- -
- - "root"
- - "13642"
- - "882"
- - "0"
- - "17:03"
- - "pts/0"
- - "00:00:00"
- - "/bin/bash"
- -
- - "root"
- - "13735"
- - "13642"
- - "0"
- - "17:06"
- - "pts/0"
- - "00:00:00"
- - "sleep 10"
- 404:
- description: "no such container"
- schema:
- $ref: "#/definitions/ErrorResponse"
- examples:
- application/json:
- message: "No such container: c2ada9df5af8"
- 500:
- description: "server error"
- schema:
- $ref: "#/definitions/ErrorResponse"
- parameters:
- - name: "id"
- in: "path"
- required: true
- description: "ID or name of the container"
- type: "string"
- - name: "ps_args"
- in: "query"
- description: "The arguments to pass to `ps`. For example, `aux`"
- type: "string"
- default: "-ef"
- tags: ["Container"]
- /containers/{id}/logs:
- get:
- summary: "Get container logs"
- description: |
- Get `stdout` and `stderr` logs from a container.
-
- Note: This endpoint works only for containers with the `json-file` or
- `journald` logging driver.
- operationId: "ContainerLogs"
- responses:
- 200:
- description: |
- logs returned as a stream in response body.
- For the stream format, [see the documentation for the attach endpoint](#operation/ContainerAttach).
- Note that unlike the attach endpoint, the logs endpoint does not
- upgrade the connection and does not set Content-Type.
- schema:
- type: "string"
- format: "binary"
- 404:
- description: "no such container"
- schema:
- $ref: "#/definitions/ErrorResponse"
- examples:
- application/json:
- message: "No such container: c2ada9df5af8"
- 500:
- description: "server error"
- schema:
- $ref: "#/definitions/ErrorResponse"
- parameters:
- - name: "id"
- in: "path"
- required: true
- description: "ID or name of the container"
- type: "string"
- - name: "follow"
- in: "query"
- description: "Keep connection after returning logs."
- type: "boolean"
- default: false
- - name: "stdout"
- in: "query"
- description: "Return logs from `stdout`"
- type: "boolean"
- default: false
- - name: "stderr"
- in: "query"
- description: "Return logs from `stderr`"
- type: "boolean"
- default: false
- - name: "since"
- in: "query"
- description: "Only return logs since this time, as a UNIX timestamp"
- type: "integer"
- default: 0
- - name: "until"
- in: "query"
- description: "Only return logs before this time, as a UNIX timestamp"
- type: "integer"
- default: 0
- - name: "timestamps"
- in: "query"
- description: "Add timestamps to every log line"
- type: "boolean"
- default: false
- - name: "tail"
- in: "query"
- description: |
- Only return this number of log lines from the end of the logs.
- Specify as an integer or `all` to output all log lines.
- type: "string"
- default: "all"
- tags: ["Container"]
- /containers/{id}/changes:
- get:
- summary: "Get changes on a container’s filesystem"
- description: |
- Returns which files in a container's filesystem have been added, deleted,
- or modified. The `Kind` of modification can be one of:
-
- - `0`: Modified
- - `1`: Added
- - `2`: Deleted
- operationId: "ContainerChanges"
- produces: ["application/json"]
- responses:
- 200:
- description: "The list of changes"
- schema:
- type: "array"
- items:
- type: "object"
- x-go-name: "ContainerChangeResponseItem"
- title: "ContainerChangeResponseItem"
- description: "change item in response to ContainerChanges operation"
- required: [Path, Kind]
- properties:
- Path:
- description: "Path to file that has changed"
- type: "string"
- x-nullable: false
- Kind:
- description: "Kind of change"
- type: "integer"
- format: "uint8"
- enum: [0, 1, 2]
- x-nullable: false
- examples:
- application/json:
- - Path: "/dev"
- Kind: 0
- - Path: "/dev/kmsg"
- Kind: 1
- - Path: "/test"
- Kind: 1
- 404:
- description: "no such container"
- schema:
- $ref: "#/definitions/ErrorResponse"
- examples:
- application/json:
- message: "No such container: c2ada9df5af8"
- 500:
- description: "server error"
- schema:
- $ref: "#/definitions/ErrorResponse"
- parameters:
- - name: "id"
- in: "path"
- required: true
- description: "ID or name of the container"
- type: "string"
- tags: ["Container"]
- /containers/{id}/export:
- get:
- summary: "Export a container"
- description: "Export the contents of a container as a tarball."
- operationId: "ContainerExport"
- produces:
- - "application/octet-stream"
- responses:
- 200:
- description: "no error"
- 404:
- description: "no such container"
- schema:
- $ref: "#/definitions/ErrorResponse"
- examples:
- application/json:
- message: "No such container: c2ada9df5af8"
- 500:
- description: "server error"
- schema:
- $ref: "#/definitions/ErrorResponse"
- parameters:
- - name: "id"
- in: "path"
- required: true
- description: "ID or name of the container"
- type: "string"
- tags: ["Container"]
- /containers/{id}/stats:
- get:
- summary: "Get container stats based on resource usage"
- description: |
- This endpoint returns a live stream of a container’s resource usage
- statistics.
-
- The `precpu_stats` is the CPU statistic of the *previous* read, and is
- used to calculate the CPU usage percentage. It is not an exact copy
- of the `cpu_stats` field.
-
- If either `precpu_stats.online_cpus` or `cpu_stats.online_cpus` is
- nil then for compatibility with older daemons the length of the
- corresponding `cpu_usage.percpu_usage` array should be used.
-
- On a cgroup v2 host, the following fields are not set
- * `blkio_stats`: all fields other than `io_service_bytes_recursive`
- * `cpu_stats`: `cpu_usage.percpu_usage`
- * `memory_stats`: `max_usage` and `failcnt`
- Also, `memory_stats.stats` fields are incompatible with cgroup v1.
-
- To calculate the values shown by the `stats` command of the docker cli tool
- the following formulas can be used:
- * used_memory = `memory_stats.usage - memory_stats.stats.cache`
- * available_memory = `memory_stats.limit`
- * Memory usage % = `(used_memory / available_memory) * 100.0`
- * cpu_delta = `cpu_stats.cpu_usage.total_usage - precpu_stats.cpu_usage.total_usage`
- * system_cpu_delta = `cpu_stats.system_cpu_usage - precpu_stats.system_cpu_usage`
- * number_cpus = `length(cpu_stats.cpu_usage.percpu_usage)` or `cpu_stats.online_cpus`
- * CPU usage % = `(cpu_delta / system_cpu_delta) * number_cpus * 100.0`
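As a rough illustration of the formulas above, the following Go sketch decodes one stats sample and derives the same memory and CPU percentages. The JSON literal reuses numbers from the example response above; error handling and the cgroup v2 differences to `memory_stats.stats` are glossed over.

```go
package main

import (
	"encoding/json"
	"fmt"
)

// Only the fields needed for the formulas above.
type cpuUsage struct {
	TotalUsage  uint64   `json:"total_usage"`
	PercpuUsage []uint64 `json:"percpu_usage"`
}

type cpuStats struct {
	CPUUsage       cpuUsage `json:"cpu_usage"`
	SystemCPUUsage uint64   `json:"system_cpu_usage"`
	OnlineCPUs     uint64   `json:"online_cpus"`
}

type memoryStats struct {
	Usage uint64            `json:"usage"`
	Limit uint64            `json:"limit"`
	Stats map[string]uint64 `json:"stats"`
}

type statsSample struct {
	CPUStats    cpuStats    `json:"cpu_stats"`
	PreCPUStats cpuStats    `json:"precpu_stats"`
	MemoryStats memoryStats `json:"memory_stats"`
}

func main() {
	var s statsSample
	if err := json.Unmarshal(rawSample, &s); err != nil {
		panic(err)
	}

	// used_memory = memory_stats.usage - memory_stats.stats.cache
	usedMemory := s.MemoryStats.Usage - s.MemoryStats.Stats["cache"]
	memPercent := float64(usedMemory) / float64(s.MemoryStats.Limit) * 100.0

	// Deltas between the current and previous read.
	cpuDelta := float64(s.CPUStats.CPUUsage.TotalUsage - s.PreCPUStats.CPUUsage.TotalUsage)
	systemDelta := float64(s.CPUStats.SystemCPUUsage - s.PreCPUStats.SystemCPUUsage)

	// Prefer online_cpus; fall back to the percpu_usage length for older daemons.
	numCPUs := float64(s.CPUStats.OnlineCPUs)
	if numCPUs == 0 {
		numCPUs = float64(len(s.CPUStats.CPUUsage.PercpuUsage))
	}
	cpuPercent := cpuDelta / systemDelta * numCPUs * 100.0

	fmt.Printf("memory %.2f%%, cpu %.2f%%\n", memPercent, cpuPercent)
}

// rawSample stands in for one JSON object read from /containers/{id}/stats.
var rawSample = []byte(`{
  "memory_stats": {"usage": 6537216, "limit": 67108864, "stats": {"cache": 0}},
  "cpu_stats":    {"cpu_usage": {"total_usage": 100215355}, "system_cpu_usage": 739306590000000, "online_cpus": 4},
  "precpu_stats": {"cpu_usage": {"total_usage": 100093996}, "system_cpu_usage": 9492140000000, "online_cpus": 4}
}`)
```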
- operationId: "ContainerStats"
- produces: ["application/json"]
- responses:
- 200:
- description: "no error"
- schema:
- type: "object"
- examples:
- application/json:
- read: "2015-01-08T22:57:31.547920715Z"
- pids_stats:
- current: 3
- networks:
- eth0:
- rx_bytes: 5338
- rx_dropped: 0
- rx_errors: 0
- rx_packets: 36
- tx_bytes: 648
- tx_dropped: 0
- tx_errors: 0
- tx_packets: 8
- eth5:
- rx_bytes: 4641
- rx_dropped: 0
- rx_errors: 0
- rx_packets: 26
- tx_bytes: 690
- tx_dropped: 0
- tx_errors: 0
- tx_packets: 9
- memory_stats:
- stats:
- total_pgmajfault: 0
- cache: 0
- mapped_file: 0
- total_inactive_file: 0
- pgpgout: 414
- rss: 6537216
- total_mapped_file: 0
- writeback: 0
- unevictable: 0
- pgpgin: 477
- total_unevictable: 0
- pgmajfault: 0
- total_rss: 6537216
- total_rss_huge: 6291456
- total_writeback: 0
- total_inactive_anon: 0
- rss_huge: 6291456
- hierarchical_memory_limit: 67108864
- total_pgfault: 964
- total_active_file: 0
- active_anon: 6537216
- total_active_anon: 6537216
- total_pgpgout: 414
- total_cache: 0
- inactive_anon: 0
- active_file: 0
- pgfault: 964
- inactive_file: 0
- total_pgpgin: 477
- max_usage: 6651904
- usage: 6537216
- failcnt: 0
- limit: 67108864
- blkio_stats: {}
- cpu_stats:
- cpu_usage:
- percpu_usage:
- - 8646879
- - 24472255
- - 36438778
- - 30657443
- usage_in_usermode: 50000000
- total_usage: 100215355
- usage_in_kernelmode: 30000000
- system_cpu_usage: 739306590000000
- online_cpus: 4
- throttling_data:
- periods: 0
- throttled_periods: 0
- throttled_time: 0
- precpu_stats:
- cpu_usage:
- percpu_usage:
- - 8646879
- - 24350896
- - 36438778
- - 30657443
- usage_in_usermode: 50000000
- total_usage: 100093996
- usage_in_kernelmode: 30000000
- system_cpu_usage: 9492140000000
- online_cpus: 4
- throttling_data:
- periods: 0
- throttled_periods: 0
- throttled_time: 0
- 404:
- description: "no such container"
- schema:
- $ref: "#/definitions/ErrorResponse"
- examples:
- application/json:
- message: "No such container: c2ada9df5af8"
- 500:
- description: "server error"
- schema:
- $ref: "#/definitions/ErrorResponse"
- parameters:
- - name: "id"
- in: "path"
- required: true
- description: "ID or name of the container"
- type: "string"
- - name: "stream"
- in: "query"
- description: |
- Stream the output. If false, the stats will be output once and then
- it will disconnect.
- type: "boolean"
- default: true
- - name: "one-shot"
- in: "query"
- description: |
- Only get a single stat instead of waiting for 2 cycles. Must be used
- with `stream=false`.
- type: "boolean"
- default: false
- tags: ["Container"]
- /containers/{id}/resize:
- post:
- summary: "Resize a container TTY"
- description: "Resize the TTY for a container."
- operationId: "ContainerResize"
- consumes:
- - "application/octet-stream"
- produces:
- - "text/plain"
- responses:
- 200:
- description: "no error"
- 404:
- description: "no such container"
- schema:
- $ref: "#/definitions/ErrorResponse"
- examples:
- application/json:
- message: "No such container: c2ada9df5af8"
- 500:
- description: "cannot resize container"
- schema:
- $ref: "#/definitions/ErrorResponse"
- parameters:
- - name: "id"
- in: "path"
- required: true
- description: "ID or name of the container"
- type: "string"
- - name: "h"
- in: "query"
- description: "Height of the TTY session in characters"
- type: "integer"
- - name: "w"
- in: "query"
- description: "Width of the TTY session in characters"
- type: "integer"
- tags: ["Container"]
- /containers/{id}/start:
- post:
- summary: "Start a container"
- operationId: "ContainerStart"
- responses:
- 204:
- description: "no error"
- 304:
- description: "container already started"
- 404:
- description: "no such container"
- schema:
- $ref: "#/definitions/ErrorResponse"
- examples:
- application/json:
- message: "No such container: c2ada9df5af8"
- 500:
- description: "server error"
- schema:
- $ref: "#/definitions/ErrorResponse"
- parameters:
- - name: "id"
- in: "path"
- required: true
- description: "ID or name of the container"
- type: "string"
- - name: "detachKeys"
- in: "query"
- description: |
- Override the key sequence for detaching a container. Format is a
- single character `[a-Z]` or `ctrl-<value>` where `<value>` is one
- of: `a-z`, `@`, `^`, `[`, `,` or `_`.
- type: "string"
- tags: ["Container"]
- /containers/{id}/stop:
- post:
- summary: "Stop a container"
- operationId: "ContainerStop"
- responses:
- 204:
- description: "no error"
- 304:
- description: "container already stopped"
- 404:
- description: "no such container"
- schema:
- $ref: "#/definitions/ErrorResponse"
- examples:
- application/json:
- message: "No such container: c2ada9df5af8"
- 500:
- description: "server error"
- schema:
- $ref: "#/definitions/ErrorResponse"
- parameters:
- - name: "id"
- in: "path"
- required: true
- description: "ID or name of the container"
- type: "string"
- - name: "t"
- in: "query"
- description: "Number of seconds to wait before killing the container"
- type: "integer"
- tags: ["Container"]
- /containers/{id}/restart:
- post:
- summary: "Restart a container"
- operationId: "ContainerRestart"
- responses:
- 204:
- description: "no error"
- 404:
- description: "no such container"
- schema:
- $ref: "#/definitions/ErrorResponse"
- examples:
- application/json:
- message: "No such container: c2ada9df5af8"
- 500:
- description: "server error"
- schema:
- $ref: "#/definitions/ErrorResponse"
- parameters:
- - name: "id"
- in: "path"
- required: true
- description: "ID or name of the container"
- type: "string"
- - name: "t"
- in: "query"
- description: "Number of seconds to wait before killing the container"
- type: "integer"
- tags: ["Container"]
- /containers/{id}/kill:
- post:
- summary: "Kill a container"
- description: |
- Send a POSIX signal to a container, defaulting to killing the
- container.
- operationId: "ContainerKill"
- responses:
- 204:
- description: "no error"
- 404:
- description: "no such container"
- schema:
- $ref: "#/definitions/ErrorResponse"
- examples:
- application/json:
- message: "No such container: c2ada9df5af8"
- 409:
- description: "container is not running"
- schema:
- $ref: "#/definitions/ErrorResponse"
- examples:
- application/json:
- message: "Container d37cde0fe4ad63c3a7252023b2f9800282894247d145cb5933ddf6e52cc03a28 is not running"
- 500:
- description: "server error"
- schema:
- $ref: "#/definitions/ErrorResponse"
- parameters:
- - name: "id"
- in: "path"
- required: true
- description: "ID or name of the container"
- type: "string"
- - name: "signal"
- in: "query"
- description: "Signal to send to the container as an integer or string (e.g. `SIGINT`)"
- type: "string"
- default: "SIGKILL"
- tags: ["Container"]
- /containers/{id}/update:
- post:
- summary: "Update a container"
- description: |
- Change various configuration options of a container without having to
- recreate it.
- operationId: "ContainerUpdate"
- consumes: ["application/json"]
- produces: ["application/json"]
- responses:
- 200:
- description: "The container has been updated."
- schema:
- type: "object"
- title: "ContainerUpdateResponse"
- description: "OK response to ContainerUpdate operation"
- properties:
- Warnings:
- type: "array"
- items:
- type: "string"
- 404:
- description: "no such container"
- schema:
- $ref: "#/definitions/ErrorResponse"
- examples:
- application/json:
- message: "No such container: c2ada9df5af8"
- 500:
- description: "server error"
- schema:
- $ref: "#/definitions/ErrorResponse"
- parameters:
- - name: "id"
- in: "path"
- required: true
- description: "ID or name of the container"
- type: "string"
- - name: "update"
- in: "body"
- required: true
- schema:
- allOf:
- - $ref: "#/definitions/Resources"
- - type: "object"
- properties:
- RestartPolicy:
- $ref: "#/definitions/RestartPolicy"
- example:
- BlkioWeight: 300
- CpuShares: 512
- CpuPeriod: 100000
- CpuQuota: 50000
- CpuRealtimePeriod: 1000000
- CpuRealtimeRuntime: 10000
- CpusetCpus: "0,1"
- CpusetMems: "0"
- Memory: 314572800
- MemorySwap: 514288000
- MemoryReservation: 209715200
- KernelMemory: 52428800
- RestartPolicy:
- MaximumRetryCount: 4
- Name: "on-failure"
- tags: ["Container"]
- /containers/{id}/rename:
- post:
- summary: "Rename a container"
- operationId: "ContainerRename"
- responses:
- 204:
- description: "no error"
- 404:
- description: "no such container"
- schema:
- $ref: "#/definitions/ErrorResponse"
- examples:
- application/json:
- message: "No such container: c2ada9df5af8"
- 409:
- description: "name already in use"
- schema:
- $ref: "#/definitions/ErrorResponse"
- 500:
- description: "server error"
- schema:
- $ref: "#/definitions/ErrorResponse"
- parameters:
- - name: "id"
- in: "path"
- required: true
- description: "ID or name of the container"
- type: "string"
- - name: "name"
- in: "query"
- required: true
- description: "New name for the container"
- type: "string"
- tags: ["Container"]
- /containers/{id}/pause:
- post:
- summary: "Pause a container"
- description: |
- Use the freezer cgroup to suspend all processes in a container.
-
- Traditionally, when suspending a process the `SIGSTOP` signal is used,
- which is observable by the process being suspended. With the freezer
- cgroup the process is unaware of, and unable to capture, the fact
- that it is being suspended and subsequently resumed.
- operationId: "ContainerPause"
- responses:
- 204:
- description: "no error"
- 404:
- description: "no such container"
- schema:
- $ref: "#/definitions/ErrorResponse"
- examples:
- application/json:
- message: "No such container: c2ada9df5af8"
- 500:
- description: "server error"
- schema:
- $ref: "#/definitions/ErrorResponse"
- parameters:
- - name: "id"
- in: "path"
- required: true
- description: "ID or name of the container"
- type: "string"
- tags: ["Container"]
- /containers/{id}/unpause:
- post:
- summary: "Unpause a container"
- description: "Resume a container which has been paused."
- operationId: "ContainerUnpause"
- responses:
- 204:
- description: "no error"
- 404:
- description: "no such container"
- schema:
- $ref: "#/definitions/ErrorResponse"
- examples:
- application/json:
- message: "No such container: c2ada9df5af8"
- 500:
- description: "server error"
- schema:
- $ref: "#/definitions/ErrorResponse"
- parameters:
- - name: "id"
- in: "path"
- required: true
- description: "ID or name of the container"
- type: "string"
- tags: ["Container"]
- /containers/{id}/attach:
- post:
- summary: "Attach to a container"
- description: |
- Attach to a container to read its output or send it input. You can attach
- to the same container multiple times and you can reattach to containers
- that have been detached.
-
- Either the `stream` or `logs` parameter must be `true` for this endpoint
- to do anything.
-
- See the [documentation for the `docker attach` command](https://docs.docker.com/engine/reference/commandline/attach/)
- for more details.
-
- ### Hijacking
-
- This endpoint hijacks the HTTP connection to transport `stdin`, `stdout`,
- and `stderr` on the same socket.
-
- This is the response from the daemon for an attach request:
-
- ```
- HTTP/1.1 200 OK
- Content-Type: application/vnd.docker.raw-stream
-
- [STREAM]
- ```
-
- After the headers and two new lines, the TCP connection can now be used
- for raw, bidirectional communication between the client and server.
-
- To hint potential proxies about connection hijacking, the Docker client
- can also optionally send connection upgrade headers.
-
- For example, the client sends this request to upgrade the connection:
-
- ```
- POST /containers/16253994b7c4/attach?stream=1&stdout=1 HTTP/1.1
- Upgrade: tcp
- Connection: Upgrade
- ```
-
- The Docker daemon will respond with a `101 UPGRADED` response, and will
- similarly follow with the raw stream:
-
- ```
- HTTP/1.1 101 UPGRADED
- Content-Type: application/vnd.docker.raw-stream
- Connection: Upgrade
- Upgrade: tcp
-
- [STREAM]
- ```
-
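A minimal Go sketch of issuing that upgrade request by hand over the daemon's Unix socket and then reading the raw stream from the same connection. The socket path is an assumption; the container ID is the one from the example request above, and a `101` status is expected when the daemon honours the upgrade.

```go
package main

import (
	"bufio"
	"fmt"
	"io"
	"net"
	"net/http"
	"os"
)

func main() {
	// Illustrative socket path; adjust for the daemon being targeted.
	conn, err := net.Dial("unix", "/var/run/docker.sock")
	if err != nil {
		panic(err)
	}
	defer conn.Close()

	req, err := http.NewRequest("POST",
		"http://localhost/containers/16253994b7c4/attach?stream=1&stdout=1", nil)
	if err != nil {
		panic(err)
	}
	req.Header.Set("Upgrade", "tcp")
	req.Header.Set("Connection", "Upgrade")

	// Write the request in wire format and parse the response ourselves.
	if err := req.Write(conn); err != nil {
		panic(err)
	}
	br := bufio.NewReader(conn)
	resp, err := http.ReadResponse(br, req)
	if err != nil {
		panic(err)
	}
	fmt.Fprintln(os.Stderr, "daemon answered:", resp.Status) // expect 101 UPGRADED

	// From here on the connection carries the raw (or multiplexed) stream.
	io.Copy(os.Stdout, br)
}
```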
- ### Stream format
-
- When the TTY setting is disabled in [`POST /containers/create`](#operation/ContainerCreate),
- the stream over the hijacked connection is multiplexed to separate out
- `stdout` and `stderr`. The stream consists of a series of frames, each
- containing a header and a payload.
-
- The header indicates which stream the frame belongs to (`stdout` or
- `stderr`). It also contains the size of the associated frame, encoded
- as a `uint32` in the last four bytes.
-
- It is encoded on the first eight bytes like this:
-
- ```go
- header := [8]byte{STREAM_TYPE, 0, 0, 0, SIZE1, SIZE2, SIZE3, SIZE4}
- ```
-
- `STREAM_TYPE` can be:
-
- - 0: `stdin` (is written on `stdout`)
- - 1: `stdout`
- - 2: `stderr`
-
- `SIZE1, SIZE2, SIZE3, SIZE4` are the four bytes of the `uint32` size
- encoded as big endian.
-
- Following the header is the payload, which contains the specified
- number of bytes belonging to that stream.
-
- The simplest way to implement this protocol is the following:
-
- 1. Read 8 bytes.
- 2. Choose `stdout` or `stderr` depending on the first byte.
- 3. Extract the frame size from the last four bytes.
- 4. Read the extracted size and output it on the correct output.
- 5. Goto 1.
-
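Following those five steps, a short Go sketch that demultiplexes such a stream from any `io.Reader` (for example the hijacked connection) into `stdout` and `stderr`; the two frames in `main` are fabricated test data.

```go
package main

import (
	"encoding/binary"
	"io"
	"os"
	"strings"
)

// demux reads 8-byte frame headers and copies each payload to the
// destination indicated by the first header byte.
func demux(r io.Reader, stdout, stderr io.Writer) error {
	header := make([]byte, 8)
	for {
		if _, err := io.ReadFull(r, header); err != nil {
			if err == io.EOF {
				return nil
			}
			return err
		}
		size := binary.BigEndian.Uint32(header[4:8])

		var dst io.Writer
		switch header[0] {
		case 0, 1: // stdin (echoed on stdout) and stdout
			dst = stdout
		case 2: // stderr
			dst = stderr
		default:
			dst = io.Discard
		}
		if _, err := io.CopyN(dst, r, int64(size)); err != nil {
			return err
		}
	}
}

func main() {
	// A fabricated two-frame stream: "hello\n" on stdout, "oops\n" on stderr.
	frames := string([]byte{1, 0, 0, 0, 0, 0, 0, 6}) + "hello\n" +
		string([]byte{2, 0, 0, 0, 0, 0, 0, 5}) + "oops\n"
	if err := demux(strings.NewReader(frames), os.Stdout, os.Stderr); err != nil {
		panic(err)
	}
}
```

The same framing applies to the logs endpoint for containers created without a TTY, since that endpoint refers back to this stream format.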
- ### Stream format when using a TTY
-
- When the TTY setting is enabled in [`POST /containers/create`](#operation/ContainerCreate),
- the stream is not multiplexed. The data exchanged over the hijacked
- connection is simply the raw data from the process PTY and client's
- `stdin`.
-
- operationId: "ContainerAttach"
- produces:
- - "application/vnd.docker.raw-stream"
- responses:
- 101:
- description: "no error, hints proxy about hijacking"
- 200:
- description: "no error, no upgrade header found"
- 400:
- description: "bad parameter"
- schema:
- $ref: "#/definitions/ErrorResponse"
- 404:
- description: "no such container"
- schema:
- $ref: "#/definitions/ErrorResponse"
- examples:
- application/json:
- message: "No such container: c2ada9df5af8"
- 500:
- description: "server error"
- schema:
- $ref: "#/definitions/ErrorResponse"
- parameters:
- - name: "id"
- in: "path"
- required: true
- description: "ID or name of the container"
- type: "string"
- - name: "detachKeys"
- in: "query"
- description: |
- Override the key sequence for detaching a container. Format is a single
- character `[a-Z]` or `ctrl-<value>` where `<value>` is one of: `a-z`,
- `@`, `^`, `[`, `,` or `_`.
- type: "string"
- - name: "logs"
- in: "query"
- description: |
- Replay previous logs from the container.
-
- This is useful when attaching to a container that has already started
- and you want to output everything since the container started.
-
- If `stream` is also enabled, once all the previous output has been
- returned, it will seamlessly transition into streaming current
- output.
- type: "boolean"
- default: false
- - name: "stream"
- in: "query"
- description: |
- Stream attached streams from the time the request was made onwards.
- type: "boolean"
- default: false
- - name: "stdin"
- in: "query"
- description: "Attach to `stdin`"
- type: "boolean"
- default: false
- - name: "stdout"
- in: "query"
- description: "Attach to `stdout`"
- type: "boolean"
- default: false
- - name: "stderr"
- in: "query"
- description: "Attach to `stderr`"
- type: "boolean"
- default: false
- tags: ["Container"]
- /containers/{id}/attach/ws:
- get:
- summary: "Attach to a container via a websocket"
- operationId: "ContainerAttachWebsocket"
- responses:
- 101:
- description: "no error, hints proxy about hijacking"
- 200:
- description: "no error, no upgrade header found"
- 400:
- description: "bad parameter"
- schema:
- $ref: "#/definitions/ErrorResponse"
- 404:
- description: "no such container"
- schema:
- $ref: "#/definitions/ErrorResponse"
- examples:
- application/json:
- message: "No such container: c2ada9df5af8"
- 500:
- description: "server error"
- schema:
- $ref: "#/definitions/ErrorResponse"
- parameters:
- - name: "id"
- in: "path"
- required: true
- description: "ID or name of the container"
- type: "string"
- - name: "detachKeys"
- in: "query"
- description: |
- Override the key sequence for detaching a container. Format is a single
- character `[a-Z]` or `ctrl-<value>` where `<value>` is one of: `a-z`,
- `@`, `^`, `[`, `,`, or `_`.
- type: "string"
- - name: "logs"
- in: "query"
- description: "Return logs"
- type: "boolean"
- default: false
- - name: "stream"
- in: "query"
- description: "Return stream"
- type: "boolean"
- default: false
- - name: "stdin"
- in: "query"
- description: "Attach to `stdin`"
- type: "boolean"
- default: false
- - name: "stdout"
- in: "query"
- description: "Attach to `stdout`"
- type: "boolean"
- default: false
- - name: "stderr"
- in: "query"
- description: "Attach to `stderr`"
- type: "boolean"
- default: false
- tags: ["Container"]
- /containers/{id}/wait:
- post:
- summary: "Wait for a container"
- description: "Block until a container stops, then returns the exit code."
- operationId: "ContainerWait"
- produces: ["application/json"]
- responses:
- 200:
- description: "The container has exit."
- schema:
- type: "object"
- title: "ContainerWaitResponse"
- description: "OK response to ContainerWait operation"
- required: [StatusCode]
- properties:
- StatusCode:
- description: "Exit code of the container"
- type: "integer"
- x-nullable: false
- Error:
- description: "container waiting error, if any"
- type: "object"
- properties:
- Message:
- description: "Details of an error"
- type: "string"
- 404:
- description: "no such container"
- schema:
- $ref: "#/definitions/ErrorResponse"
- examples:
- application/json:
- message: "No such container: c2ada9df5af8"
- 500:
- description: "server error"
- schema:
- $ref: "#/definitions/ErrorResponse"
- parameters:
- - name: "id"
- in: "path"
- required: true
- description: "ID or name of the container"
- type: "string"
- - name: "condition"
- in: "query"
- description: |
- Wait until a container state reaches the given condition, either
- 'not-running' (default), 'next-exit', or 'removed'.
- type: "string"
- default: "not-running"
- tags: ["Container"]
- /containers/{id}:
- delete:
- summary: "Remove a container"
- operationId: "ContainerDelete"
- responses:
- 204:
- description: "no error"
- 400:
- description: "bad parameter"
- schema:
- $ref: "#/definitions/ErrorResponse"
- 404:
- description: "no such container"
- schema:
- $ref: "#/definitions/ErrorResponse"
- examples:
- application/json:
- message: "No such container: c2ada9df5af8"
- 409:
- description: "conflict"
- schema:
- $ref: "#/definitions/ErrorResponse"
- examples:
- application/json:
- message: |
- You cannot remove a running container: c2ada9df5af8. Stop the
- container before attempting removal or force remove
- 500:
- description: "server error"
- schema:
- $ref: "#/definitions/ErrorResponse"
- parameters:
- - name: "id"
- in: "path"
- required: true
- description: "ID or name of the container"
- type: "string"
- - name: "v"
- in: "query"
- description: "Remove anonymous volumes associated with the container."
- type: "boolean"
- default: false
- - name: "force"
- in: "query"
- description: "If the container is running, kill it before removing it."
- type: "boolean"
- default: false
- - name: "link"
- in: "query"
- description: "Remove the specified link associated with the container."
- type: "boolean"
- default: false
- tags: ["Container"]
- /containers/{id}/archive:
- head:
- summary: "Get information about files in a container"
- description: |
- A response header `X-Docker-Container-Path-Stat` is returned, containing
- a base64-encoded JSON object with some filesystem header information
- about the path.
- operationId: "ContainerArchiveInfo"
- responses:
- 200:
- description: "no error"
- headers:
- X-Docker-Container-Path-Stat:
- type: "string"
- description: |
- A base64-encoded JSON object with some filesystem header
- information about the path
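A small Go sketch of decoding that header; the standard base64 alphabet is assumed, and since this spec does not enumerate the stat fields, the sketch decodes into a generic map. The sample header value in `main` is fabricated for illustration.

```go
package main

import (
	"encoding/base64"
	"encoding/json"
	"fmt"
)

// decodePathStat turns the X-Docker-Container-Path-Stat header value into
// the filesystem header information it carries.
func decodePathStat(header string) (map[string]interface{}, error) {
	raw, err := base64.StdEncoding.DecodeString(header)
	if err != nil {
		return nil, err
	}
	var stat map[string]interface{}
	if err := json.Unmarshal(raw, &stat); err != nil {
		return nil, err
	}
	return stat, nil
}

func main() {
	// Fabricated header value equivalent to {"name":"data","size":4096}.
	encoded := base64.StdEncoding.EncodeToString([]byte(`{"name":"data","size":4096}`))
	stat, err := decodePathStat(encoded)
	if err != nil {
		panic(err)
	}
	fmt.Println(stat["name"], stat["size"])
}
```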
- 400:
- description: "Bad parameter"
- schema:
- allOf:
- - $ref: "#/definitions/ErrorResponse"
- - type: "object"
- properties:
- message:
- description: |
- The error message. Either "must specify path parameter"
- (path cannot be empty) or "not a directory" (path was
- asserted to be a directory but exists as a file).
- type: "string"
- x-nullable: false
- 404:
- description: "Container or path does not exist"
- schema:
- $ref: "#/definitions/ErrorResponse"
- examples:
- application/json:
- message: "No such container: c2ada9df5af8"
- 500:
- description: "Server error"
- schema:
- $ref: "#/definitions/ErrorResponse"
- parameters:
- - name: "id"
- in: "path"
- required: true
- description: "ID or name of the container"
- type: "string"
- - name: "path"
- in: "query"
- required: true
- description: "Resource in the container’s filesystem to archive."
- type: "string"
- tags: ["Container"]
- get:
- summary: "Get an archive of a filesystem resource in a container"
- description: "Get a tar archive of a resource in the filesystem of container id."
- operationId: "ContainerArchive"
- produces: ["application/x-tar"]
- responses:
- 200:
- description: "no error"
- 400:
- description: "Bad parameter"
- schema:
- allOf:
- - $ref: "#/definitions/ErrorResponse"
- - type: "object"
- properties:
- message:
- description: |
- The error message. Either "must specify path parameter"
- (path cannot be empty) or "not a directory" (path was
- asserted to be a directory but exists as a file).
- type: "string"
- x-nullable: false
- 404:
- description: "Container or path does not exist"
- schema:
- $ref: "#/definitions/ErrorResponse"
- examples:
- application/json:
- message: "No such container: c2ada9df5af8"
- 500:
- description: "server error"
- schema:
- $ref: "#/definitions/ErrorResponse"
- parameters:
- - name: "id"
- in: "path"
- required: true
- description: "ID or name of the container"
- type: "string"
- - name: "path"
- in: "query"
- required: true
- description: "Resource in the container’s filesystem to archive."
- type: "string"
- tags: ["Container"]
- put:
- summary: "Extract an archive of files or folders to a directory in a container"
- description: "Upload a tar archive to be extracted to a path in the filesystem of container id."
- operationId: "PutContainerArchive"
- consumes: ["application/x-tar", "application/octet-stream"]
- responses:
- 200:
- description: "The content was extracted successfully"
- 400:
- description: "Bad parameter"
- schema:
- $ref: "#/definitions/ErrorResponse"
- 403:
- description: "Permission denied, the volume or container rootfs is marked as read-only."
- schema:
- $ref: "#/definitions/ErrorResponse"
- 404:
- description: "No such container or path does not exist inside the container"
- schema:
- $ref: "#/definitions/ErrorResponse"
- examples:
- application/json:
- message: "No such container: c2ada9df5af8"
- 500:
- description: "Server error"
- schema:
- $ref: "#/definitions/ErrorResponse"
- parameters:
- - name: "id"
- in: "path"
- required: true
- description: "ID or name of the container"
- type: "string"
- - name: "path"
- in: "query"
- required: true
- description: "Path to a directory in the container to extract the archive’s contents into. "
- type: "string"
- - name: "noOverwriteDirNonDir"
- in: "query"
- description: |
- If `1`, `true`, or `True` then it will be an error if unpacking the
- given content would cause an existing directory to be replaced with
- a non-directory and vice versa.
- type: "string"
- - name: "copyUIDGID"
- in: "query"
- description: |
- If `1` or `true`, then it will copy UID/GID maps to the destination
- file or directory.
- type: "string"
- - name: "inputStream"
- in: "body"
- required: true
- description: |
- The input stream must be a tar archive compressed with one of the
- following algorithms: `identity` (no compression), `gzip`, `bzip2`,
- or `xz`.
- schema:
- type: "string"
- format: "binary"
- tags: ["Container"]
- /containers/prune:
- post:
- summary: "Delete stopped containers"
- produces:
- - "application/json"
- operationId: "ContainerPrune"
- parameters:
- - name: "filters"
- in: "query"
- description: |
- Filters to process on the prune list, encoded as JSON (a `map[string][]string`).
-
- Available filters:
- - `until=<timestamp>` Prune containers created before this timestamp. The `<timestamp>` can be Unix timestamps, date formatted timestamps, or Go duration strings (e.g. `10m`, `1h30m`) computed relative to the daemon machine’s time.
- - `label` (`label=<key>`, `label=<key>=<value>`, `label!=<key>`, or `label!=<key>=<value>`) Prune containers with (or without, in case `label!=...` is used) the specified labels.
- type: "string"
- responses:
- 200:
- description: "No error"
- schema:
- type: "object"
- title: "ContainerPruneResponse"
- properties:
- ContainersDeleted:
- description: "Container IDs that were deleted"
- type: "array"
- items:
- type: "string"
- SpaceReclaimed:
- description: "Disk space reclaimed in bytes"
- type: "integer"
- format: "int64"
- 500:
- description: "Server error"
- schema:
- $ref: "#/definitions/ErrorResponse"
- tags: ["Container"]
- /images/json:
- get:
- summary: "List Images"
- description: "Returns a list of images on the server. Note that it uses a different, smaller representation of an image than inspecting a single image."
- operationId: "ImageList"
- produces:
- - "application/json"
- responses:
- 200:
- description: "Summary image data for the images matching the query"
- schema:
- type: "array"
- items:
- $ref: "#/definitions/ImageSummary"
- examples:
- application/json:
- - Id: "sha256:e216a057b1cb1efc11f8a268f37ef62083e70b1b38323ba252e25ac88904a7e8"
- ParentId: ""
- RepoTags:
- - "ubuntu:12.04"
- - "ubuntu:precise"
- RepoDigests:
- - "ubuntu@sha256:992069aee4016783df6345315302fa59681aae51a8eeb2f889dea59290f21787"
- Created: 1474925151
- Size: 103579269
- VirtualSize: 103579269
- SharedSize: 0
- Labels: {}
- Containers: 2
- - Id: "sha256:3e314f95dcace0f5e4fd37b10862fe8398e3c60ed36600bc0ca5fda78b087175"
- ParentId: ""
- RepoTags:
- - "ubuntu:12.10"
- - "ubuntu:quantal"
- RepoDigests:
- - "ubuntu@sha256:002fba3e3255af10be97ea26e476692a7ebed0bb074a9ab960b2e7a1526b15d7"
- - "ubuntu@sha256:68ea0200f0b90df725d99d823905b04cf844f6039ef60c60bf3e019915017bd3"
- Created: 1403128455
- Size: 172064416
- VirtualSize: 172064416
- SharedSize: 0
- Labels: {}
- Containers: 5
- 500:
- description: "server error"
- schema:
- $ref: "#/definitions/ErrorResponse"
- parameters:
- - name: "all"
- in: "query"
- description: "Show all images. Only images from a final layer (no children) are shown by default."
- type: "boolean"
- default: false
- - name: "filters"
- in: "query"
- description: |
- A JSON encoded value of the filters (a `map[string][]string`) to
- process on the images list.
-
- Available filters:
-
- - `before`=(`<image-name>[:<tag>]`, `<image id>` or `<image@digest>`)
- - `dangling=true`
- - `label=key` or `label="key=value"` of an image label
- - `reference`=(`<image-name>[:<tag>]`)
- - `since`=(`<image-name>[:<tag>]`, `<image id>` or `<image@digest>`)
- type: "string"
- - name: "digests"
- in: "query"
- description: "Show digest information as a `RepoDigests` field on each image."
- type: "boolean"
- default: false
- tags: ["Image"]
- /build:
- post:
- summary: "Build an image"
- description: |
- Build an image from a tar archive with a `Dockerfile` in it.
-
- The `Dockerfile` specifies how the image is built from the tar archive. It is typically in the archive's root, but can be at a different path or have a different name by specifying the `dockerfile` parameter. [See the `Dockerfile` reference for more information](https://docs.docker.com/engine/reference/builder/).
-
- The Docker daemon performs a preliminary validation of the `Dockerfile` before starting the build, and returns an error if the syntax is incorrect. After that, each instruction is run one-by-one until the ID of the new image is output.
-
- The build is canceled if the client drops the connection by quitting or being killed.
- operationId: "ImageBuild"
- consumes:
- - "application/octet-stream"
- produces:
- - "application/json"
- parameters:
- - name: "inputStream"
- in: "body"
- description: "A tar archive compressed with one of the following algorithms: identity (no compression), gzip, bzip2, xz."
- schema:
- type: "string"
- format: "binary"
- - name: "dockerfile"
- in: "query"
- description: "Path within the build context to the `Dockerfile`. This is ignored if `remote` is specified and points to an external `Dockerfile`."
- type: "string"
- default: "Dockerfile"
- - name: "t"
- in: "query"
- description: "A name and optional tag to apply to the image in the `name:tag` format. If you omit the tag the default `latest` value is assumed. You can provide several `t` parameters."
- type: "string"
- - name: "extrahosts"
- in: "query"
- description: "Extra hosts to add to /etc/hosts"
- type: "string"
- - name: "remote"
- in: "query"
- description: "A Git repository URI or HTTP/HTTPS context URI. If the URI points to a single text file, the file’s contents are placed into a file called `Dockerfile` and the image is built from that file. If the URI points to a tarball, the file is downloaded by the daemon and the contents therein used as the context for the build. If the URI points to a tarball and the `dockerfile` parameter is also specified, there must be a file with the corresponding path inside the tarball."
- type: "string"
- - name: "q"
- in: "query"
- description: "Suppress verbose build output."
- type: "boolean"
- default: false
- - name: "nocache"
- in: "query"
- description: "Do not use the cache when building the image."
- type: "boolean"
- default: false
- - name: "cachefrom"
- in: "query"
- description: "JSON array of images used for build cache resolution."
- type: "string"
- - name: "pull"
- in: "query"
- description: "Attempt to pull the image even if an older image exists locally."
- type: "string"
- - name: "rm"
- in: "query"
- description: "Remove intermediate containers after a successful build."
- type: "boolean"
- default: true
- - name: "forcerm"
- in: "query"
- description: "Always remove intermediate containers, even upon failure."
- type: "boolean"
- default: false
- - name: "memory"
- in: "query"
- description: "Set memory limit for build."
- type: "integer"
- - name: "memswap"
- in: "query"
- description: "Total memory (memory + swap). Set as `-1` to disable swap."
- type: "integer"
- - name: "cpushares"
- in: "query"
- description: "CPU shares (relative weight)."
- type: "integer"
- - name: "cpusetcpus"
- in: "query"
- description: "CPUs in which to allow execution (e.g., `0-3`, `0,1`)."
- type: "string"
- - name: "cpuperiod"
- in: "query"
- description: "The length of a CPU period in microseconds."
- type: "integer"
- - name: "cpuquota"
- in: "query"
- description: "Microseconds of CPU time that the container can get in a CPU period."
- type: "integer"
- - name: "buildargs"
- in: "query"
- description: >
- JSON map of string pairs for build-time variables. Users pass these values at build-time. Docker
- uses the buildargs as the environment context for commands run via the `Dockerfile` RUN
- instruction, or for variable expansion in other `Dockerfile` instructions. This is not meant for
- passing secret values.
-
-
- For example, the build arg `FOO=bar` would become `{"FOO":"bar"}` in JSON. This would result in the
- query parameter `buildargs={"FOO":"bar"}`. Note that `{"FOO":"bar"}` should be URI component encoded.
-
-
- [Read more about the buildargs instruction.](https://docs.docker.com/engine/reference/builder/#arg)
- type: "string"
- - name: "shmsize"
- in: "query"
- description: "Size of `/dev/shm` in bytes. The size must be greater than 0. If omitted the system uses 64MB."
- type: "integer"
- - name: "squash"
- in: "query"
- description: "Squash the resulting images layers into a single layer. *(Experimental release only.)*"
- type: "boolean"
- - name: "labels"
- in: "query"
- description: "Arbitrary key/value labels to set on the image, as a JSON map of string pairs."
- type: "string"
- - name: "networkmode"
- in: "query"
- description: |
- Sets the networking mode for the run commands during build. Supported
- standard values are: `bridge`, `host`, `none`, and `container:<name|id>`.
- Any other value is taken as a custom network's name or ID to which this
- container should connect.
- type: "string"
- - name: "Content-type"
- in: "header"
- type: "string"
- enum:
- - "application/x-tar"
- default: "application/x-tar"
- - name: "X-Registry-Config"
- in: "header"
- description: |
- This is a base64-encoded JSON object with auth configurations for multiple registries that a build may refer to.
-
- The key is a registry URL, and the value is an auth configuration object, [as described in the authentication section](#section/Authentication). For example:
-
- ```
- {
- "docker.example.com": {
- "username": "janedoe",
- "password": "hunter2"
- },
- "https://index.docker.io/v1/": {
- "username": "mobydock",
- "password": "conta1n3rize14"
- }
- }
- ```
-
- Only the registry domain name (and port if not the default 443) is required. However, for legacy reasons, the Docker Hub registry must be specified with both a `https://` prefix and a `/v1/` suffix even though Docker will prefer to use the v2 registry API.
- type: "string"
- - name: "platform"
- in: "query"
- description: "Platform in the format os[/arch[/variant]]"
- type: "string"
- default: ""
- - name: "target"
- in: "query"
- description: "Target build stage"
- type: "string"
- default: ""
- - name: "outputs"
- in: "query"
- description: "BuildKit output configuration"
- type: "string"
- default: ""
- responses:
- 200:
- description: "no error"
- 400:
- description: "Bad parameter"
- schema:
- $ref: "#/definitions/ErrorResponse"
- 500:
- description: "server error"
- schema:
- $ref: "#/definitions/ErrorResponse"
- tags: ["Image"]
- /build/prune:
- post:
- summary: "Delete builder cache"
- produces:
- - "application/json"
- operationId: "BuildPrune"
- parameters:
- - name: "keep-storage"
- in: "query"
- description: "Amount of disk space in bytes to keep for cache"
- type: "integer"
- format: "int64"
- - name: "all"
- in: "query"
- type: "boolean"
- description: "Remove all types of build cache"
- - name: "filters"
- in: "query"
- type: "string"
- description: |
- A JSON encoded value of the filters (a `map[string][]string`) to
- process on the list of build cache objects.
-
- Available filters:
-
- - `until=<duration>`: duration relative to daemon's time, during which build cache was not used, in Go's duration format (e.g., '24h')
- - `id=<id>`
- - `parent=<id>`
- - `type=<string>`
- - `description=<string>`
- - `inuse`
- - `shared`
- - `private`
- responses:
- 200:
- description: "No error"
- schema:
- type: "object"
- title: "BuildPruneResponse"
- properties:
- CachesDeleted:
- type: "array"
- items:
- description: "ID of build cache object"
- type: "string"
- SpaceReclaimed:
- description: "Disk space reclaimed in bytes"
- type: "integer"
- format: "int64"
- 500:
- description: "Server error"
- schema:
- $ref: "#/definitions/ErrorResponse"
- tags: ["Image"]
- /images/create:
- post:
- summary: "Create an image"
- description: "Create an image by either pulling it from a registry or importing it."
- operationId: "ImageCreate"
- consumes:
- - "text/plain"
- - "application/octet-stream"
- produces:
- - "application/json"
- responses:
- 200:
- description: "no error"
- 404:
- description: "repository does not exist or no read access"
- schema:
- $ref: "#/definitions/ErrorResponse"
- 500:
- description: "server error"
- schema:
- $ref: "#/definitions/ErrorResponse"
- parameters:
- - name: "fromImage"
- in: "query"
- description: "Name of the image to pull. The name may include a tag or digest. This parameter may only be used when pulling an image. The pull is cancelled if the HTTP connection is closed."
- type: "string"
- - name: "fromSrc"
- in: "query"
- description: "Source to import. The value may be a URL from which the image can be retrieved or `-` to read the image from the request body. This parameter may only be used when importing an image."
- type: "string"
- - name: "repo"
- in: "query"
- description: "Repository name given to an image when it is imported. The repo may include a tag. This parameter may only be used when importing an image."
- type: "string"
- - name: "tag"
- in: "query"
- description: "Tag or digest. If empty when pulling an image, this causes all tags for the given image to be pulled."
- type: "string"
- - name: "message"
- in: "query"
- description: "Set commit message for imported image."
- type: "string"
- - name: "inputImage"
- in: "body"
- description: "Image content if the value `-` has been specified in fromSrc query parameter"
- schema:
- type: "string"
- required: false
- - name: "X-Registry-Auth"
- in: "header"
- description: |
- A base64url-encoded auth configuration.
-
- Refer to the [authentication section](#section/Authentication) for
- details.
- type: "string"
- - name: "platform"
- in: "query"
- description: "Platform in the format os[/arch[/variant]]"
- type: "string"
- default: ""
- tags: ["Image"]
- /images/{name}/json:
- get:
- summary: "Inspect an image"
- description: "Return low-level information about an image."
- operationId: "ImageInspect"
- produces:
- - "application/json"
- responses:
- 200:
- description: "No error"
- schema:
- $ref: "#/definitions/Image"
- examples:
- application/json:
- Id: "sha256:85f05633ddc1c50679be2b16a0479ab6f7637f8884e0cfe0f4d20e1ebb3d6e7c"
- Container: "cb91e48a60d01f1e27028b4fc6819f4f290b3cf12496c8176ec714d0d390984a"
- Comment: ""
- Os: "linux"
- Architecture: "amd64"
- Parent: "sha256:91e54dfb11794fad694460162bf0cb0a4fa710cfa3f60979c177d920813e267c"
- ContainerConfig:
- Tty: false
- Hostname: "e611e15f9c9d"
- Domainname: ""
- AttachStdout: false
- PublishService: ""
- AttachStdin: false
- OpenStdin: false
- StdinOnce: false
- NetworkDisabled: false
- OnBuild: []
- Image: "91e54dfb11794fad694460162bf0cb0a4fa710cfa3f60979c177d920813e267c"
- User: ""
- WorkingDir: ""
- MacAddress: ""
- AttachStderr: false
- Labels:
- com.example.license: "GPL"
- com.example.version: "1.0"
- com.example.vendor: "Acme"
- Env:
- - "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin"
- Cmd:
- - "/bin/sh"
- - "-c"
- - "#(nop) LABEL com.example.vendor=Acme com.example.license=GPL com.example.version=1.0"
- DockerVersion: "1.9.0-dev"
- VirtualSize: 188359297
- Size: 0
- Author: ""
- Created: "2015-09-10T08:30:53.26995814Z"
- GraphDriver:
- Name: "aufs"
- Data: {}
- RepoDigests:
- - "localhost:5000/test/busybox/example@sha256:cbbf2f9a99b47fc460d422812b6a5adff7dfee951d8fa2e4a98caa0382cfbdbf"
- RepoTags:
- - "example:1.0"
- - "example:latest"
- - "example:stable"
- Config:
- Image: "91e54dfb11794fad694460162bf0cb0a4fa710cfa3f60979c177d920813e267c"
- NetworkDisabled: false
- OnBuild: []
- StdinOnce: false
- PublishService: ""
- AttachStdin: false
- OpenStdin: false
- Domainname: ""
- AttachStdout: false
- Tty: false
- Hostname: "e611e15f9c9d"
- Cmd:
- - "/bin/bash"
- Env:
- - "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin"
- Labels:
- com.example.vendor: "Acme"
- com.example.version: "1.0"
- com.example.license: "GPL"
- MacAddress: ""
- AttachStderr: false
- WorkingDir: ""
- User: ""
- RootFS:
- Type: "layers"
- Layers:
- - "sha256:1834950e52ce4d5a88a1bbd131c537f4d0e56d10ff0dd69e66be3b7dfa9df7e6"
- - "sha256:5f70bf18a086007016e948b04aed3b82103a36bea41755b6cddfaf10ace3c6ef"
- 404:
- description: "No such image"
- schema:
- $ref: "#/definitions/ErrorResponse"
- examples:
- application/json:
- message: "No such image: someimage (tag: latest)"
- 500:
- description: "Server error"
- schema:
- $ref: "#/definitions/ErrorResponse"
- parameters:
- - name: "name"
- in: "path"
- description: "Image name or id"
- type: "string"
- required: true
- tags: ["Image"]
- /images/{name}/history:
- get:
- summary: "Get the history of an image"
- description: "Return parent layers of an image."
- operationId: "ImageHistory"
- produces: ["application/json"]
- responses:
- 200:
- description: "List of image layers"
- schema:
- type: "array"
- items:
- type: "object"
- x-go-name: HistoryResponseItem
- title: "HistoryResponseItem"
- description: "individual image layer information in response to ImageHistory operation"
- required: [Id, Created, CreatedBy, Tags, Size, Comment]
- properties:
- Id:
- type: "string"
- x-nullable: false
- Created:
- type: "integer"
- format: "int64"
- x-nullable: false
- CreatedBy:
- type: "string"
- x-nullable: false
- Tags:
- type: "array"
- items:
- type: "string"
- Size:
- type: "integer"
- format: "int64"
- x-nullable: false
- Comment:
- type: "string"
- x-nullable: false
- examples:
- application/json:
- - Id: "3db9c44f45209632d6050b35958829c3a2aa256d81b9a7be45b362ff85c54710"
- Created: 1398108230
- CreatedBy: "/bin/sh -c #(nop) ADD file:eb15dbd63394e063b805a3c32ca7bf0266ef64676d5a6fab4801f2e81e2a5148 in /"
- Tags:
- - "ubuntu:lucid"
- - "ubuntu:10.04"
- Size: 182964289
- Comment: ""
- - Id: "6cfa4d1f33fb861d4d114f43b25abd0ac737509268065cdfd69d544a59c85ab8"
- Created: 1398108222
- CreatedBy: "/bin/sh -c #(nop) MAINTAINER Tianon Gravi - mkimage-debootstrap.sh -i iproute,iputils-ping,ubuntu-minimal -t lucid.tar.xz lucid http://archive.ubuntu.com/ubuntu/"
- Tags: []
- Size: 0
- Comment: ""
- - Id: "511136ea3c5a64f264b78b5433614aec563103b4d4702f3ba7d4d2698e22c158"
- Created: 1371157430
- CreatedBy: ""
- Tags:
- - "scratch12:latest"
- - "scratch:latest"
- Size: 0
- Comment: "Imported from -"
- 404:
- description: "No such image"
- schema:
- $ref: "#/definitions/ErrorResponse"
- 500:
- description: "Server error"
- schema:
- $ref: "#/definitions/ErrorResponse"
- parameters:
- - name: "name"
- in: "path"
- description: "Image name or ID"
- type: "string"
- required: true
- tags: ["Image"]
- /images/{name}/push:
- post:
- summary: "Push an image"
- description: |
- Push an image to a registry.
-
- If you wish to push an image on to a private registry, that image must
- already have a tag which references the registry. For example,
- `registry.example.com/myimage:latest`.
-
- The push is cancelled if the HTTP connection is closed.
- operationId: "ImagePush"
- consumes:
- - "application/octet-stream"
- responses:
- 200:
- description: "No error"
- 404:
- description: "No such image"
- schema:
- $ref: "#/definitions/ErrorResponse"
- 500:
- description: "Server error"
- schema:
- $ref: "#/definitions/ErrorResponse"
- parameters:
- - name: "name"
- in: "path"
- description: "Image name or ID."
- type: "string"
- required: true
- - name: "tag"
- in: "query"
- description: "The tag to associate with the image on the registry."
- type: "string"
- - name: "X-Registry-Auth"
- in: "header"
- description: |
- A base64url-encoded auth configuration.
-
- Refer to the [authentication section](#section/Authentication) for
- details.
- type: "string"
- required: true
- tags: ["Image"]
- /images/{name}/tag:
- post:
- summary: "Tag an image"
- description: "Tag an image so that it becomes part of a repository."
- operationId: "ImageTag"
- responses:
- 201:
- description: "No error"
- 400:
- description: "Bad parameter"
- schema:
- $ref: "#/definitions/ErrorResponse"
- 404:
- description: "No such image"
- schema:
- $ref: "#/definitions/ErrorResponse"
- 409:
- description: "Conflict"
- schema:
- $ref: "#/definitions/ErrorResponse"
- 500:
- description: "Server error"
- schema:
- $ref: "#/definitions/ErrorResponse"
- parameters:
- - name: "name"
- in: "path"
- description: "Image name or ID to tag."
- type: "string"
- required: true
- - name: "repo"
- in: "query"
- description: "The repository to tag in. For example, `someuser/someimage`."
- type: "string"
- - name: "tag"
- in: "query"
- description: "The name of the new tag."
- type: "string"
- tags: ["Image"]
- /images/{name}:
- delete:
- summary: "Remove an image"
- description: |
- Remove an image, along with any untagged parent images that were
- referenced by that image.
-
- Images can't be removed if they have descendant images, are being
- used by a running container or are being used by a build.
- operationId: "ImageDelete"
- produces: ["application/json"]
- responses:
- 200:
- description: "The image was deleted successfully"
- schema:
- type: "array"
- items:
- $ref: "#/definitions/ImageDeleteResponseItem"
- examples:
- application/json:
- - Untagged: "3e2f21a89f"
- - Deleted: "3e2f21a89f"
- - Deleted: "53b4f83ac9"
- 404:
- description: "No such image"
- schema:
- $ref: "#/definitions/ErrorResponse"
- 409:
- description: "Conflict"
- schema:
- $ref: "#/definitions/ErrorResponse"
- 500:
- description: "Server error"
- schema:
- $ref: "#/definitions/ErrorResponse"
- parameters:
- - name: "name"
- in: "path"
- description: "Image name or ID"
- type: "string"
- required: true
- - name: "force"
- in: "query"
- description: "Remove the image even if it is being used by stopped containers or has other tags"
- type: "boolean"
- default: false
- - name: "noprune"
- in: "query"
- description: "Do not delete untagged parent images"
- type: "boolean"
- default: false
- tags: ["Image"]
- /images/search:
- get:
- summary: "Search images"
- description: "Search for an image on Docker Hub."
- operationId: "ImageSearch"
- produces:
- - "application/json"
- responses:
- 200:
- description: "No error"
- schema:
- type: "array"
- items:
- type: "object"
- title: "ImageSearchResponseItem"
- properties:
- description:
- type: "string"
- is_official:
- type: "boolean"
- is_automated:
- type: "boolean"
- name:
- type: "string"
- star_count:
- type: "integer"
- examples:
- application/json:
- - description: ""
- is_official: false
- is_automated: false
- name: "wma55/u1210sshd"
- star_count: 0
- - description: ""
- is_official: false
- is_automated: false
- name: "jdswinbank/sshd"
- star_count: 0
- - description: ""
- is_official: false
- is_automated: false
- name: "vgauthier/sshd"
- star_count: 0
- 500:
- description: "Server error"
- schema:
- $ref: "#/definitions/ErrorResponse"
- parameters:
- - name: "term"
- in: "query"
- description: "Term to search"
- type: "string"
- required: true
- - name: "limit"
- in: "query"
- description: "Maximum number of results to return"
- type: "integer"
- - name: "filters"
- in: "query"
- description: |
- A JSON encoded value of the filters (a `map[string][]string`) to process on the images list. Available filters:
-
- - `is-automated=(true|false)`
- - `is-official=(true|false)`
- - `stars=<number>` Matches images that have at least 'number' stars.
- type: "string"
- tags: ["Image"]
- /images/prune:
- post:
- summary: "Delete unused images"
- produces:
- - "application/json"
- operationId: "ImagePrune"
- parameters:
- - name: "filters"
- in: "query"
- description: |
- Filters to process on the prune list, encoded as JSON (a `map[string][]string`). Available filters:
-
- - `dangling=<boolean>` When set to `true` (or `1`), prune only
- unused *and* untagged images. When set to `false`
- (or `0`), all unused images are pruned.
- - `until=<timestamp>` Prune images created before this timestamp. The `<timestamp>` can be Unix timestamps, date formatted timestamps, or Go duration strings (e.g. `10m`, `1h30m`) computed relative to the daemon machine’s time.
- - `label` (`label=<key>`, `label=<key>=<value>`, `label!=<key>`, or `label!=<key>=<value>`) Prune images with (or without, in case `label!=...` is used) the specified labels.
- type: "string"
- responses:
- 200:
- description: "No error"
- schema:
- type: "object"
- title: "ImagePruneResponse"
- properties:
- ImagesDeleted:
- description: "Images that were deleted"
- type: "array"
- items:
- $ref: "#/definitions/ImageDeleteResponseItem"
- SpaceReclaimed:
- description: "Disk space reclaimed in bytes"
- type: "integer"
- format: "int64"
- 500:
- description: "Server error"
- schema:
- $ref: "#/definitions/ErrorResponse"
- tags: ["Image"]
- /auth:
- post:
- summary: "Check auth configuration"
- description: |
- Validate credentials for a registry and, if available, get an identity
- token for accessing the registry without password.
- operationId: "SystemAuth"
- consumes: ["application/json"]
- produces: ["application/json"]
- responses:
- 200:
- description: "An identity token was generated successfully."
- schema:
- type: "object"
- title: "SystemAuthResponse"
- required: [Status]
- properties:
- Status:
- description: "The status of the authentication"
- type: "string"
- x-nullable: false
- IdentityToken:
- description: "An opaque token used to authenticate a user after a successful login"
- type: "string"
- x-nullable: false
- examples:
- application/json:
- Status: "Login Succeeded"
- IdentityToken: "9cbaf023786cd7..."
- 204:
- description: "No error"
- 500:
- description: "Server error"
- schema:
- $ref: "#/definitions/ErrorResponse"
- parameters:
- - name: "authConfig"
- in: "body"
- description: "Authentication to check"
- schema:
- $ref: "#/definitions/AuthConfig"
- tags: ["System"]
- /info:
- get:
- summary: "Get system information"
- operationId: "SystemInfo"
- produces:
- - "application/json"
- responses:
- 200:
- description: "No error"
- schema:
- $ref: "#/definitions/SystemInfo"
- 500:
- description: "Server error"
- schema:
- $ref: "#/definitions/ErrorResponse"
- tags: ["System"]
- /version:
- get:
- summary: "Get version"
- description: "Returns the version of Docker that is running and various information about the system that Docker is running on."
- operationId: "SystemVersion"
- produces: ["application/json"]
- responses:
- 200:
- description: "no error"
- schema:
- $ref: "#/definitions/SystemVersion"
- 500:
- description: "server error"
- schema:
- $ref: "#/definitions/ErrorResponse"
- tags: ["System"]
- /_ping:
- get:
- summary: "Ping"
- description: "This is a dummy endpoint you can use to test if the server is accessible."
- operationId: "SystemPing"
- produces: ["text/plain"]
- responses:
- 200:
- description: "no error"
- schema:
- type: "string"
- example: "OK"
- headers:
- API-Version:
- type: "string"
- description: "Max API Version the server supports"
- Builder-Version:
- type: "string"
- description: "Default version of docker image builder"
- Docker-Experimental:
- type: "boolean"
- description: "If the server is running with experimental mode enabled"
- Cache-Control:
- type: "string"
- default: "no-cache, no-store, must-revalidate"
- Pragma:
- type: "string"
- default: "no-cache"
- 500:
- description: "server error"
- schema:
- $ref: "#/definitions/ErrorResponse"
- headers:
- Cache-Control:
- type: "string"
- default: "no-cache, no-store, must-revalidate"
- Pragma:
- type: "string"
- default: "no-cache"
- tags: ["System"]
- head:
- summary: "Ping"
- description: "This is a dummy endpoint you can use to test if the server is accessible."
- operationId: "SystemPingHead"
- produces: ["text/plain"]
- responses:
- 200:
- description: "no error"
- schema:
- type: "string"
- example: "(empty)"
- headers:
- API-Version:
- type: "string"
- description: "Max API Version the server supports"
- Builder-Version:
- type: "string"
- description: "Default version of docker image builder"
- Docker-Experimental:
- type: "boolean"
- description: "If the server is running with experimental mode enabled"
- Cache-Control:
- type: "string"
- default: "no-cache, no-store, must-revalidate"
- Pragma:
- type: "string"
- default: "no-cache"
- 500:
- description: "server error"
- schema:
- $ref: "#/definitions/ErrorResponse"
- tags: ["System"]
- /commit:
- post:
- summary: "Create a new image from a container"
- operationId: "ImageCommit"
- consumes:
- - "application/json"
- produces:
- - "application/json"
- responses:
- 201:
- description: "no error"
- schema:
- $ref: "#/definitions/IdResponse"
- 404:
- description: "no such container"
- schema:
- $ref: "#/definitions/ErrorResponse"
- examples:
- application/json:
- message: "No such container: c2ada9df5af8"
- 500:
- description: "server error"
- schema:
- $ref: "#/definitions/ErrorResponse"
- parameters:
- - name: "containerConfig"
- in: "body"
- description: "The container configuration"
- schema:
- $ref: "#/definitions/ContainerConfig"
- - name: "container"
- in: "query"
- description: "The ID or name of the container to commit"
- type: "string"
- - name: "repo"
- in: "query"
- description: "Repository name for the created image"
- type: "string"
- - name: "tag"
- in: "query"
- description: "Tag name for the create image"
- type: "string"
- - name: "comment"
- in: "query"
- description: "Commit message"
- type: "string"
- - name: "author"
- in: "query"
- description: "Author of the image (e.g., `John Hannibal Smith `)"
- type: "string"
- - name: "pause"
- in: "query"
- description: "Whether to pause the container before committing"
- type: "boolean"
- default: true
- - name: "changes"
- in: "query"
- description: "`Dockerfile` instructions to apply while committing"
- type: "string"
- tags: ["Image"]
- /events:
- get:
- summary: "Monitor events"
- description: |
- Stream real-time events from the server.
-
- Various objects within Docker report events when something happens to them.
-
- Containers report these events: `attach`, `commit`, `copy`, `create`, `destroy`, `detach`, `die`, `exec_create`, `exec_detach`, `exec_start`, `exec_die`, `export`, `health_status`, `kill`, `oom`, `pause`, `rename`, `resize`, `restart`, `start`, `stop`, `top`, `unpause`, `update`, and `prune`
-
- Images report these events: `delete`, `import`, `load`, `pull`, `push`, `save`, `tag`, `untag`, and `prune`
-
- Volumes report these events: `create`, `mount`, `unmount`, `destroy`, and `prune`
-
- Networks report these events: `create`, `connect`, `disconnect`, `destroy`, `update`, `remove`, and `prune`
-
- The Docker daemon reports these events: `reload`
-
- Services report these events: `create`, `update`, and `remove`
-
- Nodes report these events: `create`, `update`, and `remove`
-
- Secrets report these events: `create`, `update`, and `remove`
-
- Configs report these events: `create`, `update`, and `remove`
-
- The Builder reports `prune` events
-
- operationId: "SystemEvents"
- produces:
- - "application/json"
- responses:
- 200:
- description: "no error"
- schema:
- type: "object"
- title: "SystemEventsResponse"
- properties:
- Type:
- description: "The type of object emitting the event"
- type: "string"
- Action:
- description: "The type of event"
- type: "string"
- Actor:
- type: "object"
- properties:
- ID:
- description: "The ID of the object emitting the event"
- type: "string"
- Attributes:
- description: "Various key/value attributes of the object, depending on its type"
- type: "object"
- additionalProperties:
- type: "string"
- time:
- description: "Timestamp of event"
- type: "integer"
- timeNano:
- description: "Timestamp of event, with nanosecond accuracy"
- type: "integer"
- format: "int64"
- examples:
- application/json:
- Type: "container"
- Action: "create"
- Actor:
- ID: "ede54ee1afda366ab42f824e8a5ffd195155d853ceaec74a927f249ea270c743"
- Attributes:
- com.example.some-label: "some-label-value"
- image: "alpine"
- name: "my-container"
- time: 1461943101
- 400:
- description: "bad parameter"
- schema:
- $ref: "#/definitions/ErrorResponse"
- 500:
- description: "server error"
- schema:
- $ref: "#/definitions/ErrorResponse"
- parameters:
- - name: "since"
- in: "query"
- description: "Show events created since this timestamp then stream new events."
- type: "string"
- - name: "until"
- in: "query"
- description: "Show events created until this timestamp then stop streaming."
- type: "string"
- - name: "filters"
- in: "query"
- description: |
- A JSON encoded value of filters (a `map[string][]string`) to process on the event list. Available filters:
-
- - `config=<string>` config name or ID
- - `container=<string>` container name or ID
- - `daemon=<string>` daemon name or ID
- - `event=<string>` event type
- - `image=<string>` image name or ID
- - `label=<string>` image or container label
- - `network=<string>` network name or ID
- - `node=<string>` node ID
- - `plugin=<string>` plugin name or ID
- - `scope=<string>` local or swarm
- - `secret=<string>` secret name or ID
- - `service=<string>` service name or ID
- - `type=<string>` object to filter by, one of `container`, `image`, `volume`, `network`, `daemon`, `plugin`, `node`, `service`, `secret` or `config`
- - `volume=<string>` volume name
- type: "string"
- tags: ["System"]
- /system/df:
- get:
- summary: "Get data usage information"
- operationId: "SystemDataUsage"
- responses:
- 200:
- description: "no error"
- schema:
- type: "object"
- title: "SystemDataUsageResponse"
- properties:
- LayersSize:
- type: "integer"
- format: "int64"
- Images:
- type: "array"
- items:
- $ref: "#/definitions/ImageSummary"
- Containers:
- type: "array"
- items:
- $ref: "#/definitions/ContainerSummary"
- Volumes:
- type: "array"
- items:
- $ref: "#/definitions/Volume"
- BuildCache:
- type: "array"
- items:
- $ref: "#/definitions/BuildCache"
- example:
- LayersSize: 1092588
- Images:
- -
- Id: "sha256:2b8fd9751c4c0f5dd266fcae00707e67a2545ef34f9a29354585f93dac906749"
- ParentId: ""
- RepoTags:
- - "busybox:latest"
- RepoDigests:
- - "busybox@sha256:a59906e33509d14c036c8678d687bd4eec81ed7c4b8ce907b888c607f6a1e0e6"
- Created: 1466724217
- Size: 1092588
- SharedSize: 0
- VirtualSize: 1092588
- Labels: {}
- Containers: 1
- Containers:
- -
- Id: "e575172ed11dc01bfce087fb27bee502db149e1a0fad7c296ad300bbff178148"
- Names:
- - "/top"
- Image: "busybox"
- ImageID: "sha256:2b8fd9751c4c0f5dd266fcae00707e67a2545ef34f9a29354585f93dac906749"
- Command: "top"
- Created: 1472592424
- Ports: []
- SizeRootFs: 1092588
- Labels: {}
- State: "exited"
- Status: "Exited (0) 56 minutes ago"
- HostConfig:
- NetworkMode: "default"
- NetworkSettings:
- Networks:
- bridge:
- IPAMConfig: null
- Links: null
- Aliases: null
- NetworkID: "d687bc59335f0e5c9ee8193e5612e8aee000c8c62ea170cfb99c098f95899d92"
- EndpointID: "8ed5115aeaad9abb174f68dcf135b49f11daf597678315231a32ca28441dec6a"
- Gateway: "172.18.0.1"
- IPAddress: "172.18.0.2"
- IPPrefixLen: 16
- IPv6Gateway: ""
- GlobalIPv6Address: ""
- GlobalIPv6PrefixLen: 0
- MacAddress: "02:42:ac:12:00:02"
- Mounts: []
- Volumes:
- -
- Name: "my-volume"
- Driver: "local"
- Mountpoint: "/var/lib/docker/volumes/my-volume/_data"
- Labels: null
- Scope: "local"
- Options: null
- UsageData:
- Size: 10920104
- RefCount: 2
- 500:
- description: "server error"
- schema:
- $ref: "#/definitions/ErrorResponse"
- tags: ["System"]
- /images/{name}/get:
- get:
- summary: "Export an image"
- description: |
- Get a tarball containing all images and metadata for a repository.
-
- If `name` is a specific name and tag (e.g. `ubuntu:latest`), then only that image (and its parents) are returned. If `name` is an image ID, similarly only that image (and its parents) are returned, but with the exclusion of the `repositories` file in the tarball, as there were no image names referenced.
-
- ### Image tarball format
-
- An image tarball contains one directory per image layer (named using its long ID), each containing these files:
-
- - `VERSION`: currently `1.0` - the file format version
- - `json`: detailed layer information, similar to `docker inspect layer_id`
- - `layer.tar`: A tarfile containing the filesystem changes in this layer
-
- The `layer.tar` file contains `aufs` style `.wh..wh.aufs` files and directories for storing attribute changes and deletions.
-
- If the tarball defines a repository, the tarball should also include a `repositories` file at the root that contains a list of repository and tag names mapped to layer IDs.
-
- ```json
- {
- "hello-world": {
- "latest": "565a9d68a73f6706862bfe8409a7f659776d4d60a8d096eb4a3cbce6999cc2a1"
- }
- }
- ```
- operationId: "ImageGet"
- produces:
- - "application/x-tar"
- responses:
- 200:
- description: "no error"
- schema:
- type: "string"
- format: "binary"
- 500:
- description: "server error"
- schema:
- $ref: "#/definitions/ErrorResponse"
- parameters:
- - name: "name"
- in: "path"
- description: "Image name or ID"
- type: "string"
- required: true
- tags: ["Image"]
- /images/get:
- get:
- summary: "Export several images"
- description: |
- Get a tarball containing all images and metadata for several image
- repositories.
-
- For each value of the `names` parameter: if it is a specific name and
- tag (e.g. `ubuntu:latest`), then only that image (and its parents) are
- returned; if it is an image ID, similarly only that image (and its parents)
- are returned and there would be no names referenced in the 'repositories'
- file for this image ID.
-
- For details on the format, see the [export image endpoint](#operation/ImageGet).
- operationId: "ImageGetAll"
- produces:
- - "application/x-tar"
- responses:
- 200:
- description: "no error"
- schema:
- type: "string"
- format: "binary"
- 500:
- description: "server error"
- schema:
- $ref: "#/definitions/ErrorResponse"
- parameters:
- - name: "names"
- in: "query"
- description: "Image names to filter by"
- type: "array"
- items:
- type: "string"
- tags: ["Image"]
- /images/load:
- post:
- summary: "Import images"
- description: |
- Load a set of images and tags into a repository.
-
- For details on the format, see the [export image endpoint](#operation/ImageGet).
- operationId: "ImageLoad"
- consumes:
- - "application/x-tar"
- produces:
- - "application/json"
- responses:
- 200:
- description: "no error"
- 500:
- description: "server error"
- schema:
- $ref: "#/definitions/ErrorResponse"
- parameters:
- - name: "imagesTarball"
- in: "body"
- description: "Tar archive containing images"
- schema:
- type: "string"
- format: "binary"
- - name: "quiet"
- in: "query"
- description: "Suppress progress details during load."
- type: "boolean"
- default: false
- tags: ["Image"]
- /containers/{id}/exec:
- post:
- summary: "Create an exec instance"
- description: "Run a command inside a running container."
- operationId: "ContainerExec"
- consumes:
- - "application/json"
- produces:
- - "application/json"
- responses:
- 201:
- description: "no error"
- schema:
- $ref: "#/definitions/IdResponse"
- 404:
- description: "no such container"
- schema:
- $ref: "#/definitions/ErrorResponse"
- examples:
- application/json:
- message: "No such container: c2ada9df5af8"
- 409:
- description: "container is paused"
- schema:
- $ref: "#/definitions/ErrorResponse"
- 500:
- description: "Server error"
- schema:
- $ref: "#/definitions/ErrorResponse"
- parameters:
- - name: "execConfig"
- in: "body"
- description: "Exec configuration"
- schema:
- type: "object"
- properties:
- AttachStdin:
- type: "boolean"
- description: "Attach to `stdin` of the exec command."
- AttachStdout:
- type: "boolean"
- description: "Attach to `stdout` of the exec command."
- AttachStderr:
- type: "boolean"
- description: "Attach to `stderr` of the exec command."
- DetachKeys:
- type: "string"
- description: |
- Override the key sequence for detaching a container. Format is
- a single character `[a-Z]` or `ctrl-<value>` where `<value>`
- is one of: `a-z`, `@`, `^`, `[`, `,` or `_`.
- Tty:
- type: "boolean"
- description: "Allocate a pseudo-TTY."
- Env:
- description: |
- A list of environment variables in the form `["VAR=value", ...]`.
- type: "array"
- items:
- type: "string"
- Cmd:
- type: "array"
- description: "Command to run, as a string or array of strings."
- items:
- type: "string"
- Privileged:
- type: "boolean"
- description: "Runs the exec process with extended privileges."
- default: false
- User:
- type: "string"
- description: |
- The user, and optionally, group to run the exec process inside
- the container. Format is one of: `user`, `user:group`, `uid`,
- or `uid:gid`.
- WorkingDir:
- type: "string"
- description: |
- The working directory for the exec process inside the container.
- example:
- AttachStdin: false
- AttachStdout: true
- AttachStderr: true
- DetachKeys: "ctrl-p,ctrl-q"
- Tty: false
- Cmd:
- - "date"
- Env:
- - "FOO=bar"
- - "BAZ=quux"
- required: true
- - name: "id"
- in: "path"
- description: "ID or name of container"
- type: "string"
- required: true
- tags: ["Exec"]
- /exec/{id}/start:
- post:
- summary: "Start an exec instance"
- description: |
- Starts a previously set up exec instance. If detach is true, this endpoint
- returns immediately after starting the command. Otherwise, it sets up an
- interactive session with the command.
- operationId: "ExecStart"
- consumes:
- - "application/json"
- produces:
- - "application/vnd.docker.raw-stream"
- responses:
- 200:
- description: "No error"
- 404:
- description: "No such exec instance"
- schema:
- $ref: "#/definitions/ErrorResponse"
- 409:
- description: "Container is stopped or paused"
- schema:
- $ref: "#/definitions/ErrorResponse"
- parameters:
- - name: "execStartConfig"
- in: "body"
- schema:
- type: "object"
- properties:
- Detach:
- type: "boolean"
- description: "Detach from the command."
- Tty:
- type: "boolean"
- description: "Allocate a pseudo-TTY."
- example:
- Detach: false
- Tty: false
- - name: "id"
- in: "path"
- description: "Exec instance ID"
- required: true
- type: "string"
- tags: ["Exec"]
- /exec/{id}/resize:
- post:
- summary: "Resize an exec instance"
- description: |
- Resize the TTY session used by an exec instance. This endpoint only works
- if `tty` was specified as part of creating and starting the exec instance.
- operationId: "ExecResize"
- responses:
- 201:
- description: "No error"
- 404:
- description: "No such exec instance"
- schema:
- $ref: "#/definitions/ErrorResponse"
- parameters:
- - name: "id"
- in: "path"
- description: "Exec instance ID"
- required: true
- type: "string"
- - name: "h"
- in: "query"
- description: "Height of the TTY session in characters"
- type: "integer"
- - name: "w"
- in: "query"
- description: "Width of the TTY session in characters"
- type: "integer"
- tags: ["Exec"]
- /exec/{id}/json:
- get:
- summary: "Inspect an exec instance"
- description: "Return low-level information about an exec instance."
- operationId: "ExecInspect"
- produces:
- - "application/json"
- responses:
- 200:
- description: "No error"
- schema:
- type: "object"
- title: "ExecInspectResponse"
- properties:
- CanRemove:
- type: "boolean"
- DetachKeys:
- type: "string"
- ID:
- type: "string"
- Running:
- type: "boolean"
- ExitCode:
- type: "integer"
- ProcessConfig:
- $ref: "#/definitions/ProcessConfig"
- OpenStdin:
- type: "boolean"
- OpenStderr:
- type: "boolean"
- OpenStdout:
- type: "boolean"
- ContainerID:
- type: "string"
- Pid:
- type: "integer"
- description: "The system process ID for the exec process."
- examples:
- application/json:
- CanRemove: false
- ContainerID: "b53ee82b53a40c7dca428523e34f741f3abc51d9f297a14ff874bf761b995126"
- DetachKeys: ""
- ExitCode: 2
- ID: "f33bbfb39f5b142420f4759b2348913bd4a8d1a6d7fd56499cb41a1bb91d7b3b"
- OpenStderr: true
- OpenStdin: true
- OpenStdout: true
- ProcessConfig:
- arguments:
- - "-c"
- - "exit 2"
- entrypoint: "sh"
- privileged: false
- tty: true
- user: "1000"
- Running: false
- Pid: 42000
- 404:
- description: "No such exec instance"
- schema:
- $ref: "#/definitions/ErrorResponse"
- 500:
- description: "Server error"
- schema:
- $ref: "#/definitions/ErrorResponse"
- parameters:
- - name: "id"
- in: "path"
- description: "Exec instance ID"
- required: true
- type: "string"
- tags: ["Exec"]
-
- /volumes:
- get:
- summary: "List volumes"
- operationId: "VolumeList"
- produces: ["application/json"]
- responses:
- 200:
- description: "Summary volume data that matches the query"
- schema:
- type: "object"
- title: "VolumeListResponse"
- description: "Volume list response"
- required: [Volumes, Warnings]
- properties:
- Volumes:
- type: "array"
- x-nullable: false
- description: "List of volumes"
- items:
- $ref: "#/definitions/Volume"
- Warnings:
- type: "array"
- x-nullable: false
- description: |
- Warnings that occurred when fetching the list of volumes.
- items:
- type: "string"
-
- examples:
- application/json:
- Volumes:
- - CreatedAt: "2017-07-19T12:00:26Z"
- Name: "tardis"
- Driver: "local"
- Mountpoint: "/var/lib/docker/volumes/tardis"
- Labels:
- com.example.some-label: "some-value"
- com.example.some-other-label: "some-other-value"
- Scope: "local"
- Options:
- device: "tmpfs"
- o: "size=100m,uid=1000"
- type: "tmpfs"
- Warnings: []
- 500:
- description: "Server error"
- schema:
- $ref: "#/definitions/ErrorResponse"
- parameters:
- - name: "filters"
- in: "query"
- description: |
- JSON encoded value of the filters (a `map[string][]string`) to
- process on the volumes list. Available filters:
-
- - `dangling=<boolean>` When set to `true` (or `1`), returns all
- volumes that are not in use by a container. When set to `false`
- (or `0`), only volumes that are in use by one or more
- containers are returned.
- - `driver=<volume-driver-name>` Matches volumes based on their driver.
- - `label=<key>` or `label=<key>:<value>` Matches volumes based on
- the presence of a `label` alone or a `label` and a value.
- - `name=<volume-name>` Matches all or part of a volume name.
- type: "string"
- format: "json"
- tags: ["Volume"]
-
- /volumes/create:
- post:
- summary: "Create a volume"
- operationId: "VolumeCreate"
- consumes: ["application/json"]
- produces: ["application/json"]
- responses:
- 201:
- description: "The volume was created successfully"
- schema:
- $ref: "#/definitions/Volume"
- 500:
- description: "Server error"
- schema:
- $ref: "#/definitions/ErrorResponse"
- parameters:
- - name: "volumeConfig"
- in: "body"
- required: true
- description: "Volume configuration"
- schema:
- type: "object"
- description: "Volume configuration"
- title: "VolumeConfig"
- properties:
- Name:
- description: |
- The new volume's name. If not specified, Docker generates a name.
- type: "string"
- x-nullable: false
- Driver:
- description: "Name of the volume driver to use."
- type: "string"
- default: "local"
- x-nullable: false
- DriverOpts:
- description: |
- A mapping of driver options and values. These options are
- passed directly to the driver and are driver specific.
- type: "object"
- additionalProperties:
- type: "string"
- Labels:
- description: "User-defined key/value metadata."
- type: "object"
- additionalProperties:
- type: "string"
- example:
- Name: "tardis"
- Labels:
- com.example.some-label: "some-value"
- com.example.some-other-label: "some-other-value"
- Driver: "custom"
- tags: ["Volume"]
-
- /volumes/{name}:
- get:
- summary: "Inspect a volume"
- operationId: "VolumeInspect"
- produces: ["application/json"]
- responses:
- 200:
- description: "No error"
- schema:
- $ref: "#/definitions/Volume"
- 404:
- description: "No such volume"
- schema:
- $ref: "#/definitions/ErrorResponse"
- 500:
- description: "Server error"
- schema:
- $ref: "#/definitions/ErrorResponse"
- parameters:
- - name: "name"
- in: "path"
- required: true
- description: "Volume name or ID"
- type: "string"
- tags: ["Volume"]
-
- delete:
- summary: "Remove a volume"
- description: "Instruct the driver to remove the volume."
- operationId: "VolumeDelete"
- responses:
- 204:
- description: "The volume was removed"
- 404:
- description: "No such volume or volume driver"
- schema:
- $ref: "#/definitions/ErrorResponse"
- 409:
- description: "Volume is in use and cannot be removed"
- schema:
- $ref: "#/definitions/ErrorResponse"
- 500:
- description: "Server error"
- schema:
- $ref: "#/definitions/ErrorResponse"
- parameters:
- - name: "name"
- in: "path"
- required: true
- description: "Volume name or ID"
- type: "string"
- - name: "force"
- in: "query"
- description: "Force the removal of the volume"
- type: "boolean"
- default: false
- tags: ["Volume"]
- /volumes/prune:
- post:
- summary: "Delete unused volumes"
- produces:
- - "application/json"
- operationId: "VolumePrune"
- parameters:
- - name: "filters"
- in: "query"
- description: |
- Filters to process on the prune list, encoded as JSON (a `map[string][]string`).
-
- Available filters:
- - `label` (`label=<key>`, `label=<key>=<value>`, `label!=<key>`, or `label!=<key>=<value>`) Prune volumes with (or without, in case `label!=...` is used) the specified labels.
- type: "string"
- responses:
- 200:
- description: "No error"
- schema:
- type: "object"
- title: "VolumePruneResponse"
- properties:
- VolumesDeleted:
- description: "Volumes that were deleted"
- type: "array"
- items:
- type: "string"
- SpaceReclaimed:
- description: "Disk space reclaimed in bytes"
- type: "integer"
- format: "int64"
- 500:
- description: "Server error"
- schema:
- $ref: "#/definitions/ErrorResponse"
- tags: ["Volume"]
- /networks:
- get:
- summary: "List networks"
- description: |
- Returns a list of networks. For details on the format, see the
- [network inspect endpoint](#operation/NetworkInspect).
-
- Note that it uses a different, smaller representation of a network than
- inspecting a single network. For example, the list of containers attached
- to the network is not propagated in API versions 1.28 and up.
- operationId: "NetworkList"
- produces:
- - "application/json"
- responses:
- 200:
- description: "No error"
- schema:
- type: "array"
- items:
- $ref: "#/definitions/Network"
- examples:
- application/json:
- - Name: "bridge"
- Id: "f2de39df4171b0dc801e8002d1d999b77256983dfc63041c0f34030aa3977566"
- Created: "2016-10-19T06:21:00.416543526Z"
- Scope: "local"
- Driver: "bridge"
- EnableIPv6: false
- Internal: false
- Attachable: false
- Ingress: false
- IPAM:
- Driver: "default"
- Config:
- -
- Subnet: "172.17.0.0/16"
- Options:
- com.docker.network.bridge.default_bridge: "true"
- com.docker.network.bridge.enable_icc: "true"
- com.docker.network.bridge.enable_ip_masquerade: "true"
- com.docker.network.bridge.host_binding_ipv4: "0.0.0.0"
- com.docker.network.bridge.name: "docker0"
- com.docker.network.driver.mtu: "1500"
- - Name: "none"
- Id: "e086a3893b05ab69242d3c44e49483a3bbbd3a26b46baa8f61ab797c1088d794"
- Created: "0001-01-01T00:00:00Z"
- Scope: "local"
- Driver: "null"
- EnableIPv6: false
- Internal: false
- Attachable: false
- Ingress: false
- IPAM:
- Driver: "default"
- Config: []
- Containers: {}
- Options: {}
- - Name: "host"
- Id: "13e871235c677f196c4e1ecebb9dc733b9b2d2ab589e30c539efeda84a24215e"
- Created: "0001-01-01T00:00:00Z"
- Scope: "local"
- Driver: "host"
- EnableIPv6: false
- Internal: false
- Attachable: false
- Ingress: false
- IPAM:
- Driver: "default"
- Config: []
- Containers: {}
- Options: {}
- 500:
- description: "Server error"
- schema:
- $ref: "#/definitions/ErrorResponse"
- parameters:
- - name: "filters"
- in: "query"
- description: |
- JSON encoded value of the filters (a `map[string][]string`) to process
- on the networks list.
-
- Available filters:
-
- - `dangling=<boolean>` When set to `true` (or `1`), returns all
- networks that are not in use by a container. When set to `false`
- (or `0`), only networks that are in use by one or more
- containers are returned.
- - `driver=<driver-name>` Matches a network's driver.
- - `id=<network-id>` Matches all or part of a network ID.
- - `label=<key>` or `label=<key>=<value>` Matches networks based on the presence of a label alone, or a label and a value.
- - `name=<network-name>` Matches all or part of a network name.
- - `scope=["swarm"|"global"|"local"]` Filters networks by scope (`swarm`, `global`, or `local`).
- - `type=["custom"|"builtin"]` Filters networks by type. The `custom` keyword returns all user-defined networks.
- type: "string"
- tags: ["Network"]
-
- /networks/{id}:
- get:
- summary: "Inspect a network"
- operationId: "NetworkInspect"
- produces:
- - "application/json"
- responses:
- 200:
- description: "No error"
- schema:
- $ref: "#/definitions/Network"
- 404:
- description: "Network not found"
- schema:
- $ref: "#/definitions/ErrorResponse"
- 500:
- description: "Server error"
- schema:
- $ref: "#/definitions/ErrorResponse"
- parameters:
- - name: "id"
- in: "path"
- description: "Network ID or name"
- required: true
- type: "string"
- - name: "verbose"
- in: "query"
- description: "Detailed inspect output for troubleshooting"
- type: "boolean"
- default: false
- - name: "scope"
- in: "query"
- description: "Filter the network by scope (swarm, global, or local)"
- type: "string"
- tags: ["Network"]
-
- delete:
- summary: "Remove a network"
- operationId: "NetworkDelete"
- responses:
- 204:
- description: "No error"
- 403:
- description: "operation not supported for pre-defined networks"
- schema:
- $ref: "#/definitions/ErrorResponse"
- 404:
- description: "no such network"
- schema:
- $ref: "#/definitions/ErrorResponse"
- 500:
- description: "Server error"
- schema:
- $ref: "#/definitions/ErrorResponse"
- parameters:
- - name: "id"
- in: "path"
- description: "Network ID or name"
- required: true
- type: "string"
- tags: ["Network"]
-
- /networks/create:
- post:
- summary: "Create a network"
- operationId: "NetworkCreate"
- consumes:
- - "application/json"
- produces:
- - "application/json"
- responses:
- 201:
- description: "No error"
- schema:
- type: "object"
- title: "NetworkCreateResponse"
- properties:
- Id:
- description: "The ID of the created network."
- type: "string"
- Warning:
- type: "string"
- example:
- Id: "22be93d5babb089c5aab8dbc369042fad48ff791584ca2da2100db837a1c7c30"
- Warning: ""
- 403:
- description: "operation not supported for pre-defined networks"
- schema:
- $ref: "#/definitions/ErrorResponse"
- 404:
- description: "plugin not found"
- schema:
- $ref: "#/definitions/ErrorResponse"
- 500:
- description: "Server error"
- schema:
- $ref: "#/definitions/ErrorResponse"
- parameters:
- - name: "networkConfig"
- in: "body"
- description: "Network configuration"
- required: true
- schema:
- type: "object"
- required: ["Name"]
- properties:
- Name:
- description: "The network's name."
- type: "string"
- CheckDuplicate:
- description: |
- Check for networks with duplicate names. Since networks are
- primarily keyed by a random ID rather than by name, and a
- network name is strictly a user-friendly alias to a network
- that is uniquely identified by its ID, there is no guaranteed
- way to check for duplicates. CheckDuplicate provides a
- best-effort check for networks with the same name, but it is
- not guaranteed to catch all name collisions.
- type: "boolean"
- Driver:
- description: "Name of the network driver plugin to use."
- type: "string"
- default: "bridge"
- Internal:
- description: "Restrict external access to the network."
- type: "boolean"
- Attachable:
- description: |
- Globally scoped network is manually attachable by regular
- containers from workers in swarm mode.
- type: "boolean"
- Ingress:
- description: |
- Ingress network is the network which provides the routing-mesh
- in swarm mode.
- type: "boolean"
- IPAM:
- description: "Optional custom IP scheme for the network."
- $ref: "#/definitions/IPAM"
- EnableIPv6:
- description: "Enable IPv6 on the network."
- type: "boolean"
- Options:
- description: "Network specific options to be used by the drivers."
- type: "object"
- additionalProperties:
- type: "string"
- Labels:
- description: "User-defined key/value metadata."
- type: "object"
- additionalProperties:
- type: "string"
- example:
- Name: "isolated_nw"
- CheckDuplicate: false
- Driver: "bridge"
- EnableIPv6: true
- IPAM:
- Driver: "default"
- Config:
- - Subnet: "172.20.0.0/16"
- IPRange: "172.20.10.0/24"
- Gateway: "172.20.10.11"
- - Subnet: "2001:db8:abcd::/64"
- Gateway: "2001:db8:abcd::1011"
- Options:
- foo: "bar"
- Internal: true
- Attachable: false
- Ingress: false
- Options:
- com.docker.network.bridge.default_bridge: "true"
- com.docker.network.bridge.enable_icc: "true"
- com.docker.network.bridge.enable_ip_masquerade: "true"
- com.docker.network.bridge.host_binding_ipv4: "0.0.0.0"
- com.docker.network.bridge.name: "docker0"
- com.docker.network.driver.mtu: "1500"
- Labels:
- com.example.some-label: "some-value"
- com.example.some-other-label: "some-other-value"
- tags: ["Network"]
-
- /networks/{id}/connect:
- post:
- summary: "Connect a container to a network"
- operationId: "NetworkConnect"
- consumes:
- - "application/json"
- responses:
- 200:
- description: "No error"
- 403:
- description: "Operation not supported for swarm scoped networks"
- schema:
- $ref: "#/definitions/ErrorResponse"
- 404:
- description: "Network or container not found"
- schema:
- $ref: "#/definitions/ErrorResponse"
- 500:
- description: "Server error"
- schema:
- $ref: "#/definitions/ErrorResponse"
- parameters:
- - name: "id"
- in: "path"
- description: "Network ID or name"
- required: true
- type: "string"
- - name: "container"
- in: "body"
- required: true
- schema:
- type: "object"
- properties:
- Container:
- type: "string"
- description: "The ID or name of the container to connect to the network."
- EndpointConfig:
- $ref: "#/definitions/EndpointSettings"
- example:
- Container: "3613f73ba0e4"
- EndpointConfig:
- IPAMConfig:
- IPv4Address: "172.24.56.89"
- IPv6Address: "2001:db8::5689"
- tags: ["Network"]
-
- /networks/{id}/disconnect:
- post:
- summary: "Disconnect a container from a network"
- operationId: "NetworkDisconnect"
- consumes:
- - "application/json"
- responses:
- 200:
- description: "No error"
- 403:
- description: "Operation not supported for swarm scoped networks"
- schema:
- $ref: "#/definitions/ErrorResponse"
- 404:
- description: "Network or container not found"
- schema:
- $ref: "#/definitions/ErrorResponse"
- 500:
- description: "Server error"
- schema:
- $ref: "#/definitions/ErrorResponse"
- parameters:
- - name: "id"
- in: "path"
- description: "Network ID or name"
- required: true
- type: "string"
- - name: "container"
- in: "body"
- required: true
- schema:
- type: "object"
- properties:
- Container:
- type: "string"
- description: |
- The ID or name of the container to disconnect from the network.
- Force:
- type: "boolean"
- description: |
- Force the container to disconnect from the network.
- tags: ["Network"]
- /networks/prune:
- post:
- summary: "Delete unused networks"
- produces:
- - "application/json"
- operationId: "NetworkPrune"
- parameters:
- - name: "filters"
- in: "query"
- description: |
- Filters to process on the prune list, encoded as JSON (a `map[string][]string`).
-
- Available filters:
- - `until=<timestamp>` Prune networks created before this timestamp. The `<timestamp>` can be Unix timestamps, date formatted timestamps, or Go duration strings (e.g. `10m`, `1h30m`) computed relative to the daemon machine’s time.
- - `label` (`label=<key>`, `label=<key>=<value>`, `label!=<key>`, or `label!=<key>=<value>`) Prune networks with (or without, in case `label!=...` is used) the specified labels.
- type: "string"
- responses:
- 200:
- description: "No error"
- schema:
- type: "object"
- title: "NetworkPruneResponse"
- properties:
- NetworksDeleted:
- description: "Networks that were deleted"
- type: "array"
- items:
- type: "string"
- 500:
- description: "Server error"
- schema:
- $ref: "#/definitions/ErrorResponse"
- tags: ["Network"]
- /plugins:
- get:
- summary: "List plugins"
- operationId: "PluginList"
- description: "Returns information about installed plugins."
- produces: ["application/json"]
- responses:
- 200:
- description: "No error"
- schema:
- type: "array"
- items:
- $ref: "#/definitions/Plugin"
- 500:
- description: "Server error"
- schema:
- $ref: "#/definitions/ErrorResponse"
- parameters:
- - name: "filters"
- in: "query"
- type: "string"
- description: |
- A JSON encoded value of the filters (a `map[string][]string`) to
- process on the plugin list.
-
- Available filters:
-
- - `capability=<capability name>`
- - `enable=<true>|<false>`
- tags: ["Plugin"]
-
- /plugins/privileges:
- get:
- summary: "Get plugin privileges"
- operationId: "GetPluginPrivileges"
- responses:
- 200:
- description: "no error"
- schema:
- type: "array"
- items:
- description: |
- Describes a permission the user has to accept upon installing
- the plugin.
- type: "object"
- title: "PluginPrivilegeItem"
- properties:
- Name:
- type: "string"
- Description:
- type: "string"
- Value:
- type: "array"
- items:
- type: "string"
- example:
- - Name: "network"
- Description: ""
- Value:
- - "host"
- - Name: "mount"
- Description: ""
- Value:
- - "/data"
- - Name: "device"
- Description: ""
- Value:
- - "/dev/cpu_dma_latency"
- 500:
- description: "server error"
- schema:
- $ref: "#/definitions/ErrorResponse"
- parameters:
- - name: "remote"
- in: "query"
- description: |
- The name of the plugin. The `:latest` tag is optional, and is the
- default if omitted.
- required: true
- type: "string"
- tags:
- - "Plugin"
-
- /plugins/pull:
- post:
- summary: "Install a plugin"
- operationId: "PluginPull"
- description: |
- Pulls and installs a plugin. After the plugin is installed, it can be
- enabled using the [`POST /plugins/{name}/enable` endpoint](#operation/PostPluginsEnable).
- produces:
- - "application/json"
- responses:
- 204:
- description: "no error"
- 500:
- description: "server error"
- schema:
- $ref: "#/definitions/ErrorResponse"
- parameters:
- - name: "remote"
- in: "query"
- description: |
- Remote reference for plugin to install.
-
- The `:latest` tag is optional, and is used as the default if omitted.
- required: true
- type: "string"
- - name: "name"
- in: "query"
- description: |
- Local name for the pulled plugin.
-
- The `:latest` tag is optional, and is used as the default if omitted.
- required: false
- type: "string"
- - name: "X-Registry-Auth"
- in: "header"
- description: |
- A base64url-encoded auth configuration to use when pulling a plugin
- from a registry.
-
- Refer to the [authentication section](#section/Authentication) for
- details.
- type: "string"
- - name: "body"
- in: "body"
- schema:
- type: "array"
- items:
- description: |
- Describes a permission accepted by the user upon installing the
- plugin.
- type: "object"
- properties:
- Name:
- type: "string"
- Description:
- type: "string"
- Value:
- type: "array"
- items:
- type: "string"
- example:
- - Name: "network"
- Description: ""
- Value:
- - "host"
- - Name: "mount"
- Description: ""
- Value:
- - "/data"
- - Name: "device"
- Description: ""
- Value:
- - "/dev/cpu_dma_latency"
- tags: ["Plugin"]
- /plugins/{name}/json:
- get:
- summary: "Inspect a plugin"
- operationId: "PluginInspect"
- responses:
- 200:
- description: "no error"
- schema:
- $ref: "#/definitions/Plugin"
- 404:
- description: "plugin is not installed"
- schema:
- $ref: "#/definitions/ErrorResponse"
- 500:
- description: "server error"
- schema:
- $ref: "#/definitions/ErrorResponse"
- parameters:
- - name: "name"
- in: "path"
- description: |
- The name of the plugin. The `:latest` tag is optional, and is the
- default if omitted.
- required: true
- type: "string"
- tags: ["Plugin"]
- /plugins/{name}:
- delete:
- summary: "Remove a plugin"
- operationId: "PluginDelete"
- responses:
- 200:
- description: "no error"
- schema:
- $ref: "#/definitions/Plugin"
- 404:
- description: "plugin is not installed"
- schema:
- $ref: "#/definitions/ErrorResponse"
- 500:
- description: "server error"
- schema:
- $ref: "#/definitions/ErrorResponse"
- parameters:
- - name: "name"
- in: "path"
- description: |
- The name of the plugin. The `:latest` tag is optional, and is the
- default if omitted.
- required: true
- type: "string"
- - name: "force"
- in: "query"
- description: |
- Disable the plugin before removing. This may result in issues if the
- plugin is in use by a container.
- type: "boolean"
- default: false
- tags: ["Plugin"]
- /plugins/{name}/enable:
- post:
- summary: "Enable a plugin"
- operationId: "PluginEnable"
- responses:
- 200:
- description: "no error"
- 404:
- description: "plugin is not installed"
- schema:
- $ref: "#/definitions/ErrorResponse"
- 500:
- description: "server error"
- schema:
- $ref: "#/definitions/ErrorResponse"
- parameters:
- - name: "name"
- in: "path"
- description: |
- The name of the plugin. The `:latest` tag is optional, and is the
- default if omitted.
- required: true
- type: "string"
- - name: "timeout"
- in: "query"
- description: "Set the HTTP client timeout (in seconds)"
- type: "integer"
- default: 0
- tags: ["Plugin"]
- /plugins/{name}/disable:
- post:
- summary: "Disable a plugin"
- operationId: "PluginDisable"
- responses:
- 200:
- description: "no error"
- 404:
- description: "plugin is not installed"
- schema:
- $ref: "#/definitions/ErrorResponse"
- 500:
- description: "server error"
- schema:
- $ref: "#/definitions/ErrorResponse"
- parameters:
- - name: "name"
- in: "path"
- description: |
- The name of the plugin. The `:latest` tag is optional, and is the
- default if omitted.
- required: true
- type: "string"
- tags: ["Plugin"]
- /plugins/{name}/upgrade:
- post:
- summary: "Upgrade a plugin"
- operationId: "PluginUpgrade"
- responses:
- 204:
- description: "no error"
- 404:
- description: "plugin not installed"
- schema:
- $ref: "#/definitions/ErrorResponse"
- 500:
- description: "server error"
- schema:
- $ref: "#/definitions/ErrorResponse"
- parameters:
- - name: "name"
- in: "path"
- description: |
- The name of the plugin. The `:latest` tag is optional, and is the
- default if omitted.
- required: true
- type: "string"
- - name: "remote"
- in: "query"
- description: |
- Remote reference to upgrade to.
-
- The `:latest` tag is optional, and is used as the default if omitted.
- required: true
- type: "string"
- - name: "X-Registry-Auth"
- in: "header"
- description: |
- A base64url-encoded auth configuration to use when pulling a plugin
- from a registry.
-
- Refer to the [authentication section](#section/Authentication) for
- details.
- type: "string"
- - name: "body"
- in: "body"
- schema:
- type: "array"
- items:
- description: |
- Describes a permission accepted by the user upon installing the
- plugin.
- type: "object"
- properties:
- Name:
- type: "string"
- Description:
- type: "string"
- Value:
- type: "array"
- items:
- type: "string"
- example:
- - Name: "network"
- Description: ""
- Value:
- - "host"
- - Name: "mount"
- Description: ""
- Value:
- - "/data"
- - Name: "device"
- Description: ""
- Value:
- - "/dev/cpu_dma_latency"
- tags: ["Plugin"]
- /plugins/create:
- post:
- summary: "Create a plugin"
- operationId: "PluginCreate"
- consumes:
- - "application/x-tar"
- responses:
- 204:
- description: "no error"
- 500:
- description: "server error"
- schema:
- $ref: "#/definitions/ErrorResponse"
- parameters:
- - name: "name"
- in: "query"
- description: |
- The name of the plugin. The `:latest` tag is optional, and is the
- default if omitted.
- required: true
- type: "string"
- - name: "tarContext"
- in: "body"
- description: "Path to tar containing plugin rootfs and manifest"
- schema:
- type: "string"
- format: "binary"
- tags: ["Plugin"]
- /plugins/{name}/push:
- post:
- summary: "Push a plugin"
- operationId: "PluginPush"
- description: |
- Push a plugin to the registry.
- parameters:
- - name: "name"
- in: "path"
- description: |
- The name of the plugin. The `:latest` tag is optional, and is the
- default if omitted.
- required: true
- type: "string"
- responses:
- 200:
- description: "no error"
- 404:
- description: "plugin not installed"
- schema:
- $ref: "#/definitions/ErrorResponse"
- 500:
- description: "server error"
- schema:
- $ref: "#/definitions/ErrorResponse"
- tags: ["Plugin"]
- /plugins/{name}/set:
- post:
- summary: "Configure a plugin"
- operationId: "PluginSet"
- consumes:
- - "application/json"
- parameters:
- - name: "name"
- in: "path"
- description: |
- The name of the plugin. The `:latest` tag is optional, and is the
- default if omitted.
- required: true
- type: "string"
- - name: "body"
- in: "body"
- schema:
- type: "array"
- items:
- type: "string"
- example: ["DEBUG=1"]
- responses:
- 204:
- description: "No error"
- 404:
- description: "Plugin not installed"
- schema:
- $ref: "#/definitions/ErrorResponse"
- 500:
- description: "Server error"
- schema:
- $ref: "#/definitions/ErrorResponse"
- tags: ["Plugin"]
- /nodes:
- get:
- summary: "List nodes"
- operationId: "NodeList"
- responses:
- 200:
- description: "no error"
- schema:
- type: "array"
- items:
- $ref: "#/definitions/Node"
- 500:
- description: "server error"
- schema:
- $ref: "#/definitions/ErrorResponse"
- 503:
- description: "node is not part of a swarm"
- schema:
- $ref: "#/definitions/ErrorResponse"
- parameters:
- - name: "filters"
- in: "query"
- description: |
- Filters to process on the nodes list, encoded as JSON (a `map[string][]string`).
-
- Available filters:
- - `id=<node id>`
- - `label=<engine label>`
- - `membership=`(`accepted`|`pending`)`
- - `name=<node name>`
- - `node.label=<node label>`
- - `role=`(`manager`|`worker`)`
- type: "string"
- tags: ["Node"]
- /nodes/{id}:
- get:
- summary: "Inspect a node"
- operationId: "NodeInspect"
- responses:
- 200:
- description: "no error"
- schema:
- $ref: "#/definitions/Node"
- 404:
- description: "no such node"
- schema:
- $ref: "#/definitions/ErrorResponse"
- 500:
- description: "server error"
- schema:
- $ref: "#/definitions/ErrorResponse"
- 503:
- description: "node is not part of a swarm"
- schema:
- $ref: "#/definitions/ErrorResponse"
- parameters:
- - name: "id"
- in: "path"
- description: "The ID or name of the node"
- type: "string"
- required: true
- tags: ["Node"]
- delete:
- summary: "Delete a node"
- operationId: "NodeDelete"
- responses:
- 200:
- description: "no error"
- 404:
- description: "no such node"
- schema:
- $ref: "#/definitions/ErrorResponse"
- 500:
- description: "server error"
- schema:
- $ref: "#/definitions/ErrorResponse"
- 503:
- description: "node is not part of a swarm"
- schema:
- $ref: "#/definitions/ErrorResponse"
- parameters:
- - name: "id"
- in: "path"
- description: "The ID or name of the node"
- type: "string"
- required: true
- - name: "force"
- in: "query"
- description: "Force remove a node from the swarm"
- default: false
- type: "boolean"
- tags: ["Node"]
- /nodes/{id}/update:
- post:
- summary: "Update a node"
- operationId: "NodeUpdate"
- responses:
- 200:
- description: "no error"
- 400:
- description: "bad parameter"
- schema:
- $ref: "#/definitions/ErrorResponse"
- 404:
- description: "no such node"
- schema:
- $ref: "#/definitions/ErrorResponse"
- 500:
- description: "server error"
- schema:
- $ref: "#/definitions/ErrorResponse"
- 503:
- description: "node is not part of a swarm"
- schema:
- $ref: "#/definitions/ErrorResponse"
- parameters:
- - name: "id"
- in: "path"
- description: "The ID of the node"
- type: "string"
- required: true
- - name: "body"
- in: "body"
- schema:
- $ref: "#/definitions/NodeSpec"
- - name: "version"
- in: "query"
- description: |
- The version number of the node object being updated. This is required
- to avoid conflicting writes.
- type: "integer"
- format: "int64"
- required: true
- tags: ["Node"]
- /swarm:
- get:
- summary: "Inspect swarm"
- operationId: "SwarmInspect"
- responses:
- 200:
- description: "no error"
- schema:
- $ref: "#/definitions/Swarm"
- 404:
- description: "no such swarm"
- schema:
- $ref: "#/definitions/ErrorResponse"
- 500:
- description: "server error"
- schema:
- $ref: "#/definitions/ErrorResponse"
- 503:
- description: "node is not part of a swarm"
- schema:
- $ref: "#/definitions/ErrorResponse"
- tags: ["Swarm"]
- /swarm/init:
- post:
- summary: "Initialize a new swarm"
- operationId: "SwarmInit"
- produces:
- - "application/json"
- - "text/plain"
- responses:
- 200:
- description: "no error"
- schema:
- description: "The node ID"
- type: "string"
- example: "7v2t30z9blmxuhnyo6s4cpenp"
- 400:
- description: "bad parameter"
- schema:
- $ref: "#/definitions/ErrorResponse"
- 500:
- description: "server error"
- schema:
- $ref: "#/definitions/ErrorResponse"
- 503:
- description: "node is already part of a swarm"
- schema:
- $ref: "#/definitions/ErrorResponse"
- parameters:
- - name: "body"
- in: "body"
- required: true
- schema:
- type: "object"
- properties:
- ListenAddr:
- description: |
- Listen address used for inter-manager communication, as well
- as determining the networking interface used for the VXLAN
- Tunnel Endpoint (VTEP). This can either be an address/port
- combination in the form `192.168.1.1:4567`, or an interface
- followed by a port number, like `eth0:4567`. If the port number
- is omitted, the default swarm listening port is used.
- type: "string"
- AdvertiseAddr:
- description: |
- Externally reachable address advertised to other nodes. This
- can either be an address/port combination in the form
- `192.168.1.1:4567`, or an interface followed by a port number,
- like `eth0:4567`. If the port number is omitted, the port
- number from the listen address is used. If `AdvertiseAddr` is
- not specified, it will be automatically detected when possible.
- type: "string"
- DataPathAddr:
- description: |
- Address or interface to use for data path traffic (format:
- `<ip|interface>`), for example, `192.168.1.1`, or an interface,
- like `eth0`. If `DataPathAddr` is unspecified, the same address
- as `AdvertiseAddr` is used.
-
- The `DataPathAddr` specifies the address that global scope
- network drivers will publish towards other nodes in order to
- reach the containers running on this node. Using this parameter
- it is possible to separate the container data traffic from the
- management traffic of the cluster.
- type: "string"
- DataPathPort:
- description: |
- DataPathPort specifies the data path port number for data traffic.
- Acceptable port range is 1024 to 49151.
- If no port is set or is set to 0, the default port (4789) is used.
- type: "integer"
- format: "uint32"
- DefaultAddrPool:
- description: |
- Default Address Pool specifies default subnet pools for global
- scope networks.
- type: "array"
- items:
- type: "string"
- example: ["10.10.0.0/16", "20.20.0.0/16"]
- ForceNewCluster:
- description: "Force creation of a new swarm."
- type: "boolean"
- SubnetSize:
- description: |
- SubnetSize specifies the subnet size of the networks created
- from the default subnet pool.
- type: "integer"
- format: "uint32"
- Spec:
- $ref: "#/definitions/SwarmSpec"
- example:
- ListenAddr: "0.0.0.0:2377"
- AdvertiseAddr: "192.168.1.1:2377"
- DataPathPort: 4789
- DefaultAddrPool: ["10.10.0.0/8", "20.20.0.0/8"]
- SubnetSize: 24
- ForceNewCluster: false
- Spec:
- Orchestration: {}
- Raft: {}
- Dispatcher: {}
- CAConfig: {}
- EncryptionConfig:
- AutoLockManagers: false
- tags: ["Swarm"]
- /swarm/join:
- post:
- summary: "Join an existing swarm"
- operationId: "SwarmJoin"
- responses:
- 200:
- description: "no error"
- 400:
- description: "bad parameter"
- schema:
- $ref: "#/definitions/ErrorResponse"
- 500:
- description: "server error"
- schema:
- $ref: "#/definitions/ErrorResponse"
- 503:
- description: "node is already part of a swarm"
- schema:
- $ref: "#/definitions/ErrorResponse"
- parameters:
- - name: "body"
- in: "body"
- required: true
- schema:
- type: "object"
- properties:
- ListenAddr:
- description: |
- Listen address used for inter-manager communication if the node
- gets promoted to manager, as well as determining the networking
- interface used for the VXLAN Tunnel Endpoint (VTEP).
- type: "string"
- AdvertiseAddr:
- description: |
- Externally reachable address advertised to other nodes. This
- can either be an address/port combination in the form
- `192.168.1.1:4567`, or an interface followed by a port number,
- like `eth0:4567`. If the port number is omitted, the port
- number from the listen address is used. If `AdvertiseAddr` is
- not specified, it will be automatically detected when possible.
- type: "string"
- DataPathAddr:
- description: |
- Address or interface to use for data path traffic (format:
- `<ip|interface>`), for example, `192.168.1.1`, or an interface,
- like `eth0`. If `DataPathAddr` is unspecified, the same address
- as `AdvertiseAddr` is used.
-
- The `DataPathAddr` specifies the address that global scope
- network drivers will publish towards other nodes in order to
- reach the containers running on this node. Using this parameter
- it is possible to separate the container data traffic from the
- management traffic of the cluster.
-
- type: "string"
- RemoteAddrs:
- description: |
- Addresses of manager nodes already participating in the swarm.
- type: "array"
- items:
- type: "string"
- JoinToken:
- description: "Secret token for joining this swarm."
- type: "string"
- example:
- ListenAddr: "0.0.0.0:2377"
- AdvertiseAddr: "192.168.1.1:2377"
- RemoteAddrs:
- - "node1:2377"
- JoinToken: "SWMTKN-1-3pu6hszjas19xyp7ghgosyx9k8atbfcr8p2is99znpy26u2lkl-7p73s1dx5in4tatdymyhg9hu2"
- tags: ["Swarm"]
- /swarm/leave:
- post:
- summary: "Leave a swarm"
- operationId: "SwarmLeave"
- responses:
- 200:
- description: "no error"
- 500:
- description: "server error"
- schema:
- $ref: "#/definitions/ErrorResponse"
- 503:
- description: "node is not part of a swarm"
- schema:
- $ref: "#/definitions/ErrorResponse"
- parameters:
- - name: "force"
- description: |
- Force leave the swarm, even if this is the last manager or if it will
- break the cluster.
- in: "query"
- type: "boolean"
- default: false
- tags: ["Swarm"]
- /swarm/update:
- post:
- summary: "Update a swarm"
- operationId: "SwarmUpdate"
- responses:
- 200:
- description: "no error"
- 400:
- description: "bad parameter"
- schema:
- $ref: "#/definitions/ErrorResponse"
- 500:
- description: "server error"
- schema:
- $ref: "#/definitions/ErrorResponse"
- 503:
- description: "node is not part of a swarm"
- schema:
- $ref: "#/definitions/ErrorResponse"
- parameters:
- - name: "body"
- in: "body"
- required: true
- schema:
- $ref: "#/definitions/SwarmSpec"
- - name: "version"
- in: "query"
- description: |
- The version number of the swarm object being updated. This is
- required to avoid conflicting writes.
- type: "integer"
- format: "int64"
- required: true
- - name: "rotateWorkerToken"
- in: "query"
- description: "Rotate the worker join token."
- type: "boolean"
- default: false
- - name: "rotateManagerToken"
- in: "query"
- description: "Rotate the manager join token."
- type: "boolean"
- default: false
- - name: "rotateManagerUnlockKey"
- in: "query"
- description: "Rotate the manager unlock key."
- type: "boolean"
- default: false
- tags: ["Swarm"]
- /swarm/unlockkey:
- get:
- summary: "Get the unlock key"
- operationId: "SwarmUnlockkey"
- consumes:
- - "application/json"
- responses:
- 200:
- description: "no error"
- schema:
- type: "object"
- title: "UnlockKeyResponse"
- properties:
- UnlockKey:
- description: "The swarm's unlock key."
- type: "string"
- example:
- UnlockKey: "SWMKEY-1-7c37Cc8654o6p38HnroywCi19pllOnGtbdZEgtKxZu8"
- 500:
- description: "server error"
- schema:
- $ref: "#/definitions/ErrorResponse"
- 503:
- description: "node is not part of a swarm"
- schema:
- $ref: "#/definitions/ErrorResponse"
- tags: ["Swarm"]
- /swarm/unlock:
- post:
- summary: "Unlock a locked manager"
- operationId: "SwarmUnlock"
- consumes:
- - "application/json"
- produces:
- - "application/json"
- parameters:
- - name: "body"
- in: "body"
- required: true
- schema:
- type: "object"
- properties:
- UnlockKey:
- description: "The swarm's unlock key."
- type: "string"
- example:
- UnlockKey: "SWMKEY-1-7c37Cc8654o6p38HnroywCi19pllOnGtbdZEgtKxZu8"
- responses:
- 200:
- description: "no error"
- 500:
- description: "server error"
- schema:
- $ref: "#/definitions/ErrorResponse"
- 503:
- description: "node is not part of a swarm"
- schema:
- $ref: "#/definitions/ErrorResponse"
- tags: ["Swarm"]
- /services:
- get:
- summary: "List services"
- operationId: "ServiceList"
- responses:
- 200:
- description: "no error"
- schema:
- type: "array"
- items:
- $ref: "#/definitions/Service"
- 500:
- description: "server error"
- schema:
- $ref: "#/definitions/ErrorResponse"
- 503:
- description: "node is not part of a swarm"
- schema:
- $ref: "#/definitions/ErrorResponse"
- parameters:
- - name: "filters"
- in: "query"
- type: "string"
- description: |
- A JSON encoded value of the filters (a `map[string][]string`) to
- process on the services list.
-
- Available filters:
-
- - `id=<service id>`
- - `label=<service label>`
- - `mode=["replicated"|"global"]`
- - `name=<service name>`
- - name: "status"
- in: "query"
- type: "boolean"
- description: |
- Include service status, with count of running and desired tasks.
- tags: ["Service"]
- /services/create:
- post:
- summary: "Create a service"
- operationId: "ServiceCreate"
- consumes:
- - "application/json"
- produces:
- - "application/json"
- responses:
- 201:
- description: "no error"
- schema:
- type: "object"
- title: "ServiceCreateResponse"
- properties:
- ID:
- description: "The ID of the created service."
- type: "string"
- Warning:
- description: "Optional warning message"
- type: "string"
- example:
- ID: "ak7w3gjqoa3kuz8xcpnyy0pvl"
- Warning: "unable to pin image doesnotexist:latest to digest: image library/doesnotexist:latest not found"
- 400:
- description: "bad parameter"
- schema:
- $ref: "#/definitions/ErrorResponse"
- 403:
- description: "network is not eligible for services"
- schema:
- $ref: "#/definitions/ErrorResponse"
- 409:
- description: "name conflicts with an existing service"
- schema:
- $ref: "#/definitions/ErrorResponse"
- 500:
- description: "server error"
- schema:
- $ref: "#/definitions/ErrorResponse"
- 503:
- description: "node is not part of a swarm"
- schema:
- $ref: "#/definitions/ErrorResponse"
- parameters:
- - name: "body"
- in: "body"
- required: true
- schema:
- allOf:
- - $ref: "#/definitions/ServiceSpec"
- - type: "object"
- example:
- Name: "web"
- TaskTemplate:
- ContainerSpec:
- Image: "nginx:alpine"
- Mounts:
- -
- ReadOnly: true
- Source: "web-data"
- Target: "/usr/share/nginx/html"
- Type: "volume"
- VolumeOptions:
- DriverConfig: {}
- Labels:
- com.example.something: "something-value"
- Hosts: ["10.10.10.10 host1", "ABCD:EF01:2345:6789:ABCD:EF01:2345:6789 host2"]
- User: "33"
- DNSConfig:
- Nameservers: ["8.8.8.8"]
- Search: ["example.org"]
- Options: ["timeout:3"]
- Secrets:
- -
- File:
- Name: "www.example.org.key"
- UID: "33"
- GID: "33"
- Mode: 384
- SecretID: "fpjqlhnwb19zds35k8wn80lq9"
- SecretName: "example_org_domain_key"
- LogDriver:
- Name: "json-file"
- Options:
- max-file: "3"
- max-size: "10M"
- Placement: {}
- Resources:
- Limits:
- MemoryBytes: 104857600
- Reservations: {}
- RestartPolicy:
- Condition: "on-failure"
- Delay: 10000000000
- MaxAttempts: 10
- Mode:
- Replicated:
- Replicas: 4
- UpdateConfig:
- Parallelism: 2
- Delay: 1000000000
- FailureAction: "pause"
- Monitor: 15000000000
- MaxFailureRatio: 0.15
- RollbackConfig:
- Parallelism: 1
- Delay: 1000000000
- FailureAction: "pause"
- Monitor: 15000000000
- MaxFailureRatio: 0.15
- EndpointSpec:
- Ports:
- -
- Protocol: "tcp"
- PublishedPort: 8080
- TargetPort: 80
- Labels:
- foo: "bar"
- - name: "X-Registry-Auth"
- in: "header"
- description: |
- A base64url-encoded auth configuration for pulling from private
- registries.
-
- Refer to the [authentication section](#section/Authentication) for
- details.
- type: "string"
- tags: ["Service"]
- /services/{id}:
- get:
- summary: "Inspect a service"
- operationId: "ServiceInspect"
- responses:
- 200:
- description: "no error"
- schema:
- $ref: "#/definitions/Service"
- 404:
- description: "no such service"
- schema:
- $ref: "#/definitions/ErrorResponse"
- 500:
- description: "server error"
- schema:
- $ref: "#/definitions/ErrorResponse"
- 503:
- description: "node is not part of a swarm"
- schema:
- $ref: "#/definitions/ErrorResponse"
- parameters:
- - name: "id"
- in: "path"
- description: "ID or name of service."
- required: true
- type: "string"
- - name: "insertDefaults"
- in: "query"
- description: "Fill empty fields with default values."
- type: "boolean"
- default: false
- tags: ["Service"]
- delete:
- summary: "Delete a service"
- operationId: "ServiceDelete"
- responses:
- 200:
- description: "no error"
- 404:
- description: "no such service"
- schema:
- $ref: "#/definitions/ErrorResponse"
- 500:
- description: "server error"
- schema:
- $ref: "#/definitions/ErrorResponse"
- 503:
- description: "node is not part of a swarm"
- schema:
- $ref: "#/definitions/ErrorResponse"
- parameters:
- - name: "id"
- in: "path"
- description: "ID or name of service."
- required: true
- type: "string"
- tags: ["Service"]
- /services/{id}/update:
- post:
- summary: "Update a service"
- operationId: "ServiceUpdate"
- consumes: ["application/json"]
- produces: ["application/json"]
- responses:
- 200:
- description: "no error"
- schema:
- $ref: "#/definitions/ServiceUpdateResponse"
- 400:
- description: "bad parameter"
- schema:
- $ref: "#/definitions/ErrorResponse"
- 404:
- description: "no such service"
- schema:
- $ref: "#/definitions/ErrorResponse"
- 500:
- description: "server error"
- schema:
- $ref: "#/definitions/ErrorResponse"
- 503:
- description: "node is not part of a swarm"
- schema:
- $ref: "#/definitions/ErrorResponse"
- parameters:
- - name: "id"
- in: "path"
- description: "ID or name of service."
- required: true
- type: "string"
- - name: "body"
- in: "body"
- required: true
- schema:
- allOf:
- - $ref: "#/definitions/ServiceSpec"
- - type: "object"
- example:
- Name: "top"
- TaskTemplate:
- ContainerSpec:
- Image: "busybox"
- Args:
- - "top"
- Resources:
- Limits: {}
- Reservations: {}
- RestartPolicy:
- Condition: "any"
- MaxAttempts: 0
- Placement: {}
- ForceUpdate: 0
- Mode:
- Replicated:
- Replicas: 1
- UpdateConfig:
- Parallelism: 2
- Delay: 1000000000
- FailureAction: "pause"
- Monitor: 15000000000
- MaxFailureRatio: 0.15
- RollbackConfig:
- Parallelism: 1
- Delay: 1000000000
- FailureAction: "pause"
- Monitor: 15000000000
- MaxFailureRatio: 0.15
- EndpointSpec:
- Mode: "vip"
-
- - name: "version"
- in: "query"
- description: |
- The version number of the service object being updated. This is
- required to avoid conflicting writes.
- This version number should be the value as currently set on the
- service *before* the update. You can find the current version by
- calling `GET /services/{id}`
- required: true
- type: "integer"
- - name: "registryAuthFrom"
- in: "query"
- description: |
- If the `X-Registry-Auth` header is not specified, this parameter
- indicates where to find registry authorization credentials.
- type: "string"
- enum: ["spec", "previous-spec"]
- default: "spec"
- - name: "rollback"
- in: "query"
- description: |
- Set this parameter to `previous` to cause a server-side rollback
- to the previous service spec. The supplied spec will be ignored in
- this case.
- type: "string"
- - name: "X-Registry-Auth"
- in: "header"
- description: |
- A base64url-encoded auth configuration for pulling from private
- registries.
-
- Refer to the [authentication section](#section/Authentication) for
- details.
- type: "string"
-
- tags: ["Service"]
- /services/{id}/logs:
- get:
- summary: "Get service logs"
- description: |
- Get `stdout` and `stderr` logs from a service. See also
- [`/containers/{id}/logs`](#operation/ContainerLogs).
-
- **Note**: This endpoint works only for services with the `local`,
- `json-file` or `journald` logging drivers.
- operationId: "ServiceLogs"
- responses:
- 200:
- description: "logs returned as a stream in response body"
- schema:
- type: "string"
- format: "binary"
- 404:
- description: "no such service"
- schema:
- $ref: "#/definitions/ErrorResponse"
- examples:
- application/json:
- message: "No such service: c2ada9df5af8"
- 500:
- description: "server error"
- schema:
- $ref: "#/definitions/ErrorResponse"
- 503:
- description: "node is not part of a swarm"
- schema:
- $ref: "#/definitions/ErrorResponse"
- parameters:
- - name: "id"
- in: "path"
- required: true
- description: "ID or name of the service"
- type: "string"
- - name: "details"
- in: "query"
- description: "Show service context and extra details provided to logs."
- type: "boolean"
- default: false
- - name: "follow"
- in: "query"
- description: "Keep connection after returning logs."
- type: "boolean"
- default: false
- - name: "stdout"
- in: "query"
- description: "Return logs from `stdout`"
- type: "boolean"
- default: false
- - name: "stderr"
- in: "query"
- description: "Return logs from `stderr`"
- type: "boolean"
- default: false
- - name: "since"
- in: "query"
- description: "Only return logs since this time, as a UNIX timestamp"
- type: "integer"
- default: 0
- - name: "timestamps"
- in: "query"
- description: "Add timestamps to every log line"
- type: "boolean"
- default: false
- - name: "tail"
- in: "query"
- description: |
- Only return this number of log lines from the end of the logs.
- Specify as an integer or `all` to output all log lines.
- type: "string"
- default: "all"
- tags: ["Service"]
- /tasks:
- get:
- summary: "List tasks"
- operationId: "TaskList"
- produces:
- - "application/json"
- responses:
- 200:
- description: "no error"
- schema:
- type: "array"
- items:
- $ref: "#/definitions/Task"
- example:
- - ID: "0kzzo1i0y4jz6027t0k7aezc7"
- Version:
- Index: 71
- CreatedAt: "2016-06-07T21:07:31.171892745Z"
- UpdatedAt: "2016-06-07T21:07:31.376370513Z"
- Spec:
- ContainerSpec:
- Image: "redis"
- Resources:
- Limits: {}
- Reservations: {}
- RestartPolicy:
- Condition: "any"
- MaxAttempts: 0
- Placement: {}
- ServiceID: "9mnpnzenvg8p8tdbtq4wvbkcz"
- Slot: 1
- NodeID: "60gvrl6tm78dmak4yl7srz94v"
- Status:
- Timestamp: "2016-06-07T21:07:31.290032978Z"
- State: "running"
- Message: "started"
- ContainerStatus:
- ContainerID: "e5d62702a1b48d01c3e02ca1e0212a250801fa8d67caca0b6f35919ebc12f035"
- PID: 677
- DesiredState: "running"
- NetworksAttachments:
- - Network:
- ID: "4qvuz4ko70xaltuqbt8956gd1"
- Version:
- Index: 18
- CreatedAt: "2016-06-07T20:31:11.912919752Z"
- UpdatedAt: "2016-06-07T21:07:29.955277358Z"
- Spec:
- Name: "ingress"
- Labels:
- com.docker.swarm.internal: "true"
- DriverConfiguration: {}
- IPAMOptions:
- Driver: {}
- Configs:
- - Subnet: "10.255.0.0/16"
- Gateway: "10.255.0.1"
- DriverState:
- Name: "overlay"
- Options:
- com.docker.network.driver.overlay.vxlanid_list: "256"
- IPAMOptions:
- Driver:
- Name: "default"
- Configs:
- - Subnet: "10.255.0.0/16"
- Gateway: "10.255.0.1"
- Addresses:
- - "10.255.0.10/16"
- - ID: "1yljwbmlr8er2waf8orvqpwms"
- Version:
- Index: 30
- CreatedAt: "2016-06-07T21:07:30.019104782Z"
- UpdatedAt: "2016-06-07T21:07:30.231958098Z"
- Name: "hopeful_cori"
- Spec:
- ContainerSpec:
- Image: "redis"
- Resources:
- Limits: {}
- Reservations: {}
- RestartPolicy:
- Condition: "any"
- MaxAttempts: 0
- Placement: {}
- ServiceID: "9mnpnzenvg8p8tdbtq4wvbkcz"
- Slot: 1
- NodeID: "60gvrl6tm78dmak4yl7srz94v"
- Status:
- Timestamp: "2016-06-07T21:07:30.202183143Z"
- State: "shutdown"
- Message: "shutdown"
- ContainerStatus:
- ContainerID: "1cf8d63d18e79668b0004a4be4c6ee58cddfad2dae29506d8781581d0688a213"
- DesiredState: "shutdown"
- NetworksAttachments:
- - Network:
- ID: "4qvuz4ko70xaltuqbt8956gd1"
- Version:
- Index: 18
- CreatedAt: "2016-06-07T20:31:11.912919752Z"
- UpdatedAt: "2016-06-07T21:07:29.955277358Z"
- Spec:
- Name: "ingress"
- Labels:
- com.docker.swarm.internal: "true"
- DriverConfiguration: {}
- IPAMOptions:
- Driver: {}
- Configs:
- - Subnet: "10.255.0.0/16"
- Gateway: "10.255.0.1"
- DriverState:
- Name: "overlay"
- Options:
- com.docker.network.driver.overlay.vxlanid_list: "256"
- IPAMOptions:
- Driver:
- Name: "default"
- Configs:
- - Subnet: "10.255.0.0/16"
- Gateway: "10.255.0.1"
- Addresses:
- - "10.255.0.5/16"
- 500:
- description: "server error"
- schema:
- $ref: "#/definitions/ErrorResponse"
- 503:
- description: "node is not part of a swarm"
- schema:
- $ref: "#/definitions/ErrorResponse"
- parameters:
- - name: "filters"
- in: "query"
- type: "string"
- description: |
- A JSON encoded value of the filters (a `map[string][]string`) to
- process on the tasks list.
-
- Available filters:
-
- - `desired-state=(running | shutdown | accepted)`
- - `id=<task id>`
- - `label=key` or `label="key=value"`
- - `name=<task name>`
- - `node=<node id or name>`
- - `service=<service name>`
- tags: ["Task"]
- /tasks/{id}:
- get:
- summary: "Inspect a task"
- operationId: "TaskInspect"
- produces:
- - "application/json"
- responses:
- 200:
- description: "no error"
- schema:
- $ref: "#/definitions/Task"
- 404:
- description: "no such task"
- schema:
- $ref: "#/definitions/ErrorResponse"
- 500:
- description: "server error"
- schema:
- $ref: "#/definitions/ErrorResponse"
- 503:
- description: "node is not part of a swarm"
- schema:
- $ref: "#/definitions/ErrorResponse"
- parameters:
- - name: "id"
- in: "path"
- description: "ID of the task"
- required: true
- type: "string"
- tags: ["Task"]
- /tasks/{id}/logs:
- get:
- summary: "Get task logs"
- description: |
- Get `stdout` and `stderr` logs from a task.
- See also [`/containers/{id}/logs`](#operation/ContainerLogs).
-
- **Note**: This endpoint works only for services with the `local`,
- `json-file` or `journald` logging drivers.
- operationId: "TaskLogs"
- responses:
- 200:
- description: "logs returned as a stream in response body"
- schema:
- type: "string"
- format: "binary"
- 404:
- description: "no such task"
- schema:
- $ref: "#/definitions/ErrorResponse"
- examples:
- application/json:
- message: "No such task: c2ada9df5af8"
- 500:
- description: "server error"
- schema:
- $ref: "#/definitions/ErrorResponse"
- 503:
- description: "node is not part of a swarm"
- schema:
- $ref: "#/definitions/ErrorResponse"
- parameters:
- - name: "id"
- in: "path"
- required: true
- description: "ID of the task"
- type: "string"
- - name: "details"
- in: "query"
- description: "Show task context and extra details provided to logs."
- type: "boolean"
- default: false
- - name: "follow"
- in: "query"
- description: "Keep connection after returning logs."
- type: "boolean"
- default: false
- - name: "stdout"
- in: "query"
- description: "Return logs from `stdout`"
- type: "boolean"
- default: false
- - name: "stderr"
- in: "query"
- description: "Return logs from `stderr`"
- type: "boolean"
- default: false
- - name: "since"
- in: "query"
- description: "Only return logs since this time, as a UNIX timestamp"
- type: "integer"
- default: 0
- - name: "timestamps"
- in: "query"
- description: "Add timestamps to every log line"
- type: "boolean"
- default: false
- - name: "tail"
- in: "query"
- description: |
- Only return this number of log lines from the end of the logs.
- Specify as an integer or `all` to output all log lines.
- type: "string"
- default: "all"
- tags: ["Task"]
- /secrets:
- get:
- summary: "List secrets"
- operationId: "SecretList"
- produces:
- - "application/json"
- responses:
- 200:
- description: "no error"
- schema:
- type: "array"
- items:
- $ref: "#/definitions/Secret"
- example:
- - ID: "blt1owaxmitz71s9v5zh81zun"
- Version:
- Index: 85
- CreatedAt: "2017-07-20T13:55:28.678958722Z"
- UpdatedAt: "2017-07-20T13:55:28.678958722Z"
- Spec:
- Name: "mysql-passwd"
- Labels:
- some.label: "some.value"
- Driver:
- Name: "secret-bucket"
- Options:
- OptionA: "value for driver option A"
- OptionB: "value for driver option B"
- - ID: "ktnbjxoalbkvbvedmg1urrz8h"
- Version:
- Index: 11
- CreatedAt: "2016-11-05T01:20:17.327670065Z"
- UpdatedAt: "2016-11-05T01:20:17.327670065Z"
- Spec:
- Name: "app-dev.crt"
- Labels:
- foo: "bar"
- 500:
- description: "server error"
- schema:
- $ref: "#/definitions/ErrorResponse"
- 503:
- description: "node is not part of a swarm"
- schema:
- $ref: "#/definitions/ErrorResponse"
- parameters:
- - name: "filters"
- in: "query"
- type: "string"
- description: |
- A JSON encoded value of the filters (a `map[string][]string`) to
- process on the secrets list.
-
- Available filters:
-
- - `id=<secret id>`
- - `label=<key> or label=<key>=value`
- - `name=<secret name>`
- - `names=<secret name>`
- tags: ["Secret"]
- /secrets/create:
- post:
- summary: "Create a secret"
- operationId: "SecretCreate"
- consumes:
- - "application/json"
- produces:
- - "application/json"
- responses:
- 201:
- description: "no error"
- schema:
- $ref: "#/definitions/IdResponse"
- 409:
- description: "name conflicts with an existing object"
- schema:
- $ref: "#/definitions/ErrorResponse"
- 500:
- description: "server error"
- schema:
- $ref: "#/definitions/ErrorResponse"
- 503:
- description: "node is not part of a swarm"
- schema:
- $ref: "#/definitions/ErrorResponse"
- parameters:
- - name: "body"
- in: "body"
- schema:
- allOf:
- - $ref: "#/definitions/SecretSpec"
- - type: "object"
- example:
- Name: "app-key.crt"
- Labels:
- foo: "bar"
- Data: "VEhJUyBJUyBOT1QgQSBSRUFMIENFUlRJRklDQVRFCg=="
- Driver:
- Name: "secret-bucket"
- Options:
- OptionA: "value for driver option A"
- OptionB: "value for driver option B"
- tags: ["Secret"]
- /secrets/{id}:
- get:
- summary: "Inspect a secret"
- operationId: "SecretInspect"
- produces:
- - "application/json"
- responses:
- 200:
- description: "no error"
- schema:
- $ref: "#/definitions/Secret"
- examples:
- application/json:
- ID: "ktnbjxoalbkvbvedmg1urrz8h"
- Version:
- Index: 11
- CreatedAt: "2016-11-05T01:20:17.327670065Z"
- UpdatedAt: "2016-11-05T01:20:17.327670065Z"
- Spec:
- Name: "app-dev.crt"
- Labels:
- foo: "bar"
- Driver:
- Name: "secret-bucket"
- Options:
- OptionA: "value for driver option A"
- OptionB: "value for driver option B"
-
- 404:
- description: "secret not found"
- schema:
- $ref: "#/definitions/ErrorResponse"
- 500:
- description: "server error"
- schema:
- $ref: "#/definitions/ErrorResponse"
- 503:
- description: "node is not part of a swarm"
- schema:
- $ref: "#/definitions/ErrorResponse"
- parameters:
- - name: "id"
- in: "path"
- required: true
- type: "string"
- description: "ID of the secret"
- tags: ["Secret"]
- delete:
- summary: "Delete a secret"
- operationId: "SecretDelete"
- produces:
- - "application/json"
- responses:
- 204:
- description: "no error"
- 404:
- description: "secret not found"
- schema:
- $ref: "#/definitions/ErrorResponse"
- 500:
- description: "server error"
- schema:
- $ref: "#/definitions/ErrorResponse"
- 503:
- description: "node is not part of a swarm"
- schema:
- $ref: "#/definitions/ErrorResponse"
- parameters:
- - name: "id"
- in: "path"
- required: true
- type: "string"
- description: "ID of the secret"
- tags: ["Secret"]
- /secrets/{id}/update:
- post:
- summary: "Update a Secret"
- operationId: "SecretUpdate"
- responses:
- 200:
- description: "no error"
- 400:
- description: "bad parameter"
- schema:
- $ref: "#/definitions/ErrorResponse"
- 404:
- description: "no such secret"
- schema:
- $ref: "#/definitions/ErrorResponse"
- 500:
- description: "server error"
- schema:
- $ref: "#/definitions/ErrorResponse"
- 503:
- description: "node is not part of a swarm"
- schema:
- $ref: "#/definitions/ErrorResponse"
- parameters:
- - name: "id"
- in: "path"
- description: "The ID or name of the secret"
- type: "string"
- required: true
- - name: "body"
- in: "body"
- schema:
- $ref: "#/definitions/SecretSpec"
- description: |
- The spec of the secret to update. Currently, only the Labels field
- can be updated. All other fields must remain unchanged from the
- [SecretInspect endpoint](#operation/SecretInspect) response values.
- - name: "version"
- in: "query"
- description: |
- The version number of the secret object being updated. This is
- required to avoid conflicting writes.
- type: "integer"
- format: "int64"
- required: true
- tags: ["Secret"]
- /configs:
- get:
- summary: "List configs"
- operationId: "ConfigList"
- produces:
- - "application/json"
- responses:
- 200:
- description: "no error"
- schema:
- type: "array"
- items:
- $ref: "#/definitions/Config"
- example:
- - ID: "ktnbjxoalbkvbvedmg1urrz8h"
- Version:
- Index: 11
- CreatedAt: "2016-11-05T01:20:17.327670065Z"
- UpdatedAt: "2016-11-05T01:20:17.327670065Z"
- Spec:
- Name: "server.conf"
- 500:
- description: "server error"
- schema:
- $ref: "#/definitions/ErrorResponse"
- 503:
- description: "node is not part of a swarm"
- schema:
- $ref: "#/definitions/ErrorResponse"
- parameters:
- - name: "filters"
- in: "query"
- type: "string"
- description: |
- A JSON encoded value of the filters (a `map[string][]string`) to
- process on the configs list.
-
- Available filters:
-
- - `id=<config id>`
- - `label=<key> or label=<key>=value`
- - `name=<config name>`
- - `names=<config name>`
- tags: ["Config"]
- /configs/create:
- post:
- summary: "Create a config"
- operationId: "ConfigCreate"
- consumes:
- - "application/json"
- produces:
- - "application/json"
- responses:
- 201:
- description: "no error"
- schema:
- $ref: "#/definitions/IdResponse"
- 409:
- description: "name conflicts with an existing object"
- schema:
- $ref: "#/definitions/ErrorResponse"
- 500:
- description: "server error"
- schema:
- $ref: "#/definitions/ErrorResponse"
- 503:
- description: "node is not part of a swarm"
- schema:
- $ref: "#/definitions/ErrorResponse"
- parameters:
- - name: "body"
- in: "body"
- schema:
- allOf:
- - $ref: "#/definitions/ConfigSpec"
- - type: "object"
- example:
- Name: "server.conf"
- Labels:
- foo: "bar"
- Data: "VEhJUyBJUyBOT1QgQSBSRUFMIENFUlRJRklDQVRFCg=="
- tags: ["Config"]
- /configs/{id}:
- get:
- summary: "Inspect a config"
- operationId: "ConfigInspect"
- produces:
- - "application/json"
- responses:
- 200:
- description: "no error"
- schema:
- $ref: "#/definitions/Config"
- examples:
- application/json:
- ID: "ktnbjxoalbkvbvedmg1urrz8h"
- Version:
- Index: 11
- CreatedAt: "2016-11-05T01:20:17.327670065Z"
- UpdatedAt: "2016-11-05T01:20:17.327670065Z"
- Spec:
- Name: "app-dev.crt"
- 404:
- description: "config not found"
- schema:
- $ref: "#/definitions/ErrorResponse"
- 500:
- description: "server error"
- schema:
- $ref: "#/definitions/ErrorResponse"
- 503:
- description: "node is not part of a swarm"
- schema:
- $ref: "#/definitions/ErrorResponse"
- parameters:
- - name: "id"
- in: "path"
- required: true
- type: "string"
- description: "ID of the config"
- tags: ["Config"]
- delete:
- summary: "Delete a config"
- operationId: "ConfigDelete"
- produces:
- - "application/json"
- responses:
- 204:
- description: "no error"
- 404:
- description: "config not found"
- schema:
- $ref: "#/definitions/ErrorResponse"
- 500:
- description: "server error"
- schema:
- $ref: "#/definitions/ErrorResponse"
- 503:
- description: "node is not part of a swarm"
- schema:
- $ref: "#/definitions/ErrorResponse"
- parameters:
- - name: "id"
- in: "path"
- required: true
- type: "string"
- description: "ID of the config"
- tags: ["Config"]
- /configs/{id}/update:
- post:
- summary: "Update a Config"
- operationId: "ConfigUpdate"
- responses:
- 200:
- description: "no error"
- 400:
- description: "bad parameter"
- schema:
- $ref: "#/definitions/ErrorResponse"
- 404:
- description: "no such config"
- schema:
- $ref: "#/definitions/ErrorResponse"
- 500:
- description: "server error"
- schema:
- $ref: "#/definitions/ErrorResponse"
- 503:
- description: "node is not part of a swarm"
- schema:
- $ref: "#/definitions/ErrorResponse"
- parameters:
- - name: "id"
- in: "path"
- description: "The ID or name of the config"
- type: "string"
- required: true
- - name: "body"
- in: "body"
- schema:
- $ref: "#/definitions/ConfigSpec"
- description: |
- The spec of the config to update. Currently, only the Labels field
- can be updated. All other fields must remain unchanged from the
- [ConfigInspect endpoint](#operation/ConfigInspect) response values.
- - name: "version"
- in: "query"
- description: |
- The version number of the config object being updated. This is
- required to avoid conflicting writes.
- type: "integer"
- format: "int64"
- required: true
- tags: ["Config"]
- /distribution/{name}/json:
- get:
- summary: "Get image information from the registry"
- description: |
- Return image digest and platform information by contacting the registry.
- operationId: "DistributionInspect"
- produces:
- - "application/json"
- responses:
- 200:
- description: "descriptor and platform information"
- schema:
- type: "object"
- x-go-name: DistributionInspect
- title: "DistributionInspectResponse"
- required: [Descriptor, Platforms]
- properties:
- Descriptor:
- type: "object"
- description: |
- A descriptor struct containing digest, media type, and size.
- properties:
- MediaType:
- type: "string"
- Size:
- type: "integer"
- format: "int64"
- Digest:
- type: "string"
- URLs:
- type: "array"
- items:
- type: "string"
- Platforms:
- type: "array"
- description: |
- An array containing all platforms supported by the image.
- items:
- type: "object"
- properties:
- Architecture:
- type: "string"
- OS:
- type: "string"
- OSVersion:
- type: "string"
- OSFeatures:
- type: "array"
- items:
- type: "string"
- Variant:
- type: "string"
- Features:
- type: "array"
- items:
- type: "string"
- examples:
- application/json:
- Descriptor:
- MediaType: "application/vnd.docker.distribution.manifest.v2+json"
- Digest: "sha256:c0537ff6a5218ef531ece93d4984efc99bbf3f7497c0a7726c88e2bb7584dc96"
- Size: 3987495
- URLs:
- - ""
- Platforms:
- - Architecture: "amd64"
- OS: "linux"
- OSVersion: ""
- OSFeatures:
- - ""
- Variant: ""
- Features:
- - ""
- 401:
- description: "Failed authentication or no image found"
- schema:
- $ref: "#/definitions/ErrorResponse"
- examples:
- application/json:
- message: "No such image: someimage (tag: latest)"
- 500:
- description: "Server error"
- schema:
- $ref: "#/definitions/ErrorResponse"
- parameters:
- - name: "name"
- in: "path"
- description: "Image name or id"
- type: "string"
- required: true
- tags: ["Distribution"]
- /session:
- post:
- summary: "Initialize interactive session"
- description: |
- Start a new interactive session with a server. A session allows the server
- to call back to the client for advanced capabilities.
-
- ### Hijacking
-
- This endpoint hijacks the HTTP connection to an HTTP/2 transport that allows
- the client to expose gRPC services on that connection.
-
- For example, the client sends this request to upgrade the connection:
-
- ```
- POST /session HTTP/1.1
- Upgrade: h2c
- Connection: Upgrade
- ```
-
- The Docker daemon responds with a `101 UPGRADED` response followed by
- the raw stream:
-
- ```
- HTTP/1.1 101 UPGRADED
- Connection: Upgrade
- Upgrade: h2c
- ```
- operationId: "Session"
- produces:
- - "application/vnd.docker.raw-stream"
- responses:
- 101:
- description: "no error, hijacking successful"
- 400:
- description: "bad parameter"
- schema:
- $ref: "#/definitions/ErrorResponse"
- 500:
- description: "server error"
- schema:
- $ref: "#/definitions/ErrorResponse"
- tags: ["Session"]
diff --git a/vendor/github.com/docker/docker/api/types/events/BUILD.bazel b/vendor/github.com/docker/docker/api/types/events/BUILD.bazel
deleted file mode 100644
index fa1b772e18..0000000000
--- a/vendor/github.com/docker/docker/api/types/events/BUILD.bazel
+++ /dev/null
@@ -1,9 +0,0 @@
-load("@io_bazel_rules_go//go:def.bzl", "go_library")
-
-go_library(
- name = "go_default_library",
- srcs = ["events.go"],
- importmap = "k8s.io/kops/vendor/github.com/docker/docker/api/types/events",
- importpath = "github.com/docker/docker/api/types/events",
- visibility = ["//visibility:public"],
-)
diff --git a/vendor/github.com/docker/docker/api/types/events/events.go b/vendor/github.com/docker/docker/api/types/events/events.go
deleted file mode 100644
index aa8fba8154..0000000000
--- a/vendor/github.com/docker/docker/api/types/events/events.go
+++ /dev/null
@@ -1,54 +0,0 @@
-package events // import "github.com/docker/docker/api/types/events"
-
-const (
- // BuilderEventType is the event type that the builder generates
- BuilderEventType = "builder"
- // ContainerEventType is the event type that containers generate
- ContainerEventType = "container"
- // DaemonEventType is the event type that the daemon generates
- DaemonEventType = "daemon"
- // ImageEventType is the event type that images generate
- ImageEventType = "image"
- // NetworkEventType is the event type that networks generate
- NetworkEventType = "network"
- // PluginEventType is the event type that plugins generate
- PluginEventType = "plugin"
- // VolumeEventType is the event type that volumes generate
- VolumeEventType = "volume"
- // ServiceEventType is the event type that services generate
- ServiceEventType = "service"
- // NodeEventType is the event type that nodes generate
- NodeEventType = "node"
- // SecretEventType is the event type that secrets generate
- SecretEventType = "secret"
- // ConfigEventType is the event type that configs generate
- ConfigEventType = "config"
-)
-
-// Actor describes something that generates events,
-// like a container, or a network, or a volume.
- // It has a defined name and a set of attributes.
-// The container attributes are its labels, other actors
-// can generate these attributes from other properties.
-type Actor struct {
- ID string
- Attributes map[string]string
-}
-
-// Message represents the information an event contains
-type Message struct {
- // Deprecated information from JSONMessage.
- // With data only in container events.
- Status string `json:"status,omitempty"`
- ID string `json:"id,omitempty"`
- From string `json:"from,omitempty"`
-
- Type string
- Action string
- Actor Actor
- // Engine events are local scope. Cluster events are swarm scope.
- Scope string `json:"scope,omitempty"`
-
- Time int64 `json:"time,omitempty"`
- TimeNano int64 `json:"timeNano,omitempty"`
-}
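The `Message` type above is what the client's `Events` call streams back; a minimal consumer sketch, assuming an initialized `*client.Client`:

```go
package example

import (
	"context"
	"fmt"

	"github.com/docker/docker/api/types"
	"github.com/docker/docker/client"
)

// watchEvents prints daemon events until the context is cancelled or the
// error channel reports a failure (including io.EOF when the stream closes).
func watchEvents(ctx context.Context, cli *client.Client) error {
	msgs, errs := cli.Events(ctx, types.EventsOptions{})
	for {
		select {
		case m := <-msgs:
			fmt.Printf("%s %s %s (scope=%s)\n", m.Type, m.Action, m.Actor.ID, m.Scope)
		case err := <-errs:
			return err
		case <-ctx.Done():
			return ctx.Err()
		}
	}
}
```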
diff --git a/vendor/github.com/docker/docker/api/types/image/BUILD.bazel b/vendor/github.com/docker/docker/api/types/image/BUILD.bazel
deleted file mode 100644
index 8ceab4d11e..0000000000
--- a/vendor/github.com/docker/docker/api/types/image/BUILD.bazel
+++ /dev/null
@@ -1,9 +0,0 @@
-load("@io_bazel_rules_go//go:def.bzl", "go_library")
-
-go_library(
- name = "go_default_library",
- srcs = ["image_history.go"],
- importmap = "k8s.io/kops/vendor/github.com/docker/docker/api/types/image",
- importpath = "github.com/docker/docker/api/types/image",
- visibility = ["//visibility:public"],
-)
diff --git a/vendor/github.com/docker/docker/api/types/image/image_history.go b/vendor/github.com/docker/docker/api/types/image/image_history.go
deleted file mode 100644
index e302bb0aeb..0000000000
--- a/vendor/github.com/docker/docker/api/types/image/image_history.go
+++ /dev/null
@@ -1,36 +0,0 @@
-package image // import "github.com/docker/docker/api/types/image"
-
-// ----------------------------------------------------------------------------
-// Code generated by `swagger generate operation`. DO NOT EDIT.
-//
-// See hack/generate-swagger-api.sh
-// ----------------------------------------------------------------------------
-
-// HistoryResponseItem individual image layer information in response to ImageHistory operation
-// swagger:model HistoryResponseItem
-type HistoryResponseItem struct {
-
- // comment
- // Required: true
- Comment string `json:"Comment"`
-
- // created
- // Required: true
- Created int64 `json:"Created"`
-
- // created by
- // Required: true
- CreatedBy string `json:"CreatedBy"`
-
- // Id
- // Required: true
- ID string `json:"Id"`
-
- // size
- // Required: true
- Size int64 `json:"Size"`
-
- // tags
- // Required: true
- Tags []string `json:"Tags"`
-}
diff --git a/vendor/github.com/docker/docker/api/types/time/BUILD.bazel b/vendor/github.com/docker/docker/api/types/time/BUILD.bazel
deleted file mode 100644
index 997af3fe2a..0000000000
--- a/vendor/github.com/docker/docker/api/types/time/BUILD.bazel
+++ /dev/null
@@ -1,12 +0,0 @@
-load("@io_bazel_rules_go//go:def.bzl", "go_library")
-
-go_library(
- name = "go_default_library",
- srcs = [
- "duration_convert.go",
- "timestamp.go",
- ],
- importmap = "k8s.io/kops/vendor/github.com/docker/docker/api/types/time",
- importpath = "github.com/docker/docker/api/types/time",
- visibility = ["//visibility:public"],
-)
diff --git a/vendor/github.com/docker/docker/api/types/time/duration_convert.go b/vendor/github.com/docker/docker/api/types/time/duration_convert.go
deleted file mode 100644
index 84b6f07322..0000000000
--- a/vendor/github.com/docker/docker/api/types/time/duration_convert.go
+++ /dev/null
@@ -1,12 +0,0 @@
-package time // import "github.com/docker/docker/api/types/time"
-
-import (
- "strconv"
- "time"
-)
-
- // DurationToSecondsString converts the specified duration to the number of
-// seconds it represents, formatted as a string.
-func DurationToSecondsString(duration time.Duration) string {
- return strconv.FormatFloat(duration.Seconds(), 'f', 0, 64)
-}
diff --git a/vendor/github.com/docker/docker/api/types/time/timestamp.go b/vendor/github.com/docker/docker/api/types/time/timestamp.go
deleted file mode 100644
index ea3495efeb..0000000000
--- a/vendor/github.com/docker/docker/api/types/time/timestamp.go
+++ /dev/null
@@ -1,129 +0,0 @@
-package time // import "github.com/docker/docker/api/types/time"
-
-import (
- "fmt"
- "math"
- "strconv"
- "strings"
- "time"
-)
-
-// These are additional predefined layouts for use in Time.Format and Time.Parse
-// with --since and --until parameters for `docker logs` and `docker events`
-const (
- rFC3339Local = "2006-01-02T15:04:05" // RFC3339 with local timezone
- rFC3339NanoLocal = "2006-01-02T15:04:05.999999999" // RFC3339Nano with local timezone
- dateWithZone = "2006-01-02Z07:00" // RFC3339 with time at 00:00:00
- dateLocal = "2006-01-02" // RFC3339 with local timezone and time at 00:00:00
-)
-
-// GetTimestamp tries to parse given string as golang duration,
-// then RFC3339 time and finally as a Unix timestamp. If
-// any of these were successful, it returns a Unix timestamp
-// as string otherwise returns the given value back.
-// In case of duration input, the returned timestamp is computed
-// as the given reference time minus the amount of the duration.
-func GetTimestamp(value string, reference time.Time) (string, error) {
- if d, err := time.ParseDuration(value); value != "0" && err == nil {
- return strconv.FormatInt(reference.Add(-d).Unix(), 10), nil
- }
-
- var format string
- // if the string has a Z or a + or three dashes use parse otherwise use parseinlocation
- parseInLocation := !(strings.ContainsAny(value, "zZ+") || strings.Count(value, "-") == 3)
-
- if strings.Contains(value, ".") {
- if parseInLocation {
- format = rFC3339NanoLocal
- } else {
- format = time.RFC3339Nano
- }
- } else if strings.Contains(value, "T") {
- // we want the number of colons in the T portion of the timestamp
- tcolons := strings.Count(value, ":")
- // if parseInLocation is off and we have a +/- zone offset (not Z) then
- // there will be an extra colon in the input for the tz offset subtract that
- // colon from the tcolons count
- if !parseInLocation && !strings.ContainsAny(value, "zZ") && tcolons > 0 {
- tcolons--
- }
- if parseInLocation {
- switch tcolons {
- case 0:
- format = "2006-01-02T15"
- case 1:
- format = "2006-01-02T15:04"
- default:
- format = rFC3339Local
- }
- } else {
- switch tcolons {
- case 0:
- format = "2006-01-02T15Z07:00"
- case 1:
- format = "2006-01-02T15:04Z07:00"
- default:
- format = time.RFC3339
- }
- }
- } else if parseInLocation {
- format = dateLocal
- } else {
- format = dateWithZone
- }
-
- var t time.Time
- var err error
-
- if parseInLocation {
- t, err = time.ParseInLocation(format, value, time.FixedZone(reference.Zone()))
- } else {
- t, err = time.Parse(format, value)
- }
-
- if err != nil {
- // if there is a `-` then it's an RFC3339 like timestamp
- if strings.Contains(value, "-") {
- return "", err // was probably an RFC3339 like timestamp but the parser failed with an error
- }
- if _, _, err := parseTimestamp(value); err != nil {
- return "", fmt.Errorf("failed to parse value as time or duration: %q", value)
- }
- return value, nil // unix timestamp in and out case (meaning: the value passed at the command line is already in the right format for passing to the server)
- }
-
- return fmt.Sprintf("%d.%09d", t.Unix(), int64(t.Nanosecond())), nil
-}
-
-// ParseTimestamps returns seconds and nanoseconds from a timestamp that has the
-// format "%d.%09d", time.Unix(), int64(time.Nanosecond()))
-// if the incoming nanosecond portion is longer or shorter than 9 digits it is
-// converted to nanoseconds. The expectation is that the seconds and
- // nanoseconds will be used to create a time variable. For example:
-// seconds, nanoseconds, err := ParseTimestamp("1136073600.000000001",0)
-// if err == nil since := time.Unix(seconds, nanoseconds)
-// returns seconds as def(aultSeconds) if value == ""
-func ParseTimestamps(value string, def int64) (int64, int64, error) {
- if value == "" {
- return def, 0, nil
- }
- return parseTimestamp(value)
-}
-
-func parseTimestamp(value string) (int64, int64, error) {
- sa := strings.SplitN(value, ".", 2)
- s, err := strconv.ParseInt(sa[0], 10, 64)
- if err != nil {
- return s, 0, err
- }
- if len(sa) != 2 {
- return s, 0, nil
- }
- n, err := strconv.ParseInt(sa[1], 10, 64)
- if err != nil {
- return s, n, err
- }
- // should already be in nanoseconds but just in case convert n to nanoseconds
- n = int64(float64(n) * math.Pow(float64(10), float64(9-len(sa[1]))))
- return s, n, nil
-}
diff --git a/vendor/github.com/docker/docker/api/types/volume/BUILD.bazel b/vendor/github.com/docker/docker/api/types/volume/BUILD.bazel
deleted file mode 100644
index 4321aa2cee..0000000000
--- a/vendor/github.com/docker/docker/api/types/volume/BUILD.bazel
+++ /dev/null
@@ -1,13 +0,0 @@
-load("@io_bazel_rules_go//go:def.bzl", "go_library")
-
-go_library(
- name = "go_default_library",
- srcs = [
- "volume_create.go",
- "volume_list.go",
- ],
- importmap = "k8s.io/kops/vendor/github.com/docker/docker/api/types/volume",
- importpath = "github.com/docker/docker/api/types/volume",
- visibility = ["//visibility:public"],
- deps = ["//vendor/github.com/docker/docker/api/types:go_default_library"],
-)
diff --git a/vendor/github.com/docker/docker/api/types/volume/volume_create.go b/vendor/github.com/docker/docker/api/types/volume/volume_create.go
deleted file mode 100644
index 8538078dd6..0000000000
--- a/vendor/github.com/docker/docker/api/types/volume/volume_create.go
+++ /dev/null
@@ -1,31 +0,0 @@
-package volume // import "github.com/docker/docker/api/types/volume"
-
-// ----------------------------------------------------------------------------
-// Code generated by `swagger generate operation`. DO NOT EDIT.
-//
-// See hack/generate-swagger-api.sh
-// ----------------------------------------------------------------------------
-
-// VolumeCreateBody Volume configuration
-// swagger:model VolumeCreateBody
-type VolumeCreateBody struct {
-
- // Name of the volume driver to use.
- // Required: true
- Driver string `json:"Driver"`
-
- // A mapping of driver options and values. These options are
- // passed directly to the driver and are driver specific.
- //
- // Required: true
- DriverOpts map[string]string `json:"DriverOpts"`
-
- // User-defined key/value metadata.
- // Required: true
- Labels map[string]string `json:"Labels"`
-
- // The new volume's name. If not specified, Docker generates a name.
- //
- // Required: true
- Name string `json:"Name"`
-}
diff --git a/vendor/github.com/docker/docker/api/types/volume/volume_list.go b/vendor/github.com/docker/docker/api/types/volume/volume_list.go
deleted file mode 100644
index be06179bf4..0000000000
--- a/vendor/github.com/docker/docker/api/types/volume/volume_list.go
+++ /dev/null
@@ -1,23 +0,0 @@
-package volume // import "github.com/docker/docker/api/types/volume"
-
-// ----------------------------------------------------------------------------
-// Code generated by `swagger generate operation`. DO NOT EDIT.
-//
-// See hack/generate-swagger-api.sh
-// ----------------------------------------------------------------------------
-
-import "github.com/docker/docker/api/types"
-
-// VolumeListOKBody Volume list response
-// swagger:model VolumeListOKBody
-type VolumeListOKBody struct {
-
- // List of volumes
- // Required: true
- Volumes []*types.Volume `json:"Volumes"`
-
- // Warnings that occurred when fetching the list of volumes.
- //
- // Required: true
- Warnings []string `json:"Warnings"`
-}
diff --git a/vendor/github.com/docker/docker/client/BUILD.bazel b/vendor/github.com/docker/docker/client/BUILD.bazel
deleted file mode 100644
index 6bc7127ba8..0000000000
--- a/vendor/github.com/docker/docker/client/BUILD.bazel
+++ /dev/null
@@ -1,147 +0,0 @@
-load("@io_bazel_rules_go//go:def.bzl", "go_library")
-
-go_library(
- name = "go_default_library",
- srcs = [
- "build_cancel.go",
- "build_prune.go",
- "checkpoint_create.go",
- "checkpoint_delete.go",
- "checkpoint_list.go",
- "client.go",
- "client_deprecated.go",
- "client_unix.go",
- "client_windows.go",
- "config_create.go",
- "config_inspect.go",
- "config_list.go",
- "config_remove.go",
- "config_update.go",
- "container_attach.go",
- "container_commit.go",
- "container_copy.go",
- "container_create.go",
- "container_diff.go",
- "container_exec.go",
- "container_export.go",
- "container_inspect.go",
- "container_kill.go",
- "container_list.go",
- "container_logs.go",
- "container_pause.go",
- "container_prune.go",
- "container_remove.go",
- "container_rename.go",
- "container_resize.go",
- "container_restart.go",
- "container_start.go",
- "container_stats.go",
- "container_stop.go",
- "container_top.go",
- "container_unpause.go",
- "container_update.go",
- "container_wait.go",
- "disk_usage.go",
- "distribution_inspect.go",
- "errors.go",
- "events.go",
- "hijack.go",
- "image_build.go",
- "image_create.go",
- "image_history.go",
- "image_import.go",
- "image_inspect.go",
- "image_list.go",
- "image_load.go",
- "image_prune.go",
- "image_pull.go",
- "image_push.go",
- "image_remove.go",
- "image_save.go",
- "image_search.go",
- "image_tag.go",
- "info.go",
- "interface.go",
- "interface_experimental.go",
- "interface_stable.go",
- "login.go",
- "network_connect.go",
- "network_create.go",
- "network_disconnect.go",
- "network_inspect.go",
- "network_list.go",
- "network_prune.go",
- "network_remove.go",
- "node_inspect.go",
- "node_list.go",
- "node_remove.go",
- "node_update.go",
- "options.go",
- "ping.go",
- "plugin_create.go",
- "plugin_disable.go",
- "plugin_enable.go",
- "plugin_inspect.go",
- "plugin_install.go",
- "plugin_list.go",
- "plugin_push.go",
- "plugin_remove.go",
- "plugin_set.go",
- "plugin_upgrade.go",
- "request.go",
- "secret_create.go",
- "secret_inspect.go",
- "secret_list.go",
- "secret_remove.go",
- "secret_update.go",
- "service_create.go",
- "service_inspect.go",
- "service_list.go",
- "service_logs.go",
- "service_remove.go",
- "service_update.go",
- "swarm_get_unlock_key.go",
- "swarm_init.go",
- "swarm_inspect.go",
- "swarm_join.go",
- "swarm_leave.go",
- "swarm_unlock.go",
- "swarm_update.go",
- "task_inspect.go",
- "task_list.go",
- "task_logs.go",
- "transport.go",
- "utils.go",
- "version.go",
- "volume_create.go",
- "volume_inspect.go",
- "volume_list.go",
- "volume_prune.go",
- "volume_remove.go",
- ],
- importmap = "k8s.io/kops/vendor/github.com/docker/docker/client",
- importpath = "github.com/docker/docker/client",
- visibility = ["//visibility:public"],
- deps = [
- "//vendor/github.com/containerd/containerd/platforms:go_default_library",
- "//vendor/github.com/docker/distribution/reference:go_default_library",
- "//vendor/github.com/docker/docker/api:go_default_library",
- "//vendor/github.com/docker/docker/api/types:go_default_library",
- "//vendor/github.com/docker/docker/api/types/container:go_default_library",
- "//vendor/github.com/docker/docker/api/types/events:go_default_library",
- "//vendor/github.com/docker/docker/api/types/filters:go_default_library",
- "//vendor/github.com/docker/docker/api/types/image:go_default_library",
- "//vendor/github.com/docker/docker/api/types/network:go_default_library",
- "//vendor/github.com/docker/docker/api/types/registry:go_default_library",
- "//vendor/github.com/docker/docker/api/types/swarm:go_default_library",
- "//vendor/github.com/docker/docker/api/types/time:go_default_library",
- "//vendor/github.com/docker/docker/api/types/versions:go_default_library",
- "//vendor/github.com/docker/docker/api/types/volume:go_default_library",
- "//vendor/github.com/docker/docker/errdefs:go_default_library",
- "//vendor/github.com/docker/go-connections/sockets:go_default_library",
- "//vendor/github.com/docker/go-connections/tlsconfig:go_default_library",
- "//vendor/github.com/opencontainers/go-digest:go_default_library",
- "//vendor/github.com/opencontainers/image-spec/specs-go/v1:go_default_library",
- "//vendor/github.com/pkg/errors:go_default_library",
- ],
-)
diff --git a/vendor/github.com/docker/docker/client/README.md b/vendor/github.com/docker/docker/client/README.md
deleted file mode 100644
index 992f18117d..0000000000
--- a/vendor/github.com/docker/docker/client/README.md
+++ /dev/null
@@ -1,35 +0,0 @@
-# Go client for the Docker Engine API
-
-The `docker` command uses this package to communicate with the daemon. It can also be used by your own Go applications to do anything the command-line interface does – running containers, pulling images, managing swarms, etc.
-
-For example, to list running containers (the equivalent of `docker ps`):
-
-```go
-package main
-
-import (
- "context"
- "fmt"
-
- "github.com/docker/docker/api/types"
- "github.com/docker/docker/client"
-)
-
-func main() {
- cli, err := client.NewClientWithOpts(client.FromEnv)
- if err != nil {
- panic(err)
- }
-
- containers, err := cli.ContainerList(context.Background(), types.ContainerListOptions{})
- if err != nil {
- panic(err)
- }
-
- for _, container := range containers {
- fmt.Printf("%s %s\n", container.ID[:10], container.Image)
- }
-}
-```
-
-[Full documentation is available on GoDoc.](https://godoc.org/github.com/docker/docker/client)
diff --git a/vendor/github.com/docker/docker/client/build_cancel.go b/vendor/github.com/docker/docker/client/build_cancel.go
deleted file mode 100644
index 3aae43e3d1..0000000000
--- a/vendor/github.com/docker/docker/client/build_cancel.go
+++ /dev/null
@@ -1,16 +0,0 @@
-package client // import "github.com/docker/docker/client"
-
-import (
- "context"
- "net/url"
-)
-
-// BuildCancel requests the daemon to cancel ongoing build request
-func (cli *Client) BuildCancel(ctx context.Context, id string) error {
- query := url.Values{}
- query.Set("id", id)
-
- serverResp, err := cli.post(ctx, "/build/cancel", query, nil, nil)
- ensureReaderClosed(serverResp)
- return err
-}
diff --git a/vendor/github.com/docker/docker/client/build_prune.go b/vendor/github.com/docker/docker/client/build_prune.go
deleted file mode 100644
index 397d67cdcf..0000000000
--- a/vendor/github.com/docker/docker/client/build_prune.go
+++ /dev/null
@@ -1,45 +0,0 @@
-package client // import "github.com/docker/docker/client"
-
-import (
- "context"
- "encoding/json"
- "fmt"
- "net/url"
-
- "github.com/docker/docker/api/types"
- "github.com/docker/docker/api/types/filters"
- "github.com/pkg/errors"
-)
-
-// BuildCachePrune requests the daemon to delete unused cache data
-func (cli *Client) BuildCachePrune(ctx context.Context, opts types.BuildCachePruneOptions) (*types.BuildCachePruneReport, error) {
- if err := cli.NewVersionError("1.31", "build prune"); err != nil {
- return nil, err
- }
-
- report := types.BuildCachePruneReport{}
-
- query := url.Values{}
- if opts.All {
- query.Set("all", "1")
- }
- query.Set("keep-storage", fmt.Sprintf("%d", opts.KeepStorage))
- filters, err := filters.ToJSON(opts.Filters)
- if err != nil {
- return nil, errors.Wrap(err, "prune could not marshal filters option")
- }
- query.Set("filters", filters)
-
- serverResp, err := cli.post(ctx, "/build/prune", query, nil, nil)
- defer ensureReaderClosed(serverResp)
-
- if err != nil {
- return nil, err
- }
-
- if err := json.NewDecoder(serverResp.body).Decode(&report); err != nil {
- return nil, fmt.Errorf("Error retrieving disk usage: %v", err)
- }
-
- return &report, nil
-}
diff --git a/vendor/github.com/docker/docker/client/checkpoint_create.go b/vendor/github.com/docker/docker/client/checkpoint_create.go
deleted file mode 100644
index 921024fe4f..0000000000
--- a/vendor/github.com/docker/docker/client/checkpoint_create.go
+++ /dev/null
@@ -1,14 +0,0 @@
-package client // import "github.com/docker/docker/client"
-
-import (
- "context"
-
- "github.com/docker/docker/api/types"
-)
-
-// CheckpointCreate creates a checkpoint from the given container with the given name
-func (cli *Client) CheckpointCreate(ctx context.Context, container string, options types.CheckpointCreateOptions) error {
- resp, err := cli.post(ctx, "/containers/"+container+"/checkpoints", nil, options, nil)
- ensureReaderClosed(resp)
- return err
-}
diff --git a/vendor/github.com/docker/docker/client/checkpoint_delete.go b/vendor/github.com/docker/docker/client/checkpoint_delete.go
deleted file mode 100644
index 54f55fa76e..0000000000
--- a/vendor/github.com/docker/docker/client/checkpoint_delete.go
+++ /dev/null
@@ -1,20 +0,0 @@
-package client // import "github.com/docker/docker/client"
-
-import (
- "context"
- "net/url"
-
- "github.com/docker/docker/api/types"
-)
-
-// CheckpointDelete deletes the checkpoint with the given name from the given container
-func (cli *Client) CheckpointDelete(ctx context.Context, containerID string, options types.CheckpointDeleteOptions) error {
- query := url.Values{}
- if options.CheckpointDir != "" {
- query.Set("dir", options.CheckpointDir)
- }
-
- resp, err := cli.delete(ctx, "/containers/"+containerID+"/checkpoints/"+options.CheckpointID, query, nil)
- ensureReaderClosed(resp)
- return err
-}
diff --git a/vendor/github.com/docker/docker/client/checkpoint_list.go b/vendor/github.com/docker/docker/client/checkpoint_list.go
deleted file mode 100644
index 66d46dd161..0000000000
--- a/vendor/github.com/docker/docker/client/checkpoint_list.go
+++ /dev/null
@@ -1,28 +0,0 @@
-package client // import "github.com/docker/docker/client"
-
-import (
- "context"
- "encoding/json"
- "net/url"
-
- "github.com/docker/docker/api/types"
-)
-
-// CheckpointList returns the checkpoints of the given container in the docker host
-func (cli *Client) CheckpointList(ctx context.Context, container string, options types.CheckpointListOptions) ([]types.Checkpoint, error) {
- var checkpoints []types.Checkpoint
-
- query := url.Values{}
- if options.CheckpointDir != "" {
- query.Set("dir", options.CheckpointDir)
- }
-
- resp, err := cli.get(ctx, "/containers/"+container+"/checkpoints", query, nil)
- defer ensureReaderClosed(resp)
- if err != nil {
- return checkpoints, wrapResponseError(err, resp, "container", container)
- }
-
- err = json.NewDecoder(resp.body).Decode(&checkpoints)
- return checkpoints, err
-}
diff --git a/vendor/github.com/docker/docker/client/client.go b/vendor/github.com/docker/docker/client/client.go
deleted file mode 100644
index 21edf1fa1f..0000000000
--- a/vendor/github.com/docker/docker/client/client.go
+++ /dev/null
@@ -1,310 +0,0 @@
-/*
-Package client is a Go client for the Docker Engine API.
-
-For more information about the Engine API, see the documentation:
-https://docs.docker.com/engine/api/
-
-Usage
-
-You use the library by creating a client object and calling methods on it. The
-client can be created either from environment variables with NewClientWithOpts(client.FromEnv),
-or configured manually with NewClient().
-
-For example, to list running containers (the equivalent of "docker ps"):
-
- package main
-
- import (
- "context"
- "fmt"
-
- "github.com/docker/docker/api/types"
- "github.com/docker/docker/client"
- )
-
- func main() {
- cli, err := client.NewClientWithOpts(client.FromEnv)
- if err != nil {
- panic(err)
- }
-
- containers, err := cli.ContainerList(context.Background(), types.ContainerListOptions{})
- if err != nil {
- panic(err)
- }
-
- for _, container := range containers {
- fmt.Printf("%s %s\n", container.ID[:10], container.Image)
- }
- }
-
-*/
-package client // import "github.com/docker/docker/client"
-
-import (
- "context"
- "fmt"
- "net"
- "net/http"
- "net/url"
- "path"
- "strings"
-
- "github.com/docker/docker/api"
- "github.com/docker/docker/api/types"
- "github.com/docker/docker/api/types/versions"
- "github.com/docker/go-connections/sockets"
- "github.com/pkg/errors"
-)
-
-// ErrRedirect is the error returned by checkRedirect when the request is non-GET.
-var ErrRedirect = errors.New("unexpected redirect in response")
-
-// Client is the API client that performs all operations
-// against a docker server.
-type Client struct {
- // scheme sets the scheme for the client
- scheme string
- // host holds the server address to connect to
- host string
- // proto holds the client protocol i.e. unix.
- proto string
- // addr holds the client address.
- addr string
- // basePath holds the path to prepend to the requests.
- basePath string
- // client used to send and receive http requests.
- client *http.Client
- // version of the server to talk to.
- version string
- // custom http headers configured by users.
- customHTTPHeaders map[string]string
- // manualOverride is set to true when the version was set by users.
- manualOverride bool
-
- // negotiateVersion indicates if the client should automatically negotiate
- // the API version to use when making requests. API version negotiation is
- // performed on the first request, after which negotiated is set to "true"
- // so that subsequent requests do not re-negotiate.
- negotiateVersion bool
-
- // negotiated indicates that API version negotiation took place
- negotiated bool
-}
-
-// CheckRedirect specifies the policy for dealing with redirect responses:
-// If the request is non-GET return `ErrRedirect`. Otherwise use the last response.
-//
-// Go 1.8 changed the behavior for HTTP redirects (specifically 301, 307, and 308) in the client.
-// The Docker client (and by extension the Docker API client) can be made to send a request
-// like POST /containers//start where what would normally be in the name section of the URL is empty.
-// This triggers an HTTP 301 from the daemon.
-// In Go 1.8 this 301 is converted to a GET request, which ends up getting a 404 from the daemon.
-// Previously the 301 was not followed and the client did not generate an error; now the client
-// returns a message like "Error response from daemon: page not found".
-func CheckRedirect(req *http.Request, via []*http.Request) error {
- if via[0].Method == http.MethodGet {
- return http.ErrUseLastResponse
- }
- return ErrRedirect
-}
-
-// NewClientWithOpts initializes a new API client with default values. It takes functors
-// to modify values when creating it, like `NewClientWithOpts(WithVersion(…))`
-// It also initializes the custom http headers to add to each request.
-//
-// It won't send any version information if the version number is empty. It is
-// highly recommended that you set a version or your client may break if the
-// server is upgraded.
-func NewClientWithOpts(ops ...Opt) (*Client, error) {
- client, err := defaultHTTPClient(DefaultDockerHost)
- if err != nil {
- return nil, err
- }
- c := &Client{
- host: DefaultDockerHost,
- version: api.DefaultVersion,
- client: client,
- proto: defaultProto,
- addr: defaultAddr,
- }
-
- for _, op := range ops {
- if err := op(c); err != nil {
- return nil, err
- }
- }
-
- if _, ok := c.client.Transport.(http.RoundTripper); !ok {
- return nil, fmt.Errorf("unable to verify TLS configuration, invalid transport %v", c.client.Transport)
- }
- if c.scheme == "" {
- c.scheme = "http"
-
- tlsConfig := resolveTLSConfig(c.client.Transport)
- if tlsConfig != nil {
- // TODO(stevvooe): This isn't really the right way to write clients in Go.
- // `NewClient` should probably only take an `*http.Client` and work from there.
- // Unfortunately, the model of having a host-ish/url-thingy as the connection
- // string has us confusing protocol and transport layers. We continue doing
- // this to avoid breaking existing clients but this should be addressed.
- c.scheme = "https"
- }
- }
-
- return c, nil
-}
-
-func defaultHTTPClient(host string) (*http.Client, error) {
- url, err := ParseHostURL(host)
- if err != nil {
- return nil, err
- }
- transport := new(http.Transport)
- sockets.ConfigureTransport(transport, url.Scheme, url.Host)
- return &http.Client{
- Transport: transport,
- CheckRedirect: CheckRedirect,
- }, nil
-}
-
-// Close the transport used by the client
-func (cli *Client) Close() error {
- if t, ok := cli.client.Transport.(*http.Transport); ok {
- t.CloseIdleConnections()
- }
- return nil
-}
-
-// getAPIPath returns the versioned request path to call the api.
-// It appends the query parameters to the path if they are not empty.
-func (cli *Client) getAPIPath(ctx context.Context, p string, query url.Values) string {
- var apiPath string
- if cli.negotiateVersion && !cli.negotiated {
- cli.NegotiateAPIVersion(ctx)
- }
- if cli.version != "" {
- v := strings.TrimPrefix(cli.version, "v")
- apiPath = path.Join(cli.basePath, "/v"+v, p)
- } else {
- apiPath = path.Join(cli.basePath, p)
- }
- return (&url.URL{Path: apiPath, RawQuery: query.Encode()}).String()
-}
-
-// ClientVersion returns the API version used by this client.
-func (cli *Client) ClientVersion() string {
- return cli.version
-}
-
-// NegotiateAPIVersion queries the API and updates the version to match the
-// API version. Any errors are silently ignored. If a manual override is in place,
-// either through the `DOCKER_API_VERSION` environment variable, or if the client
-// was initialized with a fixed version (`opts.WithVersion(xx)`), no negotiation
-// will be performed.
-func (cli *Client) NegotiateAPIVersion(ctx context.Context) {
- if !cli.manualOverride {
- ping, _ := cli.Ping(ctx)
- cli.negotiateAPIVersionPing(ping)
- }
-}
-
-// NegotiateAPIVersionPing updates the client version to match the Ping.APIVersion
-// if the ping version is less than the default version. If a manual override is
-// in place, either through the `DOCKER_API_VERSION` environment variable, or if
-// the client was initialized with a fixed version (`opts.WithVersion(xx)`), no
-// negotiation is performed.
-func (cli *Client) NegotiateAPIVersionPing(p types.Ping) {
- if !cli.manualOverride {
- cli.negotiateAPIVersionPing(p)
- }
-}
-
-// negotiateAPIVersionPing queries the API and updates the version to match the
-// API version. Any errors are silently ignored.
-func (cli *Client) negotiateAPIVersionPing(p types.Ping) {
- // try the latest version before versioning headers existed
- if p.APIVersion == "" {
- p.APIVersion = "1.24"
- }
-
- // if the client is not initialized with a version, start with the latest supported version
- if cli.version == "" {
- cli.version = api.DefaultVersion
- }
-
- // if server version is lower than the client version, downgrade
- if versions.LessThan(p.APIVersion, cli.version) {
- cli.version = p.APIVersion
- }
-
- // Store the results, so that automatic API version negotiation (if enabled)
- // won't be performed on the next request.
- if cli.negotiateVersion {
- cli.negotiated = true
- }
-}
-
-// DaemonHost returns the host address used by the client
-func (cli *Client) DaemonHost() string {
- return cli.host
-}
-
-// HTTPClient returns a copy of the HTTP client bound to the server
-func (cli *Client) HTTPClient() *http.Client {
- c := *cli.client
- return &c
-}
-
-// ParseHostURL parses a url string, validates the string is a host url, and
-// returns the parsed URL
-func ParseHostURL(host string) (*url.URL, error) {
- protoAddrParts := strings.SplitN(host, "://", 2)
- if len(protoAddrParts) == 1 {
- return nil, fmt.Errorf("unable to parse docker host `%s`", host)
- }
-
- var basePath string
- proto, addr := protoAddrParts[0], protoAddrParts[1]
- if proto == "tcp" {
- parsed, err := url.Parse("tcp://" + addr)
- if err != nil {
- return nil, err
- }
- addr = parsed.Host
- basePath = parsed.Path
- }
- return &url.URL{
- Scheme: proto,
- Host: addr,
- Path: basePath,
- }, nil
-}
-
-// CustomHTTPHeaders returns the custom http headers stored by the client.
-func (cli *Client) CustomHTTPHeaders() map[string]string {
- m := make(map[string]string)
- for k, v := range cli.customHTTPHeaders {
- m[k] = v
- }
- return m
-}
-
-// SetCustomHTTPHeaders that will be set on every HTTP request made by the client.
-// Deprecated: use WithHTTPHeaders when creating the client.
-func (cli *Client) SetCustomHTTPHeaders(headers map[string]string) {
- cli.customHTTPHeaders = headers
-}
-
-// Dialer returns a dialer for a raw stream connection, with HTTP/1.1 header, that can be used for proxying the daemon connection.
-// Used by `docker dial-stdio` (docker/cli#889).
-func (cli *Client) Dialer() func(context.Context) (net.Conn, error) {
- return func(ctx context.Context) (net.Conn, error) {
- if transport, ok := cli.client.Transport.(*http.Transport); ok {
- if transport.DialContext != nil && transport.TLSClientConfig == nil {
- return transport.DialContext(ctx, cli.proto, cli.addr)
- }
- }
- return fallbackDial(cli.proto, cli.addr, resolveTLSConfig(cli.client.Transport))
- }
-}
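For context on the constructor and version-negotiation logic removed above, a minimal sketch of typical usage; client.WithAPIVersionNegotiation is assumed to be available in this vendored version (it is the option that enables the negotiateVersion behavior described above):

	package main

	import (
		"context"
		"fmt"

		"github.com/docker/docker/client"
	)

	func main() {
		// Build the client from DOCKER_HOST / DOCKER_API_VERSION / DOCKER_CERT_PATH and let it
		// negotiate the API version on the first request instead of pinning one manually.
		cli, err := client.NewClientWithOpts(client.FromEnv, client.WithAPIVersionNegotiation())
		if err != nil {
			panic(err)
		}
		defer cli.Close()

		// Ping triggers the negotiation; afterwards ClientVersion reports the negotiated version.
		if _, err := cli.Ping(context.Background()); err != nil {
			panic(err)
		}
		fmt.Println("negotiated API version:", cli.ClientVersion())
	}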
diff --git a/vendor/github.com/docker/docker/client/client_deprecated.go b/vendor/github.com/docker/docker/client/client_deprecated.go
deleted file mode 100644
index 54cdfc29a8..0000000000
--- a/vendor/github.com/docker/docker/client/client_deprecated.go
+++ /dev/null
@@ -1,23 +0,0 @@
-package client
-
-import "net/http"
-
-// NewClient initializes a new API client for the given host and API version.
-// It uses the given http client as transport.
-// It also initializes the custom http headers to add to each request.
-//
-// It won't send any version information if the version number is empty. It is
-// highly recommended that you set a version or your client may break if the
-// server is upgraded.
-// Deprecated: use NewClientWithOpts
-func NewClient(host string, version string, client *http.Client, httpHeaders map[string]string) (*Client, error) {
- return NewClientWithOpts(WithHost(host), WithVersion(version), WithHTTPClient(client), WithHTTPHeaders(httpHeaders))
-}
-
-// NewEnvClient initializes a new API client based on environment variables.
-// See FromEnv for a list of supported environment variables.
-//
-// Deprecated: use NewClientWithOpts(FromEnv)
-func NewEnvClient() (*Client, error) {
- return NewClientWithOpts(FromEnv)
-}
diff --git a/vendor/github.com/docker/docker/client/client_unix.go b/vendor/github.com/docker/docker/client/client_unix.go
deleted file mode 100644
index 9d0f0dcbf0..0000000000
--- a/vendor/github.com/docker/docker/client/client_unix.go
+++ /dev/null
@@ -1,9 +0,0 @@
-// +build linux freebsd openbsd netbsd darwin solaris illumos dragonfly
-
-package client // import "github.com/docker/docker/client"
-
-// DefaultDockerHost defines os specific default if DOCKER_HOST is unset
-const DefaultDockerHost = "unix:///var/run/docker.sock"
-
-const defaultProto = "unix"
-const defaultAddr = "/var/run/docker.sock"
diff --git a/vendor/github.com/docker/docker/client/client_windows.go b/vendor/github.com/docker/docker/client/client_windows.go
deleted file mode 100644
index c649e54412..0000000000
--- a/vendor/github.com/docker/docker/client/client_windows.go
+++ /dev/null
@@ -1,7 +0,0 @@
-package client // import "github.com/docker/docker/client"
-
-// DefaultDockerHost defines os specific default if DOCKER_HOST is unset
-const DefaultDockerHost = "npipe:////./pipe/docker_engine"
-
-const defaultProto = "npipe"
-const defaultAddr = "//./pipe/docker_engine"
diff --git a/vendor/github.com/docker/docker/client/config_create.go b/vendor/github.com/docker/docker/client/config_create.go
deleted file mode 100644
index ee7d411df0..0000000000
--- a/vendor/github.com/docker/docker/client/config_create.go
+++ /dev/null
@@ -1,25 +0,0 @@
-package client // import "github.com/docker/docker/client"
-
-import (
- "context"
- "encoding/json"
-
- "github.com/docker/docker/api/types"
- "github.com/docker/docker/api/types/swarm"
-)
-
-// ConfigCreate creates a new Config.
-func (cli *Client) ConfigCreate(ctx context.Context, config swarm.ConfigSpec) (types.ConfigCreateResponse, error) {
- var response types.ConfigCreateResponse
- if err := cli.NewVersionError("1.30", "config create"); err != nil {
- return response, err
- }
- resp, err := cli.post(ctx, "/configs/create", nil, config, nil)
- defer ensureReaderClosed(resp)
- if err != nil {
- return response, err
- }
-
- err = json.NewDecoder(resp.body).Decode(&response)
- return response, err
-}
diff --git a/vendor/github.com/docker/docker/client/config_inspect.go b/vendor/github.com/docker/docker/client/config_inspect.go
deleted file mode 100644
index 7d0ce3e11c..0000000000
--- a/vendor/github.com/docker/docker/client/config_inspect.go
+++ /dev/null
@@ -1,36 +0,0 @@
-package client // import "github.com/docker/docker/client"
-
-import (
- "bytes"
- "context"
- "encoding/json"
- "io/ioutil"
-
- "github.com/docker/docker/api/types/swarm"
-)
-
-// ConfigInspectWithRaw returns the config information with raw data
-func (cli *Client) ConfigInspectWithRaw(ctx context.Context, id string) (swarm.Config, []byte, error) {
- if id == "" {
- return swarm.Config{}, nil, objectNotFoundError{object: "config", id: id}
- }
- if err := cli.NewVersionError("1.30", "config inspect"); err != nil {
- return swarm.Config{}, nil, err
- }
- resp, err := cli.get(ctx, "/configs/"+id, nil, nil)
- defer ensureReaderClosed(resp)
- if err != nil {
- return swarm.Config{}, nil, wrapResponseError(err, resp, "config", id)
- }
-
- body, err := ioutil.ReadAll(resp.body)
- if err != nil {
- return swarm.Config{}, nil, err
- }
-
- var config swarm.Config
- rdr := bytes.NewReader(body)
- err = json.NewDecoder(rdr).Decode(&config)
-
- return config, body, err
-}
diff --git a/vendor/github.com/docker/docker/client/config_list.go b/vendor/github.com/docker/docker/client/config_list.go
deleted file mode 100644
index 565acc6e27..0000000000
--- a/vendor/github.com/docker/docker/client/config_list.go
+++ /dev/null
@@ -1,38 +0,0 @@
-package client // import "github.com/docker/docker/client"
-
-import (
- "context"
- "encoding/json"
- "net/url"
-
- "github.com/docker/docker/api/types"
- "github.com/docker/docker/api/types/filters"
- "github.com/docker/docker/api/types/swarm"
-)
-
-// ConfigList returns the list of configs.
-func (cli *Client) ConfigList(ctx context.Context, options types.ConfigListOptions) ([]swarm.Config, error) {
- if err := cli.NewVersionError("1.30", "config list"); err != nil {
- return nil, err
- }
- query := url.Values{}
-
- if options.Filters.Len() > 0 {
- filterJSON, err := filters.ToJSON(options.Filters)
- if err != nil {
- return nil, err
- }
-
- query.Set("filters", filterJSON)
- }
-
- resp, err := cli.get(ctx, "/configs", query, nil)
- defer ensureReaderClosed(resp)
- if err != nil {
- return nil, err
- }
-
- var configs []swarm.Config
- err = json.NewDecoder(resp.body).Decode(&configs)
- return configs, err
-}
diff --git a/vendor/github.com/docker/docker/client/config_remove.go b/vendor/github.com/docker/docker/client/config_remove.go
deleted file mode 100644
index a708fcaecf..0000000000
--- a/vendor/github.com/docker/docker/client/config_remove.go
+++ /dev/null
@@ -1,13 +0,0 @@
-package client // import "github.com/docker/docker/client"
-
-import "context"
-
-// ConfigRemove removes a Config.
-func (cli *Client) ConfigRemove(ctx context.Context, id string) error {
- if err := cli.NewVersionError("1.30", "config remove"); err != nil {
- return err
- }
- resp, err := cli.delete(ctx, "/configs/"+id, nil, nil)
- defer ensureReaderClosed(resp)
- return wrapResponseError(err, resp, "config", id)
-}
diff --git a/vendor/github.com/docker/docker/client/config_update.go b/vendor/github.com/docker/docker/client/config_update.go
deleted file mode 100644
index 39e59cf858..0000000000
--- a/vendor/github.com/docker/docker/client/config_update.go
+++ /dev/null
@@ -1,21 +0,0 @@
-package client // import "github.com/docker/docker/client"
-
-import (
- "context"
- "net/url"
- "strconv"
-
- "github.com/docker/docker/api/types/swarm"
-)
-
-// ConfigUpdate attempts to update a Config
-func (cli *Client) ConfigUpdate(ctx context.Context, id string, version swarm.Version, config swarm.ConfigSpec) error {
- if err := cli.NewVersionError("1.30", "config update"); err != nil {
- return err
- }
- query := url.Values{}
- query.Set("version", strconv.FormatUint(version.Index, 10))
- resp, err := cli.post(ctx, "/configs/"+id+"/update", query, config, nil)
- ensureReaderClosed(resp)
- return err
-}
diff --git a/vendor/github.com/docker/docker/client/container_attach.go b/vendor/github.com/docker/docker/client/container_attach.go
deleted file mode 100644
index 88ba1ef639..0000000000
--- a/vendor/github.com/docker/docker/client/container_attach.go
+++ /dev/null
@@ -1,57 +0,0 @@
-package client // import "github.com/docker/docker/client"
-
-import (
- "context"
- "net/url"
-
- "github.com/docker/docker/api/types"
-)
-
-// ContainerAttach attaches a connection to a container in the server.
-// It returns a types.HijackedResponse with the hijacked connection
-// and a reader to get output. It's up to the caller to close
-// the hijacked connection by calling types.HijackedResponse.Close.
-//
-// The stream format on the response will be in one of two formats:
-//
-// If the container is using a TTY, there is only a single stream (stdout), and
-// data is copied directly from the container output stream, no extra
-// multiplexing or headers.
-//
-// If the container is *not* using a TTY, streams for stdout and stderr are
-// multiplexed.
-// The format of the multiplexed stream is as follows:
-//
-// [8]byte{STREAM_TYPE, 0, 0, 0, SIZE1, SIZE2, SIZE3, SIZE4}[]byte{OUTPUT}
-//
-// STREAM_TYPE can be 1 for stdout and 2 for stderr
-//
-// SIZE1, SIZE2, SIZE3, and SIZE4 are four bytes of uint32 encoded as big endian.
-// This is the size of OUTPUT.
-//
-// You can use github.com/docker/docker/pkg/stdcopy.StdCopy to demultiplex this
-// stream.
-func (cli *Client) ContainerAttach(ctx context.Context, container string, options types.ContainerAttachOptions) (types.HijackedResponse, error) {
- query := url.Values{}
- if options.Stream {
- query.Set("stream", "1")
- }
- if options.Stdin {
- query.Set("stdin", "1")
- }
- if options.Stdout {
- query.Set("stdout", "1")
- }
- if options.Stderr {
- query.Set("stderr", "1")
- }
- if options.DetachKeys != "" {
- query.Set("detachKeys", options.DetachKeys)
- }
- if options.Logs {
- query.Set("logs", "1")
- }
-
- headers := map[string][]string{"Content-Type": {"text/plain"}}
- return cli.postHijacked(ctx, "/containers/"+container+"/attach", query, nil, headers)
-}
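For context on the multiplexed stream format documented above, a sketch that attaches to a non-TTY container and demultiplexes with github.com/docker/docker/pkg/stdcopy, as the comment suggests; the helper takes an already-constructed *client.Client and the package name is illustrative:

	package example

	import (
		"context"
		"os"

		"github.com/docker/docker/api/types"
		"github.com/docker/docker/client"
		"github.com/docker/docker/pkg/stdcopy"
	)

	// attachAndStream attaches to a running, non-TTY container and splits the
	// multiplexed stdout/stderr frames described in the comment above.
	func attachAndStream(ctx context.Context, cli *client.Client, containerID string) error {
		resp, err := cli.ContainerAttach(ctx, containerID, types.ContainerAttachOptions{
			Stream: true,
			Stdout: true,
			Stderr: true,
		})
		if err != nil {
			return err
		}
		defer resp.Close()

		// stdcopy.StdCopy demultiplexes the 8-byte-header frames into two writers.
		_, err = stdcopy.StdCopy(os.Stdout, os.Stderr, resp.Reader)
		return err
	}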
diff --git a/vendor/github.com/docker/docker/client/container_commit.go b/vendor/github.com/docker/docker/client/container_commit.go
deleted file mode 100644
index 2966e88c8e..0000000000
--- a/vendor/github.com/docker/docker/client/container_commit.go
+++ /dev/null
@@ -1,55 +0,0 @@
-package client // import "github.com/docker/docker/client"
-
-import (
- "context"
- "encoding/json"
- "errors"
- "net/url"
-
- "github.com/docker/distribution/reference"
- "github.com/docker/docker/api/types"
-)
-
-// ContainerCommit applies changes into a container and creates a new tagged image.
-func (cli *Client) ContainerCommit(ctx context.Context, container string, options types.ContainerCommitOptions) (types.IDResponse, error) {
- var repository, tag string
- if options.Reference != "" {
- ref, err := reference.ParseNormalizedNamed(options.Reference)
- if err != nil {
- return types.IDResponse{}, err
- }
-
- if _, isCanonical := ref.(reference.Canonical); isCanonical {
- return types.IDResponse{}, errors.New("refusing to create a tag with a digest reference")
- }
- ref = reference.TagNameOnly(ref)
-
- if tagged, ok := ref.(reference.Tagged); ok {
- tag = tagged.Tag()
- }
- repository = reference.FamiliarName(ref)
- }
-
- query := url.Values{}
- query.Set("container", container)
- query.Set("repo", repository)
- query.Set("tag", tag)
- query.Set("comment", options.Comment)
- query.Set("author", options.Author)
- for _, change := range options.Changes {
- query.Add("changes", change)
- }
- if !options.Pause {
- query.Set("pause", "0")
- }
-
- var response types.IDResponse
- resp, err := cli.post(ctx, "/commit", query, options.Config, nil)
- defer ensureReaderClosed(resp)
- if err != nil {
- return response, err
- }
-
- err = json.NewDecoder(resp.body).Decode(&response)
- return response, err
-}
diff --git a/vendor/github.com/docker/docker/client/container_copy.go b/vendor/github.com/docker/docker/client/container_copy.go
deleted file mode 100644
index bb278bf7f3..0000000000
--- a/vendor/github.com/docker/docker/client/container_copy.go
+++ /dev/null
@@ -1,103 +0,0 @@
-package client // import "github.com/docker/docker/client"
-
-import (
- "context"
- "encoding/base64"
- "encoding/json"
- "fmt"
- "io"
- "net/http"
- "net/url"
- "path/filepath"
- "strings"
-
- "github.com/docker/docker/api/types"
-)
-
-// ContainerStatPath returns Stat information about a path inside the container filesystem.
-func (cli *Client) ContainerStatPath(ctx context.Context, containerID, path string) (types.ContainerPathStat, error) {
- query := url.Values{}
- query.Set("path", filepath.ToSlash(path)) // Normalize the paths used in the API.
-
- urlStr := "/containers/" + containerID + "/archive"
- response, err := cli.head(ctx, urlStr, query, nil)
- defer ensureReaderClosed(response)
- if err != nil {
- return types.ContainerPathStat{}, wrapResponseError(err, response, "container:path", containerID+":"+path)
- }
- return getContainerPathStatFromHeader(response.header)
-}
-
-// CopyToContainer copies content into the container filesystem.
-// Note that `content` must be a Reader for a TAR archive
-func (cli *Client) CopyToContainer(ctx context.Context, containerID, dstPath string, content io.Reader, options types.CopyToContainerOptions) error {
- query := url.Values{}
- query.Set("path", filepath.ToSlash(dstPath)) // Normalize the paths used in the API.
- // Do not allow for an existing directory to be overwritten by a non-directory and vice versa.
- if !options.AllowOverwriteDirWithFile {
- query.Set("noOverwriteDirNonDir", "true")
- }
-
- if options.CopyUIDGID {
- query.Set("copyUIDGID", "true")
- }
-
- apiPath := "/containers/" + containerID + "/archive"
-
- response, err := cli.putRaw(ctx, apiPath, query, content, nil)
- defer ensureReaderClosed(response)
- if err != nil {
- return wrapResponseError(err, response, "container:path", containerID+":"+dstPath)
- }
-
- // TODO this code converts non-error status-codes (e.g., "204 No Content") into an error; verify if this is the desired behavior
- if response.statusCode != http.StatusOK {
- return fmt.Errorf("unexpected status code from daemon: %d", response.statusCode)
- }
-
- return nil
-}
-
-// CopyFromContainer gets the content from the container and returns it as a Reader
-// for a TAR archive to manipulate it in the host. It's up to the caller to close the reader.
-func (cli *Client) CopyFromContainer(ctx context.Context, containerID, srcPath string) (io.ReadCloser, types.ContainerPathStat, error) {
- query := make(url.Values, 1)
- query.Set("path", filepath.ToSlash(srcPath)) // Normalize the paths used in the API.
-
- apiPath := "/containers/" + containerID + "/archive"
- response, err := cli.get(ctx, apiPath, query, nil)
- if err != nil {
- return nil, types.ContainerPathStat{}, wrapResponseError(err, response, "container:path", containerID+":"+srcPath)
- }
-
- // TODO this code converts non-error status-codes (e.g., "204 No Content") into an error; verify if this is the desired behavior
- if response.statusCode != http.StatusOK {
- return nil, types.ContainerPathStat{}, fmt.Errorf("unexpected status code from daemon: %d", response.statusCode)
- }
-
- // In order to get the copy behavior right, we need to know information
- // about both the source and the destination. The response headers include
- // stat info about the source that we can use in deciding exactly how to
- // copy it locally. Along with the stat info about the local destination,
- // we have everything we need to handle the multiple possibilities there
- // can be when copying a file/dir from one location to another file/dir.
- stat, err := getContainerPathStatFromHeader(response.header)
- if err != nil {
- return nil, stat, fmt.Errorf("unable to get resource stat from response: %s", err)
- }
- return response.body, stat, err
-}
-
-func getContainerPathStatFromHeader(header http.Header) (types.ContainerPathStat, error) {
- var stat types.ContainerPathStat
-
- encodedStat := header.Get("X-Docker-Container-Path-Stat")
- statDecoder := base64.NewDecoder(base64.StdEncoding, strings.NewReader(encodedStat))
-
- err := json.NewDecoder(statDecoder).Decode(&stat)
- if err != nil {
- err = fmt.Errorf("unable to decode container path stat header: %s", err)
- }
-
- return stat, err
-}
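For context on the TAR requirement called out above, a sketch that builds a one-file tar archive in memory and copies it into a container; the helper takes an already-constructed *client.Client, and /tmp and the package name are illustrative:

	package example

	import (
		"archive/tar"
		"bytes"
		"context"

		"github.com/docker/docker/api/types"
		"github.com/docker/docker/client"
	)

	// copyFileIntoContainer builds a single-file tar archive in memory, because the
	// content reader passed to CopyToContainer must be a TAR stream.
	func copyFileIntoContainer(ctx context.Context, cli *client.Client, containerID, name string, data []byte) error {
		var buf bytes.Buffer
		tw := tar.NewWriter(&buf)
		if err := tw.WriteHeader(&tar.Header{Name: name, Mode: 0644, Size: int64(len(data))}); err != nil {
			return err
		}
		if _, err := tw.Write(data); err != nil {
			return err
		}
		if err := tw.Close(); err != nil {
			return err
		}

		// The destination must be an existing directory inside the container.
		return cli.CopyToContainer(ctx, containerID, "/tmp", &buf, types.CopyToContainerOptions{})
	}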
diff --git a/vendor/github.com/docker/docker/client/container_create.go b/vendor/github.com/docker/docker/client/container_create.go
deleted file mode 100644
index b1d5fea5bd..0000000000
--- a/vendor/github.com/docker/docker/client/container_create.go
+++ /dev/null
@@ -1,63 +0,0 @@
-package client // import "github.com/docker/docker/client"
-
-import (
- "context"
- "encoding/json"
- "net/url"
-
- "github.com/containerd/containerd/platforms"
- "github.com/docker/docker/api/types/container"
- "github.com/docker/docker/api/types/network"
- "github.com/docker/docker/api/types/versions"
- specs "github.com/opencontainers/image-spec/specs-go/v1"
-)
-
-type configWrapper struct {
- *container.Config
- HostConfig *container.HostConfig
- NetworkingConfig *network.NetworkingConfig
- Platform *specs.Platform
-}
-
-// ContainerCreate creates a new container based on the given configuration.
-// It can be associated with a name, but it's not mandatory.
-func (cli *Client) ContainerCreate(ctx context.Context, config *container.Config, hostConfig *container.HostConfig, networkingConfig *network.NetworkingConfig, platform *specs.Platform, containerName string) (container.ContainerCreateCreatedBody, error) {
- var response container.ContainerCreateCreatedBody
-
- if err := cli.NewVersionError("1.25", "stop timeout"); config != nil && config.StopTimeout != nil && err != nil {
- return response, err
- }
-
- // When using API 1.24 and under, the client is responsible for removing the container
- if hostConfig != nil && versions.LessThan(cli.ClientVersion(), "1.25") {
- hostConfig.AutoRemove = false
- }
-
- if err := cli.NewVersionError("1.41", "specify container image platform"); platform != nil && err != nil {
- return response, err
- }
-
- query := url.Values{}
- if platform != nil {
- query.Set("platform", platforms.Format(*platform))
- }
-
- if containerName != "" {
- query.Set("name", containerName)
- }
-
- body := configWrapper{
- Config: config,
- HostConfig: hostConfig,
- NetworkingConfig: networkingConfig,
- }
-
- serverResp, err := cli.post(ctx, "/containers/create", query, body, nil)
- defer ensureReaderClosed(serverResp)
- if err != nil {
- return response, err
- }
-
- err = json.NewDecoder(serverResp.body).Decode(&response)
- return response, err
-}
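For context on the five-argument ContainerCreate signature removed above, a sketch that creates and starts a throwaway container; the image name and package name are illustrative, and the helper takes an already-constructed *client.Client:

	package example

	import (
		"context"

		"github.com/docker/docker/api/types"
		"github.com/docker/docker/api/types/container"
		"github.com/docker/docker/client"
	)

	// runEcho creates and starts a container using the five-argument ContainerCreate
	// signature of this client version (the platform argument is left nil).
	func runEcho(ctx context.Context, cli *client.Client) (string, error) {
		resp, err := cli.ContainerCreate(ctx,
			&container.Config{
				Image: "alpine:3.13", // illustrative image
				Cmd:   []string{"echo", "hello"},
			},
			nil, // host config
			nil, // networking config
			nil, // platform (only honoured on API 1.41+)
			"",  // let the daemon generate a name
		)
		if err != nil {
			return "", err
		}
		if err := cli.ContainerStart(ctx, resp.ID, types.ContainerStartOptions{}); err != nil {
			return "", err
		}
		return resp.ID, nil
	}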
diff --git a/vendor/github.com/docker/docker/client/container_diff.go b/vendor/github.com/docker/docker/client/container_diff.go
deleted file mode 100644
index 29dac8491d..0000000000
--- a/vendor/github.com/docker/docker/client/container_diff.go
+++ /dev/null
@@ -1,23 +0,0 @@
-package client // import "github.com/docker/docker/client"
-
-import (
- "context"
- "encoding/json"
- "net/url"
-
- "github.com/docker/docker/api/types/container"
-)
-
-// ContainerDiff shows differences in a container filesystem since it was started.
-func (cli *Client) ContainerDiff(ctx context.Context, containerID string) ([]container.ContainerChangeResponseItem, error) {
- var changes []container.ContainerChangeResponseItem
-
- serverResp, err := cli.get(ctx, "/containers/"+containerID+"/changes", url.Values{}, nil)
- defer ensureReaderClosed(serverResp)
- if err != nil {
- return changes, err
- }
-
- err = json.NewDecoder(serverResp.body).Decode(&changes)
- return changes, err
-}
diff --git a/vendor/github.com/docker/docker/client/container_exec.go b/vendor/github.com/docker/docker/client/container_exec.go
deleted file mode 100644
index e3ee755b71..0000000000
--- a/vendor/github.com/docker/docker/client/container_exec.go
+++ /dev/null
@@ -1,54 +0,0 @@
-package client // import "github.com/docker/docker/client"
-
-import (
- "context"
- "encoding/json"
-
- "github.com/docker/docker/api/types"
-)
-
-// ContainerExecCreate creates a new exec configuration to run an exec process.
-func (cli *Client) ContainerExecCreate(ctx context.Context, container string, config types.ExecConfig) (types.IDResponse, error) {
- var response types.IDResponse
-
- if err := cli.NewVersionError("1.25", "env"); len(config.Env) != 0 && err != nil {
- return response, err
- }
-
- resp, err := cli.post(ctx, "/containers/"+container+"/exec", nil, config, nil)
- defer ensureReaderClosed(resp)
- if err != nil {
- return response, err
- }
- err = json.NewDecoder(resp.body).Decode(&response)
- return response, err
-}
-
-// ContainerExecStart starts an exec process already created in the docker host.
-func (cli *Client) ContainerExecStart(ctx context.Context, execID string, config types.ExecStartCheck) error {
- resp, err := cli.post(ctx, "/exec/"+execID+"/start", nil, config, nil)
- ensureReaderClosed(resp)
- return err
-}
-
-// ContainerExecAttach attaches a connection to an exec process in the server.
-// It returns a types.HijackedResponse with the hijacked connection
-// and a reader to get output. It's up to the caller to close
-// the hijacked connection by calling types.HijackedResponse.Close.
-func (cli *Client) ContainerExecAttach(ctx context.Context, execID string, config types.ExecStartCheck) (types.HijackedResponse, error) {
- headers := map[string][]string{"Content-Type": {"application/json"}}
- return cli.postHijacked(ctx, "/exec/"+execID+"/start", nil, config, headers)
-}
-
-// ContainerExecInspect returns information about a specific exec process on the docker host.
-func (cli *Client) ContainerExecInspect(ctx context.Context, execID string) (types.ContainerExecInspect, error) {
- var response types.ContainerExecInspect
- resp, err := cli.get(ctx, "/exec/"+execID+"/json", nil, nil)
- if err != nil {
- return response, err
- }
-
- err = json.NewDecoder(resp.body).Decode(&response)
- ensureReaderClosed(resp)
- return response, err
-}
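For context on the exec flow removed above (create, attach/start, inspect), a sketch that runs a command in a running container and returns its exit code; the helper takes an already-constructed *client.Client, and the AttachStdout/AttachStderr fields of ExecConfig are assumed from the broader SDK rather than shown in this diff:

	package example

	import (
		"context"
		"os"

		"github.com/docker/docker/api/types"
		"github.com/docker/docker/client"
		"github.com/docker/docker/pkg/stdcopy"
	)

	// execInContainer creates an exec instance, attaches to it (which also starts it),
	// streams its output, and finally reads the exit code via ContainerExecInspect.
	func execInContainer(ctx context.Context, cli *client.Client, containerID string, cmd []string) (int, error) {
		created, err := cli.ContainerExecCreate(ctx, containerID, types.ExecConfig{
			Cmd:          cmd,
			AttachStdout: true,
			AttachStderr: true,
		})
		if err != nil {
			return 0, err
		}

		attach, err := cli.ContainerExecAttach(ctx, created.ID, types.ExecStartCheck{})
		if err != nil {
			return 0, err
		}
		defer attach.Close()
		if _, err := stdcopy.StdCopy(os.Stdout, os.Stderr, attach.Reader); err != nil {
			return 0, err
		}

		inspect, err := cli.ContainerExecInspect(ctx, created.ID)
		if err != nil {
			return 0, err
		}
		return inspect.ExitCode, nil
	}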
diff --git a/vendor/github.com/docker/docker/client/container_export.go b/vendor/github.com/docker/docker/client/container_export.go
deleted file mode 100644
index d0c0a5cbad..0000000000
--- a/vendor/github.com/docker/docker/client/container_export.go
+++ /dev/null
@@ -1,19 +0,0 @@
-package client // import "github.com/docker/docker/client"
-
-import (
- "context"
- "io"
- "net/url"
-)
-
-// ContainerExport retrieves the raw contents of a container
-// and returns them as an io.ReadCloser. It's up to the caller
-// to close the stream.
-func (cli *Client) ContainerExport(ctx context.Context, containerID string) (io.ReadCloser, error) {
- serverResp, err := cli.get(ctx, "/containers/"+containerID+"/export", url.Values{}, nil)
- if err != nil {
- return nil, err
- }
-
- return serverResp.body, nil
-}
diff --git a/vendor/github.com/docker/docker/client/container_inspect.go b/vendor/github.com/docker/docker/client/container_inspect.go
deleted file mode 100644
index c496bcffea..0000000000
--- a/vendor/github.com/docker/docker/client/container_inspect.go
+++ /dev/null
@@ -1,53 +0,0 @@
-package client // import "github.com/docker/docker/client"
-
-import (
- "bytes"
- "context"
- "encoding/json"
- "io/ioutil"
- "net/url"
-
- "github.com/docker/docker/api/types"
-)
-
-// ContainerInspect returns the container information.
-func (cli *Client) ContainerInspect(ctx context.Context, containerID string) (types.ContainerJSON, error) {
- if containerID == "" {
- return types.ContainerJSON{}, objectNotFoundError{object: "container", id: containerID}
- }
- serverResp, err := cli.get(ctx, "/containers/"+containerID+"/json", nil, nil)
- defer ensureReaderClosed(serverResp)
- if err != nil {
- return types.ContainerJSON{}, wrapResponseError(err, serverResp, "container", containerID)
- }
-
- var response types.ContainerJSON
- err = json.NewDecoder(serverResp.body).Decode(&response)
- return response, err
-}
-
-// ContainerInspectWithRaw returns the container information and its raw representation.
-func (cli *Client) ContainerInspectWithRaw(ctx context.Context, containerID string, getSize bool) (types.ContainerJSON, []byte, error) {
- if containerID == "" {
- return types.ContainerJSON{}, nil, objectNotFoundError{object: "container", id: containerID}
- }
- query := url.Values{}
- if getSize {
- query.Set("size", "1")
- }
- serverResp, err := cli.get(ctx, "/containers/"+containerID+"/json", query, nil)
- defer ensureReaderClosed(serverResp)
- if err != nil {
- return types.ContainerJSON{}, nil, wrapResponseError(err, serverResp, "container", containerID)
- }
-
- body, err := ioutil.ReadAll(serverResp.body)
- if err != nil {
- return types.ContainerJSON{}, nil, err
- }
-
- var response types.ContainerJSON
- rdr := bytes.NewReader(body)
- err = json.NewDecoder(rdr).Decode(&response)
- return response, body, err
-}
diff --git a/vendor/github.com/docker/docker/client/container_kill.go b/vendor/github.com/docker/docker/client/container_kill.go
deleted file mode 100644
index 4d6f1d23da..0000000000
--- a/vendor/github.com/docker/docker/client/container_kill.go
+++ /dev/null
@@ -1,16 +0,0 @@
-package client // import "github.com/docker/docker/client"
-
-import (
- "context"
- "net/url"
-)
-
-// ContainerKill terminates the container process but does not remove the container from the docker host.
-func (cli *Client) ContainerKill(ctx context.Context, containerID, signal string) error {
- query := url.Values{}
- query.Set("signal", signal)
-
- resp, err := cli.post(ctx, "/containers/"+containerID+"/kill", query, nil, nil)
- ensureReaderClosed(resp)
- return err
-}
diff --git a/vendor/github.com/docker/docker/client/container_list.go b/vendor/github.com/docker/docker/client/container_list.go
deleted file mode 100644
index a973de597f..0000000000
--- a/vendor/github.com/docker/docker/client/container_list.go
+++ /dev/null
@@ -1,57 +0,0 @@
-package client // import "github.com/docker/docker/client"
-
-import (
- "context"
- "encoding/json"
- "net/url"
- "strconv"
-
- "github.com/docker/docker/api/types"
- "github.com/docker/docker/api/types/filters"
-)
-
-// ContainerList returns the list of containers in the docker host.
-func (cli *Client) ContainerList(ctx context.Context, options types.ContainerListOptions) ([]types.Container, error) {
- query := url.Values{}
-
- if options.All {
- query.Set("all", "1")
- }
-
- if options.Limit != -1 {
- query.Set("limit", strconv.Itoa(options.Limit))
- }
-
- if options.Since != "" {
- query.Set("since", options.Since)
- }
-
- if options.Before != "" {
- query.Set("before", options.Before)
- }
-
- if options.Size {
- query.Set("size", "1")
- }
-
- if options.Filters.Len() > 0 {
- //nolint:staticcheck // ignore SA1019 for old code
- filterJSON, err := filters.ToParamWithVersion(cli.version, options.Filters)
-
- if err != nil {
- return nil, err
- }
-
- query.Set("filters", filterJSON)
- }
-
- resp, err := cli.get(ctx, "/containers/json", query, nil)
- defer ensureReaderClosed(resp)
- if err != nil {
- return nil, err
- }
-
- var containers []types.Container
- err = json.NewDecoder(resp.body).Decode(&containers)
- return containers, err
-}
diff --git a/vendor/github.com/docker/docker/client/container_logs.go b/vendor/github.com/docker/docker/client/container_logs.go
deleted file mode 100644
index 5b6541f035..0000000000
--- a/vendor/github.com/docker/docker/client/container_logs.go
+++ /dev/null
@@ -1,80 +0,0 @@
-package client // import "github.com/docker/docker/client"
-
-import (
- "context"
- "io"
- "net/url"
- "time"
-
- "github.com/docker/docker/api/types"
- timetypes "github.com/docker/docker/api/types/time"
- "github.com/pkg/errors"
-)
-
-// ContainerLogs returns the logs generated by a container in an io.ReadCloser.
-// It's up to the caller to close the stream.
-//
-// The stream format on the response will be in one of two formats:
-//
-// If the container is using a TTY, there is only a single stream (stdout), and
-// data is copied directly from the container output stream, no extra
-// multiplexing or headers.
-//
-// If the container is *not* using a TTY, streams for stdout and stderr are
-// multiplexed.
-// The format of the multiplexed stream is as follows:
-//
-// [8]byte{STREAM_TYPE, 0, 0, 0, SIZE1, SIZE2, SIZE3, SIZE4}[]byte{OUTPUT}
-//
-// STREAM_TYPE can be 1 for stdout and 2 for stderr
-//
-// SIZE1, SIZE2, SIZE3, and SIZE4 are four bytes of uint32 encoded as big endian.
-// This is the size of OUTPUT.
-//
-// You can use github.com/docker/docker/pkg/stdcopy.StdCopy to demultiplex this
-// stream.
-func (cli *Client) ContainerLogs(ctx context.Context, container string, options types.ContainerLogsOptions) (io.ReadCloser, error) {
- query := url.Values{}
- if options.ShowStdout {
- query.Set("stdout", "1")
- }
-
- if options.ShowStderr {
- query.Set("stderr", "1")
- }
-
- if options.Since != "" {
- ts, err := timetypes.GetTimestamp(options.Since, time.Now())
- if err != nil {
- return nil, errors.Wrap(err, `invalid value for "since"`)
- }
- query.Set("since", ts)
- }
-
- if options.Until != "" {
- ts, err := timetypes.GetTimestamp(options.Until, time.Now())
- if err != nil {
- return nil, errors.Wrap(err, `invalid value for "until"`)
- }
- query.Set("until", ts)
- }
-
- if options.Timestamps {
- query.Set("timestamps", "1")
- }
-
- if options.Details {
- query.Set("details", "1")
- }
-
- if options.Follow {
- query.Set("follow", "1")
- }
- query.Set("tail", options.Tail)
-
- resp, err := cli.get(ctx, "/containers/"+container+"/logs", query, nil)
- if err != nil {
- return nil, wrapResponseError(err, resp, "container", container)
- }
- return resp.body, nil
-}
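For context on the log-streaming options and the multiplexed format documented above, a sketch that tails and demultiplexes container logs; the helper takes an already-constructed *client.Client, and the tail depth and package name are illustrative:

	package example

	import (
		"context"
		"os"

		"github.com/docker/docker/api/types"
		"github.com/docker/docker/client"
		"github.com/docker/docker/pkg/stdcopy"
	)

	// tailLogs follows the last 100 log lines of a non-TTY container; the body is the
	// same multiplexed frame format documented above, so stdcopy splits it again.
	func tailLogs(ctx context.Context, cli *client.Client, containerID string) error {
		rc, err := cli.ContainerLogs(ctx, containerID, types.ContainerLogsOptions{
			ShowStdout: true,
			ShowStderr: true,
			Follow:     true,
			Tail:       "100",
			Timestamps: true,
		})
		if err != nil {
			return err
		}
		defer rc.Close()

		_, err = stdcopy.StdCopy(os.Stdout, os.Stderr, rc)
		return err
	}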
diff --git a/vendor/github.com/docker/docker/client/container_pause.go b/vendor/github.com/docker/docker/client/container_pause.go
deleted file mode 100644
index 5e7271a371..0000000000
--- a/vendor/github.com/docker/docker/client/container_pause.go
+++ /dev/null
@@ -1,10 +0,0 @@
-package client // import "github.com/docker/docker/client"
-
-import "context"
-
-// ContainerPause pauses the main process of a given container without terminating it.
-func (cli *Client) ContainerPause(ctx context.Context, containerID string) error {
- resp, err := cli.post(ctx, "/containers/"+containerID+"/pause", nil, nil, nil)
- ensureReaderClosed(resp)
- return err
-}
diff --git a/vendor/github.com/docker/docker/client/container_prune.go b/vendor/github.com/docker/docker/client/container_prune.go
deleted file mode 100644
index 04383deaaf..0000000000
--- a/vendor/github.com/docker/docker/client/container_prune.go
+++ /dev/null
@@ -1,36 +0,0 @@
-package client // import "github.com/docker/docker/client"
-
-import (
- "context"
- "encoding/json"
- "fmt"
-
- "github.com/docker/docker/api/types"
- "github.com/docker/docker/api/types/filters"
-)
-
-// ContainersPrune requests the daemon to delete unused data
-func (cli *Client) ContainersPrune(ctx context.Context, pruneFilters filters.Args) (types.ContainersPruneReport, error) {
- var report types.ContainersPruneReport
-
- if err := cli.NewVersionError("1.25", "container prune"); err != nil {
- return report, err
- }
-
- query, err := getFiltersQuery(pruneFilters)
- if err != nil {
- return report, err
- }
-
- serverResp, err := cli.post(ctx, "/containers/prune", query, nil, nil)
- defer ensureReaderClosed(serverResp)
- if err != nil {
- return report, err
- }
-
- if err := json.NewDecoder(serverResp.body).Decode(&report); err != nil {
-		return report, fmt.Errorf("error retrieving container prune report: %v", err)
- }
-
- return report, nil
-}
diff --git a/vendor/github.com/docker/docker/client/container_remove.go b/vendor/github.com/docker/docker/client/container_remove.go
deleted file mode 100644
index df81461b88..0000000000
--- a/vendor/github.com/docker/docker/client/container_remove.go
+++ /dev/null
@@ -1,27 +0,0 @@
-package client // import "github.com/docker/docker/client"
-
-import (
- "context"
- "net/url"
-
- "github.com/docker/docker/api/types"
-)
-
-// ContainerRemove kills and removes a container from the docker host.
-func (cli *Client) ContainerRemove(ctx context.Context, containerID string, options types.ContainerRemoveOptions) error {
- query := url.Values{}
- if options.RemoveVolumes {
- query.Set("v", "1")
- }
- if options.RemoveLinks {
- query.Set("link", "1")
- }
-
- if options.Force {
- query.Set("force", "1")
- }
-
- resp, err := cli.delete(ctx, "/containers/"+containerID, query, nil)
- defer ensureReaderClosed(resp)
- return wrapResponseError(err, resp, "container", containerID)
-}
diff --git a/vendor/github.com/docker/docker/client/container_rename.go b/vendor/github.com/docker/docker/client/container_rename.go
deleted file mode 100644
index 240fdf552b..0000000000
--- a/vendor/github.com/docker/docker/client/container_rename.go
+++ /dev/null
@@ -1,15 +0,0 @@
-package client // import "github.com/docker/docker/client"
-
-import (
- "context"
- "net/url"
-)
-
-// ContainerRename changes the name of a given container.
-func (cli *Client) ContainerRename(ctx context.Context, containerID, newContainerName string) error {
- query := url.Values{}
- query.Set("name", newContainerName)
- resp, err := cli.post(ctx, "/containers/"+containerID+"/rename", query, nil, nil)
- ensureReaderClosed(resp)
- return err
-}
diff --git a/vendor/github.com/docker/docker/client/container_resize.go b/vendor/github.com/docker/docker/client/container_resize.go
deleted file mode 100644
index a9d4c0c79a..0000000000
--- a/vendor/github.com/docker/docker/client/container_resize.go
+++ /dev/null
@@ -1,29 +0,0 @@
-package client // import "github.com/docker/docker/client"
-
-import (
- "context"
- "net/url"
- "strconv"
-
- "github.com/docker/docker/api/types"
-)
-
-// ContainerResize changes the size of the tty for a container.
-func (cli *Client) ContainerResize(ctx context.Context, containerID string, options types.ResizeOptions) error {
- return cli.resize(ctx, "/containers/"+containerID, options.Height, options.Width)
-}
-
-// ContainerExecResize changes the size of the tty for an exec process running inside a container.
-func (cli *Client) ContainerExecResize(ctx context.Context, execID string, options types.ResizeOptions) error {
- return cli.resize(ctx, "/exec/"+execID, options.Height, options.Width)
-}
-
-func (cli *Client) resize(ctx context.Context, basePath string, height, width uint) error {
- query := url.Values{}
- query.Set("h", strconv.Itoa(int(height)))
- query.Set("w", strconv.Itoa(int(width)))
-
- resp, err := cli.post(ctx, basePath+"/resize", query, nil, nil)
- ensureReaderClosed(resp)
- return err
-}
diff --git a/vendor/github.com/docker/docker/client/container_restart.go b/vendor/github.com/docker/docker/client/container_restart.go
deleted file mode 100644
index 41e421969f..0000000000
--- a/vendor/github.com/docker/docker/client/container_restart.go
+++ /dev/null
@@ -1,22 +0,0 @@
-package client // import "github.com/docker/docker/client"
-
-import (
- "context"
- "net/url"
- "time"
-
- timetypes "github.com/docker/docker/api/types/time"
-)
-
-// ContainerRestart stops and starts a container again.
-// It makes the daemon wait for the container to be up again for
-// a specific amount of time, given the timeout.
-func (cli *Client) ContainerRestart(ctx context.Context, containerID string, timeout *time.Duration) error {
- query := url.Values{}
- if timeout != nil {
- query.Set("t", timetypes.DurationToSecondsString(*timeout))
- }
- resp, err := cli.post(ctx, "/containers/"+containerID+"/restart", query, nil, nil)
- ensureReaderClosed(resp)
- return err
-}
diff --git a/vendor/github.com/docker/docker/client/container_start.go b/vendor/github.com/docker/docker/client/container_start.go
deleted file mode 100644
index c2e0b15dca..0000000000
--- a/vendor/github.com/docker/docker/client/container_start.go
+++ /dev/null
@@ -1,23 +0,0 @@
-package client // import "github.com/docker/docker/client"
-
-import (
- "context"
- "net/url"
-
- "github.com/docker/docker/api/types"
-)
-
-// ContainerStart sends a request to the docker daemon to start a container.
-func (cli *Client) ContainerStart(ctx context.Context, containerID string, options types.ContainerStartOptions) error {
- query := url.Values{}
- if len(options.CheckpointID) != 0 {
- query.Set("checkpoint", options.CheckpointID)
- }
- if len(options.CheckpointDir) != 0 {
- query.Set("checkpoint-dir", options.CheckpointDir)
- }
-
- resp, err := cli.post(ctx, "/containers/"+containerID+"/start", query, nil, nil)
- ensureReaderClosed(resp)
- return err
-}
diff --git a/vendor/github.com/docker/docker/client/container_stats.go b/vendor/github.com/docker/docker/client/container_stats.go
deleted file mode 100644
index 0a6488dde8..0000000000
--- a/vendor/github.com/docker/docker/client/container_stats.go
+++ /dev/null
@@ -1,42 +0,0 @@
-package client // import "github.com/docker/docker/client"
-
-import (
- "context"
- "net/url"
-
- "github.com/docker/docker/api/types"
-)
-
-// ContainerStats returns near realtime stats for a given container.
-// It's up to the caller to close the io.ReadCloser returned.
-func (cli *Client) ContainerStats(ctx context.Context, containerID string, stream bool) (types.ContainerStats, error) {
- query := url.Values{}
- query.Set("stream", "0")
- if stream {
- query.Set("stream", "1")
- }
-
- resp, err := cli.get(ctx, "/containers/"+containerID+"/stats", query, nil)
- if err != nil {
- return types.ContainerStats{}, err
- }
-
- osType := getDockerOS(resp.header.Get("Server"))
- return types.ContainerStats{Body: resp.body, OSType: osType}, err
-}
-
-// ContainerStatsOneShot gets a single stat entry from a container.
-// It differs from `ContainerStats` in that the API should not wait to prime the stats
-func (cli *Client) ContainerStatsOneShot(ctx context.Context, containerID string) (types.ContainerStats, error) {
- query := url.Values{}
- query.Set("stream", "0")
- query.Set("one-shot", "1")
-
- resp, err := cli.get(ctx, "/containers/"+containerID+"/stats", query, nil)
- if err != nil {
- return types.ContainerStats{}, err
- }
-
- osType := getDockerOS(resp.header.Get("Server"))
- return types.ContainerStats{Body: resp.body, OSType: osType}, err
-}
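For context on the one-shot stats endpoint removed above, a sketch that decodes a single sample; types.StatsJSON is assumed to be the matching response type in this vendored API, and the helper takes an already-constructed *client.Client:

	package example

	import (
		"context"
		"encoding/json"

		"github.com/docker/docker/api/types"
		"github.com/docker/docker/client"
	)

	// oneShotMemoryUsage grabs a single stats sample (no priming delay) and decodes it.
	func oneShotMemoryUsage(ctx context.Context, cli *client.Client, containerID string) (uint64, error) {
		stats, err := cli.ContainerStatsOneShot(ctx, containerID)
		if err != nil {
			return 0, err
		}
		defer stats.Body.Close()

		var v types.StatsJSON // assumed response type; see api/types for the exact schema
		if err := json.NewDecoder(stats.Body).Decode(&v); err != nil {
			return 0, err
		}
		return v.MemoryStats.Usage, nil
	}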
diff --git a/vendor/github.com/docker/docker/client/container_stop.go b/vendor/github.com/docker/docker/client/container_stop.go
deleted file mode 100644
index 629d7ab64c..0000000000
--- a/vendor/github.com/docker/docker/client/container_stop.go
+++ /dev/null
@@ -1,26 +0,0 @@
-package client // import "github.com/docker/docker/client"
-
-import (
- "context"
- "net/url"
- "time"
-
- timetypes "github.com/docker/docker/api/types/time"
-)
-
-// ContainerStop stops a container. In case the container fails to stop
-// gracefully within a time frame specified by the timeout argument,
-// it is forcefully terminated (killed).
-//
-// If the timeout is nil, the container's StopTimeout value is used, if set,
-// otherwise the engine default. A negative timeout value can be specified,
-// meaning no timeout, i.e. no forceful termination is performed.
-func (cli *Client) ContainerStop(ctx context.Context, containerID string, timeout *time.Duration) error {
- query := url.Values{}
- if timeout != nil {
- query.Set("t", timetypes.DurationToSecondsString(*timeout))
- }
- resp, err := cli.post(ctx, "/containers/"+containerID+"/stop", query, nil, nil)
- ensureReaderClosed(resp)
- return err
-}
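For context on the timeout semantics documented above, a sketch that stops a container with an explicit grace period; the helper takes an already-constructed *client.Client and the 10-second value is illustrative:

	package example

	import (
		"context"
		"time"

		"github.com/docker/docker/client"
	)

	// stopWithGrace stops a container with an explicit 10-second grace period. Passing
	// nil instead falls back to the container's StopTimeout (or the engine default),
	// and a negative duration disables the forceful kill entirely.
	func stopWithGrace(ctx context.Context, cli *client.Client, containerID string) error {
		timeout := 10 * time.Second
		return cli.ContainerStop(ctx, containerID, &timeout)
	}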
diff --git a/vendor/github.com/docker/docker/client/container_top.go b/vendor/github.com/docker/docker/client/container_top.go
deleted file mode 100644
index a5b78999bf..0000000000
--- a/vendor/github.com/docker/docker/client/container_top.go
+++ /dev/null
@@ -1,28 +0,0 @@
-package client // import "github.com/docker/docker/client"
-
-import (
- "context"
- "encoding/json"
- "net/url"
- "strings"
-
- "github.com/docker/docker/api/types/container"
-)
-
-// ContainerTop shows process information from within a container.
-func (cli *Client) ContainerTop(ctx context.Context, containerID string, arguments []string) (container.ContainerTopOKBody, error) {
- var response container.ContainerTopOKBody
- query := url.Values{}
- if len(arguments) > 0 {
- query.Set("ps_args", strings.Join(arguments, " "))
- }
-
- resp, err := cli.get(ctx, "/containers/"+containerID+"/top", query, nil)
- defer ensureReaderClosed(resp)
- if err != nil {
- return response, err
- }
-
- err = json.NewDecoder(resp.body).Decode(&response)
- return response, err
-}
diff --git a/vendor/github.com/docker/docker/client/container_unpause.go b/vendor/github.com/docker/docker/client/container_unpause.go
deleted file mode 100644
index 1d8f873169..0000000000
--- a/vendor/github.com/docker/docker/client/container_unpause.go
+++ /dev/null
@@ -1,10 +0,0 @@
-package client // import "github.com/docker/docker/client"
-
-import "context"
-
-// ContainerUnpause resumes the process execution within a container
-func (cli *Client) ContainerUnpause(ctx context.Context, containerID string) error {
- resp, err := cli.post(ctx, "/containers/"+containerID+"/unpause", nil, nil, nil)
- ensureReaderClosed(resp)
- return err
-}
diff --git a/vendor/github.com/docker/docker/client/container_update.go b/vendor/github.com/docker/docker/client/container_update.go
deleted file mode 100644
index 6917cf9fb3..0000000000
--- a/vendor/github.com/docker/docker/client/container_update.go
+++ /dev/null
@@ -1,21 +0,0 @@
-package client // import "github.com/docker/docker/client"
-
-import (
- "context"
- "encoding/json"
-
- "github.com/docker/docker/api/types/container"
-)
-
-// ContainerUpdate updates resources of a container
-func (cli *Client) ContainerUpdate(ctx context.Context, containerID string, updateConfig container.UpdateConfig) (container.ContainerUpdateOKBody, error) {
- var response container.ContainerUpdateOKBody
- serverResp, err := cli.post(ctx, "/containers/"+containerID+"/update", nil, updateConfig, nil)
- defer ensureReaderClosed(serverResp)
- if err != nil {
- return response, err
- }
-
- err = json.NewDecoder(serverResp.body).Decode(&response)
- return response, err
-}
diff --git a/vendor/github.com/docker/docker/client/container_wait.go b/vendor/github.com/docker/docker/client/container_wait.go
deleted file mode 100644
index 6ab8c1da96..0000000000
--- a/vendor/github.com/docker/docker/client/container_wait.go
+++ /dev/null
@@ -1,83 +0,0 @@
-package client // import "github.com/docker/docker/client"
-
-import (
- "context"
- "encoding/json"
- "net/url"
-
- "github.com/docker/docker/api/types/container"
- "github.com/docker/docker/api/types/versions"
-)
-
-// ContainerWait waits until the specified container is in a certain state
-// indicated by the given condition, either "not-running" (default),
-// "next-exit", or "removed".
-//
-// If this client's API version is before 1.30, condition is ignored and
-// ContainerWait will return immediately with the two channels, as the server
-// will wait as if the condition were "not-running".
-//
-// If this client's API version is at least 1.30, ContainerWait blocks until
-// the request has been acknowledged by the server (with a response header),
-// then returns two channels on which the caller can wait for the exit status
-// of the container or an error if there was a problem either beginning the
-// wait request or in getting the response. This allows the caller to
-// synchronize ContainerWait with other calls, such as specifying a
-// "next-exit" condition before issuing a ContainerStart request.
-func (cli *Client) ContainerWait(ctx context.Context, containerID string, condition container.WaitCondition) (<-chan container.ContainerWaitOKBody, <-chan error) {
- if versions.LessThan(cli.ClientVersion(), "1.30") {
- return cli.legacyContainerWait(ctx, containerID)
- }
-
- resultC := make(chan container.ContainerWaitOKBody)
- errC := make(chan error, 1)
-
- query := url.Values{}
- query.Set("condition", string(condition))
-
- resp, err := cli.post(ctx, "/containers/"+containerID+"/wait", query, nil, nil)
- if err != nil {
- defer ensureReaderClosed(resp)
- errC <- err
- return resultC, errC
- }
-
- go func() {
- defer ensureReaderClosed(resp)
- var res container.ContainerWaitOKBody
- if err := json.NewDecoder(resp.body).Decode(&res); err != nil {
- errC <- err
- return
- }
-
- resultC <- res
- }()
-
- return resultC, errC
-}
-
-// legacyContainerWait returns immediately and doesn't have an option to wait
-// until the container is removed.
-func (cli *Client) legacyContainerWait(ctx context.Context, containerID string) (<-chan container.ContainerWaitOKBody, <-chan error) {
- resultC := make(chan container.ContainerWaitOKBody)
- errC := make(chan error)
-
- go func() {
- resp, err := cli.post(ctx, "/containers/"+containerID+"/wait", nil, nil, nil)
- if err != nil {
- errC <- err
- return
- }
- defer ensureReaderClosed(resp)
-
- var res container.ContainerWaitOKBody
- if err := json.NewDecoder(resp.body).Decode(&res); err != nil {
- errC <- err
- return
- }
-
- resultC <- res
- }()
-
- return resultC, errC
-}
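For context on the wait/start synchronization documented above, a sketch of the usual select-on-two-channels pattern; the helper takes an already-constructed *client.Client and assumes the container is created but not yet started:

	package example

	import (
		"context"
		"fmt"

		"github.com/docker/docker/api/types"
		"github.com/docker/docker/api/types/container"
		"github.com/docker/docker/client"
	)

	// startAndWait issues the wait with the "next-exit" condition *before* starting the
	// container, then selects on the two channels returned by ContainerWait.
	func startAndWait(ctx context.Context, cli *client.Client, containerID string) (int64, error) {
		statusCh, errCh := cli.ContainerWait(ctx, containerID, container.WaitConditionNextExit)

		if err := cli.ContainerStart(ctx, containerID, types.ContainerStartOptions{}); err != nil {
			return 0, err
		}

		select {
		case err := <-errCh:
			return 0, err
		case status := <-statusCh:
			if status.Error != nil {
				return status.StatusCode, fmt.Errorf("wait error: %s", status.Error.Message)
			}
			return status.StatusCode, nil
		}
	}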
diff --git a/vendor/github.com/docker/docker/client/disk_usage.go b/vendor/github.com/docker/docker/client/disk_usage.go
deleted file mode 100644
index 354cd36939..0000000000
--- a/vendor/github.com/docker/docker/client/disk_usage.go
+++ /dev/null
@@ -1,26 +0,0 @@
-package client // import "github.com/docker/docker/client"
-
-import (
- "context"
- "encoding/json"
- "fmt"
-
- "github.com/docker/docker/api/types"
-)
-
-// DiskUsage requests the current data usage from the daemon
-func (cli *Client) DiskUsage(ctx context.Context) (types.DiskUsage, error) {
- var du types.DiskUsage
-
- serverResp, err := cli.get(ctx, "/system/df", nil, nil)
- defer ensureReaderClosed(serverResp)
- if err != nil {
- return du, err
- }
-
- if err := json.NewDecoder(serverResp.body).Decode(&du); err != nil {
- return du, fmt.Errorf("Error retrieving disk usage: %v", err)
- }
-
- return du, nil
-}
diff --git a/vendor/github.com/docker/docker/client/distribution_inspect.go b/vendor/github.com/docker/docker/client/distribution_inspect.go
deleted file mode 100644
index f4e3794cb4..0000000000
--- a/vendor/github.com/docker/docker/client/distribution_inspect.go
+++ /dev/null
@@ -1,38 +0,0 @@
-package client // import "github.com/docker/docker/client"
-
-import (
- "context"
- "encoding/json"
- "net/url"
-
- registrytypes "github.com/docker/docker/api/types/registry"
-)
-
-// DistributionInspect returns the image digest with full Manifest
-func (cli *Client) DistributionInspect(ctx context.Context, image, encodedRegistryAuth string) (registrytypes.DistributionInspect, error) {
- // Contact the registry to retrieve digest and platform information
- var distributionInspect registrytypes.DistributionInspect
- if image == "" {
- return distributionInspect, objectNotFoundError{object: "distribution", id: image}
- }
-
- if err := cli.NewVersionError("1.30", "distribution inspect"); err != nil {
- return distributionInspect, err
- }
- var headers map[string][]string
-
- if encodedRegistryAuth != "" {
- headers = map[string][]string{
- "X-Registry-Auth": {encodedRegistryAuth},
- }
- }
-
- resp, err := cli.get(ctx, "/distribution/"+image+"/json", url.Values{}, headers)
- defer ensureReaderClosed(resp)
- if err != nil {
- return distributionInspect, err
- }
-
- err = json.NewDecoder(resp.body).Decode(&distributionInspect)
- return distributionInspect, err
-}
diff --git a/vendor/github.com/docker/docker/client/errors.go b/vendor/github.com/docker/docker/client/errors.go
deleted file mode 100644
index 041bc8d49c..0000000000
--- a/vendor/github.com/docker/docker/client/errors.go
+++ /dev/null
@@ -1,138 +0,0 @@
-package client // import "github.com/docker/docker/client"
-
-import (
- "fmt"
- "net/http"
-
- "github.com/docker/docker/api/types/versions"
- "github.com/docker/docker/errdefs"
- "github.com/pkg/errors"
-)
-
-// errConnectionFailed implements an error returned when connection failed.
-type errConnectionFailed struct {
- host string
-}
-
-// Error returns a string representation of an errConnectionFailed
-func (err errConnectionFailed) Error() string {
- if err.host == "" {
- return "Cannot connect to the Docker daemon. Is the docker daemon running on this host?"
- }
- return fmt.Sprintf("Cannot connect to the Docker daemon at %s. Is the docker daemon running?", err.host)
-}
-
-// IsErrConnectionFailed returns true if the error is caused by connection failed.
-func IsErrConnectionFailed(err error) bool {
- return errors.As(err, &errConnectionFailed{})
-}
-
-// ErrorConnectionFailed returns an error with host in the error message when connection to docker daemon failed.
-func ErrorConnectionFailed(host string) error {
- return errConnectionFailed{host: host}
-}
-
-// Deprecated: use the errdefs.NotFound() interface instead. Kept for backward compatibility
-type notFound interface {
- error
- NotFound() bool
-}
-
-// IsErrNotFound returns true if the error is a NotFound error, which is returned
-// by the API when some object is not found.
-func IsErrNotFound(err error) bool {
- var e notFound
- if errors.As(err, &e) {
- return true
- }
- return errdefs.IsNotFound(err)
-}
-
-type objectNotFoundError struct {
- object string
- id string
-}
-
-func (e objectNotFoundError) NotFound() {}
-
-func (e objectNotFoundError) Error() string {
- return fmt.Sprintf("Error: No such %s: %s", e.object, e.id)
-}
-
-func wrapResponseError(err error, resp serverResponse, object, id string) error {
- switch {
- case err == nil:
- return nil
- case resp.statusCode == http.StatusNotFound:
- return objectNotFoundError{object: object, id: id}
- case resp.statusCode == http.StatusNotImplemented:
- return errdefs.NotImplemented(err)
- default:
- return err
- }
-}
-
-// unauthorizedError represents an authorization error in a remote registry.
-type unauthorizedError struct {
- cause error
-}
-
-// Error returns a string representation of an unauthorizedError
-func (u unauthorizedError) Error() string {
- return u.cause.Error()
-}
-
-// IsErrUnauthorized returns true if the error is caused
-// when a remote registry authentication fails
-func IsErrUnauthorized(err error) bool {
- if _, ok := err.(unauthorizedError); ok {
- return ok
- }
- return errdefs.IsUnauthorized(err)
-}
-
-type pluginPermissionDenied struct {
- name string
-}
-
-func (e pluginPermissionDenied) Error() string {
- return "Permission denied while installing plugin " + e.name
-}
-
-// IsErrPluginPermissionDenied returns true if the error is caused
-// when a user denies a plugin's permissions
-func IsErrPluginPermissionDenied(err error) bool {
- _, ok := err.(pluginPermissionDenied)
- return ok
-}
-
-type notImplementedError struct {
- message string
-}
-
-func (e notImplementedError) Error() string {
- return e.message
-}
-
-func (e notImplementedError) NotImplemented() bool {
- return true
-}
-
-// IsErrNotImplemented returns true if the error is a NotImplemented error.
-// This is returned by the API when a requested feature has not been
-// implemented.
-func IsErrNotImplemented(err error) bool {
- if _, ok := err.(notImplementedError); ok {
- return ok
- }
- return errdefs.IsNotImplemented(err)
-}
-
-// NewVersionError returns an error if the APIVersion required
-// if less than the current supported version
-func (cli *Client) NewVersionError(APIrequired, feature string) error {
- if cli.version != "" && versions.LessThan(cli.version, APIrequired) {
- return fmt.Errorf("%q requires API version %s, but the Docker daemon API version is %s", feature, APIrequired, cli.version)
- }
- return nil
-}
diff --git a/vendor/github.com/docker/docker/client/events.go b/vendor/github.com/docker/docker/client/events.go
deleted file mode 100644
index f0dc9d9e12..0000000000
--- a/vendor/github.com/docker/docker/client/events.go
+++ /dev/null
@@ -1,102 +0,0 @@
-package client // import "github.com/docker/docker/client"
-
-import (
- "context"
- "encoding/json"
- "net/url"
- "time"
-
- "github.com/docker/docker/api/types"
- "github.com/docker/docker/api/types/events"
- "github.com/docker/docker/api/types/filters"
- timetypes "github.com/docker/docker/api/types/time"
-)
-
-// Events returns a stream of events in the daemon. It's up to the caller to close the stream
-// by cancelling the context. Once the stream has been completely read an io.EOF error will
-// be sent over the error channel. If an error is sent all processing will be stopped. It's up
-// to the caller to reopen the stream in the event of an error by reinvoking this method.
-func (cli *Client) Events(ctx context.Context, options types.EventsOptions) (<-chan events.Message, <-chan error) {
-
- messages := make(chan events.Message)
- errs := make(chan error, 1)
-
- started := make(chan struct{})
- go func() {
- defer close(errs)
-
- query, err := buildEventsQueryParams(cli.version, options)
- if err != nil {
- close(started)
- errs <- err
- return
- }
-
- resp, err := cli.get(ctx, "/events", query, nil)
- if err != nil {
- close(started)
- errs <- err
- return
- }
- defer resp.body.Close()
-
- decoder := json.NewDecoder(resp.body)
-
- close(started)
- for {
- select {
- case <-ctx.Done():
- errs <- ctx.Err()
- return
- default:
- var event events.Message
- if err := decoder.Decode(&event); err != nil {
- errs <- err
- return
- }
-
- select {
- case messages <- event:
- case <-ctx.Done():
- errs <- ctx.Err()
- return
- }
- }
- }
- }()
- <-started
-
- return messages, errs
-}
-
-func buildEventsQueryParams(cliVersion string, options types.EventsOptions) (url.Values, error) {
- query := url.Values{}
- ref := time.Now()
-
- if options.Since != "" {
- ts, err := timetypes.GetTimestamp(options.Since, ref)
- if err != nil {
- return nil, err
- }
- query.Set("since", ts)
- }
-
- if options.Until != "" {
- ts, err := timetypes.GetTimestamp(options.Until, ref)
- if err != nil {
- return nil, err
- }
- query.Set("until", ts)
- }
-
- if options.Filters.Len() > 0 {
- //nolint:staticcheck // ignore SA1019 for old code
- filterJSON, err := filters.ToParamWithVersion(cliVersion, options.Filters)
- if err != nil {
- return nil, err
- }
- query.Set("filters", filterJSON)
- }
-
- return query, nil
-}
diff --git a/vendor/github.com/docker/docker/client/hijack.go b/vendor/github.com/docker/docker/client/hijack.go
deleted file mode 100644
index e1dc49ef0f..0000000000
--- a/vendor/github.com/docker/docker/client/hijack.go
+++ /dev/null
@@ -1,145 +0,0 @@
-package client // import "github.com/docker/docker/client"
-
-import (
- "bufio"
- "context"
- "crypto/tls"
- "fmt"
- "net"
- "net/http"
- "net/http/httputil"
- "net/url"
- "time"
-
- "github.com/docker/docker/api/types"
- "github.com/docker/go-connections/sockets"
- "github.com/pkg/errors"
-)
-
-// postHijacked sends a POST request and hijacks the connection.
-func (cli *Client) postHijacked(ctx context.Context, path string, query url.Values, body interface{}, headers map[string][]string) (types.HijackedResponse, error) {
- bodyEncoded, err := encodeData(body)
- if err != nil {
- return types.HijackedResponse{}, err
- }
-
- apiPath := cli.getAPIPath(ctx, path, query)
- req, err := http.NewRequest(http.MethodPost, apiPath, bodyEncoded)
- if err != nil {
- return types.HijackedResponse{}, err
- }
- req = cli.addHeaders(req, headers)
-
- conn, err := cli.setupHijackConn(ctx, req, "tcp")
- if err != nil {
- return types.HijackedResponse{}, err
- }
-
- return types.HijackedResponse{Conn: conn, Reader: bufio.NewReader(conn)}, err
-}
-
-// DialHijack returns a hijacked connection with negotiated protocol proto.
-func (cli *Client) DialHijack(ctx context.Context, url, proto string, meta map[string][]string) (net.Conn, error) {
- req, err := http.NewRequest(http.MethodPost, url, nil)
- if err != nil {
- return nil, err
- }
- req = cli.addHeaders(req, meta)
-
- return cli.setupHijackConn(ctx, req, proto)
-}
-
-// fallbackDial is used when WithDialer() was not called.
-// See cli.Dialer().
-func fallbackDial(proto, addr string, tlsConfig *tls.Config) (net.Conn, error) {
- if tlsConfig != nil && proto != "unix" && proto != "npipe" {
- return tls.Dial(proto, addr, tlsConfig)
- }
- if proto == "npipe" {
- return sockets.DialPipe(addr, 32*time.Second)
- }
- return net.Dial(proto, addr)
-}
-
-func (cli *Client) setupHijackConn(ctx context.Context, req *http.Request, proto string) (net.Conn, error) {
- req.Host = cli.addr
- req.Header.Set("Connection", "Upgrade")
- req.Header.Set("Upgrade", proto)
-
- dialer := cli.Dialer()
- conn, err := dialer(ctx)
- if err != nil {
- return nil, errors.Wrap(err, "cannot connect to the Docker daemon. Is 'docker daemon' running on this host?")
- }
-
- // When we set up a TCP connection for hijack, there could be long periods
- // of inactivity (a long running command with no output) that in certain
- // network setups may cause ECONNTIMEOUT, leaving the client in an unknown
- // state. Setting TCP KeepAlive on the socket connection will prohibit
- // ECONNTIMEOUT unless the socket connection truly is broken
- if tcpConn, ok := conn.(*net.TCPConn); ok {
- tcpConn.SetKeepAlive(true)
- tcpConn.SetKeepAlivePeriod(30 * time.Second)
- }
-
- clientconn := httputil.NewClientConn(conn, nil)
- defer clientconn.Close()
-
- // Server hijacks the connection, error 'connection closed' expected
- resp, err := clientconn.Do(req)
-
- //nolint:staticcheck // ignore SA1019 for connecting to old (pre go1.8) daemons
- if err != httputil.ErrPersistEOF {
- if err != nil {
- return nil, err
- }
- if resp.StatusCode != http.StatusSwitchingProtocols {
- resp.Body.Close()
- return nil, fmt.Errorf("unable to upgrade to %s, received %d", proto, resp.StatusCode)
- }
- }
-
- c, br := clientconn.Hijack()
- if br.Buffered() > 0 {
- // If there is buffered content, wrap the connection. We return an
- // object that implements CloseWrite iff the underlying connection
- // implements it.
- if _, ok := c.(types.CloseWriter); ok {
- c = &hijackedConnCloseWriter{&hijackedConn{c, br}}
- } else {
- c = &hijackedConn{c, br}
- }
- } else {
- br.Reset(nil)
- }
-
- return c, nil
-}
-
-// hijackedConn wraps a net.Conn and is returned by setupHijackConn in the case
-// that a) there was already buffered data in the http layer when Hijack() was
-// called, and b) the underlying net.Conn does *not* implement CloseWrite().
-// hijackedConn does not implement CloseWrite() either.
-type hijackedConn struct {
- net.Conn
- r *bufio.Reader
-}
-
-func (c *hijackedConn) Read(b []byte) (int, error) {
- return c.r.Read(b)
-}
-
-// hijackedConnCloseWriter is a hijackedConn which additionally implements
-// CloseWrite(). It is returned by setupHijackConn in the case that a) there
-// was already buffered data in the http layer when Hijack() was called, and b)
-// the underlying net.Conn *does* implement CloseWrite().
-type hijackedConnCloseWriter struct {
- *hijackedConn
-}
-
-var _ types.CloseWriter = &hijackedConnCloseWriter{}
-
-func (c *hijackedConnCloseWriter) CloseWrite() error {
- conn := c.Conn.(types.CloseWriter)
- return conn.CloseWrite()
-}
diff --git a/vendor/github.com/docker/docker/client/image_build.go b/vendor/github.com/docker/docker/client/image_build.go
deleted file mode 100644
index 8fcf995036..0000000000
--- a/vendor/github.com/docker/docker/client/image_build.go
+++ /dev/null
@@ -1,146 +0,0 @@
-package client // import "github.com/docker/docker/client"
-
-import (
- "context"
- "encoding/base64"
- "encoding/json"
- "io"
- "net/http"
- "net/url"
- "strconv"
- "strings"
-
- "github.com/docker/docker/api/types"
- "github.com/docker/docker/api/types/container"
-)
-
-// ImageBuild sends request to the daemon to build images.
-// The Body in the response implement an io.ReadCloser and it's up to the caller to
-// close it.
-func (cli *Client) ImageBuild(ctx context.Context, buildContext io.Reader, options types.ImageBuildOptions) (types.ImageBuildResponse, error) {
- query, err := cli.imageBuildOptionsToQuery(options)
- if err != nil {
- return types.ImageBuildResponse{}, err
- }
-
- headers := http.Header(make(map[string][]string))
- buf, err := json.Marshal(options.AuthConfigs)
- if err != nil {
- return types.ImageBuildResponse{}, err
- }
- headers.Add("X-Registry-Config", base64.URLEncoding.EncodeToString(buf))
-
- headers.Set("Content-Type", "application/x-tar")
-
- serverResp, err := cli.postRaw(ctx, "/build", query, buildContext, headers)
- if err != nil {
- return types.ImageBuildResponse{}, err
- }
-
- osType := getDockerOS(serverResp.header.Get("Server"))
-
- return types.ImageBuildResponse{
- Body: serverResp.body,
- OSType: osType,
- }, nil
-}
-
-func (cli *Client) imageBuildOptionsToQuery(options types.ImageBuildOptions) (url.Values, error) {
- query := url.Values{
- "t": options.Tags,
- "securityopt": options.SecurityOpt,
- "extrahosts": options.ExtraHosts,
- }
- if options.SuppressOutput {
- query.Set("q", "1")
- }
- if options.RemoteContext != "" {
- query.Set("remote", options.RemoteContext)
- }
- if options.NoCache {
- query.Set("nocache", "1")
- }
- if options.Remove {
- query.Set("rm", "1")
- } else {
- query.Set("rm", "0")
- }
-
- if options.ForceRemove {
- query.Set("forcerm", "1")
- }
-
- if options.PullParent {
- query.Set("pull", "1")
- }
-
- if options.Squash {
- if err := cli.NewVersionError("1.25", "squash"); err != nil {
- return query, err
- }
- query.Set("squash", "1")
- }
-
- if !container.Isolation.IsDefault(options.Isolation) {
- query.Set("isolation", string(options.Isolation))
- }
-
- query.Set("cpusetcpus", options.CPUSetCPUs)
- query.Set("networkmode", options.NetworkMode)
- query.Set("cpusetmems", options.CPUSetMems)
- query.Set("cpushares", strconv.FormatInt(options.CPUShares, 10))
- query.Set("cpuquota", strconv.FormatInt(options.CPUQuota, 10))
- query.Set("cpuperiod", strconv.FormatInt(options.CPUPeriod, 10))
- query.Set("memory", strconv.FormatInt(options.Memory, 10))
- query.Set("memswap", strconv.FormatInt(options.MemorySwap, 10))
- query.Set("cgroupparent", options.CgroupParent)
- query.Set("shmsize", strconv.FormatInt(options.ShmSize, 10))
- query.Set("dockerfile", options.Dockerfile)
- query.Set("target", options.Target)
-
- ulimitsJSON, err := json.Marshal(options.Ulimits)
- if err != nil {
- return query, err
- }
- query.Set("ulimits", string(ulimitsJSON))
-
- buildArgsJSON, err := json.Marshal(options.BuildArgs)
- if err != nil {
- return query, err
- }
- query.Set("buildargs", string(buildArgsJSON))
-
- labelsJSON, err := json.Marshal(options.Labels)
- if err != nil {
- return query, err
- }
- query.Set("labels", string(labelsJSON))
-
- cacheFromJSON, err := json.Marshal(options.CacheFrom)
- if err != nil {
- return query, err
- }
- query.Set("cachefrom", string(cacheFromJSON))
- if options.SessionID != "" {
- query.Set("session", options.SessionID)
- }
- if options.Platform != "" {
- if err := cli.NewVersionError("1.32", "platform"); err != nil {
- return query, err
- }
- query.Set("platform", strings.ToLower(options.Platform))
- }
- if options.BuildID != "" {
- query.Set("buildid", options.BuildID)
- }
- query.Set("version", string(options.Version))
-
- if options.Outputs != nil {
- outputsJSON, err := json.Marshal(options.Outputs)
- if err != nil {
- return query, err
- }
- query.Set("outputs", string(outputsJSON))
- }
- return query, nil
-}
diff --git a/vendor/github.com/docker/docker/client/image_create.go b/vendor/github.com/docker/docker/client/image_create.go
deleted file mode 100644
index 239380474e..0000000000
--- a/vendor/github.com/docker/docker/client/image_create.go
+++ /dev/null
@@ -1,37 +0,0 @@
-package client // import "github.com/docker/docker/client"
-
-import (
- "context"
- "io"
- "net/url"
- "strings"
-
- "github.com/docker/distribution/reference"
- "github.com/docker/docker/api/types"
-)
-
-// ImageCreate creates a new image based in the parent options.
-// It returns the JSON content in the response body.
-func (cli *Client) ImageCreate(ctx context.Context, parentReference string, options types.ImageCreateOptions) (io.ReadCloser, error) {
- ref, err := reference.ParseNormalizedNamed(parentReference)
- if err != nil {
- return nil, err
- }
-
- query := url.Values{}
- query.Set("fromImage", reference.FamiliarName(ref))
- query.Set("tag", getAPITagFromNamedRef(ref))
- if options.Platform != "" {
- query.Set("platform", strings.ToLower(options.Platform))
- }
- resp, err := cli.tryImageCreate(ctx, query, options.RegistryAuth)
- if err != nil {
- return nil, err
- }
- return resp.body, nil
-}
-
-func (cli *Client) tryImageCreate(ctx context.Context, query url.Values, registryAuth string) (serverResponse, error) {
- headers := map[string][]string{"X-Registry-Auth": {registryAuth}}
- return cli.post(ctx, "/images/create", query, nil, headers)
-}
diff --git a/vendor/github.com/docker/docker/client/image_history.go b/vendor/github.com/docker/docker/client/image_history.go
deleted file mode 100644
index b5bea10d8f..0000000000
--- a/vendor/github.com/docker/docker/client/image_history.go
+++ /dev/null
@@ -1,22 +0,0 @@
-package client // import "github.com/docker/docker/client"
-
-import (
- "context"
- "encoding/json"
- "net/url"
-
- "github.com/docker/docker/api/types/image"
-)
-
-// ImageHistory returns the changes in an image in history format.
-func (cli *Client) ImageHistory(ctx context.Context, imageID string) ([]image.HistoryResponseItem, error) {
- var history []image.HistoryResponseItem
- serverResp, err := cli.get(ctx, "/images/"+imageID+"/history", url.Values{}, nil)
- defer ensureReaderClosed(serverResp)
- if err != nil {
- return history, err
- }
-
- err = json.NewDecoder(serverResp.body).Decode(&history)
- return history, err
-}
diff --git a/vendor/github.com/docker/docker/client/image_import.go b/vendor/github.com/docker/docker/client/image_import.go
deleted file mode 100644
index d3336d4106..0000000000
--- a/vendor/github.com/docker/docker/client/image_import.go
+++ /dev/null
@@ -1,40 +0,0 @@
-package client // import "github.com/docker/docker/client"
-
-import (
- "context"
- "io"
- "net/url"
- "strings"
-
- "github.com/docker/distribution/reference"
- "github.com/docker/docker/api/types"
-)
-
-// ImageImport creates a new image based in the source options.
-// It returns the JSON content in the response body.
-func (cli *Client) ImageImport(ctx context.Context, source types.ImageImportSource, ref string, options types.ImageImportOptions) (io.ReadCloser, error) {
- if ref != "" {
- // Check if the given image name can be resolved
- if _, err := reference.ParseNormalizedNamed(ref); err != nil {
- return nil, err
- }
- }
-
- query := url.Values{}
- query.Set("fromSrc", source.SourceName)
- query.Set("repo", ref)
- query.Set("tag", options.Tag)
- query.Set("message", options.Message)
- if options.Platform != "" {
- query.Set("platform", strings.ToLower(options.Platform))
- }
- for _, change := range options.Changes {
- query.Add("changes", change)
- }
-
- resp, err := cli.postRaw(ctx, "/images/create", query, source.Source, nil)
- if err != nil {
- return nil, err
- }
- return resp.body, nil
-}
diff --git a/vendor/github.com/docker/docker/client/image_inspect.go b/vendor/github.com/docker/docker/client/image_inspect.go
deleted file mode 100644
index 1eb8dce025..0000000000
--- a/vendor/github.com/docker/docker/client/image_inspect.go
+++ /dev/null
@@ -1,32 +0,0 @@
-package client // import "github.com/docker/docker/client"
-
-import (
- "bytes"
- "context"
- "encoding/json"
- "io/ioutil"
-
- "github.com/docker/docker/api/types"
-)
-
-// ImageInspectWithRaw returns the image information and its raw representation.
-func (cli *Client) ImageInspectWithRaw(ctx context.Context, imageID string) (types.ImageInspect, []byte, error) {
- if imageID == "" {
- return types.ImageInspect{}, nil, objectNotFoundError{object: "image", id: imageID}
- }
- serverResp, err := cli.get(ctx, "/images/"+imageID+"/json", nil, nil)
- defer ensureReaderClosed(serverResp)
- if err != nil {
- return types.ImageInspect{}, nil, wrapResponseError(err, serverResp, "image", imageID)
- }
-
- body, err := ioutil.ReadAll(serverResp.body)
- if err != nil {
- return types.ImageInspect{}, nil, err
- }
-
- var response types.ImageInspect
- rdr := bytes.NewReader(body)
- err = json.NewDecoder(rdr).Decode(&response)
- return response, body, err
-}
diff --git a/vendor/github.com/docker/docker/client/image_list.go b/vendor/github.com/docker/docker/client/image_list.go
deleted file mode 100644
index a4d7505094..0000000000
--- a/vendor/github.com/docker/docker/client/image_list.go
+++ /dev/null
@@ -1,46 +0,0 @@
-package client // import "github.com/docker/docker/client"
-
-import (
- "context"
- "encoding/json"
- "net/url"
-
- "github.com/docker/docker/api/types"
- "github.com/docker/docker/api/types/filters"
- "github.com/docker/docker/api/types/versions"
-)
-
-// ImageList returns a list of images in the docker host.
-func (cli *Client) ImageList(ctx context.Context, options types.ImageListOptions) ([]types.ImageSummary, error) {
- var images []types.ImageSummary
- query := url.Values{}
-
- optionFilters := options.Filters
- referenceFilters := optionFilters.Get("reference")
- if versions.LessThan(cli.version, "1.25") && len(referenceFilters) > 0 {
- query.Set("filter", referenceFilters[0])
- for _, filterValue := range referenceFilters {
- optionFilters.Del("reference", filterValue)
- }
- }
- if optionFilters.Len() > 0 {
- //nolint:staticcheck // ignore SA1019 for old code
- filterJSON, err := filters.ToParamWithVersion(cli.version, optionFilters)
- if err != nil {
- return images, err
- }
- query.Set("filters", filterJSON)
- }
- if options.All {
- query.Set("all", "1")
- }
-
- serverResp, err := cli.get(ctx, "/images/json", query, nil)
- defer ensureReaderClosed(serverResp)
- if err != nil {
- return images, err
- }
-
- err = json.NewDecoder(serverResp.body).Decode(&images)
- return images, err
-}
diff --git a/vendor/github.com/docker/docker/client/image_load.go b/vendor/github.com/docker/docker/client/image_load.go
deleted file mode 100644
index 91016e493c..0000000000
--- a/vendor/github.com/docker/docker/client/image_load.go
+++ /dev/null
@@ -1,29 +0,0 @@
-package client // import "github.com/docker/docker/client"
-
-import (
- "context"
- "io"
- "net/url"
-
- "github.com/docker/docker/api/types"
-)
-
-// ImageLoad loads an image in the docker host from the client host.
-// It's up to the caller to close the io.ReadCloser in the
-// ImageLoadResponse returned by this function.
-func (cli *Client) ImageLoad(ctx context.Context, input io.Reader, quiet bool) (types.ImageLoadResponse, error) {
- v := url.Values{}
- v.Set("quiet", "0")
- if quiet {
- v.Set("quiet", "1")
- }
- headers := map[string][]string{"Content-Type": {"application/x-tar"}}
- resp, err := cli.postRaw(ctx, "/images/load", v, input, headers)
- if err != nil {
- return types.ImageLoadResponse{}, err
- }
- return types.ImageLoadResponse{
- Body: resp.body,
- JSON: resp.header.Get("Content-Type") == "application/json",
- }, nil
-}
diff --git a/vendor/github.com/docker/docker/client/image_prune.go b/vendor/github.com/docker/docker/client/image_prune.go
deleted file mode 100644
index 56af6d7f98..0000000000
--- a/vendor/github.com/docker/docker/client/image_prune.go
+++ /dev/null
@@ -1,36 +0,0 @@
-package client // import "github.com/docker/docker/client"
-
-import (
- "context"
- "encoding/json"
- "fmt"
-
- "github.com/docker/docker/api/types"
- "github.com/docker/docker/api/types/filters"
-)
-
-// ImagesPrune requests the daemon to delete unused data
-func (cli *Client) ImagesPrune(ctx context.Context, pruneFilters filters.Args) (types.ImagesPruneReport, error) {
- var report types.ImagesPruneReport
-
- if err := cli.NewVersionError("1.25", "image prune"); err != nil {
- return report, err
- }
-
- query, err := getFiltersQuery(pruneFilters)
- if err != nil {
- return report, err
- }
-
- serverResp, err := cli.post(ctx, "/images/prune", query, nil, nil)
- defer ensureReaderClosed(serverResp)
- if err != nil {
- return report, err
- }
-
- if err := json.NewDecoder(serverResp.body).Decode(&report); err != nil {
- return report, fmt.Errorf("Error retrieving disk usage: %v", err)
- }
-
- return report, nil
-}
diff --git a/vendor/github.com/docker/docker/client/image_pull.go b/vendor/github.com/docker/docker/client/image_pull.go
deleted file mode 100644
index a23975591b..0000000000
--- a/vendor/github.com/docker/docker/client/image_pull.go
+++ /dev/null
@@ -1,64 +0,0 @@
-package client // import "github.com/docker/docker/client"
-
-import (
- "context"
- "io"
- "net/url"
- "strings"
-
- "github.com/docker/distribution/reference"
- "github.com/docker/docker/api/types"
- "github.com/docker/docker/errdefs"
-)
-
-// ImagePull requests the docker host to pull an image from a remote registry.
-// It executes the privileged function if the operation is unauthorized
-// and it tries one more time.
-// It's up to the caller to handle the io.ReadCloser and close it properly.
-//
-// FIXME(vdemeester): there is currently used in a few way in docker/docker
-// - if not in trusted content, ref is used to pass the whole reference, and tag is empty
-// - if in trusted content, ref is used to pass the reference name, and tag for the digest
-func (cli *Client) ImagePull(ctx context.Context, refStr string, options types.ImagePullOptions) (io.ReadCloser, error) {
- ref, err := reference.ParseNormalizedNamed(refStr)
- if err != nil {
- return nil, err
- }
-
- query := url.Values{}
- query.Set("fromImage", reference.FamiliarName(ref))
- if !options.All {
- query.Set("tag", getAPITagFromNamedRef(ref))
- }
- if options.Platform != "" {
- query.Set("platform", strings.ToLower(options.Platform))
- }
-
- resp, err := cli.tryImageCreate(ctx, query, options.RegistryAuth)
- if errdefs.IsUnauthorized(err) && options.PrivilegeFunc != nil {
- newAuthHeader, privilegeErr := options.PrivilegeFunc()
- if privilegeErr != nil {
- return nil, privilegeErr
- }
- resp, err = cli.tryImageCreate(ctx, query, newAuthHeader)
- }
- if err != nil {
- return nil, err
- }
- return resp.body, nil
-}
-
-// getAPITagFromNamedRef returns a tag from the specified reference.
-// This function is necessary as long as the docker "server" api expects
-// digests to be sent as tags and makes a distinction between the name
-// and tag/digest part of a reference.
-func getAPITagFromNamedRef(ref reference.Named) string {
- if digested, ok := ref.(reference.Digested); ok {
- return digested.Digest().String()
- }
- ref = reference.TagNameOnly(ref)
- if tagged, ok := ref.(reference.Tagged); ok {
- return tagged.Tag()
- }
- return ""
-}
diff --git a/vendor/github.com/docker/docker/client/image_push.go b/vendor/github.com/docker/docker/client/image_push.go
deleted file mode 100644
index 845580d4a4..0000000000
--- a/vendor/github.com/docker/docker/client/image_push.go
+++ /dev/null
@@ -1,54 +0,0 @@
-package client // import "github.com/docker/docker/client"
-
-import (
- "context"
- "errors"
- "io"
- "net/url"
-
- "github.com/docker/distribution/reference"
- "github.com/docker/docker/api/types"
- "github.com/docker/docker/errdefs"
-)
-
-// ImagePush requests the docker host to push an image to a remote registry.
-// It executes the privileged function if the operation is unauthorized
-// and it tries one more time.
-// It's up to the caller to handle the io.ReadCloser and close it properly.
-func (cli *Client) ImagePush(ctx context.Context, image string, options types.ImagePushOptions) (io.ReadCloser, error) {
- ref, err := reference.ParseNormalizedNamed(image)
- if err != nil {
- return nil, err
- }
-
- if _, isCanonical := ref.(reference.Canonical); isCanonical {
- return nil, errors.New("cannot push a digest reference")
- }
-
- name := reference.FamiliarName(ref)
- query := url.Values{}
- if !options.All {
- ref = reference.TagNameOnly(ref)
- if tagged, ok := ref.(reference.Tagged); ok {
- query.Set("tag", tagged.Tag())
- }
- }
-
- resp, err := cli.tryImagePush(ctx, name, query, options.RegistryAuth)
- if errdefs.IsUnauthorized(err) && options.PrivilegeFunc != nil {
- newAuthHeader, privilegeErr := options.PrivilegeFunc()
- if privilegeErr != nil {
- return nil, privilegeErr
- }
- resp, err = cli.tryImagePush(ctx, name, query, newAuthHeader)
- }
- if err != nil {
- return nil, err
- }
- return resp.body, nil
-}
-
-func (cli *Client) tryImagePush(ctx context.Context, imageID string, query url.Values, registryAuth string) (serverResponse, error) {
- headers := map[string][]string{"X-Registry-Auth": {registryAuth}}
- return cli.post(ctx, "/images/"+imageID+"/push", query, nil, headers)
-}
diff --git a/vendor/github.com/docker/docker/client/image_remove.go b/vendor/github.com/docker/docker/client/image_remove.go
deleted file mode 100644
index 84a41af0f2..0000000000
--- a/vendor/github.com/docker/docker/client/image_remove.go
+++ /dev/null
@@ -1,31 +0,0 @@
-package client // import "github.com/docker/docker/client"
-
-import (
- "context"
- "encoding/json"
- "net/url"
-
- "github.com/docker/docker/api/types"
-)
-
-// ImageRemove removes an image from the docker host.
-func (cli *Client) ImageRemove(ctx context.Context, imageID string, options types.ImageRemoveOptions) ([]types.ImageDeleteResponseItem, error) {
- query := url.Values{}
-
- if options.Force {
- query.Set("force", "1")
- }
- if !options.PruneChildren {
- query.Set("noprune", "1")
- }
-
- var dels []types.ImageDeleteResponseItem
- resp, err := cli.delete(ctx, "/images/"+imageID, query, nil)
- defer ensureReaderClosed(resp)
- if err != nil {
- return dels, wrapResponseError(err, resp, "image", imageID)
- }
-
- err = json.NewDecoder(resp.body).Decode(&dels)
- return dels, err
-}
diff --git a/vendor/github.com/docker/docker/client/image_save.go b/vendor/github.com/docker/docker/client/image_save.go
deleted file mode 100644
index d1314e4b22..0000000000
--- a/vendor/github.com/docker/docker/client/image_save.go
+++ /dev/null
@@ -1,21 +0,0 @@
-package client // import "github.com/docker/docker/client"
-
-import (
- "context"
- "io"
- "net/url"
-)
-
-// ImageSave retrieves one or more images from the docker host as an io.ReadCloser.
-// It's up to the caller to store the images and close the stream.
-func (cli *Client) ImageSave(ctx context.Context, imageIDs []string) (io.ReadCloser, error) {
- query := url.Values{
- "names": imageIDs,
- }
-
- resp, err := cli.get(ctx, "/images/get", query, nil)
- if err != nil {
- return nil, err
- }
- return resp.body, nil
-}
diff --git a/vendor/github.com/docker/docker/client/image_search.go b/vendor/github.com/docker/docker/client/image_search.go
deleted file mode 100644
index 82955a7477..0000000000
--- a/vendor/github.com/docker/docker/client/image_search.go
+++ /dev/null
@@ -1,51 +0,0 @@
-package client // import "github.com/docker/docker/client"
-
-import (
- "context"
- "encoding/json"
- "fmt"
- "net/url"
-
- "github.com/docker/docker/api/types"
- "github.com/docker/docker/api/types/filters"
- "github.com/docker/docker/api/types/registry"
- "github.com/docker/docker/errdefs"
-)
-
-// ImageSearch makes the docker host to search by a term in a remote registry.
-// The list of results is not sorted in any fashion.
-func (cli *Client) ImageSearch(ctx context.Context, term string, options types.ImageSearchOptions) ([]registry.SearchResult, error) {
- var results []registry.SearchResult
- query := url.Values{}
- query.Set("term", term)
- query.Set("limit", fmt.Sprintf("%d", options.Limit))
-
- if options.Filters.Len() > 0 {
- filterJSON, err := filters.ToJSON(options.Filters)
- if err != nil {
- return results, err
- }
- query.Set("filters", filterJSON)
- }
-
- resp, err := cli.tryImageSearch(ctx, query, options.RegistryAuth)
- defer ensureReaderClosed(resp)
- if errdefs.IsUnauthorized(err) && options.PrivilegeFunc != nil {
- newAuthHeader, privilegeErr := options.PrivilegeFunc()
- if privilegeErr != nil {
- return results, privilegeErr
- }
- resp, err = cli.tryImageSearch(ctx, query, newAuthHeader)
- }
- if err != nil {
- return results, err
- }
-
- err = json.NewDecoder(resp.body).Decode(&results)
- return results, err
-}
-
-func (cli *Client) tryImageSearch(ctx context.Context, query url.Values, registryAuth string) (serverResponse, error) {
- headers := map[string][]string{"X-Registry-Auth": {registryAuth}}
- return cli.get(ctx, "/images/search", query, headers)
-}
diff --git a/vendor/github.com/docker/docker/client/image_tag.go b/vendor/github.com/docker/docker/client/image_tag.go
deleted file mode 100644
index 5652bfc252..0000000000
--- a/vendor/github.com/docker/docker/client/image_tag.go
+++ /dev/null
@@ -1,37 +0,0 @@
-package client // import "github.com/docker/docker/client"
-
-import (
- "context"
- "net/url"
-
- "github.com/docker/distribution/reference"
- "github.com/pkg/errors"
-)
-
-// ImageTag tags an image in the docker host
-func (cli *Client) ImageTag(ctx context.Context, source, target string) error {
- if _, err := reference.ParseAnyReference(source); err != nil {
- return errors.Wrapf(err, "Error parsing reference: %q is not a valid repository/tag", source)
- }
-
- ref, err := reference.ParseNormalizedNamed(target)
- if err != nil {
- return errors.Wrapf(err, "Error parsing reference: %q is not a valid repository/tag", target)
- }
-
- if _, isCanonical := ref.(reference.Canonical); isCanonical {
- return errors.New("refusing to create a tag with a digest reference")
- }
-
- ref = reference.TagNameOnly(ref)
-
- query := url.Values{}
- query.Set("repo", reference.FamiliarName(ref))
- if tagged, ok := ref.(reference.Tagged); ok {
- query.Set("tag", tagged.Tag())
- }
-
- resp, err := cli.post(ctx, "/images/"+source+"/tag", query, nil, nil)
- ensureReaderClosed(resp)
- return err
-}
diff --git a/vendor/github.com/docker/docker/client/info.go b/vendor/github.com/docker/docker/client/info.go
deleted file mode 100644
index c856704e23..0000000000
--- a/vendor/github.com/docker/docker/client/info.go
+++ /dev/null
@@ -1,26 +0,0 @@
-package client // import "github.com/docker/docker/client"
-
-import (
- "context"
- "encoding/json"
- "fmt"
- "net/url"
-
- "github.com/docker/docker/api/types"
-)
-
-// Info returns information about the docker server.
-func (cli *Client) Info(ctx context.Context) (types.Info, error) {
- var info types.Info
- serverResp, err := cli.get(ctx, "/info", url.Values{}, nil)
- defer ensureReaderClosed(serverResp)
- if err != nil {
- return info, err
- }
-
- if err := json.NewDecoder(serverResp.body).Decode(&info); err != nil {
- return info, fmt.Errorf("Error reading remote info: %v", err)
- }
-
- return info, nil
-}
diff --git a/vendor/github.com/docker/docker/client/interface.go b/vendor/github.com/docker/docker/client/interface.go
deleted file mode 100644
index aabad4a911..0000000000
--- a/vendor/github.com/docker/docker/client/interface.go
+++ /dev/null
@@ -1,201 +0,0 @@
-package client // import "github.com/docker/docker/client"
-
-import (
- "context"
- "io"
- "net"
- "net/http"
- "time"
-
- "github.com/docker/docker/api/types"
- containertypes "github.com/docker/docker/api/types/container"
- "github.com/docker/docker/api/types/events"
- "github.com/docker/docker/api/types/filters"
- "github.com/docker/docker/api/types/image"
- networktypes "github.com/docker/docker/api/types/network"
- "github.com/docker/docker/api/types/registry"
- "github.com/docker/docker/api/types/swarm"
- volumetypes "github.com/docker/docker/api/types/volume"
- specs "github.com/opencontainers/image-spec/specs-go/v1"
-)
-
-// CommonAPIClient is the common methods between stable and experimental versions of APIClient.
-type CommonAPIClient interface {
- ConfigAPIClient
- ContainerAPIClient
- DistributionAPIClient
- ImageAPIClient
- NodeAPIClient
- NetworkAPIClient
- PluginAPIClient
- ServiceAPIClient
- SwarmAPIClient
- SecretAPIClient
- SystemAPIClient
- VolumeAPIClient
- ClientVersion() string
- DaemonHost() string
- HTTPClient() *http.Client
- ServerVersion(ctx context.Context) (types.Version, error)
- NegotiateAPIVersion(ctx context.Context)
- NegotiateAPIVersionPing(types.Ping)
- DialHijack(ctx context.Context, url, proto string, meta map[string][]string) (net.Conn, error)
- Dialer() func(context.Context) (net.Conn, error)
- Close() error
-}
-
-// ContainerAPIClient defines API client methods for the containers
-type ContainerAPIClient interface {
- ContainerAttach(ctx context.Context, container string, options types.ContainerAttachOptions) (types.HijackedResponse, error)
- ContainerCommit(ctx context.Context, container string, options types.ContainerCommitOptions) (types.IDResponse, error)
- ContainerCreate(ctx context.Context, config *containertypes.Config, hostConfig *containertypes.HostConfig, networkingConfig *networktypes.NetworkingConfig, platform *specs.Platform, containerName string) (containertypes.ContainerCreateCreatedBody, error)
- ContainerDiff(ctx context.Context, container string) ([]containertypes.ContainerChangeResponseItem, error)
- ContainerExecAttach(ctx context.Context, execID string, config types.ExecStartCheck) (types.HijackedResponse, error)
- ContainerExecCreate(ctx context.Context, container string, config types.ExecConfig) (types.IDResponse, error)
- ContainerExecInspect(ctx context.Context, execID string) (types.ContainerExecInspect, error)
- ContainerExecResize(ctx context.Context, execID string, options types.ResizeOptions) error
- ContainerExecStart(ctx context.Context, execID string, config types.ExecStartCheck) error
- ContainerExport(ctx context.Context, container string) (io.ReadCloser, error)
- ContainerInspect(ctx context.Context, container string) (types.ContainerJSON, error)
- ContainerInspectWithRaw(ctx context.Context, container string, getSize bool) (types.ContainerJSON, []byte, error)
- ContainerKill(ctx context.Context, container, signal string) error
- ContainerList(ctx context.Context, options types.ContainerListOptions) ([]types.Container, error)
- ContainerLogs(ctx context.Context, container string, options types.ContainerLogsOptions) (io.ReadCloser, error)
- ContainerPause(ctx context.Context, container string) error
- ContainerRemove(ctx context.Context, container string, options types.ContainerRemoveOptions) error
- ContainerRename(ctx context.Context, container, newContainerName string) error
- ContainerResize(ctx context.Context, container string, options types.ResizeOptions) error
- ContainerRestart(ctx context.Context, container string, timeout *time.Duration) error
- ContainerStatPath(ctx context.Context, container, path string) (types.ContainerPathStat, error)
- ContainerStats(ctx context.Context, container string, stream bool) (types.ContainerStats, error)
- ContainerStatsOneShot(ctx context.Context, container string) (types.ContainerStats, error)
- ContainerStart(ctx context.Context, container string, options types.ContainerStartOptions) error
- ContainerStop(ctx context.Context, container string, timeout *time.Duration) error
- ContainerTop(ctx context.Context, container string, arguments []string) (containertypes.ContainerTopOKBody, error)
- ContainerUnpause(ctx context.Context, container string) error
- ContainerUpdate(ctx context.Context, container string, updateConfig containertypes.UpdateConfig) (containertypes.ContainerUpdateOKBody, error)
- ContainerWait(ctx context.Context, container string, condition containertypes.WaitCondition) (<-chan containertypes.ContainerWaitOKBody, <-chan error)
- CopyFromContainer(ctx context.Context, container, srcPath string) (io.ReadCloser, types.ContainerPathStat, error)
- CopyToContainer(ctx context.Context, container, path string, content io.Reader, options types.CopyToContainerOptions) error
- ContainersPrune(ctx context.Context, pruneFilters filters.Args) (types.ContainersPruneReport, error)
-}
-
-// DistributionAPIClient defines API client methods for the registry
-type DistributionAPIClient interface {
- DistributionInspect(ctx context.Context, image, encodedRegistryAuth string) (registry.DistributionInspect, error)
-}
-
-// ImageAPIClient defines API client methods for the images
-type ImageAPIClient interface {
- ImageBuild(ctx context.Context, context io.Reader, options types.ImageBuildOptions) (types.ImageBuildResponse, error)
- BuildCachePrune(ctx context.Context, opts types.BuildCachePruneOptions) (*types.BuildCachePruneReport, error)
- BuildCancel(ctx context.Context, id string) error
- ImageCreate(ctx context.Context, parentReference string, options types.ImageCreateOptions) (io.ReadCloser, error)
- ImageHistory(ctx context.Context, image string) ([]image.HistoryResponseItem, error)
- ImageImport(ctx context.Context, source types.ImageImportSource, ref string, options types.ImageImportOptions) (io.ReadCloser, error)
- ImageInspectWithRaw(ctx context.Context, image string) (types.ImageInspect, []byte, error)
- ImageList(ctx context.Context, options types.ImageListOptions) ([]types.ImageSummary, error)
- ImageLoad(ctx context.Context, input io.Reader, quiet bool) (types.ImageLoadResponse, error)
- ImagePull(ctx context.Context, ref string, options types.ImagePullOptions) (io.ReadCloser, error)
- ImagePush(ctx context.Context, ref string, options types.ImagePushOptions) (io.ReadCloser, error)
- ImageRemove(ctx context.Context, image string, options types.ImageRemoveOptions) ([]types.ImageDeleteResponseItem, error)
- ImageSearch(ctx context.Context, term string, options types.ImageSearchOptions) ([]registry.SearchResult, error)
- ImageSave(ctx context.Context, images []string) (io.ReadCloser, error)
- ImageTag(ctx context.Context, image, ref string) error
- ImagesPrune(ctx context.Context, pruneFilter filters.Args) (types.ImagesPruneReport, error)
-}
-
-// NetworkAPIClient defines API client methods for the networks
-type NetworkAPIClient interface {
- NetworkConnect(ctx context.Context, network, container string, config *networktypes.EndpointSettings) error
- NetworkCreate(ctx context.Context, name string, options types.NetworkCreate) (types.NetworkCreateResponse, error)
- NetworkDisconnect(ctx context.Context, network, container string, force bool) error
- NetworkInspect(ctx context.Context, network string, options types.NetworkInspectOptions) (types.NetworkResource, error)
- NetworkInspectWithRaw(ctx context.Context, network string, options types.NetworkInspectOptions) (types.NetworkResource, []byte, error)
- NetworkList(ctx context.Context, options types.NetworkListOptions) ([]types.NetworkResource, error)
- NetworkRemove(ctx context.Context, network string) error
- NetworksPrune(ctx context.Context, pruneFilter filters.Args) (types.NetworksPruneReport, error)
-}
-
-// NodeAPIClient defines API client methods for the nodes
-type NodeAPIClient interface {
- NodeInspectWithRaw(ctx context.Context, nodeID string) (swarm.Node, []byte, error)
- NodeList(ctx context.Context, options types.NodeListOptions) ([]swarm.Node, error)
- NodeRemove(ctx context.Context, nodeID string, options types.NodeRemoveOptions) error
- NodeUpdate(ctx context.Context, nodeID string, version swarm.Version, node swarm.NodeSpec) error
-}
-
-// PluginAPIClient defines API client methods for the plugins
-type PluginAPIClient interface {
- PluginList(ctx context.Context, filter filters.Args) (types.PluginsListResponse, error)
- PluginRemove(ctx context.Context, name string, options types.PluginRemoveOptions) error
- PluginEnable(ctx context.Context, name string, options types.PluginEnableOptions) error
- PluginDisable(ctx context.Context, name string, options types.PluginDisableOptions) error
- PluginInstall(ctx context.Context, name string, options types.PluginInstallOptions) (io.ReadCloser, error)
- PluginUpgrade(ctx context.Context, name string, options types.PluginInstallOptions) (io.ReadCloser, error)
- PluginPush(ctx context.Context, name string, registryAuth string) (io.ReadCloser, error)
- PluginSet(ctx context.Context, name string, args []string) error
- PluginInspectWithRaw(ctx context.Context, name string) (*types.Plugin, []byte, error)
- PluginCreate(ctx context.Context, createContext io.Reader, options types.PluginCreateOptions) error
-}
-
-// ServiceAPIClient defines API client methods for the services
-type ServiceAPIClient interface {
- ServiceCreate(ctx context.Context, service swarm.ServiceSpec, options types.ServiceCreateOptions) (types.ServiceCreateResponse, error)
- ServiceInspectWithRaw(ctx context.Context, serviceID string, options types.ServiceInspectOptions) (swarm.Service, []byte, error)
- ServiceList(ctx context.Context, options types.ServiceListOptions) ([]swarm.Service, error)
- ServiceRemove(ctx context.Context, serviceID string) error
- ServiceUpdate(ctx context.Context, serviceID string, version swarm.Version, service swarm.ServiceSpec, options types.ServiceUpdateOptions) (types.ServiceUpdateResponse, error)
- ServiceLogs(ctx context.Context, serviceID string, options types.ContainerLogsOptions) (io.ReadCloser, error)
- TaskLogs(ctx context.Context, taskID string, options types.ContainerLogsOptions) (io.ReadCloser, error)
- TaskInspectWithRaw(ctx context.Context, taskID string) (swarm.Task, []byte, error)
- TaskList(ctx context.Context, options types.TaskListOptions) ([]swarm.Task, error)
-}
-
-// SwarmAPIClient defines API client methods for the swarm
-type SwarmAPIClient interface {
- SwarmInit(ctx context.Context, req swarm.InitRequest) (string, error)
- SwarmJoin(ctx context.Context, req swarm.JoinRequest) error
- SwarmGetUnlockKey(ctx context.Context) (types.SwarmUnlockKeyResponse, error)
- SwarmUnlock(ctx context.Context, req swarm.UnlockRequest) error
- SwarmLeave(ctx context.Context, force bool) error
- SwarmInspect(ctx context.Context) (swarm.Swarm, error)
- SwarmUpdate(ctx context.Context, version swarm.Version, swarm swarm.Spec, flags swarm.UpdateFlags) error
-}
-
-// SystemAPIClient defines API client methods for the system
-type SystemAPIClient interface {
- Events(ctx context.Context, options types.EventsOptions) (<-chan events.Message, <-chan error)
- Info(ctx context.Context) (types.Info, error)
- RegistryLogin(ctx context.Context, auth types.AuthConfig) (registry.AuthenticateOKBody, error)
- DiskUsage(ctx context.Context) (types.DiskUsage, error)
- Ping(ctx context.Context) (types.Ping, error)
-}
-
-// VolumeAPIClient defines API client methods for the volumes
-type VolumeAPIClient interface {
- VolumeCreate(ctx context.Context, options volumetypes.VolumeCreateBody) (types.Volume, error)
- VolumeInspect(ctx context.Context, volumeID string) (types.Volume, error)
- VolumeInspectWithRaw(ctx context.Context, volumeID string) (types.Volume, []byte, error)
- VolumeList(ctx context.Context, filter filters.Args) (volumetypes.VolumeListOKBody, error)
- VolumeRemove(ctx context.Context, volumeID string, force bool) error
- VolumesPrune(ctx context.Context, pruneFilter filters.Args) (types.VolumesPruneReport, error)
-}
-
-// SecretAPIClient defines API client methods for secrets
-type SecretAPIClient interface {
- SecretList(ctx context.Context, options types.SecretListOptions) ([]swarm.Secret, error)
- SecretCreate(ctx context.Context, secret swarm.SecretSpec) (types.SecretCreateResponse, error)
- SecretRemove(ctx context.Context, id string) error
- SecretInspectWithRaw(ctx context.Context, name string) (swarm.Secret, []byte, error)
- SecretUpdate(ctx context.Context, id string, version swarm.Version, secret swarm.SecretSpec) error
-}
-
-// ConfigAPIClient defines API client methods for configs
-type ConfigAPIClient interface {
- ConfigList(ctx context.Context, options types.ConfigListOptions) ([]swarm.Config, error)
- ConfigCreate(ctx context.Context, config swarm.ConfigSpec) (types.ConfigCreateResponse, error)
- ConfigRemove(ctx context.Context, id string) error
- ConfigInspectWithRaw(ctx context.Context, name string) (swarm.Config, []byte, error)
- ConfigUpdate(ctx context.Context, id string, version swarm.Version, config swarm.ConfigSpec) error
-}
diff --git a/vendor/github.com/docker/docker/client/interface_experimental.go b/vendor/github.com/docker/docker/client/interface_experimental.go
deleted file mode 100644
index 402ffb512c..0000000000
--- a/vendor/github.com/docker/docker/client/interface_experimental.go
+++ /dev/null
@@ -1,18 +0,0 @@
-package client // import "github.com/docker/docker/client"
-
-import (
- "context"
-
- "github.com/docker/docker/api/types"
-)
-
-type apiClientExperimental interface {
- CheckpointAPIClient
-}
-
-// CheckpointAPIClient defines API client methods for the checkpoints
-type CheckpointAPIClient interface {
- CheckpointCreate(ctx context.Context, container string, options types.CheckpointCreateOptions) error
- CheckpointDelete(ctx context.Context, container string, options types.CheckpointDeleteOptions) error
- CheckpointList(ctx context.Context, container string, options types.CheckpointListOptions) ([]types.Checkpoint, error)
-}
diff --git a/vendor/github.com/docker/docker/client/interface_stable.go b/vendor/github.com/docker/docker/client/interface_stable.go
deleted file mode 100644
index 5502cd7426..0000000000
--- a/vendor/github.com/docker/docker/client/interface_stable.go
+++ /dev/null
@@ -1,10 +0,0 @@
-package client // import "github.com/docker/docker/client"
-
-// APIClient is an interface that clients that talk with a docker server must implement.
-type APIClient interface {
- CommonAPIClient
- apiClientExperimental
-}
-
-// Ensure that Client always implements APIClient.
-var _ APIClient = &Client{}
diff --git a/vendor/github.com/docker/docker/client/login.go b/vendor/github.com/docker/docker/client/login.go
deleted file mode 100644
index f058520638..0000000000
--- a/vendor/github.com/docker/docker/client/login.go
+++ /dev/null
@@ -1,25 +0,0 @@
-package client // import "github.com/docker/docker/client"
-
-import (
- "context"
- "encoding/json"
- "net/url"
-
- "github.com/docker/docker/api/types"
- "github.com/docker/docker/api/types/registry"
-)
-
-// RegistryLogin authenticates the docker server with a given docker registry.
-// It returns unauthorizedError when the authentication fails.
-func (cli *Client) RegistryLogin(ctx context.Context, auth types.AuthConfig) (registry.AuthenticateOKBody, error) {
- resp, err := cli.post(ctx, "/auth", url.Values{}, auth, nil)
- defer ensureReaderClosed(resp)
-
- if err != nil {
- return registry.AuthenticateOKBody{}, err
- }
-
- var response registry.AuthenticateOKBody
- err = json.NewDecoder(resp.body).Decode(&response)
- return response, err
-}
diff --git a/vendor/github.com/docker/docker/client/network_connect.go b/vendor/github.com/docker/docker/client/network_connect.go
deleted file mode 100644
index 5718946134..0000000000
--- a/vendor/github.com/docker/docker/client/network_connect.go
+++ /dev/null
@@ -1,19 +0,0 @@
-package client // import "github.com/docker/docker/client"
-
-import (
- "context"
-
- "github.com/docker/docker/api/types"
- "github.com/docker/docker/api/types/network"
-)
-
-// NetworkConnect connects a container to an existent network in the docker host.
-func (cli *Client) NetworkConnect(ctx context.Context, networkID, containerID string, config *network.EndpointSettings) error {
- nc := types.NetworkConnect{
- Container: containerID,
- EndpointConfig: config,
- }
- resp, err := cli.post(ctx, "/networks/"+networkID+"/connect", nil, nc, nil)
- ensureReaderClosed(resp)
- return err
-}
diff --git a/vendor/github.com/docker/docker/client/network_create.go b/vendor/github.com/docker/docker/client/network_create.go
deleted file mode 100644
index 278d9383a8..0000000000
--- a/vendor/github.com/docker/docker/client/network_create.go
+++ /dev/null
@@ -1,25 +0,0 @@
-package client // import "github.com/docker/docker/client"
-
-import (
- "context"
- "encoding/json"
-
- "github.com/docker/docker/api/types"
-)
-
-// NetworkCreate creates a new network in the docker host.
-func (cli *Client) NetworkCreate(ctx context.Context, name string, options types.NetworkCreate) (types.NetworkCreateResponse, error) {
- networkCreateRequest := types.NetworkCreateRequest{
- NetworkCreate: options,
- Name: name,
- }
- var response types.NetworkCreateResponse
- serverResp, err := cli.post(ctx, "/networks/create", nil, networkCreateRequest, nil)
- defer ensureReaderClosed(serverResp)
- if err != nil {
- return response, err
- }
-
- err = json.NewDecoder(serverResp.body).Decode(&response)
- return response, err
-}
diff --git a/vendor/github.com/docker/docker/client/network_disconnect.go b/vendor/github.com/docker/docker/client/network_disconnect.go
deleted file mode 100644
index dd15676656..0000000000
--- a/vendor/github.com/docker/docker/client/network_disconnect.go
+++ /dev/null
@@ -1,15 +0,0 @@
-package client // import "github.com/docker/docker/client"
-
-import (
- "context"
-
- "github.com/docker/docker/api/types"
-)
-
-// NetworkDisconnect disconnects a container from an existent network in the docker host.
-func (cli *Client) NetworkDisconnect(ctx context.Context, networkID, containerID string, force bool) error {
- nd := types.NetworkDisconnect{Container: containerID, Force: force}
- resp, err := cli.post(ctx, "/networks/"+networkID+"/disconnect", nil, nd, nil)
- ensureReaderClosed(resp)
- return err
-}
diff --git a/vendor/github.com/docker/docker/client/network_inspect.go b/vendor/github.com/docker/docker/client/network_inspect.go
deleted file mode 100644
index 89a05b3021..0000000000
--- a/vendor/github.com/docker/docker/client/network_inspect.go
+++ /dev/null
@@ -1,49 +0,0 @@
-package client // import "github.com/docker/docker/client"
-
-import (
- "bytes"
- "context"
- "encoding/json"
- "io/ioutil"
- "net/url"
-
- "github.com/docker/docker/api/types"
-)
-
-// NetworkInspect returns the information for a specific network configured in the docker host.
-func (cli *Client) NetworkInspect(ctx context.Context, networkID string, options types.NetworkInspectOptions) (types.NetworkResource, error) {
- networkResource, _, err := cli.NetworkInspectWithRaw(ctx, networkID, options)
- return networkResource, err
-}
-
-// NetworkInspectWithRaw returns the information for a specific network configured in the docker host and its raw representation.
-func (cli *Client) NetworkInspectWithRaw(ctx context.Context, networkID string, options types.NetworkInspectOptions) (types.NetworkResource, []byte, error) {
- if networkID == "" {
- return types.NetworkResource{}, nil, objectNotFoundError{object: "network", id: networkID}
- }
- var (
- networkResource types.NetworkResource
- resp serverResponse
- err error
- )
- query := url.Values{}
- if options.Verbose {
- query.Set("verbose", "true")
- }
- if options.Scope != "" {
- query.Set("scope", options.Scope)
- }
- resp, err = cli.get(ctx, "/networks/"+networkID, query, nil)
- defer ensureReaderClosed(resp)
- if err != nil {
- return networkResource, nil, wrapResponseError(err, resp, "network", networkID)
- }
-
- body, err := ioutil.ReadAll(resp.body)
- if err != nil {
- return networkResource, nil, err
- }
- rdr := bytes.NewReader(body)
- err = json.NewDecoder(rdr).Decode(&networkResource)
- return networkResource, body, err
-}
diff --git a/vendor/github.com/docker/docker/client/network_list.go b/vendor/github.com/docker/docker/client/network_list.go
deleted file mode 100644
index ed2acb5571..0000000000
--- a/vendor/github.com/docker/docker/client/network_list.go
+++ /dev/null
@@ -1,32 +0,0 @@
-package client // import "github.com/docker/docker/client"
-
-import (
- "context"
- "encoding/json"
- "net/url"
-
- "github.com/docker/docker/api/types"
- "github.com/docker/docker/api/types/filters"
-)
-
-// NetworkList returns the list of networks configured in the docker host.
-func (cli *Client) NetworkList(ctx context.Context, options types.NetworkListOptions) ([]types.NetworkResource, error) {
- query := url.Values{}
- if options.Filters.Len() > 0 {
- //nolint:staticcheck // ignore SA1019 for old code
- filterJSON, err := filters.ToParamWithVersion(cli.version, options.Filters)
- if err != nil {
- return nil, err
- }
-
- query.Set("filters", filterJSON)
- }
- var networkResources []types.NetworkResource
- resp, err := cli.get(ctx, "/networks", query, nil)
- defer ensureReaderClosed(resp)
- if err != nil {
- return networkResources, err
- }
- err = json.NewDecoder(resp.body).Decode(&networkResources)
- return networkResources, err
-}
diff --git a/vendor/github.com/docker/docker/client/network_prune.go b/vendor/github.com/docker/docker/client/network_prune.go
deleted file mode 100644
index cebb188219..0000000000
--- a/vendor/github.com/docker/docker/client/network_prune.go
+++ /dev/null
@@ -1,36 +0,0 @@
-package client // import "github.com/docker/docker/client"
-
-import (
- "context"
- "encoding/json"
- "fmt"
-
- "github.com/docker/docker/api/types"
- "github.com/docker/docker/api/types/filters"
-)
-
-// NetworksPrune requests the daemon to delete unused networks
-func (cli *Client) NetworksPrune(ctx context.Context, pruneFilters filters.Args) (types.NetworksPruneReport, error) {
- var report types.NetworksPruneReport
-
- if err := cli.NewVersionError("1.25", "network prune"); err != nil {
- return report, err
- }
-
- query, err := getFiltersQuery(pruneFilters)
- if err != nil {
- return report, err
- }
-
- serverResp, err := cli.post(ctx, "/networks/prune", query, nil, nil)
- defer ensureReaderClosed(serverResp)
- if err != nil {
- return report, err
- }
-
- if err := json.NewDecoder(serverResp.body).Decode(&report); err != nil {
- return report, fmt.Errorf("Error retrieving network prune report: %v", err)
- }
-
- return report, nil
-}
diff --git a/vendor/github.com/docker/docker/client/network_remove.go b/vendor/github.com/docker/docker/client/network_remove.go
deleted file mode 100644
index e71b16d869..0000000000
--- a/vendor/github.com/docker/docker/client/network_remove.go
+++ /dev/null
@@ -1,10 +0,0 @@
-package client // import "github.com/docker/docker/client"
-
-import "context"
-
-// NetworkRemove removes an existent network from the docker host.
-func (cli *Client) NetworkRemove(ctx context.Context, networkID string) error {
- resp, err := cli.delete(ctx, "/networks/"+networkID, nil, nil)
- defer ensureReaderClosed(resp)
- return wrapResponseError(err, resp, "network", networkID)
-}
diff --git a/vendor/github.com/docker/docker/client/node_inspect.go b/vendor/github.com/docker/docker/client/node_inspect.go
deleted file mode 100644
index d296c9fdde..0000000000
--- a/vendor/github.com/docker/docker/client/node_inspect.go
+++ /dev/null
@@ -1,32 +0,0 @@
-package client // import "github.com/docker/docker/client"
-
-import (
- "bytes"
- "context"
- "encoding/json"
- "io/ioutil"
-
- "github.com/docker/docker/api/types/swarm"
-)
-
-// NodeInspectWithRaw returns the node information.
-func (cli *Client) NodeInspectWithRaw(ctx context.Context, nodeID string) (swarm.Node, []byte, error) {
- if nodeID == "" {
- return swarm.Node{}, nil, objectNotFoundError{object: "node", id: nodeID}
- }
- serverResp, err := cli.get(ctx, "/nodes/"+nodeID, nil, nil)
- defer ensureReaderClosed(serverResp)
- if err != nil {
- return swarm.Node{}, nil, wrapResponseError(err, serverResp, "node", nodeID)
- }
-
- body, err := ioutil.ReadAll(serverResp.body)
- if err != nil {
- return swarm.Node{}, nil, err
- }
-
- var response swarm.Node
- rdr := bytes.NewReader(body)
- err = json.NewDecoder(rdr).Decode(&response)
- return response, body, err
-}
diff --git a/vendor/github.com/docker/docker/client/node_list.go b/vendor/github.com/docker/docker/client/node_list.go
deleted file mode 100644
index c212906bc7..0000000000
--- a/vendor/github.com/docker/docker/client/node_list.go
+++ /dev/null
@@ -1,36 +0,0 @@
-package client // import "github.com/docker/docker/client"
-
-import (
- "context"
- "encoding/json"
- "net/url"
-
- "github.com/docker/docker/api/types"
- "github.com/docker/docker/api/types/filters"
- "github.com/docker/docker/api/types/swarm"
-)
-
-// NodeList returns the list of nodes.
-func (cli *Client) NodeList(ctx context.Context, options types.NodeListOptions) ([]swarm.Node, error) {
- query := url.Values{}
-
- if options.Filters.Len() > 0 {
- filterJSON, err := filters.ToJSON(options.Filters)
-
- if err != nil {
- return nil, err
- }
-
- query.Set("filters", filterJSON)
- }
-
- resp, err := cli.get(ctx, "/nodes", query, nil)
- defer ensureReaderClosed(resp)
- if err != nil {
- return nil, err
- }
-
- var nodes []swarm.Node
- err = json.NewDecoder(resp.body).Decode(&nodes)
- return nodes, err
-}
diff --git a/vendor/github.com/docker/docker/client/node_remove.go b/vendor/github.com/docker/docker/client/node_remove.go
deleted file mode 100644
index 03ab878097..0000000000
--- a/vendor/github.com/docker/docker/client/node_remove.go
+++ /dev/null
@@ -1,20 +0,0 @@
-package client // import "github.com/docker/docker/client"
-
-import (
- "context"
- "net/url"
-
- "github.com/docker/docker/api/types"
-)
-
-// NodeRemove removes a Node.
-func (cli *Client) NodeRemove(ctx context.Context, nodeID string, options types.NodeRemoveOptions) error {
- query := url.Values{}
- if options.Force {
- query.Set("force", "1")
- }
-
- resp, err := cli.delete(ctx, "/nodes/"+nodeID, query, nil)
- defer ensureReaderClosed(resp)
- return wrapResponseError(err, resp, "node", nodeID)
-}
diff --git a/vendor/github.com/docker/docker/client/node_update.go b/vendor/github.com/docker/docker/client/node_update.go
deleted file mode 100644
index de32a617fb..0000000000
--- a/vendor/github.com/docker/docker/client/node_update.go
+++ /dev/null
@@ -1,18 +0,0 @@
-package client // import "github.com/docker/docker/client"
-
-import (
- "context"
- "net/url"
- "strconv"
-
- "github.com/docker/docker/api/types/swarm"
-)
-
-// NodeUpdate updates a Node.
-func (cli *Client) NodeUpdate(ctx context.Context, nodeID string, version swarm.Version, node swarm.NodeSpec) error {
- query := url.Values{}
- query.Set("version", strconv.FormatUint(version.Index, 10))
- resp, err := cli.post(ctx, "/nodes/"+nodeID+"/update", query, node, nil)
- ensureReaderClosed(resp)
- return err
-}
diff --git a/vendor/github.com/docker/docker/client/options.go b/vendor/github.com/docker/docker/client/options.go
deleted file mode 100644
index 6f77f0955f..0000000000
--- a/vendor/github.com/docker/docker/client/options.go
+++ /dev/null
@@ -1,172 +0,0 @@
-package client
-
-import (
- "context"
- "net"
- "net/http"
- "os"
- "path/filepath"
- "time"
-
- "github.com/docker/go-connections/sockets"
- "github.com/docker/go-connections/tlsconfig"
- "github.com/pkg/errors"
-)
-
-// Opt is a configuration option to initialize a client
-type Opt func(*Client) error
-
-// FromEnv configures the client with values from environment variables.
-//
-// Supported environment variables:
-// DOCKER_HOST to set the url to the docker server.
-// DOCKER_API_VERSION to set the version of the API to reach, leave empty for latest.
-// DOCKER_CERT_PATH to load the TLS certificates from.
-// DOCKER_TLS_VERIFY to enable or disable TLS verification, off by default.
-func FromEnv(c *Client) error {
- if dockerCertPath := os.Getenv("DOCKER_CERT_PATH"); dockerCertPath != "" {
- options := tlsconfig.Options{
- CAFile: filepath.Join(dockerCertPath, "ca.pem"),
- CertFile: filepath.Join(dockerCertPath, "cert.pem"),
- KeyFile: filepath.Join(dockerCertPath, "key.pem"),
- InsecureSkipVerify: os.Getenv("DOCKER_TLS_VERIFY") == "",
- }
- tlsc, err := tlsconfig.Client(options)
- if err != nil {
- return err
- }
-
- c.client = &http.Client{
- Transport: &http.Transport{TLSClientConfig: tlsc},
- CheckRedirect: CheckRedirect,
- }
- }
-
- if host := os.Getenv("DOCKER_HOST"); host != "" {
- if err := WithHost(host)(c); err != nil {
- return err
- }
- }
-
- if version := os.Getenv("DOCKER_API_VERSION"); version != "" {
- if err := WithVersion(version)(c); err != nil {
- return err
- }
- }
- return nil
-}
-
-// WithDialer applies the dialer.DialContext to the client transport. This can be
-// used to set the Timeout and KeepAlive settings of the client.
-// Deprecated: use WithDialContext
-func WithDialer(dialer *net.Dialer) Opt {
- return WithDialContext(dialer.DialContext)
-}
-
-// WithDialContext applies the dialer to the client transport. This can be
-// used to set the Timeout and KeepAlive settings of the client.
-func WithDialContext(dialContext func(ctx context.Context, network, addr string) (net.Conn, error)) Opt {
- return func(c *Client) error {
- if transport, ok := c.client.Transport.(*http.Transport); ok {
- transport.DialContext = dialContext
- return nil
- }
- return errors.Errorf("cannot apply dialer to transport: %T", c.client.Transport)
- }
-}
-
-// WithHost overrides the client host with the specified one.
-func WithHost(host string) Opt {
- return func(c *Client) error {
- hostURL, err := ParseHostURL(host)
- if err != nil {
- return err
- }
- c.host = host
- c.proto = hostURL.Scheme
- c.addr = hostURL.Host
- c.basePath = hostURL.Path
- if transport, ok := c.client.Transport.(*http.Transport); ok {
- return sockets.ConfigureTransport(transport, c.proto, c.addr)
- }
- return errors.Errorf("cannot apply host to transport: %T", c.client.Transport)
- }
-}
-
-// WithHTTPClient overrides the client http client with the specified one
-func WithHTTPClient(client *http.Client) Opt {
- return func(c *Client) error {
- if client != nil {
- c.client = client
- }
- return nil
- }
-}
-
-// WithTimeout configures the time limit for requests made by the HTTP client
-func WithTimeout(timeout time.Duration) Opt {
- return func(c *Client) error {
- c.client.Timeout = timeout
- return nil
- }
-}
-
-// WithHTTPHeaders overrides the client default http headers
-func WithHTTPHeaders(headers map[string]string) Opt {
- return func(c *Client) error {
- c.customHTTPHeaders = headers
- return nil
- }
-}
-
-// WithScheme overrides the client scheme with the specified one
-func WithScheme(scheme string) Opt {
- return func(c *Client) error {
- c.scheme = scheme
- return nil
- }
-}
-
-// WithTLSClientConfig applies a tls config to the client transport.
-func WithTLSClientConfig(cacertPath, certPath, keyPath string) Opt {
- return func(c *Client) error {
- opts := tlsconfig.Options{
- CAFile: cacertPath,
- CertFile: certPath,
- KeyFile: keyPath,
- ExclusiveRootPools: true,
- }
- config, err := tlsconfig.Client(opts)
- if err != nil {
- return errors.Wrap(err, "failed to create tls config")
- }
- if transport, ok := c.client.Transport.(*http.Transport); ok {
- transport.TLSClientConfig = config
- return nil
- }
- return errors.Errorf("cannot apply tls config to transport: %T", c.client.Transport)
- }
-}
-
-// WithVersion overrides the client version with the specified one. If an empty
-// version is specified, the value will be ignored to allow version negotiation.
-func WithVersion(version string) Opt {
- return func(c *Client) error {
- if version != "" {
- c.version = version
- c.manualOverride = true
- }
- return nil
- }
-}
-
-// WithAPIVersionNegotiation enables automatic API version negotiation for the client.
-// With this option enabled, the client automatically negotiates the API version
-// to use when making requests. API version negotiation is performed on the first
-// request; subsequent requests will not re-negotiate.
-func WithAPIVersionNegotiation() Opt {
- return func(c *Client) error {
- c.negotiateVersion = true
- return nil
- }
-}
diff --git a/vendor/github.com/docker/docker/client/ping.go b/vendor/github.com/docker/docker/client/ping.go
deleted file mode 100644
index a9af001ef4..0000000000
--- a/vendor/github.com/docker/docker/client/ping.go
+++ /dev/null
@@ -1,66 +0,0 @@
-package client // import "github.com/docker/docker/client"
-
-import (
- "context"
- "net/http"
- "path"
-
- "github.com/docker/docker/api/types"
- "github.com/docker/docker/errdefs"
-)
-
-// Ping pings the server and returns the value of the "Docker-Experimental",
-// "Builder-Version", "OS-Type" & "API-Version" headers. It attempts to use
-// a HEAD request on the endpoint, but falls back to GET if HEAD is not supported
-// by the daemon.
-func (cli *Client) Ping(ctx context.Context) (types.Ping, error) {
- var ping types.Ping
-
- // Using cli.buildRequest() + cli.doRequest() instead of cli.sendRequest()
- // because ping requests are used during API version negotiation, so we want
- // to hit the non-versioned /_ping endpoint, not /v1.xx/_ping
- req, err := cli.buildRequest(http.MethodHead, path.Join(cli.basePath, "/_ping"), nil, nil)
- if err != nil {
- return ping, err
- }
- serverResp, err := cli.doRequest(ctx, req)
- if err == nil {
- defer ensureReaderClosed(serverResp)
- switch serverResp.statusCode {
- case http.StatusOK, http.StatusInternalServerError:
- // Server handled the request, so parse the response
- return parsePingResponse(cli, serverResp)
- }
- } else if IsErrConnectionFailed(err) {
- return ping, err
- }
-
- req, err = cli.buildRequest(http.MethodGet, path.Join(cli.basePath, "/_ping"), nil, nil)
- if err != nil {
- return ping, err
- }
- serverResp, err = cli.doRequest(ctx, req)
- defer ensureReaderClosed(serverResp)
- if err != nil {
- return ping, err
- }
- return parsePingResponse(cli, serverResp)
-}
-
-func parsePingResponse(cli *Client, resp serverResponse) (types.Ping, error) {
- var ping types.Ping
- if resp.header == nil {
- err := cli.checkResponseErr(resp)
- return ping, errdefs.FromStatusCode(err, resp.statusCode)
- }
- ping.APIVersion = resp.header.Get("API-Version")
- ping.OSType = resp.header.Get("OSType")
- if resp.header.Get("Docker-Experimental") == "true" {
- ping.Experimental = true
- }
- if bv := resp.header.Get("Builder-Version"); bv != "" {
- ping.BuilderVersion = types.BuilderVersion(bv)
- }
- err := cli.checkResponseErr(resp)
- return ping, errdefs.FromStatusCode(err, resp.statusCode)
-}
diff --git a/vendor/github.com/docker/docker/client/plugin_create.go b/vendor/github.com/docker/docker/client/plugin_create.go
deleted file mode 100644
index b95dbaf686..0000000000
--- a/vendor/github.com/docker/docker/client/plugin_create.go
+++ /dev/null
@@ -1,23 +0,0 @@
-package client // import "github.com/docker/docker/client"
-
-import (
- "context"
- "io"
- "net/http"
- "net/url"
-
- "github.com/docker/docker/api/types"
-)
-
-// PluginCreate creates a plugin
-func (cli *Client) PluginCreate(ctx context.Context, createContext io.Reader, createOptions types.PluginCreateOptions) error {
- headers := http.Header(make(map[string][]string))
- headers.Set("Content-Type", "application/x-tar")
-
- query := url.Values{}
- query.Set("name", createOptions.RepoName)
-
- resp, err := cli.postRaw(ctx, "/plugins/create", query, createContext, headers)
- ensureReaderClosed(resp)
- return err
-}
diff --git a/vendor/github.com/docker/docker/client/plugin_disable.go b/vendor/github.com/docker/docker/client/plugin_disable.go
deleted file mode 100644
index 01f6574f95..0000000000
--- a/vendor/github.com/docker/docker/client/plugin_disable.go
+++ /dev/null
@@ -1,19 +0,0 @@
-package client // import "github.com/docker/docker/client"
-
-import (
- "context"
- "net/url"
-
- "github.com/docker/docker/api/types"
-)
-
-// PluginDisable disables a plugin
-func (cli *Client) PluginDisable(ctx context.Context, name string, options types.PluginDisableOptions) error {
- query := url.Values{}
- if options.Force {
- query.Set("force", "1")
- }
- resp, err := cli.post(ctx, "/plugins/"+name+"/disable", query, nil, nil)
- ensureReaderClosed(resp)
- return err
-}
diff --git a/vendor/github.com/docker/docker/client/plugin_enable.go b/vendor/github.com/docker/docker/client/plugin_enable.go
deleted file mode 100644
index 736da48bd1..0000000000
--- a/vendor/github.com/docker/docker/client/plugin_enable.go
+++ /dev/null
@@ -1,19 +0,0 @@
-package client // import "github.com/docker/docker/client"
-
-import (
- "context"
- "net/url"
- "strconv"
-
- "github.com/docker/docker/api/types"
-)
-
-// PluginEnable enables a plugin
-func (cli *Client) PluginEnable(ctx context.Context, name string, options types.PluginEnableOptions) error {
- query := url.Values{}
- query.Set("timeout", strconv.Itoa(options.Timeout))
-
- resp, err := cli.post(ctx, "/plugins/"+name+"/enable", query, nil, nil)
- ensureReaderClosed(resp)
- return err
-}
diff --git a/vendor/github.com/docker/docker/client/plugin_inspect.go b/vendor/github.com/docker/docker/client/plugin_inspect.go
deleted file mode 100644
index 81b89732b0..0000000000
--- a/vendor/github.com/docker/docker/client/plugin_inspect.go
+++ /dev/null
@@ -1,31 +0,0 @@
-package client // import "github.com/docker/docker/client"
-
-import (
- "bytes"
- "context"
- "encoding/json"
- "io/ioutil"
-
- "github.com/docker/docker/api/types"
-)
-
-// PluginInspectWithRaw inspects an existing plugin
-func (cli *Client) PluginInspectWithRaw(ctx context.Context, name string) (*types.Plugin, []byte, error) {
- if name == "" {
- return nil, nil, objectNotFoundError{object: "plugin", id: name}
- }
- resp, err := cli.get(ctx, "/plugins/"+name+"/json", nil, nil)
- defer ensureReaderClosed(resp)
- if err != nil {
- return nil, nil, wrapResponseError(err, resp, "plugin", name)
- }
-
- body, err := ioutil.ReadAll(resp.body)
- if err != nil {
- return nil, nil, err
- }
- var p types.Plugin
- rdr := bytes.NewReader(body)
- err = json.NewDecoder(rdr).Decode(&p)
- return &p, body, err
-}
diff --git a/vendor/github.com/docker/docker/client/plugin_install.go b/vendor/github.com/docker/docker/client/plugin_install.go
deleted file mode 100644
index 012afe61ca..0000000000
--- a/vendor/github.com/docker/docker/client/plugin_install.go
+++ /dev/null
@@ -1,113 +0,0 @@
-package client // import "github.com/docker/docker/client"
-
-import (
- "context"
- "encoding/json"
- "io"
- "net/url"
-
- "github.com/docker/distribution/reference"
- "github.com/docker/docker/api/types"
- "github.com/docker/docker/errdefs"
- "github.com/pkg/errors"
-)
-
-// PluginInstall installs a plugin
-func (cli *Client) PluginInstall(ctx context.Context, name string, options types.PluginInstallOptions) (rc io.ReadCloser, err error) {
- query := url.Values{}
- if _, err := reference.ParseNormalizedNamed(options.RemoteRef); err != nil {
- return nil, errors.Wrap(err, "invalid remote reference")
- }
- query.Set("remote", options.RemoteRef)
-
- privileges, err := cli.checkPluginPermissions(ctx, query, options)
- if err != nil {
- return nil, err
- }
-
- // set name for plugin pull, if empty should default to remote reference
- query.Set("name", name)
-
- resp, err := cli.tryPluginPull(ctx, query, privileges, options.RegistryAuth)
- if err != nil {
- return nil, err
- }
-
- name = resp.header.Get("Docker-Plugin-Name")
-
- pr, pw := io.Pipe()
- go func() { // todo: the client should probably be designed more around the actual api
- _, err := io.Copy(pw, resp.body)
- if err != nil {
- pw.CloseWithError(err)
- return
- }
- defer func() {
- if err != nil {
- delResp, _ := cli.delete(ctx, "/plugins/"+name, nil, nil)
- ensureReaderClosed(delResp)
- }
- }()
- if len(options.Args) > 0 {
- if err := cli.PluginSet(ctx, name, options.Args); err != nil {
- pw.CloseWithError(err)
- return
- }
- }
-
- if options.Disabled {
- pw.Close()
- return
- }
-
- enableErr := cli.PluginEnable(ctx, name, types.PluginEnableOptions{Timeout: 0})
- pw.CloseWithError(enableErr)
- }()
- return pr, nil
-}
-
-func (cli *Client) tryPluginPrivileges(ctx context.Context, query url.Values, registryAuth string) (serverResponse, error) {
- headers := map[string][]string{"X-Registry-Auth": {registryAuth}}
- return cli.get(ctx, "/plugins/privileges", query, headers)
-}
-
-func (cli *Client) tryPluginPull(ctx context.Context, query url.Values, privileges types.PluginPrivileges, registryAuth string) (serverResponse, error) {
- headers := map[string][]string{"X-Registry-Auth": {registryAuth}}
- return cli.post(ctx, "/plugins/pull", query, privileges, headers)
-}
-
-func (cli *Client) checkPluginPermissions(ctx context.Context, query url.Values, options types.PluginInstallOptions) (types.PluginPrivileges, error) {
- resp, err := cli.tryPluginPrivileges(ctx, query, options.RegistryAuth)
- if errdefs.IsUnauthorized(err) && options.PrivilegeFunc != nil {
- // todo: do inspect before to check existing name before checking privileges
- newAuthHeader, privilegeErr := options.PrivilegeFunc()
- if privilegeErr != nil {
- ensureReaderClosed(resp)
- return nil, privilegeErr
- }
- options.RegistryAuth = newAuthHeader
- resp, err = cli.tryPluginPrivileges(ctx, query, options.RegistryAuth)
- }
- if err != nil {
- ensureReaderClosed(resp)
- return nil, err
- }
-
- var privileges types.PluginPrivileges
- if err := json.NewDecoder(resp.body).Decode(&privileges); err != nil {
- ensureReaderClosed(resp)
- return nil, err
- }
- ensureReaderClosed(resp)
-
- if !options.AcceptAllPermissions && options.AcceptPermissionsFunc != nil && len(privileges) > 0 {
- accept, err := options.AcceptPermissionsFunc(privileges)
- if err != nil {
- return nil, err
- }
- if !accept {
- return nil, pluginPermissionDenied{options.RemoteRef}
- }
- }
- return privileges, nil
-}
diff --git a/vendor/github.com/docker/docker/client/plugin_list.go b/vendor/github.com/docker/docker/client/plugin_list.go
deleted file mode 100644
index cf1935e2f5..0000000000
--- a/vendor/github.com/docker/docker/client/plugin_list.go
+++ /dev/null
@@ -1,33 +0,0 @@
-package client // import "github.com/docker/docker/client"
-
-import (
- "context"
- "encoding/json"
- "net/url"
-
- "github.com/docker/docker/api/types"
- "github.com/docker/docker/api/types/filters"
-)
-
-// PluginList returns the installed plugins
-func (cli *Client) PluginList(ctx context.Context, filter filters.Args) (types.PluginsListResponse, error) {
- var plugins types.PluginsListResponse
- query := url.Values{}
-
- if filter.Len() > 0 {
- //nolint:staticcheck // ignore SA1019 for old code
- filterJSON, err := filters.ToParamWithVersion(cli.version, filter)
- if err != nil {
- return plugins, err
- }
- query.Set("filters", filterJSON)
- }
- resp, err := cli.get(ctx, "/plugins", query, nil)
- defer ensureReaderClosed(resp)
- if err != nil {
- return plugins, wrapResponseError(err, resp, "plugin", "")
- }
-
- err = json.NewDecoder(resp.body).Decode(&plugins)
- return plugins, err
-}
diff --git a/vendor/github.com/docker/docker/client/plugin_push.go b/vendor/github.com/docker/docker/client/plugin_push.go
deleted file mode 100644
index d20bfe8447..0000000000
--- a/vendor/github.com/docker/docker/client/plugin_push.go
+++ /dev/null
@@ -1,16 +0,0 @@
-package client // import "github.com/docker/docker/client"
-
-import (
- "context"
- "io"
-)
-
-// PluginPush pushes a plugin to a registry
-func (cli *Client) PluginPush(ctx context.Context, name string, registryAuth string) (io.ReadCloser, error) {
- headers := map[string][]string{"X-Registry-Auth": {registryAuth}}
- resp, err := cli.post(ctx, "/plugins/"+name+"/push", nil, nil, headers)
- if err != nil {
- return nil, err
- }
- return resp.body, nil
-}
diff --git a/vendor/github.com/docker/docker/client/plugin_remove.go b/vendor/github.com/docker/docker/client/plugin_remove.go
deleted file mode 100644
index 51ca1040d6..0000000000
--- a/vendor/github.com/docker/docker/client/plugin_remove.go
+++ /dev/null
@@ -1,20 +0,0 @@
-package client // import "github.com/docker/docker/client"
-
-import (
- "context"
- "net/url"
-
- "github.com/docker/docker/api/types"
-)
-
-// PluginRemove removes a plugin
-func (cli *Client) PluginRemove(ctx context.Context, name string, options types.PluginRemoveOptions) error {
- query := url.Values{}
- if options.Force {
- query.Set("force", "1")
- }
-
- resp, err := cli.delete(ctx, "/plugins/"+name, query, nil)
- defer ensureReaderClosed(resp)
- return wrapResponseError(err, resp, "plugin", name)
-}
diff --git a/vendor/github.com/docker/docker/client/plugin_set.go b/vendor/github.com/docker/docker/client/plugin_set.go
deleted file mode 100644
index dcf5752ca2..0000000000
--- a/vendor/github.com/docker/docker/client/plugin_set.go
+++ /dev/null
@@ -1,12 +0,0 @@
-package client // import "github.com/docker/docker/client"
-
-import (
- "context"
-)
-
-// PluginSet modifies settings for an existing plugin
-func (cli *Client) PluginSet(ctx context.Context, name string, args []string) error {
- resp, err := cli.post(ctx, "/plugins/"+name+"/set", nil, args, nil)
- ensureReaderClosed(resp)
- return err
-}
diff --git a/vendor/github.com/docker/docker/client/plugin_upgrade.go b/vendor/github.com/docker/docker/client/plugin_upgrade.go
deleted file mode 100644
index 115cea945b..0000000000
--- a/vendor/github.com/docker/docker/client/plugin_upgrade.go
+++ /dev/null
@@ -1,39 +0,0 @@
-package client // import "github.com/docker/docker/client"
-
-import (
- "context"
- "io"
- "net/url"
-
- "github.com/docker/distribution/reference"
- "github.com/docker/docker/api/types"
- "github.com/pkg/errors"
-)
-
-// PluginUpgrade upgrades a plugin
-func (cli *Client) PluginUpgrade(ctx context.Context, name string, options types.PluginInstallOptions) (rc io.ReadCloser, err error) {
- if err := cli.NewVersionError("1.26", "plugin upgrade"); err != nil {
- return nil, err
- }
- query := url.Values{}
- if _, err := reference.ParseNormalizedNamed(options.RemoteRef); err != nil {
- return nil, errors.Wrap(err, "invalid remote reference")
- }
- query.Set("remote", options.RemoteRef)
-
- privileges, err := cli.checkPluginPermissions(ctx, query, options)
- if err != nil {
- return nil, err
- }
-
- resp, err := cli.tryPluginUpgrade(ctx, query, privileges, name, options.RegistryAuth)
- if err != nil {
- return nil, err
- }
- return resp.body, nil
-}
-
-func (cli *Client) tryPluginUpgrade(ctx context.Context, query url.Values, privileges types.PluginPrivileges, name, registryAuth string) (serverResponse, error) {
- headers := map[string][]string{"X-Registry-Auth": {registryAuth}}
- return cli.post(ctx, "/plugins/"+name+"/upgrade", query, privileges, headers)
-}
diff --git a/vendor/github.com/docker/docker/client/request.go b/vendor/github.com/docker/docker/client/request.go
deleted file mode 100644
index 813eac2c9e..0000000000
--- a/vendor/github.com/docker/docker/client/request.go
+++ /dev/null
@@ -1,269 +0,0 @@
-package client // import "github.com/docker/docker/client"
-
-import (
- "bytes"
- "context"
- "encoding/json"
- "fmt"
- "io"
- "io/ioutil"
- "net"
- "net/http"
- "net/url"
- "os"
- "strings"
-
- "github.com/docker/docker/api/types"
- "github.com/docker/docker/api/types/versions"
- "github.com/docker/docker/errdefs"
- "github.com/pkg/errors"
-)
-
-// serverResponse is a wrapper for http API responses.
-type serverResponse struct {
- body io.ReadCloser
- header http.Header
- statusCode int
- reqURL *url.URL
-}
-
-// head sends an http request to the docker API using the method HEAD.
-func (cli *Client) head(ctx context.Context, path string, query url.Values, headers map[string][]string) (serverResponse, error) {
- return cli.sendRequest(ctx, http.MethodHead, path, query, nil, headers)
-}
-
-// get sends an http request to the docker API using the method GET with a specific Go context.
-func (cli *Client) get(ctx context.Context, path string, query url.Values, headers map[string][]string) (serverResponse, error) {
- return cli.sendRequest(ctx, http.MethodGet, path, query, nil, headers)
-}
-
-// post sends an http request to the docker API using the method POST with a specific Go context.
-func (cli *Client) post(ctx context.Context, path string, query url.Values, obj interface{}, headers map[string][]string) (serverResponse, error) {
- body, headers, err := encodeBody(obj, headers)
- if err != nil {
- return serverResponse{}, err
- }
- return cli.sendRequest(ctx, http.MethodPost, path, query, body, headers)
-}
-
-func (cli *Client) postRaw(ctx context.Context, path string, query url.Values, body io.Reader, headers map[string][]string) (serverResponse, error) {
- return cli.sendRequest(ctx, http.MethodPost, path, query, body, headers)
-}
-
-// putRaw sends an http request to the docker API using the method PUT.
-func (cli *Client) putRaw(ctx context.Context, path string, query url.Values, body io.Reader, headers map[string][]string) (serverResponse, error) {
- return cli.sendRequest(ctx, http.MethodPut, path, query, body, headers)
-}
-
-// delete sends an http request to the docker API using the method DELETE.
-func (cli *Client) delete(ctx context.Context, path string, query url.Values, headers map[string][]string) (serverResponse, error) {
- return cli.sendRequest(ctx, http.MethodDelete, path, query, nil, headers)
-}
-
-type headers map[string][]string
-
-func encodeBody(obj interface{}, headers headers) (io.Reader, headers, error) {
- if obj == nil {
- return nil, headers, nil
- }
-
- body, err := encodeData(obj)
- if err != nil {
- return nil, headers, err
- }
- if headers == nil {
- headers = make(map[string][]string)
- }
- headers["Content-Type"] = []string{"application/json"}
- return body, headers, nil
-}
-
-func (cli *Client) buildRequest(method, path string, body io.Reader, headers headers) (*http.Request, error) {
- expectedPayload := (method == http.MethodPost || method == http.MethodPut)
- if expectedPayload && body == nil {
- body = bytes.NewReader([]byte{})
- }
-
- req, err := http.NewRequest(method, path, body)
- if err != nil {
- return nil, err
- }
- req = cli.addHeaders(req, headers)
-
- if cli.proto == "unix" || cli.proto == "npipe" {
- // For local communications, it doesn't matter what the host is. We just
- // need a valid and meaningful host name. (See #189)
- req.Host = "docker"
- }
-
- req.URL.Host = cli.addr
- req.URL.Scheme = cli.scheme
-
- if expectedPayload && req.Header.Get("Content-Type") == "" {
- req.Header.Set("Content-Type", "text/plain")
- }
- return req, nil
-}
-
-func (cli *Client) sendRequest(ctx context.Context, method, path string, query url.Values, body io.Reader, headers headers) (serverResponse, error) {
- req, err := cli.buildRequest(method, cli.getAPIPath(ctx, path, query), body, headers)
- if err != nil {
- return serverResponse{}, err
- }
- resp, err := cli.doRequest(ctx, req)
- if err != nil {
- return resp, errdefs.FromStatusCode(err, resp.statusCode)
- }
- err = cli.checkResponseErr(resp)
- return resp, errdefs.FromStatusCode(err, resp.statusCode)
-}
-
-func (cli *Client) doRequest(ctx context.Context, req *http.Request) (serverResponse, error) {
- serverResp := serverResponse{statusCode: -1, reqURL: req.URL}
-
- req = req.WithContext(ctx)
- resp, err := cli.client.Do(req)
- if err != nil {
- if cli.scheme != "https" && strings.Contains(err.Error(), "malformed HTTP response") {
- return serverResp, fmt.Errorf("%v.\n* Are you trying to connect to a TLS-enabled daemon without TLS?", err)
- }
-
- if cli.scheme == "https" && strings.Contains(err.Error(), "bad certificate") {
- return serverResp, errors.Wrap(err, "The server probably has client authentication (--tlsverify) enabled. Please check your TLS client certification settings")
- }
-
- // Don't decorate context sentinel errors; users may be comparing to
- // them directly.
- if errors.Is(err, context.Canceled) || errors.Is(err, context.DeadlineExceeded) {
- return serverResp, err
- }
-
- if nErr, ok := err.(*url.Error); ok {
- if nErr, ok := nErr.Err.(*net.OpError); ok {
- if os.IsPermission(nErr.Err) {
- return serverResp, errors.Wrapf(err, "Got permission denied while trying to connect to the Docker daemon socket at %v", cli.host)
- }
- }
- }
-
- if err, ok := err.(net.Error); ok {
- if err.Timeout() {
- return serverResp, ErrorConnectionFailed(cli.host)
- }
- if !err.Temporary() {
- if strings.Contains(err.Error(), "connection refused") || strings.Contains(err.Error(), "dial unix") {
- return serverResp, ErrorConnectionFailed(cli.host)
- }
- }
- }
-
- // Although there's not a strongly typed error for this in go-winio,
- // lots of people are using the default configuration for the docker
- // daemon on Windows where the daemon is listening on a named pipe
- // `//./pipe/docker_engine, and the client must be running elevated.
- // Give users a clue rather than the not-overly useful message
- // such as `error during connect: Get http://%2F%2F.%2Fpipe%2Fdocker_engine/v1.26/info:
- // open //./pipe/docker_engine: The system cannot find the file specified.`.
- // Note we can't string compare "The system cannot find the file specified" as
- // this is localised - for example in French the error would be
- // `open //./pipe/docker_engine: Le fichier spécifié est introuvable.`
- if strings.Contains(err.Error(), `open //./pipe/docker_engine`) {
- // Checks if client is running with elevated privileges
- if f, elevatedErr := os.Open("\\\\.\\PHYSICALDRIVE0"); elevatedErr == nil {
- err = errors.Wrap(err, "In the default daemon configuration on Windows, the docker client must be run with elevated privileges to connect.")
- } else {
- f.Close()
- err = errors.Wrap(err, "This error may indicate that the docker daemon is not running.")
- }
- }
-
- return serverResp, errors.Wrap(err, "error during connect")
- }
-
- if resp != nil {
- serverResp.statusCode = resp.StatusCode
- serverResp.body = resp.Body
- serverResp.header = resp.Header
- }
- return serverResp, nil
-}
-
-func (cli *Client) checkResponseErr(serverResp serverResponse) error {
- if serverResp.statusCode >= 200 && serverResp.statusCode < 400 {
- return nil
- }
-
- var body []byte
- var err error
- if serverResp.body != nil {
- bodyMax := 1 * 1024 * 1024 // 1 MiB
- bodyR := &io.LimitedReader{
- R: serverResp.body,
- N: int64(bodyMax),
- }
- body, err = ioutil.ReadAll(bodyR)
- if err != nil {
- return err
- }
- if bodyR.N == 0 {
- return fmt.Errorf("request returned %s with a message (> %d bytes) for API route and version %s, check if the server supports the requested API version", http.StatusText(serverResp.statusCode), bodyMax, serverResp.reqURL)
- }
- }
- if len(body) == 0 {
- return fmt.Errorf("request returned %s for API route and version %s, check if the server supports the requested API version", http.StatusText(serverResp.statusCode), serverResp.reqURL)
- }
-
- var ct string
- if serverResp.header != nil {
- ct = serverResp.header.Get("Content-Type")
- }
-
- var errorMessage string
- if (cli.version == "" || versions.GreaterThan(cli.version, "1.23")) && ct == "application/json" {
- var errorResponse types.ErrorResponse
- if err := json.Unmarshal(body, &errorResponse); err != nil {
- return errors.Wrap(err, "Error reading JSON")
- }
- errorMessage = strings.TrimSpace(errorResponse.Message)
- } else {
- errorMessage = strings.TrimSpace(string(body))
- }
-
- return errors.Wrap(errors.New(errorMessage), "Error response from daemon")
-}
-
-func (cli *Client) addHeaders(req *http.Request, headers headers) *http.Request {
- // Add CLI Config's HTTP Headers BEFORE we set the Docker headers
- // then the user can't change OUR headers
- for k, v := range cli.customHTTPHeaders {
- if versions.LessThan(cli.version, "1.25") && k == "User-Agent" {
- continue
- }
- req.Header.Set(k, v)
- }
-
- if headers != nil {
- for k, v := range headers {
- req.Header[k] = v
- }
- }
- return req
-}
-
-func encodeData(data interface{}) (*bytes.Buffer, error) {
- params := bytes.NewBuffer(nil)
- if data != nil {
- if err := json.NewEncoder(params).Encode(data); err != nil {
- return nil, err
- }
- }
- return params, nil
-}
-
-func ensureReaderClosed(response serverResponse) {
- if response.body != nil {
- // Drain up to 512 bytes and close the body to let the Transport reuse the connection
- io.CopyN(ioutil.Discard, response.body, 512)
- response.body.Close()
- }
-}
diff --git a/vendor/github.com/docker/docker/client/secret_create.go b/vendor/github.com/docker/docker/client/secret_create.go
deleted file mode 100644
index fd5b914136..0000000000
--- a/vendor/github.com/docker/docker/client/secret_create.go
+++ /dev/null
@@ -1,25 +0,0 @@
-package client // import "github.com/docker/docker/client"
-
-import (
- "context"
- "encoding/json"
-
- "github.com/docker/docker/api/types"
- "github.com/docker/docker/api/types/swarm"
-)
-
-// SecretCreate creates a new Secret.
-func (cli *Client) SecretCreate(ctx context.Context, secret swarm.SecretSpec) (types.SecretCreateResponse, error) {
- var response types.SecretCreateResponse
- if err := cli.NewVersionError("1.25", "secret create"); err != nil {
- return response, err
- }
- resp, err := cli.post(ctx, "/secrets/create", nil, secret, nil)
- defer ensureReaderClosed(resp)
- if err != nil {
- return response, err
- }
-
- err = json.NewDecoder(resp.body).Decode(&response)
- return response, err
-}
diff --git a/vendor/github.com/docker/docker/client/secret_inspect.go b/vendor/github.com/docker/docker/client/secret_inspect.go
deleted file mode 100644
index d093916c9a..0000000000
--- a/vendor/github.com/docker/docker/client/secret_inspect.go
+++ /dev/null
@@ -1,36 +0,0 @@
-package client // import "github.com/docker/docker/client"
-
-import (
- "bytes"
- "context"
- "encoding/json"
- "io/ioutil"
-
- "github.com/docker/docker/api/types/swarm"
-)
-
-// SecretInspectWithRaw returns the secret information with raw data
-func (cli *Client) SecretInspectWithRaw(ctx context.Context, id string) (swarm.Secret, []byte, error) {
- if err := cli.NewVersionError("1.25", "secret inspect"); err != nil {
- return swarm.Secret{}, nil, err
- }
- if id == "" {
- return swarm.Secret{}, nil, objectNotFoundError{object: "secret", id: id}
- }
- resp, err := cli.get(ctx, "/secrets/"+id, nil, nil)
- defer ensureReaderClosed(resp)
- if err != nil {
- return swarm.Secret{}, nil, wrapResponseError(err, resp, "secret", id)
- }
-
- body, err := ioutil.ReadAll(resp.body)
- if err != nil {
- return swarm.Secret{}, nil, err
- }
-
- var secret swarm.Secret
- rdr := bytes.NewReader(body)
- err = json.NewDecoder(rdr).Decode(&secret)
-
- return secret, body, err
-}
diff --git a/vendor/github.com/docker/docker/client/secret_list.go b/vendor/github.com/docker/docker/client/secret_list.go
deleted file mode 100644
index a0289c9f44..0000000000
--- a/vendor/github.com/docker/docker/client/secret_list.go
+++ /dev/null
@@ -1,38 +0,0 @@
-package client // import "github.com/docker/docker/client"
-
-import (
- "context"
- "encoding/json"
- "net/url"
-
- "github.com/docker/docker/api/types"
- "github.com/docker/docker/api/types/filters"
- "github.com/docker/docker/api/types/swarm"
-)
-
-// SecretList returns the list of secrets.
-func (cli *Client) SecretList(ctx context.Context, options types.SecretListOptions) ([]swarm.Secret, error) {
- if err := cli.NewVersionError("1.25", "secret list"); err != nil {
- return nil, err
- }
- query := url.Values{}
-
- if options.Filters.Len() > 0 {
- filterJSON, err := filters.ToJSON(options.Filters)
- if err != nil {
- return nil, err
- }
-
- query.Set("filters", filterJSON)
- }
-
- resp, err := cli.get(ctx, "/secrets", query, nil)
- defer ensureReaderClosed(resp)
- if err != nil {
- return nil, err
- }
-
- var secrets []swarm.Secret
- err = json.NewDecoder(resp.body).Decode(&secrets)
- return secrets, err
-}
diff --git a/vendor/github.com/docker/docker/client/secret_remove.go b/vendor/github.com/docker/docker/client/secret_remove.go
deleted file mode 100644
index c16f555804..0000000000
--- a/vendor/github.com/docker/docker/client/secret_remove.go
+++ /dev/null
@@ -1,13 +0,0 @@
-package client // import "github.com/docker/docker/client"
-
-import "context"
-
-// SecretRemove removes a Secret.
-func (cli *Client) SecretRemove(ctx context.Context, id string) error {
- if err := cli.NewVersionError("1.25", "secret remove"); err != nil {
- return err
- }
- resp, err := cli.delete(ctx, "/secrets/"+id, nil, nil)
- defer ensureReaderClosed(resp)
- return wrapResponseError(err, resp, "secret", id)
-}
diff --git a/vendor/github.com/docker/docker/client/secret_update.go b/vendor/github.com/docker/docker/client/secret_update.go
deleted file mode 100644
index 164256bbc1..0000000000
--- a/vendor/github.com/docker/docker/client/secret_update.go
+++ /dev/null
@@ -1,21 +0,0 @@
-package client // import "github.com/docker/docker/client"
-
-import (
- "context"
- "net/url"
- "strconv"
-
- "github.com/docker/docker/api/types/swarm"
-)
-
-// SecretUpdate attempts to update a Secret
-func (cli *Client) SecretUpdate(ctx context.Context, id string, version swarm.Version, secret swarm.SecretSpec) error {
- if err := cli.NewVersionError("1.25", "secret update"); err != nil {
- return err
- }
- query := url.Values{}
- query.Set("version", strconv.FormatUint(version.Index, 10))
- resp, err := cli.post(ctx, "/secrets/"+id+"/update", query, secret, nil)
- ensureReaderClosed(resp)
- return err
-}
diff --git a/vendor/github.com/docker/docker/client/service_create.go b/vendor/github.com/docker/docker/client/service_create.go
deleted file mode 100644
index e0428bf98b..0000000000
--- a/vendor/github.com/docker/docker/client/service_create.go
+++ /dev/null
@@ -1,178 +0,0 @@
-package client // import "github.com/docker/docker/client"
-
-import (
- "context"
- "encoding/json"
- "fmt"
- "strings"
-
- "github.com/docker/distribution/reference"
- "github.com/docker/docker/api/types"
- "github.com/docker/docker/api/types/swarm"
- digest "github.com/opencontainers/go-digest"
- "github.com/pkg/errors"
-)
-
-// ServiceCreate creates a new Service.
-func (cli *Client) ServiceCreate(ctx context.Context, service swarm.ServiceSpec, options types.ServiceCreateOptions) (types.ServiceCreateResponse, error) {
- var response types.ServiceCreateResponse
- headers := map[string][]string{
- "version": {cli.version},
- }
-
- if options.EncodedRegistryAuth != "" {
- headers["X-Registry-Auth"] = []string{options.EncodedRegistryAuth}
- }
-
- // Make sure containerSpec is not nil when no runtime is set or the runtime is set to container
- if service.TaskTemplate.ContainerSpec == nil && (service.TaskTemplate.Runtime == "" || service.TaskTemplate.Runtime == swarm.RuntimeContainer) {
- service.TaskTemplate.ContainerSpec = &swarm.ContainerSpec{}
- }
-
- if err := validateServiceSpec(service); err != nil {
- return response, err
- }
-
- // ensure that the image is tagged
- var resolveWarning string
- switch {
- case service.TaskTemplate.ContainerSpec != nil:
- if taggedImg := imageWithTagString(service.TaskTemplate.ContainerSpec.Image); taggedImg != "" {
- service.TaskTemplate.ContainerSpec.Image = taggedImg
- }
- if options.QueryRegistry {
- resolveWarning = resolveContainerSpecImage(ctx, cli, &service.TaskTemplate, options.EncodedRegistryAuth)
- }
- case service.TaskTemplate.PluginSpec != nil:
- if taggedImg := imageWithTagString(service.TaskTemplate.PluginSpec.Remote); taggedImg != "" {
- service.TaskTemplate.PluginSpec.Remote = taggedImg
- }
- if options.QueryRegistry {
- resolveWarning = resolvePluginSpecRemote(ctx, cli, &service.TaskTemplate, options.EncodedRegistryAuth)
- }
- }
-
- resp, err := cli.post(ctx, "/services/create", nil, service, headers)
- defer ensureReaderClosed(resp)
- if err != nil {
- return response, err
- }
-
- err = json.NewDecoder(resp.body).Decode(&response)
- if resolveWarning != "" {
- response.Warnings = append(response.Warnings, resolveWarning)
- }
-
- return response, err
-}
-
-func resolveContainerSpecImage(ctx context.Context, cli DistributionAPIClient, taskSpec *swarm.TaskSpec, encodedAuth string) string {
- var warning string
- if img, imgPlatforms, err := imageDigestAndPlatforms(ctx, cli, taskSpec.ContainerSpec.Image, encodedAuth); err != nil {
- warning = digestWarning(taskSpec.ContainerSpec.Image)
- } else {
- taskSpec.ContainerSpec.Image = img
- if len(imgPlatforms) > 0 {
- if taskSpec.Placement == nil {
- taskSpec.Placement = &swarm.Placement{}
- }
- taskSpec.Placement.Platforms = imgPlatforms
- }
- }
- return warning
-}
-
-func resolvePluginSpecRemote(ctx context.Context, cli DistributionAPIClient, taskSpec *swarm.TaskSpec, encodedAuth string) string {
- var warning string
- if img, imgPlatforms, err := imageDigestAndPlatforms(ctx, cli, taskSpec.PluginSpec.Remote, encodedAuth); err != nil {
- warning = digestWarning(taskSpec.PluginSpec.Remote)
- } else {
- taskSpec.PluginSpec.Remote = img
- if len(imgPlatforms) > 0 {
- if taskSpec.Placement == nil {
- taskSpec.Placement = &swarm.Placement{}
- }
- taskSpec.Placement.Platforms = imgPlatforms
- }
- }
- return warning
-}
-
-func imageDigestAndPlatforms(ctx context.Context, cli DistributionAPIClient, image, encodedAuth string) (string, []swarm.Platform, error) {
- distributionInspect, err := cli.DistributionInspect(ctx, image, encodedAuth)
- var platforms []swarm.Platform
- if err != nil {
- return "", nil, err
- }
-
- imageWithDigest := imageWithDigestString(image, distributionInspect.Descriptor.Digest)
-
- if len(distributionInspect.Platforms) > 0 {
- platforms = make([]swarm.Platform, 0, len(distributionInspect.Platforms))
- for _, p := range distributionInspect.Platforms {
- // clear architecture field for arm. This is a temporary patch to address
- // https://github.com/docker/swarmkit/issues/2294. The issue is that while
- // image manifests report "arm" as the architecture, the node reports
- // something like "armv7l" (includes the variant), which causes arm images
- // to stop working with swarm mode. This patch removes the architecture
- // constraint for arm images to ensure tasks get scheduled.
- arch := p.Architecture
- if strings.ToLower(arch) == "arm" {
- arch = ""
- }
- platforms = append(platforms, swarm.Platform{
- Architecture: arch,
- OS: p.OS,
- })
- }
- }
- return imageWithDigest, platforms, err
-}
-
-// imageWithDigestString takes an image string and a digest, and updates
-// the image string if it didn't originally contain a digest. It returns
-// image unmodified in other situations.
-func imageWithDigestString(image string, dgst digest.Digest) string {
- namedRef, err := reference.ParseNormalizedNamed(image)
- if err == nil {
- if _, isCanonical := namedRef.(reference.Canonical); !isCanonical {
- // ensure that image gets a default tag if none is provided
- img, err := reference.WithDigest(namedRef, dgst)
- if err == nil {
- return reference.FamiliarString(img)
- }
- }
- }
- return image
-}
-
-// imageWithTagString takes an image string, and returns a tagged image
-// string, adding a 'latest' tag if one was not provided. It returns an
-// empty string if a canonical reference was provided
-func imageWithTagString(image string) string {
- namedRef, err := reference.ParseNormalizedNamed(image)
- if err == nil {
- return reference.FamiliarString(reference.TagNameOnly(namedRef))
- }
- return ""
-}
-
-// digestWarning constructs a formatted warning string using the
-// image name that could not be pinned by digest. The formatting
-// is hardcoded, but could me made smarter in the future
-func digestWarning(image string) string {
- return fmt.Sprintf("image %s could not be accessed on a registry to record\nits digest. Each node will access %s independently,\npossibly leading to different nodes running different\nversions of the image.\n", image, image)
-}
-
-func validateServiceSpec(s swarm.ServiceSpec) error {
- if s.TaskTemplate.ContainerSpec != nil && s.TaskTemplate.PluginSpec != nil {
- return errors.New("must not specify both a container spec and a plugin spec in the task template")
- }
- if s.TaskTemplate.PluginSpec != nil && s.TaskTemplate.Runtime != swarm.RuntimePlugin {
- return errors.New("mismatched runtime with plugin spec")
- }
- if s.TaskTemplate.ContainerSpec != nil && (s.TaskTemplate.Runtime != "" && s.TaskTemplate.Runtime != swarm.RuntimeContainer) {
- return errors.New("mismatched runtime with container spec")
- }
- return nil
-}
diff --git a/vendor/github.com/docker/docker/client/service_inspect.go b/vendor/github.com/docker/docker/client/service_inspect.go
deleted file mode 100644
index 2801483b80..0000000000
--- a/vendor/github.com/docker/docker/client/service_inspect.go
+++ /dev/null
@@ -1,37 +0,0 @@
-package client // import "github.com/docker/docker/client"
-
-import (
- "bytes"
- "context"
- "encoding/json"
- "fmt"
- "io/ioutil"
- "net/url"
-
- "github.com/docker/docker/api/types"
- "github.com/docker/docker/api/types/swarm"
-)
-
-// ServiceInspectWithRaw returns the service information and the raw data.
-func (cli *Client) ServiceInspectWithRaw(ctx context.Context, serviceID string, opts types.ServiceInspectOptions) (swarm.Service, []byte, error) {
- if serviceID == "" {
- return swarm.Service{}, nil, objectNotFoundError{object: "service", id: serviceID}
- }
- query := url.Values{}
- query.Set("insertDefaults", fmt.Sprintf("%v", opts.InsertDefaults))
- serverResp, err := cli.get(ctx, "/services/"+serviceID, query, nil)
- defer ensureReaderClosed(serverResp)
- if err != nil {
- return swarm.Service{}, nil, wrapResponseError(err, serverResp, "service", serviceID)
- }
-
- body, err := ioutil.ReadAll(serverResp.body)
- if err != nil {
- return swarm.Service{}, nil, err
- }
-
- var response swarm.Service
- rdr := bytes.NewReader(body)
- err = json.NewDecoder(rdr).Decode(&response)
- return response, body, err
-}
diff --git a/vendor/github.com/docker/docker/client/service_list.go b/vendor/github.com/docker/docker/client/service_list.go
deleted file mode 100644
index f97ec75a5c..0000000000
--- a/vendor/github.com/docker/docker/client/service_list.go
+++ /dev/null
@@ -1,39 +0,0 @@
-package client // import "github.com/docker/docker/client"
-
-import (
- "context"
- "encoding/json"
- "net/url"
-
- "github.com/docker/docker/api/types"
- "github.com/docker/docker/api/types/filters"
- "github.com/docker/docker/api/types/swarm"
-)
-
-// ServiceList returns the list of services.
-func (cli *Client) ServiceList(ctx context.Context, options types.ServiceListOptions) ([]swarm.Service, error) {
- query := url.Values{}
-
- if options.Filters.Len() > 0 {
- filterJSON, err := filters.ToJSON(options.Filters)
- if err != nil {
- return nil, err
- }
-
- query.Set("filters", filterJSON)
- }
-
- if options.Status {
- query.Set("status", "true")
- }
-
- resp, err := cli.get(ctx, "/services", query, nil)
- defer ensureReaderClosed(resp)
- if err != nil {
- return nil, err
- }
-
- var services []swarm.Service
- err = json.NewDecoder(resp.body).Decode(&services)
- return services, err
-}
diff --git a/vendor/github.com/docker/docker/client/service_logs.go b/vendor/github.com/docker/docker/client/service_logs.go
deleted file mode 100644
index 906fd4059e..0000000000
--- a/vendor/github.com/docker/docker/client/service_logs.go
+++ /dev/null
@@ -1,52 +0,0 @@
-package client // import "github.com/docker/docker/client"
-
-import (
- "context"
- "io"
- "net/url"
- "time"
-
- "github.com/docker/docker/api/types"
- timetypes "github.com/docker/docker/api/types/time"
- "github.com/pkg/errors"
-)
-
-// ServiceLogs returns the logs generated by a service in an io.ReadCloser.
-// It's up to the caller to close the stream.
-func (cli *Client) ServiceLogs(ctx context.Context, serviceID string, options types.ContainerLogsOptions) (io.ReadCloser, error) {
- query := url.Values{}
- if options.ShowStdout {
- query.Set("stdout", "1")
- }
-
- if options.ShowStderr {
- query.Set("stderr", "1")
- }
-
- if options.Since != "" {
- ts, err := timetypes.GetTimestamp(options.Since, time.Now())
- if err != nil {
- return nil, errors.Wrap(err, `invalid value for "since"`)
- }
- query.Set("since", ts)
- }
-
- if options.Timestamps {
- query.Set("timestamps", "1")
- }
-
- if options.Details {
- query.Set("details", "1")
- }
-
- if options.Follow {
- query.Set("follow", "1")
- }
- query.Set("tail", options.Tail)
-
- resp, err := cli.get(ctx, "/services/"+serviceID+"/logs", query, nil)
- if err != nil {
- return nil, err
- }
- return resp.body, nil
-}
diff --git a/vendor/github.com/docker/docker/client/service_remove.go b/vendor/github.com/docker/docker/client/service_remove.go
deleted file mode 100644
index 953a2adf5a..0000000000
--- a/vendor/github.com/docker/docker/client/service_remove.go
+++ /dev/null
@@ -1,10 +0,0 @@
-package client // import "github.com/docker/docker/client"
-
-import "context"
-
-// ServiceRemove kills and removes a service.
-func (cli *Client) ServiceRemove(ctx context.Context, serviceID string) error {
- resp, err := cli.delete(ctx, "/services/"+serviceID, nil, nil)
- defer ensureReaderClosed(resp)
- return wrapResponseError(err, resp, "service", serviceID)
-}
diff --git a/vendor/github.com/docker/docker/client/service_update.go b/vendor/github.com/docker/docker/client/service_update.go
deleted file mode 100644
index c63895f74f..0000000000
--- a/vendor/github.com/docker/docker/client/service_update.go
+++ /dev/null
@@ -1,75 +0,0 @@
-package client // import "github.com/docker/docker/client"
-
-import (
- "context"
- "encoding/json"
- "net/url"
- "strconv"
-
- "github.com/docker/docker/api/types"
- "github.com/docker/docker/api/types/swarm"
-)
-
-// ServiceUpdate updates a Service. The version number is required to avoid conflicting writes.
-// It should be the value as set *before* the update. You can find this value in the Meta field
-// of swarm.Service, which can be found using ServiceInspectWithRaw.
-func (cli *Client) ServiceUpdate(ctx context.Context, serviceID string, version swarm.Version, service swarm.ServiceSpec, options types.ServiceUpdateOptions) (types.ServiceUpdateResponse, error) {
- var (
- query = url.Values{}
- response = types.ServiceUpdateResponse{}
- )
-
- headers := map[string][]string{
- "version": {cli.version},
- }
-
- if options.EncodedRegistryAuth != "" {
- headers["X-Registry-Auth"] = []string{options.EncodedRegistryAuth}
- }
-
- if options.RegistryAuthFrom != "" {
- query.Set("registryAuthFrom", options.RegistryAuthFrom)
- }
-
- if options.Rollback != "" {
- query.Set("rollback", options.Rollback)
- }
-
- query.Set("version", strconv.FormatUint(version.Index, 10))
-
- if err := validateServiceSpec(service); err != nil {
- return response, err
- }
-
- // ensure that the image is tagged
- var resolveWarning string
- switch {
- case service.TaskTemplate.ContainerSpec != nil:
- if taggedImg := imageWithTagString(service.TaskTemplate.ContainerSpec.Image); taggedImg != "" {
- service.TaskTemplate.ContainerSpec.Image = taggedImg
- }
- if options.QueryRegistry {
- resolveWarning = resolveContainerSpecImage(ctx, cli, &service.TaskTemplate, options.EncodedRegistryAuth)
- }
- case service.TaskTemplate.PluginSpec != nil:
- if taggedImg := imageWithTagString(service.TaskTemplate.PluginSpec.Remote); taggedImg != "" {
- service.TaskTemplate.PluginSpec.Remote = taggedImg
- }
- if options.QueryRegistry {
- resolveWarning = resolvePluginSpecRemote(ctx, cli, &service.TaskTemplate, options.EncodedRegistryAuth)
- }
- }
-
- resp, err := cli.post(ctx, "/services/"+serviceID+"/update", query, service, headers)
- defer ensureReaderClosed(resp)
- if err != nil {
- return response, err
- }
-
- err = json.NewDecoder(resp.body).Decode(&response)
- if resolveWarning != "" {
- response.Warnings = append(response.Warnings, resolveWarning)
- }
-
- return response, err
-}
diff --git a/vendor/github.com/docker/docker/client/swarm_get_unlock_key.go b/vendor/github.com/docker/docker/client/swarm_get_unlock_key.go
deleted file mode 100644
index 19f59dd582..0000000000
--- a/vendor/github.com/docker/docker/client/swarm_get_unlock_key.go
+++ /dev/null
@@ -1,21 +0,0 @@
-package client // import "github.com/docker/docker/client"
-
-import (
- "context"
- "encoding/json"
-
- "github.com/docker/docker/api/types"
-)
-
-// SwarmGetUnlockKey retrieves the swarm's unlock key.
-func (cli *Client) SwarmGetUnlockKey(ctx context.Context) (types.SwarmUnlockKeyResponse, error) {
- serverResp, err := cli.get(ctx, "/swarm/unlockkey", nil, nil)
- defer ensureReaderClosed(serverResp)
- if err != nil {
- return types.SwarmUnlockKeyResponse{}, err
- }
-
- var response types.SwarmUnlockKeyResponse
- err = json.NewDecoder(serverResp.body).Decode(&response)
- return response, err
-}
diff --git a/vendor/github.com/docker/docker/client/swarm_init.go b/vendor/github.com/docker/docker/client/swarm_init.go
deleted file mode 100644
index da3c1637ef..0000000000
--- a/vendor/github.com/docker/docker/client/swarm_init.go
+++ /dev/null
@@ -1,21 +0,0 @@
-package client // import "github.com/docker/docker/client"
-
-import (
- "context"
- "encoding/json"
-
- "github.com/docker/docker/api/types/swarm"
-)
-
-// SwarmInit initializes the swarm.
-func (cli *Client) SwarmInit(ctx context.Context, req swarm.InitRequest) (string, error) {
- serverResp, err := cli.post(ctx, "/swarm/init", nil, req, nil)
- defer ensureReaderClosed(serverResp)
- if err != nil {
- return "", err
- }
-
- var response string
- err = json.NewDecoder(serverResp.body).Decode(&response)
- return response, err
-}
diff --git a/vendor/github.com/docker/docker/client/swarm_inspect.go b/vendor/github.com/docker/docker/client/swarm_inspect.go
deleted file mode 100644
index b52b67a884..0000000000
--- a/vendor/github.com/docker/docker/client/swarm_inspect.go
+++ /dev/null
@@ -1,21 +0,0 @@
-package client // import "github.com/docker/docker/client"
-
-import (
- "context"
- "encoding/json"
-
- "github.com/docker/docker/api/types/swarm"
-)
-
-// SwarmInspect inspects the swarm.
-func (cli *Client) SwarmInspect(ctx context.Context) (swarm.Swarm, error) {
- serverResp, err := cli.get(ctx, "/swarm", nil, nil)
- defer ensureReaderClosed(serverResp)
- if err != nil {
- return swarm.Swarm{}, err
- }
-
- var response swarm.Swarm
- err = json.NewDecoder(serverResp.body).Decode(&response)
- return response, err
-}
diff --git a/vendor/github.com/docker/docker/client/swarm_join.go b/vendor/github.com/docker/docker/client/swarm_join.go
deleted file mode 100644
index a1cf0455d2..0000000000
--- a/vendor/github.com/docker/docker/client/swarm_join.go
+++ /dev/null
@@ -1,14 +0,0 @@
-package client // import "github.com/docker/docker/client"
-
-import (
- "context"
-
- "github.com/docker/docker/api/types/swarm"
-)
-
-// SwarmJoin joins the swarm.
-func (cli *Client) SwarmJoin(ctx context.Context, req swarm.JoinRequest) error {
- resp, err := cli.post(ctx, "/swarm/join", nil, req, nil)
- ensureReaderClosed(resp)
- return err
-}
diff --git a/vendor/github.com/docker/docker/client/swarm_leave.go b/vendor/github.com/docker/docker/client/swarm_leave.go
deleted file mode 100644
index 90ca84b363..0000000000
--- a/vendor/github.com/docker/docker/client/swarm_leave.go
+++ /dev/null
@@ -1,17 +0,0 @@
-package client // import "github.com/docker/docker/client"
-
-import (
- "context"
- "net/url"
-)
-
-// SwarmLeave leaves the swarm.
-func (cli *Client) SwarmLeave(ctx context.Context, force bool) error {
- query := url.Values{}
- if force {
- query.Set("force", "1")
- }
- resp, err := cli.post(ctx, "/swarm/leave", query, nil, nil)
- ensureReaderClosed(resp)
- return err
-}
diff --git a/vendor/github.com/docker/docker/client/swarm_unlock.go b/vendor/github.com/docker/docker/client/swarm_unlock.go
deleted file mode 100644
index d2412f7d44..0000000000
--- a/vendor/github.com/docker/docker/client/swarm_unlock.go
+++ /dev/null
@@ -1,14 +0,0 @@
-package client // import "github.com/docker/docker/client"
-
-import (
- "context"
-
- "github.com/docker/docker/api/types/swarm"
-)
-
-// SwarmUnlock unlocks locked swarm.
-func (cli *Client) SwarmUnlock(ctx context.Context, req swarm.UnlockRequest) error {
- serverResp, err := cli.post(ctx, "/swarm/unlock", nil, req, nil)
- ensureReaderClosed(serverResp)
- return err
-}
diff --git a/vendor/github.com/docker/docker/client/swarm_update.go b/vendor/github.com/docker/docker/client/swarm_update.go
deleted file mode 100644
index 56a5bea761..0000000000
--- a/vendor/github.com/docker/docker/client/swarm_update.go
+++ /dev/null
@@ -1,22 +0,0 @@
-package client // import "github.com/docker/docker/client"
-
-import (
- "context"
- "fmt"
- "net/url"
- "strconv"
-
- "github.com/docker/docker/api/types/swarm"
-)
-
-// SwarmUpdate updates the swarm.
-func (cli *Client) SwarmUpdate(ctx context.Context, version swarm.Version, swarm swarm.Spec, flags swarm.UpdateFlags) error {
- query := url.Values{}
- query.Set("version", strconv.FormatUint(version.Index, 10))
- query.Set("rotateWorkerToken", fmt.Sprintf("%v", flags.RotateWorkerToken))
- query.Set("rotateManagerToken", fmt.Sprintf("%v", flags.RotateManagerToken))
- query.Set("rotateManagerUnlockKey", fmt.Sprintf("%v", flags.RotateManagerUnlockKey))
- resp, err := cli.post(ctx, "/swarm/update", query, swarm, nil)
- ensureReaderClosed(resp)
- return err
-}
diff --git a/vendor/github.com/docker/docker/client/task_inspect.go b/vendor/github.com/docker/docker/client/task_inspect.go
deleted file mode 100644
index 44d40ba5ae..0000000000
--- a/vendor/github.com/docker/docker/client/task_inspect.go
+++ /dev/null
@@ -1,32 +0,0 @@
-package client // import "github.com/docker/docker/client"
-
-import (
- "bytes"
- "context"
- "encoding/json"
- "io/ioutil"
-
- "github.com/docker/docker/api/types/swarm"
-)
-
-// TaskInspectWithRaw returns the task information and its raw representation..
-func (cli *Client) TaskInspectWithRaw(ctx context.Context, taskID string) (swarm.Task, []byte, error) {
- if taskID == "" {
- return swarm.Task{}, nil, objectNotFoundError{object: "task", id: taskID}
- }
- serverResp, err := cli.get(ctx, "/tasks/"+taskID, nil, nil)
- defer ensureReaderClosed(serverResp)
- if err != nil {
- return swarm.Task{}, nil, wrapResponseError(err, serverResp, "task", taskID)
- }
-
- body, err := ioutil.ReadAll(serverResp.body)
- if err != nil {
- return swarm.Task{}, nil, err
- }
-
- var response swarm.Task
- rdr := bytes.NewReader(body)
- err = json.NewDecoder(rdr).Decode(&response)
- return response, body, err
-}
diff --git a/vendor/github.com/docker/docker/client/task_list.go b/vendor/github.com/docker/docker/client/task_list.go
deleted file mode 100644
index 4869b44493..0000000000
--- a/vendor/github.com/docker/docker/client/task_list.go
+++ /dev/null
@@ -1,35 +0,0 @@
-package client // import "github.com/docker/docker/client"
-
-import (
- "context"
- "encoding/json"
- "net/url"
-
- "github.com/docker/docker/api/types"
- "github.com/docker/docker/api/types/filters"
- "github.com/docker/docker/api/types/swarm"
-)
-
-// TaskList returns the list of tasks.
-func (cli *Client) TaskList(ctx context.Context, options types.TaskListOptions) ([]swarm.Task, error) {
- query := url.Values{}
-
- if options.Filters.Len() > 0 {
- filterJSON, err := filters.ToJSON(options.Filters)
- if err != nil {
- return nil, err
- }
-
- query.Set("filters", filterJSON)
- }
-
- resp, err := cli.get(ctx, "/tasks", query, nil)
- defer ensureReaderClosed(resp)
- if err != nil {
- return nil, err
- }
-
- var tasks []swarm.Task
- err = json.NewDecoder(resp.body).Decode(&tasks)
- return tasks, err
-}
diff --git a/vendor/github.com/docker/docker/client/task_logs.go b/vendor/github.com/docker/docker/client/task_logs.go
deleted file mode 100644
index 6222fab577..0000000000
--- a/vendor/github.com/docker/docker/client/task_logs.go
+++ /dev/null
@@ -1,51 +0,0 @@
-package client // import "github.com/docker/docker/client"
-
-import (
- "context"
- "io"
- "net/url"
- "time"
-
- "github.com/docker/docker/api/types"
- timetypes "github.com/docker/docker/api/types/time"
-)
-
-// TaskLogs returns the logs generated by a task in an io.ReadCloser.
-// It's up to the caller to close the stream.
-func (cli *Client) TaskLogs(ctx context.Context, taskID string, options types.ContainerLogsOptions) (io.ReadCloser, error) {
- query := url.Values{}
- if options.ShowStdout {
- query.Set("stdout", "1")
- }
-
- if options.ShowStderr {
- query.Set("stderr", "1")
- }
-
- if options.Since != "" {
- ts, err := timetypes.GetTimestamp(options.Since, time.Now())
- if err != nil {
- return nil, err
- }
- query.Set("since", ts)
- }
-
- if options.Timestamps {
- query.Set("timestamps", "1")
- }
-
- if options.Details {
- query.Set("details", "1")
- }
-
- if options.Follow {
- query.Set("follow", "1")
- }
- query.Set("tail", options.Tail)
-
- resp, err := cli.get(ctx, "/tasks/"+taskID+"/logs", query, nil)
- if err != nil {
- return nil, err
- }
- return resp.body, nil
-}
diff --git a/vendor/github.com/docker/docker/client/transport.go b/vendor/github.com/docker/docker/client/transport.go
deleted file mode 100644
index 5541344366..0000000000
--- a/vendor/github.com/docker/docker/client/transport.go
+++ /dev/null
@@ -1,17 +0,0 @@
-package client // import "github.com/docker/docker/client"
-
-import (
- "crypto/tls"
- "net/http"
-)
-
-// resolveTLSConfig attempts to resolve the TLS configuration from the
-// RoundTripper.
-func resolveTLSConfig(transport http.RoundTripper) *tls.Config {
- switch tr := transport.(type) {
- case *http.Transport:
- return tr.TLSClientConfig
- default:
- return nil
- }
-}
diff --git a/vendor/github.com/docker/docker/client/utils.go b/vendor/github.com/docker/docker/client/utils.go
deleted file mode 100644
index 7f3ff44eb8..0000000000
--- a/vendor/github.com/docker/docker/client/utils.go
+++ /dev/null
@@ -1,34 +0,0 @@
-package client // import "github.com/docker/docker/client"
-
-import (
- "net/url"
- "regexp"
-
- "github.com/docker/docker/api/types/filters"
-)
-
-var headerRegexp = regexp.MustCompile(`\ADocker/.+\s\((.+)\)\z`)
-
-// getDockerOS returns the operating system based on the server header from the daemon.
-func getDockerOS(serverHeader string) string {
- var osType string
- matches := headerRegexp.FindStringSubmatch(serverHeader)
- if len(matches) > 0 {
- osType = matches[1]
- }
- return osType
-}
-
-// getFiltersQuery returns a url query with "filters" query term, based on the
-// filters provided.
-func getFiltersQuery(f filters.Args) (url.Values, error) {
- query := url.Values{}
- if f.Len() > 0 {
- filterJSON, err := filters.ToJSON(f)
- if err != nil {
- return query, err
- }
- query.Set("filters", filterJSON)
- }
- return query, nil
-}
diff --git a/vendor/github.com/docker/docker/client/version.go b/vendor/github.com/docker/docker/client/version.go
deleted file mode 100644
index 8f17ff4e87..0000000000
--- a/vendor/github.com/docker/docker/client/version.go
+++ /dev/null
@@ -1,21 +0,0 @@
-package client // import "github.com/docker/docker/client"
-
-import (
- "context"
- "encoding/json"
-
- "github.com/docker/docker/api/types"
-)
-
-// ServerVersion returns information of the docker client and server host.
-func (cli *Client) ServerVersion(ctx context.Context) (types.Version, error) {
- resp, err := cli.get(ctx, "/version", nil, nil)
- defer ensureReaderClosed(resp)
- if err != nil {
- return types.Version{}, err
- }
-
- var server types.Version
- err = json.NewDecoder(resp.body).Decode(&server)
- return server, err
-}
diff --git a/vendor/github.com/docker/docker/client/volume_create.go b/vendor/github.com/docker/docker/client/volume_create.go
deleted file mode 100644
index 92761b3c63..0000000000
--- a/vendor/github.com/docker/docker/client/volume_create.go
+++ /dev/null
@@ -1,21 +0,0 @@
-package client // import "github.com/docker/docker/client"
-
-import (
- "context"
- "encoding/json"
-
- "github.com/docker/docker/api/types"
- volumetypes "github.com/docker/docker/api/types/volume"
-)
-
-// VolumeCreate creates a volume in the docker host.
-func (cli *Client) VolumeCreate(ctx context.Context, options volumetypes.VolumeCreateBody) (types.Volume, error) {
- var volume types.Volume
- resp, err := cli.post(ctx, "/volumes/create", nil, options, nil)
- defer ensureReaderClosed(resp)
- if err != nil {
- return volume, err
- }
- err = json.NewDecoder(resp.body).Decode(&volume)
- return volume, err
-}
diff --git a/vendor/github.com/docker/docker/client/volume_inspect.go b/vendor/github.com/docker/docker/client/volume_inspect.go
deleted file mode 100644
index e20b2c67c7..0000000000
--- a/vendor/github.com/docker/docker/client/volume_inspect.go
+++ /dev/null
@@ -1,38 +0,0 @@
-package client // import "github.com/docker/docker/client"
-
-import (
- "bytes"
- "context"
- "encoding/json"
- "io/ioutil"
-
- "github.com/docker/docker/api/types"
-)
-
-// VolumeInspect returns the information about a specific volume in the docker host.
-func (cli *Client) VolumeInspect(ctx context.Context, volumeID string) (types.Volume, error) {
- volume, _, err := cli.VolumeInspectWithRaw(ctx, volumeID)
- return volume, err
-}
-
-// VolumeInspectWithRaw returns the information about a specific volume in the docker host and its raw representation
-func (cli *Client) VolumeInspectWithRaw(ctx context.Context, volumeID string) (types.Volume, []byte, error) {
- if volumeID == "" {
- return types.Volume{}, nil, objectNotFoundError{object: "volume", id: volumeID}
- }
-
- var volume types.Volume
- resp, err := cli.get(ctx, "/volumes/"+volumeID, nil, nil)
- defer ensureReaderClosed(resp)
- if err != nil {
- return volume, nil, wrapResponseError(err, resp, "volume", volumeID)
- }
-
- body, err := ioutil.ReadAll(resp.body)
- if err != nil {
- return volume, nil, err
- }
- rdr := bytes.NewReader(body)
- err = json.NewDecoder(rdr).Decode(&volume)
- return volume, body, err
-}
diff --git a/vendor/github.com/docker/docker/client/volume_list.go b/vendor/github.com/docker/docker/client/volume_list.go
deleted file mode 100644
index 942498dde2..0000000000
--- a/vendor/github.com/docker/docker/client/volume_list.go
+++ /dev/null
@@ -1,33 +0,0 @@
-package client // import "github.com/docker/docker/client"
-
-import (
- "context"
- "encoding/json"
- "net/url"
-
- "github.com/docker/docker/api/types/filters"
- volumetypes "github.com/docker/docker/api/types/volume"
-)
-
-// VolumeList returns the volumes configured in the docker host.
-func (cli *Client) VolumeList(ctx context.Context, filter filters.Args) (volumetypes.VolumeListOKBody, error) {
- var volumes volumetypes.VolumeListOKBody
- query := url.Values{}
-
- if filter.Len() > 0 {
- //nolint:staticcheck // ignore SA1019 for old code
- filterJSON, err := filters.ToParamWithVersion(cli.version, filter)
- if err != nil {
- return volumes, err
- }
- query.Set("filters", filterJSON)
- }
- resp, err := cli.get(ctx, "/volumes", query, nil)
- defer ensureReaderClosed(resp)
- if err != nil {
- return volumes, err
- }
-
- err = json.NewDecoder(resp.body).Decode(&volumes)
- return volumes, err
-}
diff --git a/vendor/github.com/docker/docker/client/volume_prune.go b/vendor/github.com/docker/docker/client/volume_prune.go
deleted file mode 100644
index 6e324708f2..0000000000
--- a/vendor/github.com/docker/docker/client/volume_prune.go
+++ /dev/null
@@ -1,36 +0,0 @@
-package client // import "github.com/docker/docker/client"
-
-import (
- "context"
- "encoding/json"
- "fmt"
-
- "github.com/docker/docker/api/types"
- "github.com/docker/docker/api/types/filters"
-)
-
-// VolumesPrune requests the daemon to delete unused data
-func (cli *Client) VolumesPrune(ctx context.Context, pruneFilters filters.Args) (types.VolumesPruneReport, error) {
- var report types.VolumesPruneReport
-
- if err := cli.NewVersionError("1.25", "volume prune"); err != nil {
- return report, err
- }
-
- query, err := getFiltersQuery(pruneFilters)
- if err != nil {
- return report, err
- }
-
- serverResp, err := cli.post(ctx, "/volumes/prune", query, nil, nil)
- defer ensureReaderClosed(serverResp)
- if err != nil {
- return report, err
- }
-
- if err := json.NewDecoder(serverResp.body).Decode(&report); err != nil {
- return report, fmt.Errorf("Error retrieving volume prune report: %v", err)
- }
-
- return report, nil
-}
diff --git a/vendor/github.com/docker/docker/client/volume_remove.go b/vendor/github.com/docker/docker/client/volume_remove.go
deleted file mode 100644
index 79decdafab..0000000000
--- a/vendor/github.com/docker/docker/client/volume_remove.go
+++ /dev/null
@@ -1,21 +0,0 @@
-package client // import "github.com/docker/docker/client"
-
-import (
- "context"
- "net/url"
-
- "github.com/docker/docker/api/types/versions"
-)
-
-// VolumeRemove removes a volume from the docker host.
-func (cli *Client) VolumeRemove(ctx context.Context, volumeID string, force bool) error {
- query := url.Values{}
- if versions.GreaterThanOrEqualTo(cli.version, "1.25") {
- if force {
- query.Set("force", "1")
- }
- }
- resp, err := cli.delete(ctx, "/volumes/"+volumeID, query, nil)
- defer ensureReaderClosed(resp)
- return wrapResponseError(err, resp, "volume", volumeID)
-}
diff --git a/vendor/github.com/docker/go-connections/sockets/BUILD.bazel b/vendor/github.com/docker/go-connections/sockets/BUILD.bazel
deleted file mode 100644
index 71741d2409..0000000000
--- a/vendor/github.com/docker/go-connections/sockets/BUILD.bazel
+++ /dev/null
@@ -1,25 +0,0 @@
-load("@io_bazel_rules_go//go:def.bzl", "go_library")
-
-go_library(
- name = "go_default_library",
- srcs = [
- "inmem_socket.go",
- "proxy.go",
- "sockets.go",
- "sockets_unix.go",
- "sockets_windows.go",
- "tcp_socket.go",
- "unix_socket.go",
- ],
- importmap = "k8s.io/kops/vendor/github.com/docker/go-connections/sockets",
- importpath = "github.com/docker/go-connections/sockets",
- visibility = ["//visibility:public"],
- deps = [
- "//vendor/golang.org/x/net/proxy:go_default_library",
- ] + select({
- "@io_bazel_rules_go//go/platform:windows": [
- "//vendor/github.com/Microsoft/go-winio:go_default_library",
- ],
- "//conditions:default": [],
- }),
-)
diff --git a/vendor/github.com/docker/go-connections/sockets/README.md b/vendor/github.com/docker/go-connections/sockets/README.md
deleted file mode 100644
index e69de29bb2..0000000000
diff --git a/vendor/github.com/docker/go-connections/sockets/inmem_socket.go b/vendor/github.com/docker/go-connections/sockets/inmem_socket.go
deleted file mode 100644
index 99846ffddb..0000000000
--- a/vendor/github.com/docker/go-connections/sockets/inmem_socket.go
+++ /dev/null
@@ -1,81 +0,0 @@
-package sockets
-
-import (
- "errors"
- "net"
- "sync"
-)
-
-var errClosed = errors.New("use of closed network connection")
-
-// InmemSocket implements net.Listener using in-memory only connections.
-type InmemSocket struct {
- chConn chan net.Conn
- chClose chan struct{}
- addr string
- mu sync.Mutex
-}
-
-// dummyAddr is used to satisfy net.Addr for the in-mem socket
-// it is just stored as a string and returns the string for all calls
-type dummyAddr string
-
-// NewInmemSocket creates an in-memory only net.Listener
-// The addr argument can be any string, but is used to satisfy the `Addr()` part
-// of the net.Listener interface
-func NewInmemSocket(addr string, bufSize int) *InmemSocket {
- return &InmemSocket{
- chConn: make(chan net.Conn, bufSize),
- chClose: make(chan struct{}),
- addr: addr,
- }
-}
-
-// Addr returns the socket's addr string to satisfy net.Listener
-func (s *InmemSocket) Addr() net.Addr {
- return dummyAddr(s.addr)
-}
-
-// Accept implements the Accept method in the Listener interface; it waits for the next call and returns a generic Conn.
-func (s *InmemSocket) Accept() (net.Conn, error) {
- select {
- case conn := <-s.chConn:
- return conn, nil
- case <-s.chClose:
- return nil, errClosed
- }
-}
-
-// Close closes the listener. It will be unavailable for use once closed.
-func (s *InmemSocket) Close() error {
- s.mu.Lock()
- defer s.mu.Unlock()
- select {
- case <-s.chClose:
- default:
- close(s.chClose)
- }
- return nil
-}
-
-// Dial is used to establish a connection with the in-mem server
-func (s *InmemSocket) Dial(network, addr string) (net.Conn, error) {
- srvConn, clientConn := net.Pipe()
- select {
- case s.chConn <- srvConn:
- case <-s.chClose:
- return nil, errClosed
- }
-
- return clientConn, nil
-}
-
-// Network returns the addr string, satisfies net.Addr
-func (a dummyAddr) Network() string {
- return string(a)
-}
-
-// String returns the string form
-func (a dummyAddr) String() string {
- return string(a)
-}
diff --git a/vendor/github.com/docker/go-connections/sockets/proxy.go b/vendor/github.com/docker/go-connections/sockets/proxy.go
deleted file mode 100644
index 98e9a1dc61..0000000000
--- a/vendor/github.com/docker/go-connections/sockets/proxy.go
+++ /dev/null
@@ -1,51 +0,0 @@
-package sockets
-
-import (
- "net"
- "net/url"
- "os"
- "strings"
-
- "golang.org/x/net/proxy"
-)
-
-// GetProxyEnv allows access to the uppercase and the lowercase forms of
-// proxy-related variables. See the Go specification for details on these
-// variables. https://golang.org/pkg/net/http/
-func GetProxyEnv(key string) string {
- proxyValue := os.Getenv(strings.ToUpper(key))
- if proxyValue == "" {
- return os.Getenv(strings.ToLower(key))
- }
- return proxyValue
-}
-
-// DialerFromEnvironment takes in a "direct" *net.Dialer and returns a
-// proxy.Dialer which will route the connections through the proxy using the
-// given dialer.
-func DialerFromEnvironment(direct *net.Dialer) (proxy.Dialer, error) {
- allProxy := GetProxyEnv("all_proxy")
- if len(allProxy) == 0 {
- return direct, nil
- }
-
- proxyURL, err := url.Parse(allProxy)
- if err != nil {
- return direct, err
- }
-
- proxyFromURL, err := proxy.FromURL(proxyURL, direct)
- if err != nil {
- return direct, err
- }
-
- noProxy := GetProxyEnv("no_proxy")
- if len(noProxy) == 0 {
- return proxyFromURL, nil
- }
-
- perHost := proxy.NewPerHost(proxyFromURL, direct)
- perHost.AddFromString(noProxy)
-
- return perHost, nil
-}
diff --git a/vendor/github.com/docker/go-connections/sockets/sockets.go b/vendor/github.com/docker/go-connections/sockets/sockets.go
deleted file mode 100644
index a1d7beb4d8..0000000000
--- a/vendor/github.com/docker/go-connections/sockets/sockets.go
+++ /dev/null
@@ -1,38 +0,0 @@
-// Package sockets provides helper functions to create and configure Unix or TCP sockets.
-package sockets
-
-import (
- "errors"
- "net"
- "net/http"
- "time"
-)
-
-// Why 32? See https://github.com/docker/docker/pull/8035.
-const defaultTimeout = 32 * time.Second
-
-// ErrProtocolNotAvailable is returned when a given transport protocol is not provided by the operating system.
-var ErrProtocolNotAvailable = errors.New("protocol not available")
-
-// ConfigureTransport configures the specified Transport according to the
-// specified proto and addr.
-// If the proto is unix (using a unix socket to communicate) or npipe the
-// compression is disabled.
-func ConfigureTransport(tr *http.Transport, proto, addr string) error {
- switch proto {
- case "unix":
- return configureUnixTransport(tr, proto, addr)
- case "npipe":
- return configureNpipeTransport(tr, proto, addr)
- default:
- tr.Proxy = http.ProxyFromEnvironment
- dialer, err := DialerFromEnvironment(&net.Dialer{
- Timeout: defaultTimeout,
- })
- if err != nil {
- return err
- }
- tr.Dial = dialer.Dial
- }
- return nil
-}
diff --git a/vendor/github.com/docker/go-connections/sockets/sockets_unix.go b/vendor/github.com/docker/go-connections/sockets/sockets_unix.go
deleted file mode 100644
index 386cf0dbbd..0000000000
--- a/vendor/github.com/docker/go-connections/sockets/sockets_unix.go
+++ /dev/null
@@ -1,35 +0,0 @@
-// +build !windows
-
-package sockets
-
-import (
- "fmt"
- "net"
- "net/http"
- "syscall"
- "time"
-)
-
-const maxUnixSocketPathSize = len(syscall.RawSockaddrUnix{}.Path)
-
-func configureUnixTransport(tr *http.Transport, proto, addr string) error {
- if len(addr) > maxUnixSocketPathSize {
- return fmt.Errorf("Unix socket path %q is too long", addr)
- }
- // No need for compression in local communications.
- tr.DisableCompression = true
- tr.Dial = func(_, _ string) (net.Conn, error) {
- return net.DialTimeout(proto, addr, defaultTimeout)
- }
- return nil
-}
-
-func configureNpipeTransport(tr *http.Transport, proto, addr string) error {
- return ErrProtocolNotAvailable
-}
-
-// DialPipe connects to a Windows named pipe.
-// This is not supported on other OSes.
-func DialPipe(_ string, _ time.Duration) (net.Conn, error) {
- return nil, syscall.EAFNOSUPPORT
-}
diff --git a/vendor/github.com/docker/go-connections/sockets/sockets_windows.go b/vendor/github.com/docker/go-connections/sockets/sockets_windows.go
deleted file mode 100644
index 5c21644e1f..0000000000
--- a/vendor/github.com/docker/go-connections/sockets/sockets_windows.go
+++ /dev/null
@@ -1,27 +0,0 @@
-package sockets
-
-import (
- "net"
- "net/http"
- "time"
-
- "github.com/Microsoft/go-winio"
-)
-
-func configureUnixTransport(tr *http.Transport, proto, addr string) error {
- return ErrProtocolNotAvailable
-}
-
-func configureNpipeTransport(tr *http.Transport, proto, addr string) error {
- // No need for compression in local communications.
- tr.DisableCompression = true
- tr.Dial = func(_, _ string) (net.Conn, error) {
- return DialPipe(addr, defaultTimeout)
- }
- return nil
-}
-
-// DialPipe connects to a Windows named pipe.
-func DialPipe(addr string, timeout time.Duration) (net.Conn, error) {
- return winio.DialPipe(addr, &timeout)
-}
diff --git a/vendor/github.com/docker/go-connections/sockets/tcp_socket.go b/vendor/github.com/docker/go-connections/sockets/tcp_socket.go
deleted file mode 100644
index 53cbb6c79e..0000000000
--- a/vendor/github.com/docker/go-connections/sockets/tcp_socket.go
+++ /dev/null
@@ -1,22 +0,0 @@
-// Package sockets provides helper functions to create and configure Unix or TCP sockets.
-package sockets
-
-import (
- "crypto/tls"
- "net"
-)
-
-// NewTCPSocket creates a TCP socket listener with the specified address and
-// the specified tls configuration. If TLSConfig is set, will encapsulate the
-// TCP listener inside a TLS one.
-func NewTCPSocket(addr string, tlsConfig *tls.Config) (net.Listener, error) {
- l, err := net.Listen("tcp", addr)
- if err != nil {
- return nil, err
- }
- if tlsConfig != nil {
- tlsConfig.NextProtos = []string{"http/1.1"}
- l = tls.NewListener(l, tlsConfig)
- }
- return l, nil
-}
diff --git a/vendor/github.com/docker/go-connections/sockets/unix_socket.go b/vendor/github.com/docker/go-connections/sockets/unix_socket.go
deleted file mode 100644
index a8b5dbb6fd..0000000000
--- a/vendor/github.com/docker/go-connections/sockets/unix_socket.go
+++ /dev/null
@@ -1,32 +0,0 @@
-// +build !windows
-
-package sockets
-
-import (
- "net"
- "os"
- "syscall"
-)
-
-// NewUnixSocket creates a unix socket with the specified path and group.
-func NewUnixSocket(path string, gid int) (net.Listener, error) {
- if err := syscall.Unlink(path); err != nil && !os.IsNotExist(err) {
- return nil, err
- }
- mask := syscall.Umask(0777)
- defer syscall.Umask(mask)
-
- l, err := net.Listen("unix", path)
- if err != nil {
- return nil, err
- }
- if err := os.Chown(path, 0, gid); err != nil {
- l.Close()
- return nil, err
- }
- if err := os.Chmod(path, 0660); err != nil {
- l.Close()
- return nil, err
- }
- return l, nil
-}
diff --git a/vendor/github.com/google/go-containerregistry/LICENSE b/vendor/github.com/google/go-containerregistry/LICENSE
new file mode 100644
index 0000000000..7a4a3ea242
--- /dev/null
+++ b/vendor/github.com/google/go-containerregistry/LICENSE
@@ -0,0 +1,202 @@
+
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "[]"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright [yyyy] [name of copyright owner]
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
\ No newline at end of file
diff --git a/vendor/github.com/google/go-containerregistry/internal/and/BUILD.bazel b/vendor/github.com/google/go-containerregistry/internal/and/BUILD.bazel
new file mode 100644
index 0000000000..cdd2701b6a
--- /dev/null
+++ b/vendor/github.com/google/go-containerregistry/internal/and/BUILD.bazel
@@ -0,0 +1,9 @@
+load("@io_bazel_rules_go//go:def.bzl", "go_library")
+
+go_library(
+ name = "go_default_library",
+ srcs = ["and_closer.go"],
+ importmap = "k8s.io/kops/vendor/github.com/google/go-containerregistry/internal/and",
+ importpath = "github.com/google/go-containerregistry/internal/and",
+ visibility = ["//vendor/github.com/google/go-containerregistry:__subpackages__"],
+)
diff --git a/vendor/github.com/google/go-containerregistry/internal/and/and_closer.go b/vendor/github.com/google/go-containerregistry/internal/and/and_closer.go
new file mode 100644
index 0000000000..14a05eaa17
--- /dev/null
+++ b/vendor/github.com/google/go-containerregistry/internal/and/and_closer.go
@@ -0,0 +1,48 @@
+// Copyright 2020 Google LLC All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package and provides helpers for adding Close to io.{Reader|Writer}.
+package and
+
+import (
+ "io"
+)
+
+// ReadCloser implements io.ReadCloser by reading from a particular io.Reader
+// and then calling the provided "Close()" method.
+type ReadCloser struct {
+ io.Reader
+ CloseFunc func() error
+}
+
+var _ io.ReadCloser = (*ReadCloser)(nil)
+
+// Close implements io.ReadCloser
+func (rac *ReadCloser) Close() error {
+ return rac.CloseFunc()
+}
+
+// WriteCloser implements io.WriteCloser by reading from a particular io.Writer
+// and then calling the provided "Close()" method.
+type WriteCloser struct {
+ io.Writer
+ CloseFunc func() error
+}
+
+var _ io.WriteCloser = (*WriteCloser)(nil)
+
+// Close implements io.WriteCloser
+func (wac *WriteCloser) Close() error {
+ return wac.CloseFunc()
+}
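
The `and.ReadCloser`/`and.WriteCloser` helpers added above simply bolt an arbitrary close callback onto a plain `io.Reader` or `io.Writer`. A minimal sketch of how a caller inside go-containerregistry might use the reader variant (the data and the cleanup callback are illustrative, not part of this change; note the package is `internal`, so it cannot be imported from outside that module):

```go
package main

import (
	"fmt"
	"io/ioutil"
	"strings"

	"github.com/google/go-containerregistry/internal/and"
)

func main() {
	// Attach an arbitrary cleanup callback to a plain reader.
	rc := &and.ReadCloser{
		Reader: strings.NewReader("hello"),
		CloseFunc: func() error {
			fmt.Println("cleanup ran")
			return nil
		},
	}

	b, err := ioutil.ReadAll(rc)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(b)) // "hello"

	// Closing the wrapper invokes CloseFunc.
	_ = rc.Close()
}
```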
diff --git a/vendor/github.com/google/go-containerregistry/internal/gzip/BUILD.bazel b/vendor/github.com/google/go-containerregistry/internal/gzip/BUILD.bazel
new file mode 100644
index 0000000000..fe64f010c8
--- /dev/null
+++ b/vendor/github.com/google/go-containerregistry/internal/gzip/BUILD.bazel
@@ -0,0 +1,10 @@
+load("@io_bazel_rules_go//go:def.bzl", "go_library")
+
+go_library(
+ name = "go_default_library",
+ srcs = ["zip.go"],
+ importmap = "k8s.io/kops/vendor/github.com/google/go-containerregistry/internal/gzip",
+ importpath = "github.com/google/go-containerregistry/internal/gzip",
+ visibility = ["//vendor/github.com/google/go-containerregistry:__subpackages__"],
+ deps = ["//vendor/github.com/google/go-containerregistry/internal/and:go_default_library"],
+)
diff --git a/vendor/github.com/google/go-containerregistry/internal/gzip/zip.go b/vendor/github.com/google/go-containerregistry/internal/gzip/zip.go
new file mode 100644
index 0000000000..e7d673ff6b
--- /dev/null
+++ b/vendor/github.com/google/go-containerregistry/internal/gzip/zip.go
@@ -0,0 +1,117 @@
+// Copyright 2020 Google LLC All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package gzip provides helper functions for interacting with gzipped streams.
+package gzip
+
+import (
+ "bufio"
+ "bytes"
+ "compress/gzip"
+ "io"
+
+ "github.com/google/go-containerregistry/internal/and"
+)
+
+var gzipMagicHeader = []byte{'\x1f', '\x8b'}
+
+// ReadCloser reads uncompressed input data from the io.ReadCloser and
+// returns an io.ReadCloser from which compressed data may be read.
+// This uses gzip.BestSpeed for the compression level.
+func ReadCloser(r io.ReadCloser) io.ReadCloser {
+ return ReadCloserLevel(r, gzip.BestSpeed)
+}
+
+// ReadCloserLevel reads uncompressed input data from the io.ReadCloser and
+// returns an io.ReadCloser from which compressed data may be read.
+// Refer to compress/gzip for the level:
+// https://golang.org/pkg/compress/gzip/#pkg-constants
+func ReadCloserLevel(r io.ReadCloser, level int) io.ReadCloser {
+ pr, pw := io.Pipe()
+
+ // For highly compressible layers, gzip.Writer will output a very small
+ // number of bytes per Write(). This is normally fine, but when pushing
+ // to a registry, we want to ensure that we're taking full advantage of
+ // the available bandwidth instead of sending tons of tiny writes over
+ // the wire.
+ // 64K ought to be small enough for anybody.
+ bw := bufio.NewWriterSize(pw, 2<<16)
+
+ // Returns err so we can pw.CloseWithError(err)
+ go func() error {
+ // TODO(go1.14): Just defer {pw,gw,r}.Close like you'd expect.
+ // Context: https://golang.org/issue/24283
+ gw, err := gzip.NewWriterLevel(bw, level)
+ if err != nil {
+ return pw.CloseWithError(err)
+ }
+
+ if _, err := io.Copy(gw, r); err != nil {
+ defer r.Close()
+ defer gw.Close()
+ return pw.CloseWithError(err)
+ }
+
+ // Close gzip writer to Flush it and write gzip trailers.
+ if err := gw.Close(); err != nil {
+ return pw.CloseWithError(err)
+ }
+
+ // Flush bufio writer to ensure we write out everything.
+ if err := bw.Flush(); err != nil {
+ return pw.CloseWithError(err)
+ }
+
+ // We don't really care if these fail.
+ defer pw.Close()
+ defer r.Close()
+
+ return nil
+ }()
+
+ return pr
+}
+
+// UnzipReadCloser reads compressed input data from the io.ReadCloser and
+// returns an io.ReadCloser from which uncompressed data may be read.

+func UnzipReadCloser(r io.ReadCloser) (io.ReadCloser, error) {
+ gr, err := gzip.NewReader(r)
+ if err != nil {
+ return nil, err
+ }
+ return &and.ReadCloser{
+ Reader: gr,
+ CloseFunc: func() error {
+ // If the unzip fails, then this seems to return the same
+ // error as the read. We don't want this to interfere with
+ // us closing the main ReadCloser, since this could leave
+ // an open file descriptor (fails on Windows).
+ gr.Close()
+ return r.Close()
+ },
+ }, nil
+}
+
+// Is detects whether the input stream is compressed.
+func Is(r io.Reader) (bool, error) {
+ magicHeader := make([]byte, 2)
+ n, err := r.Read(magicHeader)
+ if n == 0 && err == io.EOF {
+ return false, nil
+ }
+ if err != nil {
+ return false, err
+ }
+ return bytes.Equal(magicHeader, gzipMagicHeader), nil
+}
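
For orientation, the gzip helpers above compress a stream on the fly through an `io.Pipe` (`ReadCloser`/`ReadCloserLevel`) and decompress one while preserving the original closer (`UnzipReadCloser`). A small round-trip sketch under the assumption that both helpers are called from within the go-containerregistry module (the literal "layer contents" string is only an example):

```go
package main

import (
	"fmt"
	"io/ioutil"
	"strings"

	"github.com/google/go-containerregistry/internal/gzip"
)

func main() {
	// Compress a plain-text stream on the fly...
	src := ioutil.NopCloser(strings.NewReader("layer contents"))
	zipped := gzip.ReadCloser(src)

	// ...then decompress it again, keeping the underlying closer intact.
	unzipped, err := gzip.UnzipReadCloser(zipped)
	if err != nil {
		panic(err)
	}
	defer unzipped.Close()

	out, err := ioutil.ReadAll(unzipped)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(out)) // "layer contents"
}
```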
diff --git a/vendor/github.com/google/go-containerregistry/internal/redact/BUILD.bazel b/vendor/github.com/google/go-containerregistry/internal/redact/BUILD.bazel
new file mode 100644
index 0000000000..4a47680078
--- /dev/null
+++ b/vendor/github.com/google/go-containerregistry/internal/redact/BUILD.bazel
@@ -0,0 +1,9 @@
+load("@io_bazel_rules_go//go:def.bzl", "go_library")
+
+go_library(
+ name = "go_default_library",
+ srcs = ["redact.go"],
+ importmap = "k8s.io/kops/vendor/github.com/google/go-containerregistry/internal/redact",
+ importpath = "github.com/google/go-containerregistry/internal/redact",
+ visibility = ["//vendor/github.com/google/go-containerregistry:__subpackages__"],
+)
diff --git a/vendor/github.com/google/go-containerregistry/internal/redact/redact.go b/vendor/github.com/google/go-containerregistry/internal/redact/redact.go
new file mode 100644
index 0000000000..dc9c56b7f3
--- /dev/null
+++ b/vendor/github.com/google/go-containerregistry/internal/redact/redact.go
@@ -0,0 +1,35 @@
+// Copyright 2020 Google LLC All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package redact contains a simple context signal for redacting requests.
+package redact
+
+import (
+ "context"
+)
+
+type contextKey string
+
+var redactKey = contextKey("redact")
+
+// NewContext creates a new ctx with the reason for redaction.
+func NewContext(ctx context.Context, reason string) context.Context {
+ return context.WithValue(ctx, redactKey, reason)
+}
+
+// FromContext returns the redaction reason, if any.
+func FromContext(ctx context.Context) (bool, string) {
+ reason, ok := ctx.Value(redactKey).(string)
+ return ok, reason
+}
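
The redact package added here is just a context marker: callers tag a context with a reason, and logging code can check for that tag before printing sensitive values. A hedged sketch of the intended call pattern (the reason string is illustrative):

```go
package main

import (
	"context"
	"fmt"

	"github.com/google/go-containerregistry/internal/redact"
)

func main() {
	// Mark the context so downstream logging knows not to print query values.
	ctx := redact.NewContext(context.Background(), "URL queries contain credentials")

	if ok, reason := redact.FromContext(ctx); ok {
		fmt.Println("redacting request details:", reason)
	}
}
```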
diff --git a/vendor/github.com/google/go-containerregistry/internal/retry/BUILD.bazel b/vendor/github.com/google/go-containerregistry/internal/retry/BUILD.bazel
new file mode 100644
index 0000000000..10552828c5
--- /dev/null
+++ b/vendor/github.com/google/go-containerregistry/internal/retry/BUILD.bazel
@@ -0,0 +1,10 @@
+load("@io_bazel_rules_go//go:def.bzl", "go_library")
+
+go_library(
+ name = "go_default_library",
+ srcs = ["retry.go"],
+ importmap = "k8s.io/kops/vendor/github.com/google/go-containerregistry/internal/retry",
+ importpath = "github.com/google/go-containerregistry/internal/retry",
+ visibility = ["//vendor/github.com/google/go-containerregistry:__subpackages__"],
+ deps = ["//vendor/github.com/google/go-containerregistry/internal/retry/wait:go_default_library"],
+)
diff --git a/vendor/github.com/google/go-containerregistry/internal/retry/retry.go b/vendor/github.com/google/go-containerregistry/internal/retry/retry.go
new file mode 100644
index 0000000000..133cb1c122
--- /dev/null
+++ b/vendor/github.com/google/go-containerregistry/internal/retry/retry.go
@@ -0,0 +1,77 @@
+// Copyright 2019 Google LLC All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package retry provides methods for retrying operations. It is a thin wrapper
+// around k8s.io/apimachinery/pkg/util/wait to make certain operations easier.
+package retry
+
+import (
+ "context"
+ "fmt"
+
+ "github.com/google/go-containerregistry/internal/retry/wait"
+)
+
+// Backoff is an alias of our own wait.Backoff to avoid name conflicts with
+// the kubernetes wait package. Typing retry.Backoff is easier than fixing
+// the wrong import every time you use wait.Backoff.
+type Backoff = wait.Backoff
+
+// This is implemented by several errors in the net package as well as our
+// transport.Error.
+type temporary interface {
+ Temporary() bool
+}
+
+// IsTemporary returns true if err implements Temporary() and it returns true.
+func IsTemporary(err error) bool {
+ if err == context.DeadlineExceeded {
+ return false
+ }
+ if te, ok := err.(temporary); ok && te.Temporary() {
+ return true
+ }
+ return false
+}
+
+// IsNotNil returns true if err is not nil.
+func IsNotNil(err error) bool {
+ return err != nil
+}
+
+// Predicate determines whether an error should be retried.
+type Predicate func(error) (retry bool)
+
+// Retry retries a given function, f, until a predicate is satisfied, using
+// exponential backoff. If the predicate is never satisfied, it will return the
+// last error returned by f.
+func Retry(f func() error, p Predicate, backoff wait.Backoff) (err error) {
+ if f == nil {
+ return fmt.Errorf("nil f passed to retry")
+ }
+ if p == nil {
+ return fmt.Errorf("nil p passed to retry")
+ }
+
+ condition := func() (bool, error) {
+ err = f()
+ if p(err) {
+ return false, nil
+ }
+ return true, err
+ }
+
+ wait.ExponentialBackoff(backoff, condition)
+ return
+}
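
The `Retry` helper above keeps calling `f` until the predicate stops asking for a retry, sleeping with exponential backoff between attempts. A minimal usage sketch, assuming a flaky operation that succeeds on its third call (the function and timings are made up for illustration):

```go
package main

import (
	"fmt"
	"time"

	"github.com/google/go-containerregistry/internal/retry"
)

func main() {
	attempts := 0
	// Pretend the first two calls hit a transient failure.
	flaky := func() error {
		attempts++
		if attempts < 3 {
			return fmt.Errorf("transient failure %d", attempts)
		}
		return nil
	}

	// Retry on any non-nil error, doubling the delay each time.
	backoff := retry.Backoff{Duration: 10 * time.Millisecond, Factor: 2.0, Steps: 5}
	if err := retry.Retry(flaky, retry.IsNotNil, backoff); err != nil {
		panic(err)
	}
	fmt.Println("succeeded after", attempts, "attempts")
}
```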
diff --git a/vendor/github.com/google/go-containerregistry/internal/retry/wait/BUILD.bazel b/vendor/github.com/google/go-containerregistry/internal/retry/wait/BUILD.bazel
new file mode 100644
index 0000000000..6377b217a8
--- /dev/null
+++ b/vendor/github.com/google/go-containerregistry/internal/retry/wait/BUILD.bazel
@@ -0,0 +1,9 @@
+load("@io_bazel_rules_go//go:def.bzl", "go_library")
+
+go_library(
+ name = "go_default_library",
+ srcs = ["kubernetes_apimachinery_wait.go"],
+ importmap = "k8s.io/kops/vendor/github.com/google/go-containerregistry/internal/retry/wait",
+ importpath = "github.com/google/go-containerregistry/internal/retry/wait",
+ visibility = ["//vendor/github.com/google/go-containerregistry:__subpackages__"],
+)
diff --git a/vendor/github.com/google/go-containerregistry/internal/retry/wait/kubernetes_apimachinery_wait.go b/vendor/github.com/google/go-containerregistry/internal/retry/wait/kubernetes_apimachinery_wait.go
new file mode 100644
index 0000000000..ab06e5f109
--- /dev/null
+++ b/vendor/github.com/google/go-containerregistry/internal/retry/wait/kubernetes_apimachinery_wait.go
@@ -0,0 +1,123 @@
+/*
+Copyright 2014 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Package wait is a subset of k8s.io/apimachinery to avoid conflicts
+// in dependencies (specifically, logging).
+package wait
+
+import (
+ "errors"
+ "math/rand"
+ "time"
+)
+
+// Jitter returns a time.Duration between duration and duration + maxFactor *
+// duration.
+//
+// This allows clients to avoid converging on periodic behavior. If maxFactor
+// is 0.0, a suggested default value will be chosen.
+func Jitter(duration time.Duration, maxFactor float64) time.Duration {
+ if maxFactor <= 0.0 {
+ maxFactor = 1.0
+ }
+ wait := duration + time.Duration(rand.Float64()*maxFactor*float64(duration))
+ return wait
+}
+
+// ErrWaitTimeout is returned when the condition exited without success.
+var ErrWaitTimeout = errors.New("timed out waiting for the condition")
+
+// ConditionFunc returns true if the condition is satisfied, or an error
+// if the loop should be aborted.
+type ConditionFunc func() (done bool, err error)
+
+// Backoff holds parameters applied to a Backoff function.
+type Backoff struct {
+ // The initial duration.
+ Duration time.Duration
+ // Duration is multiplied by factor each iteration, if factor is not zero
+ // and the limits imposed by Steps and Cap have not been reached.
+ // Should not be negative.
+ // The jitter does not contribute to the updates to the duration parameter.
+ Factor float64
+ // The sleep at each iteration is the duration plus an additional
+ // amount chosen uniformly at random from the interval between
+ // zero and `jitter*duration`.
+ Jitter float64
+ // The remaining number of iterations in which the duration
+ // parameter may change (but progress can be stopped earlier by
+ // hitting the cap). If not positive, the duration is not
+ // changed. Used for exponential backoff in combination with
+ // Factor and Cap.
+ Steps int
+ // A limit on revised values of the duration parameter. If a
+ // multiplication by the factor parameter would make the duration
+ // exceed the cap then the duration is set to the cap and the
+ // steps parameter is set to zero.
+ Cap time.Duration
+}
+
+// Step (1) returns an amount of time to sleep determined by the
+// original Duration and Jitter and (2) mutates the provided Backoff
+// to update its Steps and Duration.
+func (b *Backoff) Step() time.Duration {
+ if b.Steps < 1 {
+ if b.Jitter > 0 {
+ return Jitter(b.Duration, b.Jitter)
+ }
+ return b.Duration
+ }
+ b.Steps--
+
+ duration := b.Duration
+
+ // calculate the next step
+ if b.Factor != 0 {
+ b.Duration = time.Duration(float64(b.Duration) * b.Factor)
+ if b.Cap > 0 && b.Duration > b.Cap {
+ b.Duration = b.Cap
+ b.Steps = 0
+ }
+ }
+
+ if b.Jitter > 0 {
+ duration = Jitter(duration, b.Jitter)
+ }
+ return duration
+}
+
+// ExponentialBackoff repeats a condition check with exponential backoff.
+//
+// It repeatedly checks the condition and then sleeps, using `backoff.Step()`
+// to determine the length of the sleep and adjust Duration and Steps.
+// Stops and returns as soon as:
+// 1. the condition check returns true or an error,
+// 2. `backoff.Steps` checks of the condition have been done, or
+// 3. a sleep truncated by the cap on duration has been completed.
+// In case (1) the returned error is what the condition function returned.
+// In all other cases, ErrWaitTimeout is returned.
+func ExponentialBackoff(backoff Backoff, condition ConditionFunc) error {
+ for backoff.Steps > 0 {
+ if ok, err := condition(); err != nil || ok {
+ return err
+ }
+ if backoff.Steps == 1 {
+ break
+ }
+ time.Sleep(backoff.Step())
+ }
+ return ErrWaitTimeout
+}
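
To make the `Backoff.Step` semantics concrete: with no jitter, each call returns the current duration and then multiplies it by `Factor`, clamping at `Cap` and decrementing `Steps`. A small illustration (values chosen arbitrarily):

```go
package main

import (
	"fmt"
	"time"

	"github.com/google/go-containerregistry/internal/retry/wait"
)

func main() {
	// With no jitter, successive Step() calls double the sleep until Cap.
	b := wait.Backoff{Duration: 100 * time.Millisecond, Factor: 2.0, Steps: 4, Cap: time.Second}
	for i := 0; i < 4; i++ {
		fmt.Println(b.Step()) // 100ms, 200ms, 400ms, 800ms
	}
}
```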
diff --git a/vendor/github.com/google/go-containerregistry/internal/verify/BUILD.bazel b/vendor/github.com/google/go-containerregistry/internal/verify/BUILD.bazel
new file mode 100644
index 0000000000..ad40fa628f
--- /dev/null
+++ b/vendor/github.com/google/go-containerregistry/internal/verify/BUILD.bazel
@@ -0,0 +1,13 @@
+load("@io_bazel_rules_go//go:def.bzl", "go_library")
+
+go_library(
+ name = "go_default_library",
+ srcs = ["verify.go"],
+ importmap = "k8s.io/kops/vendor/github.com/google/go-containerregistry/internal/verify",
+ importpath = "github.com/google/go-containerregistry/internal/verify",
+ visibility = ["//vendor/github.com/google/go-containerregistry:__subpackages__"],
+ deps = [
+ "//vendor/github.com/google/go-containerregistry/internal/and:go_default_library",
+ "//vendor/github.com/google/go-containerregistry/pkg/v1:go_default_library",
+ ],
+)
diff --git a/vendor/github.com/google/go-containerregistry/internal/verify/verify.go b/vendor/github.com/google/go-containerregistry/internal/verify/verify.go
new file mode 100644
index 0000000000..4446803800
--- /dev/null
+++ b/vendor/github.com/google/go-containerregistry/internal/verify/verify.go
@@ -0,0 +1,64 @@
+// Copyright 2020 Google LLC All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package verify provides a ReadCloser that verifies content matches the
+// expected hash values.
+package verify
+
+import (
+ "encoding/hex"
+ "fmt"
+ "hash"
+ "io"
+
+ "github.com/google/go-containerregistry/internal/and"
+ v1 "github.com/google/go-containerregistry/pkg/v1"
+)
+
+type verifyReader struct {
+ inner io.Reader
+ hasher hash.Hash
+ expected v1.Hash
+}
+
+// Read implements io.Reader
+func (vc *verifyReader) Read(b []byte) (int, error) {
+ n, err := vc.inner.Read(b)
+ if err == io.EOF {
+ got := hex.EncodeToString(vc.hasher.Sum(make([]byte, 0, vc.hasher.Size())))
+ if want := vc.expected.Hex; got != want {
+ return n, fmt.Errorf("error verifying %s checksum; got %q, want %q",
+ vc.expected.Algorithm, got, want)
+ }
+ }
+ return n, err
+}
+
+// ReadCloser wraps the given io.ReadCloser to verify that its contents match
+// the provided v1.Hash before io.EOF is returned.
+func ReadCloser(r io.ReadCloser, h v1.Hash) (io.ReadCloser, error) {
+ w, err := v1.Hasher(h.Algorithm)
+ if err != nil {
+ return nil, err
+ }
+ r2 := io.TeeReader(r, w)
+ return &and.ReadCloser{
+ Reader: &verifyReader{
+ inner: r2,
+ hasher: w,
+ expected: h,
+ },
+ CloseFunc: r.Close,
+ }, nil
+}
diff --git a/vendor/github.com/google/go-containerregistry/pkg/authn/BUILD.bazel b/vendor/github.com/google/go-containerregistry/pkg/authn/BUILD.bazel
new file mode 100644
index 0000000000..1f24121b0f
--- /dev/null
+++ b/vendor/github.com/google/go-containerregistry/pkg/authn/BUILD.bazel
@@ -0,0 +1,23 @@
+load("@io_bazel_rules_go//go:def.bzl", "go_library")
+
+go_library(
+ name = "go_default_library",
+ srcs = [
+ "anon.go",
+ "auth.go",
+ "authn.go",
+ "basic.go",
+ "bearer.go",
+ "doc.go",
+ "keychain.go",
+ "multikeychain.go",
+ ],
+ importmap = "k8s.io/kops/vendor/github.com/google/go-containerregistry/pkg/authn",
+ importpath = "github.com/google/go-containerregistry/pkg/authn",
+ visibility = ["//visibility:public"],
+ deps = [
+ "//vendor/github.com/docker/cli/cli/config:go_default_library",
+ "//vendor/github.com/docker/cli/cli/config/types:go_default_library",
+ "//vendor/github.com/google/go-containerregistry/pkg/name:go_default_library",
+ ],
+)
diff --git a/vendor/github.com/google/go-containerregistry/pkg/authn/README.md b/vendor/github.com/google/go-containerregistry/pkg/authn/README.md
new file mode 100644
index 0000000000..1eb17c7ab1
--- /dev/null
+++ b/vendor/github.com/google/go-containerregistry/pkg/authn/README.md
@@ -0,0 +1,242 @@
+# `authn`
+
+[GoDoc](https://godoc.org/github.com/google/go-containerregistry/pkg/authn)
+
+This README outlines how we acquire and use credentials when interacting with a registry.
+
+As much as possible, we attempt to emulate docker's authentication behavior and configuration so that this library "just works" if you've already configured credentials that work with docker; however, when things don't work, a basic understanding of what's going on can help with debugging.
+
+The official documentation for how docker authentication works is (reasonably) scattered across several different sites and GitHub repositories, so we've tried to summarize the relevant bits here.
+
+## tl;dr for consumers of this package
+
+By default, [`pkg/v1/remote`](https://godoc.org/github.com/google/go-containerregistry/pkg/v1/remote) uses [`Anonymous`](https://godoc.org/github.com/google/go-containerregistry/pkg/authn#Anonymous) credentials (i.e. _none_), which for most registries will only allow read access to public images.
+
+To use the credentials found in your docker config file, you can use the [`DefaultKeychain`](https://godoc.org/github.com/google/go-containerregistry/pkg/authn#DefaultKeychain), e.g.:
+
+```go
+package main
+
+import (
+ "fmt"
+
+ "github.com/google/go-containerregistry/pkg/authn"
+ "github.com/google/go-containerregistry/pkg/name"
+ "github.com/google/go-containerregistry/pkg/v1/remote"
+)
+
+func main() {
+ ref, err := name.ParseReference("registry.example.com/private/repo")
+ if err != nil {
+ panic(err)
+ }
+
+ // Fetch the manifest using default credentials.
+ img, err := remote.Get(ref, remote.WithAuthFromKeychain(authn.DefaultKeychain))
+ if err != nil {
+ panic(err)
+ }
+
+ // Prints the digest of registry.example.com/private/repo
+ fmt.Println(img.Digest)
+}
+```
+
+(If you're only using [gcr.io](https://gcr.io), see the [`pkg/v1/google.Keychain`](https://godoc.org/github.com/google/go-containerregistry/pkg/v1/google#Keychain), which emulates [`docker-credential-gcr`](https://github.com/GoogleCloudPlatform/docker-credential-gcr).)
+
+## The Config File
+
+This file contains various configuration options for docker and is (by default) located at:
+* `$HOME/.docker/config.json` (on linux and darwin), or
+* `%USERPROFILE%\.docker\config.json` (on windows).
+
+You can override this location with the `DOCKER_CONFIG` environment variable.
+
+### Plaintext
+
+The config file is where your credentials are stored when you invoke `docker login`, e.g. the contents may look something like this:
+
+```json
+{
+ "auths": {
+ "registry.example.com": {
+ "auth": "QXp1cmVEaWFtb25kOmh1bnRlcjI="
+ }
+ }
+}
+```
+
+The `auths` map has an entry per registry, and the `auth` field contains your username and password encoded as [HTTP 'Basic' Auth](https://tools.ietf.org/html/rfc7617).
+
+**NOTE**: This means that your credentials are stored _in plaintext_:
+
+```bash
+$ echo "QXp1cmVEaWFtb25kOmh1bnRlcjI=" | base64 -d
+AzureDiamond:hunter2
+```
+
+For what it's worth, this config file is equivalent to:
+
+```json
+{
+ "auths": {
+ "registry.example.com": {
+ "username": "AzureDiamond",
+ "password": "hunter2"
+ }
+ }
+}
+```
+
+... which is useful to know if e.g. your CI system provides you a registry username and password via environment variables and you want to populate this file manually without invoking `docker login`.
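+
+As a rough sketch (standard library only; the environment variable names and
+registry domain below are illustrative, not anything this package requires),
+producing that `auth` value and a matching config file might look like:
+
+```go
+package main
+
+import (
+	"encoding/base64"
+	"encoding/json"
+	"fmt"
+	"os"
+)
+
+func main() {
+	// Illustrative variable names; substitute whatever your CI system provides.
+	user := os.Getenv("REGISTRY_USER")
+	pass := os.Getenv("REGISTRY_PASS")
+
+	// The "auth" field is just base64(username + ":" + password), i.e. the
+	// HTTP Basic Auth credentials described above.
+	auth := base64.StdEncoding.EncodeToString([]byte(user + ":" + pass))
+
+	cfg := map[string]interface{}{
+		"auths": map[string]interface{}{
+			"registry.example.com": map[string]string{"auth": auth},
+		},
+	}
+
+	// In CI you would typically write this to $DOCKER_CONFIG/config.json
+	// (or $HOME/.docker/config.json); here we just print it.
+	out, err := json.MarshalIndent(cfg, "", "  ")
+	if err != nil {
+		panic(err)
+	}
+	fmt.Println(string(out))
+}
+```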
+
+### Helpers
+
+If you log in like this, docker will warn you that you should use a [credential helper](https://docs.docker.com/engine/reference/commandline/login/#credentials-store), and you should!
+
+To configure a global credential helper:
+```json
+{
+ "credsStore": "osxkeychain"
+}
+```
+
+To configure a per-registry credential helper:
+```json
+{
+ "credHelpers": {
+ "gcr.io": "gcr"
+ }
+}
+```
+
+We use [`github.com/docker/cli/cli/config.Load`](https://godoc.org/github.com/docker/cli/cli/config#Load) to parse the config file and invoke any necessary credential helpers. This handles the logic of taking a [`ConfigFile`](https://github.com/docker/cli/blob/ba63a92655c0bea4857b8d6cc4991498858b3c60/cli/config/configfile/file.go#L25-L54) + registry domain and producing an [`AuthConfig`](https://github.com/docker/cli/blob/ba63a92655c0bea4857b8d6cc4991498858b3c60/cli/config/types/authconfig.go#L3-L22), which determines how we authenticate to the registry.
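+
+For reference, a rough sketch of that same flow, calling `config.Load` and
+`GetAuthConfig` directly (the `gcr.io` domain is just an example):
+
+```go
+package main
+
+import (
+	"fmt"
+	"os"
+
+	"github.com/docker/cli/cli/config"
+)
+
+func main() {
+	// Parse the config file from $DOCKER_CONFIG (or the default location
+	// if the variable is unset).
+	cf, err := config.Load(os.Getenv("DOCKER_CONFIG"))
+	if err != nil {
+		panic(err)
+	}
+
+	// Resolve credentials for a registry domain, invoking any configured
+	// credential helper along the way.
+	authCfg, err := cf.GetAuthConfig("gcr.io")
+	if err != nil {
+		panic(err)
+	}
+	fmt.Printf("username: %q, has identity token: %t\n",
+		authCfg.Username, authCfg.IdentityToken != "")
+}
+```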
+
+## Credential Helpers
+
+The [credential helper protocol](https://github.com/docker/docker-credential-helpers) allows you to configure a binary that supplies credentials for the registry, rather than hard-coding them in the config file.
+
+The protocol has several verbs, but the one we most care about is `get`.
+
+For example, using the following config file:
+```json
+{
+ "credHelpers": {
+ "gcr.io": "gcr",
+ "eu.gcr.io": "gcr"
+ }
+}
+```
+
+To acquire credentials for `gcr.io`, we look in the `credHelpers` map and find
+that the credential helper for `gcr.io` is `gcr`. Appending that value to
+`docker-credential-` gives us the name of the binary we need to use.
+
+For this example, that's `docker-credential-gcr`, which must be on our `$PATH`.
+We'll then invoke that binary to get credentials:
+
+```bash
+$ echo "gcr.io" | docker-credential-gcr get
+{"Username":"_token","Secret":""}
+```
+
+You can configure the same credential helper for multiple registries, which is
+why we need to pass the domain in via STDIN, e.g. if we were trying to access
+`eu.gcr.io`, we'd do this instead:
+
+```bash
+$ echo "eu.gcr.io" | docker-credential-gcr get
+{"Username":"_token","Secret":""}
+```
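+
+If you ever need to drive that protocol by hand rather than through
+`config.Load`, a rough sketch of the `get` call from Go might look like this
+(it assumes `docker-credential-gcr` is on your `$PATH`, as above):
+
+```go
+package main
+
+import (
+	"encoding/json"
+	"fmt"
+	"os/exec"
+	"strings"
+)
+
+// helperCredentials mirrors the JSON a credential helper prints for "get".
+type helperCredentials struct {
+	Username string
+	Secret   string
+}
+
+func main() {
+	// The helper suffix "gcr" from the credHelpers example above maps to the
+	// binary docker-credential-gcr.
+	cmd := exec.Command("docker-credential-gcr", "get")
+	// The registry domain is passed on stdin.
+	cmd.Stdin = strings.NewReader("gcr.io")
+
+	out, err := cmd.Output()
+	if err != nil {
+		panic(err)
+	}
+
+	var creds helperCredentials
+	if err := json.Unmarshal(out, &creds); err != nil {
+		panic(err)
+	}
+	fmt.Printf("username=%q, secret length=%d\n", creds.Username, len(creds.Secret))
+}
+```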
+
+### Debugging credential helpers
+
+If a credential helper is configured but doesn't seem to be working, it can be
+challenging to debug. Implementing a fake credential helper lets you poke around
+to make it easier to see where the failure is happening.
+
+This "implements" a credential helper with hard-coded values:
+```
+#!/usr/bin/env bash
+echo '{"Username":"","Secret":"hunter2"}'
+```
+
+
+This implements a credential helper that prints the output of
+`docker-credential-gcr` to both stderr and whatever called it, which allows you
+to snoop on another credential helper:
+```
+#!/usr/bin/env bash
+docker-credential-gcr $@ | tee >(cat 1>&2)
+```
+
+Put those files somewhere on your path, naming them e.g.
+`docker-credential-hardcoded` and `docker-credential-tee`, then modify the
+config file to use them:
+
+```json
+{
+ "credHelpers": {
+ "gcr.io": "tee",
+ "eu.gcr.io": "hardcoded"
+ }
+}
+```
+
+The `docker-credential-tee` trick works with both `crane` and `docker`:
+
+```bash
+$ crane manifest gcr.io/google-containers/pause > /dev/null
+{"ServerURL":"","Username":"_dcgcr_1_5_0_token","Secret":""}
+
+$ docker pull gcr.io/google-containers/pause
+Using default tag: latest
+{"ServerURL":"","Username":"_dcgcr_1_5_0_token","Secret":""}
+latest: Pulling from google-containers/pause
+a3ed95caeb02: Pull complete
+4964c72cd024: Pull complete
+Digest: sha256:a78c2d6208eff9b672de43f880093100050983047b7b0afe0217d3656e1b0d5f
+Status: Downloaded newer image for gcr.io/google-containers/pause:latest
+gcr.io/google-containers/pause:latest
+```
+
+## The Registry
+
+There are two methods for authenticating against a registry:
+[token](https://docs.docker.com/registry/spec/auth/token/) and
+[oauth2](https://docs.docker.com/registry/spec/auth/oauth/).
+
+Both methods are used to acquire an opaque `Bearer` token (or
+[RegistryToken](https://github.com/docker/cli/blob/ba63a92655c0bea4857b8d6cc4991498858b3c60/cli/config/types/authconfig.go#L21))
+to use in the `Authorization` header. The registry will return a `401
+Unauthorized` during the [version
+check](https://github.com/opencontainers/distribution-spec/blob/2c3975d1f03b67c9a0203199038adea0413f0573/spec.md#api-version-check)
+(or during normal operations) with
+[Www-Authenticate](https://tools.ietf.org/html/rfc7235#section-4.1) challenge
+indicating how to proceed.
+
+### Token
+
+If we get back an `AuthConfig` containing a [`Username/Password`](https://github.com/docker/cli/blob/ba63a92655c0bea4857b8d6cc4991498858b3c60/cli/config/types/authconfig.go#L5-L6)
+or
+[`Auth`](https://github.com/docker/cli/blob/ba63a92655c0bea4857b8d6cc4991498858b3c60/cli/config/types/authconfig.go#L7),
+we'll use the token method for authentication.
+
+### OAuth 2
+
+If we get back an `AuthConfig` containing an [`IdentityToken`](https://github.com/docker/cli/blob/ba63a92655c0bea4857b8d6cc4991498858b3c60/cli/config/types/authconfig.go#L18)
+we'll use the oauth2 method for authentication.
+
+This happens when a credential helper returns a response with the
+[`Username`](https://github.com/docker/docker-credential-helpers/blob/f78081d1f7fef6ad74ad6b79368de6348386e591/credentials/credentials.go#L16)
+set to `<token>` (no, that's not a placeholder, the literal string `"<token>"`).
+It is unclear why: [moby/moby#36926](https://github.com/moby/moby/issues/36926).
+
+We only support the oauth2 `grant_type` for `refresh_token` ([#629](https://github.com/google/go-containerregistry/issues/629)),
+since it's impossible to determine from the registry response whether we should
+use oauth, and the token method for authentication is widely implemented by
+registries.
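+
+Tying this back to the package: if you have already obtained such a token out
+of band, one way to plug it in is `FromConfig` with a `RegistryToken`, roughly
+like this (the token value is a placeholder):
+
+```go
+package main
+
+import (
+	"fmt"
+
+	"github.com/google/go-containerregistry/pkg/authn"
+	"github.com/google/go-containerregistry/pkg/name"
+	"github.com/google/go-containerregistry/pkg/v1/remote"
+)
+
+func main() {
+	ref, err := name.ParseReference("registry.example.com/private/repo")
+	if err != nil {
+		panic(err)
+	}
+
+	// Placeholder for a Bearer token acquired out of band.
+	auth := authn.FromConfig(authn.AuthConfig{
+		RegistryToken: "my-opaque-bearer-token",
+	})
+
+	img, err := remote.Get(ref, remote.WithAuth(auth))
+	if err != nil {
+		panic(err)
+	}
+	fmt.Println(img.Digest)
+}
+```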
diff --git a/vendor/github.com/google/go-containerregistry/pkg/authn/anon.go b/vendor/github.com/google/go-containerregistry/pkg/authn/anon.go
new file mode 100644
index 0000000000..83214957d5
--- /dev/null
+++ b/vendor/github.com/google/go-containerregistry/pkg/authn/anon.go
@@ -0,0 +1,26 @@
+// Copyright 2018 Google LLC All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package authn
+
+// anonymous implements Authenticator for anonymous authentication.
+type anonymous struct{}
+
+// Authorization implements Authenticator.
+func (a *anonymous) Authorization() (*AuthConfig, error) {
+ return &AuthConfig{}, nil
+}
+
+// Anonymous is a singleton Authenticator for providing anonymous auth.
+var Anonymous Authenticator = &anonymous{}
diff --git a/vendor/github.com/google/go-containerregistry/pkg/authn/auth.go b/vendor/github.com/google/go-containerregistry/pkg/authn/auth.go
new file mode 100644
index 0000000000..0111f1ae72
--- /dev/null
+++ b/vendor/github.com/google/go-containerregistry/pkg/authn/auth.go
@@ -0,0 +1,30 @@
+// Copyright 2018 Google LLC All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package authn
+
+// auth is an Authenticator that simply returns the wrapped AuthConfig.
+type auth struct {
+ config AuthConfig
+}
+
+// FromConfig returns an Authenticator that just returns the given AuthConfig.
+func FromConfig(cfg AuthConfig) Authenticator {
+ return &auth{cfg}
+}
+
+// Authorization implements Authenticator.
+func (a *auth) Authorization() (*AuthConfig, error) {
+ return &a.config, nil
+}
diff --git a/vendor/github.com/google/go-containerregistry/pkg/authn/authn.go b/vendor/github.com/google/go-containerregistry/pkg/authn/authn.go
new file mode 100644
index 0000000000..690e81d058
--- /dev/null
+++ b/vendor/github.com/google/go-containerregistry/pkg/authn/authn.go
@@ -0,0 +1,36 @@
+// Copyright 2018 Google LLC All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package authn
+
+// AuthConfig contains authorization information for connecting to a Registry.
+// It inlines the fields we use from github.com/docker/cli/cli/config/types.
+type AuthConfig struct {
+ Username string `json:"username,omitempty"`
+ Password string `json:"password,omitempty"`
+ Auth string `json:"auth,omitempty"`
+
+ // IdentityToken is used to authenticate the user and get
+ // an access token for the registry.
+ IdentityToken string `json:"identitytoken,omitempty"`
+
+ // RegistryToken is a bearer token to be sent to a registry
+ RegistryToken string `json:"registrytoken,omitempty"`
+}
+
+// Authenticator is used to authenticate Docker transports.
+type Authenticator interface {
+ // Authorization returns the value to use in an http transport's Authorization header.
+ Authorization() (*AuthConfig, error)
+}
diff --git a/vendor/github.com/google/go-containerregistry/pkg/authn/basic.go b/vendor/github.com/google/go-containerregistry/pkg/authn/basic.go
new file mode 100644
index 0000000000..500cb6616f
--- /dev/null
+++ b/vendor/github.com/google/go-containerregistry/pkg/authn/basic.go
@@ -0,0 +1,29 @@
+// Copyright 2018 Google LLC All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package authn
+
+// Basic implements Authenticator for basic authentication.
+type Basic struct {
+ Username string
+ Password string
+}
+
+// Authorization implements Authenticator.
+func (b *Basic) Authorization() (*AuthConfig, error) {
+ return &AuthConfig{
+ Username: b.Username,
+ Password: b.Password,
+ }, nil
+}
diff --git a/vendor/github.com/google/go-containerregistry/pkg/authn/bearer.go b/vendor/github.com/google/go-containerregistry/pkg/authn/bearer.go
new file mode 100644
index 0000000000..4cf86df92f
--- /dev/null
+++ b/vendor/github.com/google/go-containerregistry/pkg/authn/bearer.go
@@ -0,0 +1,27 @@
+// Copyright 2018 Google LLC All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package authn
+
+// Bearer implements Authenticator for bearer authentication.
+type Bearer struct {
+ Token string `json:"token"`
+}
+
+// Authorization implements Authenticator.
+func (b *Bearer) Authorization() (*AuthConfig, error) {
+ return &AuthConfig{
+ RegistryToken: b.Token,
+ }, nil
+}
diff --git a/vendor/github.com/google/go-containerregistry/pkg/authn/doc.go b/vendor/github.com/google/go-containerregistry/pkg/authn/doc.go
new file mode 100644
index 0000000000..c2a5fc0267
--- /dev/null
+++ b/vendor/github.com/google/go-containerregistry/pkg/authn/doc.go
@@ -0,0 +1,17 @@
+// Copyright 2018 Google LLC All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package authn defines different methods of authentication for
+// talking to a container registry.
+package authn
diff --git a/vendor/github.com/google/go-containerregistry/pkg/authn/keychain.go b/vendor/github.com/google/go-containerregistry/pkg/authn/keychain.go
new file mode 100644
index 0000000000..60eebc7599
--- /dev/null
+++ b/vendor/github.com/google/go-containerregistry/pkg/authn/keychain.go
@@ -0,0 +1,89 @@
+// Copyright 2018 Google LLC All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package authn
+
+import (
+ "os"
+
+ "github.com/docker/cli/cli/config"
+ "github.com/docker/cli/cli/config/types"
+ "github.com/google/go-containerregistry/pkg/name"
+)
+
+// Resource represents a registry or repository that can be authenticated against.
+type Resource interface {
+ // String returns the full string representation of the target, e.g.
+ // gcr.io/my-project or just gcr.io.
+ String() string
+
+ // RegistryStr returns just the registry portion of the target, e.g. for
+ // gcr.io/my-project, this should just return gcr.io. This is needed to
+ // pull out an appropriate hostname.
+ RegistryStr() string
+}
+
+// Keychain is an interface for resolving an image reference to a credential.
+type Keychain interface {
+ // Resolve looks up the most appropriate credential for the specified target.
+ Resolve(Resource) (Authenticator, error)
+}
+
+// defaultKeychain implements Keychain with the semantics of the standard Docker
+// credential keychain.
+type defaultKeychain struct{}
+
+var (
+ // DefaultKeychain implements Keychain by interpreting the docker config file.
+ DefaultKeychain Keychain = &defaultKeychain{}
+)
+
+const (
+ // DefaultAuthKey is the key used for dockerhub in config files, which
+ // is hardcoded for historical reasons.
+ DefaultAuthKey = "https://" + name.DefaultRegistry + "/v1/"
+)
+
+// Resolve implements Keychain.
+func (dk *defaultKeychain) Resolve(target Resource) (Authenticator, error) {
+ cf, err := config.Load(os.Getenv("DOCKER_CONFIG"))
+ if err != nil {
+ return nil, err
+ }
+
+ // See:
+ // https://github.com/google/ko/issues/90
+ // https://github.com/moby/moby/blob/fc01c2b481097a6057bec3cd1ab2d7b4488c50c4/registry/config.go#L397-L404
+ key := target.RegistryStr()
+ if key == name.DefaultRegistry {
+ key = DefaultAuthKey
+ }
+
+ cfg, err := cf.GetAuthConfig(key)
+ if err != nil {
+ return nil, err
+ }
+
+ empty := types.AuthConfig{}
+ if cfg == empty {
+ return Anonymous, nil
+ }
+ return FromConfig(AuthConfig{
+ Username: cfg.Username,
+ Password: cfg.Password,
+ Auth: cfg.Auth,
+ IdentityToken: cfg.IdentityToken,
+ RegistryToken: cfg.RegistryToken,
+ }), nil
+}
diff --git a/vendor/github.com/google/go-containerregistry/pkg/authn/multikeychain.go b/vendor/github.com/google/go-containerregistry/pkg/authn/multikeychain.go
new file mode 100644
index 0000000000..3b1804f5d0
--- /dev/null
+++ b/vendor/github.com/google/go-containerregistry/pkg/authn/multikeychain.go
@@ -0,0 +1,41 @@
+// Copyright 2018 Google LLC All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package authn
+
+type multiKeychain struct {
+ keychains []Keychain
+}
+
+// Assert that our multi-keychain implements Keychain.
+var _ (Keychain) = (*multiKeychain)(nil)
+
+// NewMultiKeychain composes a list of keychains into one new keychain.
+func NewMultiKeychain(kcs ...Keychain) Keychain {
+ return &multiKeychain{keychains: kcs}
+}
+
+// Resolve implements Keychain.
+func (mk *multiKeychain) Resolve(target Resource) (Authenticator, error) {
+ for _, kc := range mk.keychains {
+ auth, err := kc.Resolve(target)
+ if err != nil {
+ return nil, err
+ }
+ if auth != Anonymous {
+ return auth, nil
+ }
+ }
+ return Anonymous, nil
+}
diff --git a/vendor/github.com/google/go-containerregistry/pkg/logs/BUILD.bazel b/vendor/github.com/google/go-containerregistry/pkg/logs/BUILD.bazel
new file mode 100644
index 0000000000..f4d788b1c5
--- /dev/null
+++ b/vendor/github.com/google/go-containerregistry/pkg/logs/BUILD.bazel
@@ -0,0 +1,9 @@
+load("@io_bazel_rules_go//go:def.bzl", "go_library")
+
+go_library(
+ name = "go_default_library",
+ srcs = ["logs.go"],
+ importmap = "k8s.io/kops/vendor/github.com/google/go-containerregistry/pkg/logs",
+ importpath = "github.com/google/go-containerregistry/pkg/logs",
+ visibility = ["//visibility:public"],
+)
diff --git a/vendor/github.com/google/go-containerregistry/pkg/logs/logs.go b/vendor/github.com/google/go-containerregistry/pkg/logs/logs.go
new file mode 100644
index 0000000000..5d25d63d61
--- /dev/null
+++ b/vendor/github.com/google/go-containerregistry/pkg/logs/logs.go
@@ -0,0 +1,39 @@
+// Copyright 2018 Google LLC All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package logs exposes the loggers used by this library.
+package logs
+
+import (
+ "io/ioutil"
+ "log"
+)
+
+var (
+ // Warn is used to log non-fatal errors.
+ Warn = log.New(ioutil.Discard, "", log.LstdFlags)
+
+ // Progress is used to log notable, successful events.
+ Progress = log.New(ioutil.Discard, "", log.LstdFlags)
+
+ // Debug is used to log information that is useful for debugging.
+ Debug = log.New(ioutil.Discard, "", log.LstdFlags)
+)
+
+// Enabled checks to see if the logger's writer is set to something other
+// than ioutil.Discard. This allows callers to avoid expensive operations
+// that will end up in /dev/null anyway.
+func Enabled(l *log.Logger) bool {
+ return l.Writer() != ioutil.Discard
+}
diff --git a/vendor/github.com/google/go-containerregistry/pkg/name/BUILD.bazel b/vendor/github.com/google/go-containerregistry/pkg/name/BUILD.bazel
new file mode 100644
index 0000000000..ce740c7b35
--- /dev/null
+++ b/vendor/github.com/google/go-containerregistry/pkg/name/BUILD.bazel
@@ -0,0 +1,19 @@
+load("@io_bazel_rules_go//go:def.bzl", "go_library")
+
+go_library(
+ name = "go_default_library",
+ srcs = [
+ "check.go",
+ "digest.go",
+ "doc.go",
+ "errors.go",
+ "options.go",
+ "ref.go",
+ "registry.go",
+ "repository.go",
+ "tag.go",
+ ],
+ importmap = "k8s.io/kops/vendor/github.com/google/go-containerregistry/pkg/name",
+ importpath = "github.com/google/go-containerregistry/pkg/name",
+ visibility = ["//visibility:public"],
+)
diff --git a/vendor/github.com/google/go-containerregistry/pkg/name/README.md b/vendor/github.com/google/go-containerregistry/pkg/name/README.md
new file mode 100644
index 0000000000..4889b8446a
--- /dev/null
+++ b/vendor/github.com/google/go-containerregistry/pkg/name/README.md
@@ -0,0 +1,3 @@
+# `name`
+
+[GoDoc](https://godoc.org/github.com/google/go-containerregistry/pkg/name)
diff --git a/vendor/github.com/google/go-containerregistry/pkg/name/check.go b/vendor/github.com/google/go-containerregistry/pkg/name/check.go
new file mode 100644
index 0000000000..01b03e5626
--- /dev/null
+++ b/vendor/github.com/google/go-containerregistry/pkg/name/check.go
@@ -0,0 +1,43 @@
+// Copyright 2018 Google LLC All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package name
+
+import (
+ "strings"
+ "unicode/utf8"
+)
+
+// stripRunesFn returns a function which returns -1 (i.e. a value which
+// signals deletion in strings.Map) for runes in 'runes', and the rune otherwise.
+func stripRunesFn(runes string) func(rune) rune {
+ return func(r rune) rune {
+ if strings.ContainsRune(runes, r) {
+ return -1
+ }
+ return r
+ }
+}
+
+// checkElement checks that a given named element matches character and length restrictions.
+// Returns nil if the given element adheres to the given restrictions, or an error otherwise.
+func checkElement(name, element, allowedRunes string, minRunes, maxRunes int) error {
+ numRunes := utf8.RuneCountInString(element)
+ if (numRunes < minRunes) || (maxRunes < numRunes) {
+ return NewErrBadName("%s must be between %d and %d runes in length: %s", name, minRunes, maxRunes, element)
+ } else if len(strings.Map(stripRunesFn(allowedRunes), element)) != 0 {
+ return NewErrBadName("%s can only contain the runes `%s`: %s", name, allowedRunes, element)
+ }
+ return nil
+}
diff --git a/vendor/github.com/google/go-containerregistry/pkg/name/digest.go b/vendor/github.com/google/go-containerregistry/pkg/name/digest.go
new file mode 100644
index 0000000000..120dd216ab
--- /dev/null
+++ b/vendor/github.com/google/go-containerregistry/pkg/name/digest.go
@@ -0,0 +1,96 @@
+// Copyright 2018 Google LLC All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package name
+
+import (
+ "strings"
+)
+
+const (
+	// These have the form: sha256:<hexadecimal-digest>
+ // TODO(dekkagaijin): replace with opencontainers/go-digest or docker/distribution's validation.
+ digestChars = "sh:0123456789abcdef"
+ digestDelim = "@"
+)
+
+// Digest stores a digest name in a structured form.
+type Digest struct {
+ Repository
+ digest string
+ original string
+}
+
+// Ensure Digest implements Reference
+var _ Reference = (*Digest)(nil)
+
+// Context implements Reference.
+func (d Digest) Context() Repository {
+ return d.Repository
+}
+
+// Identifier implements Reference.
+func (d Digest) Identifier() string {
+ return d.DigestStr()
+}
+
+// DigestStr returns the digest component of the Digest.
+func (d Digest) DigestStr() string {
+ return d.digest
+}
+
+// Name returns the name from which the Digest was derived.
+func (d Digest) Name() string {
+ return d.Repository.Name() + digestDelim + d.DigestStr()
+}
+
+// String returns the original input string.
+func (d Digest) String() string {
+ return d.original
+}
+
+func checkDigest(name string) error {
+ return checkElement("digest", name, digestChars, 7+64, 7+64)
+}
+
+// NewDigest returns a new Digest representing the given name.
+func NewDigest(name string, opts ...Option) (Digest, error) {
+ // Split on "@"
+ parts := strings.Split(name, digestDelim)
+ if len(parts) != 2 {
+ return Digest{}, NewErrBadName("a digest must contain exactly one '@' separator (e.g. registry/repository@digest) saw: %s", name)
+ }
+ base := parts[0]
+ digest := parts[1]
+
+ // Always check that the digest is valid.
+ if err := checkDigest(digest); err != nil {
+ return Digest{}, err
+ }
+
+ tag, err := NewTag(base, opts...)
+ if err == nil {
+ base = tag.Repository.Name()
+ }
+
+ repo, err := NewRepository(base, opts...)
+ if err != nil {
+ return Digest{}, err
+ }
+ return Digest{
+ Repository: repo,
+ digest: digest,
+ original: name,
+ }, nil
+}
diff --git a/vendor/github.com/google/go-containerregistry/pkg/name/doc.go b/vendor/github.com/google/go-containerregistry/pkg/name/doc.go
new file mode 100644
index 0000000000..b294794dc1
--- /dev/null
+++ b/vendor/github.com/google/go-containerregistry/pkg/name/doc.go
@@ -0,0 +1,42 @@
+// Copyright 2018 Google LLC All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package name defines structured types for representing image references.
+//
+// What's in a name? For image references, not nearly enough!
+//
+// Image references look a lot like URLs, but they differ in that they don't
+// contain the scheme (http or https), they can end with a :tag or a @digest
+// (the latter being validated), and they perform defaulting for missing
+// components.
+//
+// Since image references don't contain the scheme, we do our best to infer
+// if we use http or https from the given hostname. We allow http fallback for
+// any host that looks like localhost (localhost, 127.0.0.1, ::1), ends in
+// ".local", or is in the "private" address space per RFC 1918. For everything
+// else, we assume https only. To override this heuristic, use the Insecure
+// option.
+//
+// Image references with a digest signal to us that we should verify the content
+// of the image matches the digest. E.g. when pulling a Digest reference, we'll
+// calculate the sha256 of the manifest returned by the registry and error out
+// if it doesn't match what we asked for.
+//
+// For defaulting, we interpret "ubuntu" as
+// "index.docker.io/library/ubuntu:latest" because we add the missing repo
+// "library", the missing registry "index.docker.io", and the missing tag
+// "latest". To disable this defaulting, use the StrictValidation option. This
+// is useful e.g. to only allow image references that explicitly set a tag or
+// digest, so that you don't accidentally pull "latest".
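+//
+// As a rough usage sketch of the parsing entry points defined in this package
+// (see ref.go and options.go):
+//
+//	ref, _ := ParseReference("ubuntu")
+//	// ref.Name() == "index.docker.io/library/ubuntu:latest"
+//
+//	_, err := ParseReference("ubuntu", StrictValidation)
+//	// err != nil: strict validation rejects the implicit registry, repo, and tag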
+package name
diff --git a/vendor/github.com/google/go-containerregistry/pkg/name/errors.go b/vendor/github.com/google/go-containerregistry/pkg/name/errors.go
new file mode 100644
index 0000000000..7847cc5d1e
--- /dev/null
+++ b/vendor/github.com/google/go-containerregistry/pkg/name/errors.go
@@ -0,0 +1,37 @@
+// Copyright 2018 Google LLC All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package name
+
+import "fmt"
+
+// ErrBadName is an error for when a bad docker name is supplied.
+type ErrBadName struct {
+ info string
+}
+
+func (e *ErrBadName) Error() string {
+ return e.info
+}
+
+// NewErrBadName returns a ErrBadName which returns the given formatted string from Error().
+func NewErrBadName(fmtStr string, args ...interface{}) *ErrBadName {
+ return &ErrBadName{fmt.Sprintf(fmtStr, args...)}
+}
+
+// IsErrBadName returns true if the given error is an ErrBadName.
+func IsErrBadName(err error) bool {
+ _, ok := err.(*ErrBadName)
+ return ok
+}
diff --git a/vendor/github.com/google/go-containerregistry/pkg/name/options.go b/vendor/github.com/google/go-containerregistry/pkg/name/options.go
new file mode 100644
index 0000000000..d14fedcdad
--- /dev/null
+++ b/vendor/github.com/google/go-containerregistry/pkg/name/options.go
@@ -0,0 +1,83 @@
+// Copyright 2018 Google LLC All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package name
+
+const (
+	// DefaultRegistry is the registry name that will be used if no registry is
+	// provided and the default is not overridden.
+ DefaultRegistry = "index.docker.io"
+ defaultRegistryAlias = "docker.io"
+
+	// DefaultTag is the tag name that will be used if no tag is provided and
+	// the default is not overridden.
+ DefaultTag = "latest"
+)
+
+type options struct {
+ strict bool // weak by default
+ insecure bool // secure by default
+ defaultRegistry string
+ defaultTag string
+}
+
+func makeOptions(opts ...Option) options {
+ opt := options{
+ defaultRegistry: DefaultRegistry,
+ defaultTag: DefaultTag,
+ }
+ for _, o := range opts {
+ o(&opt)
+ }
+ return opt
+}
+
+// Option is a functional option for name parsing.
+type Option func(*options)
+
+// StrictValidation is an Option that requires image references to be fully
+// specified; i.e. no defaulting for registry (dockerhub), repo (library),
+// or tag (latest).
+func StrictValidation(opts *options) {
+ opts.strict = true
+}
+
+// WeakValidation is an Option that sets defaults when parsing names, see
+// StrictValidation.
+func WeakValidation(opts *options) {
+ opts.strict = false
+}
+
+// Insecure is an Option that allows image references to be fetched without TLS.
+func Insecure(opts *options) {
+ opts.insecure = true
+}
+
+// OptionFn is a function that returns an option.
+type OptionFn func() Option
+
+// WithDefaultRegistry sets the default registry that will be used if one is not
+// provided.
+func WithDefaultRegistry(r string) Option {
+ return func(opts *options) {
+ opts.defaultRegistry = r
+ }
+}
+
+// WithDefaultTag sets the default tag that will be used if one is not provided.
+func WithDefaultTag(t string) Option {
+ return func(opts *options) {
+ opts.defaultTag = t
+ }
+}
diff --git a/vendor/github.com/google/go-containerregistry/pkg/name/ref.go b/vendor/github.com/google/go-containerregistry/pkg/name/ref.go
new file mode 100644
index 0000000000..e5180b3d0a
--- /dev/null
+++ b/vendor/github.com/google/go-containerregistry/pkg/name/ref.go
@@ -0,0 +1,76 @@
+// Copyright 2018 Google LLC All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package name
+
+import (
+ "fmt"
+)
+
+// Reference defines the interface that consumers use when they can
+// take either a tag or a digest.
+type Reference interface {
+ fmt.Stringer
+
+ // Context accesses the Repository context of the reference.
+ Context() Repository
+
+ // Identifier accesses the type-specific portion of the reference.
+ Identifier() string
+
+ // Name is the fully-qualified reference name.
+ Name() string
+
+ // Scope is the scope needed to access this reference.
+ Scope(string) string
+}
+
+// ParseReference parses the string as a reference, either by tag or digest.
+func ParseReference(s string, opts ...Option) (Reference, error) {
+ if t, err := NewTag(s, opts...); err == nil {
+ return t, nil
+ }
+ if d, err := NewDigest(s, opts...); err == nil {
+ return d, nil
+ }
+	return nil, NewErrBadName("could not parse reference: " + s)
+}
+
+type stringConst string
+
+// MustParseReference behaves like ParseReference, but panics instead of
+// returning an error. It's intended for use in tests, or when a value is
+// expected to be valid at code authoring time.
+//
+// To discourage its use in scenarios where the value is not known at code
+// authoring time, it must be passed a string constant:
+//
+// const str = "valid/string"
+// MustParseReference(str)
+// MustParseReference("another/valid/string")
+// MustParseReference(str + "/and/more")
+//
+// These will not compile:
+//
+// var str = "valid/string"
+// MustParseReference(str)
+// MustParseReference(strings.Join([]string{"valid", "string"}, "/"))
+func MustParseReference(s stringConst, opts ...Option) Reference {
+ ref, err := ParseReference(string(s), opts...)
+ if err != nil {
+ panic(err)
+ }
+ return ref
+}
diff --git a/vendor/github.com/google/go-containerregistry/pkg/name/registry.go b/vendor/github.com/google/go-containerregistry/pkg/name/registry.go
new file mode 100644
index 0000000000..d4da7409e8
--- /dev/null
+++ b/vendor/github.com/google/go-containerregistry/pkg/name/registry.go
@@ -0,0 +1,136 @@
+// Copyright 2018 Google LLC All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package name
+
+import (
+ "net"
+ "net/url"
+ "regexp"
+ "strings"
+)
+
+// Detect more complex forms of local references.
+var reLocal = regexp.MustCompile(`.*\.local(?:host)?(?::\d{1,5})?$`)
+
+// Detect the loopback IP (127.0.0.1)
+var reLoopback = regexp.MustCompile(regexp.QuoteMeta("127.0.0.1"))
+
+// Detect the loopback IPV6 (::1)
+var reipv6Loopback = regexp.MustCompile(regexp.QuoteMeta("::1"))
+
+// Registry stores a docker registry name in a structured form.
+type Registry struct {
+ insecure bool
+ registry string
+}
+
+// RegistryStr returns the registry component of the Registry.
+func (r Registry) RegistryStr() string {
+ return r.registry
+}
+
+// Name returns the name from which the Registry was derived.
+func (r Registry) Name() string {
+ return r.RegistryStr()
+}
+
+func (r Registry) String() string {
+ return r.Name()
+}
+
+// Scope returns the scope required to access the registry.
+func (r Registry) Scope(string) string {
+ // The only resource under 'registry' is 'catalog'. http://goo.gl/N9cN9Z
+ return "registry:catalog:*"
+}
+
+func (r Registry) isRFC1918() bool {
+ ipStr := strings.Split(r.Name(), ":")[0]
+ ip := net.ParseIP(ipStr)
+ if ip == nil {
+ return false
+ }
+ for _, cidr := range []string{"10.0.0.0/8", "172.16.0.0/12", "192.168.0.0/16"} {
+ _, block, _ := net.ParseCIDR(cidr)
+ if block.Contains(ip) {
+ return true
+ }
+ }
+ return false
+}
+
+// Scheme returns the https scheme for all endpoints except local, loopback, and RFC 1918 addresses, or when the registry is explicitly marked insecure.
+func (r Registry) Scheme() string {
+ if r.insecure {
+ return "http"
+ }
+ if r.isRFC1918() {
+ return "http"
+ }
+ if strings.HasPrefix(r.Name(), "localhost:") {
+ return "http"
+ }
+ if reLocal.MatchString(r.Name()) {
+ return "http"
+ }
+ if reLoopback.MatchString(r.Name()) {
+ return "http"
+ }
+ if reipv6Loopback.MatchString(r.Name()) {
+ return "http"
+ }
+ return "https"
+}
+
+func checkRegistry(name string) error {
+ // Per RFC 3986, registries (authorities) are required to be prefixed with "//"
+ // url.Host == hostname[:port] == authority
+ if url, err := url.Parse("//" + name); err != nil || url.Host != name {
+ return NewErrBadName("registries must be valid RFC 3986 URI authorities: %s", name)
+ }
+ return nil
+}
+
+// NewRegistry returns a Registry based on the given name.
+// Strict validation requires explicit, valid RFC 3986 URI authorities to be given.
+func NewRegistry(name string, opts ...Option) (Registry, error) {
+ opt := makeOptions(opts...)
+ if opt.strict && len(name) == 0 {
+ return Registry{}, NewErrBadName("strict validation requires the registry to be explicitly defined")
+ }
+
+ if err := checkRegistry(name); err != nil {
+ return Registry{}, err
+ }
+
+ if name == "" {
+ name = opt.defaultRegistry
+ }
+ // Rewrite "docker.io" to "index.docker.io".
+ // See: https://github.com/google/go-containerregistry/issues/68
+ if name == defaultRegistryAlias {
+ name = DefaultRegistry
+ }
+
+ return Registry{registry: name, insecure: opt.insecure}, nil
+}
+
+// NewInsecureRegistry returns an Insecure Registry based on the given name.
+//
+// Deprecated: Use the Insecure Option with NewRegistry instead.
+func NewInsecureRegistry(name string, opts ...Option) (Registry, error) {
+ opts = append(opts, Insecure)
+ return NewRegistry(name, opts...)
+}
diff --git a/vendor/github.com/google/go-containerregistry/pkg/name/repository.go b/vendor/github.com/google/go-containerregistry/pkg/name/repository.go
new file mode 100644
index 0000000000..54367a15cd
--- /dev/null
+++ b/vendor/github.com/google/go-containerregistry/pkg/name/repository.go
@@ -0,0 +1,121 @@
+// Copyright 2018 Google LLC All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package name
+
+import (
+ "fmt"
+ "strings"
+)
+
+const (
+ defaultNamespace = "library"
+ repositoryChars = "abcdefghijklmnopqrstuvwxyz0123456789_-./"
+ regRepoDelimiter = "/"
+)
+
+// Repository stores a docker repository name in a structured form.
+type Repository struct {
+ Registry
+ repository string
+}
+
+// See https://docs.docker.com/docker-hub/official_repos
+func hasImplicitNamespace(repo string, reg Registry) bool {
+ return !strings.ContainsRune(repo, '/') && reg.RegistryStr() == DefaultRegistry
+}
+
+// RepositoryStr returns the repository component of the Repository.
+func (r Repository) RepositoryStr() string {
+ if hasImplicitNamespace(r.repository, r.Registry) {
+ return fmt.Sprintf("%s/%s", defaultNamespace, r.repository)
+ }
+ return r.repository
+}
+
+// Name returns the name from which the Repository was derived.
+func (r Repository) Name() string {
+ regName := r.Registry.Name()
+ if regName != "" {
+ return regName + regRepoDelimiter + r.RepositoryStr()
+ }
+ // TODO: As far as I can tell, this is unreachable.
+ return r.RepositoryStr()
+}
+
+func (r Repository) String() string {
+ return r.Name()
+}
+
+// Scope returns the scope required to perform the given action on the registry.
+// TODO(jonjohnsonjr): consider moving scopes to a separate package.
+func (r Repository) Scope(action string) string {
+ return fmt.Sprintf("repository:%s:%s", r.RepositoryStr(), action)
+}
+
+func checkRepository(repository string) error {
+ return checkElement("repository", repository, repositoryChars, 2, 255)
+}
+
+// NewRepository returns a new Repository representing the given name, according to the given strictness.
+func NewRepository(name string, opts ...Option) (Repository, error) {
+ opt := makeOptions(opts...)
+ if len(name) == 0 {
+ return Repository{}, NewErrBadName("a repository name must be specified")
+ }
+
+ var registry string
+ repo := name
+ parts := strings.SplitN(name, regRepoDelimiter, 2)
+ if len(parts) == 2 && (strings.ContainsRune(parts[0], '.') || strings.ContainsRune(parts[0], ':')) {
+ // The first part of the repository is treated as the registry domain
+ // iff it contains a '.' or ':' character, otherwise it is all repository
+ // and the domain defaults to Docker Hub.
+ registry = parts[0]
+ repo = parts[1]
+ }
+
+ if err := checkRepository(repo); err != nil {
+ return Repository{}, err
+ }
+
+ reg, err := NewRegistry(registry, opts...)
+ if err != nil {
+ return Repository{}, err
+ }
+ if hasImplicitNamespace(repo, reg) && opt.strict {
+ return Repository{}, NewErrBadName("strict validation requires the full repository path (missing 'library')")
+ }
+ return Repository{reg, repo}, nil
+}
+
+// Tag returns a Tag in this Repository.
+func (r Repository) Tag(identifier string) Tag {
+ t := Tag{
+ tag: identifier,
+ Repository: r,
+ }
+ t.original = t.Name()
+ return t
+}
+
+// Digest returns a Digest in this Repository.
+func (r Repository) Digest(identifier string) Digest {
+ d := Digest{
+ digest: identifier,
+ Repository: r,
+ }
+ d.original = d.Name()
+ return d
+}
diff --git a/vendor/github.com/google/go-containerregistry/pkg/name/tag.go b/vendor/github.com/google/go-containerregistry/pkg/name/tag.go
new file mode 100644
index 0000000000..66bd1bec3d
--- /dev/null
+++ b/vendor/github.com/google/go-containerregistry/pkg/name/tag.go
@@ -0,0 +1,108 @@
+// Copyright 2018 Google LLC All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package name
+
+import (
+ "strings"
+)
+
+const (
+ // TODO(dekkagaijin): use the docker/distribution regexes for validation.
+ tagChars = "abcdefghijklmnopqrstuvwxyz0123456789_-.ABCDEFGHIJKLMNOPQRSTUVWXYZ"
+ tagDelim = ":"
+)
+
+// Tag stores a docker tag name in a structured form.
+type Tag struct {
+ Repository
+ tag string
+ original string
+}
+
+// Ensure Tag implements Reference
+var _ Reference = (*Tag)(nil)
+
+// Context implements Reference.
+func (t Tag) Context() Repository {
+ return t.Repository
+}
+
+// Identifier implements Reference.
+func (t Tag) Identifier() string {
+ return t.TagStr()
+}
+
+// TagStr returns the tag component of the Tag.
+func (t Tag) TagStr() string {
+ return t.tag
+}
+
+// Name returns the name from which the Tag was derived.
+func (t Tag) Name() string {
+ return t.Repository.Name() + tagDelim + t.TagStr()
+}
+
+// String returns the original input string.
+func (t Tag) String() string {
+ return t.original
+}
+
+// Scope returns the scope required to perform the given action on the tag.
+func (t Tag) Scope(action string) string {
+ return t.Repository.Scope(action)
+}
+
+func checkTag(name string) error {
+ return checkElement("tag", name, tagChars, 1, 128)
+}
+
+// NewTag returns a new Tag representing the given name, according to the given strictness.
+func NewTag(name string, opts ...Option) (Tag, error) {
+ opt := makeOptions(opts...)
+ base := name
+ tag := ""
+
+ // Split on ":"
+ parts := strings.Split(name, tagDelim)
+ // Verify that we aren't confusing a tag for a hostname w/ port for the purposes of weak validation.
+ if len(parts) > 1 && !strings.Contains(parts[len(parts)-1], regRepoDelimiter) {
+ base = strings.Join(parts[:len(parts)-1], tagDelim)
+ tag = parts[len(parts)-1]
+ }
+
+ // We don't require a tag, but if we get one check it's valid,
+ // even when not being strict.
+ // If we are being strict, we want to validate the tag regardless in case
+ // it's empty.
+ if tag != "" || opt.strict {
+ if err := checkTag(tag); err != nil {
+ return Tag{}, err
+ }
+ }
+
+ if tag == "" {
+ tag = opt.defaultTag
+ }
+
+ repo, err := NewRepository(base, opts...)
+ if err != nil {
+ return Tag{}, err
+ }
+ return Tag{
+ Repository: repo,
+ tag: tag,
+ original: name,
+ }, nil
+}
diff --git a/vendor/github.com/google/go-containerregistry/pkg/v1/BUILD.bazel b/vendor/github.com/google/go-containerregistry/pkg/v1/BUILD.bazel
new file mode 100644
index 0000000000..672fa1ebe5
--- /dev/null
+++ b/vendor/github.com/google/go-containerregistry/pkg/v1/BUILD.bazel
@@ -0,0 +1,21 @@
+load("@io_bazel_rules_go//go:def.bzl", "go_library")
+
+go_library(
+ name = "go_default_library",
+ srcs = [
+ "config.go",
+ "doc.go",
+ "hash.go",
+ "image.go",
+ "index.go",
+ "layer.go",
+ "manifest.go",
+ "platform.go",
+ "progress.go",
+ "zz_deepcopy_generated.go",
+ ],
+ importmap = "k8s.io/kops/vendor/github.com/google/go-containerregistry/pkg/v1",
+ importpath = "github.com/google/go-containerregistry/pkg/v1",
+ visibility = ["//visibility:public"],
+ deps = ["//vendor/github.com/google/go-containerregistry/pkg/v1/types:go_default_library"],
+)
diff --git a/vendor/github.com/google/go-containerregistry/pkg/v1/config.go b/vendor/github.com/google/go-containerregistry/pkg/v1/config.go
new file mode 100644
index 0000000000..a950b397c1
--- /dev/null
+++ b/vendor/github.com/google/go-containerregistry/pkg/v1/config.go
@@ -0,0 +1,133 @@
+// Copyright 2018 Google LLC All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package v1
+
+import (
+ "encoding/json"
+ "io"
+ "time"
+)
+
+// ConfigFile is the configuration file that holds the metadata describing
+// how to launch a container. See:
+// https://github.com/opencontainers/image-spec/blob/master/config.md
+//
+// docker_version and os.version are not part of the spec but included
+// for backwards compatibility.
+type ConfigFile struct {
+ Architecture string `json:"architecture"`
+ Author string `json:"author,omitempty"`
+ Container string `json:"container,omitempty"`
+ Created Time `json:"created,omitempty"`
+ DockerVersion string `json:"docker_version,omitempty"`
+ History []History `json:"history,omitempty"`
+ OS string `json:"os"`
+ RootFS RootFS `json:"rootfs"`
+ Config Config `json:"config"`
+ OSVersion string `json:"os.version,omitempty"`
+}
+
+// History is one entry of a list recording how this container image was built.
+type History struct {
+ Author string `json:"author,omitempty"`
+ Created Time `json:"created,omitempty"`
+ CreatedBy string `json:"created_by,omitempty"`
+ Comment string `json:"comment,omitempty"`
+ EmptyLayer bool `json:"empty_layer,omitempty"`
+}
+
+// Time is a wrapper around time.Time to help with deep copying
+type Time struct {
+ time.Time
+}
+
+// DeepCopyInto creates a deep-copy of the Time value. The underlying time.Time
+// type is effectively immutable in the time API, so it is safe to
+// copy-by-assign, despite the presence of (unexported) Pointer fields.
+func (t *Time) DeepCopyInto(out *Time) {
+ *out = *t
+}
+
+// RootFS holds the ordered list of file system deltas that comprise the
+// container image's root filesystem.
+type RootFS struct {
+ Type string `json:"type"`
+ DiffIDs []Hash `json:"diff_ids"`
+}
+
+// HealthConfig holds configuration settings for the HEALTHCHECK feature.
+type HealthConfig struct {
+ // Test is the test to perform to check that the container is healthy.
+ // An empty slice means to inherit the default.
+ // The options are:
+ // {} : inherit healthcheck
+ // {"NONE"} : disable healthcheck
+ // {"CMD", args...} : exec arguments directly
+ // {"CMD-SHELL", command} : run command with system's default shell
+ Test []string `json:",omitempty"`
+
+ // Zero means to inherit. Durations are expressed as integer nanoseconds.
+ Interval time.Duration `json:",omitempty"` // Interval is the time to wait between checks.
+ Timeout time.Duration `json:",omitempty"` // Timeout is the time to wait before considering the check to have hung.
+ StartPeriod time.Duration `json:",omitempty"` // The start period for the container to initialize before the retries starts to count down.
+
+ // Retries is the number of consecutive failures needed to consider a container as unhealthy.
+ // Zero means inherit.
+ Retries int `json:",omitempty"`
+}
+
+// Config is a submessage of the config file described as:
+// The execution parameters which SHOULD be used as a base when running
+// a container using the image.
+// The names of the fields in this message are chosen to reflect the JSON
+// payload of the Config as defined here:
+// https://git.io/vrAET
+// and
+// https://github.com/opencontainers/image-spec/blob/master/config.md
+type Config struct {
+ AttachStderr bool `json:"AttachStderr,omitempty"`
+ AttachStdin bool `json:"AttachStdin,omitempty"`
+ AttachStdout bool `json:"AttachStdout,omitempty"`
+ Cmd []string `json:"Cmd,omitempty"`
+ Healthcheck *HealthConfig `json:"Healthcheck,omitempty"`
+ Domainname string `json:"Domainname,omitempty"`
+ Entrypoint []string `json:"Entrypoint,omitempty"`
+ Env []string `json:"Env,omitempty"`
+ Hostname string `json:"Hostname,omitempty"`
+ Image string `json:"Image,omitempty"`
+ Labels map[string]string `json:"Labels,omitempty"`
+ OnBuild []string `json:"OnBuild,omitempty"`
+ OpenStdin bool `json:"OpenStdin,omitempty"`
+ StdinOnce bool `json:"StdinOnce,omitempty"`
+ Tty bool `json:"Tty,omitempty"`
+ User string `json:"User,omitempty"`
+ Volumes map[string]struct{} `json:"Volumes,omitempty"`
+ WorkingDir string `json:"WorkingDir,omitempty"`
+ ExposedPorts map[string]struct{} `json:"ExposedPorts,omitempty"`
+ ArgsEscaped bool `json:"ArgsEscaped,omitempty"`
+ NetworkDisabled bool `json:"NetworkDisabled,omitempty"`
+ MacAddress string `json:"MacAddress,omitempty"`
+ StopSignal string `json:"StopSignal,omitempty"`
+ Shell []string `json:"Shell,omitempty"`
+}
+
+// ParseConfigFile parses the io.Reader's contents into a ConfigFile.
+func ParseConfigFile(r io.Reader) (*ConfigFile, error) {
+ cf := ConfigFile{}
+ if err := json.NewDecoder(r).Decode(&cf); err != nil {
+ return nil, err
+ }
+ return &cf, nil
+}
diff --git a/vendor/github.com/google/go-containerregistry/pkg/v1/doc.go b/vendor/github.com/google/go-containerregistry/pkg/v1/doc.go
new file mode 100644
index 0000000000..7a84736be2
--- /dev/null
+++ b/vendor/github.com/google/go-containerregistry/pkg/v1/doc.go
@@ -0,0 +1,18 @@
+// Copyright 2018 Google LLC All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// +k8s:deepcopy-gen=package
+
+// Package v1 defines structured types for OCI v1 images
+package v1
diff --git a/vendor/github.com/google/go-containerregistry/pkg/v1/hash.go b/vendor/github.com/google/go-containerregistry/pkg/v1/hash.go
new file mode 100644
index 0000000000..e9630087e1
--- /dev/null
+++ b/vendor/github.com/google/go-containerregistry/pkg/v1/hash.go
@@ -0,0 +1,123 @@
+// Copyright 2018 Google LLC All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package v1
+
+import (
+ "crypto/sha256"
+ "encoding/hex"
+ "encoding/json"
+ "fmt"
+ "hash"
+ "io"
+ "strconv"
+ "strings"
+)
+
+// Hash is an unqualified digest of some content, e.g. sha256:deadbeef
+type Hash struct {
+ // Algorithm holds the algorithm used to compute the hash.
+ Algorithm string
+
+ // Hex holds the hex portion of the content hash.
+ Hex string
+}
+
+// String reverses NewHash returning the string-form of the hash.
+func (h Hash) String() string {
+ return fmt.Sprintf("%s:%s", h.Algorithm, h.Hex)
+}
+
+// NewHash validates the input string is a hash and returns a strongly typed Hash object.
+func NewHash(s string) (Hash, error) {
+ h := Hash{}
+ if err := h.parse(s); err != nil {
+ return Hash{}, err
+ }
+ return h, nil
+}
+
+// MarshalJSON implements json.Marshaler
+func (h Hash) MarshalJSON() ([]byte, error) {
+ return json.Marshal(h.String())
+}
+
+// UnmarshalJSON implements json.Unmarshaler
+func (h *Hash) UnmarshalJSON(data []byte) error {
+ s, err := strconv.Unquote(string(data))
+ if err != nil {
+ return err
+ }
+ return h.parse(s)
+}
+
+// MarshalText implements encoding.TextMarshaler. This is required to use
+// v1.Hash as a key in a map when marshalling JSON.
+func (h Hash) MarshalText() (text []byte, err error) {
+ return []byte(h.String()), nil
+}
+
+// UnmarshalText implements encoding.TextUnmarshaler. This is required to use
+// v1.Hash as a key in a map when unmarshalling JSON.
+func (h *Hash) UnmarshalText(text []byte) error {
+ return h.parse(string(text))
+}
+
+// Hasher returns a hash.Hash for the named algorithm (e.g. "sha256")
+func Hasher(name string) (hash.Hash, error) {
+ switch name {
+ case "sha256":
+ return sha256.New(), nil
+ default:
+ return nil, fmt.Errorf("unsupported hash: %q", name)
+ }
+}
+
+func (h *Hash) parse(unquoted string) error {
+ parts := strings.Split(unquoted, ":")
+ if len(parts) != 2 {
+ return fmt.Errorf("cannot parse hash: %q", unquoted)
+ }
+
+ rest := strings.TrimLeft(parts[1], "0123456789abcdef")
+ if len(rest) != 0 {
+ return fmt.Errorf("found non-hex character in hash: %c", rest[0])
+ }
+
+ hasher, err := Hasher(parts[0])
+ if err != nil {
+ return err
+ }
+ // Compare the hex to the expected size (2 hex characters per byte)
+ if len(parts[1]) != hasher.Size()*2 {
+ return fmt.Errorf("wrong number of hex digits for %s: %s", parts[0], parts[1])
+ }
+
+ h.Algorithm = parts[0]
+ h.Hex = parts[1]
+ return nil
+}
+
+// SHA256 computes the Hash of the provided io.Reader's content.
+func SHA256(r io.Reader) (Hash, int64, error) {
+ hasher := sha256.New()
+ n, err := io.Copy(hasher, r)
+ if err != nil {
+ return Hash{}, 0, err
+ }
+ return Hash{
+ Algorithm: "sha256",
+ Hex: hex.EncodeToString(hasher.Sum(make([]byte, 0, hasher.Size()))),
+ }, n, nil
+}
diff --git a/vendor/github.com/google/go-containerregistry/pkg/v1/image.go b/vendor/github.com/google/go-containerregistry/pkg/v1/image.go
new file mode 100644
index 0000000000..8de9e47645
--- /dev/null
+++ b/vendor/github.com/google/go-containerregistry/pkg/v1/image.go
@@ -0,0 +1,59 @@
+// Copyright 2018 Google LLC All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package v1
+
+import (
+ "github.com/google/go-containerregistry/pkg/v1/types"
+)
+
+// Image defines the interface for interacting with an OCI v1 image.
+type Image interface {
+ // Layers returns the ordered collection of filesystem layers that comprise this image.
+ // The order of the list is oldest/base layer first, and most-recent/top layer last.
+ Layers() ([]Layer, error)
+
+ // MediaType of this image's manifest.
+ MediaType() (types.MediaType, error)
+
+ // Size returns the size of the manifest.
+ Size() (int64, error)
+
+ // ConfigName returns the hash of the image's config file, also known as
+ // the Image ID.
+ ConfigName() (Hash, error)
+
+ // ConfigFile returns this image's config file.
+ ConfigFile() (*ConfigFile, error)
+
+ // RawConfigFile returns the serialized bytes of ConfigFile().
+ RawConfigFile() ([]byte, error)
+
+ // Digest returns the sha256 of this image's manifest.
+ Digest() (Hash, error)
+
+ // Manifest returns this image's Manifest object.
+ Manifest() (*Manifest, error)
+
+ // RawManifest returns the serialized bytes of Manifest()
+ RawManifest() ([]byte, error)
+
+ // LayerByDigest returns a Layer for interacting with a particular layer of
+ // the image, looking it up by "digest" (the compressed hash).
+ LayerByDigest(Hash) (Layer, error)
+
+ // LayerByDiffID is an analog to LayerByDigest, looking up by "diff id"
+ // (the uncompressed hash).
+ LayerByDiffID(Hash) (Layer, error)
+}
diff --git a/vendor/github.com/google/go-containerregistry/pkg/v1/index.go b/vendor/github.com/google/go-containerregistry/pkg/v1/index.go
new file mode 100644
index 0000000000..8e7bc8ebb3
--- /dev/null
+++ b/vendor/github.com/google/go-containerregistry/pkg/v1/index.go
@@ -0,0 +1,43 @@
+// Copyright 2018 Google LLC All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package v1
+
+import (
+ "github.com/google/go-containerregistry/pkg/v1/types"
+)
+
+// ImageIndex defines the interface for interacting with an OCI image index.
+type ImageIndex interface {
+ // MediaType of this image's manifest.
+ MediaType() (types.MediaType, error)
+
+ // Digest returns the sha256 of this index's manifest.
+ Digest() (Hash, error)
+
+ // Size returns the size of the manifest.
+ Size() (int64, error)
+
+ // IndexManifest returns this image index's manifest object.
+ IndexManifest() (*IndexManifest, error)
+
+ // RawManifest returns the serialized bytes of IndexManifest().
+ RawManifest() ([]byte, error)
+
+ // Image returns a v1.Image that this ImageIndex references.
+ Image(Hash) (Image, error)
+
+ // ImageIndex returns a v1.ImageIndex that this ImageIndex references.
+ ImageIndex(Hash) (ImageIndex, error)
+}
diff --git a/vendor/github.com/google/go-containerregistry/pkg/v1/layer.go b/vendor/github.com/google/go-containerregistry/pkg/v1/layer.go
new file mode 100644
index 0000000000..57447d263d
--- /dev/null
+++ b/vendor/github.com/google/go-containerregistry/pkg/v1/layer.go
@@ -0,0 +1,42 @@
+// Copyright 2018 Google LLC All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package v1
+
+import (
+ "io"
+
+ "github.com/google/go-containerregistry/pkg/v1/types"
+)
+
+// Layer is an interface for accessing the properties of a particular layer of a v1.Image
+type Layer interface {
+ // Digest returns the Hash of the compressed layer.
+ Digest() (Hash, error)
+
+ // DiffID returns the Hash of the uncompressed layer.
+ DiffID() (Hash, error)
+
+ // Compressed returns an io.ReadCloser for the compressed layer contents.
+ Compressed() (io.ReadCloser, error)
+
+ // Uncompressed returns an io.ReadCloser for the uncompressed layer contents.
+ Uncompressed() (io.ReadCloser, error)
+
+ // Size returns the compressed size of the Layer.
+ Size() (int64, error)
+
+ // MediaType returns the media type of the Layer.
+ MediaType() (types.MediaType, error)
+}
diff --git a/vendor/github.com/google/go-containerregistry/pkg/v1/manifest.go b/vendor/github.com/google/go-containerregistry/pkg/v1/manifest.go
new file mode 100644
index 0000000000..51a4670405
--- /dev/null
+++ b/vendor/github.com/google/go-containerregistry/pkg/v1/manifest.go
@@ -0,0 +1,67 @@
+// Copyright 2018 Google LLC All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package v1
+
+import (
+ "encoding/json"
+ "io"
+
+ "github.com/google/go-containerregistry/pkg/v1/types"
+)
+
+// Manifest represents the OCI image manifest in a structured way.
+type Manifest struct {
+ SchemaVersion int64 `json:"schemaVersion"`
+ MediaType types.MediaType `json:"mediaType,omitempty"`
+ Config Descriptor `json:"config"`
+ Layers []Descriptor `json:"layers"`
+ Annotations map[string]string `json:"annotations,omitempty"`
+}
+
+// IndexManifest represents an OCI image index in a structured way.
+type IndexManifest struct {
+ SchemaVersion int64 `json:"schemaVersion"`
+ MediaType types.MediaType `json:"mediaType,omitempty"`
+ Manifests []Descriptor `json:"manifests"`
+ Annotations map[string]string `json:"annotations,omitempty"`
+}
+
+// Descriptor holds a reference from the manifest to one of its constituent elements.
+type Descriptor struct {
+ MediaType types.MediaType `json:"mediaType"`
+ Size int64 `json:"size"`
+ Digest Hash `json:"digest"`
+ URLs []string `json:"urls,omitempty"`
+ Annotations map[string]string `json:"annotations,omitempty"`
+ Platform *Platform `json:"platform,omitempty"`
+}
+
+// ParseManifest parses the io.Reader's contents into a Manifest.
+func ParseManifest(r io.Reader) (*Manifest, error) {
+ m := Manifest{}
+ if err := json.NewDecoder(r).Decode(&m); err != nil {
+ return nil, err
+ }
+ return &m, nil
+}
+
+// ParseIndexManifest parses the io.Reader's contents into an IndexManifest.
+func ParseIndexManifest(r io.Reader) (*IndexManifest, error) {
+ im := IndexManifest{}
+ if err := json.NewDecoder(r).Decode(&im); err != nil {
+ return nil, err
+ }
+ return &im, nil
+}
diff --git a/vendor/github.com/google/go-containerregistry/pkg/v1/match/BUILD.bazel b/vendor/github.com/google/go-containerregistry/pkg/v1/match/BUILD.bazel
new file mode 100644
index 0000000000..5795201e4d
--- /dev/null
+++ b/vendor/github.com/google/go-containerregistry/pkg/v1/match/BUILD.bazel
@@ -0,0 +1,13 @@
+load("@io_bazel_rules_go//go:def.bzl", "go_library")
+
+go_library(
+ name = "go_default_library",
+ srcs = ["match.go"],
+ importmap = "k8s.io/kops/vendor/github.com/google/go-containerregistry/pkg/v1/match",
+ importpath = "github.com/google/go-containerregistry/pkg/v1/match",
+ visibility = ["//visibility:public"],
+ deps = [
+ "//vendor/github.com/google/go-containerregistry/pkg/v1:go_default_library",
+ "//vendor/github.com/opencontainers/image-spec/specs-go/v1:go_default_library",
+ ],
+)
diff --git a/vendor/github.com/google/go-containerregistry/pkg/v1/match/match.go b/vendor/github.com/google/go-containerregistry/pkg/v1/match/match.go
new file mode 100644
index 0000000000..0f886667ad
--- /dev/null
+++ b/vendor/github.com/google/go-containerregistry/pkg/v1/match/match.go
@@ -0,0 +1,90 @@
+// Copyright 2020 Google LLC All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package match provides functionality for conveniently matching a v1.Descriptor.
+package match
+
+import (
+ v1 "github.com/google/go-containerregistry/pkg/v1"
+ imagespec "github.com/opencontainers/image-spec/specs-go/v1"
+)
+
+// Matcher is a function that is given a v1.Descriptor and returns whether or
+// not it matches a given rule. It can match on anything it wants in the Descriptor.
+type Matcher func(desc v1.Descriptor) bool
+
+// Name returns a match.Matcher that matches based on the value of the
+// "org.opencontainers.image.ref.name" annotation:
+// github.com/opencontainers/image-spec/blob/v1.0.1/annotations.md#pre-defined-annotation-keys
+func Name(name string) Matcher {
+ return Annotation(imagespec.AnnotationRefName, name)
+}
+
+// Annotation returns a match.Matcher that matches based on the provided annotation.
+func Annotation(key, value string) Matcher {
+ return func(desc v1.Descriptor) bool {
+ if desc.Annotations == nil {
+ return false
+ }
+ if aValue, ok := desc.Annotations[key]; ok && aValue == value {
+ return true
+ }
+ return false
+ }
+}
+
+// Platforms returns a match.Matcher that matches on any one of the provided platforms.
+// Ignores any descriptors that do not have a platform.
+func Platforms(platforms ...v1.Platform) Matcher {
+ return func(desc v1.Descriptor) bool {
+ if desc.Platform == nil {
+ return false
+ }
+ for _, platform := range platforms {
+ if desc.Platform.Equals(platform) {
+ return true
+ }
+ }
+ return false
+ }
+}
+
+// MediaTypes returns a match.Matcher that matches at least one of the provided media types.
+func MediaTypes(mediaTypes ...string) Matcher {
+ mts := map[string]bool{}
+ for _, media := range mediaTypes {
+ mts[media] = true
+ }
+ return func(desc v1.Descriptor) bool {
+ if desc.MediaType == "" {
+ return false
+ }
+ if _, ok := mts[string(desc.MediaType)]; ok {
+ return true
+ }
+ return false
+ }
+}
+
+// Digests returns a match.Matcher that matches at least one of the provided Digests
+func Digests(digests ...v1.Hash) Matcher {
+ digs := map[v1.Hash]bool{}
+ for _, digest := range digests {
+ digs[digest] = true
+ }
+ return func(desc v1.Descriptor) bool {
+ _, ok := digs[desc.Digest]
+ return ok
+ }
+}
diff --git a/vendor/github.com/google/go-containerregistry/pkg/v1/partial/BUILD.bazel b/vendor/github.com/google/go-containerregistry/pkg/v1/partial/BUILD.bazel
new file mode 100644
index 0000000000..cb3d108999
--- /dev/null
+++ b/vendor/github.com/google/go-containerregistry/pkg/v1/partial/BUILD.bazel
@@ -0,0 +1,22 @@
+load("@io_bazel_rules_go//go:def.bzl", "go_library")
+
+go_library(
+ name = "go_default_library",
+ srcs = [
+ "compressed.go",
+ "doc.go",
+ "image.go",
+ "index.go",
+ "uncompressed.go",
+ "with.go",
+ ],
+ importmap = "k8s.io/kops/vendor/github.com/google/go-containerregistry/pkg/v1/partial",
+ importpath = "github.com/google/go-containerregistry/pkg/v1/partial",
+ visibility = ["//visibility:public"],
+ deps = [
+ "//vendor/github.com/google/go-containerregistry/internal/gzip:go_default_library",
+ "//vendor/github.com/google/go-containerregistry/pkg/v1:go_default_library",
+ "//vendor/github.com/google/go-containerregistry/pkg/v1/match:go_default_library",
+ "//vendor/github.com/google/go-containerregistry/pkg/v1/types:go_default_library",
+ ],
+)
diff --git a/vendor/github.com/google/go-containerregistry/pkg/v1/partial/README.md b/vendor/github.com/google/go-containerregistry/pkg/v1/partial/README.md
new file mode 100644
index 0000000000..c5710f9a0f
--- /dev/null
+++ b/vendor/github.com/google/go-containerregistry/pkg/v1/partial/README.md
@@ -0,0 +1,82 @@
+# `partial`
+
+[GoDoc](https://godoc.org/github.com/google/go-containerregistry/pkg/v1/partial)
+
+## Partial Implementations
+
+There are roughly two kinds of image representations: compressed and uncompressed.
+
+The implementations for these kinds of images are almost identical, with the only
+major difference being how blobs (config and layers) are fetched. This common
+code lives in this package, where you provide a _partial_ implementation of a
+compressed or uncompressed image, and you get back a full `v1.Image` implementation.
+
+### Examples
+
+In a registry, blobs are compressed, so it's easiest to implement a `v1.Image` in terms
+of compressed layers. `remote.remoteImage` does this by implementing `CompressedImageCore`:
+
+```go
+type CompressedImageCore interface {
+ RawConfigFile() ([]byte, error)
+ MediaType() (types.MediaType, error)
+ RawManifest() ([]byte, error)
+ LayerByDigest(v1.Hash) (CompressedLayer, error)
+}
+```
+
+In a tarball, blobs are (often) uncompressed, so it's easiest to implement a `v1.Image` in terms
+of uncompressed layers. `tarball.uncompressedImage` does this by implementing `UncompressedImageCore`:
+
+```go
+type UncompressedImageCore interface {
+ RawConfigFile() ([]byte, error)
+ MediaType() (types.MediaType, error)
+ LayerByDiffID(v1.Hash) (UncompressedLayer, error)
+}
+```
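+
+To make the relationship concrete, here is a rough, hedged sketch (the `myCore`
+type below is made up for illustration) of how a minimal `UncompressedImageCore`
+is promoted to a full `v1.Image` with `partial.UncompressedToImage`:
+
+```go
+package main
+
+import (
+	"fmt"
+
+	v1 "github.com/google/go-containerregistry/pkg/v1"
+	"github.com/google/go-containerregistry/pkg/v1/partial"
+	"github.com/google/go-containerregistry/pkg/v1/types"
+)
+
+// myCore is a hypothetical, minimal UncompressedImageCore with no layers.
+type myCore struct {
+	rawConfig []byte
+}
+
+func (c myCore) RawConfigFile() ([]byte, error)      { return c.rawConfig, nil }
+func (c myCore) MediaType() (types.MediaType, error) { return types.DockerManifestSchema2, nil }
+func (c myCore) LayerByDiffID(v1.Hash) (partial.UncompressedLayer, error) {
+	return nil, fmt.Errorf("this sketch has no layers")
+}
+
+func main() {
+	img, err := partial.UncompressedToImage(myCore{
+		rawConfig: []byte(`{"rootfs":{"type":"layers"}}`),
+	})
+	if err != nil {
+		panic(err)
+	}
+	// The partial package derives the manifest, digest, size, etc. from the core.
+	digest, err := img.Digest()
+	if err != nil {
+		panic(err)
+	}
+	fmt.Println(digest)
+}
+```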
+
+## Optional Methods
+
+Where possible, we access some information via optional methods as an optimization.
+
+### [`partial.Descriptor`](https://godoc.org/github.com/google/go-containerregistry/pkg/v1/partial#Descriptor)
+
+There are some properties of a [`Descriptor`](https://github.com/opencontainers/image-spec/blob/master/descriptor.md#properties) that aren't derivable from just image data:
+
+* `MediaType`
+* `Platform`
+* `URLs`
+* `Annotations`
+
+For example, in a `tarball.Image`, there is a `LayerSources` field that contains
+an entire layer descriptor with `URLs` information for foreign layers. This
+information can be passed through to callers by implementing this optional
+`Descriptor` method.
+
+See [`#654`](https://github.com/google/go-containerregistry/pull/654).
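+
+As a hedged illustration (the `foreignLayer` type and its fields are invented
+here), a layer that wants to surface foreign-layer `URLs` would add a
+`Descriptor` method alongside the usual `v1.Layer` methods:
+
+```go
+type foreignLayer struct {
+	digest v1.Hash
+	size   int64
+	urls   []string
+	// ... plus whatever the full v1.Layer implementation needs.
+}
+
+// Descriptor is the optional method the partial package looks for when it
+// builds a manifest, letting the layer pass through URLs and annotations.
+func (l *foreignLayer) Descriptor() (*v1.Descriptor, error) {
+	return &v1.Descriptor{
+		MediaType: types.DockerForeignLayer,
+		Digest:    l.digest,
+		Size:      l.size,
+		URLs:      l.urls,
+	}, nil
+}
+```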
+
+### [`partial.UncompressedSize`](https://godoc.org/github.com/google/go-containerregistry/pkg/v1/partial#UncompressedSize)
+
+Usually, you don't need to know the uncompressed size of a layer, since that
+information isn't stored in a config file (just the sha256 is needed); however,
+there are cases where it is very helpful to know the layer size, e.g. when
+writing the uncompressed layer into a tarball.
+
+See [`#655`](https://github.com/google/go-containerregistry/pull/655).
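+
+As a small, hedged usage sketch (`layer` here stands for any `v1.Layer` you
+already hold, e.g. one returned by `img.Layers()`):
+
+```go
+size, err := partial.UncompressedSize(layer)
+if err != nil {
+	return err
+}
+fmt.Printf("uncompressed size: %d bytes\n", size)
+```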
+
+### [`partial.Exists`](https://godoc.org/github.com/google/go-containerregistry/pkg/v1/partial#Exists)
+
+We generally don't care about the existence of something as granular as a
+layer, and would rather ensure all the invariants of an image are upheld via
+the `validate` package. However, there are situations where we want to do a
+quick smoke test to ensure that the underlying storage engine hasn't been
+corrupted by something e.g. deleting files or blobs. Thus, we've exposed an
+optional `Exists` method that does an existence check without actually reading
+any bytes.
+
+The `remote` package implements this via `HEAD` requests.
+
+The `layout` package implements this via `os.Stat`.
+
+See [`#838`](https://github.com/google/go-containerregistry/pull/838).
diff --git a/vendor/github.com/google/go-containerregistry/pkg/v1/partial/compressed.go b/vendor/github.com/google/go-containerregistry/pkg/v1/partial/compressed.go
new file mode 100644
index 0000000000..2e6e548ac9
--- /dev/null
+++ b/vendor/github.com/google/go-containerregistry/pkg/v1/partial/compressed.go
@@ -0,0 +1,163 @@
+// Copyright 2018 Google LLC All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package partial
+
+import (
+ "io"
+
+ "github.com/google/go-containerregistry/internal/gzip"
+ v1 "github.com/google/go-containerregistry/pkg/v1"
+ "github.com/google/go-containerregistry/pkg/v1/types"
+)
+
+// CompressedLayer represents the bare minimum interface a natively
+// compressed layer must implement for us to produce a v1.Layer
+type CompressedLayer interface {
+ // Digest returns the Hash of the compressed layer.
+ Digest() (v1.Hash, error)
+
+ // Compressed returns an io.ReadCloser for the compressed layer contents.
+ Compressed() (io.ReadCloser, error)
+
+ // Size returns the compressed size of the Layer.
+ Size() (int64, error)
+
+ // Returns the mediaType for the compressed Layer
+ MediaType() (types.MediaType, error)
+}
+
+// compressedLayerExtender implements v1.Image using the compressed base properties.
+type compressedLayerExtender struct {
+ CompressedLayer
+}
+
+// Uncompressed implements v1.Layer
+func (cle *compressedLayerExtender) Uncompressed() (io.ReadCloser, error) {
+ r, err := cle.Compressed()
+ if err != nil {
+ return nil, err
+ }
+ return gzip.UnzipReadCloser(r)
+}
+
+// DiffID implements v1.Layer
+func (cle *compressedLayerExtender) DiffID() (v1.Hash, error) {
+ // If our nested CompressedLayer implements DiffID,
+ // then delegate to it instead.
+ if wdi, ok := cle.CompressedLayer.(WithDiffID); ok {
+ return wdi.DiffID()
+ }
+ r, err := cle.Uncompressed()
+ if err != nil {
+ return v1.Hash{}, err
+ }
+ defer r.Close()
+ h, _, err := v1.SHA256(r)
+ return h, err
+}
+
+// CompressedToLayer fills in the missing methods from a CompressedLayer so that it implements v1.Layer
+func CompressedToLayer(ul CompressedLayer) (v1.Layer, error) {
+ return &compressedLayerExtender{ul}, nil
+}
+
+// CompressedImageCore represents the bare minimum interface a natively
+// compressed image must implement for us to produce a v1.Image.
+type CompressedImageCore interface {
+ ImageCore
+
+ // RawManifest returns the serialized bytes of the manifest.
+ RawManifest() ([]byte, error)
+
+ // LayerByDigest is a variation on the v1.Image method, which returns
+ // a CompressedLayer instead.
+ LayerByDigest(v1.Hash) (CompressedLayer, error)
+}
+
+// compressedImageExtender implements v1.Image by extending CompressedImageCore with the
+// appropriate methods computed from the minimal core.
+type compressedImageExtender struct {
+ CompressedImageCore
+}
+
+// Assert that our extender type completes the v1.Image interface
+var _ v1.Image = (*compressedImageExtender)(nil)
+
+// Digest implements v1.Image
+func (i *compressedImageExtender) Digest() (v1.Hash, error) {
+ return Digest(i)
+}
+
+// ConfigName implements v1.Image
+func (i *compressedImageExtender) ConfigName() (v1.Hash, error) {
+ return ConfigName(i)
+}
+
+// Layers implements v1.Image
+func (i *compressedImageExtender) Layers() ([]v1.Layer, error) {
+ hs, err := FSLayers(i)
+ if err != nil {
+ return nil, err
+ }
+ ls := make([]v1.Layer, 0, len(hs))
+ for _, h := range hs {
+ l, err := i.LayerByDigest(h)
+ if err != nil {
+ return nil, err
+ }
+ ls = append(ls, l)
+ }
+ return ls, nil
+}
+
+// LayerByDigest implements v1.Image
+func (i *compressedImageExtender) LayerByDigest(h v1.Hash) (v1.Layer, error) {
+ cl, err := i.CompressedImageCore.LayerByDigest(h)
+ if err != nil {
+ return nil, err
+ }
+ return CompressedToLayer(cl)
+}
+
+// LayerByDiffID implements v1.Image
+func (i *compressedImageExtender) LayerByDiffID(h v1.Hash) (v1.Layer, error) {
+ h, err := DiffIDToBlob(i, h)
+ if err != nil {
+ return nil, err
+ }
+ return i.LayerByDigest(h)
+}
+
+// ConfigFile implements v1.Image
+func (i *compressedImageExtender) ConfigFile() (*v1.ConfigFile, error) {
+ return ConfigFile(i)
+}
+
+// Manifest implements v1.Image
+func (i *compressedImageExtender) Manifest() (*v1.Manifest, error) {
+ return Manifest(i)
+}
+
+// Size implements v1.Image
+func (i *compressedImageExtender) Size() (int64, error) {
+ return Size(i)
+}
+
+// CompressedToImage fills in the missing methods from a CompressedImageCore so that it implements v1.Image
+func CompressedToImage(cic CompressedImageCore) (v1.Image, error) {
+ return &compressedImageExtender{
+ CompressedImageCore: cic,
+ }, nil
+}
diff --git a/vendor/github.com/google/go-containerregistry/pkg/v1/partial/doc.go b/vendor/github.com/google/go-containerregistry/pkg/v1/partial/doc.go
new file mode 100644
index 0000000000..153dfe4d53
--- /dev/null
+++ b/vendor/github.com/google/go-containerregistry/pkg/v1/partial/doc.go
@@ -0,0 +1,17 @@
+// Copyright 2018 Google LLC All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package partial defines methods for building up a v1.Image from
+// minimal subsets that are sufficient for defining a v1.Image.
+package partial
diff --git a/vendor/github.com/google/go-containerregistry/pkg/v1/partial/image.go b/vendor/github.com/google/go-containerregistry/pkg/v1/partial/image.go
new file mode 100644
index 0000000000..c65f45e0dc
--- /dev/null
+++ b/vendor/github.com/google/go-containerregistry/pkg/v1/partial/image.go
@@ -0,0 +1,28 @@
+// Copyright 2018 Google LLC All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package partial
+
+import (
+ "github.com/google/go-containerregistry/pkg/v1/types"
+)
+
+// ImageCore is the core set of properties without which we cannot build a v1.Image
+type ImageCore interface {
+ // RawConfigFile returns the serialized bytes of this image's config file.
+ RawConfigFile() ([]byte, error)
+
+ // MediaType of this image's manifest.
+ MediaType() (types.MediaType, error)
+}
diff --git a/vendor/github.com/google/go-containerregistry/pkg/v1/partial/index.go b/vendor/github.com/google/go-containerregistry/pkg/v1/partial/index.go
new file mode 100644
index 0000000000..9c7a92485b
--- /dev/null
+++ b/vendor/github.com/google/go-containerregistry/pkg/v1/partial/index.go
@@ -0,0 +1,85 @@
+// Copyright 2020 Google LLC All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package partial
+
+import (
+ "fmt"
+
+ v1 "github.com/google/go-containerregistry/pkg/v1"
+ "github.com/google/go-containerregistry/pkg/v1/match"
+)
+
+// FindManifests, given a v1.ImageIndex, finds the manifests that fit the matcher.
+func FindManifests(index v1.ImageIndex, matcher match.Matcher) ([]v1.Descriptor, error) {
+ // get the actual manifest list
+ indexManifest, err := index.IndexManifest()
+ if err != nil {
+ return nil, fmt.Errorf("unable to get raw index: %v", err)
+ }
+ manifests := []v1.Descriptor{}
+ // try to get the root of our image
+ for _, manifest := range indexManifest.Manifests {
+ if matcher(manifest) {
+ manifests = append(manifests, manifest)
+ }
+ }
+ return manifests, nil
+}
+
+// FindImages, given a v1.ImageIndex, finds the images that fit the matcher. If a Descriptor
+// matches the provided Matcher but the referenced item is not an Image, it is ignored.
+// Only those that match the Matcher and are images are returned.
+func FindImages(index v1.ImageIndex, matcher match.Matcher) ([]v1.Image, error) {
+ matches := []v1.Image{}
+ manifests, err := FindManifests(index, matcher)
+ if err != nil {
+ return nil, err
+ }
+ for _, desc := range manifests {
+ // if it is not an image, ignore it
+ if !desc.MediaType.IsImage() {
+ continue
+ }
+ img, err := index.Image(desc.Digest)
+ if err != nil {
+ return nil, err
+ }
+ matches = append(matches, img)
+ }
+ return matches, nil
+}
+
+// FindIndexes, given a v1.ImageIndex, finds the indexes that fit the matcher. If a Descriptor
+// matches the provided Matcher but the referenced item is not an Index, it is ignored.
+// Only those that match the Matcher and are indexes are returned.
+func FindIndexes(index v1.ImageIndex, matcher match.Matcher) ([]v1.ImageIndex, error) {
+ matches := []v1.ImageIndex{}
+ manifests, err := FindManifests(index, matcher)
+ if err != nil {
+ return nil, err
+ }
+ for _, desc := range manifests {
+ if !desc.MediaType.IsIndex() {
+ continue
+ }
+ // if it is not an index, ignore it
+ idx, err := index.ImageIndex(desc.Digest)
+ if err != nil {
+ return nil, err
+ }
+ matches = append(matches, idx)
+ }
+ return matches, nil
+}
diff --git a/vendor/github.com/google/go-containerregistry/pkg/v1/partial/uncompressed.go b/vendor/github.com/google/go-containerregistry/pkg/v1/partial/uncompressed.go
new file mode 100644
index 0000000000..df20d3aa9e
--- /dev/null
+++ b/vendor/github.com/google/go-containerregistry/pkg/v1/partial/uncompressed.go
@@ -0,0 +1,223 @@
+// Copyright 2018 Google LLC All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package partial
+
+import (
+ "bytes"
+ "io"
+ "sync"
+
+ "github.com/google/go-containerregistry/internal/gzip"
+ v1 "github.com/google/go-containerregistry/pkg/v1"
+ "github.com/google/go-containerregistry/pkg/v1/types"
+)
+
+// UncompressedLayer represents the bare minimum interface a natively
+// uncompressed layer must implement for us to produce a v1.Layer
+type UncompressedLayer interface {
+ // DiffID returns the Hash of the uncompressed layer.
+ DiffID() (v1.Hash, error)
+
+ // Uncompressed returns an io.ReadCloser for the uncompressed layer contents.
+ Uncompressed() (io.ReadCloser, error)
+
+ // Returns the mediaType for the compressed Layer
+ MediaType() (types.MediaType, error)
+}
+
+// uncompressedLayerExtender implements v1.Image using the uncompressed base properties.
+type uncompressedLayerExtender struct {
+ UncompressedLayer
+ // Memoize size/hash so that the methods aren't twice as
+ // expensive as doing this manually.
+ hash v1.Hash
+ size int64
+ hashSizeError error
+ once sync.Once
+}
+
+// Compressed implements v1.Layer
+func (ule *uncompressedLayerExtender) Compressed() (io.ReadCloser, error) {
+ u, err := ule.Uncompressed()
+ if err != nil {
+ return nil, err
+ }
+ return gzip.ReadCloser(u), nil
+}
+
+// Digest implements v1.Layer
+func (ule *uncompressedLayerExtender) Digest() (v1.Hash, error) {
+ ule.calcSizeHash()
+ return ule.hash, ule.hashSizeError
+}
+
+// Size implements v1.Layer
+func (ule *uncompressedLayerExtender) Size() (int64, error) {
+ ule.calcSizeHash()
+ return ule.size, ule.hashSizeError
+}
+
+func (ule *uncompressedLayerExtender) calcSizeHash() {
+ ule.once.Do(func() {
+ var r io.ReadCloser
+ r, ule.hashSizeError = ule.Compressed()
+ if ule.hashSizeError != nil {
+ return
+ }
+ defer r.Close()
+ ule.hash, ule.size, ule.hashSizeError = v1.SHA256(r)
+ })
+}
+
+// UncompressedToLayer fills in the missing methods from an UncompressedLayer so that it implements v1.Layer
+func UncompressedToLayer(ul UncompressedLayer) (v1.Layer, error) {
+ return &uncompressedLayerExtender{UncompressedLayer: ul}, nil
+}
+
+// UncompressedImageCore represents the bare minimum interface a natively
+// uncompressed image must implement for us to produce a v1.Image
+type UncompressedImageCore interface {
+ ImageCore
+
+ // LayerByDiffID is a variation on the v1.Image method, which returns
+ // an UncompressedLayer instead.
+ LayerByDiffID(v1.Hash) (UncompressedLayer, error)
+}
+
+// UncompressedToImage fills in the missing methods from an UncompressedImageCore so that it implements v1.Image.
+func UncompressedToImage(uic UncompressedImageCore) (v1.Image, error) {
+ return &uncompressedImageExtender{
+ UncompressedImageCore: uic,
+ }, nil
+}
+
+// uncompressedImageExtender implements v1.Image by extending UncompressedImageCore with the
+// appropriate methods computed from the minimal core.
+type uncompressedImageExtender struct {
+ UncompressedImageCore
+
+ lock sync.Mutex
+ manifest *v1.Manifest
+}
+
+// Assert that our extender type completes the v1.Image interface
+var _ v1.Image = (*uncompressedImageExtender)(nil)
+
+// Digest implements v1.Image
+func (i *uncompressedImageExtender) Digest() (v1.Hash, error) {
+ return Digest(i)
+}
+
+// Manifest implements v1.Image
+func (i *uncompressedImageExtender) Manifest() (*v1.Manifest, error) {
+ i.lock.Lock()
+ defer i.lock.Unlock()
+ if i.manifest != nil {
+ return i.manifest, nil
+ }
+
+ b, err := i.RawConfigFile()
+ if err != nil {
+ return nil, err
+ }
+
+ cfgHash, cfgSize, err := v1.SHA256(bytes.NewReader(b))
+ if err != nil {
+ return nil, err
+ }
+
+ m := &v1.Manifest{
+ SchemaVersion: 2,
+ MediaType: types.DockerManifestSchema2,
+ Config: v1.Descriptor{
+ MediaType: types.DockerConfigJSON,
+ Size: cfgSize,
+ Digest: cfgHash,
+ },
+ }
+
+ ls, err := i.Layers()
+ if err != nil {
+ return nil, err
+ }
+
+ m.Layers = make([]v1.Descriptor, len(ls))
+ for i, l := range ls {
+ desc, err := Descriptor(l)
+ if err != nil {
+ return nil, err
+ }
+
+ m.Layers[i] = *desc
+ }
+
+ i.manifest = m
+ return i.manifest, nil
+}
+
+// RawManifest implements v1.Image
+func (i *uncompressedImageExtender) RawManifest() ([]byte, error) {
+ return RawManifest(i)
+}
+
+// Size implements v1.Image
+func (i *uncompressedImageExtender) Size() (int64, error) {
+ return Size(i)
+}
+
+// ConfigName implements v1.Image
+func (i *uncompressedImageExtender) ConfigName() (v1.Hash, error) {
+ return ConfigName(i)
+}
+
+// ConfigFile implements v1.Image
+func (i *uncompressedImageExtender) ConfigFile() (*v1.ConfigFile, error) {
+ return ConfigFile(i)
+}
+
+// Layers implements v1.Image
+func (i *uncompressedImageExtender) Layers() ([]v1.Layer, error) {
+ diffIDs, err := DiffIDs(i)
+ if err != nil {
+ return nil, err
+ }
+ ls := make([]v1.Layer, 0, len(diffIDs))
+ for _, h := range diffIDs {
+ l, err := i.LayerByDiffID(h)
+ if err != nil {
+ return nil, err
+ }
+ ls = append(ls, l)
+ }
+ return ls, nil
+}
+
+// LayerByDiffID implements v1.Image
+func (i *uncompressedImageExtender) LayerByDiffID(diffID v1.Hash) (v1.Layer, error) {
+ ul, err := i.UncompressedImageCore.LayerByDiffID(diffID)
+ if err != nil {
+ return nil, err
+ }
+ return UncompressedToLayer(ul)
+}
+
+// LayerByDigest implements v1.Image
+func (i *uncompressedImageExtender) LayerByDigest(h v1.Hash) (v1.Layer, error) {
+ diffID, err := BlobToDiffID(i, h)
+ if err != nil {
+ return nil, err
+ }
+ return i.LayerByDiffID(diffID)
+}
diff --git a/vendor/github.com/google/go-containerregistry/pkg/v1/partial/with.go b/vendor/github.com/google/go-containerregistry/pkg/v1/partial/with.go
new file mode 100644
index 0000000000..3a5c615722
--- /dev/null
+++ b/vendor/github.com/google/go-containerregistry/pkg/v1/partial/with.go
@@ -0,0 +1,389 @@
+// Copyright 2018 Google LLC All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package partial
+
+import (
+ "bytes"
+ "encoding/json"
+ "fmt"
+ "io"
+ "io/ioutil"
+
+ v1 "github.com/google/go-containerregistry/pkg/v1"
+ "github.com/google/go-containerregistry/pkg/v1/types"
+)
+
+// WithRawConfigFile defines the subset of v1.Image used by these helper methods
+type WithRawConfigFile interface {
+ // RawConfigFile returns the serialized bytes of this image's config file.
+ RawConfigFile() ([]byte, error)
+}
+
+// ConfigFile is a helper for implementing v1.Image
+func ConfigFile(i WithRawConfigFile) (*v1.ConfigFile, error) {
+ b, err := i.RawConfigFile()
+ if err != nil {
+ return nil, err
+ }
+ return v1.ParseConfigFile(bytes.NewReader(b))
+}
+
+// ConfigName is a helper for implementing v1.Image
+func ConfigName(i WithRawConfigFile) (v1.Hash, error) {
+ b, err := i.RawConfigFile()
+ if err != nil {
+ return v1.Hash{}, err
+ }
+ h, _, err := v1.SHA256(bytes.NewReader(b))
+ return h, err
+}
+
+type configLayer struct {
+ hash v1.Hash
+ content []byte
+}
+
+// Digest implements v1.Layer
+func (cl *configLayer) Digest() (v1.Hash, error) {
+ return cl.hash, nil
+}
+
+// DiffID implements v1.Layer
+func (cl *configLayer) DiffID() (v1.Hash, error) {
+ return cl.hash, nil
+}
+
+// Uncompressed implements v1.Layer
+func (cl *configLayer) Uncompressed() (io.ReadCloser, error) {
+ return ioutil.NopCloser(bytes.NewBuffer(cl.content)), nil
+}
+
+// Compressed implements v1.Layer
+func (cl *configLayer) Compressed() (io.ReadCloser, error) {
+ return ioutil.NopCloser(bytes.NewBuffer(cl.content)), nil
+}
+
+// Size implements v1.Layer
+func (cl *configLayer) Size() (int64, error) {
+ return int64(len(cl.content)), nil
+}
+
+func (cl *configLayer) MediaType() (types.MediaType, error) {
+ // Defaulting this to OCIConfigJSON as it should remain
+ // backwards compatible with DockerConfigJSON
+ return types.OCIConfigJSON, nil
+}
+
+var _ v1.Layer = (*configLayer)(nil)
+
+// ConfigLayer implements v1.Layer from the raw config bytes.
+// This is so that clients (e.g. remote) can access the config as a blob.
+func ConfigLayer(i WithRawConfigFile) (v1.Layer, error) {
+ h, err := ConfigName(i)
+ if err != nil {
+ return nil, err
+ }
+ rcfg, err := i.RawConfigFile()
+ if err != nil {
+ return nil, err
+ }
+ return &configLayer{
+ hash: h,
+ content: rcfg,
+ }, nil
+}
+
+// WithConfigFile defines the subset of v1.Image used by these helper methods
+type WithConfigFile interface {
+ // ConfigFile returns this image's config file.
+ ConfigFile() (*v1.ConfigFile, error)
+}
+
+// DiffIDs is a helper for implementing v1.Image
+func DiffIDs(i WithConfigFile) ([]v1.Hash, error) {
+ cfg, err := i.ConfigFile()
+ if err != nil {
+ return nil, err
+ }
+ return cfg.RootFS.DiffIDs, nil
+}
+
+// RawConfigFile is a helper for implementing v1.Image
+func RawConfigFile(i WithConfigFile) ([]byte, error) {
+ cfg, err := i.ConfigFile()
+ if err != nil {
+ return nil, err
+ }
+ return json.Marshal(cfg)
+}
+
+// WithRawManifest defines the subset of v1.Image used by these helper methods
+type WithRawManifest interface {
+ // RawManifest returns the serialized bytes of this image's manifest.
+ RawManifest() ([]byte, error)
+}
+
+// Digest is a helper for implementing v1.Image
+func Digest(i WithRawManifest) (v1.Hash, error) {
+ mb, err := i.RawManifest()
+ if err != nil {
+ return v1.Hash{}, err
+ }
+ digest, _, err := v1.SHA256(bytes.NewReader(mb))
+ return digest, err
+}
+
+// Manifest is a helper for implementing v1.Image
+func Manifest(i WithRawManifest) (*v1.Manifest, error) {
+ b, err := i.RawManifest()
+ if err != nil {
+ return nil, err
+ }
+ return v1.ParseManifest(bytes.NewReader(b))
+}
+
+// WithManifest defines the subset of v1.Image used by these helper methods
+type WithManifest interface {
+ // Manifest returns this image's Manifest object.
+ Manifest() (*v1.Manifest, error)
+}
+
+// RawManifest is a helper for implementing v1.Image
+func RawManifest(i WithManifest) ([]byte, error) {
+ m, err := i.Manifest()
+ if err != nil {
+ return nil, err
+ }
+ return json.Marshal(m)
+}
+
+// Size is a helper for implementing v1.Image
+func Size(i WithRawManifest) (int64, error) {
+ b, err := i.RawManifest()
+ if err != nil {
+ return -1, err
+ }
+ return int64(len(b)), nil
+}
+
+// FSLayers is a helper for implementing v1.Image
+func FSLayers(i WithManifest) ([]v1.Hash, error) {
+ m, err := i.Manifest()
+ if err != nil {
+ return nil, err
+ }
+ fsl := make([]v1.Hash, len(m.Layers))
+ for i, l := range m.Layers {
+ fsl[i] = l.Digest
+ }
+ return fsl, nil
+}
+
+// BlobSize is a helper for implementing v1.Image
+func BlobSize(i WithManifest, h v1.Hash) (int64, error) {
+ d, err := BlobDescriptor(i, h)
+ if err != nil {
+ return -1, err
+ }
+ return d.Size, nil
+}
+
+// BlobDescriptor is a helper for implementing v1.Image
+func BlobDescriptor(i WithManifest, h v1.Hash) (*v1.Descriptor, error) {
+ m, err := i.Manifest()
+ if err != nil {
+ return nil, err
+ }
+
+ if m.Config.Digest == h {
+ return &m.Config, nil
+ }
+
+ for _, l := range m.Layers {
+ if l.Digest == h {
+ return &l, nil
+ }
+ }
+ return nil, fmt.Errorf("blob %v not found", h)
+}
+
+// WithManifestAndConfigFile defines the subset of v1.Image used by these helper methods
+type WithManifestAndConfigFile interface {
+ WithConfigFile
+
+ // Manifest returns this image's Manifest object.
+ Manifest() (*v1.Manifest, error)
+}
+
+// BlobToDiffID is a helper for mapping between compressed
+// and uncompressed blob hashes.
+func BlobToDiffID(i WithManifestAndConfigFile, h v1.Hash) (v1.Hash, error) {
+ blobs, err := FSLayers(i)
+ if err != nil {
+ return v1.Hash{}, err
+ }
+ diffIDs, err := DiffIDs(i)
+ if err != nil {
+ return v1.Hash{}, err
+ }
+ if len(blobs) != len(diffIDs) {
+ return v1.Hash{}, fmt.Errorf("mismatched fs layers (%d) and diff ids (%d)", len(blobs), len(diffIDs))
+ }
+ for i, blob := range blobs {
+ if blob == h {
+ return diffIDs[i], nil
+ }
+ }
+ return v1.Hash{}, fmt.Errorf("unknown blob %v", h)
+}
+
+// DiffIDToBlob is a helper for mapping between uncompressed
+// and compressed blob hashes.
+func DiffIDToBlob(wm WithManifestAndConfigFile, h v1.Hash) (v1.Hash, error) {
+ blobs, err := FSLayers(wm)
+ if err != nil {
+ return v1.Hash{}, err
+ }
+ diffIDs, err := DiffIDs(wm)
+ if err != nil {
+ return v1.Hash{}, err
+ }
+ if len(blobs) != len(diffIDs) {
+ return v1.Hash{}, fmt.Errorf("mismatched fs layers (%d) and diff ids (%d)", len(blobs), len(diffIDs))
+ }
+ for i, diffID := range diffIDs {
+ if diffID == h {
+ return blobs[i], nil
+ }
+ }
+ return v1.Hash{}, fmt.Errorf("unknown diffID %v", h)
+}
+
+// WithDiffID defines the subset of v1.Layer for exposing the DiffID method.
+type WithDiffID interface {
+ DiffID() (v1.Hash, error)
+}
+
+// withDescriptor allows partial layer implementations to provide a layer
+// descriptor to the partial image manifest builder. This allows partial
+// uncompressed layers to provide foreign layer metadata like URLs to the
+// uncompressed image manifest.
+type withDescriptor interface {
+ Descriptor() (*v1.Descriptor, error)
+}
+
+// Describable represents something for which we can produce a v1.Descriptor.
+type Describable interface {
+ Digest() (v1.Hash, error)
+ MediaType() (types.MediaType, error)
+ Size() (int64, error)
+}
+
+// Descriptor returns a v1.Descriptor given a Describable. It also encodes
+// some logic for unwrapping things that have been wrapped by
+// CompressedToLayer, UncompressedToLayer, CompressedToImage, or
+// UncompressedToImage.
+func Descriptor(d Describable) (*v1.Descriptor, error) {
+ // If Describable implements Descriptor itself, return that.
+ if wd, ok := unwrap(d).(withDescriptor); ok {
+ return wd.Descriptor()
+ }
+
+ // If all else fails, compute the descriptor from the individual methods.
+ var (
+ desc v1.Descriptor
+ err error
+ )
+
+ if desc.Size, err = d.Size(); err != nil {
+ return nil, err
+ }
+ if desc.Digest, err = d.Digest(); err != nil {
+ return nil, err
+ }
+ if desc.MediaType, err = d.MediaType(); err != nil {
+ return nil, err
+ }
+
+ return &desc, nil
+}
+
+type withUncompressedSize interface {
+ UncompressedSize() (int64, error)
+}
+
+// UncompressedSize returns the size of the Uncompressed layer. If the
+// underlying implementation doesn't implement UncompressedSize directly,
+// this will compute the uncompressedSize by reading everything returned
+// by Uncompressed(). This is potentially expensive and may consume the contents
+// for streaming layers.
+func UncompressedSize(l v1.Layer) (int64, error) {
+ // If the layer implements UncompressedSize itself, return that.
+ if wus, ok := unwrap(l).(withUncompressedSize); ok {
+ return wus.UncompressedSize()
+ }
+
+ // The layer doesn't implement UncompressedSize, we need to compute it.
+ rc, err := l.Uncompressed()
+ if err != nil {
+ return -1, err
+ }
+ defer rc.Close()
+
+ return io.Copy(ioutil.Discard, rc)
+}
+
+type withExists interface {
+ Exists() (bool, error)
+}
+
+// Exists checks to see if a layer exists. This is a hack to work around the
+// mistakes of the partial package. Don't use this.
+func Exists(l v1.Layer) (bool, error) {
+ // If the layer implements Exists itself, return that.
+ if we, ok := unwrap(l).(withExists); ok {
+ return we.Exists()
+ }
+
+ // The layer doesn't implement Exists, so we hope that calling Compressed()
+ // is enough to trigger an error if the layer does not exist.
+ rc, err := l.Compressed()
+ if err != nil {
+ return false, err
+ }
+ defer rc.Close()
+
+ // We may want to try actually reading a single byte, but if we need to do
+ // that, we should just fix this hack.
+ return true, nil
+}
+
+// Recursively unwrap our wrappers so that we can check for the original implementation.
+// We might want to expose this?
+func unwrap(i interface{}) interface{} {
+ if ule, ok := i.(*uncompressedLayerExtender); ok {
+ return unwrap(ule.UncompressedLayer)
+ }
+ if cle, ok := i.(*compressedLayerExtender); ok {
+ return unwrap(cle.CompressedLayer)
+ }
+ if uie, ok := i.(*uncompressedImageExtender); ok {
+ return unwrap(uie.UncompressedImageCore)
+ }
+ if cie, ok := i.(*compressedImageExtender); ok {
+ return unwrap(cie.CompressedImageCore)
+ }
+ return i
+}
diff --git a/vendor/github.com/google/go-containerregistry/pkg/v1/platform.go b/vendor/github.com/google/go-containerregistry/pkg/v1/platform.go
new file mode 100644
index 0000000000..a586ab3675
--- /dev/null
+++ b/vendor/github.com/google/go-containerregistry/pkg/v1/platform.go
@@ -0,0 +1,59 @@
+// Copyright 2018 Google LLC All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package v1
+
+import (
+ "sort"
+)
+
+// Platform represents the target os/arch for an image.
+type Platform struct {
+ Architecture string `json:"architecture"`
+ OS string `json:"os"`
+ OSVersion string `json:"os.version,omitempty"`
+ OSFeatures []string `json:"os.features,omitempty"`
+ Variant string `json:"variant,omitempty"`
+ Features []string `json:"features,omitempty"`
+}
+
+// Equals returns true if the given platform is semantically equivalent to this one.
+// The order of Features and OSFeatures is not important.
+func (p Platform) Equals(o Platform) bool {
+ return p.OS == o.OS && p.Architecture == o.Architecture && p.Variant == o.Variant && p.OSVersion == o.OSVersion &&
+ stringSliceEqualIgnoreOrder(p.OSFeatures, o.OSFeatures) && stringSliceEqualIgnoreOrder(p.Features, o.Features)
+}
+
+// stringSliceEqual compares 2 string slices and returns if their contents are identical.
+func stringSliceEqual(a, b []string) bool {
+ if len(a) != len(b) {
+ return false
+ }
+ for i, elm := range a {
+ if elm != b[i] {
+ return false
+ }
+ }
+ return true
+}
+
+// stringSliceEqualIgnoreOrder compares 2 string slices and returns if their contents are identical, ignoring order
+func stringSliceEqualIgnoreOrder(a, b []string) bool {
+ a1, b1 := a[:], b[:]
+ if a1 != nil && b1 != nil {
+ sort.Strings(a1)
+ sort.Strings(b1)
+ }
+ return stringSliceEqual(a1, b1)
+}
diff --git a/vendor/github.com/google/go-containerregistry/pkg/v1/progress.go b/vendor/github.com/google/go-containerregistry/pkg/v1/progress.go
new file mode 100644
index 0000000000..844f04d937
--- /dev/null
+++ b/vendor/github.com/google/go-containerregistry/pkg/v1/progress.go
@@ -0,0 +1,25 @@
+// Copyright 2020 Google LLC All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package v1
+
+// Update is a representation of transfer progress. Some functions
+// in this module can take a channel to which updates will be sent while a
+// transfer is in progress.
+// +k8s:deepcopy-gen=false
+type Update struct {
+ Total int64
+ Complete int64
+ Error error
+}
diff --git a/vendor/github.com/google/go-containerregistry/pkg/v1/remote/BUILD.bazel b/vendor/github.com/google/go-containerregistry/pkg/v1/remote/BUILD.bazel
new file mode 100644
index 0000000000..023f9013dc
--- /dev/null
+++ b/vendor/github.com/google/go-containerregistry/pkg/v1/remote/BUILD.bazel
@@ -0,0 +1,37 @@
+load("@io_bazel_rules_go//go:def.bzl", "go_library")
+
+go_library(
+ name = "go_default_library",
+ srcs = [
+ "catalog.go",
+ "check.go",
+ "delete.go",
+ "descriptor.go",
+ "doc.go",
+ "image.go",
+ "index.go",
+ "layer.go",
+ "list.go",
+ "mount.go",
+ "multi_write.go",
+ "options.go",
+ "write.go",
+ ],
+ importmap = "k8s.io/kops/vendor/github.com/google/go-containerregistry/pkg/v1/remote",
+ importpath = "github.com/google/go-containerregistry/pkg/v1/remote",
+ visibility = ["//visibility:public"],
+ deps = [
+ "//vendor/github.com/google/go-containerregistry/internal/redact:go_default_library",
+ "//vendor/github.com/google/go-containerregistry/internal/retry:go_default_library",
+ "//vendor/github.com/google/go-containerregistry/internal/verify:go_default_library",
+ "//vendor/github.com/google/go-containerregistry/pkg/authn:go_default_library",
+ "//vendor/github.com/google/go-containerregistry/pkg/logs:go_default_library",
+ "//vendor/github.com/google/go-containerregistry/pkg/name:go_default_library",
+ "//vendor/github.com/google/go-containerregistry/pkg/v1:go_default_library",
+ "//vendor/github.com/google/go-containerregistry/pkg/v1/partial:go_default_library",
+ "//vendor/github.com/google/go-containerregistry/pkg/v1/remote/transport:go_default_library",
+ "//vendor/github.com/google/go-containerregistry/pkg/v1/stream:go_default_library",
+ "//vendor/github.com/google/go-containerregistry/pkg/v1/types:go_default_library",
+ "//vendor/golang.org/x/sync/errgroup:go_default_library",
+ ],
+)
diff --git a/vendor/github.com/google/go-containerregistry/pkg/v1/remote/README.md b/vendor/github.com/google/go-containerregistry/pkg/v1/remote/README.md
new file mode 100644
index 0000000000..c1e81b310b
--- /dev/null
+++ b/vendor/github.com/google/go-containerregistry/pkg/v1/remote/README.md
@@ -0,0 +1,117 @@
+# `remote`
+
+[GoDoc](https://godoc.org/github.com/google/go-containerregistry/pkg/v1/remote)
+
+The `remote` package implements a client for accessing a registry,
+per the [OCI distribution spec](https://github.com/opencontainers/distribution-spec/blob/master/spec.md).
+
+It leans heavily on the lower level [`transport`](/pkg/v1/remote/transport) package, which handles the
+authentication handshake and structured errors.
+
+## Usage
+
+```go
+package main
+
+import (
+ "github.com/google/go-containerregistry/pkg/authn"
+ "github.com/google/go-containerregistry/pkg/name"
+ "github.com/google/go-containerregistry/pkg/v1/remote"
+)
+
+func main() {
+ ref, err := name.ParseReference("gcr.io/google-containers/pause")
+ if err != nil {
+ panic(err)
+ }
+
+ img, err := remote.Image(ref, remote.WithAuthFromKeychain(authn.DefaultKeychain))
+ if err != nil {
+ panic(err)
+ }
+
+ // do stuff with img
+}
+```
+
+## Structure
+
+## Background
+
+There are a lot of confusingly similar terms that come up when talking about images in registries.
+
+### Anatomy of an image
+
+In general...
+
+* A tag refers to an image manifest.
+* An image manifest references a config file and an ordered list of _compressed_ layers by sha256 digest.
+* A config file references an ordered list of _uncompressed_ layers by sha256 digest and contains runtime configuration.
+* The sha256 digest of the config file is the [image id](https://github.com/opencontainers/image-spec/blob/master/config.md#imageid) for the image.
+
+For example, in an image with two layers, the manifest references the config file and both
+compressed layers by digest, while the config file references both uncompressed layers by digest.
+
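+A rough sketch of inspecting these relationships with this package (the image
+reference is illustrative, and errors are handled with `panic` only for brevity):
+
+```go
+package main
+
+import (
+	"fmt"
+
+	"github.com/google/go-containerregistry/pkg/name"
+	"github.com/google/go-containerregistry/pkg/v1/remote"
+)
+
+func main() {
+	ref, err := name.ParseReference("gcr.io/google-containers/pause")
+	if err != nil {
+		panic(err)
+	}
+
+	img, err := remote.Image(ref) // resolves the tag to an image manifest
+	if err != nil {
+		panic(err)
+	}
+
+	manifest, err := img.Manifest() // references the config file and compressed layers by digest
+	if err != nil {
+		panic(err)
+	}
+	config, err := img.ConfigFile() // references the uncompressed layers by digest (diff IDs)
+	if err != nil {
+		panic(err)
+	}
+	imageID, err := img.ConfigName() // the sha256 of the config file, i.e. the image id
+	if err != nil {
+		panic(err)
+	}
+
+	fmt.Println("image id:", imageID)
+	fmt.Println("config digest:", manifest.Config.Digest)
+	for i, layer := range manifest.Layers {
+		fmt.Println("compressed:", layer.Digest, "uncompressed:", config.RootFS.DiffIDs[i])
+	}
+}
+```
+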
+### Anatomy of an index
+
+In the normal case, an [index](https://github.com/opencontainers/image-spec/blob/master/image-index.md) is used to represent a multi-platform image.
+This was the original use case for a [manifest
+list](https://docs.docker.com/registry/spec/manifest-v2-2/#manifest-list).
+
+It is possible for an index to reference another index, per the OCI
+[image-spec](https://github.com/opencontainers/image-spec/blob/master/media-types.md#compatibility-matrix).
+In theory, both an image and image index can reference arbitrary things via
+[descriptors](https://github.com/opencontainers/image-spec/blob/master/descriptor.md),
+e.g. see the [image layout
+example](https://github.com/opencontainers/image-spec/blob/master/image-layout.md#index-example),
+which references an application/xml file from an image index.
+
+Using a recursive index like this might not be possible with all registries,
+but this flexibility allows for some interesting applications, e.g. the
+[OCI Artifacts](https://github.com/opencontainers/artifacts) effort.
+
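+A rough sketch of resolving a multi-platform index with this package (the
+reference and platform are illustrative and assume the tag points at an index):
+
+```go
+package main
+
+import (
+	"fmt"
+
+	"github.com/google/go-containerregistry/pkg/name"
+	v1 "github.com/google/go-containerregistry/pkg/v1"
+	"github.com/google/go-containerregistry/pkg/v1/remote"
+)
+
+func main() {
+	ref, err := name.ParseReference("gcr.io/google-containers/pause")
+	if err != nil {
+		panic(err)
+	}
+
+	// List the children recorded in the index manifest.
+	idx, err := remote.Index(ref)
+	if err != nil {
+		panic(err)
+	}
+	im, err := idx.IndexManifest()
+	if err != nil {
+		panic(err)
+	}
+	for _, desc := range im.Manifests {
+		fmt.Println(desc.Digest, desc.MediaType, desc.Platform)
+	}
+
+	// Or let the library resolve the index to a child image for one platform.
+	img, err := remote.Image(ref, remote.WithPlatform(v1.Platform{OS: "linux", Architecture: "arm64"}))
+	if err != nil {
+		panic(err)
+	}
+	digest, err := img.Digest()
+	if err != nil {
+		panic(err)
+	}
+	fmt.Println("resolved child image:", digest)
+}
+```
+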
+### Anatomy of an image upload
+
+The structure of an image requires a delicate ordering when uploading an image to a registry.
+The data flows between the various artifacts, slightly simplified, as follows:
+
+* A config file references the uncompressed layer contents by sha256.
+* A manifest references the compressed layer contents by sha256 and the size of the layer.
+* A manifest references the config file contents by sha256 and the size of the file.
+
+It follows that during an upload, we need to upload layers before the config file,
+and we need to upload the config file before the manifest.
+
+Sometimes we know all of this information ahead of time (e.g. when copying from remote.Image),
+so the ordering is less important.
+
+In other cases, e.g. when using a [`stream.Layer`](https://godoc.org/github.com/google/go-containerregistry/pkg/v1/stream#Layer),
+we can't compute anything until we have already uploaded the layer, so we need to be careful about ordering.
+
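+In practice `remote.Write` takes care of this ordering. A minimal sketch of
+pulling an image and pushing it somewhere else (the destination reference is
+illustrative, and pushing requires credentials for that registry):
+
+```go
+package main
+
+import (
+	"github.com/google/go-containerregistry/pkg/authn"
+	"github.com/google/go-containerregistry/pkg/name"
+	"github.com/google/go-containerregistry/pkg/v1/remote"
+)
+
+func main() {
+	src, err := name.ParseReference("gcr.io/google-containers/pause")
+	if err != nil {
+		panic(err)
+	}
+	dst, err := name.ParseReference("registry.example.com/mirror/pause:latest")
+	if err != nil {
+		panic(err)
+	}
+
+	img, err := remote.Image(src)
+	if err != nil {
+		panic(err)
+	}
+
+	// Write uploads the layers, then the config blob, then the manifest.
+	if err := remote.Write(dst, img, remote.WithAuthFromKeychain(authn.DefaultKeychain)); err != nil {
+		panic(err)
+	}
+}
+```
+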
+## Caveats
+
+### schema 1
+
+This package does not support schema 1 images (see [`#377`](https://github.com/google/go-containerregistry/issues/377)).
+However, it's possible to do _something_ useful with them via [`remote.Get`](https://godoc.org/github.com/google/go-containerregistry/pkg/v1/remote#Get),
+which doesn't try to interpret what is returned by the registry.
+
+[`crane.Copy`](https://godoc.org/github.com/google/go-containerregistry/pkg/crane#Copy) takes advantage of this to implement support for copying schema 1 images;
+see [here](https://github.com/google/go-containerregistry/blob/main/pkg/internal/legacy/copy.go).
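+
+A rough sketch of fetching such a manifest without interpreting it, via `remote.Get`
+(continuing from the `ref` in the Usage example above, with `"fmt"` added to its imports):
+
+```go
+desc, err := remote.Get(ref)
+if err != nil {
+	panic(err)
+}
+
+// The raw manifest bytes and descriptor are available even for schema 1,
+// although desc.Image() will refuse to interpret them.
+fmt.Println(desc.MediaType, len(desc.Manifest), "bytes")
+```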
diff --git a/vendor/github.com/google/go-containerregistry/pkg/v1/remote/catalog.go b/vendor/github.com/google/go-containerregistry/pkg/v1/remote/catalog.go
new file mode 100644
index 0000000000..21b5dbbaaa
--- /dev/null
+++ b/vendor/github.com/google/go-containerregistry/pkg/v1/remote/catalog.go
@@ -0,0 +1,151 @@
+// Copyright 2019 Google LLC All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package remote
+
+import (
+ "context"
+ "encoding/json"
+ "fmt"
+ "net/http"
+ "net/url"
+
+ "github.com/google/go-containerregistry/pkg/name"
+ "github.com/google/go-containerregistry/pkg/v1/remote/transport"
+)
+
+type catalog struct {
+ Repos []string `json:"repositories"`
+}
+
+// CatalogPage calls /_catalog, returning the list of repositories on the registry.
+func CatalogPage(target name.Registry, last string, n int, options ...Option) ([]string, error) {
+ o, err := makeOptions(target, options...)
+ if err != nil {
+ return nil, err
+ }
+
+ scopes := []string{target.Scope(transport.PullScope)}
+ tr, err := transport.NewWithContext(o.context, target, o.auth, o.transport, scopes)
+ if err != nil {
+ return nil, err
+ }
+
+ query := fmt.Sprintf("last=%s&n=%d", url.QueryEscape(last), n)
+
+ uri := url.URL{
+ Scheme: target.Scheme(),
+ Host: target.RegistryStr(),
+ Path: "/v2/_catalog",
+ RawQuery: query,
+ }
+
+ client := http.Client{Transport: tr}
+ req, err := http.NewRequest(http.MethodGet, uri.String(), nil)
+ if err != nil {
+ return nil, err
+ }
+ resp, err := client.Do(req.WithContext(o.context))
+ if err != nil {
+ return nil, err
+ }
+ defer resp.Body.Close()
+
+ if err := transport.CheckError(resp, http.StatusOK); err != nil {
+ return nil, err
+ }
+
+ var parsed catalog
+ if err := json.NewDecoder(resp.Body).Decode(&parsed); err != nil {
+ return nil, err
+ }
+
+ return parsed.Repos, nil
+}
+
+// Catalog calls /_catalog, returning the list of repositories on the registry.
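+//
+// For example (the registry name is illustrative):
+//
+//	reg, _ := name.NewRegistry("registry.example.com")
+//	repos, err := Catalog(context.Background(), reg)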
+func Catalog(ctx context.Context, target name.Registry, options ...Option) ([]string, error) {
+ o, err := makeOptions(target, options...)
+ if err != nil {
+ return nil, err
+ }
+
+ scopes := []string{target.Scope(transport.PullScope)}
+ tr, err := transport.NewWithContext(o.context, target, o.auth, o.transport, scopes)
+ if err != nil {
+ return nil, err
+ }
+
+ uri := &url.URL{
+ Scheme: target.Scheme(),
+ Host: target.RegistryStr(),
+ Path: "/v2/_catalog",
+ RawQuery: "n=10000",
+ }
+
+ client := http.Client{Transport: tr}
+
+ // WithContext overrides the ctx passed directly.
+ if o.context != context.Background() {
+ ctx = o.context
+ }
+
+ var (
+ parsed catalog
+ repoList []string
+ )
+
+ // get responses until there is no next page
+ for {
+ select {
+ case <-ctx.Done():
+ return nil, ctx.Err()
+ default:
+ }
+
+ req, err := http.NewRequest("GET", uri.String(), nil)
+ if err != nil {
+ return nil, err
+ }
+ req = req.WithContext(ctx)
+
+ resp, err := client.Do(req)
+ if err != nil {
+ return nil, err
+ }
+
+ if err := transport.CheckError(resp, http.StatusOK); err != nil {
+ return nil, err
+ }
+
+ if err := json.NewDecoder(resp.Body).Decode(&parsed); err != nil {
+ return nil, err
+ }
+ if err := resp.Body.Close(); err != nil {
+ return nil, err
+ }
+
+ repoList = append(repoList, parsed.Repos...)
+
+ uri, err = getNextPageURL(resp)
+ if err != nil {
+ return nil, err
+ }
+ // no next page
+ if uri == nil {
+ break
+ }
+ }
+ return repoList, nil
+}
diff --git a/vendor/github.com/google/go-containerregistry/pkg/v1/remote/check.go b/vendor/github.com/google/go-containerregistry/pkg/v1/remote/check.go
new file mode 100644
index 0000000000..c841cc0580
--- /dev/null
+++ b/vendor/github.com/google/go-containerregistry/pkg/v1/remote/check.go
@@ -0,0 +1,59 @@
+package remote
+
+import (
+ "context"
+ "fmt"
+ "net/http"
+
+ "github.com/google/go-containerregistry/pkg/authn"
+ "github.com/google/go-containerregistry/pkg/name"
+ "github.com/google/go-containerregistry/pkg/v1/remote/transport"
+)
+
+// CheckPushPermission returns an error if the given keychain cannot authorize
+// a push operation to the given ref.
+//
+// This can be useful to check whether the caller has permission to push an
+// image before doing work to construct the image.
+//
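+// For example (the reference is illustrative):
+//
+//	ref, _ := name.ParseReference("registry.example.com/my/repo:latest")
+//	err := CheckPushPermission(ref, authn.DefaultKeychain, http.DefaultTransport)
+//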
+// TODO(#412): Remove the need for this method.
+func CheckPushPermission(ref name.Reference, kc authn.Keychain, t http.RoundTripper) error {
+ auth, err := kc.Resolve(ref.Context().Registry)
+ if err != nil {
+ return fmt.Errorf("resolving authorization for %v failed: %v", ref.Context().Registry, err)
+ }
+
+ scopes := []string{ref.Scope(transport.PushScope)}
+ tr, err := transport.New(ref.Context().Registry, auth, t, scopes)
+ if err != nil {
+ return fmt.Errorf("creating push check transport for %v failed: %v", ref.Context().Registry, err)
+ }
+ // TODO(jasonhall): Against GCR, just doing the token handshake is
+ // enough, but this doesn't extend to Dockerhub
+ // (https://github.com/docker/hub-feedback/issues/1771), so we actually
+ // need to initiate an upload to tell whether the credentials can
+ // authorize a push. Figure out how to return early here when we can,
+ // to avoid a roundtrip for spec-compliant registries.
+ w := writer{
+ repo: ref.Context(),
+ client: &http.Client{Transport: tr},
+ context: context.Background(),
+ }
+ loc, _, err := w.initiateUpload("", "")
+ if loc != "" {
+ // Since we're only initiating the upload to check whether we
+ // can, we should attempt to cancel it, in case initiating
+ // reserves some resources on the server. We shouldn't wait for
+ // cancelling to complete, and we don't care if it fails.
+ go w.cancelUpload(loc)
+ }
+ return err
+}
+
+func (w *writer) cancelUpload(loc string) {
+ req, err := http.NewRequest(http.MethodDelete, loc, nil)
+ if err != nil {
+ return
+ }
+ _, _ = w.client.Do(req)
+}
diff --git a/vendor/github.com/google/go-containerregistry/pkg/v1/remote/delete.go b/vendor/github.com/google/go-containerregistry/pkg/v1/remote/delete.go
new file mode 100644
index 0000000000..3b9022719c
--- /dev/null
+++ b/vendor/github.com/google/go-containerregistry/pkg/v1/remote/delete.go
@@ -0,0 +1,57 @@
+// Copyright 2018 Google LLC All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package remote
+
+import (
+ "fmt"
+ "net/http"
+ "net/url"
+
+ "github.com/google/go-containerregistry/pkg/name"
+ "github.com/google/go-containerregistry/pkg/v1/remote/transport"
+)
+
+// Delete removes the specified image reference from the remote registry.
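+//
+// For example (the reference and keychain are illustrative):
+//
+//	ref, _ := name.ParseReference("registry.example.com/my/repo:old")
+//	err := Delete(ref, WithAuthFromKeychain(authn.DefaultKeychain))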
+func Delete(ref name.Reference, options ...Option) error {
+ o, err := makeOptions(ref.Context(), options...)
+ if err != nil {
+ return err
+ }
+ scopes := []string{ref.Scope(transport.DeleteScope)}
+ tr, err := transport.NewWithContext(o.context, ref.Context().Registry, o.auth, o.transport, scopes)
+ if err != nil {
+ return err
+ }
+ c := &http.Client{Transport: tr}
+
+ u := url.URL{
+ Scheme: ref.Context().Registry.Scheme(),
+ Host: ref.Context().RegistryStr(),
+ Path: fmt.Sprintf("/v2/%s/manifests/%s", ref.Context().RepositoryStr(), ref.Identifier()),
+ }
+
+ req, err := http.NewRequest(http.MethodDelete, u.String(), nil)
+ if err != nil {
+ return err
+ }
+
+ resp, err := c.Do(req.WithContext(o.context))
+ if err != nil {
+ return err
+ }
+ defer resp.Body.Close()
+
+ return transport.CheckError(resp, http.StatusOK, http.StatusAccepted)
+}
diff --git a/vendor/github.com/google/go-containerregistry/pkg/v1/remote/descriptor.go b/vendor/github.com/google/go-containerregistry/pkg/v1/remote/descriptor.go
new file mode 100644
index 0000000000..a13f01b68e
--- /dev/null
+++ b/vendor/github.com/google/go-containerregistry/pkg/v1/remote/descriptor.go
@@ -0,0 +1,424 @@
+// Copyright 2018 Google LLC All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package remote
+
+import (
+ "bytes"
+ "context"
+ "fmt"
+ "io"
+ "io/ioutil"
+ "net/http"
+ "net/url"
+ "strconv"
+ "strings"
+
+ "github.com/google/go-containerregistry/internal/verify"
+ "github.com/google/go-containerregistry/pkg/logs"
+ "github.com/google/go-containerregistry/pkg/name"
+ v1 "github.com/google/go-containerregistry/pkg/v1"
+ "github.com/google/go-containerregistry/pkg/v1/partial"
+ "github.com/google/go-containerregistry/pkg/v1/remote/transport"
+ "github.com/google/go-containerregistry/pkg/v1/types"
+)
+
+// ErrSchema1 indicates that we received a schema1 manifest from the registry.
+// This library doesn't have plans to support this legacy image format:
+// https://github.com/google/go-containerregistry/issues/377
+type ErrSchema1 struct {
+ schema string
+}
+
+// newErrSchema1 returns an ErrSchema1 with the unexpected MediaType.
+func newErrSchema1(schema types.MediaType) error {
+ return &ErrSchema1{
+ schema: string(schema),
+ }
+}
+
+// Error implements error.
+func (e *ErrSchema1) Error() string {
+ return fmt.Sprintf("unsupported MediaType: %q, see https://github.com/google/go-containerregistry/issues/377", e.schema)
+}
+
+// Descriptor provides access to metadata about a remote artifact and accessors
+// for efficiently converting it into a v1.Image or v1.ImageIndex.
+type Descriptor struct {
+ fetcher
+ v1.Descriptor
+ Manifest []byte
+
+// So we can share this implementation with Image().
+ platform v1.Platform
+}
+
+// RawManifest exists to satisfy the Taggable interface.
+func (d *Descriptor) RawManifest() ([]byte, error) {
+ return d.Manifest, nil
+}
+
+// Get returns a remote.Descriptor for the given reference. The response from
+// the registry is left un-interpreted, for the most part. This is useful for
+// querying what kind of artifact a reference represents.
+//
+// See Head if you don't need the response body.
+func Get(ref name.Reference, options ...Option) (*Descriptor, error) {
+ acceptable := []types.MediaType{
+ // Just to look at them.
+ types.DockerManifestSchema1,
+ types.DockerManifestSchema1Signed,
+ }
+ acceptable = append(acceptable, acceptableImageMediaTypes...)
+ acceptable = append(acceptable, acceptableIndexMediaTypes...)
+ return get(ref, acceptable, options...)
+}
+
+// Head returns a v1.Descriptor for the given reference by issuing a HEAD
+// request.
+//
+// Note that the server response will not have a body, so any errors encountered
+// should be retried with Get to get more details.
+func Head(ref name.Reference, options ...Option) (*v1.Descriptor, error) {
+ acceptable := []types.MediaType{
+ // Just to look at them.
+ types.DockerManifestSchema1,
+ types.DockerManifestSchema1Signed,
+ }
+ acceptable = append(acceptable, acceptableImageMediaTypes...)
+ acceptable = append(acceptable, acceptableIndexMediaTypes...)
+
+ o, err := makeOptions(ref.Context(), options...)
+ if err != nil {
+ return nil, err
+ }
+
+ f, err := makeFetcher(ref, o)
+ if err != nil {
+ return nil, err
+ }
+
+ return f.headManifest(ref, acceptable)
+}
+
+// Handle options and fetch the manifest with the acceptable MediaTypes in the
+// Accept header.
+func get(ref name.Reference, acceptable []types.MediaType, options ...Option) (*Descriptor, error) {
+ o, err := makeOptions(ref.Context(), options...)
+ if err != nil {
+ return nil, err
+ }
+ f, err := makeFetcher(ref, o)
+ if err != nil {
+ return nil, err
+ }
+ b, desc, err := f.fetchManifest(ref, acceptable)
+ if err != nil {
+ return nil, err
+ }
+ return &Descriptor{
+ fetcher: *f,
+ Manifest: b,
+ Descriptor: *desc,
+ platform: o.platform,
+ }, nil
+}
+
+// Image converts the Descriptor into a v1.Image.
+//
+// If the fetched artifact is already an image, it will just return it.
+//
+// If the fetched artifact is an index, it will attempt to resolve the index to
+// a child image with the appropriate platform.
+//
+// See WithPlatform to set the desired platform.
+func (d *Descriptor) Image() (v1.Image, error) {
+ switch d.MediaType {
+ case types.DockerManifestSchema1, types.DockerManifestSchema1Signed:
+ // We don't care to support schema 1 images:
+ // https://github.com/google/go-containerregistry/issues/377
+ return nil, newErrSchema1(d.MediaType)
+ case types.OCIImageIndex, types.DockerManifestList:
+ // We want an image but the registry has an index, resolve it to an image.
+ return d.remoteIndex().imageByPlatform(d.platform)
+ case types.OCIManifestSchema1, types.DockerManifestSchema2:
+ // These are expected. Enumerated here to allow a default case.
+ default:
+ // We could just return an error here, but some registries (e.g. static
+ // registries) don't set the Content-Type headers correctly, so instead...
+ logs.Warn.Printf("Unexpected media type for Image(): %s", d.MediaType)
+ }
+
+ // Wrap the v1.Layers returned by this v1.Image in a hint for downstream
+ // remote.Write calls to facilitate cross-repo "mounting".
+ imgCore, err := partial.CompressedToImage(d.remoteImage())
+ if err != nil {
+ return nil, err
+ }
+ return &mountableImage{
+ Image: imgCore,
+ Reference: d.Ref,
+ }, nil
+}
+
+// ImageIndex converts the Descriptor into a v1.ImageIndex.
+func (d *Descriptor) ImageIndex() (v1.ImageIndex, error) {
+ switch d.MediaType {
+ case types.DockerManifestSchema1, types.DockerManifestSchema1Signed:
+ // We don't care to support schema 1 images:
+ // https://github.com/google/go-containerregistry/issues/377
+ return nil, newErrSchema1(d.MediaType)
+ case types.OCIManifestSchema1, types.DockerManifestSchema2:
+ // We want an index but the registry has an image, nothing we can do.
+ return nil, fmt.Errorf("unexpected media type for ImageIndex(): %s; call Image() instead", d.MediaType)
+ case types.OCIImageIndex, types.DockerManifestList:
+ // These are expected.
+ default:
+ // We could just return an error here, but some registries (e.g. static
+ // registries) don't set the Content-Type headers correctly, so instead...
+ logs.Warn.Printf("Unexpected media type for ImageIndex(): %s", d.MediaType)
+ }
+ return d.remoteIndex(), nil
+}
+
+func (d *Descriptor) remoteImage() *remoteImage {
+ return &remoteImage{
+ fetcher: d.fetcher,
+ manifest: d.Manifest,
+ mediaType: d.MediaType,
+ descriptor: &d.Descriptor,
+ }
+}
+
+func (d *Descriptor) remoteIndex() *remoteIndex {
+ return &remoteIndex{
+ fetcher: d.fetcher,
+ manifest: d.Manifest,
+ mediaType: d.MediaType,
+ descriptor: &d.Descriptor,
+ }
+}
+
+// fetcher implements methods for reading from a registry.
+type fetcher struct {
+ Ref name.Reference
+ Client *http.Client
+ context context.Context
+}
+
+func makeFetcher(ref name.Reference, o *options) (*fetcher, error) {
+ tr, err := transport.NewWithContext(o.context, ref.Context().Registry, o.auth, o.transport, []string{ref.Scope(transport.PullScope)})
+ if err != nil {
+ return nil, err
+ }
+ return &fetcher{
+ Ref: ref,
+ Client: &http.Client{Transport: tr},
+ context: o.context,
+ }, nil
+}
+
+// url returns a url.URL for the specified path in the context of this remote image reference.
+func (f *fetcher) url(resource, identifier string) url.URL {
+ return url.URL{
+ Scheme: f.Ref.Context().Registry.Scheme(),
+ Host: f.Ref.Context().RegistryStr(),
+ Path: fmt.Sprintf("/v2/%s/%s/%s", f.Ref.Context().RepositoryStr(), resource, identifier),
+ }
+}
+
+func (f *fetcher) fetchManifest(ref name.Reference, acceptable []types.MediaType) ([]byte, *v1.Descriptor, error) {
+ u := f.url("manifests", ref.Identifier())
+ req, err := http.NewRequest(http.MethodGet, u.String(), nil)
+ if err != nil {
+ return nil, nil, err
+ }
+ accept := []string{}
+ for _, mt := range acceptable {
+ accept = append(accept, string(mt))
+ }
+ req.Header.Set("Accept", strings.Join(accept, ","))
+
+ resp, err := f.Client.Do(req.WithContext(f.context))
+ if err != nil {
+ return nil, nil, err
+ }
+ defer resp.Body.Close()
+
+ if err := transport.CheckError(resp, http.StatusOK); err != nil {
+ return nil, nil, err
+ }
+
+ manifest, err := ioutil.ReadAll(resp.Body)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ digest, size, err := v1.SHA256(bytes.NewReader(manifest))
+ if err != nil {
+ return nil, nil, err
+ }
+
+ mediaType := types.MediaType(resp.Header.Get("Content-Type"))
+ contentDigest, err := v1.NewHash(resp.Header.Get("Docker-Content-Digest"))
+ if err == nil && mediaType == types.DockerManifestSchema1Signed {
+ // If we can parse the digest from the header, and it's a signed schema 1
+ // manifest, let's use that for the digest to appease older registries.
+ digest = contentDigest
+ }
+
+ // Validate the digest matches what we asked for, if pulling by digest.
+ if dgst, ok := ref.(name.Digest); ok {
+ if digest.String() != dgst.DigestStr() {
+ return nil, nil, fmt.Errorf("manifest digest: %q does not match requested digest: %q for %q", digest, dgst.DigestStr(), f.Ref)
+ }
+ }
+ // Do nothing for tags; I give up.
+ //
+ // We'd like to validate that the "Docker-Content-Digest" header matches what is returned by the registry,
+ // but so many registries implement this incorrectly that it's not worth checking.
+ //
+ // For reference:
+ // https://github.com/GoogleContainerTools/kaniko/issues/298
+
+ // Return all this info since we have to calculate it anyway.
+ desc := v1.Descriptor{
+ Digest: digest,
+ Size: size,
+ MediaType: mediaType,
+ }
+
+ return manifest, &desc, nil
+}
+
+func (f *fetcher) headManifest(ref name.Reference, acceptable []types.MediaType) (*v1.Descriptor, error) {
+ u := f.url("manifests", ref.Identifier())
+ req, err := http.NewRequest(http.MethodHead, u.String(), nil)
+ if err != nil {
+ return nil, err
+ }
+ accept := []string{}
+ for _, mt := range acceptable {
+ accept = append(accept, string(mt))
+ }
+ req.Header.Set("Accept", strings.Join(accept, ","))
+
+ resp, err := f.Client.Do(req.WithContext(f.context))
+ if err != nil {
+ return nil, err
+ }
+ defer resp.Body.Close()
+
+ if err := transport.CheckError(resp, http.StatusOK); err != nil {
+ return nil, err
+ }
+
+ mth := resp.Header.Get("Content-Type")
+ if mth == "" {
+ return nil, fmt.Errorf("HEAD %s: response did not include Content-Type header", u.String())
+ }
+ mediaType := types.MediaType(mth)
+
+ lh := resp.Header.Get("Content-Length")
+ if lh == "" {
+ return nil, fmt.Errorf("HEAD %s: response did not include Content-Length header", u.String())
+ }
+ size, err := strconv.ParseInt(lh, 10, 64)
+ if err != nil {
+ return nil, err
+ }
+
+ dh := resp.Header.Get("Docker-Content-Digest")
+ if dh == "" {
+ return nil, fmt.Errorf("HEAD %s: response did not include Docker-Content-Digest header", u.String())
+ }
+ digest, err := v1.NewHash(dh)
+ if err != nil {
+ return nil, err
+ }
+
+ // Validate the digest matches what we asked for, if pulling by digest.
+ if dgst, ok := ref.(name.Digest); ok {
+ if digest.String() != dgst.DigestStr() {
+ return nil, fmt.Errorf("manifest digest: %q does not match requested digest: %q for %q", digest, dgst.DigestStr(), f.Ref)
+ }
+ }
+
+ // Return all this info since we have to calculate it anyway.
+ return &v1.Descriptor{
+ Digest: digest,
+ Size: size,
+ MediaType: mediaType,
+ }, nil
+}
+
+func (f *fetcher) fetchBlob(ctx context.Context, h v1.Hash) (io.ReadCloser, error) {
+ u := f.url("blobs", h.String())
+ req, err := http.NewRequest(http.MethodGet, u.String(), nil)
+ if err != nil {
+ return nil, err
+ }
+
+ resp, err := f.Client.Do(req.WithContext(ctx))
+ if err != nil {
+ return nil, err
+ }
+
+ if err := transport.CheckError(resp, http.StatusOK); err != nil {
+ resp.Body.Close()
+ return nil, err
+ }
+
+ return verify.ReadCloser(resp.Body, h)
+}
+
+func (f *fetcher) headBlob(h v1.Hash) (*http.Response, error) {
+ u := f.url("blobs", h.String())
+ req, err := http.NewRequest(http.MethodHead, u.String(), nil)
+ if err != nil {
+ return nil, err
+ }
+
+ resp, err := f.Client.Do(req.WithContext(f.context))
+ if err != nil {
+ return nil, err
+ }
+
+ if err := transport.CheckError(resp, http.StatusOK); err != nil {
+ resp.Body.Close()
+ return nil, err
+ }
+
+ return resp, nil
+}
+
+func (f *fetcher) blobExists(h v1.Hash) (bool, error) {
+ u := f.url("blobs", h.String())
+ req, err := http.NewRequest(http.MethodHead, u.String(), nil)
+ if err != nil {
+ return false, err
+ }
+
+ resp, err := f.Client.Do(req.WithContext(f.context))
+ if err != nil {
+ return false, err
+ }
+ defer resp.Body.Close()
+
+ if err := transport.CheckError(resp, http.StatusOK, http.StatusNotFound); err != nil {
+ return false, err
+ }
+
+ return resp.StatusCode == http.StatusOK, nil
+}
diff --git a/vendor/github.com/google/go-containerregistry/pkg/v1/remote/doc.go b/vendor/github.com/google/go-containerregistry/pkg/v1/remote/doc.go
new file mode 100644
index 0000000000..846ba07cda
--- /dev/null
+++ b/vendor/github.com/google/go-containerregistry/pkg/v1/remote/doc.go
@@ -0,0 +1,17 @@
+// Copyright 2018 Google LLC All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package remote provides facilities for reading/writing v1.Images from/to
+// a remote image registry.
+package remote
diff --git a/vendor/github.com/google/go-containerregistry/pkg/v1/remote/image.go b/vendor/github.com/google/go-containerregistry/pkg/v1/remote/image.go
new file mode 100644
index 0000000000..71739fee35
--- /dev/null
+++ b/vendor/github.com/google/go-containerregistry/pkg/v1/remote/image.go
@@ -0,0 +1,235 @@
+// Copyright 2018 Google LLC All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package remote
+
+import (
+ "io"
+ "io/ioutil"
+ "net/http"
+ "net/url"
+ "sync"
+
+ "github.com/google/go-containerregistry/internal/redact"
+ "github.com/google/go-containerregistry/internal/verify"
+ "github.com/google/go-containerregistry/pkg/name"
+ v1 "github.com/google/go-containerregistry/pkg/v1"
+ "github.com/google/go-containerregistry/pkg/v1/partial"
+ "github.com/google/go-containerregistry/pkg/v1/remote/transport"
+ "github.com/google/go-containerregistry/pkg/v1/types"
+)
+
+var acceptableImageMediaTypes = []types.MediaType{
+ types.DockerManifestSchema2,
+ types.OCIManifestSchema1,
+}
+
+// remoteImage accesses an image from a remote registry
+type remoteImage struct {
+ fetcher
+ manifestLock sync.Mutex // Protects manifest
+ manifest []byte
+ configLock sync.Mutex // Protects config
+ config []byte
+ mediaType types.MediaType
+ descriptor *v1.Descriptor
+}
+
+var _ partial.CompressedImageCore = (*remoteImage)(nil)
+
+// Image provides access to a remote image reference.
+func Image(ref name.Reference, options ...Option) (v1.Image, error) {
+ desc, err := Get(ref, options...)
+ if err != nil {
+ return nil, err
+ }
+
+ return desc.Image()
+}
+
+func (r *remoteImage) MediaType() (types.MediaType, error) {
+ if string(r.mediaType) != "" {
+ return r.mediaType, nil
+ }
+ return types.DockerManifestSchema2, nil
+}
+
+func (r *remoteImage) RawManifest() ([]byte, error) {
+ r.manifestLock.Lock()
+ defer r.manifestLock.Unlock()
+ if r.manifest != nil {
+ return r.manifest, nil
+ }
+
+ // NOTE(jonjohnsonjr): We should never get here because the public entrypoints
+ // do type-checking via remote.Descriptor. I've left this here for tests that
+ // directly instantiate a remoteImage.
+ manifest, desc, err := r.fetchManifest(r.Ref, acceptableImageMediaTypes)
+ if err != nil {
+ return nil, err
+ }
+
+ if r.descriptor == nil {
+ r.descriptor = desc
+ }
+ r.mediaType = desc.MediaType
+ r.manifest = manifest
+ return r.manifest, nil
+}
+
+func (r *remoteImage) RawConfigFile() ([]byte, error) {
+ r.configLock.Lock()
+ defer r.configLock.Unlock()
+ if r.config != nil {
+ return r.config, nil
+ }
+
+ m, err := partial.Manifest(r)
+ if err != nil {
+ return nil, err
+ }
+
+ body, err := r.fetchBlob(r.context, m.Config.Digest)
+ if err != nil {
+ return nil, err
+ }
+ defer body.Close()
+
+ r.config, err = ioutil.ReadAll(body)
+ if err != nil {
+ return nil, err
+ }
+ return r.config, nil
+}
+
+// Descriptor retains the original descriptor from an index manifest.
+// See partial.Descriptor.
+func (r *remoteImage) Descriptor() (*v1.Descriptor, error) {
+ // kind of a hack, but RawManifest does appropriate locking/memoization
+ // and makes sure r.descriptor is populated.
+ _, err := r.RawManifest()
+ return r.descriptor, err
+}
+
+// remoteImageLayer implements partial.CompressedLayer
+type remoteImageLayer struct {
+ ri *remoteImage
+ digest v1.Hash
+}
+
+// Digest implements partial.CompressedLayer
+func (rl *remoteImageLayer) Digest() (v1.Hash, error) {
+ return rl.digest, nil
+}
+
+// Compressed implements partial.CompressedLayer
+func (rl *remoteImageLayer) Compressed() (io.ReadCloser, error) {
+ urls := []url.URL{rl.ri.url("blobs", rl.digest.String())}
+
+ // Add alternative layer sources from URLs (usually none).
+ d, err := partial.BlobDescriptor(rl, rl.digest)
+ if err != nil {
+ return nil, err
+ }
+
+ // We don't want to log binary layers -- this can break terminals.
+ ctx := redact.NewContext(rl.ri.context, "omitting binary blobs from logs")
+
+ for _, s := range d.URLs {
+ u, err := url.Parse(s)
+ if err != nil {
+ return nil, err
+ }
+ urls = append(urls, *u)
+ }
+
+ // The lastErr for most pulls will be the same (the first error), but for
+ // foreign layers we'll want to surface the last one, since we try to pull
+ // from the registry first, which would often fail.
+ // TODO: Maybe we don't want to try pulling from the registry first?
+ var lastErr error
+ for _, u := range urls {
+ req, err := http.NewRequest(http.MethodGet, u.String(), nil)
+ if err != nil {
+ return nil, err
+ }
+
+ resp, err := rl.ri.Client.Do(req.WithContext(ctx))
+ if err != nil {
+ lastErr = err
+ continue
+ }
+
+ if err := transport.CheckError(resp, http.StatusOK); err != nil {
+ resp.Body.Close()
+ lastErr = err
+ continue
+ }
+
+ return verify.ReadCloser(resp.Body, rl.digest)
+ }
+
+ return nil, lastErr
+}
+
+// Manifest implements partial.WithManifest so that we can use partial.BlobSize below.
+func (rl *remoteImageLayer) Manifest() (*v1.Manifest, error) {
+ return partial.Manifest(rl.ri)
+}
+
+// MediaType implements v1.Layer
+func (rl *remoteImageLayer) MediaType() (types.MediaType, error) {
+ bd, err := partial.BlobDescriptor(rl, rl.digest)
+ if err != nil {
+ return "", err
+ }
+
+ return bd.MediaType, nil
+}
+
+// Size implements partial.CompressedLayer
+func (rl *remoteImageLayer) Size() (int64, error) {
+ // Look up the size of this digest in the manifest to avoid a request.
+ return partial.BlobSize(rl, rl.digest)
+}
+
+// ConfigFile implements partial.WithManifestAndConfigFile so that we can use partial.BlobToDiffID below.
+func (rl *remoteImageLayer) ConfigFile() (*v1.ConfigFile, error) {
+ return partial.ConfigFile(rl.ri)
+}
+
+// DiffID implements partial.WithDiffID so that we don't recompute a DiffID that we already have
+// available in our ConfigFile.
+func (rl *remoteImageLayer) DiffID() (v1.Hash, error) {
+ return partial.BlobToDiffID(rl, rl.digest)
+}
+
+// Descriptor retains the original descriptor from an image manifest.
+// See partial.Descriptor.
+func (rl *remoteImageLayer) Descriptor() (*v1.Descriptor, error) {
+ return partial.BlobDescriptor(rl, rl.digest)
+}
+
+// See partial.Exists.
+func (rl *remoteImageLayer) Exists() (bool, error) {
+ return rl.ri.blobExists(rl.digest)
+}
+
+// LayerByDigest implements partial.CompressedImageCore
+func (r *remoteImage) LayerByDigest(h v1.Hash) (partial.CompressedLayer, error) {
+ return &remoteImageLayer{
+ ri: r,
+ digest: h,
+ }, nil
+}
diff --git a/vendor/github.com/google/go-containerregistry/pkg/v1/remote/index.go b/vendor/github.com/google/go-containerregistry/pkg/v1/remote/index.go
new file mode 100644
index 0000000000..c139343527
--- /dev/null
+++ b/vendor/github.com/google/go-containerregistry/pkg/v1/remote/index.go
@@ -0,0 +1,261 @@
+// Copyright 2018 Google LLC All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package remote
+
+import (
+ "bytes"
+ "fmt"
+ "sync"
+
+ "github.com/google/go-containerregistry/pkg/name"
+ v1 "github.com/google/go-containerregistry/pkg/v1"
+ "github.com/google/go-containerregistry/pkg/v1/partial"
+ "github.com/google/go-containerregistry/pkg/v1/types"
+)
+
+var acceptableIndexMediaTypes = []types.MediaType{
+ types.DockerManifestList,
+ types.OCIImageIndex,
+}
+
+// remoteIndex accesses an index from a remote registry
+type remoteIndex struct {
+ fetcher
+ manifestLock sync.Mutex // Protects manifest
+ manifest []byte
+ mediaType types.MediaType
+ descriptor *v1.Descriptor
+}
+
+// Index provides access to a remote index reference.
+func Index(ref name.Reference, options ...Option) (v1.ImageIndex, error) {
+ desc, err := get(ref, acceptableIndexMediaTypes, options...)
+ if err != nil {
+ return nil, err
+ }
+
+ return desc.ImageIndex()
+}
+
+func (r *remoteIndex) MediaType() (types.MediaType, error) {
+ if string(r.mediaType) != "" {
+ return r.mediaType, nil
+ }
+ return types.DockerManifestList, nil
+}
+
+func (r *remoteIndex) Digest() (v1.Hash, error) {
+ return partial.Digest(r)
+}
+
+func (r *remoteIndex) Size() (int64, error) {
+ return partial.Size(r)
+}
+
+func (r *remoteIndex) RawManifest() ([]byte, error) {
+ r.manifestLock.Lock()
+ defer r.manifestLock.Unlock()
+ if r.manifest != nil {
+ return r.manifest, nil
+ }
+
+ // NOTE(jonjohnsonjr): We should never get here because the public entrypoints
+ // do type-checking via remote.Descriptor. I've left this here for tests that
+ // directly instantiate a remoteIndex.
+ manifest, desc, err := r.fetchManifest(r.Ref, acceptableIndexMediaTypes)
+ if err != nil {
+ return nil, err
+ }
+
+ if r.descriptor == nil {
+ r.descriptor = desc
+ }
+ r.mediaType = desc.MediaType
+ r.manifest = manifest
+ return r.manifest, nil
+}
+
+func (r *remoteIndex) IndexManifest() (*v1.IndexManifest, error) {
+ b, err := r.RawManifest()
+ if err != nil {
+ return nil, err
+ }
+ return v1.ParseIndexManifest(bytes.NewReader(b))
+}
+
+func (r *remoteIndex) Image(h v1.Hash) (v1.Image, error) {
+ desc, err := r.childByHash(h)
+ if err != nil {
+ return nil, err
+ }
+
+ // Descriptor.Image will handle coercing nested indexes into an Image.
+ return desc.Image()
+}
+
+// Descriptor retains the original descriptor from an index manifest.
+// See partial.Descriptor.
+func (r *remoteIndex) Descriptor() (*v1.Descriptor, error) {
+ // kind of a hack, but RawManifest does appropriate locking/memoization
+ // and makes sure r.descriptor is populated.
+ _, err := r.RawManifest()
+ return r.descriptor, err
+}
+
+func (r *remoteIndex) ImageIndex(h v1.Hash) (v1.ImageIndex, error) {
+ desc, err := r.childByHash(h)
+ if err != nil {
+ return nil, err
+ }
+ return desc.ImageIndex()
+}
+
+// Workaround for #819.
+func (r *remoteIndex) Layer(h v1.Hash) (v1.Layer, error) {
+ index, err := r.IndexManifest()
+ if err != nil {
+ return nil, err
+ }
+ for _, childDesc := range index.Manifests {
+ if h == childDesc.Digest {
+ l, err := partial.CompressedToLayer(&remoteLayer{
+ fetcher: r.fetcher,
+ digest: h,
+ })
+ if err != nil {
+ return nil, err
+ }
+ return &MountableLayer{
+ Layer: l,
+ Reference: r.Ref.Context().Digest(h.String()),
+ }, nil
+ }
+ }
+ return nil, fmt.Errorf("layer not found: %s", h)
+}
+
+func (r *remoteIndex) imageByPlatform(platform v1.Platform) (v1.Image, error) {
+ desc, err := r.childByPlatform(platform)
+ if err != nil {
+ return nil, err
+ }
+
+ // Descriptor.Image will handle coercing nested indexes into an Image.
+ return desc.Image()
+}
+
+// This naively matches the first manifest with matching platform attributes.
+//
+// We should probably use this instead:
+// github.com/containerd/containerd/platforms
+//
+// But first we'd need to migrate to:
+// github.com/opencontainers/image-spec/specs-go/v1
+func (r *remoteIndex) childByPlatform(platform v1.Platform) (*Descriptor, error) {
+ index, err := r.IndexManifest()
+ if err != nil {
+ return nil, err
+ }
+ for _, childDesc := range index.Manifests {
+ // If platform is missing from child descriptor, assume it's amd64/linux.
+ p := defaultPlatform
+ if childDesc.Platform != nil {
+ p = *childDesc.Platform
+ }
+
+ if matchesPlatform(p, platform) {
+ return r.childDescriptor(childDesc, platform)
+ }
+ }
+ return nil, fmt.Errorf("no child with platform %s/%s in index %s", platform.OS, platform.Architecture, r.Ref)
+}
+
+func (r *remoteIndex) childByHash(h v1.Hash) (*Descriptor, error) {
+ index, err := r.IndexManifest()
+ if err != nil {
+ return nil, err
+ }
+ for _, childDesc := range index.Manifests {
+ if h == childDesc.Digest {
+ return r.childDescriptor(childDesc, defaultPlatform)
+ }
+ }
+ return nil, fmt.Errorf("no child with digest %s in index %s", h, r.Ref)
+}
+
+// Convert one of this index's child's v1.Descriptor into a remote.Descriptor, with the given platform option.
+func (r *remoteIndex) childDescriptor(child v1.Descriptor, platform v1.Platform) (*Descriptor, error) {
+ ref := r.Ref.Context().Digest(child.Digest.String())
+ manifest, _, err := r.fetchManifest(ref, []types.MediaType{child.MediaType})
+ if err != nil {
+ return nil, err
+ }
+ return &Descriptor{
+ fetcher: fetcher{
+ Ref: ref,
+ Client: r.Client,
+ context: r.context,
+ },
+ Manifest: manifest,
+ Descriptor: child,
+ platform: platform,
+ }, nil
+}
+
+// matchesPlatform checks if the given platform matches the required platform.
+// The given platform matches the required platform if
+// - architecture and OS are identical.
+// - OS version and variant are identical if provided.
+// - features and OS features of the required platform are subsets of those of the given platform.
+func matchesPlatform(given, required v1.Platform) bool {
+ // Required fields that must be identical.
+ if given.Architecture != required.Architecture || given.OS != required.OS {
+ return false
+ }
+
+ // Optional fields that may be empty, but must be identical if provided.
+ if required.OSVersion != "" && given.OSVersion != required.OSVersion {
+ return false
+ }
+ if required.Variant != "" && given.Variant != required.Variant {
+ return false
+ }
+
+ // Verify required platform's features are a subset of given platform's features.
+ if !isSubset(given.OSFeatures, required.OSFeatures) {
+ return false
+ }
+ if !isSubset(given.Features, required.Features) {
+ return false
+ }
+
+ return true
+}
+
+// isSubset checks if the required array of strings is a subset of the given lst.
+func isSubset(lst, required []string) bool {
+ set := make(map[string]bool)
+ for _, value := range lst {
+ set[value] = true
+ }
+
+ for _, value := range required {
+ if _, ok := set[value]; !ok {
+ return false
+ }
+ }
+
+ return true
+}
diff --git a/vendor/github.com/google/go-containerregistry/pkg/v1/remote/layer.go b/vendor/github.com/google/go-containerregistry/pkg/v1/remote/layer.go
new file mode 100644
index 0000000000..1501672d3a
--- /dev/null
+++ b/vendor/github.com/google/go-containerregistry/pkg/v1/remote/layer.go
@@ -0,0 +1,93 @@
+// Copyright 2019 Google LLC All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package remote
+
+import (
+ "io"
+
+ "github.com/google/go-containerregistry/internal/redact"
+ "github.com/google/go-containerregistry/pkg/name"
+ v1 "github.com/google/go-containerregistry/pkg/v1"
+ "github.com/google/go-containerregistry/pkg/v1/partial"
+ "github.com/google/go-containerregistry/pkg/v1/types"
+)
+
+// remoteLayer implements partial.CompressedLayer
+type remoteLayer struct {
+ fetcher
+ digest v1.Hash
+}
+
+// Compressed implements partial.CompressedLayer
+func (rl *remoteLayer) Compressed() (io.ReadCloser, error) {
+ // We don't want to log binary layers -- this can break terminals.
+ ctx := redact.NewContext(rl.context, "omitting binary blobs from logs")
+ return rl.fetchBlob(ctx, rl.digest)
+}
+
+// Size implements partial.CompressedLayer
+func (rl *remoteLayer) Size() (int64, error) {
+ resp, err := rl.headBlob(rl.digest)
+ if err != nil {
+ return -1, err
+ }
+ defer resp.Body.Close()
+ return resp.ContentLength, nil
+}
+
+// Digest implements partial.CompressedLayer
+func (rl *remoteLayer) Digest() (v1.Hash, error) {
+ return rl.digest, nil
+}
+
+// MediaType implements v1.Layer
+func (rl *remoteLayer) MediaType() (types.MediaType, error) {
+ return types.DockerLayer, nil
+}
+
+// See partial.Exists.
+func (rl *remoteLayer) Exists() (bool, error) {
+ return rl.blobExists(rl.digest)
+}
+
+// Layer reads the given blob reference from a registry as a Layer. A blob
+// reference here is just a punned name.Digest where the digest portion is the
+// digest of the blob to be read and the repository portion is the repo where
+// that blob lives.
+func Layer(ref name.Digest, options ...Option) (v1.Layer, error) {
+ o, err := makeOptions(ref.Context(), options...)
+ if err != nil {
+ return nil, err
+ }
+ f, err := makeFetcher(ref, o)
+ if err != nil {
+ return nil, err
+ }
+ h, err := v1.NewHash(ref.Identifier())
+ if err != nil {
+ return nil, err
+ }
+ l, err := partial.CompressedToLayer(&remoteLayer{
+ fetcher: *f,
+ digest: h,
+ })
+ if err != nil {
+ return nil, err
+ }
+ return &MountableLayer{
+ Layer: l,
+ Reference: ref,
+ }, nil
+}
diff --git a/vendor/github.com/google/go-containerregistry/pkg/v1/remote/list.go b/vendor/github.com/google/go-containerregistry/pkg/v1/remote/list.go
new file mode 100644
index 0000000000..e4a005aa58
--- /dev/null
+++ b/vendor/github.com/google/go-containerregistry/pkg/v1/remote/list.go
@@ -0,0 +1,146 @@
+// Copyright 2018 Google LLC All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package remote
+
+import (
+ "context"
+ "encoding/json"
+ "fmt"
+ "net/http"
+ "net/url"
+ "strings"
+
+ "github.com/google/go-containerregistry/pkg/name"
+ "github.com/google/go-containerregistry/pkg/v1/remote/transport"
+)
+
+type tags struct {
+ Name string `json:"name"`
+ Tags []string `json:"tags"`
+}
+
+// List wraps ListWithContext using the background context.
+func List(repo name.Repository, options ...Option) ([]string, error) {
+ return ListWithContext(context.Background(), repo, options...)
+}
+
+// ListWithContext calls /tags/list for the given repository, returning the list of tags
+// in the "tags" property.
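+//
+// For example (the repository name is illustrative):
+//
+//	repo, _ := name.NewRepository("registry.example.com/my/repo")
+//	tags, err := ListWithContext(context.Background(), repo)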
+func ListWithContext(ctx context.Context, repo name.Repository, options ...Option) ([]string, error) {
+ o, err := makeOptions(repo, options...)
+ if err != nil {
+ return nil, err
+ }
+ scopes := []string{repo.Scope(transport.PullScope)}
+ tr, err := transport.NewWithContext(o.context, repo.Registry, o.auth, o.transport, scopes)
+ if err != nil {
+ return nil, err
+ }
+
+ uri := &url.URL{
+ Scheme: repo.Registry.Scheme(),
+ Host: repo.Registry.RegistryStr(),
+ Path: fmt.Sprintf("/v2/%s/tags/list", repo.RepositoryStr()),
+ // ECR returns an error if n > 1000:
+ // https://github.com/google/go-containerregistry/issues/681
+ RawQuery: "n=1000",
+ }
+
+ // This is lazy, but I want to make sure List(..., WithContext(ctx)) works
+ // without calling makeOptions() twice (which can have side effects).
+ // This means ListWithContext(ctx, ..., WithContext(ctx2)) prefers ctx2.
+ if o.context != context.Background() {
+ ctx = o.context
+ }
+
+ client := http.Client{Transport: tr}
+ tagList := []string{}
+ parsed := tags{}
+
+ // get responses until there is no next page
+ for {
+ select {
+ case <-ctx.Done():
+ return nil, ctx.Err()
+ default:
+ }
+
+ req, err := http.NewRequest("GET", uri.String(), nil)
+ if err != nil {
+ return nil, err
+ }
+ req = req.WithContext(ctx)
+
+ resp, err := client.Do(req)
+ if err != nil {
+ return nil, err
+ }
+
+ if err := transport.CheckError(resp, http.StatusOK); err != nil {
+ return nil, err
+ }
+
+ if err := json.NewDecoder(resp.Body).Decode(&parsed); err != nil {
+ return nil, err
+ }
+
+ if err := resp.Body.Close(); err != nil {
+ return nil, err
+ }
+
+ tagList = append(tagList, parsed.Tags...)
+
+ uri, err = getNextPageURL(resp)
+ if err != nil {
+ return nil, err
+ }
+ // no next page
+ if uri == nil {
+ break
+ }
+ }
+
+ return tagList, nil
+}
+
+// getNextPageURL checks if there is a Link header in a http.Response which
+// contains a link to the next page. If yes it returns the url.URL of the next
+// page otherwise it returns nil.
+func getNextPageURL(resp *http.Response) (*url.URL, error) {
+ link := resp.Header.Get("Link")
+ if link == "" {
+ return nil, nil
+ }
+
+ if link[0] != '<' {
+ return nil, fmt.Errorf("failed to parse link header: missing '<' in: %s", link)
+ }
+
+ end := strings.Index(link, ">")
+ if end == -1 {
+ return nil, fmt.Errorf("failed to parse link header: missing '>' in: %s", link)
+ }
+ link = link[1:end]
+
+ linkURL, err := url.Parse(link)
+ if err != nil {
+ return nil, err
+ }
+ if resp.Request == nil || resp.Request.URL == nil {
+ return nil, nil
+ }
+ linkURL = resp.Request.URL.ResolveReference(linkURL)
+ return linkURL, nil
+}
diff --git a/vendor/github.com/google/go-containerregistry/pkg/v1/remote/mount.go b/vendor/github.com/google/go-containerregistry/pkg/v1/remote/mount.go
new file mode 100644
index 0000000000..728997044c
--- /dev/null
+++ b/vendor/github.com/google/go-containerregistry/pkg/v1/remote/mount.go
@@ -0,0 +1,95 @@
+// Copyright 2018 Google LLC All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package remote
+
+import (
+ "github.com/google/go-containerregistry/pkg/name"
+ v1 "github.com/google/go-containerregistry/pkg/v1"
+ "github.com/google/go-containerregistry/pkg/v1/partial"
+)
+
+// MountableLayer wraps a v1.Layer in a shim that enables the layer to be
+// "mounted" when published to another registry.
+type MountableLayer struct {
+ v1.Layer
+
+ Reference name.Reference
+}
+
+// Descriptor retains the original descriptor from an image manifest.
+// See partial.Descriptor.
+func (ml *MountableLayer) Descriptor() (*v1.Descriptor, error) {
+ return partial.Descriptor(ml.Layer)
+}
+
+// Exists is a hack. See partial.Exists.
+func (ml *MountableLayer) Exists() (bool, error) {
+ return partial.Exists(ml.Layer)
+}
+
+// mountableImage wraps the v1.Layer references returned by the embedded v1.Image
+// in MountableLayer's so that remote.Write might attempt to mount them from their
+// source repository.
+type mountableImage struct {
+ v1.Image
+
+ Reference name.Reference
+}
+
+// Layers implements v1.Image
+func (mi *mountableImage) Layers() ([]v1.Layer, error) {
+ ls, err := mi.Image.Layers()
+ if err != nil {
+ return nil, err
+ }
+ mls := make([]v1.Layer, 0, len(ls))
+ for _, l := range ls {
+ mls = append(mls, &MountableLayer{
+ Layer: l,
+ Reference: mi.Reference,
+ })
+ }
+ return mls, nil
+}
+
+// LayerByDigest implements v1.Image
+func (mi *mountableImage) LayerByDigest(d v1.Hash) (v1.Layer, error) {
+ l, err := mi.Image.LayerByDigest(d)
+ if err != nil {
+ return nil, err
+ }
+ return &MountableLayer{
+ Layer: l,
+ Reference: mi.Reference,
+ }, nil
+}
+
+// LayerByDiffID implements v1.Image
+func (mi *mountableImage) LayerByDiffID(d v1.Hash) (v1.Layer, error) {
+ l, err := mi.Image.LayerByDiffID(d)
+ if err != nil {
+ return nil, err
+ }
+ return &MountableLayer{
+ Layer: l,
+ Reference: mi.Reference,
+ }, nil
+}
+
+// Descriptor retains the original descriptor from an index manifest.
+// See partial.Descriptor.
+func (mi *mountableImage) Descriptor() (*v1.Descriptor, error) {
+ return partial.Descriptor(mi.Image)
+}
diff --git a/vendor/github.com/google/go-containerregistry/pkg/v1/remote/multi_write.go b/vendor/github.com/google/go-containerregistry/pkg/v1/remote/multi_write.go
new file mode 100644
index 0000000000..45408fc4a1
--- /dev/null
+++ b/vendor/github.com/google/go-containerregistry/pkg/v1/remote/multi_write.go
@@ -0,0 +1,298 @@
+// Copyright 2020 Google LLC All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package remote
+
+import (
+ "fmt"
+ "net/http"
+
+ "github.com/google/go-containerregistry/pkg/name"
+ v1 "github.com/google/go-containerregistry/pkg/v1"
+ "github.com/google/go-containerregistry/pkg/v1/partial"
+ "github.com/google/go-containerregistry/pkg/v1/remote/transport"
+ "github.com/google/go-containerregistry/pkg/v1/types"
+ "golang.org/x/sync/errgroup"
+)
+
+// MultiWrite writes the given Images or ImageIndexes to the given refs, as
+// efficiently as possible, by deduping shared layer blobs and uploading layers
+// in parallel, then uploading all manifests in parallel.
+//
+// Current limitations:
+// - All refs must share the same repository.
+// - Images cannot consist of stream.Layers.
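+//
+// For example (the repository, tags, and keychain are illustrative; img is an
+// already-constructed v1.Image):
+//
+//	repo, _ := name.NewRepository("registry.example.com/my/repo")
+//	err := MultiWrite(map[name.Reference]Taggable{
+//		repo.Tag("latest"): img,
+//		repo.Tag("v1.0.0"): img,
+//	}, WithAuthFromKeychain(authn.DefaultKeychain))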
+func MultiWrite(m map[name.Reference]Taggable, options ...Option) (rerr error) {
+ // Determine the repository being pushed to; if asked to push to
+ // multiple repositories, give up.
+ var repo, zero name.Repository
+ for ref := range m {
+ if repo == zero {
+ repo = ref.Context()
+ } else if ref.Context() != repo {
+ return fmt.Errorf("MultiWrite can only push to the same repository (saw %q and %q)", repo, ref.Context())
+ }
+ }
+
+ o, err := makeOptions(repo, options...)
+ if err != nil {
+ return err
+ }
+
+ // Collect unique blobs (layers and config blobs).
+ blobs := map[v1.Hash]v1.Layer{}
+ newManifests := []map[name.Reference]Taggable{}
+ // Separate originally requested images and indexes, so we can push images first.
+ images, indexes := map[name.Reference]Taggable{}, map[name.Reference]Taggable{}
+ for ref, i := range m {
+ if img, ok := i.(v1.Image); ok {
+ images[ref] = i
+ if err := addImageBlobs(img, blobs, o.allowNondistributableArtifacts); err != nil {
+ return err
+ }
+ continue
+ }
+ if idx, ok := i.(v1.ImageIndex); ok {
+ indexes[ref] = i
+ newManifests, err = addIndexBlobs(idx, blobs, repo, newManifests, 0, o.allowNondistributableArtifacts)
+ if err != nil {
+ return err
+ }
+ continue
+ }
+ return fmt.Errorf("pushable resource was not Image or ImageIndex: %T", i)
+ }
+
+ // Determine if any of the layers are Mountable, because if so we need
+ // to request Pull scope too.
+ ls := []v1.Layer{}
+ for _, l := range blobs {
+ ls = append(ls, l)
+ }
+ scopes := scopesForUploadingImage(repo, ls)
+ tr, err := transport.NewWithContext(o.context, repo.Registry, o.auth, o.transport, scopes)
+ if err != nil {
+ return err
+ }
+ w := writer{
+ repo: repo,
+ client: &http.Client{Transport: tr},
+ context: o.context,
+ updates: o.updates,
+ lastUpdate: &v1.Update{},
+ }
+
+ // Collect the total size of blobs and manifests we're about to write.
+ if o.updates != nil {
+ defer close(o.updates)
+ defer func() { sendError(o.updates, rerr) }()
+ for _, b := range blobs {
+ size, err := b.Size()
+ if err != nil {
+ return err
+ }
+ w.lastUpdate.Total += size
+ }
+ countManifest := func(t Taggable) error {
+ b, err := t.RawManifest()
+ if err != nil {
+ return err
+ }
+ w.lastUpdate.Total += int64(len(b))
+ return nil
+ }
+ for _, i := range images {
+ if err := countManifest(i); err != nil {
+ return err
+ }
+ }
+ for _, nm := range newManifests {
+ for _, i := range nm {
+ if err := countManifest(i); err != nil {
+ return err
+ }
+ }
+ }
+ for _, i := range indexes {
+ if err := countManifest(i); err != nil {
+ return err
+ }
+ }
+ }
+
+ // Upload individual blobs and collect any errors.
+ blobChan := make(chan v1.Layer, 2*o.jobs)
+ g, ctx := errgroup.WithContext(o.context)
+ for i := 0; i < o.jobs; i++ {
+ // Start N workers consuming blobs to upload.
+ g.Go(func() error {
+ for b := range blobChan {
+ if err := w.uploadOne(b); err != nil {
+ return err
+ }
+ }
+ return nil
+ })
+ }
+ g.Go(func() error {
+ defer close(blobChan)
+ for _, b := range blobs {
+ select {
+ case blobChan <- b:
+ case <-ctx.Done():
+ return ctx.Err()
+ }
+ }
+ return nil
+ })
+ if err := g.Wait(); err != nil {
+ return err
+ }
+
+ commitMany := func(m map[name.Reference]Taggable) error {
+ // With all of the constituent elements uploaded, upload the manifests
+ // to commit the images and indexes, and collect any errors.
+ type task struct {
+ i Taggable
+ ref name.Reference
+ }
+ taskChan := make(chan task, 2*o.jobs)
+ for i := 0; i < o.jobs; i++ {
+ // Start N workers consuming tasks to upload manifests.
+ g.Go(func() error {
+ for t := range taskChan {
+ if err := w.commitManifest(t.i, t.ref); err != nil {
+ return err
+ }
+ }
+ return nil
+ })
+ }
+ go func() {
+ for ref, i := range m {
+ taskChan <- task{i, ref}
+ }
+ close(taskChan)
+ }()
+ return g.Wait()
+ }
+ // Push originally requested image manifests. These have no
+ // dependencies.
+ if err := commitMany(images); err != nil {
+ return err
+ }
+ // Push new manifests from lowest levels up.
+ for i := len(newManifests) - 1; i >= 0; i-- {
+ if err := commitMany(newManifests[i]); err != nil {
+ return err
+ }
+ }
+ // Push originally requested index manifests, which might depend on
+ // newly discovered manifests.
+
+ return commitMany(indexes)
+}
+
+// addIndexBlobs adds blobs to the set of blobs we intend to upload, and
+// returns the latest copy of the ordered collection of manifests to upload.
+func addIndexBlobs(idx v1.ImageIndex, blobs map[v1.Hash]v1.Layer, repo name.Repository, newManifests []map[name.Reference]Taggable, lvl int, allowNondistributableArtifacts bool) ([]map[name.Reference]Taggable, error) {
+ if lvl > len(newManifests)-1 {
+ newManifests = append(newManifests, map[name.Reference]Taggable{})
+ }
+
+ im, err := idx.IndexManifest()
+ if err != nil {
+ return nil, err
+ }
+ for _, desc := range im.Manifests {
+ switch desc.MediaType {
+ case types.OCIImageIndex, types.DockerManifestList:
+ idx, err := idx.ImageIndex(desc.Digest)
+ if err != nil {
+ return nil, err
+ }
+ newManifests, err = addIndexBlobs(idx, blobs, repo, newManifests, lvl+1, allowNondistributableArtifacts)
+ if err != nil {
+ return nil, err
+ }
+
+ // Also track the sub-index manifest to upload later by digest.
+ newManifests[lvl][repo.Digest(desc.Digest.String())] = idx
+ case types.OCIManifestSchema1, types.DockerManifestSchema2:
+ img, err := idx.Image(desc.Digest)
+ if err != nil {
+ return nil, err
+ }
+ if err := addImageBlobs(img, blobs, allowNondistributableArtifacts); err != nil {
+ return nil, err
+ }
+
+ // Also track the sub-image manifest to upload later by digest.
+ newManifests[lvl][repo.Digest(desc.Digest.String())] = img
+ default:
+ // Workaround for #819.
+ if wl, ok := idx.(withLayer); ok {
+ layer, err := wl.Layer(desc.Digest)
+ if err != nil {
+ return nil, err
+ }
+ if err := addLayerBlob(layer, blobs, allowNondistributableArtifacts); err != nil {
+ return nil, err
+ }
+ } else {
+ return nil, fmt.Errorf("unknown media type: %v", desc.MediaType)
+ }
+ }
+ }
+ return newManifests, nil
+}
+
+func addLayerBlob(l v1.Layer, blobs map[v1.Hash]v1.Layer, allowNondistributableArtifacts bool) error {
+ // Ignore foreign layers.
+ mt, err := l.MediaType()
+ if err != nil {
+ return err
+ }
+
+ if mt.IsDistributable() || allowNondistributableArtifacts {
+ d, err := l.Digest()
+ if err != nil {
+ return err
+ }
+
+ blobs[d] = l
+ }
+
+ return nil
+}
+
+func addImageBlobs(img v1.Image, blobs map[v1.Hash]v1.Layer, allowNondistributableArtifacts bool) error {
+ ls, err := img.Layers()
+ if err != nil {
+ return err
+ }
+ // Collect all layers.
+ for _, l := range ls {
+ if err := addLayerBlob(l, blobs, allowNondistributableArtifacts); err != nil {
+ return err
+ }
+ }
+
+ // Collect config blob.
+ cl, err := partial.ConfigLayer(img)
+ if err != nil {
+ return err
+ }
+ return addLayerBlob(cl, blobs, allowNondistributableArtifacts)
+}
diff --git a/vendor/github.com/google/go-containerregistry/pkg/v1/remote/options.go b/vendor/github.com/google/go-containerregistry/pkg/v1/remote/options.go
new file mode 100644
index 0000000000..7edebdf779
--- /dev/null
+++ b/vendor/github.com/google/go-containerregistry/pkg/v1/remote/options.go
@@ -0,0 +1,195 @@
+// Copyright 2018 Google LLC All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package remote
+
+import (
+ "context"
+ "errors"
+ "net/http"
+
+ "github.com/google/go-containerregistry/pkg/authn"
+ "github.com/google/go-containerregistry/pkg/logs"
+ v1 "github.com/google/go-containerregistry/pkg/v1"
+ "github.com/google/go-containerregistry/pkg/v1/remote/transport"
+)
+
+// Option is a functional option for remote operations.
+type Option func(*options) error
+
+type options struct {
+ auth authn.Authenticator
+ keychain authn.Keychain
+ transport http.RoundTripper
+ platform v1.Platform
+ context context.Context
+ jobs int
+ userAgent string
+ allowNondistributableArtifacts bool
+ updates chan<- v1.Update
+}
+
+var defaultPlatform = v1.Platform{
+ Architecture: "amd64",
+ OS: "linux",
+}
+
+const defaultJobs = 4
+
+func makeOptions(target authn.Resource, opts ...Option) (*options, error) {
+ o := &options{
+ auth: authn.Anonymous,
+ transport: http.DefaultTransport,
+ platform: defaultPlatform,
+ context: context.Background(),
+ jobs: defaultJobs,
+ }
+
+ for _, option := range opts {
+ if err := option(o); err != nil {
+ return nil, err
+ }
+ }
+
+ if o.keychain != nil {
+ auth, err := o.keychain.Resolve(target)
+ if err != nil {
+ return nil, err
+ }
+ o.auth = auth
+ }
+
+ // Wrap the transport in something that logs requests and responses.
+ // It's expensive to generate the dumps, so skip it if we're writing
+ // to nothing.
+ if logs.Enabled(logs.Debug) {
+ o.transport = transport.NewLogger(o.transport)
+ }
+
+ // Wrap the transport in something that can retry network flakes.
+ o.transport = transport.NewRetry(o.transport)
+
+ // Wrap this last to prevent transport.New from double-wrapping.
+ if o.userAgent != "" {
+ o.transport = transport.NewUserAgent(o.transport, o.userAgent)
+ }
+
+ return o, nil
+}
+
+// WithTransport is a functional option for overriding the default transport
+// for remote operations.
+//
+// The default transport is http.DefaultTransport.
+func WithTransport(t http.RoundTripper) Option {
+ return func(o *options) error {
+ o.transport = t
+ return nil
+ }
+}
+
+// WithAuth is a functional option for overriding the default authenticator
+// for remote operations.
+//
+// The default authenticator is authn.Anonymous.
+func WithAuth(auth authn.Authenticator) Option {
+ return func(o *options) error {
+ o.auth = auth
+ return nil
+ }
+}
+
+// WithAuthFromKeychain is a functional option for overriding the default
+// authenticator for remote operations, using an authn.Keychain to find
+// credentials.
+//
+// The default authenticator is authn.Anonymous.
+func WithAuthFromKeychain(keys authn.Keychain) Option {
+ return func(o *options) error {
+ o.keychain = keys
+ return nil
+ }
+}
+
+// WithPlatform is a functional option for overriding the default platform
+// that Image and Descriptor.Image use for resolving an index to an image.
+//
+// The default platform is amd64/linux.
+func WithPlatform(p v1.Platform) Option {
+ return func(o *options) error {
+ o.platform = p
+ return nil
+ }
+}
+
+// WithContext is a functional option for setting the context in http requests
+// performed by a given function. Note that this context is used for _all_
+// http requests, not just the initial volley. E.g., for remote.Image, the
+// context will be set on http requests generated by subsequent calls to
+// RawConfigFile() and even methods on layers returned by Layers().
+//
+// The default context is context.Background().
+func WithContext(ctx context.Context) Option {
+ return func(o *options) error {
+ o.context = ctx
+ return nil
+ }
+}
+
+// WithJobs is a functional option for setting the parallelism of remote
+// operations performed by a given function. Note that not all remote
+// operations support parallelism.
+//
+// The default value is 4.
+func WithJobs(jobs int) Option {
+ return func(o *options) error {
+ if jobs <= 0 {
+ return errors.New("jobs must be greater than zero")
+ }
+ o.jobs = jobs
+ return nil
+ }
+}
+
+// WithUserAgent adds the given string to the User-Agent header for any HTTP
+// requests. This header will also include "go-containerregistry/${version}".
+//
+// If you want to completely overwrite the User-Agent header, use WithTransport.
+func WithUserAgent(ua string) Option {
+ return func(o *options) error {
+ o.userAgent = ua
+ return nil
+ }
+}
+
+// WithNondistributable includes non-distributable (foreign) layers
+// when writing images, see:
+// https://github.com/opencontainers/image-spec/blob/master/layer.md#non-distributable-layers
+//
+// The default behaviour is to skip these layers.
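+//
+// Since WithNondistributable already matches the Option signature, it is
+// passed directly rather than called, e.g. (illustrative):
+//
+//	err := Write(ref, img, WithNondistributable)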
+func WithNondistributable(o *options) error {
+ o.allowNondistributableArtifacts = true
+ return nil
+}
+
+// WithProgress takes a channel that will receive progress updates as bytes are written.
+//
+// Sending updates to an unbuffered channel will block writes, so callers
+// should provide a buffered channel to avoid potential deadlocks.
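+//
+// A minimal sketch (buffer size and consumer loop are illustrative); the
+// channel is closed by the library when the operation finishes:
+//
+//	updates := make(chan v1.Update, 100)
+//	go func() {
+//	    for u := range updates {
+//	        fmt.Printf("%d/%d bytes\n", u.Complete, u.Total)
+//	    }
+//	}()
+//	err := Write(ref, img, WithProgress(updates))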
+func WithProgress(updates chan<- v1.Update) Option {
+ return func(o *options) error {
+ o.updates = updates
+ return nil
+ }
+}
diff --git a/vendor/github.com/google/go-containerregistry/pkg/v1/remote/transport/BUILD.bazel b/vendor/github.com/google/go-containerregistry/pkg/v1/remote/transport/BUILD.bazel
new file mode 100644
index 0000000000..2ee9b61e1b
--- /dev/null
+++ b/vendor/github.com/google/go-containerregistry/pkg/v1/remote/transport/BUILD.bazel
@@ -0,0 +1,29 @@
+load("@io_bazel_rules_go//go:def.bzl", "go_library")
+
+go_library(
+ name = "go_default_library",
+ srcs = [
+ "basic.go",
+ "bearer.go",
+ "doc.go",
+ "error.go",
+ "logger.go",
+ "ping.go",
+ "retry.go",
+ "schemer.go",
+ "scope.go",
+ "transport.go",
+ "useragent.go",
+ ],
+ importmap = "k8s.io/kops/vendor/github.com/google/go-containerregistry/pkg/v1/remote/transport",
+ importpath = "github.com/google/go-containerregistry/pkg/v1/remote/transport",
+ visibility = ["//visibility:public"],
+ deps = [
+ "//vendor/github.com/docker/distribution/registry/client/auth/challenge:go_default_library",
+ "//vendor/github.com/google/go-containerregistry/internal/redact:go_default_library",
+ "//vendor/github.com/google/go-containerregistry/internal/retry:go_default_library",
+ "//vendor/github.com/google/go-containerregistry/pkg/authn:go_default_library",
+ "//vendor/github.com/google/go-containerregistry/pkg/logs:go_default_library",
+ "//vendor/github.com/google/go-containerregistry/pkg/name:go_default_library",
+ ],
+)
diff --git a/vendor/github.com/google/go-containerregistry/pkg/v1/remote/transport/README.md b/vendor/github.com/google/go-containerregistry/pkg/v1/remote/transport/README.md
new file mode 100644
index 0000000000..bd4d957b0e
--- /dev/null
+++ b/vendor/github.com/google/go-containerregistry/pkg/v1/remote/transport/README.md
@@ -0,0 +1,129 @@
+# `transport`
+
+[GoDoc](https://godoc.org/github.com/google/go-containerregistry/pkg/v1/transport)
+
+The [distribution protocol](https://github.com/opencontainers/distribution-spec) is fairly simple, but correctly [implementing authentication](../../../authn/README.md) is **hard**.
+
+This package [implements](https://godoc.org/github.com/google/go-containerregistry/pkg/v1/remote/transport#New) an [`http.RoundTripper`](https://godoc.org/net/http#RoundTripper)
+that transparently performs:
+* [Token
+Authentication](https://docs.docker.com/registry/spec/auth/token/) and
+* [OAuth2
+Authentication](https://docs.docker.com/registry/spec/auth/oauth/)
+
+for registry clients.
+
+## Raison d'être
+
+> Why not just use the [`docker/distribution`](https://godoc.org/github.com/docker/distribution/registry/client/auth) client?
+
+Great question! Mostly, because I don't want to depend on [`prometheus/client_golang`](https://github.com/prometheus/client_golang).
+
+As a performance optimization, that client uses [a cache](https://github.com/docker/distribution/blob/a8371794149d1d95f1e846744b05c87f2f825e5a/registry/client/repository.go#L173) to keep track of a mapping between blob digests and their [descriptors](https://github.com/docker/distribution/blob/a8371794149d1d95f1e846744b05c87f2f825e5a/blobs.go#L57-L86). Unfortunately, the cache [uses prometheus](https://github.com/docker/distribution/blob/a8371794149d1d95f1e846744b05c87f2f825e5a/registry/storage/cache/cachedblobdescriptorstore.go#L44) to track hits and misses, so if you want to use that client you have to pull in all of prometheus, which is pretty large.
+
+
+
+> Why does it matter if you depend on prometheus? Who cares?
+
+It's generally polite to your downstream to reduce the number of dependencies your package requires:
+
+* Downloading your package is faster, which helps our Australian friends and people on airplanes.
+* There is less code to compile, which speeds up builds and saves the planet from global warming.
+* You reduce the likelihood of inflicting dependency hell upon your consumers.
+* [Tim Hockin](https://twitter.com/thockin/status/958606077456654336) prefers it based on his experience working on Kubernetes, and he's a pretty smart guy.
+
+> Okay, what about [`containerd/containerd`](https://godoc.org/github.com/containerd/containerd/remotes/docker)?
+
+Similar reasons! That ends up pulling in grpc, protobuf, and logrus.
+
+
+
+> Well... what about [`containers/image`](https://godoc.org/github.com/containers/image/docker)?
+
+That just uses the `docker/distribution` client... and more!
+
+
+
+> Wow, what about this package?
+
+Of course, this package isn't perfect either. `transport` depends on `authn`,
+which in turn depends on docker's config file parsing and handling package,
+which you don't strictly need but almost certainly want if you're going to be
+interacting with a registry.
+
+
+
+*These graphs were generated by
+[`kisielk/godepgraph`](https://github.com/kisielk/godepgraph).*
+
+## Usage
+
+This is heavily used by the
+[`remote`](https://godoc.org/github.com/google/go-containerregistry/pkg/v1/remote)
+package, which implements higher level image-centric functionality, but this
+package is useful if you want to interact directly with the registry to do
+something that `remote` doesn't support, e.g. [to handle schema 1
+images](https://github.com/google/go-containerregistry/pull/509).
+
+This package also includes some [error
+handling](https://github.com/opencontainers/distribution-spec/blob/60be706c34ee7805bdd1d3d11affec53b0dfb8fb/spec.md#errors)
+facilities in the form of
+[`CheckError`](https://godoc.org/github.com/google/go-containerregistry/pkg/v1/remote/transport#CheckError),
+which will parse the response body into a structured error for unexpected http
+status codes.
+
+Here's a "simple" program that writes the result of
+[listing tags](https://github.com/opencontainers/distribution-spec/blob/60be706c34ee7805bdd1d3d11affec53b0dfb8fb/spec.md#tags)
+for [`gcr.io/google-containers/pause`](https://gcr.io/google-containers/pause)
+to stdout.
+
+```go
+package main
+
+import (
+ "io"
+ "net/http"
+ "os"
+
+ "github.com/google/go-containerregistry/pkg/authn"
+ "github.com/google/go-containerregistry/pkg/name"
+ "github.com/google/go-containerregistry/pkg/v1/remote/transport"
+)
+
+func main() {
+ repo, err := name.NewRepository("gcr.io/google-containers/pause")
+ if err != nil {
+ panic(err)
+ }
+
+ // Fetch credentials based on your docker config file, which is $HOME/.docker/config.json or $DOCKER_CONFIG.
+ auth, err := authn.DefaultKeychain.Resolve(repo.Registry)
+ if err != nil {
+ panic(err)
+ }
+
+ // Construct an http.Client that is authorized to pull from gcr.io/google-containers/pause.
+ scopes := []string{repo.Scope(transport.PullScope)}
+ t, err := transport.New(repo.Registry, auth, http.DefaultTransport, scopes)
+ if err != nil {
+ panic(err)
+ }
+ client := &http.Client{Transport: t}
+
+ // Make the actual request.
+ resp, err := client.Get("https://gcr.io/v2/google-containers/pause/tags/list")
+ if err != nil {
+ panic(err)
+ }
+
+ // Assert that we get a 200, otherwise attempt to parse body as a structured error.
+ if err := transport.CheckError(resp, http.StatusOK); err != nil {
+ panic(err)
+ }
+
+ // Write the response to stdout.
+ if _, err := io.Copy(os.Stdout, resp.Body); err != nil {
+ panic(err)
+ }
+}
+```
diff --git a/vendor/github.com/google/go-containerregistry/pkg/v1/remote/transport/basic.go b/vendor/github.com/google/go-containerregistry/pkg/v1/remote/transport/basic.go
new file mode 100644
index 0000000000..fdb362b762
--- /dev/null
+++ b/vendor/github.com/google/go-containerregistry/pkg/v1/remote/transport/basic.go
@@ -0,0 +1,62 @@
+// Copyright 2018 Google LLC All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package transport
+
+import (
+ "encoding/base64"
+ "fmt"
+ "net/http"
+
+ "github.com/google/go-containerregistry/pkg/authn"
+)
+
+type basicTransport struct {
+ inner http.RoundTripper
+ auth authn.Authenticator
+ target string
+}
+
+var _ http.RoundTripper = (*basicTransport)(nil)
+
+// RoundTrip implements http.RoundTripper
+func (bt *basicTransport) RoundTrip(in *http.Request) (*http.Response, error) {
+ if bt.auth != authn.Anonymous {
+ auth, err := bt.auth.Authorization()
+ if err != nil {
+ return nil, err
+ }
+
+ // http.Client handles redirects at a layer above the http.RoundTripper
+ // abstraction, so to avoid forwarding Authorization headers to places
+ // we are redirected, only set it when the authorization header matches
+ // the host with which we are interacting.
+ // In case of redirect http.Client can use an empty Host, check URL too.
+ if in.Host == bt.target || in.URL.Host == bt.target {
+ if bearer := auth.RegistryToken; bearer != "" {
+ hdr := fmt.Sprintf("Bearer %s", bearer)
+ in.Header.Set("Authorization", hdr)
+ } else if user, pass := auth.Username, auth.Password; user != "" && pass != "" {
+ delimited := fmt.Sprintf("%s:%s", user, pass)
+ encoded := base64.StdEncoding.EncodeToString([]byte(delimited))
+ hdr := fmt.Sprintf("Basic %s", encoded)
+ in.Header.Set("Authorization", hdr)
+ } else if token := auth.Auth; token != "" {
+ hdr := fmt.Sprintf("Basic %s", token)
+ in.Header.Set("Authorization", hdr)
+ }
+ }
+ }
+ return bt.inner.RoundTrip(in)
+}
diff --git a/vendor/github.com/google/go-containerregistry/pkg/v1/remote/transport/bearer.go b/vendor/github.com/google/go-containerregistry/pkg/v1/remote/transport/bearer.go
new file mode 100644
index 0000000000..49941bd896
--- /dev/null
+++ b/vendor/github.com/google/go-containerregistry/pkg/v1/remote/transport/bearer.go
@@ -0,0 +1,311 @@
+// Copyright 2018 Google LLC All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package transport
+
+import (
+ "context"
+ "encoding/json"
+ "fmt"
+ "io/ioutil"
+ "net"
+ "net/http"
+ "net/url"
+ "strings"
+
+ authchallenge "github.com/docker/distribution/registry/client/auth/challenge"
+ "github.com/google/go-containerregistry/internal/redact"
+ "github.com/google/go-containerregistry/pkg/authn"
+ "github.com/google/go-containerregistry/pkg/logs"
+ "github.com/google/go-containerregistry/pkg/name"
+)
+
+type bearerTransport struct {
+ // Wrapped by bearerTransport.
+ inner http.RoundTripper
+ // Basic credentials that we exchange for bearer tokens.
+ basic authn.Authenticator
+ // Holds the bearer response from the token service.
+ bearer authn.AuthConfig
+ // Registry to which we send bearer tokens.
+ registry name.Registry
+ // See https://tools.ietf.org/html/rfc6750#section-3
+ realm string
+ // See https://docs.docker.com/registry/spec/auth/token/
+ service string
+ scopes []string
+ // Scheme we should use, determined by ping response.
+ scheme string
+}
+
+var _ http.RoundTripper = (*bearerTransport)(nil)
+
+var portMap = map[string]string{
+ "http": "80",
+ "https": "443",
+}
+
+func stringSet(ss []string) map[string]struct{} {
+ set := make(map[string]struct{})
+ for _, s := range ss {
+ set[s] = struct{}{}
+ }
+ return set
+}
+
+// RoundTrip implements http.RoundTripper
+func (bt *bearerTransport) RoundTrip(in *http.Request) (*http.Response, error) {
+ sendRequest := func() (*http.Response, error) {
+ // http.Client handles redirects at a layer above the http.RoundTripper
+ // abstraction, so to avoid forwarding Authorization headers to places
+ // we are redirected, only set it when the authorization header matches
+ // the registry with which we are interacting.
+ // In case of redirect http.Client can use an empty Host, check URL too.
+ if matchesHost(bt.registry, in, bt.scheme) {
+ hdr := fmt.Sprintf("Bearer %s", bt.bearer.RegistryToken)
+ in.Header.Set("Authorization", hdr)
+ }
+ return bt.inner.RoundTrip(in)
+ }
+
+ res, err := sendRequest()
+ if err != nil {
+ return nil, err
+ }
+
+ // If we hit a WWW-Authenticate challenge, it might be due to expired tokens or insufficient scope.
+ if challenges := authchallenge.ResponseChallenges(res); len(challenges) != 0 {
+ for _, wac := range challenges {
+ // TODO(jonjohnsonjr): Should we also update "realm" or "service"?
+ if scope, ok := wac.Parameters["scope"]; ok {
+ // From https://tools.ietf.org/html/rfc6750#section-3
+ // The "scope" attribute is defined in Section 3.3 of [RFC6749]. The
+ // "scope" attribute is a space-delimited list of case-sensitive scope
+ // values indicating the required scope of the access token for
+ // accessing the requested resource.
+ scopes := strings.Split(scope, " ")
+
+ // Add any scopes that we don't already request.
+ got := stringSet(bt.scopes)
+ for _, want := range scopes {
+ if _, ok := got[want]; !ok {
+ bt.scopes = append(bt.scopes, want)
+ }
+ }
+ }
+ }
+
+ // TODO(jonjohnsonjr): Teach transport.Error about "error" and "error_description" from challenge.
+
+ // Retry the request to attempt to get a valid token.
+ if err = bt.refresh(in.Context()); err != nil {
+ return nil, err
+ }
+ return sendRequest()
+ }
+
+ return res, err
+}
+
+// It's unclear which authentication flow to use based purely on the protocol,
+// so we rely on heuristics and fallbacks to support as many registries as possible.
+// The basic token exchange is attempted first, falling back to the oauth flow.
+// If the IdentityToken is set, this indicates that we should start with the oauth flow.
+func (bt *bearerTransport) refresh(ctx context.Context) error {
+ auth, err := bt.basic.Authorization()
+ if err != nil {
+ return err
+ }
+
+ if auth.RegistryToken != "" {
+ bt.bearer.RegistryToken = auth.RegistryToken
+ return nil
+ }
+
+ var content []byte
+ if auth.IdentityToken != "" {
+ // If the secret being stored is an identity token,
+ // the Username should be set to <token>, which indicates
+ // we are using an oauth flow.
+ content, err = bt.refreshOauth(ctx)
+ if terr, ok := err.(*Error); ok && terr.StatusCode == http.StatusNotFound {
+ // Note: Not all token servers implement oauth2.
+ // If the request to the endpoint returns 404 using the HTTP POST method,
+ // refer to Token Documentation for using the HTTP GET method supported by all token servers.
+ content, err = bt.refreshBasic(ctx)
+ }
+ } else {
+ content, err = bt.refreshBasic(ctx)
+ }
+ if err != nil {
+ return err
+ }
+
+ // Some registries don't have "token" in the response. See #54.
+ type tokenResponse struct {
+ Token string `json:"token"`
+ AccessToken string `json:"access_token"`
+ RefreshToken string `json:"refresh_token"`
+ // TODO: handle expiry?
+ }
+
+ var response tokenResponse
+ if err := json.Unmarshal(content, &response); err != nil {
+ return err
+ }
+
+ // Some registries set access_token instead of token.
+ if response.AccessToken != "" {
+ response.Token = response.AccessToken
+ }
+
+ // Find a token to turn into a Bearer authenticator
+ if response.Token != "" {
+ bt.bearer.RegistryToken = response.Token
+ } else {
+ return fmt.Errorf("no token in bearer response:\n%s", content)
+ }
+
+ // If we obtained a refresh token from the oauth flow, use that for refresh() now.
+ if response.RefreshToken != "" {
+ bt.basic = authn.FromConfig(authn.AuthConfig{
+ IdentityToken: response.RefreshToken,
+ })
+ }
+
+ return nil
+}
+
+func matchesHost(reg name.Registry, in *http.Request, scheme string) bool {
+ canonicalHeaderHost := canonicalAddress(in.Host, scheme)
+ canonicalURLHost := canonicalAddress(in.URL.Host, scheme)
+ canonicalRegistryHost := canonicalAddress(reg.RegistryStr(), scheme)
+ return canonicalHeaderHost == canonicalRegistryHost || canonicalURLHost == canonicalRegistryHost
+}
+
+func canonicalAddress(host, scheme string) (address string) {
+ // The host may be any one of:
+ // - hostname
+ // - hostname:port
+ // - ipv4
+ // - ipv4:port
+ // - ipv6
+ // - [ipv6]:port
+ // As net.SplitHostPort returns an error if the host does not contain a port, we should only attempt
+ // to call it when we know that the address contains a port
+ if strings.Count(host, ":") == 1 || (strings.Count(host, ":") >= 2 && strings.Contains(host, "]:")) {
+ hostname, port, err := net.SplitHostPort(host)
+ if err != nil {
+ return host
+ }
+ if port == "" {
+ port = portMap[scheme]
+ }
+
+ return net.JoinHostPort(hostname, port)
+ }
+
+ return net.JoinHostPort(host, portMap[scheme])
+}
+
+// https://docs.docker.com/registry/spec/auth/oauth/
+func (bt *bearerTransport) refreshOauth(ctx context.Context) ([]byte, error) {
+ auth, err := bt.basic.Authorization()
+ if err != nil {
+ return nil, err
+ }
+
+ u, err := url.Parse(bt.realm)
+ if err != nil {
+ return nil, err
+ }
+
+ v := url.Values{}
+ v.Set("scope", strings.Join(bt.scopes, " "))
+ v.Set("service", bt.service)
+ v.Set("client_id", defaultUserAgent)
+ if auth.IdentityToken != "" {
+ v.Set("grant_type", "refresh_token")
+ v.Set("refresh_token", auth.IdentityToken)
+ } else if auth.Username != "" && auth.Password != "" {
+ // TODO(#629): This is unreachable.
+ v.Set("grant_type", "password")
+ v.Set("username", auth.Username)
+ v.Set("password", auth.Password)
+ v.Set("access_type", "offline")
+ }
+
+ client := http.Client{Transport: bt.inner}
+ req, err := http.NewRequest(http.MethodPost, u.String(), strings.NewReader(v.Encode()))
+ if err != nil {
+ return nil, err
+ }
+ req.Header.Set("Content-Type", "application/x-www-form-urlencoded")
+
+ // We don't want to log credentials.
+ ctx = redact.NewContext(ctx, "oauth token response contains credentials")
+
+ resp, err := client.Do(req.WithContext(ctx))
+ if err != nil {
+ return nil, err
+ }
+ defer resp.Body.Close()
+
+ if err := CheckError(resp, http.StatusOK); err != nil {
+ logs.Warn.Printf("No matching credentials were found for %q", bt.registry)
+ return nil, err
+ }
+
+ return ioutil.ReadAll(resp.Body)
+}
+
+// https://docs.docker.com/registry/spec/auth/token/
+func (bt *bearerTransport) refreshBasic(ctx context.Context) ([]byte, error) {
+ u, err := url.Parse(bt.realm)
+ if err != nil {
+ return nil, err
+ }
+ b := &basicTransport{
+ inner: bt.inner,
+ auth: bt.basic,
+ target: u.Host,
+ }
+ client := http.Client{Transport: b}
+
+ v := u.Query()
+ v["scope"] = bt.scopes
+ v.Set("service", bt.service)
+ u.RawQuery = v.Encode()
+
+ req, err := http.NewRequest(http.MethodGet, u.String(), nil)
+ if err != nil {
+ return nil, err
+ }
+
+ // We don't want to log credentials.
+ ctx = redact.NewContext(ctx, "basic token response contains credentials")
+
+ resp, err := client.Do(req.WithContext(ctx))
+ if err != nil {
+ return nil, err
+ }
+ defer resp.Body.Close()
+
+ if err := CheckError(resp, http.StatusOK); err != nil {
+ logs.Warn.Printf("No matching credentials were found for %q", bt.registry)
+ return nil, err
+ }
+
+ return ioutil.ReadAll(resp.Body)
+}
diff --git a/vendor/github.com/google/go-containerregistry/pkg/v1/remote/transport/doc.go b/vendor/github.com/google/go-containerregistry/pkg/v1/remote/transport/doc.go
new file mode 100644
index 0000000000..ff7025b5c0
--- /dev/null
+++ b/vendor/github.com/google/go-containerregistry/pkg/v1/remote/transport/doc.go
@@ -0,0 +1,18 @@
+// Copyright 2018 Google LLC All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package transport provides facilities for setting up an authenticated
+// http.RoundTripper given an Authenticator and base RoundTripper. See
+// transport.New for more information.
+package transport
diff --git a/vendor/github.com/google/go-containerregistry/pkg/v1/remote/transport/error.go b/vendor/github.com/google/go-containerregistry/pkg/v1/remote/transport/error.go
new file mode 100644
index 0000000000..bb59d22e4d
--- /dev/null
+++ b/vendor/github.com/google/go-containerregistry/pkg/v1/remote/transport/error.go
@@ -0,0 +1,197 @@
+// Copyright 2018 Google LLC All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package transport
+
+import (
+ "encoding/json"
+ "fmt"
+ "io/ioutil"
+ "net/http"
+ "net/url"
+ "strings"
+)
+
+// The set of query string keys that we expect to send as part of the registry
+// protocol. Anything else is potentially dangerous to leak, as it's probably
+// from a redirect. These redirects often included tokens or signed URLs.
+var paramAllowlist = map[string]struct{}{
+ // Token exchange
+ "scope": {},
+ "service": {},
+ // Cross-repo mounting
+ "mount": {},
+ "from": {},
+ // Layer PUT
+ "digest": {},
+ // Listing tags and catalog
+ "n": {},
+ "last": {},
+}
+
+// Error implements error to support the following error specification:
+// https://github.com/docker/distribution/blob/master/docs/spec/api.md#errors
+type Error struct {
+ Errors []Diagnostic `json:"errors,omitempty"`
+ // The http status code returned.
+ StatusCode int
+ // The raw body if we couldn't understand it.
+ rawBody string
+ // The request that failed.
+ request *http.Request
+}
+
+// Check that Error implements error
+var _ error = (*Error)(nil)
+
+// Error implements error
+func (e *Error) Error() string {
+ prefix := ""
+ if e.request != nil {
+ prefix = fmt.Sprintf("%s %s: ", e.request.Method, redactURL(e.request.URL))
+ }
+ return prefix + e.responseErr()
+}
+
+func (e *Error) responseErr() string {
+ switch len(e.Errors) {
+ case 0:
+ if len(e.rawBody) == 0 {
+ if e.request != nil && e.request.Method == http.MethodHead {
+ return fmt.Sprintf("unexpected status code %d %s (HEAD responses have no body, use GET for details)", e.StatusCode, http.StatusText(e.StatusCode))
+ }
+ return fmt.Sprintf("unexpected status code %d %s", e.StatusCode, http.StatusText(e.StatusCode))
+ }
+ return fmt.Sprintf("unexpected status code %d %s: %s", e.StatusCode, http.StatusText(e.StatusCode), e.rawBody)
+ case 1:
+ return e.Errors[0].String()
+ default:
+ var errors []string
+ for _, d := range e.Errors {
+ errors = append(errors, d.String())
+ }
+ return fmt.Sprintf("multiple errors returned: %s",
+ strings.Join(errors, "; "))
+ }
+}
+
+// Temporary reports whether the error is temporary, i.e. whether retrying the
+// request that produced it might succeed.
+func (e *Error) Temporary() bool {
+ if len(e.Errors) == 0 {
+ _, ok := temporaryStatusCodes[e.StatusCode]
+ return ok
+ }
+ for _, d := range e.Errors {
+ if _, ok := temporaryErrorCodes[d.Code]; !ok {
+ return false
+ }
+ }
+ return true
+}
+
+// TODO(jonjohnsonjr): Consider moving to internal/redact.
+func redactURL(original *url.URL) *url.URL {
+ qs := original.Query()
+ for k, v := range qs {
+ for i := range v {
+ if _, ok := paramAllowlist[k]; !ok {
+ // key is not in the Allowlist
+ v[i] = "REDACTED"
+ }
+ }
+ }
+ redacted := *original
+ redacted.RawQuery = qs.Encode()
+ return &redacted
+}
+
+// Diagnostic represents a single error returned by a Docker registry interaction.
+type Diagnostic struct {
+ Code ErrorCode `json:"code"`
+ Message string `json:"message,omitempty"`
+ Detail interface{} `json:"detail,omitempty"`
+}
+
+// String stringifies the Diagnostic in the form: $Code: $Message[; $Detail]
+func (d Diagnostic) String() string {
+ msg := fmt.Sprintf("%s: %s", d.Code, d.Message)
+ if d.Detail != nil {
+ msg = fmt.Sprintf("%s; %v", msg, d.Detail)
+ }
+ return msg
+}
+
+// ErrorCode is an enumeration of supported error codes.
+type ErrorCode string
+
+// The set of error conditions a registry may return:
+// https://github.com/docker/distribution/blob/master/docs/spec/api.md#errors-2
+const (
+ BlobUnknownErrorCode ErrorCode = "BLOB_UNKNOWN"
+ BlobUploadInvalidErrorCode ErrorCode = "BLOB_UPLOAD_INVALID"
+ BlobUploadUnknownErrorCode ErrorCode = "BLOB_UPLOAD_UNKNOWN"
+ DigestInvalidErrorCode ErrorCode = "DIGEST_INVALID"
+ ManifestBlobUnknownErrorCode ErrorCode = "MANIFEST_BLOB_UNKNOWN"
+ ManifestInvalidErrorCode ErrorCode = "MANIFEST_INVALID"
+ ManifestUnknownErrorCode ErrorCode = "MANIFEST_UNKNOWN"
+ ManifestUnverifiedErrorCode ErrorCode = "MANIFEST_UNVERIFIED"
+ NameInvalidErrorCode ErrorCode = "NAME_INVALID"
+ NameUnknownErrorCode ErrorCode = "NAME_UNKNOWN"
+ SizeInvalidErrorCode ErrorCode = "SIZE_INVALID"
+ TagInvalidErrorCode ErrorCode = "TAG_INVALID"
+ UnauthorizedErrorCode ErrorCode = "UNAUTHORIZED"
+ DeniedErrorCode ErrorCode = "DENIED"
+ UnsupportedErrorCode ErrorCode = "UNSUPPORTED"
+ TooManyRequestsErrorCode ErrorCode = "TOOMANYREQUESTS"
+)
+
+// TODO: Include other error types.
+var temporaryErrorCodes = map[ErrorCode]struct{}{
+ BlobUploadInvalidErrorCode: {},
+ TooManyRequestsErrorCode: {},
+}
+
+var temporaryStatusCodes = map[int]struct{}{
+ http.StatusRequestTimeout: {},
+ http.StatusInternalServerError: {},
+ http.StatusBadGateway: {},
+ http.StatusServiceUnavailable: {},
+}
+
+// CheckError returns a structured error if the response status is not in codes.
+func CheckError(resp *http.Response, codes ...int) error {
+ for _, code := range codes {
+ if resp.StatusCode == code {
+ // This is one of the supported status codes.
+ return nil
+ }
+ }
+ b, err := ioutil.ReadAll(resp.Body)
+ if err != nil {
+ return err
+ }
+
+ // https://github.com/docker/distribution/blob/master/docs/spec/api.md#errors
+ structuredError := &Error{}
+
+ // This can fail if e.g. the response body is not valid JSON. That's fine,
+ // we'll construct an appropriate error string from the body and status code.
+ _ = json.Unmarshal(b, structuredError)
+
+ structuredError.rawBody = string(b)
+ structuredError.StatusCode = resp.StatusCode
+ structuredError.request = resp.Request
+
+ return structuredError
+}
diff --git a/vendor/github.com/google/go-containerregistry/pkg/v1/remote/transport/logger.go b/vendor/github.com/google/go-containerregistry/pkg/v1/remote/transport/logger.go
new file mode 100644
index 0000000000..c341f844e6
--- /dev/null
+++ b/vendor/github.com/google/go-containerregistry/pkg/v1/remote/transport/logger.go
@@ -0,0 +1,91 @@
+// Copyright 2020 Google LLC All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package transport
+
+import (
+ "fmt"
+ "net/http"
+ "net/http/httputil"
+ "time"
+
+ "github.com/google/go-containerregistry/internal/redact"
+ "github.com/google/go-containerregistry/pkg/logs"
+)
+
+type logTransport struct {
+ inner http.RoundTripper
+}
+
+// NewLogger returns a transport that logs requests and responses to
+// github.com/google/go-containerregistry/pkg/logs.Debug.
+func NewLogger(inner http.RoundTripper) http.RoundTripper {
+ return &logTransport{inner}
+}
+
+func (t *logTransport) RoundTrip(in *http.Request) (out *http.Response, err error) {
+ // Inspired by: github.com/motemen/go-loghttp
+
+ // We redact token responses and binary blobs in response/request.
+ omitBody, reason := redact.FromContext(in.Context())
+ if omitBody {
+ logs.Debug.Printf("--> %s %s [body redacted: %s]", in.Method, in.URL, reason)
+ } else {
+ logs.Debug.Printf("--> %s %s", in.Method, in.URL)
+ }
+
+ // Save these headers so we can redact Authorization.
+ savedHeaders := in.Header.Clone()
+ if in.Header != nil && in.Header.Get("authorization") != "" {
+ in.Header.Set("authorization", "")
+ }
+
+ b, err := httputil.DumpRequestOut(in, !omitBody)
+ if err == nil {
+ logs.Debug.Println(string(b))
+ } else {
+ logs.Debug.Printf("Failed to dump request %s %s: %v", in.Method, in.URL, err)
+ }
+
+ // Restore the non-redacted headers.
+ in.Header = savedHeaders
+
+ start := time.Now()
+ out, err = t.inner.RoundTrip(in)
+ duration := time.Since(start)
+ if err != nil {
+ logs.Debug.Printf("<-- %v %s %s (%s)", err, in.Method, in.URL, duration)
+ }
+ if out != nil {
+ msg := fmt.Sprintf("<-- %d", out.StatusCode)
+ if out.Request != nil {
+ msg = fmt.Sprintf("%s %s", msg, out.Request.URL)
+ }
+ msg = fmt.Sprintf("%s (%s)", msg, duration)
+
+ if omitBody {
+ msg = fmt.Sprintf("%s [body redacted: %s]", msg, reason)
+ }
+
+ logs.Debug.Print(msg)
+
+ b, err := httputil.DumpResponse(out, !omitBody)
+ if err == nil {
+ logs.Debug.Println(string(b))
+ } else {
+ logs.Debug.Printf("Failed to dump response %s %s: %v", in.Method, in.URL, err)
+ }
+ }
+ return
+}
diff --git a/vendor/github.com/google/go-containerregistry/pkg/v1/remote/transport/ping.go b/vendor/github.com/google/go-containerregistry/pkg/v1/remote/transport/ping.go
new file mode 100644
index 0000000000..396d4e0342
--- /dev/null
+++ b/vendor/github.com/google/go-containerregistry/pkg/v1/remote/transport/ping.go
@@ -0,0 +1,129 @@
+// Copyright 2018 Google LLC All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package transport
+
+import (
+ "context"
+ "errors"
+ "fmt"
+ "io"
+ "io/ioutil"
+ "net/http"
+ "strings"
+
+ authchallenge "github.com/docker/distribution/registry/client/auth/challenge"
+ "github.com/google/go-containerregistry/pkg/name"
+)
+
+type challenge string
+
+const (
+ anonymous challenge = "anonymous"
+ basic challenge = "basic"
+ bearer challenge = "bearer"
+)
+
+type pingResp struct {
+ challenge challenge
+
+ // Following the challenge there are often key/value pairs
+ // e.g. Bearer service="gcr.io",realm="https://auth.gcr.io/v36/tokenz"
+ parameters map[string]string
+
+ // The registry's scheme to use. Communicates whether we fell back to http.
+ scheme string
+}
+
+func (c challenge) Canonical() challenge {
+ return challenge(strings.ToLower(string(c)))
+}
+
+func parseChallenge(suffix string) map[string]string {
+ kv := make(map[string]string)
+ for _, token := range strings.Split(suffix, ",") {
+ // Trim any whitespace around each token.
+ token = strings.Trim(token, " ")
+
+ // Break the token into a key/value pair
+ if parts := strings.SplitN(token, "=", 2); len(parts) == 2 {
+ // Unquote the value, if it is quoted.
+ kv[parts[0]] = strings.Trim(parts[1], `"`)
+ } else {
+ // If there was only one part, treat it as a key with an empty value
+ kv[token] = ""
+ }
+ }
+ return kv
+}
+
+func ping(ctx context.Context, reg name.Registry, t http.RoundTripper) (*pingResp, error) {
+ client := http.Client{Transport: t}
+
+ // This first attempts to use "https" for every request, falling back to http
+ // if the registry matches our localhost heuristic or if it is intentionally
+ // set to insecure via name.NewInsecureRegistry.
+ schemes := []string{"https"}
+ if reg.Scheme() == "http" {
+ schemes = append(schemes, "http")
+ }
+
+ var errs []string
+ for _, scheme := range schemes {
+ url := fmt.Sprintf("%s://%s/v2/", scheme, reg.Name())
+ req, err := http.NewRequest(http.MethodGet, url, nil)
+ if err != nil {
+ return nil, err
+ }
+ resp, err := client.Do(req.WithContext(ctx))
+ if err != nil {
+ errs = append(errs, err.Error())
+ // Potentially retry with http.
+ continue
+ }
+ defer func() {
+ // Drain the body so that the connection made by the ping can be
+ // reused for the following access to the registry.
+ io.Copy(ioutil.Discard, resp.Body)
+ resp.Body.Close()
+ }()
+
+ switch resp.StatusCode {
+ case http.StatusOK:
+ // If we get a 200, then no authentication is needed.
+ return &pingResp{
+ challenge: anonymous,
+ scheme: scheme,
+ }, nil
+ case http.StatusUnauthorized:
+ if challenges := authchallenge.ResponseChallenges(resp); len(challenges) != 0 {
+ // If we hit more than one, I'm not even sure what to do.
+ wac := challenges[0]
+ return &pingResp{
+ challenge: challenge(wac.Scheme).Canonical(),
+ parameters: wac.Parameters,
+ scheme: scheme,
+ }, nil
+ }
+ // Otherwise, just return the challenge without parameters.
+ return &pingResp{
+ challenge: challenge(resp.Header.Get("WWW-Authenticate")).Canonical(),
+ scheme: scheme,
+ }, nil
+ default:
+ return nil, CheckError(resp, http.StatusOK, http.StatusUnauthorized)
+ }
+ }
+ return nil, errors.New(strings.Join(errs, "; "))
+}
diff --git a/vendor/github.com/google/go-containerregistry/pkg/v1/remote/transport/retry.go b/vendor/github.com/google/go-containerregistry/pkg/v1/remote/transport/retry.go
new file mode 100644
index 0000000000..7f7d1e452e
--- /dev/null
+++ b/vendor/github.com/google/go-containerregistry/pkg/v1/remote/transport/retry.go
@@ -0,0 +1,88 @@
+// Copyright 2018 Google LLC All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package transport
+
+import (
+ "net/http"
+ "time"
+
+ "github.com/google/go-containerregistry/internal/retry"
+)
+
+// Sleep for 0.1, 0.3, 0.9, 2.7 seconds. This should cover networking blips.
+var defaultBackoff = retry.Backoff{
+ Duration: 100 * time.Millisecond,
+ Factor: 3.0,
+ Jitter: 0.1,
+ Steps: 5,
+}
+
+var _ http.RoundTripper = (*retryTransport)(nil)
+
+// retryTransport wraps a RoundTripper and retries temporary network errors.
+type retryTransport struct {
+ inner http.RoundTripper
+ backoff retry.Backoff
+ predicate retry.Predicate
+}
+
+// Option is a functional option for retryTransport.
+type Option func(*options)
+
+type options struct {
+ backoff retry.Backoff
+ predicate retry.Predicate
+}
+
+// WithRetryBackoff sets the backoff for retry operations.
+func WithRetryBackoff(backoff retry.Backoff) Option {
+ return func(o *options) {
+ o.backoff = backoff
+ }
+}
+
+// WithRetryPredicate sets the predicate for retry operations.
+func WithRetryPredicate(predicate func(error) bool) Option {
+ return func(o *options) {
+ o.predicate = predicate
+ }
+}
+
+// NewRetry returns a transport that retries errors.
+func NewRetry(inner http.RoundTripper, opts ...Option) http.RoundTripper {
+ o := &options{
+ backoff: defaultBackoff,
+ predicate: retry.IsTemporary,
+ }
+
+ for _, opt := range opts {
+ opt(o)
+ }
+
+ return &retryTransport{
+ inner: inner,
+ backoff: o.backoff,
+ predicate: o.predicate,
+ }
+}
+
+func (t *retryTransport) RoundTrip(in *http.Request) (out *http.Response, err error) {
+ roundtrip := func() error {
+ out, err = t.inner.RoundTrip(in)
+ return err
+ }
+ retry.Retry(roundtrip, t.predicate, t.backoff)
+ return
+}
diff --git a/vendor/github.com/google/go-containerregistry/pkg/v1/remote/transport/schemer.go b/vendor/github.com/google/go-containerregistry/pkg/v1/remote/transport/schemer.go
new file mode 100644
index 0000000000..d70b6a850c
--- /dev/null
+++ b/vendor/github.com/google/go-containerregistry/pkg/v1/remote/transport/schemer.go
@@ -0,0 +1,44 @@
+// Copyright 2019 Google LLC All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package transport
+
+import (
+ "net/http"
+
+ "github.com/google/go-containerregistry/pkg/name"
+)
+
+type schemeTransport struct {
+ // Scheme we should use, determined by ping response.
+ scheme string
+
+ // Registry we're talking to.
+ registry name.Registry
+
+ // Wrapped by schemeTransport.
+ inner http.RoundTripper
+}
+
+// RoundTrip implements http.RoundTripper
+func (st *schemeTransport) RoundTrip(in *http.Request) (*http.Response, error) {
+ // When we ping() the registry, we determine whether to use http or https
+ // based on which scheme was successful. That is only valid for the
+ // registry server and not e.g. a separate token server or blob storage,
+ // so we should only override the scheme if the host is the registry.
+ if matchesHost(st.registry, in, st.scheme) {
+ in.URL.Scheme = st.scheme
+ }
+ return st.inner.RoundTrip(in)
+}
diff --git a/vendor/github.com/google/go-containerregistry/pkg/v1/remote/transport/scope.go b/vendor/github.com/google/go-containerregistry/pkg/v1/remote/transport/scope.go
new file mode 100644
index 0000000000..c3b56f7a41
--- /dev/null
+++ b/vendor/github.com/google/go-containerregistry/pkg/v1/remote/transport/scope.go
@@ -0,0 +1,24 @@
+// Copyright 2018 Google LLC All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package transport
+
+// Scopes suitable to qualify each Repository
+const (
+ PullScope string = "pull"
+ PushScope string = "push,pull"
+ // For now DELETE is PUSH, which is the read/write ACL.
+ DeleteScope string = PushScope
+ CatalogScope string = "catalog"
+)
diff --git a/vendor/github.com/google/go-containerregistry/pkg/v1/remote/transport/transport.go b/vendor/github.com/google/go-containerregistry/pkg/v1/remote/transport/transport.go
new file mode 100644
index 0000000000..5c35fc7c9b
--- /dev/null
+++ b/vendor/github.com/google/go-containerregistry/pkg/v1/remote/transport/transport.go
@@ -0,0 +1,103 @@
+// Copyright 2018 Google LLC All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package transport
+
+import (
+ "context"
+ "fmt"
+ "net/http"
+
+ "github.com/google/go-containerregistry/pkg/authn"
+ "github.com/google/go-containerregistry/pkg/name"
+)
+
+// New returns a new RoundTripper based on the provided RoundTripper that has been
+// setup to authenticate with the remote registry "reg", in the capacity
+// laid out by the specified scopes.
+//
+// TODO(jonjohnsonjr): Deprecate this.
+func New(reg name.Registry, auth authn.Authenticator, t http.RoundTripper, scopes []string) (http.RoundTripper, error) {
+ return NewWithContext(context.Background(), reg, auth, t, scopes)
+}
+
+// NewWithContext returns a new RoundTripper based on the provided RoundTripper that has been
+// setup to authenticate with the remote registry "reg", in the capacity
+// laid out by the specified scopes.
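+//
+// A minimal usage sketch (the repository and scopes are illustrative):
+//
+//	repo, _ := name.NewRepository("registry.example.com/my/repo")
+//	scopes := []string{repo.Scope(PullScope)}
+//	rt, err := NewWithContext(context.Background(), repo.Registry, authn.Anonymous, http.DefaultTransport, scopes)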
+func NewWithContext(ctx context.Context, reg name.Registry, auth authn.Authenticator, t http.RoundTripper, scopes []string) (http.RoundTripper, error) {
+ // The handshake:
+ // 1. Use "t" to ping() the registry for the authentication challenge.
+ //
+ // 2a. If we get back a 200, then simply use "t".
+ //
+ // 2b. If we get back a 401 with a Basic challenge, then use a transport
+ // that just attaches auth to each roundtrip.
+ //
+ // 2c. If we get back a 401 with a Bearer challenge, then use a transport
+ // that attaches a bearer token to each request, and refreshes it on 401s.
+ // Perform an initial refresh to seed the bearer token.
+
+ // First we ping the registry to determine the parameters of the authentication handshake
+ // (if one is even necessary).
+ pr, err := ping(ctx, reg, t)
+ if err != nil {
+ return nil, err
+ }
+
+ // Wrap t with a useragent transport unless we already have one.
+ if _, ok := t.(*userAgentTransport); !ok {
+ t = NewUserAgent(t, "")
+ }
+
+ // Wrap t in a transport that selects the appropriate scheme based on the ping response.
+ t = &schemeTransport{
+ scheme: pr.scheme,
+ registry: reg,
+ inner: t,
+ }
+
+ switch pr.challenge.Canonical() {
+ case anonymous:
+ return t, nil
+ case basic:
+ return &basicTransport{inner: t, auth: auth, target: reg.RegistryStr()}, nil
+ case bearer:
+ // We require the realm, which tells us where to send our Basic auth to turn it into Bearer auth.
+ realm, ok := pr.parameters["realm"]
+ if !ok {
+ return nil, fmt.Errorf("malformed www-authenticate, missing realm: %v", pr.parameters)
+ }
+ service, ok := pr.parameters["service"]
+ if !ok {
+ // If the service parameter is not specified, then default it to the registry
+ // with which we are talking.
+ service = reg.String()
+ }
+ bt := &bearerTransport{
+ inner: t,
+ basic: auth,
+ realm: realm,
+ registry: reg,
+ service: service,
+ scopes: scopes,
+ scheme: pr.scheme,
+ }
+ if err := bt.refresh(ctx); err != nil {
+ return nil, err
+ }
+ return bt, nil
+ default:
+ return nil, fmt.Errorf("unrecognized challenge: %s", pr.challenge)
+ }
+}
diff --git a/vendor/github.com/google/go-containerregistry/pkg/v1/remote/transport/useragent.go b/vendor/github.com/google/go-containerregistry/pkg/v1/remote/transport/useragent.go
new file mode 100644
index 0000000000..74a9e71bdf
--- /dev/null
+++ b/vendor/github.com/google/go-containerregistry/pkg/v1/remote/transport/useragent.go
@@ -0,0 +1,94 @@
+// Copyright 2019 Google LLC All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package transport
+
+import (
+ "fmt"
+ "net/http"
+ "runtime/debug"
+)
+
+var (
+ // Version can be set via:
+ // -ldflags="-X 'github.com/google/go-containerregistry/pkg/v1/remote/transport.Version=$TAG'"
+ Version string
+
+ ggcrVersion = defaultUserAgent
+)
+
+const (
+ defaultUserAgent = "go-containerregistry"
+ moduleName = "github.com/google/go-containerregistry"
+)
+
+type userAgentTransport struct {
+ inner http.RoundTripper
+ ua string
+}
+
+func init() {
+ if v := version(); v != "" {
+ ggcrVersion = fmt.Sprintf("%s/%s", defaultUserAgent, v)
+ }
+}
+
+func version() string {
+ if Version != "" {
+ // Version was set via ldflags, just return it.
+ return Version
+ }
+
+ info, ok := debug.ReadBuildInfo()
+ if !ok {
+ return ""
+ }
+
+ // Happens for crane and gcrane.
+ if info.Main.Path == moduleName {
+ return info.Main.Version
+ }
+
+ // Anything else.
+ for _, dep := range info.Deps {
+ if dep.Path == moduleName {
+ return dep.Version
+ }
+ }
+
+ return ""
+}
+
+// NewUserAgent returns an http.RoundTripper that sets the user agent to
+// the provided string plus additional go-containerregistry information,
+// e.g. if provided "crane/v0.1.4" and this module was built at v0.1.4:
+//
+// User-Agent: crane/v0.1.4 go-containerregistry/v0.1.4
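+//
+// e.g. (illustrative): rt := NewUserAgent(http.DefaultTransport, "crane/v0.1.4")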
+func NewUserAgent(inner http.RoundTripper, ua string) http.RoundTripper {
+ if ua == "" {
+ ua = ggcrVersion
+ } else {
+ ua = fmt.Sprintf("%s %s", ua, ggcrVersion)
+ }
+ return &userAgentTransport{
+ inner: inner,
+ ua: ua,
+ }
+}
+
+// RoundTrip implements http.RoundTripper
+func (ut *userAgentTransport) RoundTrip(in *http.Request) (*http.Response, error) {
+ in.Header.Set("User-Agent", ut.ua)
+ return ut.inner.RoundTrip(in)
+}
diff --git a/vendor/github.com/google/go-containerregistry/pkg/v1/remote/write.go b/vendor/github.com/google/go-containerregistry/pkg/v1/remote/write.go
new file mode 100644
index 0000000000..05d99d076d
--- /dev/null
+++ b/vendor/github.com/google/go-containerregistry/pkg/v1/remote/write.go
@@ -0,0 +1,901 @@
+// Copyright 2018 Google LLC All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package remote
+
+import (
+ "bytes"
+ "context"
+ "errors"
+ "fmt"
+ "io"
+ "net/http"
+ "net/url"
+ "strings"
+ "sync/atomic"
+ "syscall"
+ "time"
+
+ "github.com/google/go-containerregistry/internal/redact"
+ "github.com/google/go-containerregistry/internal/retry"
+ "github.com/google/go-containerregistry/pkg/logs"
+ "github.com/google/go-containerregistry/pkg/name"
+ v1 "github.com/google/go-containerregistry/pkg/v1"
+ "github.com/google/go-containerregistry/pkg/v1/partial"
+ "github.com/google/go-containerregistry/pkg/v1/remote/transport"
+ "github.com/google/go-containerregistry/pkg/v1/stream"
+ "github.com/google/go-containerregistry/pkg/v1/types"
+ "golang.org/x/sync/errgroup"
+)
+
+// Taggable is an interface that enables a manifest PUT (e.g. for tagging).
+type Taggable interface {
+ RawManifest() ([]byte, error)
+}
+
+// Write pushes the provided img to the specified image reference.
+func Write(ref name.Reference, img v1.Image, options ...Option) (rerr error) {
+ o, err := makeOptions(ref.Context(), options...)
+ if err != nil {
+ return err
+ }
+
+ var lastUpdate *v1.Update
+ if o.updates != nil {
+ lastUpdate = &v1.Update{}
+ lastUpdate.Total, err = countImage(img, o.allowNondistributableArtifacts)
+ if err != nil {
+ return err
+ }
+ defer close(o.updates)
+ defer func() { sendError(o.updates, rerr) }()
+ }
+ return writeImage(ref, img, o, lastUpdate)
+}
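+
+// Usage sketch (illustrative only; random and authn are sibling
+// go-containerregistry packages, and WithAuthFromKeychain is assumed to be
+// provided by this package's options):
+//
+//	img, _ := random.Image(1024, 1) // small throwaway image
+//	tag, _ := name.NewTag("registry.example.com/repo:latest")
+//	if err := Write(tag, img, WithAuthFromKeychain(authn.DefaultKeychain)); err != nil {
+//		// handle the push failure
+//	}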
+
+func writeImage(ref name.Reference, img v1.Image, o *options, lastUpdate *v1.Update) error {
+ ls, err := img.Layers()
+ if err != nil {
+ return err
+ }
+ scopes := scopesForUploadingImage(ref.Context(), ls)
+ tr, err := transport.NewWithContext(o.context, ref.Context().Registry, o.auth, o.transport, scopes)
+ if err != nil {
+ return err
+ }
+ w := writer{
+ repo: ref.Context(),
+ client: &http.Client{Transport: tr},
+ context: o.context,
+ updates: o.updates,
+ lastUpdate: lastUpdate,
+ }
+
+ // Upload individual blobs and collect any errors.
+ blobChan := make(chan v1.Layer, 2*o.jobs)
+ g, ctx := errgroup.WithContext(o.context)
+ for i := 0; i < o.jobs; i++ {
+ // Start N workers consuming blobs to upload.
+ g.Go(func() error {
+ for b := range blobChan {
+ if err := w.uploadOne(b); err != nil {
+ return err
+ }
+ }
+ return nil
+ })
+ }
+
+ // Upload individual layers in goroutines and collect any errors.
+ // If we can dedupe by the layer digest, try to do so. If we can't determine
+ // the digest for whatever reason, we can't dedupe and might re-upload.
+ g.Go(func() error {
+ defer close(blobChan)
+ uploaded := map[v1.Hash]bool{}
+ for _, l := range ls {
+ l := l
+
+ // Handle foreign layers.
+ mt, err := l.MediaType()
+ if err != nil {
+ return err
+ }
+ if !mt.IsDistributable() && !o.allowNondistributableArtifacts {
+ continue
+ }
+
+ // Streaming layers calculate their digests while uploading them. Assume
+ // an error here indicates we need to upload the layer.
+ h, err := l.Digest()
+ if err == nil {
+ // If we can determine the layer's digest ahead of
+ // time, use it to dedupe uploads.
+ if uploaded[h] {
+ continue // Already uploading.
+ }
+ uploaded[h] = true
+ }
+ select {
+ case blobChan <- l:
+ case <-ctx.Done():
+ return ctx.Err()
+ }
+ }
+ return nil
+ })
+
+ if l, err := partial.ConfigLayer(img); err != nil {
+ // We can't read the ConfigLayer, possibly because of streaming layers,
+ // since the layer DiffIDs haven't been calculated yet. Attempt to wait
+ // for the other layers to be uploaded, then try the config again.
+ if err := g.Wait(); err != nil {
+ return err
+ }
+
+ // Now that all the layers are uploaded, try to upload the config file blob.
+ l, err := partial.ConfigLayer(img)
+ if err != nil {
+ return err
+ }
+ if err := w.uploadOne(l); err != nil {
+ return err
+ }
+ } else {
+ // We *can* read the ConfigLayer, so upload it concurrently with the layers.
+ g.Go(func() error {
+ return w.uploadOne(l)
+ })
+
+ // Wait for the layers + config.
+ if err := g.Wait(); err != nil {
+ return err
+ }
+ }
+
+ // With all of the constituent elements uploaded, upload the manifest
+ // to commit the image.
+ return w.commitManifest(img, ref)
+}
+
+// writer writes the elements of an image to a remote image reference.
+type writer struct {
+ repo name.Repository
+ client *http.Client
+ context context.Context
+
+ updates chan<- v1.Update
+ lastUpdate *v1.Update
+}
+
+func sendError(ch chan<- v1.Update, err error) error {
+ if err != nil && ch != nil {
+ ch <- v1.Update{Error: err}
+ }
+ return err
+}
+
+// url returns a url.URL for the specified path in the context of this remote image reference.
+func (w *writer) url(path string) url.URL {
+ return url.URL{
+ Scheme: w.repo.Registry.Scheme(),
+ Host: w.repo.RegistryStr(),
+ Path: path,
+ }
+}
+
+// nextLocation extracts the fully-qualified URL to which we should send the next request in an upload sequence.
+func (w *writer) nextLocation(resp *http.Response) (string, error) {
+ loc := resp.Header.Get("Location")
+ if len(loc) == 0 {
+ return "", errors.New("missing Location header")
+ }
+ u, err := url.Parse(loc)
+ if err != nil {
+ return "", err
+ }
+
+ // If the location header returned is just a url path, then fully qualify it.
+ // We cannot simply call w.url, since there might be an embedded query string.
+ return resp.Request.URL.ResolveReference(u).String(), nil
+}
+
+// checkExistingBlob checks if a blob exists already in the repository by making a
+// HEAD request to the blob store API. GCR performs an existence check on the
+// initiation if "mount" is specified, even if no "from" sources are specified.
+// However, this is not broadly applicable to all registries, e.g. ECR.
+func (w *writer) checkExistingBlob(h v1.Hash) (bool, error) {
+ u := w.url(fmt.Sprintf("/v2/%s/blobs/%s", w.repo.RepositoryStr(), h.String()))
+
+ req, err := http.NewRequest(http.MethodHead, u.String(), nil)
+ if err != nil {
+ return false, err
+ }
+
+ resp, err := w.client.Do(req.WithContext(w.context))
+ if err != nil {
+ return false, err
+ }
+ defer resp.Body.Close()
+
+ if err := transport.CheckError(resp, http.StatusOK, http.StatusNotFound); err != nil {
+ return false, err
+ }
+
+ return resp.StatusCode == http.StatusOK, nil
+}
+
+// checkExistingManifest checks if a manifest exists already in the repository
+// by making a HEAD request to the manifest API.
+func (w *writer) checkExistingManifest(h v1.Hash, mt types.MediaType) (bool, error) {
+ u := w.url(fmt.Sprintf("/v2/%s/manifests/%s", w.repo.RepositoryStr(), h.String()))
+
+ req, err := http.NewRequest(http.MethodHead, u.String(), nil)
+ if err != nil {
+ return false, err
+ }
+ req.Header.Set("Accept", string(mt))
+
+ resp, err := w.client.Do(req.WithContext(w.context))
+ if err != nil {
+ return false, err
+ }
+ defer resp.Body.Close()
+
+ if err := transport.CheckError(resp, http.StatusOK, http.StatusNotFound); err != nil {
+ return false, err
+ }
+
+ return resp.StatusCode == http.StatusOK, nil
+}
+
+// initiateUpload initiates the blob upload, which starts with a POST that can
+// optionally include the hash of the layer and a list of repositories from
+// which that layer might be read. On failure, an error is returned.
+// On success, the layer was either mounted (nothing more to do) or a blob
+// upload was initiated and the body of that blob should be sent to the returned
+// location.
+func (w *writer) initiateUpload(from, mount string) (location string, mounted bool, err error) {
+ u := w.url(fmt.Sprintf("/v2/%s/blobs/uploads/", w.repo.RepositoryStr()))
+ uv := url.Values{}
+ if mount != "" && from != "" {
+ // Quay will fail if we specify a "mount" without a "from".
+ uv["mount"] = []string{mount}
+ uv["from"] = []string{from}
+ }
+ u.RawQuery = uv.Encode()
+
+ // Make the request to initiate the blob upload.
+ req, err := http.NewRequest(http.MethodPost, u.String(), nil)
+ if err != nil {
+ return "", false, err
+ }
+ req.Header.Set("Content-Type", "application/json")
+ resp, err := w.client.Do(req.WithContext(w.context))
+ if err != nil {
+ return "", false, err
+ }
+ defer resp.Body.Close()
+
+ if err := transport.CheckError(resp, http.StatusCreated, http.StatusAccepted); err != nil {
+ return "", false, err
+ }
+
+ // Check the response code to determine the result.
+ switch resp.StatusCode {
+ case http.StatusCreated:
+ // We're done, we were able to fast-path.
+ return "", true, nil
+ case http.StatusAccepted:
+ // Proceed to PATCH, upload has begun.
+ loc, err := w.nextLocation(resp)
+ return loc, false, err
+ default:
+ panic("Unreachable: initiateUpload")
+ }
+}
+
+type progressReader struct {
+ rc io.ReadCloser
+
+ count *int64 // number of bytes this reader has read, to support resetting on retry.
+ updates chan<- v1.Update
+ lastUpdate *v1.Update
+}
+
+func (r *progressReader) Read(b []byte) (int, error) {
+ n, err := r.rc.Read(b)
+ if err != nil {
+ return n, err
+ }
+ atomic.AddInt64(r.count, int64(n))
+ // TODO: warn/debug log if sending takes too long, or if sending is blocked while context is cancelled.
+ r.updates <- v1.Update{
+ Total: r.lastUpdate.Total,
+ Complete: atomic.AddInt64(&r.lastUpdate.Complete, int64(n)),
+ }
+ return n, nil
+}
+
+func (r *progressReader) Close() error { return r.rc.Close() }
+
+// streamBlob streams the contents of the blob to the specified location.
+// On failure, this will return an error. On success, this will return the location
+// header indicating how to commit the streamed blob.
+func (w *writer) streamBlob(ctx context.Context, blob io.ReadCloser, streamLocation string) (commitLocation string, rerr error) {
+ reset := func() {}
+ defer func() {
+ if rerr != nil {
+ reset()
+ }
+ }()
+ if w.updates != nil {
+ var count int64
+ blob = &progressReader{rc: blob, updates: w.updates, lastUpdate: w.lastUpdate, count: &count}
+ reset = func() {
+ atomic.AddInt64(&w.lastUpdate.Complete, -count)
+ w.updates <- *w.lastUpdate
+ }
+ }
+
+ req, err := http.NewRequest(http.MethodPatch, streamLocation, blob)
+ if err != nil {
+ return "", err
+ }
+
+ resp, err := w.client.Do(req.WithContext(ctx))
+ if err != nil {
+ return "", err
+ }
+ defer resp.Body.Close()
+
+ if err := transport.CheckError(resp, http.StatusNoContent, http.StatusAccepted, http.StatusCreated); err != nil {
+ return "", err
+ }
+
+ // The blob has been uploaded, return the location header indicating
+ // how to commit this layer.
+ return w.nextLocation(resp)
+}
+
+// commitBlob commits this blob by sending a PUT to the location returned from
+// streaming the blob.
+func (w *writer) commitBlob(location, digest string) error {
+ u, err := url.Parse(location)
+ if err != nil {
+ return err
+ }
+ v := u.Query()
+ v.Set("digest", digest)
+ u.RawQuery = v.Encode()
+
+ req, err := http.NewRequest(http.MethodPut, u.String(), nil)
+ if err != nil {
+ return err
+ }
+
+ resp, err := w.client.Do(req.WithContext(w.context))
+ if err != nil {
+ return err
+ }
+ defer resp.Body.Close()
+
+ return transport.CheckError(resp, http.StatusCreated)
+}
+
+// incrProgress increments and sends a progress update, if WithProgress is used.
+func (w *writer) incrProgress(written int64) {
+ if w.updates == nil {
+ return
+ }
+ w.updates <- v1.Update{
+ Total: w.lastUpdate.Total,
+ Complete: atomic.AddInt64(&w.lastUpdate.Complete, int64(written)),
+ }
+}
+
+// uploadOne performs a complete upload of a single layer.
+func (w *writer) uploadOne(l v1.Layer) error {
+ var from, mount string
+ if h, err := l.Digest(); err == nil {
+ // If we know the digest, this isn't a streaming layer. Do an existence
+ // check so we can skip uploading the layer if possible.
+ existing, err := w.checkExistingBlob(h)
+ if err != nil {
+ return err
+ }
+ if existing {
+ size, err := l.Size()
+ if err != nil {
+ return err
+ }
+ w.incrProgress(size)
+ logs.Progress.Printf("existing blob: %v", h)
+ return nil
+ }
+
+ mount = h.String()
+ }
+ if ml, ok := l.(*MountableLayer); ok {
+ if w.repo.RegistryStr() == ml.Reference.Context().RegistryStr() {
+ from = ml.Reference.Context().RepositoryStr()
+ }
+ }
+
+ ctx := w.context
+
+ shouldRetry := func(err error) bool {
+ // Various failure modes here, as we're often reading from and writing to
+ // the network.
+ if retry.IsTemporary(err) || errors.Is(err, io.ErrUnexpectedEOF) || errors.Is(err, syscall.EPIPE) {
+ logs.Warn.Printf("retrying %v", err)
+ return true
+ }
+ return false
+ }
+
+ tryUpload := func() error {
+ location, mounted, err := w.initiateUpload(from, mount)
+ if err != nil {
+ return err
+ } else if mounted {
+ size, err := l.Size()
+ if err != nil {
+ return err
+ }
+ w.incrProgress(size)
+ h, err := l.Digest()
+ if err != nil {
+ return err
+ }
+ logs.Progress.Printf("mounted blob: %s", h.String())
+ return nil
+ }
+
+ // Only log layers with +json or +yaml. We can let through other stuff if it becomes popular.
+ // TODO(opencontainers/image-spec#791): Would be great to have an actual parser.
+ mt, err := l.MediaType()
+ if err != nil {
+ return err
+ }
+ smt := string(mt)
+ if !(strings.HasSuffix(smt, "+json") || strings.HasSuffix(smt, "+yaml")) {
+ ctx = redact.NewContext(ctx, "omitting binary blobs from logs")
+ }
+
+ blob, err := l.Compressed()
+ if err != nil {
+ return err
+ }
+ location, err = w.streamBlob(ctx, blob, location)
+ if err != nil {
+ return err
+ }
+
+ h, err := l.Digest()
+ if err != nil {
+ return err
+ }
+ digest := h.String()
+
+ if err := w.commitBlob(location, digest); err != nil {
+ return err
+ }
+ logs.Progress.Printf("pushed blob: %s", digest)
+ return nil
+ }
+
+ // Try this three times, waiting 1s after first failure, 3s after second.
+ backoff := retry.Backoff{
+ Duration: 1.0 * time.Second,
+ Factor: 3.0,
+ Jitter: 0.1,
+ Steps: 3,
+ }
+
+ return retry.Retry(tryUpload, shouldRetry, backoff)
+}
+
+type withLayer interface {
+ Layer(v1.Hash) (v1.Layer, error)
+}
+
+func (w *writer) writeIndex(ref name.Reference, ii v1.ImageIndex, options ...Option) error {
+ index, err := ii.IndexManifest()
+ if err != nil {
+ return err
+ }
+
+ o, err := makeOptions(ref.Context(), options...)
+ if err != nil {
+ return err
+ }
+
+ // TODO(#803): Pipe through remote.WithJobs and upload these in parallel.
+ for _, desc := range index.Manifests {
+ ref := ref.Context().Digest(desc.Digest.String())
+ exists, err := w.checkExistingManifest(desc.Digest, desc.MediaType)
+ if err != nil {
+ return err
+ }
+ if exists {
+ logs.Progress.Print("existing manifest: ", desc.Digest)
+ continue
+ }
+
+ switch desc.MediaType {
+ case types.OCIImageIndex, types.DockerManifestList:
+ ii, err := ii.ImageIndex(desc.Digest)
+ if err != nil {
+ return err
+ }
+ if err := w.writeIndex(ref, ii); err != nil {
+ return err
+ }
+ case types.OCIManifestSchema1, types.DockerManifestSchema2:
+ img, err := ii.Image(desc.Digest)
+ if err != nil {
+ return err
+ }
+ if err := writeImage(ref, img, o, w.lastUpdate); err != nil {
+ return err
+ }
+ default:
+ // Workaround for #819.
+ if wl, ok := ii.(withLayer); ok {
+ layer, err := wl.Layer(desc.Digest)
+ if err != nil {
+ return err
+ }
+ if err := w.uploadOne(layer); err != nil {
+ return err
+ }
+ }
+ }
+ }
+
+ // With all of the constituent elements uploaded, upload the manifest
+ // to commit the image.
+ return w.commitManifest(ii, ref)
+}
+
+type withMediaType interface {
+ MediaType() (types.MediaType, error)
+}
+
+// This is really silly, but go interfaces don't let me satisfy remote.Taggable
+// with remote.Descriptor because of name collisions between method names and
+// struct fields.
+//
+// Use a type assertion to either pull the v1.Descriptor out of remote.Descriptor or
+// create a descriptor based on the RawManifest and (optionally) MediaType.
+func unpackTaggable(t Taggable) ([]byte, *v1.Descriptor, error) {
+ if d, ok := t.(*Descriptor); ok {
+ return d.Manifest, &d.Descriptor, nil
+ }
+ b, err := t.RawManifest()
+ if err != nil {
+ return nil, nil, err
+ }
+
+ // A reasonable default if Taggable doesn't implement MediaType.
+ mt := types.DockerManifestSchema2
+
+ if wmt, ok := t.(withMediaType); ok {
+ m, err := wmt.MediaType()
+ if err != nil {
+ return nil, nil, err
+ }
+ mt = m
+ }
+
+ h, sz, err := v1.SHA256(bytes.NewReader(b))
+ if err != nil {
+ return nil, nil, err
+ }
+
+ return b, &v1.Descriptor{
+ MediaType: mt,
+ Size: sz,
+ Digest: h,
+ }, nil
+}
+
+// commitManifest does a PUT of the image's manifest.
+func (w *writer) commitManifest(t Taggable, ref name.Reference) error {
+ raw, desc, err := unpackTaggable(t)
+ if err != nil {
+ return err
+ }
+
+ u := w.url(fmt.Sprintf("/v2/%s/manifests/%s", w.repo.RepositoryStr(), ref.Identifier()))
+
+ // Make the request to PUT the serialized manifest
+ req, err := http.NewRequest(http.MethodPut, u.String(), bytes.NewBuffer(raw))
+ if err != nil {
+ return err
+ }
+ req.Header.Set("Content-Type", string(desc.MediaType))
+
+ resp, err := w.client.Do(req.WithContext(w.context))
+ if err != nil {
+ return err
+ }
+ defer resp.Body.Close()
+
+ if err := transport.CheckError(resp, http.StatusOK, http.StatusCreated, http.StatusAccepted); err != nil {
+ return err
+ }
+
+ // The image was successfully pushed!
+ logs.Progress.Printf("%v: digest: %v size: %d", ref, desc.Digest, desc.Size)
+ w.incrProgress(int64(len(raw)))
+ return nil
+}
+
+func scopesForUploadingImage(repo name.Repository, layers []v1.Layer) []string {
+	// Use a map as a set to remove duplicate scope strings.
+ scopeSet := map[string]struct{}{}
+
+ for _, l := range layers {
+ if ml, ok := l.(*MountableLayer); ok {
+			// We will add the push scope for ref.Context() after the loop.
+			// For now, we request pull scope for references on the same registry.
+ if ml.Reference.Context().String() != repo.String() && ml.Reference.Context().Registry.String() == repo.Registry.String() {
+ scopeSet[ml.Reference.Scope(transport.PullScope)] = struct{}{}
+ }
+ }
+ }
+
+ scopes := make([]string, 0)
+ // Push scope should be the first element because a few registries just look at the first scope to determine access.
+ scopes = append(scopes, repo.Scope(transport.PushScope))
+
+ for scope := range scopeSet {
+ scopes = append(scopes, scope)
+ }
+
+ return scopes
+}
+
+// WriteIndex pushes the provided ImageIndex to the specified image reference.
+// WriteIndex will attempt to push all of the referenced manifests before
+// attempting to push the ImageIndex, to retain referential integrity.
+func WriteIndex(ref name.Reference, ii v1.ImageIndex, options ...Option) (rerr error) {
+ o, err := makeOptions(ref.Context(), options...)
+ if err != nil {
+ return err
+ }
+
+ scopes := []string{ref.Scope(transport.PushScope)}
+ tr, err := transport.NewWithContext(o.context, ref.Context().Registry, o.auth, o.transport, scopes)
+ if err != nil {
+ return err
+ }
+ w := writer{
+ repo: ref.Context(),
+ client: &http.Client{Transport: tr},
+ context: o.context,
+ updates: o.updates,
+ }
+
+ if o.updates != nil {
+ w.lastUpdate = &v1.Update{}
+ w.lastUpdate.Total, err = countIndex(ii, o.allowNondistributableArtifacts)
+ if err != nil {
+ return err
+ }
+ defer close(o.updates)
+ defer func() { sendError(o.updates, rerr) }()
+ }
+
+ return w.writeIndex(ref, ii, options...)
+}
+
+// countImage counts the total size of all layers + config blob + manifest for
+// an image. It de-dupes duplicate layers.
+func countImage(img v1.Image, allowNondistributableArtifacts bool) (int64, error) {
+ var total int64
+ ls, err := img.Layers()
+ if err != nil {
+ return 0, err
+ }
+ seen := map[v1.Hash]bool{}
+ for _, l := range ls {
+ // Handle foreign layers.
+ mt, err := l.MediaType()
+ if err != nil {
+ return 0, err
+ }
+ if !mt.IsDistributable() && !allowNondistributableArtifacts {
+ continue
+ }
+
+ // TODO: support streaming layers which update the total count as they write.
+ if _, ok := l.(*stream.Layer); ok {
+ return 0, errors.New("cannot use stream.Layer and WithProgress")
+ }
+
+ // Dedupe layers.
+ d, err := l.Digest()
+ if err != nil {
+ return 0, err
+ }
+ if seen[d] {
+ continue
+ }
+ seen[d] = true
+
+ size, err := l.Size()
+ if err != nil {
+ return 0, err
+ }
+ total += size
+ }
+ b, err := img.RawConfigFile()
+ if err != nil {
+ return 0, err
+ }
+ total += int64(len(b))
+ size, err := img.Size()
+ if err != nil {
+ return 0, err
+ }
+ total += size
+ return total, nil
+}
+
+// countIndex counts the total size of all images + sub-indexes for an index.
+// It does not attempt to de-dupe duplicate images, etc.
+func countIndex(idx v1.ImageIndex, allowNondistributableArtifacts bool) (int64, error) {
+ var total int64
+ mf, err := idx.IndexManifest()
+ if err != nil {
+ return 0, err
+ }
+
+ for _, desc := range mf.Manifests {
+ switch desc.MediaType {
+ case types.OCIImageIndex, types.DockerManifestList:
+ sidx, err := idx.ImageIndex(desc.Digest)
+ if err != nil {
+ return 0, err
+ }
+ size, err := countIndex(sidx, allowNondistributableArtifacts)
+ if err != nil {
+ return 0, err
+ }
+ total += size
+ case types.OCIManifestSchema1, types.DockerManifestSchema2:
+ simg, err := idx.Image(desc.Digest)
+ if err != nil {
+ return 0, err
+ }
+ size, err := countImage(simg, allowNondistributableArtifacts)
+ if err != nil {
+ return 0, err
+ }
+ total += size
+ default:
+ // Workaround for #819.
+ if wl, ok := idx.(withLayer); ok {
+ layer, err := wl.Layer(desc.Digest)
+ if err != nil {
+ return 0, err
+ }
+ size, err := layer.Size()
+ if err != nil {
+ return 0, err
+ }
+ total += size
+ }
+ }
+ }
+
+ size, err := idx.Size()
+ if err != nil {
+ return 0, err
+ }
+ total += size
+ return total, nil
+}
+
+// WriteLayer uploads the provided Layer to the specified repo.
+func WriteLayer(repo name.Repository, layer v1.Layer, options ...Option) (rerr error) {
+ o, err := makeOptions(repo, options...)
+ if err != nil {
+ return err
+ }
+ scopes := scopesForUploadingImage(repo, []v1.Layer{layer})
+ tr, err := transport.NewWithContext(o.context, repo.Registry, o.auth, o.transport, scopes)
+ if err != nil {
+ return err
+ }
+ w := writer{
+ repo: repo,
+ client: &http.Client{Transport: tr},
+ context: o.context,
+ updates: o.updates,
+ }
+
+ if o.updates != nil {
+ defer close(o.updates)
+ defer func() { sendError(o.updates, rerr) }()
+
+ // TODO: support streaming layers which update the total count as they write.
+ if _, ok := layer.(*stream.Layer); ok {
+ return errors.New("cannot use stream.Layer and WithProgress")
+ }
+ size, err := layer.Size()
+ if err != nil {
+ return err
+ }
+ w.lastUpdate = &v1.Update{Total: size}
+ }
+ return w.uploadOne(layer)
+}
+
+// Tag adds a tag to the given Taggable via PUT /v2/.../manifests/<tag>
+//
+// Notable implementations of Taggable are v1.Image, v1.ImageIndex, and
+// remote.Descriptor.
+//
+// If t implements MediaType, we will use that for the Content-Type, otherwise
+// we will default to types.DockerManifestSchema2.
+//
+// Tag does not attempt to write anything other than the manifest, so callers
+// should ensure that all blobs or manifests that are referenced by t exist
+// in the target registry.
+func Tag(tag name.Tag, t Taggable, options ...Option) error {
+ return Put(tag, t, options...)
+}
+
+// Put adds a manifest from the given Taggable via PUT /v2/.../manifests/<reference>
+//
+// Notable implementations of Taggable are v1.Image, v1.ImageIndex, and
+// remote.Descriptor.
+//
+// If t implements MediaType, we will use that for the Content-Type, otherwise
+// we will default to types.DockerManifestSchema2.
+//
+// Put does not attempt to write anything other than the manifest, so callers
+// should ensure that all blobs or manifests that are referenced by t exist
+// in the target registry.
+func Put(ref name.Reference, t Taggable, options ...Option) error {
+ o, err := makeOptions(ref.Context(), options...)
+ if err != nil {
+ return err
+ }
+ scopes := []string{ref.Scope(transport.PushScope)}
+
+ // TODO: This *always* does a token exchange. For some registries,
+ // that's pretty slow. Some ideas;
+ // * Tag could take a list of tags.
+ // * Allow callers to pass in a transport.Transport, typecheck
+ // it to allow them to reuse the transport across multiple calls.
+ // * WithTag option to do multiple manifest PUTs in commitManifest.
+ tr, err := transport.NewWithContext(o.context, ref.Context().Registry, o.auth, o.transport, scopes)
+ if err != nil {
+ return err
+ }
+ w := writer{
+ repo: ref.Context(),
+ client: &http.Client{Transport: tr},
+ context: o.context,
+ }
+
+ return w.commitManifest(t, ref)
+}
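+
+// Tagging sketch (illustrative only; srcRef is an existing name.Reference, and
+// the blobs referenced by the source manifest are assumed to already exist in
+// the target repository):
+//
+//	desc, _ := Get(srcRef)                // *Descriptor implements Taggable
+//	tag := srcRef.Context().Tag("stable") // hypothetical tag
+//	if err := Tag(tag, desc); err != nil {
+//		// handle the tagging failure
+//	}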
diff --git a/vendor/github.com/google/go-containerregistry/pkg/v1/stream/BUILD.bazel b/vendor/github.com/google/go-containerregistry/pkg/v1/stream/BUILD.bazel
new file mode 100644
index 0000000000..9e0a497f21
--- /dev/null
+++ b/vendor/github.com/google/go-containerregistry/pkg/v1/stream/BUILD.bazel
@@ -0,0 +1,13 @@
+load("@io_bazel_rules_go//go:def.bzl", "go_library")
+
+go_library(
+ name = "go_default_library",
+ srcs = ["layer.go"],
+ importmap = "k8s.io/kops/vendor/github.com/google/go-containerregistry/pkg/v1/stream",
+ importpath = "github.com/google/go-containerregistry/pkg/v1/stream",
+ visibility = ["//visibility:public"],
+ deps = [
+ "//vendor/github.com/google/go-containerregistry/pkg/v1:go_default_library",
+ "//vendor/github.com/google/go-containerregistry/pkg/v1/types:go_default_library",
+ ],
+)
diff --git a/vendor/github.com/google/go-containerregistry/pkg/v1/stream/README.md b/vendor/github.com/google/go-containerregistry/pkg/v1/stream/README.md
new file mode 100644
index 0000000000..da0dda48d9
--- /dev/null
+++ b/vendor/github.com/google/go-containerregistry/pkg/v1/stream/README.md
@@ -0,0 +1,68 @@
+# `stream`
+
+[GoDoc](https://godoc.org/github.com/google/go-containerregistry/pkg/v1/stream)
+
+The `stream` package contains an implementation of
+[`v1.Layer`](https://godoc.org/github.com/google/go-containerregistry/pkg/v1#Layer)
+that supports _streaming_ access, i.e. the layer contents are read once and not
+buffered.
+
+## Usage
+
+```go
+package main
+
+import (
+ "os"
+
+ "github.com/google/go-containerregistry/pkg/name"
+ "github.com/google/go-containerregistry/pkg/v1/remote"
+ "github.com/google/go-containerregistry/pkg/v1/stream"
+)
+
+// upload the contents of stdin as a layer to a local registry
+func main() {
+ repo, err := name.NewRepository("localhost:5000/stream")
+ if err != nil {
+ panic(err)
+ }
+
+ layer := stream.NewLayer(os.Stdin)
+
+ if err := remote.WriteLayer(repo, layer); err != nil {
+ panic(err)
+ }
+}
+```
+
+## Structure
+
+This implements the layer portion of an [image
+upload](/pkg/v1/remote#anatomy-of-an-image-upload). We launch a goroutine that
+is responsible for hashing the uncompressed contents to compute the `DiffID`,
+gzipping them to produce the `Compressed` contents, and hashing/counting the
+bytes to produce the `Digest`/`Size`. This goroutine writes to an
+`io.PipeWriter`, which blocks until `Compressed` reads the gzipped contents from
+the corresponding `io.PipeReader`.
+
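+A minimal, self-contained sketch of that pattern (illustrative only, not this
+package's actual implementation):
+
+```go
+package main
+
+import (
+	"compress/gzip"
+	"crypto/sha256"
+	"fmt"
+	"io"
+	"os"
+)
+
+func main() {
+	uncompressed := sha256.New() // hash of the raw bytes -> DiffID
+	compressed := sha256.New()   // hash of the gzipped bytes -> Digest
+
+	pr, pw := io.Pipe()
+	go func() {
+		zw := gzip.NewWriter(io.MultiWriter(pw, compressed))
+		// Hash the raw bytes while gzipping them into the pipe.
+		if _, err := io.Copy(io.MultiWriter(uncompressed, zw), os.Stdin); err != nil {
+			pw.CloseWithError(err)
+			return
+		}
+		if err := zw.Close(); err != nil {
+			pw.CloseWithError(err)
+			return
+		}
+		pw.Close()
+	}()
+
+	// The consumer (e.g. a registry upload) reads the gzipped stream exactly once.
+	n, _ := io.Copy(io.Discard, pr)
+	fmt.Printf("diffid=sha256:%x digest=sha256:%x size=%d\n",
+		uncompressed.Sum(nil), compressed.Sum(nil), n)
+}
+```
+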
+## Caveats
+
+This assumes that you have an uncompressed layer (i.e. a tarball) and would like
+to compress it. Calling `Uncompressed` is always an error. Likewise, other
+methods are invalid until the contents of `Compressed` have been completely
+consumed and `Close`d.
+
+Using a `stream.Layer` will likely not work without careful consideration. For
+example, in the `mutate` package, we defer computing the manifest and config
+file until they are actually called. This allows you to `mutate.Append` a
+streaming layer to an image without accidentally consuming it. Similarly, in
+`remote.Write`, if calling `Digest` on a layer fails, we attempt to upload the
+layer anyway, understanding that we may be dealing with a `stream.Layer` whose
+contents need to be uploaded before we can upload the config file.
+
+Given the [structure](#structure) of how this is implemented, forgetting to
+`Close` a `stream.Layer` will leak a goroutine.
diff --git a/vendor/github.com/google/go-containerregistry/pkg/v1/stream/layer.go b/vendor/github.com/google/go-containerregistry/pkg/v1/stream/layer.go
new file mode 100644
index 0000000000..e91f57ab3a
--- /dev/null
+++ b/vendor/github.com/google/go-containerregistry/pkg/v1/stream/layer.go
@@ -0,0 +1,242 @@
+// Copyright 2018 Google LLC All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package stream
+
+import (
+ "bufio"
+ "compress/gzip"
+ "crypto/sha256"
+ "encoding/hex"
+ "errors"
+ "hash"
+ "io"
+ "os"
+ "sync"
+
+ v1 "github.com/google/go-containerregistry/pkg/v1"
+ "github.com/google/go-containerregistry/pkg/v1/types"
+)
+
+var (
+ // ErrNotComputed is returned when the requested value is not yet
+ // computed because the stream has not been consumed yet.
+ ErrNotComputed = errors.New("value not computed until stream is consumed")
+
+ // ErrConsumed is returned by Compressed when the underlying stream has
+ // already been consumed and closed.
+ ErrConsumed = errors.New("stream was already consumed")
+)
+
+// Layer is a streaming implementation of v1.Layer.
+type Layer struct {
+ blob io.ReadCloser
+ consumed bool
+ compression int
+
+ mu sync.Mutex
+ digest, diffID *v1.Hash
+ size int64
+}
+
+var _ v1.Layer = (*Layer)(nil)
+
+// LayerOption applies options to layer
+type LayerOption func(*Layer)
+
+// WithCompressionLevel sets the gzip compression. See `gzip.NewWriterLevel` for possible values.
+func WithCompressionLevel(level int) LayerOption {
+ return func(l *Layer) {
+ l.compression = level
+ }
+}
+
+// NewLayer creates a Layer from an io.ReadCloser.
+func NewLayer(rc io.ReadCloser, opts ...LayerOption) *Layer {
+ layer := &Layer{
+ blob: rc,
+ compression: gzip.BestSpeed,
+ }
+
+ for _, opt := range opts {
+ opt(layer)
+ }
+
+ return layer
+}
+
+// Digest implements v1.Layer.
+func (l *Layer) Digest() (v1.Hash, error) {
+ l.mu.Lock()
+ defer l.mu.Unlock()
+ if l.digest == nil {
+ return v1.Hash{}, ErrNotComputed
+ }
+ return *l.digest, nil
+}
+
+// DiffID implements v1.Layer.
+func (l *Layer) DiffID() (v1.Hash, error) {
+ l.mu.Lock()
+ defer l.mu.Unlock()
+ if l.diffID == nil {
+ return v1.Hash{}, ErrNotComputed
+ }
+ return *l.diffID, nil
+}
+
+// Size implements v1.Layer.
+func (l *Layer) Size() (int64, error) {
+ l.mu.Lock()
+ defer l.mu.Unlock()
+ if l.size == 0 {
+ return 0, ErrNotComputed
+ }
+ return l.size, nil
+}
+
+// MediaType implements v1.Layer
+func (l *Layer) MediaType() (types.MediaType, error) {
+ // We return DockerLayer for now as uncompressed layers
+ // are unimplemented
+ return types.DockerLayer, nil
+}
+
+// Uncompressed implements v1.Layer.
+func (l *Layer) Uncompressed() (io.ReadCloser, error) {
+ return nil, errors.New("NYI: stream.Layer.Uncompressed is not implemented")
+}
+
+// Compressed implements v1.Layer.
+func (l *Layer) Compressed() (io.ReadCloser, error) {
+ if l.consumed {
+ return nil, ErrConsumed
+ }
+ return newCompressedReader(l)
+}
+
+type compressedReader struct {
+ closer io.Closer // original blob's Closer.
+
+ h, zh hash.Hash // collects digests of compressed and uncompressed stream.
+ pr io.Reader
+ bw *bufio.Writer
+ count *countWriter
+
+ l *Layer // stream.Layer to update upon Close.
+}
+
+func newCompressedReader(l *Layer) (*compressedReader, error) {
+ h := sha256.New()
+ zh := sha256.New()
+ count := &countWriter{}
+
+ // gzip.Writer writes to the output stream via pipe, a hasher to
+ // capture compressed digest, and a countWriter to capture compressed
+ // size.
+ pr, pw := io.Pipe()
+
+ // Write compressed bytes to be read by the pipe.Reader, hashed by zh, and counted by count.
+ mw := io.MultiWriter(pw, zh, count)
+
+ // Buffer the output of the gzip writer so we don't have to wait on pr to keep writing.
+	// 128 KiB (2<<16) ought to be small enough for anybody.
+ bw := bufio.NewWriterSize(mw, 2<<16)
+ zw, err := gzip.NewWriterLevel(bw, l.compression)
+ if err != nil {
+ return nil, err
+ }
+
+ cr := &compressedReader{
+ closer: newMultiCloser(zw, l.blob),
+ pr: pr,
+ bw: bw,
+ h: h,
+ zh: zh,
+ count: count,
+ l: l,
+ }
+ go func() {
+ if _, err := io.Copy(io.MultiWriter(h, zw), l.blob); err != nil {
+ pw.CloseWithError(err)
+ return
+ }
+ // Now close the compressed reader, to flush the gzip stream
+ // and calculate digest/diffID/size. This will cause pr to
+ // return EOF which will cause readers of the Compressed stream
+ // to finish reading.
+ pw.CloseWithError(cr.Close())
+ }()
+
+ return cr, nil
+}
+
+func (cr *compressedReader) Read(b []byte) (int, error) { return cr.pr.Read(b) }
+
+func (cr *compressedReader) Close() error {
+ cr.l.mu.Lock()
+ defer cr.l.mu.Unlock()
+
+ // Close the inner ReadCloser.
+ if err := cr.closer.Close(); err != nil {
+ return err
+ }
+
+ // Flush the buffer.
+ if err := cr.bw.Flush(); err != nil {
+ return err
+ }
+
+ diffID, err := v1.NewHash("sha256:" + hex.EncodeToString(cr.h.Sum(nil)))
+ if err != nil {
+ return err
+ }
+ cr.l.diffID = &diffID
+
+ digest, err := v1.NewHash("sha256:" + hex.EncodeToString(cr.zh.Sum(nil)))
+ if err != nil {
+ return err
+ }
+ cr.l.digest = &digest
+
+ cr.l.size = cr.count.n
+ cr.l.consumed = true
+ return nil
+}
+
+// countWriter counts bytes written to it.
+type countWriter struct{ n int64 }
+
+func (c *countWriter) Write(p []byte) (int, error) {
+ c.n += int64(len(p))
+ return len(p), nil
+}
+
+// multiCloser is a Closer that collects multiple Closers and Closes them in order.
+type multiCloser []io.Closer
+
+var _ io.Closer = (multiCloser)(nil)
+
+func newMultiCloser(c ...io.Closer) multiCloser { return multiCloser(c) }
+
+func (m multiCloser) Close() error {
+ for _, c := range m {
+ // NOTE: net/http will call close on success, so if we've already
+ // closed the inner rc, it's not an error.
+ if err := c.Close(); err != nil && !errors.Is(err, os.ErrClosed) {
+ return err
+ }
+ }
+ return nil
+}
diff --git a/vendor/github.com/google/go-containerregistry/pkg/v1/types/BUILD.bazel b/vendor/github.com/google/go-containerregistry/pkg/v1/types/BUILD.bazel
new file mode 100644
index 0000000000..37ff36d601
--- /dev/null
+++ b/vendor/github.com/google/go-containerregistry/pkg/v1/types/BUILD.bazel
@@ -0,0 +1,9 @@
+load("@io_bazel_rules_go//go:def.bzl", "go_library")
+
+go_library(
+ name = "go_default_library",
+ srcs = ["types.go"],
+ importmap = "k8s.io/kops/vendor/github.com/google/go-containerregistry/pkg/v1/types",
+ importpath = "github.com/google/go-containerregistry/pkg/v1/types",
+ visibility = ["//visibility:public"],
+)
diff --git a/vendor/github.com/google/go-containerregistry/pkg/v1/types/types.go b/vendor/github.com/google/go-containerregistry/pkg/v1/types/types.go
new file mode 100644
index 0000000000..21f2236502
--- /dev/null
+++ b/vendor/github.com/google/go-containerregistry/pkg/v1/types/types.go
@@ -0,0 +1,71 @@
+// Copyright 2018 Google LLC All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package types
+
+// MediaType is an enumeration of the supported mime types that an element of an image might have.
+type MediaType string
+
+// The collection of known MediaType values.
+const (
+ OCIContentDescriptor MediaType = "application/vnd.oci.descriptor.v1+json"
+ OCIImageIndex MediaType = "application/vnd.oci.image.index.v1+json"
+ OCIManifestSchema1 MediaType = "application/vnd.oci.image.manifest.v1+json"
+ OCIConfigJSON MediaType = "application/vnd.oci.image.config.v1+json"
+ OCILayer MediaType = "application/vnd.oci.image.layer.v1.tar+gzip"
+ OCIRestrictedLayer MediaType = "application/vnd.oci.image.layer.nondistributable.v1.tar+gzip"
+ OCIUncompressedLayer MediaType = "application/vnd.oci.image.layer.v1.tar"
+ OCIUncompressedRestrictedLayer MediaType = "application/vnd.oci.image.layer.nondistributable.v1.tar"
+
+ DockerManifestSchema1 MediaType = "application/vnd.docker.distribution.manifest.v1+json"
+ DockerManifestSchema1Signed MediaType = "application/vnd.docker.distribution.manifest.v1+prettyjws"
+ DockerManifestSchema2 MediaType = "application/vnd.docker.distribution.manifest.v2+json"
+ DockerManifestList MediaType = "application/vnd.docker.distribution.manifest.list.v2+json"
+ DockerLayer MediaType = "application/vnd.docker.image.rootfs.diff.tar.gzip"
+ DockerConfigJSON MediaType = "application/vnd.docker.container.image.v1+json"
+ DockerPluginConfig MediaType = "application/vnd.docker.plugin.v1+json"
+ DockerForeignLayer MediaType = "application/vnd.docker.image.rootfs.foreign.diff.tar.gzip"
+ DockerUncompressedLayer MediaType = "application/vnd.docker.image.rootfs.diff.tar"
+
+ OCIVendorPrefix = "vnd.oci"
+ DockerVendorPrefix = "vnd.docker"
+)
+
+// IsDistributable returns true if a layer is distributable, see:
+// https://github.com/opencontainers/image-spec/blob/master/layer.md#non-distributable-layers
+func (m MediaType) IsDistributable() bool {
+ switch m {
+ case DockerForeignLayer, OCIRestrictedLayer, OCIUncompressedRestrictedLayer:
+ return false
+ }
+ return true
+}
+
+// IsImage returns true if the mediaType represents an image manifest, as opposed to something else, like an index.
+func (m MediaType) IsImage() bool {
+ switch m {
+ case OCIManifestSchema1, DockerManifestSchema2:
+ return true
+ }
+ return false
+}
+
+// IsIndex returns true if the mediaType represents an index, as opposed to something else, like an image.
+func (m MediaType) IsIndex() bool {
+ switch m {
+ case OCIImageIndex, DockerManifestList:
+ return true
+ }
+ return false
+}
diff --git a/vendor/github.com/google/go-containerregistry/pkg/v1/zz_deepcopy_generated.go b/vendor/github.com/google/go-containerregistry/pkg/v1/zz_deepcopy_generated.go
new file mode 100644
index 0000000000..3f92f09135
--- /dev/null
+++ b/vendor/github.com/google/go-containerregistry/pkg/v1/zz_deepcopy_generated.go
@@ -0,0 +1,318 @@
+// +build !ignore_autogenerated
+
+// Copyright 2018 Google LLC All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Code generated by deepcopy-gen. DO NOT EDIT.
+
+package v1
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *Config) DeepCopyInto(out *Config) {
+ *out = *in
+ if in.Cmd != nil {
+ in, out := &in.Cmd, &out.Cmd
+ *out = make([]string, len(*in))
+ copy(*out, *in)
+ }
+ if in.Healthcheck != nil {
+ in, out := &in.Healthcheck, &out.Healthcheck
+ *out = new(HealthConfig)
+ (*in).DeepCopyInto(*out)
+ }
+ if in.Entrypoint != nil {
+ in, out := &in.Entrypoint, &out.Entrypoint
+ *out = make([]string, len(*in))
+ copy(*out, *in)
+ }
+ if in.Env != nil {
+ in, out := &in.Env, &out.Env
+ *out = make([]string, len(*in))
+ copy(*out, *in)
+ }
+ if in.Labels != nil {
+ in, out := &in.Labels, &out.Labels
+ *out = make(map[string]string, len(*in))
+ for key, val := range *in {
+ (*out)[key] = val
+ }
+ }
+ if in.OnBuild != nil {
+ in, out := &in.OnBuild, &out.OnBuild
+ *out = make([]string, len(*in))
+ copy(*out, *in)
+ }
+ if in.Volumes != nil {
+ in, out := &in.Volumes, &out.Volumes
+ *out = make(map[string]struct{}, len(*in))
+ for key, val := range *in {
+ (*out)[key] = val
+ }
+ }
+ if in.ExposedPorts != nil {
+ in, out := &in.ExposedPorts, &out.ExposedPorts
+ *out = make(map[string]struct{}, len(*in))
+ for key, val := range *in {
+ (*out)[key] = val
+ }
+ }
+ if in.Shell != nil {
+ in, out := &in.Shell, &out.Shell
+ *out = make([]string, len(*in))
+ copy(*out, *in)
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Config.
+func (in *Config) DeepCopy() *Config {
+ if in == nil {
+ return nil
+ }
+ out := new(Config)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ConfigFile) DeepCopyInto(out *ConfigFile) {
+ *out = *in
+ in.Created.DeepCopyInto(&out.Created)
+ if in.History != nil {
+ in, out := &in.History, &out.History
+ *out = make([]History, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+ in.RootFS.DeepCopyInto(&out.RootFS)
+ in.Config.DeepCopyInto(&out.Config)
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConfigFile.
+func (in *ConfigFile) DeepCopy() *ConfigFile {
+ if in == nil {
+ return nil
+ }
+ out := new(ConfigFile)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *Descriptor) DeepCopyInto(out *Descriptor) {
+ *out = *in
+ out.Digest = in.Digest
+ if in.URLs != nil {
+ in, out := &in.URLs, &out.URLs
+ *out = make([]string, len(*in))
+ copy(*out, *in)
+ }
+ if in.Annotations != nil {
+ in, out := &in.Annotations, &out.Annotations
+ *out = make(map[string]string, len(*in))
+ for key, val := range *in {
+ (*out)[key] = val
+ }
+ }
+ if in.Platform != nil {
+ in, out := &in.Platform, &out.Platform
+ *out = new(Platform)
+ (*in).DeepCopyInto(*out)
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Descriptor.
+func (in *Descriptor) DeepCopy() *Descriptor {
+ if in == nil {
+ return nil
+ }
+ out := new(Descriptor)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *Hash) DeepCopyInto(out *Hash) {
+ *out = *in
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Hash.
+func (in *Hash) DeepCopy() *Hash {
+ if in == nil {
+ return nil
+ }
+ out := new(Hash)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *HealthConfig) DeepCopyInto(out *HealthConfig) {
+ *out = *in
+ if in.Test != nil {
+ in, out := &in.Test, &out.Test
+ *out = make([]string, len(*in))
+ copy(*out, *in)
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HealthConfig.
+func (in *HealthConfig) DeepCopy() *HealthConfig {
+ if in == nil {
+ return nil
+ }
+ out := new(HealthConfig)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *History) DeepCopyInto(out *History) {
+ *out = *in
+ in.Created.DeepCopyInto(&out.Created)
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new History.
+func (in *History) DeepCopy() *History {
+ if in == nil {
+ return nil
+ }
+ out := new(History)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *IndexManifest) DeepCopyInto(out *IndexManifest) {
+ *out = *in
+ if in.Manifests != nil {
+ in, out := &in.Manifests, &out.Manifests
+ *out = make([]Descriptor, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+ if in.Annotations != nil {
+ in, out := &in.Annotations, &out.Annotations
+ *out = make(map[string]string, len(*in))
+ for key, val := range *in {
+ (*out)[key] = val
+ }
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IndexManifest.
+func (in *IndexManifest) DeepCopy() *IndexManifest {
+ if in == nil {
+ return nil
+ }
+ out := new(IndexManifest)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *Manifest) DeepCopyInto(out *Manifest) {
+ *out = *in
+ in.Config.DeepCopyInto(&out.Config)
+ if in.Layers != nil {
+ in, out := &in.Layers, &out.Layers
+ *out = make([]Descriptor, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+ if in.Annotations != nil {
+ in, out := &in.Annotations, &out.Annotations
+ *out = make(map[string]string, len(*in))
+ for key, val := range *in {
+ (*out)[key] = val
+ }
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Manifest.
+func (in *Manifest) DeepCopy() *Manifest {
+ if in == nil {
+ return nil
+ }
+ out := new(Manifest)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *Platform) DeepCopyInto(out *Platform) {
+ *out = *in
+ if in.OSFeatures != nil {
+ in, out := &in.OSFeatures, &out.OSFeatures
+ *out = make([]string, len(*in))
+ copy(*out, *in)
+ }
+ if in.Features != nil {
+ in, out := &in.Features, &out.Features
+ *out = make([]string, len(*in))
+ copy(*out, *in)
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Platform.
+func (in *Platform) DeepCopy() *Platform {
+ if in == nil {
+ return nil
+ }
+ out := new(Platform)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *RootFS) DeepCopyInto(out *RootFS) {
+ *out = *in
+ if in.DiffIDs != nil {
+ in, out := &in.DiffIDs, &out.DiffIDs
+ *out = make([]Hash, len(*in))
+ copy(*out, *in)
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RootFS.
+func (in *RootFS) DeepCopy() *RootFS {
+ if in == nil {
+ return nil
+ }
+ out := new(RootFS)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Time.
+func (in *Time) DeepCopy() *Time {
+ if in == nil {
+ return nil
+ }
+ out := new(Time)
+ in.DeepCopyInto(out)
+ return out
+}
diff --git a/vendor/golang.org/x/net/internal/socks/BUILD.bazel b/vendor/golang.org/x/net/internal/socks/BUILD.bazel
deleted file mode 100644
index d027916695..0000000000
--- a/vendor/golang.org/x/net/internal/socks/BUILD.bazel
+++ /dev/null
@@ -1,12 +0,0 @@
-load("@io_bazel_rules_go//go:def.bzl", "go_library")
-
-go_library(
- name = "go_default_library",
- srcs = [
- "client.go",
- "socks.go",
- ],
- importmap = "k8s.io/kops/vendor/golang.org/x/net/internal/socks",
- importpath = "golang.org/x/net/internal/socks",
- visibility = ["//vendor/golang.org/x/net:__subpackages__"],
-)
diff --git a/vendor/golang.org/x/net/internal/socks/client.go b/vendor/golang.org/x/net/internal/socks/client.go
deleted file mode 100644
index 3d6f516a59..0000000000
--- a/vendor/golang.org/x/net/internal/socks/client.go
+++ /dev/null
@@ -1,168 +0,0 @@
-// Copyright 2018 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package socks
-
-import (
- "context"
- "errors"
- "io"
- "net"
- "strconv"
- "time"
-)
-
-var (
- noDeadline = time.Time{}
- aLongTimeAgo = time.Unix(1, 0)
-)
-
-func (d *Dialer) connect(ctx context.Context, c net.Conn, address string) (_ net.Addr, ctxErr error) {
- host, port, err := splitHostPort(address)
- if err != nil {
- return nil, err
- }
- if deadline, ok := ctx.Deadline(); ok && !deadline.IsZero() {
- c.SetDeadline(deadline)
- defer c.SetDeadline(noDeadline)
- }
- if ctx != context.Background() {
- errCh := make(chan error, 1)
- done := make(chan struct{})
- defer func() {
- close(done)
- if ctxErr == nil {
- ctxErr = <-errCh
- }
- }()
- go func() {
- select {
- case <-ctx.Done():
- c.SetDeadline(aLongTimeAgo)
- errCh <- ctx.Err()
- case <-done:
- errCh <- nil
- }
- }()
- }
-
- b := make([]byte, 0, 6+len(host)) // the size here is just an estimate
- b = append(b, Version5)
- if len(d.AuthMethods) == 0 || d.Authenticate == nil {
- b = append(b, 1, byte(AuthMethodNotRequired))
- } else {
- ams := d.AuthMethods
- if len(ams) > 255 {
- return nil, errors.New("too many authentication methods")
- }
- b = append(b, byte(len(ams)))
- for _, am := range ams {
- b = append(b, byte(am))
- }
- }
- if _, ctxErr = c.Write(b); ctxErr != nil {
- return
- }
-
- if _, ctxErr = io.ReadFull(c, b[:2]); ctxErr != nil {
- return
- }
- if b[0] != Version5 {
- return nil, errors.New("unexpected protocol version " + strconv.Itoa(int(b[0])))
- }
- am := AuthMethod(b[1])
- if am == AuthMethodNoAcceptableMethods {
- return nil, errors.New("no acceptable authentication methods")
- }
- if d.Authenticate != nil {
- if ctxErr = d.Authenticate(ctx, c, am); ctxErr != nil {
- return
- }
- }
-
- b = b[:0]
- b = append(b, Version5, byte(d.cmd), 0)
- if ip := net.ParseIP(host); ip != nil {
- if ip4 := ip.To4(); ip4 != nil {
- b = append(b, AddrTypeIPv4)
- b = append(b, ip4...)
- } else if ip6 := ip.To16(); ip6 != nil {
- b = append(b, AddrTypeIPv6)
- b = append(b, ip6...)
- } else {
- return nil, errors.New("unknown address type")
- }
- } else {
- if len(host) > 255 {
- return nil, errors.New("FQDN too long")
- }
- b = append(b, AddrTypeFQDN)
- b = append(b, byte(len(host)))
- b = append(b, host...)
- }
- b = append(b, byte(port>>8), byte(port))
- if _, ctxErr = c.Write(b); ctxErr != nil {
- return
- }
-
- if _, ctxErr = io.ReadFull(c, b[:4]); ctxErr != nil {
- return
- }
- if b[0] != Version5 {
- return nil, errors.New("unexpected protocol version " + strconv.Itoa(int(b[0])))
- }
- if cmdErr := Reply(b[1]); cmdErr != StatusSucceeded {
- return nil, errors.New("unknown error " + cmdErr.String())
- }
- if b[2] != 0 {
- return nil, errors.New("non-zero reserved field")
- }
- l := 2
- var a Addr
- switch b[3] {
- case AddrTypeIPv4:
- l += net.IPv4len
- a.IP = make(net.IP, net.IPv4len)
- case AddrTypeIPv6:
- l += net.IPv6len
- a.IP = make(net.IP, net.IPv6len)
- case AddrTypeFQDN:
- if _, err := io.ReadFull(c, b[:1]); err != nil {
- return nil, err
- }
- l += int(b[0])
- default:
- return nil, errors.New("unknown address type " + strconv.Itoa(int(b[3])))
- }
- if cap(b) < l {
- b = make([]byte, l)
- } else {
- b = b[:l]
- }
- if _, ctxErr = io.ReadFull(c, b); ctxErr != nil {
- return
- }
- if a.IP != nil {
- copy(a.IP, b)
- } else {
- a.Name = string(b[:len(b)-2])
- }
- a.Port = int(b[len(b)-2])<<8 | int(b[len(b)-1])
- return &a, nil
-}
-
-func splitHostPort(address string) (string, int, error) {
- host, port, err := net.SplitHostPort(address)
- if err != nil {
- return "", 0, err
- }
- portnum, err := strconv.Atoi(port)
- if err != nil {
- return "", 0, err
- }
- if 1 > portnum || portnum > 0xffff {
- return "", 0, errors.New("port number out of range " + port)
- }
- return host, portnum, nil
-}
diff --git a/vendor/golang.org/x/net/internal/socks/socks.go b/vendor/golang.org/x/net/internal/socks/socks.go
deleted file mode 100644
index 97db2340ec..0000000000
--- a/vendor/golang.org/x/net/internal/socks/socks.go
+++ /dev/null
@@ -1,317 +0,0 @@
-// Copyright 2018 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// Package socks provides a SOCKS version 5 client implementation.
-//
-// SOCKS protocol version 5 is defined in RFC 1928.
-// Username/Password authentication for SOCKS version 5 is defined in
-// RFC 1929.
-package socks
-
-import (
- "context"
- "errors"
- "io"
- "net"
- "strconv"
-)
-
-// A Command represents a SOCKS command.
-type Command int
-
-func (cmd Command) String() string {
- switch cmd {
- case CmdConnect:
- return "socks connect"
- case cmdBind:
- return "socks bind"
- default:
- return "socks " + strconv.Itoa(int(cmd))
- }
-}
-
-// An AuthMethod represents a SOCKS authentication method.
-type AuthMethod int
-
-// A Reply represents a SOCKS command reply code.
-type Reply int
-
-func (code Reply) String() string {
- switch code {
- case StatusSucceeded:
- return "succeeded"
- case 0x01:
- return "general SOCKS server failure"
- case 0x02:
- return "connection not allowed by ruleset"
- case 0x03:
- return "network unreachable"
- case 0x04:
- return "host unreachable"
- case 0x05:
- return "connection refused"
- case 0x06:
- return "TTL expired"
- case 0x07:
- return "command not supported"
- case 0x08:
- return "address type not supported"
- default:
- return "unknown code: " + strconv.Itoa(int(code))
- }
-}
-
-// Wire protocol constants.
-const (
- Version5 = 0x05
-
- AddrTypeIPv4 = 0x01
- AddrTypeFQDN = 0x03
- AddrTypeIPv6 = 0x04
-
- CmdConnect Command = 0x01 // establishes an active-open forward proxy connection
- cmdBind Command = 0x02 // establishes a passive-open forward proxy connection
-
- AuthMethodNotRequired AuthMethod = 0x00 // no authentication required
- AuthMethodUsernamePassword AuthMethod = 0x02 // use username/password
- AuthMethodNoAcceptableMethods AuthMethod = 0xff // no acceptable authentication methods
-
- StatusSucceeded Reply = 0x00
-)
-
-// An Addr represents a SOCKS-specific address.
-// Either Name or IP is used exclusively.
-type Addr struct {
- Name string // fully-qualified domain name
- IP net.IP
- Port int
-}
-
-func (a *Addr) Network() string { return "socks" }
-
-func (a *Addr) String() string {
- if a == nil {
- return ""
- }
- port := strconv.Itoa(a.Port)
- if a.IP == nil {
- return net.JoinHostPort(a.Name, port)
- }
- return net.JoinHostPort(a.IP.String(), port)
-}
-
-// A Conn represents a forward proxy connection.
-type Conn struct {
- net.Conn
-
- boundAddr net.Addr
-}
-
-// BoundAddr returns the address assigned by the proxy server for
-// connecting to the command target address from the proxy server.
-func (c *Conn) BoundAddr() net.Addr {
- if c == nil {
- return nil
- }
- return c.boundAddr
-}
-
-// A Dialer holds SOCKS-specific options.
-type Dialer struct {
- cmd Command // either CmdConnect or cmdBind
- proxyNetwork string // network between a proxy server and a client
- proxyAddress string // proxy server address
-
- // ProxyDial specifies the optional dial function for
- // establishing the transport connection.
- ProxyDial func(context.Context, string, string) (net.Conn, error)
-
- // AuthMethods specifies the list of request authentication
- // methods.
- // If empty, SOCKS client requests only AuthMethodNotRequired.
- AuthMethods []AuthMethod
-
- // Authenticate specifies the optional authentication
- // function. It must be non-nil when AuthMethods is not empty.
- // It must return an error when the authentication is failed.
- Authenticate func(context.Context, io.ReadWriter, AuthMethod) error
-}
-
-// DialContext connects to the provided address on the provided
-// network.
-//
-// The returned error value may be a net.OpError. When the Op field of
-// net.OpError contains "socks", the Source field contains a proxy
-// server address and the Addr field contains a command target
-// address.
-//
-// See func Dial of the net package of standard library for a
-// description of the network and address parameters.
-func (d *Dialer) DialContext(ctx context.Context, network, address string) (net.Conn, error) {
- if err := d.validateTarget(network, address); err != nil {
- proxy, dst, _ := d.pathAddrs(address)
- return nil, &net.OpError{Op: d.cmd.String(), Net: network, Source: proxy, Addr: dst, Err: err}
- }
- if ctx == nil {
- proxy, dst, _ := d.pathAddrs(address)
- return nil, &net.OpError{Op: d.cmd.String(), Net: network, Source: proxy, Addr: dst, Err: errors.New("nil context")}
- }
- var err error
- var c net.Conn
- if d.ProxyDial != nil {
- c, err = d.ProxyDial(ctx, d.proxyNetwork, d.proxyAddress)
- } else {
- var dd net.Dialer
- c, err = dd.DialContext(ctx, d.proxyNetwork, d.proxyAddress)
- }
- if err != nil {
- proxy, dst, _ := d.pathAddrs(address)
- return nil, &net.OpError{Op: d.cmd.String(), Net: network, Source: proxy, Addr: dst, Err: err}
- }
- a, err := d.connect(ctx, c, address)
- if err != nil {
- c.Close()
- proxy, dst, _ := d.pathAddrs(address)
- return nil, &net.OpError{Op: d.cmd.String(), Net: network, Source: proxy, Addr: dst, Err: err}
- }
- return &Conn{Conn: c, boundAddr: a}, nil
-}
-
-// DialWithConn initiates a connection from SOCKS server to the target
-// network and address using the connection c that is already
-// connected to the SOCKS server.
-//
-// It returns the connection's local address assigned by the SOCKS
-// server.
-func (d *Dialer) DialWithConn(ctx context.Context, c net.Conn, network, address string) (net.Addr, error) {
- if err := d.validateTarget(network, address); err != nil {
- proxy, dst, _ := d.pathAddrs(address)
- return nil, &net.OpError{Op: d.cmd.String(), Net: network, Source: proxy, Addr: dst, Err: err}
- }
- if ctx == nil {
- proxy, dst, _ := d.pathAddrs(address)
- return nil, &net.OpError{Op: d.cmd.String(), Net: network, Source: proxy, Addr: dst, Err: errors.New("nil context")}
- }
- a, err := d.connect(ctx, c, address)
- if err != nil {
- proxy, dst, _ := d.pathAddrs(address)
- return nil, &net.OpError{Op: d.cmd.String(), Net: network, Source: proxy, Addr: dst, Err: err}
- }
- return a, nil
-}
-
-// Dial connects to the provided address on the provided network.
-//
-// Unlike DialContext, it returns a raw transport connection instead
-// of a forward proxy connection.
-//
-// Deprecated: Use DialContext or DialWithConn instead.
-func (d *Dialer) Dial(network, address string) (net.Conn, error) {
- if err := d.validateTarget(network, address); err != nil {
- proxy, dst, _ := d.pathAddrs(address)
- return nil, &net.OpError{Op: d.cmd.String(), Net: network, Source: proxy, Addr: dst, Err: err}
- }
- var err error
- var c net.Conn
- if d.ProxyDial != nil {
- c, err = d.ProxyDial(context.Background(), d.proxyNetwork, d.proxyAddress)
- } else {
- c, err = net.Dial(d.proxyNetwork, d.proxyAddress)
- }
- if err != nil {
- proxy, dst, _ := d.pathAddrs(address)
- return nil, &net.OpError{Op: d.cmd.String(), Net: network, Source: proxy, Addr: dst, Err: err}
- }
- if _, err := d.DialWithConn(context.Background(), c, network, address); err != nil {
- c.Close()
- return nil, err
- }
- return c, nil
-}
-
-func (d *Dialer) validateTarget(network, address string) error {
- switch network {
- case "tcp", "tcp6", "tcp4":
- default:
- return errors.New("network not implemented")
- }
- switch d.cmd {
- case CmdConnect, cmdBind:
- default:
- return errors.New("command not implemented")
- }
- return nil
-}
-
-func (d *Dialer) pathAddrs(address string) (proxy, dst net.Addr, err error) {
- for i, s := range []string{d.proxyAddress, address} {
- host, port, err := splitHostPort(s)
- if err != nil {
- return nil, nil, err
- }
- a := &Addr{Port: port}
- a.IP = net.ParseIP(host)
- if a.IP == nil {
- a.Name = host
- }
- if i == 0 {
- proxy = a
- } else {
- dst = a
- }
- }
- return
-}
-
-// NewDialer returns a new Dialer that dials through the provided
-// proxy server's network and address.
-func NewDialer(network, address string) *Dialer {
- return &Dialer{proxyNetwork: network, proxyAddress: address, cmd: CmdConnect}
-}
-
-const (
- authUsernamePasswordVersion = 0x01
- authStatusSucceeded = 0x00
-)
-
-// UsernamePassword are the credentials for the username/password
-// authentication method.
-type UsernamePassword struct {
- Username string
- Password string
-}
-
-// Authenticate authenticates a pair of username and password with the
-// proxy server.
-func (up *UsernamePassword) Authenticate(ctx context.Context, rw io.ReadWriter, auth AuthMethod) error {
- switch auth {
- case AuthMethodNotRequired:
- return nil
- case AuthMethodUsernamePassword:
- if len(up.Username) == 0 || len(up.Username) > 255 || len(up.Password) == 0 || len(up.Password) > 255 {
- return errors.New("invalid username/password")
- }
- b := []byte{authUsernamePasswordVersion}
- b = append(b, byte(len(up.Username)))
- b = append(b, up.Username...)
- b = append(b, byte(len(up.Password)))
- b = append(b, up.Password...)
- // TODO(mikio): handle IO deadlines and cancelation if
- // necessary
- if _, err := rw.Write(b); err != nil {
- return err
- }
- if _, err := io.ReadFull(rw, b[:2]); err != nil {
- return err
- }
- if b[0] != authUsernamePasswordVersion {
- return errors.New("invalid username/password version")
- }
- if b[1] != authStatusSucceeded {
- return errors.New("username/password authentication failed")
- }
- return nil
- }
- return errors.New("unsupported authentication method " + strconv.Itoa(int(auth)))
-}
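
For context on the removal above: a minimal standalone sketch of the RFC 1929 username/password sub-negotiation request that UsernamePassword.Authenticate writes to the proxy. The helper name and package are illustrative only, not part of the vendored code.

package socksexample

// buildUserPassRequest frames credentials as VER(0x01) | ULEN | UNAME | PLEN | PASSWD,
// mirroring the byte layout produced by UsernamePassword.Authenticate above.
func buildUserPassRequest(username, password string) []byte {
	b := []byte{0x01} // authUsernamePasswordVersion
	b = append(b, byte(len(username)))
	b = append(b, username...)
	b = append(b, byte(len(password)))
	b = append(b, password...)
	return b
}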
diff --git a/vendor/golang.org/x/net/proxy/BUILD.bazel b/vendor/golang.org/x/net/proxy/BUILD.bazel
deleted file mode 100644
index 21d838f878..0000000000
--- a/vendor/golang.org/x/net/proxy/BUILD.bazel
+++ /dev/null
@@ -1,16 +0,0 @@
-load("@io_bazel_rules_go//go:def.bzl", "go_library")
-
-go_library(
- name = "go_default_library",
- srcs = [
- "dial.go",
- "direct.go",
- "per_host.go",
- "proxy.go",
- "socks5.go",
- ],
- importmap = "k8s.io/kops/vendor/golang.org/x/net/proxy",
- importpath = "golang.org/x/net/proxy",
- visibility = ["//visibility:public"],
- deps = ["//vendor/golang.org/x/net/internal/socks:go_default_library"],
-)
diff --git a/vendor/golang.org/x/net/proxy/dial.go b/vendor/golang.org/x/net/proxy/dial.go
deleted file mode 100644
index 811c2e4e96..0000000000
--- a/vendor/golang.org/x/net/proxy/dial.go
+++ /dev/null
@@ -1,54 +0,0 @@
-// Copyright 2019 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package proxy
-
-import (
- "context"
- "net"
-)
-
-// A ContextDialer dials using a context.
-type ContextDialer interface {
- DialContext(ctx context.Context, network, address string) (net.Conn, error)
-}
-
-// Dial works like DialContext on net.Dialer but using a dialer returned by FromEnvironment.
-//
-// The passed ctx is only used for returning the Conn, not the lifetime of the Conn.
-//
-// Custom dialers (registered via RegisterDialerType) that do not implement ContextDialer
-// can leak a goroutine for as long as it takes the underlying Dialer implementation to timeout.
-//
-// A Conn returned from a successful Dial after the context has been cancelled will be immediately closed.
-func Dial(ctx context.Context, network, address string) (net.Conn, error) {
- d := FromEnvironment()
- if xd, ok := d.(ContextDialer); ok {
- return xd.DialContext(ctx, network, address)
- }
- return dialContext(ctx, d, network, address)
-}
-
-// WARNING: this can leak a goroutine for as long as the underlying Dialer implementation takes to timeout
-// A Conn returned from a successful Dial after the context has been cancelled will be immediately closed.
-func dialContext(ctx context.Context, d Dialer, network, address string) (net.Conn, error) {
- var (
- conn net.Conn
- done = make(chan struct{}, 1)
- err error
- )
- go func() {
- conn, err = d.Dial(network, address)
- close(done)
- if conn != nil && ctx.Err() != nil {
- conn.Close()
- }
- }()
- select {
- case <-ctx.Done():
- err = ctx.Err()
- case <-done:
- }
- return conn, err
-}
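
For reference, a minimal sketch of how the context-aware Dial helper deleted above is typically consumed, assuming the proxy is configured via ALL_PROXY/NO_PROXY; the target host and timeout are placeholders.

package main

import (
	"context"
	"log"
	"time"

	"golang.org/x/net/proxy"
)

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
	defer cancel()

	// proxy.Dial builds the dialer via FromEnvironment and uses DialContext
	// when available, otherwise the goroutine-based dialContext shim above.
	conn, err := proxy.Dial(ctx, "tcp", "example.com:443")
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()
}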
diff --git a/vendor/golang.org/x/net/proxy/direct.go b/vendor/golang.org/x/net/proxy/direct.go
deleted file mode 100644
index 3d66bdef9d..0000000000
--- a/vendor/golang.org/x/net/proxy/direct.go
+++ /dev/null
@@ -1,31 +0,0 @@
-// Copyright 2011 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package proxy
-
-import (
- "context"
- "net"
-)
-
-type direct struct{}
-
-// Direct implements Dialer by making network connections directly using net.Dial or net.DialContext.
-var Direct = direct{}
-
-var (
- _ Dialer = Direct
- _ ContextDialer = Direct
-)
-
-// Dial directly invokes net.Dial with the supplied parameters.
-func (direct) Dial(network, addr string) (net.Conn, error) {
- return net.Dial(network, addr)
-}
-
-// DialContext instantiates a net.Dialer and invokes its DialContext receiver with the supplied parameters.
-func (direct) DialContext(ctx context.Context, network, addr string) (net.Conn, error) {
- var d net.Dialer
- return d.DialContext(ctx, network, addr)
-}
diff --git a/vendor/golang.org/x/net/proxy/per_host.go b/vendor/golang.org/x/net/proxy/per_host.go
deleted file mode 100644
index 573fe79e86..0000000000
--- a/vendor/golang.org/x/net/proxy/per_host.go
+++ /dev/null
@@ -1,155 +0,0 @@
-// Copyright 2011 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package proxy
-
-import (
- "context"
- "net"
- "strings"
-)
-
-// A PerHost directs connections to a default Dialer unless the host name
-// requested matches one of a number of exceptions.
-type PerHost struct {
- def, bypass Dialer
-
- bypassNetworks []*net.IPNet
- bypassIPs []net.IP
- bypassZones []string
- bypassHosts []string
-}
-
-// NewPerHost returns a PerHost Dialer that directs connections to either
-// defaultDialer or bypass, depending on whether the connection matches one of
-// the configured rules.
-func NewPerHost(defaultDialer, bypass Dialer) *PerHost {
- return &PerHost{
- def: defaultDialer,
- bypass: bypass,
- }
-}
-
-// Dial connects to the address addr on the given network through either
-// defaultDialer or bypass.
-func (p *PerHost) Dial(network, addr string) (c net.Conn, err error) {
- host, _, err := net.SplitHostPort(addr)
- if err != nil {
- return nil, err
- }
-
- return p.dialerForRequest(host).Dial(network, addr)
-}
-
-// DialContext connects to the address addr on the given network through either
-// defaultDialer or bypass.
-func (p *PerHost) DialContext(ctx context.Context, network, addr string) (c net.Conn, err error) {
- host, _, err := net.SplitHostPort(addr)
- if err != nil {
- return nil, err
- }
- d := p.dialerForRequest(host)
- if x, ok := d.(ContextDialer); ok {
- return x.DialContext(ctx, network, addr)
- }
- return dialContext(ctx, d, network, addr)
-}
-
-func (p *PerHost) dialerForRequest(host string) Dialer {
- if ip := net.ParseIP(host); ip != nil {
- for _, net := range p.bypassNetworks {
- if net.Contains(ip) {
- return p.bypass
- }
- }
- for _, bypassIP := range p.bypassIPs {
- if bypassIP.Equal(ip) {
- return p.bypass
- }
- }
- return p.def
- }
-
- for _, zone := range p.bypassZones {
- if strings.HasSuffix(host, zone) {
- return p.bypass
- }
- if host == zone[1:] {
- // For a zone ".example.com", we match "example.com"
- // too.
- return p.bypass
- }
- }
- for _, bypassHost := range p.bypassHosts {
- if bypassHost == host {
- return p.bypass
- }
- }
- return p.def
-}
-
-// AddFromString parses a string that contains comma-separated values
-// specifying hosts that should use the bypass proxy. Each value is either an
-// IP address, a CIDR range, a zone (*.example.com) or a host name
-// (localhost). A best effort is made to parse the string and errors are
-// ignored.
-func (p *PerHost) AddFromString(s string) {
- hosts := strings.Split(s, ",")
- for _, host := range hosts {
- host = strings.TrimSpace(host)
- if len(host) == 0 {
- continue
- }
- if strings.Contains(host, "/") {
- // We assume that it's a CIDR address like 127.0.0.0/8
- if _, net, err := net.ParseCIDR(host); err == nil {
- p.AddNetwork(net)
- }
- continue
- }
- if ip := net.ParseIP(host); ip != nil {
- p.AddIP(ip)
- continue
- }
- if strings.HasPrefix(host, "*.") {
- p.AddZone(host[1:])
- continue
- }
- p.AddHost(host)
- }
-}
-
-// AddIP specifies an IP address that will use the bypass proxy. Note that
-// this will only take effect if a literal IP address is dialed. A connection
-// to a named host will never match an IP.
-func (p *PerHost) AddIP(ip net.IP) {
- p.bypassIPs = append(p.bypassIPs, ip)
-}
-
-// AddNetwork specifies an IP range that will use the bypass proxy. Note that
-// this will only take effect if a literal IP address is dialed. A connection
-// to a named host will never match.
-func (p *PerHost) AddNetwork(net *net.IPNet) {
- p.bypassNetworks = append(p.bypassNetworks, net)
-}
-
-// AddZone specifies a DNS suffix that will use the bypass proxy. A zone of
-// "example.com" matches "example.com" and all of its subdomains.
-func (p *PerHost) AddZone(zone string) {
- if strings.HasSuffix(zone, ".") {
- zone = zone[:len(zone)-1]
- }
- if !strings.HasPrefix(zone, ".") {
- zone = "." + zone
- }
- p.bypassZones = append(p.bypassZones, zone)
-}
-
-// AddHost specifies a host name that will use the bypass proxy.
-func (p *PerHost) AddHost(host string) {
- if strings.HasSuffix(host, ".") {
- host = host[:len(host)-1]
- }
- p.bypassHosts = append(p.bypassHosts, host)
-}
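
For reference, a sketch of how the PerHost dialer deleted above is typically wired, assuming a SOCKS5 proxy on 127.0.0.1:1080 and a NO_PROXY-style bypass list; all addresses below are placeholders.

package main

import (
	"log"

	"golang.org/x/net/proxy"
)

func main() {
	socksDialer, err := proxy.SOCKS5("tcp", "127.0.0.1:1080", nil, proxy.Direct)
	if err != nil {
		log.Fatal(err)
	}

	// Route everything through the SOCKS proxy except the bypass entries,
	// which are dialed directly.
	perHost := proxy.NewPerHost(socksDialer, proxy.Direct)
	perHost.AddFromString("localhost,10.0.0.0/8,*.internal.example.com")

	conn, err := perHost.Dial("tcp", "db.internal.example.com:5432")
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()
}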
diff --git a/vendor/golang.org/x/net/proxy/proxy.go b/vendor/golang.org/x/net/proxy/proxy.go
deleted file mode 100644
index 9ff4b9a776..0000000000
--- a/vendor/golang.org/x/net/proxy/proxy.go
+++ /dev/null
@@ -1,149 +0,0 @@
-// Copyright 2011 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// Package proxy provides support for a variety of protocols to proxy network
-// data.
-package proxy // import "golang.org/x/net/proxy"
-
-import (
- "errors"
- "net"
- "net/url"
- "os"
- "sync"
-)
-
-// A Dialer is a means to establish a connection.
-// Custom dialers should also implement ContextDialer.
-type Dialer interface {
- // Dial connects to the given address via the proxy.
- Dial(network, addr string) (c net.Conn, err error)
-}
-
-// Auth contains authentication parameters that specific Dialers may require.
-type Auth struct {
- User, Password string
-}
-
-// FromEnvironment returns the dialer specified by the proxy-related
-// variables in the environment and makes underlying connections
-// directly.
-func FromEnvironment() Dialer {
- return FromEnvironmentUsing(Direct)
-}
-
-// FromEnvironmentUsing returns the dialer specify by the proxy-related
-// variables in the environment and makes underlying connections
-// using the provided forwarding Dialer (for instance, a *net.Dialer
-// with desired configuration).
-func FromEnvironmentUsing(forward Dialer) Dialer {
- allProxy := allProxyEnv.Get()
- if len(allProxy) == 0 {
- return forward
- }
-
- proxyURL, err := url.Parse(allProxy)
- if err != nil {
- return forward
- }
- proxy, err := FromURL(proxyURL, forward)
- if err != nil {
- return forward
- }
-
- noProxy := noProxyEnv.Get()
- if len(noProxy) == 0 {
- return proxy
- }
-
- perHost := NewPerHost(proxy, forward)
- perHost.AddFromString(noProxy)
- return perHost
-}
-
-// proxySchemes is a map from URL schemes to a function that creates a Dialer
-// from a URL with such a scheme.
-var proxySchemes map[string]func(*url.URL, Dialer) (Dialer, error)
-
-// RegisterDialerType takes a URL scheme and a function to generate Dialers from
-// a URL with that scheme and a forwarding Dialer. Registered schemes are used
-// by FromURL.
-func RegisterDialerType(scheme string, f func(*url.URL, Dialer) (Dialer, error)) {
- if proxySchemes == nil {
- proxySchemes = make(map[string]func(*url.URL, Dialer) (Dialer, error))
- }
- proxySchemes[scheme] = f
-}
-
-// FromURL returns a Dialer given a URL specification and an underlying
-// Dialer for it to make network requests.
-func FromURL(u *url.URL, forward Dialer) (Dialer, error) {
- var auth *Auth
- if u.User != nil {
- auth = new(Auth)
- auth.User = u.User.Username()
- if p, ok := u.User.Password(); ok {
- auth.Password = p
- }
- }
-
- switch u.Scheme {
- case "socks5", "socks5h":
- addr := u.Hostname()
- port := u.Port()
- if port == "" {
- port = "1080"
- }
- return SOCKS5("tcp", net.JoinHostPort(addr, port), auth, forward)
- }
-
- // If the scheme doesn't match any of the built-in schemes, see if it
- // was registered by another package.
- if proxySchemes != nil {
- if f, ok := proxySchemes[u.Scheme]; ok {
- return f(u, forward)
- }
- }
-
- return nil, errors.New("proxy: unknown scheme: " + u.Scheme)
-}
-
-var (
- allProxyEnv = &envOnce{
- names: []string{"ALL_PROXY", "all_proxy"},
- }
- noProxyEnv = &envOnce{
- names: []string{"NO_PROXY", "no_proxy"},
- }
-)
-
-// envOnce looks up an environment variable (optionally by multiple
-// names) once. It mitigates expensive lookups on some platforms
-// (e.g. Windows).
-// (Borrowed from net/http/transport.go)
-type envOnce struct {
- names []string
- once sync.Once
- val string
-}
-
-func (e *envOnce) Get() string {
- e.once.Do(e.init)
- return e.val
-}
-
-func (e *envOnce) init() {
- for _, n := range e.names {
- e.val = os.Getenv(n)
- if e.val != "" {
- return
- }
- }
-}
-
-// reset is used by tests
-func (e *envOnce) reset() {
- e.once = sync.Once{}
- e.val = ""
-}
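
For reference, a sketch of building a dialer from a URL with the FromURL plumbing deleted above; the proxy and target addresses are placeholders.

package main

import (
	"log"
	"net/url"

	"golang.org/x/net/proxy"
)

func main() {
	u, err := url.Parse("socks5://127.0.0.1:1080")
	if err != nil {
		log.Fatal(err)
	}

	// FromURL handles socks5/socks5h natively; any other scheme must first be
	// registered through RegisterDialerType.
	d, err := proxy.FromURL(u, proxy.Direct)
	if err != nil {
		log.Fatal(err)
	}

	conn, err := d.Dial("tcp", "example.com:80")
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()
}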
diff --git a/vendor/golang.org/x/net/proxy/socks5.go b/vendor/golang.org/x/net/proxy/socks5.go
deleted file mode 100644
index c91651f96d..0000000000
--- a/vendor/golang.org/x/net/proxy/socks5.go
+++ /dev/null
@@ -1,42 +0,0 @@
-// Copyright 2011 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package proxy
-
-import (
- "context"
- "net"
-
- "golang.org/x/net/internal/socks"
-)
-
-// SOCKS5 returns a Dialer that makes SOCKSv5 connections to the given
-// address with an optional username and password.
-// See RFC 1928 and RFC 1929.
-func SOCKS5(network, address string, auth *Auth, forward Dialer) (Dialer, error) {
- d := socks.NewDialer(network, address)
- if forward != nil {
- if f, ok := forward.(ContextDialer); ok {
- d.ProxyDial = func(ctx context.Context, network string, address string) (net.Conn, error) {
- return f.DialContext(ctx, network, address)
- }
- } else {
- d.ProxyDial = func(ctx context.Context, network string, address string) (net.Conn, error) {
- return dialContext(ctx, forward, network, address)
- }
- }
- }
- if auth != nil {
- up := socks.UsernamePassword{
- Username: auth.User,
- Password: auth.Password,
- }
- d.AuthMethods = []socks.AuthMethod{
- socks.AuthMethodNotRequired,
- socks.AuthMethodUsernamePassword,
- }
- d.Authenticate = up.Authenticate
- }
- return d, nil
-}
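
For reference, a sketch of the SOCKS5 constructor deleted above combined with username/password authentication; the credentials and proxy address are placeholders.

package main

import (
	"log"

	"golang.org/x/net/proxy"
)

func main() {
	auth := &proxy.Auth{User: "user", Password: "secret"}

	// SOCKS5 wires the public Auth struct into the internal socks dialer's
	// AuthMethods/Authenticate fields shown earlier in this patch.
	d, err := proxy.SOCKS5("tcp", "socks.example.com:1080", auth, proxy.Direct)
	if err != nil {
		log.Fatal(err)
	}

	conn, err := d.Dial("tcp", "example.com:80")
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()
}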
diff --git a/vendor/modules.txt b/vendor/modules.txt
index 2bb731f3ec..cd17818b75 100644
--- a/vendor/modules.txt
+++ b/vendor/modules.txt
@@ -263,23 +263,17 @@ github.com/docker/distribution/registry/storage/cache
github.com/docker/distribution/registry/storage/cache/memory
# github.com/docker/docker v20.10.6+incompatible
## explicit
-github.com/docker/docker/api
github.com/docker/docker/api/types
github.com/docker/docker/api/types/blkiodev
github.com/docker/docker/api/types/container
-github.com/docker/docker/api/types/events
github.com/docker/docker/api/types/filters
-github.com/docker/docker/api/types/image
github.com/docker/docker/api/types/mount
github.com/docker/docker/api/types/network
github.com/docker/docker/api/types/registry
github.com/docker/docker/api/types/strslice
github.com/docker/docker/api/types/swarm
github.com/docker/docker/api/types/swarm/runtime
-github.com/docker/docker/api/types/time
github.com/docker/docker/api/types/versions
-github.com/docker/docker/api/types/volume
-github.com/docker/docker/client
github.com/docker/docker/errdefs
github.com/docker/docker/pkg/homedir
github.com/docker/docker/pkg/ioutils
@@ -293,7 +287,6 @@ github.com/docker/docker-credential-helpers/client
github.com/docker/docker-credential-helpers/credentials
# github.com/docker/go-connections v0.4.0
github.com/docker/go-connections/nat
-github.com/docker/go-connections/sockets
github.com/docker/go-connections/tlsconfig
# github.com/docker/go-metrics v0.0.0-20180209012529-399ea8c73916
github.com/docker/go-metrics
@@ -352,6 +345,24 @@ github.com/google/go-cmp/cmp/internal/diff
github.com/google/go-cmp/cmp/internal/flags
github.com/google/go-cmp/cmp/internal/function
github.com/google/go-cmp/cmp/internal/value
+# github.com/google/go-containerregistry v0.5.1
+## explicit
+github.com/google/go-containerregistry/internal/and
+github.com/google/go-containerregistry/internal/gzip
+github.com/google/go-containerregistry/internal/redact
+github.com/google/go-containerregistry/internal/retry
+github.com/google/go-containerregistry/internal/retry/wait
+github.com/google/go-containerregistry/internal/verify
+github.com/google/go-containerregistry/pkg/authn
+github.com/google/go-containerregistry/pkg/logs
+github.com/google/go-containerregistry/pkg/name
+github.com/google/go-containerregistry/pkg/v1
+github.com/google/go-containerregistry/pkg/v1/match
+github.com/google/go-containerregistry/pkg/v1/partial
+github.com/google/go-containerregistry/pkg/v1/remote
+github.com/google/go-containerregistry/pkg/v1/remote/transport
+github.com/google/go-containerregistry/pkg/v1/stream
+github.com/google/go-containerregistry/pkg/v1/types
# github.com/google/go-querystring v1.0.0
github.com/google/go-querystring/query
# github.com/google/gofuzz v1.2.0
@@ -742,11 +753,9 @@ golang.org/x/net/http2/hpack
golang.org/x/net/idna
golang.org/x/net/internal/iana
golang.org/x/net/internal/socket
-golang.org/x/net/internal/socks
golang.org/x/net/internal/timeseries
golang.org/x/net/ipv4
golang.org/x/net/ipv6
-golang.org/x/net/proxy
golang.org/x/net/trace
# golang.org/x/oauth2 v0.0.0-20210514164344-f6687ab2804c
## explicit