Also includes the unreleased https://github.com/openshift/imagebuilder/pull/246, needed to work
with the updated docker/docker dependency.

And updates some references to newly deprecated docker/docker symbols.
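
For illustration only (not part of this commit): a minimal sketch of the docker/docker v23 symbol moves that the diffs below adapt to, assuming github.com/docker/docker v23.0.0+incompatible is on the module path. The volume-related types moved from api/types (Volume, VolumeUsageData, VolumeListOKBody, VolumeCreateBody) into the api/types/volume package (Volume, UsageData, ListResponse, CreateOptions).

package main

import (
	"fmt"

	"github.com/docker/docker/api/types/volume"
)

func main() {
	// Previously api/types.Volume and api/types.VolumeUsageData.
	v := volume.Volume{
		Name:   "example",
		Driver: "local",
		Scope:  "local",
		UsageData: &volume.UsageData{
			RefCount: 1,
			Size:     0,
		},
	}

	// Previously api/types/volume.VolumeListOKBody.
	list := volume.ListResponse{
		Volumes:  []*volume.Volume{&v},
		Warnings: []string{},
	}
	fmt.Printf("%d volume(s), %d warning(s)\n", len(list.Volumes), len(list.Warnings))
}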

[NO NEW TESTS NEEDED]

Signed-off-by: Miloslav Trmač <mitr@redhat.com>
Miloslav Trmač 2023-01-25 19:22:41 +01:00
parent 83f2f840e4
commit e308ba0215
292 changed files with 7975 additions and 5113 deletions

go.mod

@ -14,7 +14,7 @@ require (
github.com/containers/buildah v1.29.1-0.20230201192322-e56eb25575c7
github.com/containers/common v0.51.0
github.com/containers/conmon v2.0.20+incompatible
github.com/containers/image/v5 v5.24.1-0.20230202144111-a49c94a010be
github.com/containers/image/v5 v5.24.1-0.20230208161124-34839152eb48
github.com/containers/ocicrypt v1.1.7
github.com/containers/psgo v1.8.0
github.com/containers/storage v1.45.3
@ -22,7 +22,7 @@ require (
github.com/coreos/stream-metadata-go v0.0.0-20210225230131-70edb9eb47b3
github.com/cyphar/filepath-securejoin v0.2.3
github.com/digitalocean/go-qemu v0.0.0-20210326154740-ac9e0b687001
github.com/docker/docker v20.10.23+incompatible
github.com/docker/docker v23.0.0+incompatible
github.com/docker/go-connections v0.4.1-0.20210727194412-58542c764a11
github.com/docker/go-plugins-helpers v0.0.0-20211224144127-6eecb7beb651
github.com/docker/go-units v0.5.0
@ -38,7 +38,7 @@ require (
github.com/hashicorp/go-multierror v1.1.1
github.com/json-iterator/go v1.1.12
github.com/mattn/go-shellwords v1.0.12
github.com/moby/term v0.0.0-20210619224110-3f7ff695adc6
github.com/moby/term v0.0.0-20221120202655-abb19827d345
github.com/nxadm/tail v1.4.8
github.com/onsi/ginkgo v1.16.5
github.com/onsi/gomega v1.26.0
@ -47,8 +47,8 @@ require (
github.com/opencontainers/runc v1.1.4
github.com/opencontainers/runtime-spec v1.0.3-0.20220825212826-86290f6a00fb
github.com/opencontainers/runtime-tools v0.9.1-0.20221014010322-58c91d646d86
github.com/opencontainers/selinux v1.10.2
github.com/openshift/imagebuilder v1.2.4-0.20220711175835-4151e43600df
github.com/opencontainers/selinux v1.11.0
github.com/openshift/imagebuilder v1.2.4-0.20230207193036-6e08c897da73
github.com/rootless-containers/rootlesskit v1.1.0
github.com/sirupsen/logrus v1.9.0
github.com/spf13/cobra v1.6.1
@ -112,7 +112,7 @@ require (
github.com/google/go-cmp v0.5.9 // indirect
github.com/google/go-containerregistry v0.12.1 // indirect
github.com/google/go-intervals v0.0.2 // indirect
github.com/google/trillian v1.5.0 // indirect
github.com/google/trillian v1.5.1-0.20220819043421-0a389c4bb8d9 // indirect
github.com/hashicorp/errwrap v1.1.0 // indirect
github.com/hashicorp/go-cleanhttp v0.5.2 // indirect
github.com/hashicorp/go-retryablehttp v0.7.2 // indirect
@ -131,8 +131,9 @@ require (
github.com/miekg/pkcs11 v1.1.1 // indirect
github.com/mistifyio/go-zfs/v3 v3.0.0 // indirect
github.com/mitchellh/mapstructure v1.5.0 // indirect
github.com/moby/sys/mount v0.3.3 // indirect
github.com/moby/patternmatcher v0.5.0 // indirect
github.com/moby/sys/mountinfo v0.6.2 // indirect
github.com/moby/sys/sequential v0.5.0 // indirect
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect
github.com/modern-go/reflect2 v1.0.2 // indirect
github.com/morikuni/aec v1.0.0 // indirect
@ -151,7 +152,7 @@ require (
github.com/sigstore/sigstore v1.5.1 // indirect
github.com/skratchdot/open-golang v0.0.0-20200116055534-eef842397966 // indirect
github.com/stefanberger/go-pkcs11uri v0.0.0-20201008174630-78d3cae3a980 // indirect
github.com/sylabs/sif/v2 v2.9.0 // indirect
github.com/sylabs/sif/v2 v2.9.1 // indirect
github.com/tchap/go-patricia v2.3.0+incompatible // indirect
github.com/theupdateframework/go-tuf v0.5.2 // indirect
github.com/titanous/rocacheck v0.0.0-20171023193734-afe73141d399 // indirect
@ -164,6 +165,7 @@ require (
go.mozilla.org/pkcs7 v0.0.0-20210826202110-33d05740a352 // indirect
go.opencensus.io v0.24.0 // indirect
golang.org/x/crypto v0.5.0 // indirect
golang.org/x/exp v0.0.0-20230206171751-46f607a40771 // indirect
golang.org/x/mod v0.7.0 // indirect
golang.org/x/oauth2 v0.4.0 // indirect
golang.org/x/tools v0.4.0 // indirect

go.sum

@ -265,8 +265,8 @@ github.com/containers/common v0.51.0 h1:Ax4YHNTG8cEPHZJcMYRoP7sfBgOISceeyOvmZzmS
github.com/containers/common v0.51.0/go.mod h1:3W2WIdalgQfrsX/T5tjX+6CxgT3ThJVN2G9sNuFjuCM=
github.com/containers/conmon v2.0.20+incompatible h1:YbCVSFSCqFjjVwHTPINGdMX1F6JXHGTUje2ZYobNrkg=
github.com/containers/conmon v2.0.20+incompatible/go.mod h1:hgwZ2mtuDrppv78a/cOBNiCm6O0UMWGx1mu7P00nu5I=
github.com/containers/image/v5 v5.24.1-0.20230202144111-a49c94a010be h1:Bcsn9ohcVpCM9D2kZvIVMrO0QmmP87jhVZh/r9WrQ18=
github.com/containers/image/v5 v5.24.1-0.20230202144111-a49c94a010be/go.mod h1:7Aonfvk5Wz0DLP40xgVSOGvygK3JXvDLxgotyGp4/KA=
github.com/containers/image/v5 v5.24.1-0.20230208161124-34839152eb48 h1:etc/YwCWeT6bfyxfKOaRnFRz2KouV53yMIi3IamUtI8=
github.com/containers/image/v5 v5.24.1-0.20230208161124-34839152eb48/go.mod h1:dZdaN/Ump5vD4SKQcu599upFIK1tutPuLrmk0Z1B7Yw=
github.com/containers/libtrust v0.0.0-20230121012942-c1716e8a8d01 h1:Qzk5C6cYglewc+UyGf6lc8Mj2UaPTHy/iF2De0/77CA=
github.com/containers/libtrust v0.0.0-20230121012942-c1716e8a8d01/go.mod h1:9rfv8iPl1ZP7aqh9YA68wnZv2NUDbXdcdPHVz0pFbPY=
github.com/containers/ocicrypt v1.0.1/go.mod h1:MeJDzk1RJHv89LjsH0Sp5KTY3ZYkjXO/C+bKAeWFIrc=
@ -338,8 +338,8 @@ github.com/docker/distribution v2.8.1+incompatible h1:Q50tZOPR6T/hjNsyc9g8/syEs6
github.com/docker/distribution v2.8.1+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w=
github.com/docker/docker v1.4.2-0.20190924003213-a8608b5b67c7/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
github.com/docker/docker v20.10.12+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
github.com/docker/docker v20.10.23+incompatible h1:1ZQUUYAdh+oylOT85aA2ZcfRp22jmLhoaEcVEfK8dyA=
github.com/docker/docker v20.10.23+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
github.com/docker/docker v23.0.0+incompatible h1:L6c28tNyqZ4/ub9AZC9d5QUuunoHHfEH4/Ue+h/E5nE=
github.com/docker/docker v23.0.0+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
github.com/docker/docker-credential-helpers v0.6.3/go.mod h1:WRaJzqw3CTB9bk10avuGsjVBZsD05qeibJ1/TYlvc0Y=
github.com/docker/docker-credential-helpers v0.7.0 h1:xtCHsjxogADNZcdv1pKUHXryefjlVRqWqIhk/uXJp0A=
github.com/docker/docker-credential-helpers v0.7.0/go.mod h1:rETQfLdHNT3foU5kuNkFR1R1V12OJRRO5lzt2D1b5X0=
@ -509,7 +509,6 @@ github.com/gogo/protobuf v1.3.1/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXP
github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q=
github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q=
github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q=
github.com/golang/glog v1.0.0 h1:nfP3RFugxnNRyKgeWd4oI1nYvXpxrx8ck8ZrcizshdQ=
github.com/golang/groupcache v0.0.0-20160516000752-02826c3e7903/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
github.com/golang/groupcache v0.0.0-20190129154638-5b532d6fd5ef/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
@ -589,8 +588,8 @@ github.com/google/pprof v0.0.0-20210407192527-94a9f03dee38/go.mod h1:kpwsk12EmLe
github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI=
github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510 h1:El6M4kTTCOh6aBiKaUGG7oYTSPP8MxqL4YI3kZKwcP4=
github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510/go.mod h1:pupxD2MaaD3pAXIBCelhxNneeOaAeabZDe5s4K6zSpQ=
github.com/google/trillian v1.5.0 h1:I5pIN18bKlXtlj1Tk919rQ3mWBU2BzNNR6JhLISGMB4=
github.com/google/trillian v1.5.0/go.mod h1:2/gAIc+G1MUcErOPc+cSwHAQHZlGy+RYHjVGnhUQ3e8=
github.com/google/trillian v1.5.1-0.20220819043421-0a389c4bb8d9 h1:GFmzYtwUMi1S2mjLxfrJ/CZ9gWDG+zeLtZByg/QEBkk=
github.com/google/trillian v1.5.1-0.20220819043421-0a389c4bb8d9/go.mod h1:vywkS3p2SgNmPL7oAWqU5PiiknzRMp+ol3a19jfY2PQ=
github.com/google/uuid v1.0.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
@ -777,17 +776,20 @@ github.com/mitchellh/mapstructure v1.5.0/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RR
github.com/mitchellh/osext v0.0.0-20151018003038-5e2d6d41470f/go.mod h1:OkQIRizQZAeMln+1tSwduZz7+Af5oFlKirV/MSYes2A=
github.com/mndrix/tap-go v0.0.0-20171203230836-629fa407e90b/go.mod h1:pzzDgJWZ34fGzaAZGFW22KVZDfyrYW+QABMrWnJBnSs=
github.com/moby/locker v1.0.1/go.mod h1:S7SDdo5zpBK84bzzVlKr2V0hz+7x9hWbYC/kq7oQppc=
github.com/moby/patternmatcher v0.5.0 h1:YCZgJOeULcxLw1Q+sVR636pmS7sPEn1Qo2iAN6M7DBo=
github.com/moby/patternmatcher v0.5.0/go.mod h1:hDPoyOpDY7OrrMDLaYoY3hf52gNCR/YOUYxkhApJIxc=
github.com/moby/sys/mount v0.2.0/go.mod h1:aAivFE2LB3W4bACsUXChRHQ0qKWsetY4Y9V7sxOougM=
github.com/moby/sys/mount v0.3.3 h1:fX1SVkXFJ47XWDoeFW4Sq7PdQJnV2QIDZAqjNqgEjUs=
github.com/moby/sys/mount v0.3.3/go.mod h1:PBaEorSNTLG5t/+4EgukEQVlAvVEc6ZjTySwKdqp5K0=
github.com/moby/sys/mountinfo v0.4.0/go.mod h1:rEr8tzG/lsIZHBtN/JjGG+LMYx9eXgW2JI+6q0qou+A=
github.com/moby/sys/mountinfo v0.4.1/go.mod h1:rEr8tzG/lsIZHBtN/JjGG+LMYx9eXgW2JI+6q0qou+A=
github.com/moby/sys/mountinfo v0.6.2 h1:BzJjoreD5BMFNmD9Rus6gdd1pLuecOFPt8wC+Vygl78=
github.com/moby/sys/mountinfo v0.6.2/go.mod h1:IJb6JQeOklcdMU9F5xQ8ZALD+CUr5VlGpwtX+VE0rpI=
github.com/moby/sys/sequential v0.5.0 h1:OPvI35Lzn9K04PBbCLW0g4LcFAJgHsvXsRyewg5lXtc=
github.com/moby/sys/sequential v0.5.0/go.mod h1:tH2cOOs5V9MlPiXcQzRC+eEyab644PWKGRYaaV5ZZlo=
github.com/moby/sys/symlink v0.1.0/go.mod h1:GGDODQmbFOjFsXvfLVn3+ZRxkch54RkSiGqsZeMYowQ=
github.com/moby/term v0.0.0-20200312100748-672ec06f55cd/go.mod h1:DdlQx2hp0Ss5/fLikoLlEeIYiATotOjgB//nb973jeo=
github.com/moby/term v0.0.0-20210619224110-3f7ff695adc6 h1:dcztxKSvZ4Id8iPpHERQBbIJfabdt4wUm5qy3wOL2Zc=
github.com/moby/term v0.0.0-20210619224110-3f7ff695adc6/go.mod h1:E2VnQOmVuvZB6UYnnDB0qG5Nq/1tD9acaOpo6xmt0Kw=
github.com/moby/term v0.0.0-20221120202655-abb19827d345 h1:J9c53/kxIH+2nTKBEfZYFMlhghtHpIHSXpm5VRGHSnU=
github.com/moby/term v0.0.0-20221120202655-abb19827d345/go.mod h1:15ce4BGCFxt7I5NQKT+HV0yEDxmf6fSysfEDiVo3zFM=
github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg=
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
@ -866,10 +868,11 @@ github.com/opencontainers/selinux v1.8.0/go.mod h1:RScLhm78qiWa2gbVCcGkC7tCGdgk3
github.com/opencontainers/selinux v1.8.2/go.mod h1:MUIHuUEvKB1wtJjQdOyYRgOnLD2xAPP8dBsCoU0KuF8=
github.com/opencontainers/selinux v1.9.1/go.mod h1:2i0OySw99QjzBBQByd1Gr9gSjvuho1lHsJxIJ3gGbJI=
github.com/opencontainers/selinux v1.10.1/go.mod h1:2i0OySw99QjzBBQByd1Gr9gSjvuho1lHsJxIJ3gGbJI=
github.com/opencontainers/selinux v1.10.2 h1:NFy2xCsjn7+WspbfZkUd5zyVeisV7VFbPSP96+8/ha4=
github.com/opencontainers/selinux v1.10.2/go.mod h1:cARutUbaUrlRClyvxOICCgKixCs6L05aUsohzA3EkHQ=
github.com/openshift/imagebuilder v1.2.4-0.20220711175835-4151e43600df h1:vf6pdI10F2Tim5a9JKiVVl4/dpNz1OEhz4EnfLdLtiA=
github.com/openshift/imagebuilder v1.2.4-0.20220711175835-4151e43600df/go.mod h1:TRYHe4CH9U6nkDjxjBNM5klrLbJBrRbpJE5SaRwUBsQ=
github.com/opencontainers/selinux v1.11.0 h1:+5Zbo97w3Lbmb3PeqQtpmTkMwsW5nRI3YaLpt7tQ7oU=
github.com/opencontainers/selinux v1.11.0/go.mod h1:E5dMC3VPuVvVHDYmi78qvhJp8+M586T4DlDRYpFkyec=
github.com/openshift/imagebuilder v1.2.4-0.20230207193036-6e08c897da73 h1:c4l+zY30Hl63ZFj+bHw6TeBBb9ok/uz7wJJoSaYR0kg=
github.com/openshift/imagebuilder v1.2.4-0.20230207193036-6e08c897da73/go.mod h1:TRYHe4CH9U6nkDjxjBNM5klrLbJBrRbpJE5SaRwUBsQ=
github.com/opentracing/opentracing-go v1.2.0 h1:uEJPy/1a5RIPAJ0Ov+OIO8OxWu77jEv+1B0VhjKrZUs=
github.com/opentracing/opentracing-go v1.2.0/go.mod h1:GxEUsuufX4nBwe+T+Wl9TAgYrxe9dPLANfrWvHYVTgc=
github.com/ostreedev/ostree-go v0.0.0-20210805093236-719684c64e4f h1:/UDgs8FGMqwnHagNDPGOlts35QkhAZ8by3DR7nMih7M=
@ -901,7 +904,7 @@ github.com/prometheus/client_golang v0.9.3/go.mod h1:/TN21ttK/J9q6uSwhBd54HahCDf
github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo=
github.com/prometheus/client_golang v1.1.0/go.mod h1:I1FGZT9+L76gKKOs5djB6ezCbFQP1xR9D75/vuwEF3g=
github.com/prometheus/client_golang v1.7.1/go.mod h1:PY5Wy2awLA44sXw4AOSfFBetzPP4j5+D6mVACh+pe2M=
github.com/prometheus/client_golang v1.13.0 h1:b71QUfeo5M8gq2+evJdTPfZhYMAU0uKPkyPJ7TPsloU=
github.com/prometheus/client_golang v1.14.0 h1:nJdhIvne2eSX/XRAFV9PcvFFRbrjbcTUj0VP62TMhnw=
github.com/prometheus/client_model v0.0.0-20171117100541-99fa1f4be8e5/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
@ -1022,8 +1025,8 @@ github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO
github.com/stretchr/testify v1.8.1 h1:w7B6lhMri9wdJUVmEZPGGhZzrYTPvgJArz7wNPgYKsk=
github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4=
github.com/subosito/gotenv v1.2.0/go.mod h1:N0PQaV/YGNqwC0u51sEeR/aUtSLEXKX9iv69rRypqCw=
github.com/sylabs/sif/v2 v2.9.0 h1:q9K92j1QW4/QLOtKh9YZpJHrXav6x15AVhQGPVLcg+4=
github.com/sylabs/sif/v2 v2.9.0/go.mod h1:bRdFzcqif0eDjwx0isG4cgTFoKTQn/vfBXVSoP2rB2Y=
github.com/sylabs/sif/v2 v2.9.1 h1:LxF9EcH4hmwSqDBdRv9Tt57YVkvV9rDu66AA/nmns2Y=
github.com/sylabs/sif/v2 v2.9.1/go.mod h1:10lbqUw/uptKH4Z6dRDZl+9Iz7jMiFMDE99eHRJDwOs=
github.com/syndtr/gocapability v0.0.0-20170704070218-db04d3cc01c8/go.mod h1:hkRG7XYTFWNJGYcbNJQlaLq0fg1yr4J4t/NcTQtrfww=
github.com/syndtr/gocapability v0.0.0-20180916011248-d98352740cb2/go.mod h1:hkRG7XYTFWNJGYcbNJQlaLq0fg1yr4J4t/NcTQtrfww=
github.com/syndtr/gocapability v0.0.0-20200815063812-42c35b437635 h1:kdXcSzyDtseVEc4yCz2qF8ZrQvIDBJLl4S1c3GCXmoI=
@ -1167,6 +1170,8 @@ golang.org/x/exp v0.0.0-20191227195350-da58074b4299/go.mod h1:2RIsYlXP63K8oxa1u0
golang.org/x/exp v0.0.0-20200119233911-0405dc783f0a/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4=
golang.org/x/exp v0.0.0-20200207192155-f17229e696bd/go.mod h1:J/WKrq2StrnmMY6+EHIKF9dgMWnmCNThgcyBT1FY9mM=
golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6/go.mod h1:3jZMyOhIsHpP37uCMkUooju7aAi5cS1Q23tOzKc+0MU=
golang.org/x/exp v0.0.0-20230206171751-46f607a40771 h1:xP7rWLUr1e1n2xkK5YB4LI0hPEy3LJC6Wk+D4pGlOJg=
golang.org/x/exp v0.0.0-20230206171751-46f607a40771/go.mod h1:CxIveKay+FTh1D0yPZemJVgC/95VzuuOLq5Qi4xnoYc=
golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js=
golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0=
golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
@ -1689,10 +1694,12 @@ k8s.io/cri-api v0.20.6/go.mod h1:ew44AjNXwyn1s0U4xCKGodU7J1HzBeZ1MpGrpa5r8Yc=
k8s.io/gengo v0.0.0-20200413195148-3a45101e95ac/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0=
k8s.io/gengo v0.0.0-20200428234225-8167cfdcfc14/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0=
k8s.io/gengo v0.0.0-20201113003025-83324d819ded/go.mod h1:FiNAH4ZV3gBg2Kwh89tzAEV2be7d5xI0vBa/VySYy3E=
k8s.io/klog v1.0.0 h1:Pt+yjF5aB1xDSVbau4VsWe+dQNzA0qv1LlXdC2dF6Q8=
k8s.io/klog v1.0.0/go.mod h1:4Bi6QPql/J/LkTDqv7R/cd3hPo4k2DG6Ptcz060Ez5I=
k8s.io/klog/v2 v2.0.0/go.mod h1:PBfzABfn139FHAV07az/IF9Wp1bkk3vpT2XSJ76fSDE=
k8s.io/klog/v2 v2.2.0/go.mod h1:Od+F08eJP+W3HUb4pSrPpgp9DGU4GzlpG/TmITuYh/Y=
k8s.io/klog/v2 v2.4.0/go.mod h1:Od+F08eJP+W3HUb4pSrPpgp9DGU4GzlpG/TmITuYh/Y=
k8s.io/klog/v2 v2.80.1 h1:atnLQ121W371wYYFawwYx1aEY2eUfs4l3J72wtgAwV4=
k8s.io/kube-openapi v0.0.0-20200805222855-6aeccd4b50c6/go.mod h1:UuqjUnNftUyPE5H64/qeyjQoUZhGpeFDVdxjTeEVN2o=
k8s.io/kube-openapi v0.0.0-20201113171705-d219536bb9fd/go.mod h1:WOJ3KddDSol4tAGcJo0Tvi+dK12EcqSLqcWsryKMpfM=
k8s.io/kubernetes v1.13.0/go.mod h1:ocZa8+6APFNC2tX1DZASIbocyYT5jHzqFVsY5aoB7Jk=


@ -11,6 +11,7 @@ import (
"github.com/containers/podman/v4/pkg/domain/entities"
"github.com/containers/podman/v4/pkg/domain/infra/abi"
docker "github.com/docker/docker/api/types"
"github.com/docker/docker/api/types/volume"
)
func GetDiskUsage(w http.ResponseWriter, r *http.Request) {
@ -64,9 +65,9 @@ func GetDiskUsage(w http.ResponseWriter, r *http.Request) {
ctnrs[i] = &t
}
vols := make([]*docker.Volume, len(df.Volumes))
vols := make([]*volume.Volume, len(df.Volumes))
for i, o := range df.Volumes {
t := docker.Volume{
t := volume.Volume{
CreatedAt: "",
Driver: "",
Labels: map[string]string{},
@ -75,7 +76,7 @@ func GetDiskUsage(w http.ResponseWriter, r *http.Request) {
Options: nil,
Scope: "local",
Status: nil,
UsageData: &docker.VolumeUsageData{
UsageData: &volume.UsageData{
RefCount: int64(o.Links),
Size: o.Size,
},


@ -52,14 +52,14 @@ func ListVolumes(w http.ResponseWriter, r *http.Request) {
utils.InternalServerError(w, err)
return
}
volumeConfigs := make([]*docker_api_types.Volume, 0, len(vols))
volumeConfigs := make([]*docker_api_types_volume.Volume, 0, len(vols))
for _, v := range vols {
mp, err := v.MountPoint()
if err != nil {
utils.InternalServerError(w, err)
return
}
config := docker_api_types.Volume{
config := docker_api_types_volume.Volume{
Name: v.Name(),
Driver: v.Driver(),
Mountpoint: mp,
@ -70,7 +70,7 @@ func ListVolumes(w http.ResponseWriter, r *http.Request) {
}
volumeConfigs = append(volumeConfigs, &config)
}
response := docker_api_types_volume.VolumeListOKBody{
response := docker_api_types_volume.ListResponse{
Volumes: volumeConfigs,
Warnings: []string{},
}
@ -91,7 +91,7 @@ func CreateVolume(w http.ResponseWriter, r *http.Request) {
return
}
// decode params from body
input := docker_api_types_volume.VolumeCreateBody{}
input := docker_api_types_volume.CreateOptions{}
if err := json.NewDecoder(r.Body).Decode(&input); err != nil {
utils.Error(w, http.StatusInternalServerError, fmt.Errorf("Decode(): %w", err))
return
@ -118,7 +118,7 @@ func CreateVolume(w http.ResponseWriter, r *http.Request) {
utils.InternalServerError(w, err)
return
}
response := docker_api_types.Volume{
response := docker_api_types_volume.Volume{
CreatedAt: existingVolume.CreatedTime().Format(time.RFC3339),
Driver: existingVolume.Driver(),
Labels: existingVolume.Labels(),
@ -163,7 +163,7 @@ func CreateVolume(w http.ResponseWriter, r *http.Request) {
utils.InternalServerError(w, err)
return
}
volResponse := docker_api_types.Volume{
volResponse := docker_api_types_volume.Volume{
Name: config.Name,
Driver: config.Driver,
Mountpoint: mp,
@ -193,7 +193,7 @@ func InspectVolume(w http.ResponseWriter, r *http.Request) {
utils.InternalServerError(w, err)
return
}
volResponse := docker_api_types.Volume{
volResponse := docker_api_types_volume.Volume{
Name: vol.Name(),
Driver: vol.Driver(),
Mountpoint: mp,


@ -240,7 +240,7 @@ type containersList struct {
// swagger:response
type volumeInspect struct {
// in:body
Body dockerAPI.Volume
Body dockerVolume.Volume
}
// Volume prune
@ -254,7 +254,7 @@ type volumePruneResponse struct {
// swagger:response
type volumeList struct {
// in:body
Body dockerVolume.VolumeListOKBody
Body dockerVolume.ListResponse
}
// Volume list


@ -7,7 +7,7 @@
t POST 'libpod/system/prune?volumes=true&all=true' params='' 200
## podman system df
t GET system/df 200 '{"LayersSize":0,"Images":[],"Containers":[],"Volumes":[],"BuildCache":[],"BuilderSize":0}'
t GET system/df 200 '{"LayersSize":0,"Images":[],"Containers":[],"Volumes":[],"BuildCache":[]}'
t GET libpod/system/df 200 '{"ImagesSize":0,"Images":[],"Containers":[],"Volumes":[]}'
# Create volume. We expect df to report this volume next invocation of system/df


@ -1,34 +0,0 @@
//go:build linux
// +build linux
/*
Copyright The containerd Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package sys
import "golang.org/x/sys/unix"
// EpollCreate1 is an alias for unix.EpollCreate1
// Deprecated: use golang.org/x/sys/unix.EpollCreate1
var EpollCreate1 = unix.EpollCreate1
// EpollCtl is an alias for unix.EpollCtl
// Deprecated: use golang.org/x/sys/unix.EpollCtl
var EpollCtl = unix.EpollCtl
// EpollWait is an alias for unix.EpollWait
// Deprecated: use golang.org/x/sys/unix.EpollWait
var EpollWait = unix.EpollWait


@ -1,35 +0,0 @@
//go:build !windows && !darwin
// +build !windows,!darwin
/*
Copyright The containerd Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package sys
import (
"os"
"path/filepath"
"strconv"
)
// GetOpenFds returns the number of open fds for the process provided by pid
func GetOpenFds(pid int) (int, error) {
dirs, err := os.ReadDir(filepath.Join("/proc", strconv.Itoa(pid), "fd"))
if err != nil {
return -1, err
}
return len(dirs), nil
}


@ -1,32 +0,0 @@
//go:build !windows
// +build !windows
/*
Copyright The containerd Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package sys
import "os"
// ForceRemoveAll on unix is just a wrapper for os.RemoveAll
func ForceRemoveAll(path string) error {
return os.RemoveAll(path)
}
// MkdirAllWithACL is a wrapper for os.MkdirAll on Unix systems.
func MkdirAllWithACL(path string, perm os.FileMode) error {
return os.MkdirAll(path, perm)
}


@ -1,345 +0,0 @@
/*
Copyright The containerd Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package sys
import (
"fmt"
"os"
"path/filepath"
"regexp"
"sort"
"strconv"
"strings"
"syscall"
"unsafe"
"github.com/Microsoft/hcsshim"
"golang.org/x/sys/windows"
)
const (
// SddlAdministratorsLocalSystem is local administrators plus NT AUTHORITY\System
SddlAdministratorsLocalSystem = "D:P(A;OICI;GA;;;BA)(A;OICI;GA;;;SY)"
)
// MkdirAllWithACL is a wrapper for MkdirAll that creates a directory
// ACL'd for Builtin Administrators and Local System.
func MkdirAllWithACL(path string, perm os.FileMode) error {
return mkdirall(path, true)
}
// MkdirAll implementation that is volume path aware for Windows. It can be used
// as a drop-in replacement for os.MkdirAll()
func MkdirAll(path string, _ os.FileMode) error {
return mkdirall(path, false)
}
// mkdirall is a custom version of os.MkdirAll modified for use on Windows
// so that it is both volume path aware, and can create a directory with
// a DACL.
func mkdirall(path string, adminAndLocalSystem bool) error {
if re := regexp.MustCompile(`^\\\\\?\\Volume{[a-z0-9-]+}$`); re.MatchString(path) {
return nil
}
// The rest of this method is largely copied from os.MkdirAll and should be kept
// as-is to ensure compatibility.
// Fast path: if we can tell whether path is a directory or file, stop with success or error.
dir, err := os.Stat(path)
if err == nil {
if dir.IsDir() {
return nil
}
return &os.PathError{
Op: "mkdir",
Path: path,
Err: syscall.ENOTDIR,
}
}
// Slow path: make sure parent exists and then call Mkdir for path.
i := len(path)
for i > 0 && os.IsPathSeparator(path[i-1]) { // Skip trailing path separator.
i--
}
j := i
for j > 0 && !os.IsPathSeparator(path[j-1]) { // Scan backward over element.
j--
}
if j > 1 {
// Create parent
err = mkdirall(path[0:j-1], adminAndLocalSystem)
if err != nil {
return err
}
}
// Parent now exists; invoke os.Mkdir or mkdirWithACL and use its result.
if adminAndLocalSystem {
err = mkdirWithACL(path)
} else {
err = os.Mkdir(path, 0)
}
if err != nil {
// Handle arguments like "foo/." by
// double-checking that directory doesn't exist.
dir, err1 := os.Lstat(path)
if err1 == nil && dir.IsDir() {
return nil
}
return err
}
return nil
}
// mkdirWithACL creates a new directory. If there is an error, it will be of
// type *PathError. .
//
// This is a modified and combined version of os.Mkdir and windows.Mkdir
// in golang to cater for creating a directory am ACL permitting full
// access, with inheritance, to any subfolder/file for Built-in Administrators
// and Local System.
func mkdirWithACL(name string) error {
sa := windows.SecurityAttributes{Length: 0}
sd, err := windows.SecurityDescriptorFromString(SddlAdministratorsLocalSystem)
if err != nil {
return &os.PathError{Op: "mkdir", Path: name, Err: err}
}
sa.Length = uint32(unsafe.Sizeof(sa))
sa.InheritHandle = 1
sa.SecurityDescriptor = sd
namep, err := windows.UTF16PtrFromString(name)
if err != nil {
return &os.PathError{Op: "mkdir", Path: name, Err: err}
}
e := windows.CreateDirectory(namep, &sa)
if e != nil {
return &os.PathError{Op: "mkdir", Path: name, Err: e}
}
return nil
}
// IsAbs is a platform-specific wrapper for filepath.IsAbs. On Windows,
// golang filepath.IsAbs does not consider a path \windows\system32 as absolute
// as it doesn't start with a drive-letter/colon combination. However, in
// docker we need to verify things such as WORKDIR /windows/system32 in
// a Dockerfile (which gets translated to \windows\system32 when being processed
// by the daemon. This SHOULD be treated as absolute from a docker processing
// perspective.
func IsAbs(path string) bool {
if !filepath.IsAbs(path) {
if !strings.HasPrefix(path, string(os.PathSeparator)) {
return false
}
}
return true
}
// The origin of the functions below here are the golang OS and windows packages,
// slightly modified to only cope with files, not directories due to the
// specific use case.
//
// The alteration is to allow a file on Windows to be opened with
// FILE_FLAG_SEQUENTIAL_SCAN (particular for docker load), to avoid eating
// the standby list, particularly when accessing large files such as layer.tar.
// CreateSequential creates the named file with mode 0666 (before umask), truncating
// it if it already exists. If successful, methods on the returned
// File can be used for I/O; the associated file descriptor has mode
// O_RDWR.
// If there is an error, it will be of type *PathError.
func CreateSequential(name string) (*os.File, error) {
return OpenFileSequential(name, os.O_RDWR|os.O_CREATE|os.O_TRUNC, 0)
}
// OpenSequential opens the named file for reading. If successful, methods on
// the returned file can be used for reading; the associated file
// descriptor has mode O_RDONLY.
// If there is an error, it will be of type *PathError.
func OpenSequential(name string) (*os.File, error) {
return OpenFileSequential(name, os.O_RDONLY, 0)
}
// OpenFileSequential is the generalized open call; most users will use Open
// or Create instead.
// If there is an error, it will be of type *PathError.
func OpenFileSequential(name string, flag int, _ os.FileMode) (*os.File, error) {
if name == "" {
return nil, &os.PathError{Op: "open", Path: name, Err: syscall.ENOENT}
}
r, errf := windowsOpenFileSequential(name, flag, 0)
if errf == nil {
return r, nil
}
return nil, &os.PathError{Op: "open", Path: name, Err: errf}
}
func windowsOpenFileSequential(name string, flag int, _ os.FileMode) (file *os.File, err error) {
r, e := windowsOpenSequential(name, flag|windows.O_CLOEXEC, 0)
if e != nil {
return nil, e
}
return os.NewFile(uintptr(r), name), nil
}
func makeInheritSa() *windows.SecurityAttributes {
var sa windows.SecurityAttributes
sa.Length = uint32(unsafe.Sizeof(sa))
sa.InheritHandle = 1
return &sa
}
func windowsOpenSequential(path string, mode int, _ uint32) (fd windows.Handle, err error) {
if len(path) == 0 {
return windows.InvalidHandle, windows.ERROR_FILE_NOT_FOUND
}
pathp, err := windows.UTF16PtrFromString(path)
if err != nil {
return windows.InvalidHandle, err
}
var access uint32
switch mode & (windows.O_RDONLY | windows.O_WRONLY | windows.O_RDWR) {
case windows.O_RDONLY:
access = windows.GENERIC_READ
case windows.O_WRONLY:
access = windows.GENERIC_WRITE
case windows.O_RDWR:
access = windows.GENERIC_READ | windows.GENERIC_WRITE
}
if mode&windows.O_CREAT != 0 {
access |= windows.GENERIC_WRITE
}
if mode&windows.O_APPEND != 0 {
access &^= windows.GENERIC_WRITE
access |= windows.FILE_APPEND_DATA
}
sharemode := uint32(windows.FILE_SHARE_READ | windows.FILE_SHARE_WRITE)
var sa *windows.SecurityAttributes
if mode&windows.O_CLOEXEC == 0 {
sa = makeInheritSa()
}
var createmode uint32
switch {
case mode&(windows.O_CREAT|windows.O_EXCL) == (windows.O_CREAT | windows.O_EXCL):
createmode = windows.CREATE_NEW
case mode&(windows.O_CREAT|windows.O_TRUNC) == (windows.O_CREAT | windows.O_TRUNC):
createmode = windows.CREATE_ALWAYS
case mode&windows.O_CREAT == windows.O_CREAT:
createmode = windows.OPEN_ALWAYS
case mode&windows.O_TRUNC == windows.O_TRUNC:
createmode = windows.TRUNCATE_EXISTING
default:
createmode = windows.OPEN_EXISTING
}
// Use FILE_FLAG_SEQUENTIAL_SCAN rather than FILE_ATTRIBUTE_NORMAL as implemented in golang.
// https://msdn.microsoft.com/en-us/library/windows/desktop/aa363858(v=vs.85).aspx
const fileFlagSequentialScan = 0x08000000 // FILE_FLAG_SEQUENTIAL_SCAN
h, e := windows.CreateFile(pathp, access, sharemode, sa, createmode, fileFlagSequentialScan, 0)
return h, e
}
// ForceRemoveAll is the same as os.RemoveAll, but is aware of io.containerd.snapshotter.v1.windows
// and uses hcsshim to unmount and delete container layers contained therein, in the correct order,
// when passed a containerd root data directory (i.e. the `--root` directory for containerd).
func ForceRemoveAll(path string) error {
// snapshots/windows/windows.go init()
const snapshotPlugin = "io.containerd.snapshotter.v1" + "." + "windows"
// snapshots/windows/windows.go NewSnapshotter()
snapshotDir := filepath.Join(path, snapshotPlugin, "snapshots")
if stat, err := os.Stat(snapshotDir); err == nil && stat.IsDir() {
if err := cleanupWCOWLayers(snapshotDir); err != nil {
return fmt.Errorf("failed to cleanup WCOW layers in %s: %w", snapshotDir, err)
}
}
return os.RemoveAll(path)
}
func cleanupWCOWLayers(root string) error {
// See snapshots/windows/windows.go getSnapshotDir()
var layerNums []int
var rmLayerNums []int
if err := filepath.Walk(root, func(path string, info os.FileInfo, err error) error {
if path != root && info.IsDir() {
name := filepath.Base(path)
if strings.HasPrefix(name, "rm-") {
layerNum, err := strconv.Atoi(strings.TrimPrefix(name, "rm-"))
if err != nil {
return err
}
rmLayerNums = append(rmLayerNums, layerNum)
} else {
layerNum, err := strconv.Atoi(name)
if err != nil {
return err
}
layerNums = append(layerNums, layerNum)
}
return filepath.SkipDir
}
return nil
}); err != nil {
return err
}
sort.Sort(sort.Reverse(sort.IntSlice(rmLayerNums)))
for _, rmLayerNum := range rmLayerNums {
if err := cleanupWCOWLayer(filepath.Join(root, "rm-"+strconv.Itoa(rmLayerNum))); err != nil {
return err
}
}
sort.Sort(sort.Reverse(sort.IntSlice(layerNums)))
for _, layerNum := range layerNums {
if err := cleanupWCOWLayer(filepath.Join(root, strconv.Itoa(layerNum))); err != nil {
return err
}
}
return nil
}
func cleanupWCOWLayer(layerPath string) error {
info := hcsshim.DriverInfo{
HomeDir: filepath.Dir(layerPath),
}
// ERROR_DEV_NOT_EXIST is returned if the layer is not currently prepared or activated.
// ERROR_FLT_INSTANCE_NOT_FOUND is returned if the layer is currently activated but not prepared.
if err := hcsshim.UnprepareLayer(info, filepath.Base(layerPath)); err != nil {
if hcserror, ok := err.(*hcsshim.HcsError); !ok || (hcserror.Err != windows.ERROR_DEV_NOT_EXIST && hcserror.Err != syscall.Errno(windows.ERROR_FLT_INSTANCE_NOT_FOUND)) {
return fmt.Errorf("failed to unprepare %s: %w", layerPath, err)
}
}
if err := hcsshim.DeactivateLayer(info, filepath.Base(layerPath)); err != nil {
return fmt.Errorf("failed to deactivate %s: %w", layerPath, err)
}
if err := hcsshim.DestroyLayer(info, filepath.Base(layerPath)); err != nil {
return fmt.Errorf("failed to destroy %s: %w", layerPath, err)
}
return nil
}


@ -1,82 +0,0 @@
/*
Copyright The containerd Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package sys
import (
"fmt"
"os"
"strconv"
"strings"
"github.com/containerd/containerd/pkg/userns"
"golang.org/x/sys/unix"
)
const (
// OOMScoreAdjMin is from OOM_SCORE_ADJ_MIN https://github.com/torvalds/linux/blob/v5.10/include/uapi/linux/oom.h#L9
OOMScoreAdjMin = -1000
// OOMScoreAdjMax is from OOM_SCORE_ADJ_MAX https://github.com/torvalds/linux/blob/v5.10/include/uapi/linux/oom.h#L10
OOMScoreAdjMax = 1000
)
// AdjustOOMScore sets the oom score for the provided pid. If the provided score
// is out of range (-1000 - 1000), it is clipped to the min/max value.
func AdjustOOMScore(pid, score int) error {
if score > OOMScoreAdjMax {
score = OOMScoreAdjMax
} else if score < OOMScoreAdjMin {
score = OOMScoreAdjMin
}
return SetOOMScore(pid, score)
}
// SetOOMScore sets the oom score for the provided pid
func SetOOMScore(pid, score int) error {
if score > OOMScoreAdjMax || score < OOMScoreAdjMin {
return fmt.Errorf("value out of range (%d): OOM score must be between %d and %d", score, OOMScoreAdjMin, OOMScoreAdjMax)
}
path := fmt.Sprintf("/proc/%d/oom_score_adj", pid)
f, err := os.OpenFile(path, os.O_WRONLY, 0)
if err != nil {
return err
}
defer f.Close()
if _, err = f.WriteString(strconv.Itoa(score)); err != nil {
if os.IsPermission(err) && (!runningPrivileged() || userns.RunningInUserNS()) {
return nil
}
return err
}
return nil
}
// GetOOMScoreAdj gets the oom score for a process. It returns 0 (zero) if either
// no oom score is set, or a sore is set to 0.
func GetOOMScoreAdj(pid int) (int, error) {
path := fmt.Sprintf("/proc/%d/oom_score_adj", pid)
data, err := os.ReadFile(path)
if err != nil {
return 0, err
}
return strconv.Atoi(strings.TrimSpace(string(data)))
}
// runningPrivileged returns true if the effective user ID of the
// calling process is 0
func runningPrivileged() bool {
return unix.Geteuid() == 0
}


@ -1,49 +0,0 @@
//go:build !linux
// +build !linux
/*
Copyright The containerd Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package sys
const (
// OOMScoreMaxKillable is not implemented on non Linux
OOMScoreMaxKillable = 0
// OOMScoreAdjMax is not implemented on non Linux
OOMScoreAdjMax = 0
)
// AdjustOOMScore sets the oom score for the provided pid. If the provided score
// is out of range (-1000 - 1000), it is clipped to the min/max value.
//
// Not implemented on Windows
func AdjustOOMScore(pid, score int) error {
return nil
}
// SetOOMScore sets the oom score for the process
//
// Not implemented on Windows
func SetOOMScore(pid, score int) error {
return nil
}
// GetOOMScoreAdj gets the oom score for a process
//
// Not implemented on Windows
func GetOOMScoreAdj(pid int) (int, error) {
return 0, nil
}


@ -1,81 +0,0 @@
//go:build !windows
// +build !windows
/*
Copyright The containerd Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package sys
import (
"fmt"
"net"
"os"
"path/filepath"
"golang.org/x/sys/unix"
)
// CreateUnixSocket creates a unix socket and returns the listener
func CreateUnixSocket(path string) (net.Listener, error) {
// BSDs have a 104 limit
if len(path) > 104 {
return nil, fmt.Errorf("%q: unix socket path too long (> 104)", path)
}
if err := os.MkdirAll(filepath.Dir(path), 0660); err != nil {
return nil, err
}
if err := unix.Unlink(path); err != nil && !os.IsNotExist(err) {
return nil, err
}
return net.Listen("unix", path)
}
// GetLocalListener returns a listener out of a unix socket.
func GetLocalListener(path string, uid, gid int) (net.Listener, error) {
// Ensure parent directory is created
if err := mkdirAs(filepath.Dir(path), uid, gid); err != nil {
return nil, err
}
l, err := CreateUnixSocket(path)
if err != nil {
return l, err
}
if err := os.Chmod(path, 0660); err != nil {
l.Close()
return nil, err
}
if err := os.Chown(path, uid, gid); err != nil {
l.Close()
return nil, err
}
return l, nil
}
func mkdirAs(path string, uid, gid int) error {
if _, err := os.Stat(path); !os.IsNotExist(err) {
return err
}
if err := os.MkdirAll(path, 0770); err != nil {
return err
}
return os.Chown(path, uid, gid)
}


@ -1,30 +0,0 @@
/*
Copyright The containerd Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package sys
import (
"net"
"github.com/Microsoft/go-winio"
)
// GetLocalListener returns a Listernet out of a named pipe.
// `path` must be of the form of `\\.\pipe\<pipename>`
// (see https://msdn.microsoft.com/en-us/library/windows/desktop/aa365150)
func GetLocalListener(path string, uid, gid int) (net.Listener, error) {
return winio.ListenPipe(path, nil)
}


@ -1,23 +0,0 @@
/*
Copyright The containerd Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package sys
import "github.com/containerd/containerd/pkg/userns"
// RunningInUserNS detects whether we are currently running in a user namespace.
// Deprecated: use github.com/containerd/containerd/pkg/userns.RunningInUserNS instead.
var RunningInUserNS = userns.RunningInUserNS


@ -10,6 +10,7 @@ import (
compressiontypes "github.com/containers/image/v5/pkg/compression/types"
"github.com/containers/image/v5/types"
"github.com/sirupsen/logrus"
"golang.org/x/exp/maps"
)
// bpDetectCompressionStepData contains data that the copy pipeline needs about the “detect compression” step.
@ -199,7 +200,9 @@ func (ic *imageCopier) bpcDecompressCompressed(stream *sourceStream, detected bp
}
// bpcPreserveOriginal returns a *bpCompressionStepData for not changing the original blob.
func (ic *imageCopier) bpcPreserveOriginal(stream *sourceStream, detected bpDetectCompressionStepData,
// This does not change the sourceStream parameter; we include it for symmetry with other
// pipeline steps.
func (ic *imageCopier) bpcPreserveOriginal(_ *sourceStream, detected bpDetectCompressionStepData,
layerCompressionChangeSupported bool) *bpCompressionStepData {
logrus.Debugf("Using original blob without modification")
// Remember if the original blob was compressed, and if so how, so that if
@ -232,9 +235,7 @@ func (d *bpCompressionStepData) updateCompressionEdits(operation *types.LayerCom
if *annotations == nil {
*annotations = map[string]string{}
}
for k, v := range d.uploadedAnnotations {
(*annotations)[k] = v
}
maps.Copy(*annotations, d.uploadedAnnotations)
}
// recordValidatedBlobData updates b.blobInfoCache with data about the created uploadedInfo adnd the original srcInfo.


@ -17,8 +17,10 @@ import (
"github.com/containers/image/v5/internal/image"
"github.com/containers/image/v5/internal/imagedestination"
"github.com/containers/image/v5/internal/imagesource"
internalManifest "github.com/containers/image/v5/internal/manifest"
"github.com/containers/image/v5/internal/pkg/platform"
"github.com/containers/image/v5/internal/private"
"github.com/containers/image/v5/internal/set"
"github.com/containers/image/v5/manifest"
"github.com/containers/image/v5/pkg/blobinfocache"
"github.com/containers/image/v5/pkg/compression"
@ -32,6 +34,7 @@ import (
imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1"
"github.com/sirupsen/logrus"
"github.com/vbauerster/mpb/v8"
"golang.org/x/exp/slices"
"golang.org/x/sync/semaphore"
"golang.org/x/term"
)
@ -143,7 +146,7 @@ type Options struct {
// Preserve digests, and fail if we cannot.
PreserveDigests bool
// manifest MIME type of image set by user. "" is default and means use the autodetection to the the manifest MIME type
// manifest MIME type of image set by user. "" is default and means use the autodetection to the manifest MIME type
ForceManifestMIMEType string
ImageListSelection ImageListSelection // set to either CopySystemImage (the default), CopyAllImages, or CopySpecificImages to control which instances we copy when the source reference is a list; ignored if the source reference is not a list
Instances []digest.Digest // if ImageListSelection is CopySpecificImages, copy only these instances and the list itself
@ -315,7 +318,7 @@ func Image(ctx context.Context, policyContext *signature.PolicyContext, destRef,
if err != nil {
return nil, fmt.Errorf("reading manifest for %s: %w", transports.ImageName(srcRef), err)
}
manifestList, err := manifest.ListFromBlob(mfest, manifestType)
manifestList, err := internalManifest.ListFromBlob(mfest, manifestType)
if err != nil {
return nil, fmt.Errorf("parsing primary manifest as list for %s: %w", transports.ImageName(srcRef), err)
}
@ -370,12 +373,7 @@ func supportsMultipleImages(dest types.ImageDestination) bool {
// Anything goes!
return true
}
for _, mtype := range mtypes {
if manifest.MIMETypeIsMultiImage(mtype) {
return true
}
}
return false
return slices.ContainsFunc(mtypes, manifest.MIMETypeIsMultiImage)
}
// compareImageDestinationManifestEqual compares the `src` and `dest` image manifests (reading the manifest from the
@ -420,11 +418,11 @@ func (c *copier) copyMultipleImages(ctx context.Context, policyContext *signatur
if err != nil {
return nil, fmt.Errorf("reading manifest list: %w", err)
}
originalList, err := manifest.ListFromBlob(manifestList, manifestType)
originalList, err := internalManifest.ListFromBlob(manifestList, manifestType)
if err != nil {
return nil, fmt.Errorf("parsing manifest list %q: %w", string(manifestList), err)
}
updatedList := originalList.Clone()
updatedList := originalList.CloneInternal()
sigs, err := c.sourceSignatures(ctx, unparsedToplevel, options,
"Getting image list signatures",
@ -491,24 +489,16 @@ func (c *copier) copyMultipleImages(ctx context.Context, policyContext *signatur
updates := make([]manifest.ListUpdate, len(instanceDigests))
instancesCopied := 0
for i, instanceDigest := range instanceDigests {
if options.ImageListSelection == CopySpecificImages {
skip := true
for _, instance := range options.Instances {
if instance == instanceDigest {
skip = false
break
}
}
if skip {
update, err := updatedList.Instance(instanceDigest)
if err != nil {
return nil, err
}
logrus.Debugf("Skipping instance %s (%d/%d)", instanceDigest, i+1, len(instanceDigests))
// Record the digest/size/type of the manifest that we didn't copy.
updates[i] = update
continue
if options.ImageListSelection == CopySpecificImages &&
!slices.Contains(options.Instances, instanceDigest) {
update, err := updatedList.Instance(instanceDigest)
if err != nil {
return nil, err
}
logrus.Debugf("Skipping instance %s (%d/%d)", instanceDigest, i+1, len(instanceDigests))
// Record the digest/size/type of the manifest that we didn't copy.
updates[i] = update
continue
}
logrus.Debugf("Copying instance %s (%d/%d)", instanceDigest, i+1, len(instanceDigests))
c.Printf("Copying image %s (%d/%d)\n", instanceDigest, instancesCopied+1, imagesToCopy)
@ -536,7 +526,7 @@ func (c *copier) copyMultipleImages(ctx context.Context, policyContext *signatur
c.Printf("Writing manifest list to image destination\n")
var errs []string
for _, thisListType := range append([]string{selectedListType}, otherManifestMIMETypeCandidates...) {
attemptedList := updatedList
var attemptedList internalManifest.ListPublic = updatedList
logrus.Debugf("Trying to use manifest list type %s…", thisListType)
@ -823,7 +813,7 @@ func (c *copier) copyOneImage(ctx context.Context, policyContext *signature.Poli
// has a built-in list of functions/methods (whatever object they are for)
// which have their format strings checked; for other names we would have
// to pass a parameter to every (go tool vet) invocation.
func (c *copier) Printf(format string, a ...interface{}) {
func (c *copier) Printf(format string, a ...any) {
fmt.Fprintf(c.reportWriter, format, a...)
}
@ -948,20 +938,20 @@ func (ic *imageCopier) copyLayers(ctx context.Context) error {
data[index] = cld
}
// Create layer Encryption map
encLayerBitmap := map[int]bool{}
// Decide which layers to encrypt
layersToEncrypt := set.New[int]()
var encryptAll bool
if ic.ociEncryptLayers != nil {
encryptAll = len(*ic.ociEncryptLayers) == 0
totalLayers := len(srcInfos)
for _, l := range *ic.ociEncryptLayers {
// if layer is negative, it is reverse indexed.
encLayerBitmap[(totalLayers+l)%totalLayers] = true
layersToEncrypt.Add((totalLayers + l) % totalLayers)
}
if encryptAll {
for i := 0; i < len(srcInfos); i++ {
encLayerBitmap[i] = true
layersToEncrypt.Add(i)
}
}
}
@ -980,7 +970,7 @@ func (ic *imageCopier) copyLayers(ctx context.Context) error {
return fmt.Errorf("copying layer: %w", err)
}
copyGroup.Add(1)
go copyLayerHelper(i, srcLayer, encLayerBitmap[i], progressPool, ic.c.rawSource.Reference().DockerReference())
go copyLayerHelper(i, srcLayer, layersToEncrypt.Contains(i), progressPool, ic.c.rawSource.Reference().DockerReference())
}
// A call to copyGroup.Wait() is done at this point by the defer above.
@ -1013,15 +1003,9 @@ func (ic *imageCopier) copyLayers(ctx context.Context) error {
// layerDigestsDiffer returns true iff the digests in a and b differ (ignoring sizes and possible other fields)
func layerDigestsDiffer(a, b []types.BlobInfo) bool {
if len(a) != len(b) {
return true
}
for i := range a {
if a[i].Digest != b[i].Digest {
return true
}
}
return false
return !slices.EqualFunc(a, b, func(a, b types.BlobInfo) bool {
return a.Digest == b.Digest
})
}
// copyUpdatedConfigAndManifest updates the image per ic.manifestUpdates, if necessary,


@ -7,6 +7,8 @@ import (
"github.com/containers/image/v5/types"
"github.com/containers/ocicrypt"
imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1"
"golang.org/x/exp/maps"
"golang.org/x/exp/slices"
)
// isOciEncrypted returns a bool indicating if a mediatype is encrypted
@ -18,12 +20,9 @@ func isOciEncrypted(mediatype string) bool {
// isEncrypted checks if an image is encrypted
func isEncrypted(i types.Image) bool {
layers := i.LayerInfos()
for _, l := range layers {
if isOciEncrypted(l.MediaType) {
return true
}
}
return false
return slices.ContainsFunc(layers, func(l types.BlobInfo) bool {
return isOciEncrypted(l.MediaType)
})
}
// bpDecryptionStepData contains data that the copy pipeline needs about the decryption step.
@ -47,11 +46,9 @@ func (c *copier) blobPipelineDecryptionStep(stream *sourceStream, srcInfo types.
stream.reader = reader
stream.info.Digest = decryptedDigest
stream.info.Size = -1
for k := range stream.info.Annotations {
if strings.HasPrefix(k, "org.opencontainers.image.enc") {
delete(stream.info.Annotations, k)
}
}
maps.DeleteFunc(stream.info.Annotations, func(k string, _ string) bool {
return strings.HasPrefix(k, "org.opencontainers.image.enc")
})
return &bpDecryptionStepData{
decrypting: true,
}, nil
@ -122,8 +119,6 @@ func (d *bpEncryptionStepData) updateCryptoOperationAndAnnotations(operation *ty
if *annotations == nil {
*annotations = map[string]string{}
}
for k, v := range encryptAnnotations {
(*annotations)[k] = v
}
maps.Copy(*annotations, encryptAnnotations)
return nil
}


@ -6,9 +6,11 @@ import (
"fmt"
"strings"
"github.com/containers/image/v5/internal/set"
"github.com/containers/image/v5/manifest"
"github.com/containers/image/v5/types"
"github.com/sirupsen/logrus"
"golang.org/x/exp/slices"
)
// preferredManifestMIMETypes lists manifest MIME types in order of our preference, if we can't use the original manifest and need to convert.
@ -19,7 +21,7 @@ var preferredManifestMIMETypes = []string{manifest.DockerV2Schema2MediaType, man
// orderedSet is a list of strings (MIME types or platform descriptors in our case), with each string appearing at most once.
type orderedSet struct {
list []string
included map[string]struct{}
included *set.Set[string]
}
// newOrderedSet creates a correctly initialized orderedSet.
@ -27,15 +29,15 @@ type orderedSet struct {
func newOrderedSet() *orderedSet {
return &orderedSet{
list: []string{},
included: map[string]struct{}{},
included: set.New[string](),
}
}
// append adds s to the end of os, only if it is not included already.
func (os *orderedSet) append(s string) {
if _, ok := os.included[s]; !ok {
if !os.included.Contains(s) {
os.list = append(os.list, s)
os.included[s] = struct{}{}
os.included.Add(s)
}
}
@ -80,10 +82,10 @@ func determineManifestConversion(in determineManifestConversionInputs) (manifest
otherMIMETypeCandidates: []string{},
}, nil
}
supportedByDest := map[string]struct{}{}
supportedByDest := set.New[string]()
for _, t := range destSupportedManifestMIMETypes {
if !in.requiresOCIEncryption || manifest.MIMETypeSupportsEncryption(t) {
supportedByDest[t] = struct{}{}
supportedByDest.Add(t)
}
}
@ -96,7 +98,7 @@ func determineManifestConversion(in determineManifestConversionInputs) (manifest
prioritizedTypes := newOrderedSet()
// First of all, prefer to keep the original manifest unmodified.
if _, ok := supportedByDest[srcType]; ok {
if supportedByDest.Contains(srcType) {
prioritizedTypes.append(srcType)
}
if in.cannotModifyManifestReason != "" {
@ -113,7 +115,7 @@ func determineManifestConversion(in determineManifestConversionInputs) (manifest
// Then use our list of preferred types.
for _, t := range preferredManifestMIMETypes {
if _, ok := supportedByDest[t]; ok {
if supportedByDest.Contains(t) {
prioritizedTypes.append(t)
}
}
@ -166,11 +168,8 @@ func (c *copier) determineListConversion(currentListMIMEType string, destSupport
prioritizedTypes := newOrderedSet()
// The first priority is the current type, if it's in the list, since that lets us avoid a
// conversion that isn't strictly necessary.
for _, t := range destSupportedMIMETypes {
if t == currentListMIMEType {
prioritizedTypes.append(currentListMIMEType)
break
}
if slices.Contains(destSupportedMIMETypes, currentListMIMEType) {
prioritizedTypes.append(currentListMIMEType)
}
// Pick out the other list types that we support.
for _, t := range destSupportedMIMETypes {


@ -120,7 +120,7 @@ func (bar *progressBar) mark100PercentComplete() {
bar.SetCurrent(bar.originalSize) // This triggers the completion condition.
} else {
// -1 = unknown size
// 0 is somewhat of a a special case: Unlike c/image, where 0 is a definite known
// 0 is somewhat of a special case: Unlike c/image, where 0 is a definite known
// size (possible at least in theory), in mpb, zero-sized progress bars are treated
// as unknown size, in particular they are not configured to be marked as
// complete on bar.Current() reaching bar.total (because that would happen already


@ -8,9 +8,9 @@ import (
"github.com/containers/image/v5/internal/imagesource/impl"
"github.com/containers/image/v5/internal/imagesource/stubs"
"github.com/containers/image/v5/internal/manifest"
"github.com/containers/image/v5/internal/private"
"github.com/containers/image/v5/internal/signature"
"github.com/containers/image/v5/manifest"
"github.com/containers/image/v5/types"
"github.com/opencontainers/go-digest"
)


@ -89,7 +89,7 @@ func (ref dirReference) Transport() types.ImageTransport {
// StringWithinTransport returns a string representation of the reference, which MUST be such that
// reference.Transport().ParseReference(reference.StringWithinTransport()) returns an equivalent reference.
// NOTE: The returned string is not promised to be equal to the original input to ParseReference;
// e.g. default attribute values omitted by the user may be filled in in the return value, or vice versa.
// e.g. default attribute values omitted by the user may be filled in the return value, or vice versa.
// WARNING: Do not use the return value in the UI to describe an image, it does not contain the Transport().Name() prefix.
func (ref dirReference) StringWithinTransport() string {
return ref.path


@ -9,7 +9,7 @@ import (
// ResolvePathToFullyExplicit returns the input path converted to an absolute, no-symlinks, cleaned up path.
// To do so, all elements of the input path must exist; as a special case, the final component may be
// a non-existent name (but not a symlink pointing to a non-existent name)
// This is intended as a a helper for implementations of types.ImageReference.PolicyConfigurationIdentity etc.
// This is intended as a helper for implementations of types.ImageReference.PolicyConfigurationIdentity etc.
func ResolvePathToFullyExplicit(path string) (string, error) {
switch _, err := os.Lstat(path); {
case err == nil:


@ -62,24 +62,23 @@ func ParseReference(refString string) (types.ImageReference, error) {
return nil, fmt.Errorf("docker-archive reference %s isn't of the form <path>[:<reference>]", refString)
}
parts := strings.SplitN(refString, ":", 2)
path := parts[0]
path, tagOrIndex, gotTagOrIndex := strings.Cut(refString, ":")
var nt reference.NamedTagged
sourceIndex := -1
if len(parts) == 2 {
if gotTagOrIndex {
// A :tag or :@index was specified.
if len(parts[1]) > 0 && parts[1][0] == '@' {
i, err := strconv.Atoi(parts[1][1:])
if len(tagOrIndex) > 0 && tagOrIndex[0] == '@' {
i, err := strconv.Atoi(tagOrIndex[1:])
if err != nil {
return nil, fmt.Errorf("Invalid source index %s: %w", parts[1], err)
return nil, fmt.Errorf("Invalid source index %s: %w", tagOrIndex, err)
}
if i < 0 {
return nil, fmt.Errorf("Invalid source index @%d: must not be negative", i)
}
sourceIndex = i
} else {
ref, err := reference.ParseNormalizedNamed(parts[1])
ref, err := reference.ParseNormalizedNamed(tagOrIndex)
if err != nil {
return nil, fmt.Errorf("docker-archive parsing reference: %w", err)
}
@ -137,7 +136,7 @@ func (ref archiveReference) Transport() types.ImageTransport {
// StringWithinTransport returns a string representation of the reference, which MUST be such that
// reference.Transport().ParseReference(reference.StringWithinTransport()) returns an equivalent reference.
// NOTE: The returned string is not promised to be equal to the original input to ParseReference;
// e.g. default attribute values omitted by the user may be filled in in the return value, or vice versa.
// e.g. default attribute values omitted by the user may be filled in the return value, or vice versa.
// WARNING: Do not use the return value in the UI to describe an image, it does not contain the Transport().Name() prefix.
func (ref archiveReference) StringWithinTransport() string {
switch {

View File

@ -0,0 +1,234 @@
package docker
import (
"context"
"errors"
"fmt"
"io"
"math"
"math/rand"
"net/http"
"net/url"
"strconv"
"strings"
"syscall"
"time"
"github.com/sirupsen/logrus"
)
// bodyReaderMinimumProgress is the minimum progress we want to see before we retry
const bodyReaderMinimumProgress = 1 * 1024 * 1024
// bodyReader is an io.ReadCloser returned by dockerImageSource.GetBlob,
// which can transparently resume some (very limited) kinds of aborted connections.
type bodyReader struct {
ctx context.Context
c *dockerClient
path string // path to pass to makeRequest to retry
logURL *url.URL // a string to use in error messages
body io.ReadCloser // The currently open connection we use to read data, or nil if there is nothing to read from / close.
lastRetryOffset int64
offset int64 // Current offset within the blob
firstConnectionTime time.Time
lastSuccessTime time.Time // time.Time{} if N/A
}
// newBodyReader creates a bodyReader for request path in c.
// firstBody is an already correctly opened body for the blob, returning the full blob from the start.
// If reading from firstBody fails, bodyReader may heuristically decide to resume.
func newBodyReader(ctx context.Context, c *dockerClient, path string, firstBody io.ReadCloser) (io.ReadCloser, error) {
logURL, err := c.resolveRequestURL(path)
if err != nil {
return nil, err
}
res := &bodyReader{
ctx: ctx,
c: c,
path: path,
logURL: logURL,
body: firstBody,
lastRetryOffset: 0,
offset: 0,
firstConnectionTime: time.Now(),
}
return res, nil
}
// parseDecimalInString ensures that s[start:] starts with a non-negative decimal number, and returns that number and the offset after the number.
func parseDecimalInString(s string, start int) (int64, int, error) {
i := start
for i < len(s) && s[i] >= '0' && s[i] <= '9' {
i++
}
if i == start {
return -1, -1, errors.New("missing decimal number")
}
v, err := strconv.ParseInt(s[start:i], 10, 64)
if err != nil {
return -1, -1, fmt.Errorf("parsing number: %w", err)
}
return v, i, nil
}
// parseExpectedChar ensures that s[pos] is the expected byte, and returns the offset after it.
func parseExpectedChar(s string, pos int, expected byte) (int, error) {
if pos == len(s) || s[pos] != expected {
return -1, fmt.Errorf("missing expected %q", expected)
}
return pos + 1, nil
}
// parseContentRange ensures that res contains a Content-Range header with a byte range, and returns (first, last, completeLength) on success. completeLength can be -1 (unknown).
func parseContentRange(res *http.Response) (int64, int64, int64, error) {
hdrs := res.Header.Values("Content-Range")
switch len(hdrs) {
case 0:
return -1, -1, -1, errors.New("missing Content-Range: header")
case 1:
break
default:
return -1, -1, -1, fmt.Errorf("ambiguous Content-Range:, %d header values", len(hdrs))
}
hdr := hdrs[0]
expectedPrefix := "bytes "
if !strings.HasPrefix(hdr, expectedPrefix) {
return -1, -1, -1, fmt.Errorf("invalid Content-Range: %q, missing prefix %q", hdr, expectedPrefix)
}
first, pos, err := parseDecimalInString(hdr, len(expectedPrefix))
if err != nil {
return -1, -1, -1, fmt.Errorf("invalid Content-Range: %q, parsing first-pos: %w", hdr, err)
}
pos, err = parseExpectedChar(hdr, pos, '-')
if err != nil {
return -1, -1, -1, fmt.Errorf("invalid Content-Range: %q: %w", hdr, err)
}
last, pos, err := parseDecimalInString(hdr, pos)
if err != nil {
return -1, -1, -1, fmt.Errorf("invalid Content-Range: %q, parsing last-pos: %w", hdr, err)
}
pos, err = parseExpectedChar(hdr, pos, '/')
if err != nil {
return -1, -1, -1, fmt.Errorf("invalid Content-Range: %q: %w", hdr, err)
}
completeLength := int64(-1)
if pos < len(hdr) && hdr[pos] == '*' {
pos++
} else {
completeLength, pos, err = parseDecimalInString(hdr, pos)
if err != nil {
return -1, -1, -1, fmt.Errorf("invalid Content-Range: %q, parsing complete-length: %w", hdr, err)
}
}
if pos < len(hdr) {
return -1, -1, -1, fmt.Errorf("invalid Content-Range: %q, unexpected trailing content", hdr)
}
return first, last, completeLength, nil
}
// Read implements io.ReadCloser
func (br *bodyReader) Read(p []byte) (int, error) {
if br.body == nil {
return 0, fmt.Errorf("internal error: bodyReader.Read called on a closed object for %s", br.logURL.Redacted())
}
n, err := br.body.Read(p)
br.offset += int64(n)
switch {
case err == nil || err == io.EOF:
br.lastSuccessTime = time.Now()
return n, err // Unlike the default: case, don't log anything.
case errors.Is(err, io.ErrUnexpectedEOF) || errors.Is(err, syscall.ECONNRESET):
originalErr := err
redactedURL := br.logURL.Redacted()
if err := br.errorIfNotReconnecting(originalErr, redactedURL); err != nil {
return n, err
}
if err := br.body.Close(); err != nil {
logrus.Debugf("Error closing blob body: %v", err) // … and ignore err otherwise
}
br.body = nil
time.Sleep(1*time.Second + time.Duration(rand.Intn(100_000))*time.Microsecond) // Some jitter so that a failure blip doesn't cause a deterministic stampede
headers := map[string][]string{
"Range": {fmt.Sprintf("bytes=%d-", br.offset)},
}
res, err := br.c.makeRequest(br.ctx, http.MethodGet, br.path, headers, nil, v2Auth, nil)
if err != nil {
return n, fmt.Errorf("%w (while reconnecting: %v)", originalErr, err)
}
consumedBody := false
defer func() {
if !consumedBody {
res.Body.Close()
}
}()
switch res.StatusCode {
case http.StatusPartialContent: // OK
// A client MUST inspect a 206 response's Content-Type and Content-Range field(s) to determine what parts are enclosed and whether additional requests are needed.
// The recipient of an invalid Content-Range MUST NOT attempt to recombine the received content with a stored representation.
first, last, completeLength, err := parseContentRange(res)
if err != nil {
return n, fmt.Errorf("%w (after reconnecting, invalid Content-Range header: %v)", originalErr, err)
}
// We don't handle responses that start at an unrequested offset, nor responses that terminate before the end of the full blob.
if first != br.offset || (completeLength != -1 && last+1 != completeLength) {
return n, fmt.Errorf("%w (after reconnecting at offset %d, got unexpected Content-Range %d-%d/%d)", originalErr, br.offset, first, last, completeLength)
}
// Continue below
case http.StatusOK:
return n, fmt.Errorf("%w (after reconnecting, server did not process a Range: header, status %d)", originalErr, http.StatusOK)
default:
err := registryHTTPResponseToError(res)
return n, fmt.Errorf("%w (after reconnecting, fetching blob: %v)", originalErr, err)
}
logrus.Debugf("Succesfully reconnected to %s", redactedURL)
consumedBody = true
br.body = res.Body
br.lastRetryOffset = br.offset
return n, nil
default:
logrus.Debugf("Error reading blob body from %s: %#v", br.logURL.Redacted(), err)
return n, err
}
}
// millisecondsSince is like time.Since(tm).Milliseconds, but it returns a floating-point value
func millisecondsSince(tm time.Time) float64 {
return float64(time.Since(tm).Nanoseconds()) / 1_000_000.0
}
// errorIfNotReconnecting makes a heuristic decision whether we should reconnect after err at redactedURL; if so, it returns nil,
// otherwise it returns an appropriate error to return to the caller (possibly augmented with data about the heuristic)
func (br *bodyReader) errorIfNotReconnecting(originalErr error, redactedURL string) error {
totalTime := millisecondsSince(br.firstConnectionTime)
failureTime := math.NaN()
if (br.lastSuccessTime != time.Time{}) {
failureTime = millisecondsSince(br.lastSuccessTime)
}
logrus.Debugf("Reading blob body from %s failed (%#v), decision inputs: lastRetryOffset %d, offset %d, %.3f ms since first connection, %.3f ms since last progress",
redactedURL, originalErr, br.lastRetryOffset, br.offset, totalTime, failureTime)
progress := br.offset - br.lastRetryOffset
if progress < bodyReaderMinimumProgress {
logrus.Debugf("Not reconnecting to %s because only %d bytes progress made", redactedURL, progress)
return fmt.Errorf("(heuristic tuning data: last retry %d, current offset %d; %.3f ms total, %.3f ms since progress): %w",
br.lastRetryOffset, br.offset, totalTime, failureTime, originalErr)
}
logrus.Infof("Reading blob body from %s failed (%v), reconnecting…", redactedURL, originalErr)
return nil
}
// Close implements io.ReadCloser
func (br *bodyReader) Close() error {
if br.body == nil {
return nil
}
err := br.body.Close()
br.body = nil
return err
}
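
For a concrete sense of the resume handshake above, here is a minimal sketch of a hypothetical in-package test exercising parseContentRange; the header values are invented examples and a "testing" import is assumed, so this is illustrative only, not part of the vendored code.

func TestParseContentRangeSketch(t *testing.T) {
	// Simulate a 206 response a registry might return for "Range: bytes=100-".
	res := &http.Response{Header: http.Header{}}
	res.Header.Set("Content-Range", "bytes 100-4095/4096")
	first, last, completeLength, err := parseContentRange(res)
	if err != nil || first != 100 || last != 4095 || completeLength != 4096 {
		t.Fatalf("unexpected result: %d %d %d %v", first, last, completeLength, err)
	}
	// The complete length may be unknown: "bytes 100-4095/*" yields completeLength == -1.
	res.Header.Set("Content-Range", "bytes 100-4095/*")
	if _, _, completeLength, err = parseContentRange(res); err != nil || completeLength != -1 {
		t.Fatalf("unexpected result for unknown length: %d %v", completeLength, err)
	}
}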

View File

@ -118,7 +118,7 @@ func (ref daemonReference) Transport() types.ImageTransport {
// StringWithinTransport returns a string representation of the reference, which MUST be such that
// reference.Transport().ParseReference(reference.StringWithinTransport()) returns an equivalent reference.
// NOTE: The returned string is not promised to be equal to the original input to ParseReference;
// e.g. default attribute values omitted by the user may be filled in in the return value, or vice versa.
// e.g. default attribute values omitted by the user may be filled in the return value, or vice versa.
// WARNING: Do not use the return value in the UI to describe an image, it does not contain the Transport().Name() prefix;
// instead, see transports.ImageName().
func (ref daemonReference) StringWithinTransport() string {

View File

@ -448,8 +448,8 @@ func SearchRegistry(ctx context.Context, sys *types.SystemContext, registry, ima
if link == "" {
break
}
linkURLStr := strings.Trim(strings.Split(link, ";")[0], "<>")
linkURL, err := url.Parse(linkURLStr)
linkURLPart, _, _ := strings.Cut(link, ";")
linkURL, err := url.Parse(strings.Trim(linkURLPart, "<>"))
if err != nil {
return searchRes, err
}
@ -472,14 +472,24 @@ func (c *dockerClient) makeRequest(ctx context.Context, method, path string, hea
return nil, err
}
urlString := fmt.Sprintf("%s://%s%s", c.scheme, c.registry, path)
requestURL, err := url.Parse(urlString)
requestURL, err := c.resolveRequestURL(path)
if err != nil {
return nil, err
}
return c.makeRequestToResolvedURL(ctx, method, requestURL, headers, stream, -1, auth, extraScope)
}
// resolveRequestURL turns a path for c.makeRequest into a full URL.
// Most users should call makeRequest directly; this exists basically to make the URL available for debug logs.
func (c *dockerClient) resolveRequestURL(path string) (*url.URL, error) {
urlString := fmt.Sprintf("%s://%s%s", c.scheme, c.registry, path)
res, err := url.Parse(urlString)
if err != nil {
return nil, err
}
return res, nil
}
// Checks if the auth headers in the response contain an indication of a failed
// authorization because of an "insufficient_scope" error. If that's the case,
// returns the required scope to be used for fetching a new token.
@ -586,7 +596,7 @@ func (c *dockerClient) makeRequestToResolvedURL(ctx context.Context, method stri
case <-time.After(delay):
// Nothing
}
delay = delay * 2 // If the registry does not specify a delay, back off exponentially.
delay *= 2 // If the registry does not specify a delay, back off exponentially.
}
}
@ -965,7 +975,14 @@ func (c *dockerClient) getBlob(ctx context.Context, ref dockerReference, info ty
return nil, 0, fmt.Errorf("fetching blob: %w", err)
}
cache.RecordKnownLocation(ref.Transport(), bicTransportScope(ref), info.Digest, newBICLocationReference(ref))
return res.Body, getBlobSize(res), nil
blobSize := getBlobSize(res)
reconnectingReader, err := newBodyReader(ctx, c, path, res.Body)
if err != nil {
res.Body.Close()
return nil, 0, err
}
return reconnectingReader, blobSize, nil
}
// getOCIDescriptorContents returns the contents of a blob specified by descriptor in ref, which must fit within limit.

View File

@ -94,8 +94,8 @@ func GetRepositoryTags(ctx context.Context, sys *types.SystemContext, ref types.
break
}
linkURLStr := strings.Trim(strings.Split(link, ";")[0], "<>")
linkURL, err := url.Parse(linkURLStr)
linkURLPart, _, _ := strings.Cut(link, ";")
linkURL, err := url.Parse(strings.Trim(linkURLPart, "<>"))
if err != nil {
return tags, err
}

View File

@ -21,6 +21,7 @@ import (
"github.com/containers/image/v5/internal/iolimits"
"github.com/containers/image/v5/internal/private"
"github.com/containers/image/v5/internal/putblobdigest"
"github.com/containers/image/v5/internal/set"
"github.com/containers/image/v5/internal/signature"
"github.com/containers/image/v5/internal/streamdigest"
"github.com/containers/image/v5/internal/uploadreader"
@ -32,6 +33,8 @@ import (
"github.com/opencontainers/go-digest"
imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1"
"github.com/sirupsen/logrus"
"golang.org/x/exp/maps"
"golang.org/x/exp/slices"
)
type dockerImageDestination struct {
@ -731,19 +734,10 @@ func layerMatchesSigstoreSignature(layer imgspecv1.Descriptor, mimeType string,
// But right now we don't want to deal with corner cases like bad digest formats
// or unavailable algorithms; in the worst case we end up with duplicate signature
// entries.
layer.Digest.String() != digest.FromBytes(payloadBlob).String() {
layer.Digest.String() != digest.FromBytes(payloadBlob).String() ||
!maps.Equal(layer.Annotations, annotations) {
return false
}
if len(layer.Annotations) != len(annotations) {
return false
}
for k, v1 := range layer.Annotations {
if v2, ok := annotations[k]; !ok || v1 != v2 {
return false
}
}
// All annotations in layer exist in sig, and the number of annotations is the same, so all annotations
// in sig also exist in layer.
return true
}
@ -803,12 +797,11 @@ func (d *dockerImageDestination) putSignaturesToAPIExtension(ctx context.Context
if err != nil {
return err
}
existingSigNames := map[string]struct{}{}
existingSigNames := set.New[string]()
for _, sig := range existingSignatures.Signatures {
existingSigNames[sig.Name] = struct{}{}
existingSigNames.Add(sig.Name)
}
sigExists:
for _, newSigWithFormat := range signatures {
newSigSimple, ok := newSigWithFormat.(signature.SimpleSigning)
if !ok {
@ -816,10 +809,10 @@ sigExists:
}
newSig := newSigSimple.UntrustedSignature()
for _, existingSig := range existingSignatures.Signatures {
if existingSig.Version == extensionSignatureSchemaVersion && existingSig.Type == extensionSignatureTypeAtomic && bytes.Equal(existingSig.Content, newSig) {
continue sigExists
}
if slices.ContainsFunc(existingSignatures.Signatures, func(existingSig extensionSignature) bool {
return existingSig.Version == extensionSignatureSchemaVersion && existingSig.Type == extensionSignatureTypeAtomic && bytes.Equal(existingSig.Content, newSig)
}) {
continue
}
// The API expects us to invent a new unique name. This is racy, but hopefully good enough.
@ -831,7 +824,7 @@ sigExists:
return fmt.Errorf("generating random signature len %d: %w", n, err)
}
signatureName = fmt.Sprintf("%s@%032x", manifestDigest.String(), randBytes)
if _, ok := existingSigNames[signatureName]; !ok {
if !existingSigNames.Contains(signatureName) {
break
}
}
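
The maps.Equal call above replaces the hand-written length-and-lookup comparison of signature annotations. A tiny standalone sketch of the semantics relied on (the map values are hypothetical examples):

package main

import (
	"fmt"

	"golang.org/x/exp/maps"
)

func main() {
	a := map[string]string{"annotationKey": "value1"}
	b := map[string]string{"annotationKey": "value1"}
	c := map[string]string{"annotationKey": "value2"}
	fmt.Println(maps.Equal(a, b)) // true: same keys and values
	fmt.Println(maps.Equal(a, c)) // false: a value differs
	// nil and empty maps compare equal, matching the old len()-based check.
	fmt.Println(maps.Equal(map[string]string(nil), map[string]string{})) // true
}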

View File

@ -250,7 +250,7 @@ func splitHTTP200ResponseToPartial(streams chan io.ReadCloser, errs chan error,
currentOffset += toSkip
}
s := signalCloseReader{
closed: make(chan interface{}),
closed: make(chan struct{}),
stream: io.NopCloser(io.LimitReader(body, int64(c.Length))),
consumeStream: true,
}
@ -292,7 +292,7 @@ func handle206Response(streams chan io.ReadCloser, errs chan error, body io.Read
return
}
s := signalCloseReader{
closed: make(chan interface{}),
closed: make(chan struct{}),
stream: p,
}
streams <- s
@ -335,7 +335,7 @@ func parseMediaType(contentType string) (string, map[string]string, error) {
func (s *dockerImageSource) GetBlobAt(ctx context.Context, info types.BlobInfo, chunks []private.ImageSourceChunk) (chan io.ReadCloser, chan error, error) {
headers := make(map[string][]string)
var rangeVals []string
rangeVals := make([]string, 0, len(chunks))
for _, c := range chunks {
rangeVals = append(rangeVals, fmt.Sprintf("%d-%d", c.Offset, c.Offset+c.Length-1))
}
@ -766,7 +766,7 @@ func makeBufferedNetworkReader(stream io.ReadCloser, nBuffers, bufferSize uint)
}
type signalCloseReader struct {
closed chan interface{}
closed chan struct{}
stream io.ReadCloser
consumeStream bool
}

View File

@ -92,7 +92,7 @@ func (ref dockerReference) Transport() types.ImageTransport {
// StringWithinTransport returns a string representation of the reference, which MUST be such that
// reference.Transport().ParseReference(reference.StringWithinTransport()) returns an equivalent reference.
// NOTE: The returned string is not promised to be equal to the original input to ParseReference;
// e.g. default attribute values omitted by the user may be filled in in the return value, or vice versa.
// e.g. default attribute values omitted by the user may be filled in the return value, or vice versa.
// WARNING: Do not use the return value in the UI to describe an image, it does not contain the Transport().Name() prefix.
func (ref dockerReference) StringWithinTransport() string {
return "//" + reference.FamiliarString(ref.ref)

View File

@ -40,7 +40,7 @@ func httpResponseToError(res *http.Response, context string) error {
return ErrUnauthorizedForCredentials{Err: err}
default:
if context != "" {
context = context + ": "
context += ": "
}
return fmt.Errorf("%sinvalid status code from registry %d (%s)", context, res.StatusCode, http.StatusText(res.StatusCode))
}

View File

@ -13,10 +13,12 @@ import (
"time"
"github.com/containers/image/v5/docker/reference"
"github.com/containers/image/v5/internal/set"
"github.com/containers/image/v5/manifest"
"github.com/containers/image/v5/types"
"github.com/opencontainers/go-digest"
"github.com/sirupsen/logrus"
"golang.org/x/exp/slices"
)
// Writer allows creating a (docker save)-formatted tar archive containing one or more images.
@ -29,7 +31,7 @@ type Writer struct {
// Other state.
blobs map[digest.Digest]types.BlobInfo // list of already-sent blobs
repositories map[string]map[string]string
legacyLayers map[string]struct{} // A set of IDs of legacy layers that have been already sent.
legacyLayers *set.Set[string] // A set of IDs of legacy layers that have been already sent.
manifest []ManifestItem
manifestByConfig map[digest.Digest]int // A map from config digest to an entry index in manifest above.
}
@ -42,7 +44,7 @@ func NewWriter(dest io.Writer) *Writer {
tar: tar.NewWriter(dest),
blobs: make(map[digest.Digest]types.BlobInfo),
repositories: map[string]map[string]string{},
legacyLayers: map[string]struct{}{},
legacyLayers: set.New[string](),
manifestByConfig: map[digest.Digest]int{},
}
}
@ -89,7 +91,7 @@ func (w *Writer) recordBlobLocked(info types.BlobInfo) {
// ensureSingleLegacyLayerLocked writes legacy VERSION and configuration files for a single layer
// The caller must have locked the Writer.
func (w *Writer) ensureSingleLegacyLayerLocked(layerID string, layerDigest digest.Digest, configBytes []byte) error {
if _, ok := w.legacyLayers[layerID]; !ok {
if !w.legacyLayers.Contains(layerID) {
// Create a symlink for the legacy format, where there is one subdirectory per layer ("image").
// See also the comment in physicalLayerPath.
physicalLayerPath := w.physicalLayerPath(layerDigest)
@ -106,7 +108,7 @@ func (w *Writer) ensureSingleLegacyLayerLocked(layerID string, layerDigest diges
return fmt.Errorf("writing config json file: %w", err)
}
w.legacyLayers[layerID] = struct{}{}
w.legacyLayers.Add(layerID)
}
return nil
}
@ -117,7 +119,7 @@ func (w *Writer) writeLegacyMetadataLocked(layerDescriptors []manifest.Schema2De
lastLayerID := ""
for i, l := range layerDescriptors {
// The legacy format requires a config file per layer
layerConfig := make(map[string]interface{})
layerConfig := make(map[string]any)
// The root layer doesn't have any parent
if lastLayerID != "" {
@ -188,14 +190,9 @@ func checkManifestItemsMatch(a, b *ManifestItem) error {
if a.Config != b.Config {
return fmt.Errorf("Internal error: Trying to reuse ManifestItem values with configs %#v vs. %#v", a.Config, b.Config)
}
if len(a.Layers) != len(b.Layers) {
if !slices.Equal(a.Layers, b.Layers) {
return fmt.Errorf("Internal error: Trying to reuse ManifestItem values with layers %#v vs. %#v", a.Layers, b.Layers)
}
for i := range a.Layers {
if a.Layers[i] != b.Layers[i] {
return fmt.Errorf("Internal error: Trying to reuse ManifestItem values with layers[i] %#v vs. %#v", a.Layers[i], b.Layers[i])
}
}
// Ignore RepoTags, that will be built later.
// Ignore Parent and LayerSources, which we don't set to anything meaningful.
return nil
@ -229,9 +226,9 @@ func (w *Writer) ensureManifestItemLocked(layerDescriptors []manifest.Schema2Des
item = &w.manifest[i]
}
knownRepoTags := map[string]struct{}{}
knownRepoTags := set.New[string]()
for _, repoTag := range item.RepoTags {
knownRepoTags[repoTag] = struct{}{}
knownRepoTags.Add(repoTag)
}
for _, tag := range repoTags {
// For github.com/docker/docker consumers, this works just as well as
@ -252,9 +249,9 @@ func (w *Writer) ensureManifestItemLocked(layerDescriptors []manifest.Schema2Des
// analysis and explanation.
refString := fmt.Sprintf("%s:%s", tag.Name(), tag.Tag())
if _, ok := knownRepoTags[refString]; !ok {
if !knownRepoTags.Contains(refString) {
item.RepoTags = append(item.RepoTags, refString)
knownRepoTags[refString] = struct{}{}
knownRepoTags.Add(refString)
}
}
@ -337,7 +334,7 @@ func (t *tarFI) ModTime() time.Time {
func (t *tarFI) IsDir() bool {
return false
}
func (t *tarFI) Sys() interface{} {
func (t *tarFI) Sys() any {
return nil
}

View File

@ -104,7 +104,7 @@ func splitDockerDomain(name string) (domain, remainder string) {
}
// familiarizeName returns a shortened version of the name familiar
// to to the Docker UI. Familiar names have the default domain
// to the Docker UI. Familiar names have the default domain
// "docker.io" and "library/" repository prefix removed.
// For example, "docker.io/library/redis" will have the familiar
// name "redis" and "docker.io/dmcgowan/myapp" will be "dmcgowan/myapp".

View File

@ -175,7 +175,7 @@ func splitDomain(name string) (string, string) {
// hostname and name string. If no valid hostname is
// found, the hostname is empty and the full value
// is returned as name
// DEPRECATED: Use Domain or Path
// Deprecated: Use Domain or Path
func SplitHostname(named Named) (string, string) {
if r, ok := named.(namedRepository); ok {
return r.Domain(), r.Path()

View File

@ -1,9 +1,10 @@
package reference
import (
storageRegexp "github.com/containers/storage/pkg/regexp"
"regexp"
"strings"
storageRegexp "github.com/containers/storage/pkg/regexp"
)
const (

View File

@ -149,7 +149,7 @@ func expectTokenOrQuoted(s string) (value string, rest string) {
p := make([]byte, len(s)-1)
j := copy(p, s[:i])
escape := true
for i = i + 1; i < len(s); i++ {
for i++; i < len(s); i++ {
b := s[i]
switch {
case escape:

View File

@ -4,7 +4,7 @@ import (
"context"
"fmt"
"github.com/containers/image/v5/manifest"
"github.com/containers/image/v5/internal/manifest"
"github.com/containers/image/v5/types"
)

View File

@ -381,7 +381,7 @@ func v1ConfigFromConfigJSON(configJSON []byte, v1ID, parentV1ID string, throwawa
delete(rawContents, "rootfs")
delete(rawContents, "history")
updates := map[string]interface{}{"id": v1ID}
updates := map[string]any{"id": v1ID}
if parentV1ID != "" {
updates["parent"] = parentV1ID
}

View File

@ -4,7 +4,7 @@ import (
"context"
"fmt"
"github.com/containers/image/v5/manifest"
"github.com/containers/image/v5/internal/manifest"
"github.com/containers/image/v5/types"
)

View File

@ -0,0 +1,72 @@
package manifest
import (
"encoding/json"
"fmt"
)
// AllowedManifestFields is a bit mask of “essential” manifest fields that ValidateUnambiguousManifestFormat
// can expect to be present.
type AllowedManifestFields int
const (
AllowedFieldConfig AllowedManifestFields = 1 << iota
AllowedFieldFSLayers
AllowedFieldHistory
AllowedFieldLayers
AllowedFieldManifests
AllowedFieldFirstUnusedBit // Keep this at the end!
)
// ValidateUnambiguousManifestFormat rejects manifests (incl. multi-arch) that look like more than
// one kind we currently recognize, i.e. if they contain any of the known “essential” format fields
// other than the ones the caller specifically allows.
// expectedMIMEType is used only for diagnostics.
// NOTE: The caller should do the non-heuristic validations (e.g. check for any specified format
// identification/version, or other “magic numbers”) before calling this, to cleanly reject unambiguous
// data that just isn't what was expected, as opposed to actually ambiguous data.
func ValidateUnambiguousManifestFormat(manifest []byte, expectedMIMEType string,
allowed AllowedManifestFields) error {
if allowed >= AllowedFieldFirstUnusedBit {
return fmt.Errorf("internal error: invalid allowedManifestFields value %#v", allowed)
}
// Use a private type to decode, not just a map[string]any, because we want
// to also reject case-insensitive matches (which would be used by Go when really decoding
// the manifest).
// (It is expected that as manifest formats are added or extended over time, more fields will be added
// here.)
detectedFields := struct {
Config any `json:"config"`
FSLayers any `json:"fsLayers"`
History any `json:"history"`
Layers any `json:"layers"`
Manifests any `json:"manifests"`
}{}
if err := json.Unmarshal(manifest, &detectedFields); err != nil {
// The caller was supposed to already validate version numbers, so this should not happen;
// let's not bother with making this error “nice”.
return err
}
unexpected := []string{}
// Sadly this isn't easy to automate in Go, without reflection. So, copy&paste.
if detectedFields.Config != nil && (allowed&AllowedFieldConfig) == 0 {
unexpected = append(unexpected, "config")
}
if detectedFields.FSLayers != nil && (allowed&AllowedFieldFSLayers) == 0 {
unexpected = append(unexpected, "fsLayers")
}
if detectedFields.History != nil && (allowed&AllowedFieldHistory) == 0 {
unexpected = append(unexpected, "history")
}
if detectedFields.Layers != nil && (allowed&AllowedFieldLayers) == 0 {
unexpected = append(unexpected, "layers")
}
if detectedFields.Manifests != nil && (allowed&AllowedFieldManifests) == 0 {
unexpected = append(unexpected, "manifests")
}
if len(unexpected) != 0 {
return fmt.Errorf(`rejecting ambiguous manifest, unexpected fields %#v in supposedly %s`,
unexpected, expectedMIMEType)
}
return nil
}
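
A short sketch of the intended behaviour, as a hypothetical in-package test; the JSON is deliberately ambiguous example data and a "testing" import is assumed.

func TestValidateUnambiguousManifestFormatSketch(t *testing.T) {
	// A supposedly single-image manifest that also carries a "manifests" field
	// looks like a manifest list as well, so it must be rejected.
	blob := []byte(`{"schemaVersion": 2, "config": {}, "layers": [], "manifests": []}`)
	err := ValidateUnambiguousManifestFormat(blob, "application/vnd.oci.image.manifest.v1+json",
		AllowedFieldConfig|AllowedFieldLayers)
	if err == nil {
		t.Fatal("expected the ambiguous manifest to be rejected")
	}
}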

View File

@ -0,0 +1,15 @@
package manifest
import (
"github.com/opencontainers/go-digest"
)
// Schema2Descriptor is a “descriptor” in docker/distribution schema 2.
//
// This is publicly visible as c/image/manifest.Schema2Descriptor.
type Schema2Descriptor struct {
MediaType string `json:"mediaType"`
Size int64 `json:"size"`
Digest digest.Digest `json:"digest"`
URLs []string `json:"urls,omitempty"`
}

View File

@ -0,0 +1,255 @@
package manifest
import (
"encoding/json"
"fmt"
platform "github.com/containers/image/v5/internal/pkg/platform"
"github.com/containers/image/v5/types"
"github.com/opencontainers/go-digest"
imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1"
"golang.org/x/exp/slices"
)
// Schema2PlatformSpec describes the platform which a particular manifest is
// specialized for.
// This is publicly visible as c/image/manifest.Schema2PlatformSpec.
type Schema2PlatformSpec struct {
Architecture string `json:"architecture"`
OS string `json:"os"`
OSVersion string `json:"os.version,omitempty"`
OSFeatures []string `json:"os.features,omitempty"`
Variant string `json:"variant,omitempty"`
Features []string `json:"features,omitempty"` // removed in OCI
}
// Schema2ManifestDescriptor references a platform-specific manifest.
// This is publicly visible as c/image/manifest.Schema2ManifestDescriptor.
type Schema2ManifestDescriptor struct {
Schema2Descriptor
Platform Schema2PlatformSpec `json:"platform"`
}
// Schema2ListPublic is a list of platform-specific manifests.
// This is publicly visible as c/image/manifest.Schema2List.
// Internal users should usually use Schema2List instead.
type Schema2ListPublic struct {
SchemaVersion int `json:"schemaVersion"`
MediaType string `json:"mediaType"`
Manifests []Schema2ManifestDescriptor `json:"manifests"`
}
// MIMEType returns the MIME type of this particular manifest list.
func (list *Schema2ListPublic) MIMEType() string {
return list.MediaType
}
// Instances returns a slice of digests of the manifests that this list knows of.
func (list *Schema2ListPublic) Instances() []digest.Digest {
results := make([]digest.Digest, len(list.Manifests))
for i, m := range list.Manifests {
results[i] = m.Digest
}
return results
}
// Instance returns the ListUpdate of a particular instance in the list.
func (list *Schema2ListPublic) Instance(instanceDigest digest.Digest) (ListUpdate, error) {
for _, manifest := range list.Manifests {
if manifest.Digest == instanceDigest {
return ListUpdate{
Digest: manifest.Digest,
Size: manifest.Size,
MediaType: manifest.MediaType,
}, nil
}
}
return ListUpdate{}, fmt.Errorf("unable to find instance %s passed to Schema2List.Instances", instanceDigest)
}
// UpdateInstances updates the sizes, digests, and media types of the manifests
// which the list catalogs.
func (list *Schema2ListPublic) UpdateInstances(updates []ListUpdate) error {
if len(updates) != len(list.Manifests) {
return fmt.Errorf("incorrect number of update entries passed to Schema2List.UpdateInstances: expected %d, got %d", len(list.Manifests), len(updates))
}
for i := range updates {
if err := updates[i].Digest.Validate(); err != nil {
return fmt.Errorf("update %d of %d passed to Schema2List.UpdateInstances contained an invalid digest: %w", i+1, len(updates), err)
}
list.Manifests[i].Digest = updates[i].Digest
if updates[i].Size < 0 {
return fmt.Errorf("update %d of %d passed to Schema2List.UpdateInstances had an invalid size (%d)", i+1, len(updates), updates[i].Size)
}
list.Manifests[i].Size = updates[i].Size
if updates[i].MediaType == "" {
return fmt.Errorf("update %d of %d passed to Schema2List.UpdateInstances had no media type (was %q)", i+1, len(updates), list.Manifests[i].MediaType)
}
list.Manifests[i].MediaType = updates[i].MediaType
}
return nil
}
// ChooseInstance parses blob as a schema2 manifest list, and returns the digest
// of the image which is appropriate for the current environment.
func (list *Schema2ListPublic) ChooseInstance(ctx *types.SystemContext) (digest.Digest, error) {
wantedPlatforms, err := platform.WantedPlatforms(ctx)
if err != nil {
return "", fmt.Errorf("getting platform information %#v: %w", ctx, err)
}
for _, wantedPlatform := range wantedPlatforms {
for _, d := range list.Manifests {
imagePlatform := imgspecv1.Platform{
Architecture: d.Platform.Architecture,
OS: d.Platform.OS,
OSVersion: d.Platform.OSVersion,
OSFeatures: slices.Clone(d.Platform.OSFeatures),
Variant: d.Platform.Variant,
}
if platform.MatchesPlatform(imagePlatform, wantedPlatform) {
return d.Digest, nil
}
}
}
return "", fmt.Errorf("no image found in manifest list for architecture %s, variant %q, OS %s", wantedPlatforms[0].Architecture, wantedPlatforms[0].Variant, wantedPlatforms[0].OS)
}
// Serialize returns the list in a blob format.
// NOTE: Serialize() does not in general reproduce the original blob if this object was loaded from one, even if no modifications were made!
func (list *Schema2ListPublic) Serialize() ([]byte, error) {
buf, err := json.Marshal(list)
if err != nil {
return nil, fmt.Errorf("marshaling Schema2List %#v: %w", list, err)
}
return buf, nil
}
// Schema2ListPublicFromComponents creates a Schema2 manifest list instance from the
// supplied data.
// This is publicly visible as c/image/manifest.Schema2ListFromComponents.
func Schema2ListPublicFromComponents(components []Schema2ManifestDescriptor) *Schema2ListPublic {
list := Schema2ListPublic{
SchemaVersion: 2,
MediaType: DockerV2ListMediaType,
Manifests: make([]Schema2ManifestDescriptor, len(components)),
}
for i, component := range components {
m := Schema2ManifestDescriptor{
Schema2Descriptor{
MediaType: component.MediaType,
Size: component.Size,
Digest: component.Digest,
URLs: slices.Clone(component.URLs),
},
Schema2PlatformSpec{
Architecture: component.Platform.Architecture,
OS: component.Platform.OS,
OSVersion: component.Platform.OSVersion,
OSFeatures: slices.Clone(component.Platform.OSFeatures),
Variant: component.Platform.Variant,
Features: slices.Clone(component.Platform.Features),
},
}
list.Manifests[i] = m
}
return &list
}
// Schema2ListPublicClone creates a deep copy of the passed-in list.
// This is publicly visible as c/image/manifest.Schema2ListClone.
func Schema2ListPublicClone(list *Schema2ListPublic) *Schema2ListPublic {
return Schema2ListPublicFromComponents(list.Manifests)
}
// ToOCI1Index returns the list encoded as an OCI1 index.
func (list *Schema2ListPublic) ToOCI1Index() (*OCI1IndexPublic, error) {
components := make([]imgspecv1.Descriptor, 0, len(list.Manifests))
for _, manifest := range list.Manifests {
converted := imgspecv1.Descriptor{
MediaType: manifest.MediaType,
Size: manifest.Size,
Digest: manifest.Digest,
URLs: slices.Clone(manifest.URLs),
Platform: &imgspecv1.Platform{
OS: manifest.Platform.OS,
Architecture: manifest.Platform.Architecture,
OSFeatures: slices.Clone(manifest.Platform.OSFeatures),
OSVersion: manifest.Platform.OSVersion,
Variant: manifest.Platform.Variant,
},
}
components = append(components, converted)
}
oci := OCI1IndexPublicFromComponents(components, nil)
return oci, nil
}
// ToSchema2List returns the list encoded as a Schema2 list.
func (list *Schema2ListPublic) ToSchema2List() (*Schema2ListPublic, error) {
return Schema2ListPublicClone(list), nil
}
// Schema2ListPublicFromManifest creates a Schema2 manifest list instance from marshalled
// JSON, presumably generated by encoding a Schema2 manifest list.
// This is publicly visible as c/image/manifest.Schema2ListFromManifest.
func Schema2ListPublicFromManifest(manifest []byte) (*Schema2ListPublic, error) {
list := Schema2ListPublic{
Manifests: []Schema2ManifestDescriptor{},
}
if err := json.Unmarshal(manifest, &list); err != nil {
return nil, fmt.Errorf("unmarshaling Schema2List %q: %w", string(manifest), err)
}
if err := ValidateUnambiguousManifestFormat(manifest, DockerV2ListMediaType,
AllowedFieldManifests); err != nil {
return nil, err
}
return &list, nil
}
// Clone returns a deep copy of this list and its contents.
func (list *Schema2ListPublic) Clone() ListPublic {
return Schema2ListPublicClone(list)
}
// ConvertToMIMEType converts the passed-in manifest list to a manifest
// list of the specified type.
func (list *Schema2ListPublic) ConvertToMIMEType(manifestMIMEType string) (ListPublic, error) {
switch normalized := NormalizedMIMEType(manifestMIMEType); normalized {
case DockerV2ListMediaType:
return list.Clone(), nil
case imgspecv1.MediaTypeImageIndex:
return list.ToOCI1Index()
case DockerV2Schema1MediaType, DockerV2Schema1SignedMediaType, imgspecv1.MediaTypeImageManifest, DockerV2Schema2MediaType:
return nil, fmt.Errorf("Can not convert manifest list to MIME type %q, which is not a list type", manifestMIMEType)
default:
// Note that this may not be reachable, NormalizedMIMEType has a default for unknown values.
return nil, fmt.Errorf("Unimplemented manifest list MIME type %s", manifestMIMEType)
}
}
// Schema2List is a list of platform-specific manifests.
type Schema2List struct {
Schema2ListPublic
}
func schema2ListFromPublic(public *Schema2ListPublic) *Schema2List {
return &Schema2List{*public}
}
func (index *Schema2List) CloneInternal() List {
return schema2ListFromPublic(Schema2ListPublicClone(&index.Schema2ListPublic))
}
func (index *Schema2List) Clone() ListPublic {
return index.CloneInternal()
}
// Schema2ListFromManifest creates a Schema2 manifest list instance from marshalled
// JSON, presumably generated by encoding a Schema2 manifest list.
func Schema2ListFromManifest(manifest []byte) (*Schema2List, error) {
public, err := Schema2ListPublicFromManifest(manifest)
if err != nil {
return nil, err
}
return schema2ListFromPublic(public), nil
}

View File

@ -0,0 +1,86 @@
package manifest
import (
"fmt"
"github.com/containers/image/v5/types"
digest "github.com/opencontainers/go-digest"
imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1"
)
// ListPublic is a subset of List which is a part of the public API;
// so no methods can be added, removed or changed.
//
// Internal users should usually use List instead.
type ListPublic interface {
// MIMEType returns the MIME type of this particular manifest list.
MIMEType() string
// Instances returns a list of the manifests that this list knows of, other than its own.
Instances() []digest.Digest
// Update information about the list's instances. The length of the passed-in slice must
// match the length of the list of instances which the list already contains, and every field
// must be specified.
UpdateInstances([]ListUpdate) error
// Instance returns the size and MIME type of a particular instance in the list.
Instance(digest.Digest) (ListUpdate, error)
// ChooseInstance selects which manifest is most appropriate for the platform described by the
// SystemContext, or for the current platform if the SystemContext doesn't specify any details.
ChooseInstance(ctx *types.SystemContext) (digest.Digest, error)
// Serialize returns the list in a blob format.
// NOTE: Serialize() does not in general reproduce the original blob if this object was loaded
// from one, even if no modifications were made!
Serialize() ([]byte, error)
// ConvertToMIMEType returns the list rebuilt to the specified MIME type, or an error.
ConvertToMIMEType(mimeType string) (ListPublic, error)
// Clone returns a deep copy of this list and its contents.
Clone() ListPublic
}
// List is an interface for parsing, modifying lists of image manifests.
// Callers can either use this abstract interface without understanding the details of the formats,
// or instantiate a specific implementation (e.g. manifest.OCI1Index) and access the public members
// directly.
type List interface {
ListPublic
// CloneInternal returns a deep copy of this list and its contents.
CloneInternal() List
}
// ListUpdate includes the fields which a List's UpdateInstances() method will modify.
// This is publicly visible as c/image/manifest.ListUpdate.
type ListUpdate struct {
Digest digest.Digest
Size int64
MediaType string
}
// ListPublicFromBlob parses a list of manifests.
// This is publicly visible as c/image/manifest.ListFromBlob.
func ListPublicFromBlob(manifest []byte, manifestMIMEType string) (ListPublic, error) {
list, err := ListFromBlob(manifest, manifestMIMEType)
if err != nil {
return nil, err
}
return list, nil
}
// ListFromBlob parses a list of manifests.
func ListFromBlob(manifest []byte, manifestMIMEType string) (List, error) {
normalized := NormalizedMIMEType(manifestMIMEType)
switch normalized {
case DockerV2ListMediaType:
return Schema2ListFromManifest(manifest)
case imgspecv1.MediaTypeImageIndex:
return OCI1IndexFromManifest(manifest)
case DockerV2Schema1MediaType, DockerV2Schema1SignedMediaType, imgspecv1.MediaTypeImageManifest, DockerV2Schema2MediaType:
return nil, fmt.Errorf("Treating single images as manifest lists is not implemented")
}
return nil, fmt.Errorf("Unimplemented manifest list MIME type %s (normalized as %s)", manifestMIMEType, normalized)
}

View File

@ -0,0 +1,167 @@
package manifest
import (
"encoding/json"
"github.com/containers/libtrust"
digest "github.com/opencontainers/go-digest"
imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1"
)
// FIXME: Should we just use docker/distribution and docker/docker implementations directly?
// FIXME(runcom, mitr): should we have a mediatype pkg??
const (
// DockerV2Schema1MediaType MIME type represents Docker manifest schema 1
DockerV2Schema1MediaType = "application/vnd.docker.distribution.manifest.v1+json"
// DockerV2Schema1SignedMediaType MIME type represents Docker manifest schema 1 with a JWS signature
DockerV2Schema1SignedMediaType = "application/vnd.docker.distribution.manifest.v1+prettyjws"
// DockerV2Schema2MediaType MIME type represents Docker manifest schema 2
DockerV2Schema2MediaType = "application/vnd.docker.distribution.manifest.v2+json"
// DockerV2Schema2ConfigMediaType is the MIME type used for schema 2 config blobs.
DockerV2Schema2ConfigMediaType = "application/vnd.docker.container.image.v1+json"
// DockerV2Schema2LayerMediaType is the MIME type used for schema 2 layers.
DockerV2Schema2LayerMediaType = "application/vnd.docker.image.rootfs.diff.tar.gzip"
// DockerV2SchemaLayerMediaTypeUncompressed is the mediaType used for uncompressed layers.
DockerV2SchemaLayerMediaTypeUncompressed = "application/vnd.docker.image.rootfs.diff.tar"
// DockerV2ListMediaType MIME type represents Docker manifest schema 2 list
DockerV2ListMediaType = "application/vnd.docker.distribution.manifest.list.v2+json"
// DockerV2Schema2ForeignLayerMediaType is the MIME type used for schema 2 foreign layers.
DockerV2Schema2ForeignLayerMediaType = "application/vnd.docker.image.rootfs.foreign.diff.tar"
// DockerV2Schema2ForeignLayerMediaTypeGzip is the MIME type used for gzipped schema 2 foreign layers.
DockerV2Schema2ForeignLayerMediaTypeGzip = "application/vnd.docker.image.rootfs.foreign.diff.tar.gzip"
)
// GuessMIMEType guesses MIME type of a manifest and returns it _if it is recognized_, or "" if unknown or unrecognized.
// FIXME? We should, in general, prefer out-of-band MIME type instead of blindly parsing the manifest,
// but we may not have such metadata available (e.g. when the manifest is a local file).
// This is publicly visible as c/image/manifest.GuessMIMEType.
func GuessMIMEType(manifest []byte) string {
// A subset of manifest fields; the rest is silently ignored by json.Unmarshal.
// Also docker/distribution/manifest.Versioned.
meta := struct {
MediaType string `json:"mediaType"`
SchemaVersion int `json:"schemaVersion"`
Signatures any `json:"signatures"`
}{}
if err := json.Unmarshal(manifest, &meta); err != nil {
return ""
}
switch meta.MediaType {
case DockerV2Schema2MediaType, DockerV2ListMediaType,
imgspecv1.MediaTypeImageManifest, imgspecv1.MediaTypeImageIndex: // A recognized type.
return meta.MediaType
}
// this is the only way the function can return DockerV2Schema1MediaType, and recognizing that is essential for stripping the JWS signatures, i.e. computing the correct manifest digest.
switch meta.SchemaVersion {
case 1:
if meta.Signatures != nil {
return DockerV2Schema1SignedMediaType
}
return DockerV2Schema1MediaType
case 2:
// Best effort to understand if this is an OCI image since mediaType
// wasn't in the manifest for OCI image-spec < 1.0.2.
// For docker v2s2 meta.MediaType should have been set. But given the data, this is our best guess.
ociMan := struct {
Config struct {
MediaType string `json:"mediaType"`
} `json:"config"`
}{}
if err := json.Unmarshal(manifest, &ociMan); err != nil {
return ""
}
switch ociMan.Config.MediaType {
case imgspecv1.MediaTypeImageConfig:
return imgspecv1.MediaTypeImageManifest
case DockerV2Schema2ConfigMediaType:
// This case should not happen since a Docker image
// must declare a top-level media type and
// `meta.MediaType` has already been checked.
return DockerV2Schema2MediaType
}
// Maybe an image index or an OCI artifact.
ociIndex := struct {
Manifests []imgspecv1.Descriptor `json:"manifests"`
}{}
if err := json.Unmarshal(manifest, &ociIndex); err != nil {
return ""
}
if len(ociIndex.Manifests) != 0 {
if ociMan.Config.MediaType == "" {
return imgspecv1.MediaTypeImageIndex
}
// FIXME: this is mixing media types of manifests and configs.
return ociMan.Config.MediaType
}
// It's most likely an OCI artifact with a custom config media
// type which is not (and cannot be) covered by the media-type
// checks above.
return imgspecv1.MediaTypeImageManifest
}
return ""
}
// Digest returns the digest of a docker manifest, with any necessary implied transformations like stripping v1s1 signatures.
// This is publicly visible as c/image/manifest.Digest.
func Digest(manifest []byte) (digest.Digest, error) {
if GuessMIMEType(manifest) == DockerV2Schema1SignedMediaType {
sig, err := libtrust.ParsePrettySignature(manifest, "signatures")
if err != nil {
return "", err
}
manifest, err = sig.Payload()
if err != nil {
// Coverage: This should never happen, libtrust's Payload() can fail only if joseBase64UrlDecode() fails, on a string
// that libtrust itself has josebase64UrlEncode()d
return "", err
}
}
return digest.FromBytes(manifest), nil
}
// MatchesDigest returns true iff the manifest matches expectedDigest.
// Error may be set if this returns false.
// Note that this is not doing ConstantTimeCompare; by the time we get here, the cryptographic signature must already have been verified,
// or we are not using a cryptographic channel and the attacker can modify the digest along with the manifest blob.
// This is publicly visible as c/image/manifest.MatchesDigest.
func MatchesDigest(manifest []byte, expectedDigest digest.Digest) (bool, error) {
// This should eventually support various digest types.
actualDigest, err := Digest(manifest)
if err != nil {
return false, err
}
return expectedDigest == actualDigest, nil
}
// NormalizedMIMEType returns the effective MIME type of a manifest MIME type returned by a server,
// centralizing various workarounds.
// This is publicly visible as c/image/manifest.NormalizedMIMEType.
func NormalizedMIMEType(input string) string {
switch input {
// "application/json" is a valid v2s1 value per https://github.com/docker/distribution/blob/master/docs/spec/manifest-v2-1.md .
// This works for now, when nothing else seems to return "application/json"; if that were not true, the mapping/detection might
// need to happen within the ImageSource.
case "application/json":
return DockerV2Schema1SignedMediaType
case DockerV2Schema1MediaType, DockerV2Schema1SignedMediaType,
imgspecv1.MediaTypeImageManifest,
imgspecv1.MediaTypeImageIndex,
DockerV2Schema2MediaType,
DockerV2ListMediaType:
return input
default:
// If it's not a recognized manifest media type, or we have failed determining the type, we'll try one last time
// to deserialize using v2s1 as per https://github.com/docker/distribution/blob/master/manifests.go#L108
// and https://github.com/docker/distribution/blob/master/manifest/schema1/manifest.go#L50
//
// Crane registries can also return "text/plain", or pretty much anything else depending on a file extension “recognized” in the tag.
// This makes no real sense, but it happens
// because requests for manifests are
// redirected to a content distribution
// network which is configured that way. See https://bugzilla.redhat.com/show_bug.cgi?id=1389442
return DockerV2Schema1SignedMediaType
}
}
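
To illustrate the detection order in GuessMIMEType, a hypothetical in-package test; the manifest bytes are minimal invented examples, and "testing" plus the existing imgspecv1 import are assumed.

func TestGuessMIMETypeSketch(t *testing.T) {
	// An explicit, recognized top-level mediaType is returned as-is.
	index := []byte(`{"schemaVersion": 2, "mediaType": "application/vnd.oci.image.index.v1+json", "manifests": []}`)
	if got := GuessMIMEType(index); got != imgspecv1.MediaTypeImageIndex {
		t.Fatalf("unexpected MIME type %q", got)
	}
	// Without a top-level mediaType, an OCI config media type identifies an OCI image manifest.
	image := []byte(`{"schemaVersion": 2, "config": {"mediaType": "application/vnd.oci.image.config.v1+json"}, "layers": []}`)
	if got := GuessMIMEType(image); got != imgspecv1.MediaTypeImageManifest {
		t.Fatalf("unexpected MIME type %q", got)
	}
}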

View File

@ -0,0 +1,265 @@
package manifest
import (
"encoding/json"
"fmt"
"runtime"
platform "github.com/containers/image/v5/internal/pkg/platform"
"github.com/containers/image/v5/types"
"github.com/opencontainers/go-digest"
imgspec "github.com/opencontainers/image-spec/specs-go"
imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1"
"golang.org/x/exp/maps"
"golang.org/x/exp/slices"
)
// OCI1IndexPublic is just an alias for the OCI index type, but one which we can
// provide methods for.
// This is publicly visible as c/image/manifest.OCI1Index
// Internal users should usually use OCI1Index instead.
type OCI1IndexPublic struct {
imgspecv1.Index
}
// MIMEType returns the MIME type of this particular manifest index.
func (index *OCI1IndexPublic) MIMEType() string {
return imgspecv1.MediaTypeImageIndex
}
// Instances returns a slice of digests of the manifests that this index knows of.
func (index *OCI1IndexPublic) Instances() []digest.Digest {
results := make([]digest.Digest, len(index.Manifests))
for i, m := range index.Manifests {
results[i] = m.Digest
}
return results
}
// Instance returns the ListUpdate of a particular instance in the index.
func (index *OCI1IndexPublic) Instance(instanceDigest digest.Digest) (ListUpdate, error) {
for _, manifest := range index.Manifests {
if manifest.Digest == instanceDigest {
return ListUpdate{
Digest: manifest.Digest,
Size: manifest.Size,
MediaType: manifest.MediaType,
}, nil
}
}
return ListUpdate{}, fmt.Errorf("unable to find instance %s in OCI1Index", instanceDigest)
}
// UpdateInstances updates the sizes, digests, and media types of the manifests
// which the list catalogs.
func (index *OCI1IndexPublic) UpdateInstances(updates []ListUpdate) error {
if len(updates) != len(index.Manifests) {
return fmt.Errorf("incorrect number of update entries passed to OCI1Index.UpdateInstances: expected %d, got %d", len(index.Manifests), len(updates))
}
for i := range updates {
if err := updates[i].Digest.Validate(); err != nil {
return fmt.Errorf("update %d of %d passed to OCI1Index.UpdateInstances contained an invalid digest: %w", i+1, len(updates), err)
}
index.Manifests[i].Digest = updates[i].Digest
if updates[i].Size < 0 {
return fmt.Errorf("update %d of %d passed to OCI1Index.UpdateInstances had an invalid size (%d)", i+1, len(updates), updates[i].Size)
}
index.Manifests[i].Size = updates[i].Size
if updates[i].MediaType == "" {
return fmt.Errorf("update %d of %d passed to OCI1Index.UpdateInstances had no media type (was %q)", i+1, len(updates), index.Manifests[i].MediaType)
}
index.Manifests[i].MediaType = updates[i].MediaType
}
return nil
}
// ChooseInstance parses blob as an oci v1 manifest index, and returns the digest
// of the image which is appropriate for the current environment.
func (index *OCI1IndexPublic) ChooseInstance(ctx *types.SystemContext) (digest.Digest, error) {
wantedPlatforms, err := platform.WantedPlatforms(ctx)
if err != nil {
return "", fmt.Errorf("getting platform information %#v: %w", ctx, err)
}
for _, wantedPlatform := range wantedPlatforms {
for _, d := range index.Manifests {
if d.Platform == nil {
continue
}
imagePlatform := imgspecv1.Platform{
Architecture: d.Platform.Architecture,
OS: d.Platform.OS,
OSVersion: d.Platform.OSVersion,
OSFeatures: slices.Clone(d.Platform.OSFeatures),
Variant: d.Platform.Variant,
}
if platform.MatchesPlatform(imagePlatform, wantedPlatform) {
return d.Digest, nil
}
}
}
for _, d := range index.Manifests {
if d.Platform == nil {
return d.Digest, nil
}
}
return "", fmt.Errorf("no image found in image index for architecture %s, variant %q, OS %s", wantedPlatforms[0].Architecture, wantedPlatforms[0].Variant, wantedPlatforms[0].OS)
}
// Serialize returns the index in a blob format.
// NOTE: Serialize() does not in general reproduce the original blob if this object was loaded from one, even if no modifications were made!
func (index *OCI1IndexPublic) Serialize() ([]byte, error) {
buf, err := json.Marshal(index)
if err != nil {
return nil, fmt.Errorf("marshaling OCI1Index %#v: %w", index, err)
}
return buf, nil
}
// OCI1IndexPublicFromComponents creates an OCI1 image index instance from the
// supplied data.
// This is publicly visible as c/image/manifest.OCI1IndexFromComponents.
func OCI1IndexPublicFromComponents(components []imgspecv1.Descriptor, annotations map[string]string) *OCI1IndexPublic {
index := OCI1IndexPublic{
imgspecv1.Index{
Versioned: imgspec.Versioned{SchemaVersion: 2},
MediaType: imgspecv1.MediaTypeImageIndex,
Manifests: make([]imgspecv1.Descriptor, len(components)),
Annotations: maps.Clone(annotations),
},
}
for i, component := range components {
var platform *imgspecv1.Platform
if component.Platform != nil {
platform = &imgspecv1.Platform{
Architecture: component.Platform.Architecture,
OS: component.Platform.OS,
OSVersion: component.Platform.OSVersion,
OSFeatures: slices.Clone(component.Platform.OSFeatures),
Variant: component.Platform.Variant,
}
}
m := imgspecv1.Descriptor{
MediaType: component.MediaType,
Size: component.Size,
Digest: component.Digest,
URLs: slices.Clone(component.URLs),
Annotations: maps.Clone(component.Annotations),
Platform: platform,
}
index.Manifests[i] = m
}
return &index
}
// OCI1IndexPublicClone creates a deep copy of the passed-in index.
// This is publicly visible as c/image/manifest.OCI1IndexClone.
func OCI1IndexPublicClone(index *OCI1IndexPublic) *OCI1IndexPublic {
return OCI1IndexPublicFromComponents(index.Manifests, index.Annotations)
}
// ToOCI1Index returns the index encoded as an OCI1 index.
func (index *OCI1IndexPublic) ToOCI1Index() (*OCI1IndexPublic, error) {
return OCI1IndexPublicClone(index), nil
}
// ToSchema2List returns the index encoded as a Schema2 list.
func (index *OCI1IndexPublic) ToSchema2List() (*Schema2ListPublic, error) {
components := make([]Schema2ManifestDescriptor, 0, len(index.Manifests))
for _, manifest := range index.Manifests {
platform := manifest.Platform
if platform == nil {
platform = &imgspecv1.Platform{
OS: runtime.GOOS,
Architecture: runtime.GOARCH,
}
}
converted := Schema2ManifestDescriptor{
Schema2Descriptor{
MediaType: manifest.MediaType,
Size: manifest.Size,
Digest: manifest.Digest,
URLs: slices.Clone(manifest.URLs),
},
Schema2PlatformSpec{
OS: platform.OS,
Architecture: platform.Architecture,
OSFeatures: slices.Clone(platform.OSFeatures),
OSVersion: platform.OSVersion,
Variant: platform.Variant,
},
}
components = append(components, converted)
}
s2 := Schema2ListPublicFromComponents(components)
return s2, nil
}
// OCI1IndexPublicFromManifest creates an OCI1 manifest index instance from marshalled
// JSON, presumably generated by encoding an OCI1 manifest index.
// This is publicly visible as c/image/manifest.OCI1IndexFromManifest.
func OCI1IndexPublicFromManifest(manifest []byte) (*OCI1IndexPublic, error) {
index := OCI1IndexPublic{
Index: imgspecv1.Index{
Versioned: imgspec.Versioned{SchemaVersion: 2},
MediaType: imgspecv1.MediaTypeImageIndex,
Manifests: []imgspecv1.Descriptor{},
Annotations: make(map[string]string),
},
}
if err := json.Unmarshal(manifest, &index); err != nil {
return nil, fmt.Errorf("unmarshaling OCI1Index %q: %w", string(manifest), err)
}
if err := ValidateUnambiguousManifestFormat(manifest, imgspecv1.MediaTypeImageIndex,
AllowedFieldManifests); err != nil {
return nil, err
}
return &index, nil
}
// Clone returns a deep copy of this list and its contents.
func (index *OCI1IndexPublic) Clone() ListPublic {
return OCI1IndexPublicClone(index)
}
// ConvertToMIMEType converts the passed-in image index to a manifest list of
// the specified type.
func (index *OCI1IndexPublic) ConvertToMIMEType(manifestMIMEType string) (ListPublic, error) {
switch normalized := NormalizedMIMEType(manifestMIMEType); normalized {
case DockerV2ListMediaType:
return index.ToSchema2List()
case imgspecv1.MediaTypeImageIndex:
return index.Clone(), nil
case DockerV2Schema1MediaType, DockerV2Schema1SignedMediaType, imgspecv1.MediaTypeImageManifest, DockerV2Schema2MediaType:
return nil, fmt.Errorf("Can not convert image index to MIME type %q, which is not a list type", manifestMIMEType)
default:
// Note that this may not be reachable, NormalizedMIMEType has a default for unknown values.
return nil, fmt.Errorf("Unimplemented manifest MIME type %s", manifestMIMEType)
}
}
type OCI1Index struct {
OCI1IndexPublic
}
func oci1IndexFromPublic(public *OCI1IndexPublic) *OCI1Index {
return &OCI1Index{*public}
}
func (index *OCI1Index) CloneInternal() List {
return oci1IndexFromPublic(OCI1IndexPublicClone(&index.OCI1IndexPublic))
}
func (index *OCI1Index) Clone() ListPublic {
return index.CloneInternal()
}
// OCI1IndexFromManifest creates an OCI1 manifest list instance from marshalled
// JSON, presumably generated by encoding an OCI1 manifest list.
func OCI1IndexFromManifest(manifest []byte) (*OCI1Index, error) {
public, err := OCI1IndexPublicFromManifest(manifest)
if err != nil {
return nil, err
}
return oci1IndexFromPublic(public), nil
}

View File

@ -25,6 +25,7 @@ import (
"github.com/containers/image/v5/types"
imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1"
"golang.org/x/exp/slices"
)
// For Linux, the kernel has already detected the ABI, ISA and Features.
@ -152,13 +153,9 @@ func WantedPlatforms(ctx *types.SystemContext) ([]imgspecv1.Platform, error) {
if wantedVariant != "" {
// If the user requested a specific variant, we'll walk down
// the list from most to least compatible.
if compatibility[wantedArch] != nil {
variantOrder := compatibility[wantedArch]
for i, v := range variantOrder {
if wantedVariant == v {
variants = variantOrder[i:]
break
}
if variantOrder := compatibility[wantedArch]; variantOrder != nil {
if i := slices.Index(variantOrder, wantedVariant); i != -1 {
variants = variantOrder[i:]
}
}
if variants == nil {

View File

@ -0,0 +1,46 @@
package set
import "golang.org/x/exp/maps"
// FIXME:
// - Docstrings
// - This should be in a public library somewhere
type Set[E comparable] struct {
m map[E]struct{}
}
func New[E comparable]() *Set[E] {
return &Set[E]{
m: map[E]struct{}{},
}
}
func NewWithValues[E comparable](values ...E) *Set[E] {
s := New[E]()
for _, v := range values {
s.Add(v)
}
return s
}
func (s Set[E]) Add(v E) {
s.m[v] = struct{}{} // Possibly writing the same struct{}{} presence marker again.
}
func (s Set[E]) Delete(v E) {
delete(s.m, v)
}
func (s *Set[E]) Contains(v E) bool {
_, ok := s.m[v]
return ok
}
func (s *Set[E]) Empty() bool {
return len(s.m) == 0
}
func (s *Set[E]) Values() []E {
return maps.Keys(s.m)
}
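
Later hunks in this commit replace ad-hoc map[...]struct{} bookkeeping with this helper (see the schema1, OpenShift, and blob-info-cache changes below). A minimal usage sketch; note that internal/... packages are importable only from inside c/image itself, so the import here is purely illustrative:

package main

import (
    "fmt"

    // Illustrative only: internal packages cannot be imported from other modules.
    "github.com/containers/image/v5/internal/set"
)

func main() {
    seen := set.New[string]()
    for _, id := range []string{"a", "b", "a"} {
        if seen.Contains(id) {
            fmt.Println("duplicate:", id)
            continue
        }
        seen.Add(id)
    }
    fmt.Println(seen.Values()) // element order is unspecified (map iteration)
}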

View File

@ -66,17 +66,15 @@ func FromBlob(blob []byte) (Signature, error) {
// The newer format: binary 0, format name, newline, data
case 0x00:
blob = blob[1:]
newline := bytes.IndexByte(blob, '\n')
if newline == -1 {
formatBytes, blobChunk, foundNewline := bytes.Cut(blob, []byte{'\n'})
if !foundNewline {
return nil, fmt.Errorf("invalid signature format, missing newline")
}
formatBytes := blob[:newline]
for _, b := range formatBytes {
if b < 32 || b >= 0x7F {
return nil, fmt.Errorf("invalid signature format, non-ASCII byte %#x", b)
}
}
blobChunk := blob[newline+1:]
switch {
case bytes.Equal(formatBytes, []byte(SimpleSigningFormat)):
return SimpleSigningFromBlob(blobChunk), nil
@ -102,10 +100,3 @@ func UnsupportedFormatError(sig Signature) error {
return fmt.Errorf("unsupported, and unrecognized, signature format %q", string(formatID))
}
}
// copyByteSlice returns a guaranteed-fresh copy of a byte slice
// Use this to make sure the underlying data is not shared and cant be unexpectedly modified.
func copyByteSlice(s []byte) []byte {
res := []byte{}
return append(res, s...)
}
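
bytes.Cut (Go 1.18+) returns the data before the separator, the data after it, and whether the separator was found, which is what lets FromBlob drop the manual IndexByte and slicing above. A standalone sketch of the same split:

package main

import (
    "bytes"
    "fmt"
)

func main() {
    blob := []byte("simple-signing\npayload-bytes")
    format, rest, ok := bytes.Cut(blob, []byte{'\n'})
    fmt.Println(string(format), ok) // "simple-signing" true
    fmt.Println(string(rest))       // "payload-bytes"

    _, _, ok = bytes.Cut([]byte("no newline here"), []byte{'\n'})
    fmt.Println(ok) // false — FromBlob treats this as an invalid signature format
}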

View File

@ -1,6 +1,11 @@
package signature
import "encoding/json"
import (
"encoding/json"
"golang.org/x/exp/maps"
"golang.org/x/exp/slices"
)
const (
// from sigstore/cosign/pkg/types.SimpleSigningMediaType
@ -40,8 +45,8 @@ type sigstoreJSONRepresentation struct {
func SigstoreFromComponents(untrustedMimeType string, untrustedPayload []byte, untrustedAnnotations map[string]string) Sigstore {
return Sigstore{
untrustedMIMEType: untrustedMimeType,
untrustedPayload: copyByteSlice(untrustedPayload),
untrustedAnnotations: copyStringMap(untrustedAnnotations),
untrustedPayload: slices.Clone(untrustedPayload),
untrustedAnnotations: maps.Clone(untrustedAnnotations),
}
}
@ -74,17 +79,9 @@ func (s Sigstore) UntrustedMIMEType() string {
return s.untrustedMIMEType
}
func (s Sigstore) UntrustedPayload() []byte {
return copyByteSlice(s.untrustedPayload)
return slices.Clone(s.untrustedPayload)
}
func (s Sigstore) UntrustedAnnotations() map[string]string {
return copyStringMap(s.untrustedAnnotations)
}
func copyStringMap(m map[string]string) map[string]string {
res := map[string]string{}
for k, v := range m {
res[k] = v
}
return res
return maps.Clone(s.untrustedAnnotations)
}
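
slices.Clone and maps.Clone from golang.org/x/exp make shallow copies with fresh backing storage (both return nil for a nil input), so they can stand in for the removed copyByteSlice/copyStringMap helpers. A quick sketch of the defensive-copy behavior:

package main

import (
    "fmt"

    "golang.org/x/exp/maps"
    "golang.org/x/exp/slices"
)

func main() {
    payload := []byte{1, 2, 3}
    annotations := map[string]string{"k": "v"}

    p2 := slices.Clone(payload)   // fresh backing array
    a2 := maps.Clone(annotations) // shallow copy of keys and values

    p2[0] = 9
    a2["k"] = "changed"
    fmt.Println(payload[0], annotations["k"]) // 1 v — the originals are untouched
}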

View File

@ -1,5 +1,7 @@
package signature
import "golang.org/x/exp/slices"
// SimpleSigning is a “simple signing” signature.
type SimpleSigning struct {
untrustedSignature []byte
@ -8,7 +10,7 @@ type SimpleSigning struct {
// SimpleSigningFromBlob converts a “simple signing” signature into a SimpleSigning object.
func SimpleSigningFromBlob(blobChunk []byte) SimpleSigning {
return SimpleSigning{
untrustedSignature: copyByteSlice(blobChunk),
untrustedSignature: slices.Clone(blobChunk),
}
}
@ -19,9 +21,9 @@ func (s SimpleSigning) FormatID() FormatID {
// blobChunk returns a representation of signature as a []byte, suitable for long-term storage.
// Almost everyone should use signature.Blob() instead.
func (s SimpleSigning) blobChunk() ([]byte, error) {
return copyByteSlice(s.untrustedSignature), nil
return slices.Clone(s.untrustedSignature), nil
}
func (s SimpleSigning) UntrustedSignature() []byte {
return copyByteSlice(s.untrustedSignature)
return slices.Clone(s.untrustedSignature)
}

View File

@ -11,7 +11,7 @@ import (
// The net/http package uses a separate goroutine to upload data to a HTTP connection,
// and it is possible for the server to return a response (typically an error) before consuming
// the full body of the request. In that case http.Client.Do can return with an error while
// the body is still being read — regardless of of the cancellation, if any, of http.Request.Context().
// the body is still being read — regardless of the cancellation, if any, of http.Request.Context().
//
// As a result, any data used/updated by the io.Reader() provided as the request body may be
// used/updated even after http.Client.Do returns, causing races.
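
A standalone sketch of the pattern that comment describes — not this package's actual API; the wrapper type and names are invented for illustration. The idea is to signal when the transport has closed the request body, and only reuse the underlying buffer after that signal:

package main

import (
    "bytes"
    "fmt"
    "io"
    "net/http"
    "net/http/httptest"
    "sync"
)

// bodyWithDone wraps a request body and closes done once the HTTP transport
// calls Close on it. Hypothetical helper, for illustration only.
type bodyWithDone struct {
    io.Reader
    once sync.Once
    done chan struct{}
}

func (b *bodyWithDone) Close() error {
    b.once.Do(func() { close(b.done) })
    return nil
}

func main() {
    srv := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
        io.Copy(io.Discard, r.Body) // consume the upload
    }))
    defer srv.Close()

    data := []byte("payload that must not be modified while the upload is in flight")
    body := &bodyWithDone{Reader: bytes.NewReader(data), done: make(chan struct{})}

    req, err := http.NewRequest(http.MethodPut, srv.URL, body)
    if err != nil {
        panic(err)
    }
    resp, err := http.DefaultClient.Do(req)
    if err == nil {
        resp.Body.Close()
    }
    <-body.done // the transport is done with data; only now is it safe to reuse the buffer
    fmt.Println("upload finished, buffer released")
}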

View File

@ -1,7 +1,6 @@
package manifest
import (
"encoding/json"
"fmt"
compressiontypes "github.com/containers/image/v5/pkg/compression/types"
@ -9,96 +8,6 @@ import (
"github.com/sirupsen/logrus"
)
// dupStringSlice returns a deep copy of a slice of strings, or nil if the
// source slice is empty.
func dupStringSlice(list []string) []string {
if len(list) == 0 {
return nil
}
dup := make([]string, len(list))
copy(dup, list)
return dup
}
// dupStringStringMap returns a deep copy of a map[string]string, or nil if the
// passed-in map is nil or has no keys.
func dupStringStringMap(m map[string]string) map[string]string {
if len(m) == 0 {
return nil
}
result := make(map[string]string)
for k, v := range m {
result[k] = v
}
return result
}
// allowedManifestFields is a bit mask of “essential” manifest fields that validateUnambiguousManifestFormat
// can expect to be present.
type allowedManifestFields int
const (
allowedFieldConfig allowedManifestFields = 1 << iota
allowedFieldFSLayers
allowedFieldHistory
allowedFieldLayers
allowedFieldManifests
allowedFieldFirstUnusedBit // Keep this at the end!
)
// validateUnambiguousManifestFormat rejects manifests (incl. multi-arch) that look like more than
// one kind we currently recognize, i.e. if they contain any of the known “essential” format fields
// other than the ones the caller specifically allows.
// expectedMIMEType is used only for diagnostics.
// NOTE: The caller should do the non-heuristic validations (e.g. check for any specified format
// identification/version, or other “magic numbers”) before calling this, to cleanly reject unambiguous
// data that just isnt what was expected, as opposed to actually ambiguous data.
func validateUnambiguousManifestFormat(manifest []byte, expectedMIMEType string,
allowed allowedManifestFields) error {
if allowed >= allowedFieldFirstUnusedBit {
return fmt.Errorf("internal error: invalid allowedManifestFields value %#v", allowed)
}
// Use a private type to decode, not just a map[string]interface{}, because we want
// to also reject case-insensitive matches (which would be used by Go when really decoding
// the manifest).
// (It is expected that as manifest formats are added or extended over time, more fields will be added
// here.)
detectedFields := struct {
Config interface{} `json:"config"`
FSLayers interface{} `json:"fsLayers"`
History interface{} `json:"history"`
Layers interface{} `json:"layers"`
Manifests interface{} `json:"manifests"`
}{}
if err := json.Unmarshal(manifest, &detectedFields); err != nil {
// The caller was supposed to already validate version numbers, so this should not happen;
// lets not bother with making this error “nice”.
return err
}
unexpected := []string{}
// Sadly this isnt easy to automate in Go, without reflection. So, copy&paste.
if detectedFields.Config != nil && (allowed&allowedFieldConfig) == 0 {
unexpected = append(unexpected, "config")
}
if detectedFields.FSLayers != nil && (allowed&allowedFieldFSLayers) == 0 {
unexpected = append(unexpected, "fsLayers")
}
if detectedFields.History != nil && (allowed&allowedFieldHistory) == 0 {
unexpected = append(unexpected, "history")
}
if detectedFields.Layers != nil && (allowed&allowedFieldLayers) == 0 {
unexpected = append(unexpected, "layers")
}
if detectedFields.Manifests != nil && (allowed&allowedFieldManifests) == 0 {
unexpected = append(unexpected, "manifests")
}
if len(unexpected) != 0 {
return fmt.Errorf(`rejecting ambiguous manifest, unexpected fields %#v in supposedly %s`,
unexpected, expectedMIMEType)
}
return nil
}
// layerInfosToStrings converts a list of layer infos, presumably obtained from a Manifest.LayerInfos()
// method call, into a format suitable for inclusion in a types.ImageInspectInfo structure.
func layerInfosToStrings(infos []LayerInfo) []string {

View File

@ -8,10 +8,13 @@ import (
"time"
"github.com/containers/image/v5/docker/reference"
"github.com/containers/image/v5/internal/manifest"
"github.com/containers/image/v5/internal/set"
"github.com/containers/image/v5/types"
"github.com/containers/storage/pkg/regexp"
"github.com/docker/docker/api/types/versions"
"github.com/opencontainers/go-digest"
"golang.org/x/exp/slices"
)
// Schema1FSLayers is an entry of the "fsLayers" array in docker/distribution schema 1.
@ -53,16 +56,16 @@ type Schema1V1Compatibility struct {
// Schema1FromManifest creates a Schema1 manifest instance from a manifest blob.
// (NOTE: The instance is not necessarily a literal representation of the original blob,
// layers with duplicate IDs are eliminated.)
func Schema1FromManifest(manifest []byte) (*Schema1, error) {
func Schema1FromManifest(manifestBlob []byte) (*Schema1, error) {
s1 := Schema1{}
if err := json.Unmarshal(manifest, &s1); err != nil {
if err := json.Unmarshal(manifestBlob, &s1); err != nil {
return nil, err
}
if s1.SchemaVersion != 1 {
return nil, fmt.Errorf("unsupported schema version %d", s1.SchemaVersion)
}
if err := validateUnambiguousManifestFormat(manifest, DockerV2Schema1SignedMediaType,
allowedFieldFSLayers|allowedFieldHistory); err != nil {
if err := manifest.ValidateUnambiguousManifestFormat(manifestBlob, DockerV2Schema1SignedMediaType,
manifest.AllowedFieldFSLayers|manifest.AllowedFieldHistory); err != nil {
return nil, err
}
if err := s1.initialize(); err != nil {
@ -183,22 +186,22 @@ func (m *Schema1) fixManifestLayers() error {
return errors.New("Invalid parent ID in the base layer of the image")
}
// check general duplicates to error instead of a deadlock
idmap := make(map[string]struct{})
idmap := set.New[string]()
var lastID string
for _, img := range m.ExtractedV1Compatibility {
// skip IDs that appear after each other, we handle those later
if _, exists := idmap[img.ID]; img.ID != lastID && exists {
if img.ID != lastID && idmap.Contains(img.ID) {
return fmt.Errorf("ID %+v appears multiple times in manifest", img.ID)
}
lastID = img.ID
idmap[lastID] = struct{}{}
idmap.Add(lastID)
}
// backwards loop so that we keep the remaining indexes after removing items
for i := len(m.ExtractedV1Compatibility) - 2; i >= 0; i-- {
if m.ExtractedV1Compatibility[i].ID == m.ExtractedV1Compatibility[i+1].ID { // repeated ID. remove and continue
m.FSLayers = append(m.FSLayers[:i], m.FSLayers[i+1:]...)
m.History = append(m.History[:i], m.History[i+1:]...)
m.ExtractedV1Compatibility = append(m.ExtractedV1Compatibility[:i], m.ExtractedV1Compatibility[i+1:]...)
m.FSLayers = slices.Delete(m.FSLayers, i, i+1)
m.History = slices.Delete(m.History, i, i+1)
m.ExtractedV1Compatibility = slices.Delete(m.ExtractedV1Compatibility, i, i+1)
} else if m.ExtractedV1Compatibility[i].Parent != m.ExtractedV1Compatibility[i+1].ID {
return fmt.Errorf("Invalid parent ID. Expected %v, got %v", m.ExtractedV1Compatibility[i+1].ID, m.ExtractedV1Compatibility[i].Parent)
}
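
slices.Delete(s, i, j) removes the elements in [i, j) and shifts the tail left, which is what the fixManifestLayers hunk above now uses instead of the append(s[:i], s[i+1:]...) splice. For example:

package main

import (
    "fmt"

    "golang.org/x/exp/slices"
)

func main() {
    layers := []string{"a", "b", "b", "c"} // indexes 1 and 2 are duplicates
    i := 1
    layers = slices.Delete(layers, i, i+1) // drop the element at index 1
    fmt.Println(layers)                    // [a b c]
}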

View File

@ -5,6 +5,7 @@ import (
"fmt"
"time"
"github.com/containers/image/v5/internal/manifest"
compressiontypes "github.com/containers/image/v5/pkg/compression/types"
"github.com/containers/image/v5/pkg/strslice"
"github.com/containers/image/v5/types"
@ -12,12 +13,7 @@ import (
)
// Schema2Descriptor is a “descriptor” in docker/distribution schema 2.
type Schema2Descriptor struct {
MediaType string `json:"mediaType"`
Size int64 `json:"size"`
Digest digest.Digest `json:"digest"`
URLs []string `json:"urls,omitempty"`
}
type Schema2Descriptor = manifest.Schema2Descriptor
// BlobInfoFromSchema2Descriptor returns a types.BlobInfo based on the input schema 2 descriptor.
func BlobInfoFromSchema2Descriptor(desc Schema2Descriptor) types.BlobInfo {
@ -159,13 +155,13 @@ type Schema2Image struct {
}
// Schema2FromManifest creates a Schema2 manifest instance from a manifest blob.
func Schema2FromManifest(manifest []byte) (*Schema2, error) {
func Schema2FromManifest(manifestBlob []byte) (*Schema2, error) {
s2 := Schema2{}
if err := json.Unmarshal(manifest, &s2); err != nil {
if err := json.Unmarshal(manifestBlob, &s2); err != nil {
return nil, err
}
if err := validateUnambiguousManifestFormat(manifest, DockerV2Schema2MediaType,
allowedFieldConfig|allowedFieldLayers); err != nil {
if err := manifest.ValidateUnambiguousManifestFormat(manifestBlob, DockerV2Schema2MediaType,
manifest.AllowedFieldConfig|manifest.AllowedFieldLayers); err != nil {
return nil, err
}
// Check manifest's and layers' media types.

View File

@ -1,220 +1,32 @@
package manifest
import (
"encoding/json"
"fmt"
platform "github.com/containers/image/v5/internal/pkg/platform"
"github.com/containers/image/v5/types"
"github.com/opencontainers/go-digest"
imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1"
"github.com/containers/image/v5/internal/manifest"
)
// Schema2PlatformSpec describes the platform which a particular manifest is
// specialized for.
type Schema2PlatformSpec struct {
Architecture string `json:"architecture"`
OS string `json:"os"`
OSVersion string `json:"os.version,omitempty"`
OSFeatures []string `json:"os.features,omitempty"`
Variant string `json:"variant,omitempty"`
Features []string `json:"features,omitempty"` // removed in OCI
}
type Schema2PlatformSpec = manifest.Schema2PlatformSpec
// Schema2ManifestDescriptor references a platform-specific manifest.
type Schema2ManifestDescriptor struct {
Schema2Descriptor
Platform Schema2PlatformSpec `json:"platform"`
}
type Schema2ManifestDescriptor = manifest.Schema2ManifestDescriptor
// Schema2List is a list of platform-specific manifests.
type Schema2List struct {
SchemaVersion int `json:"schemaVersion"`
MediaType string `json:"mediaType"`
Manifests []Schema2ManifestDescriptor `json:"manifests"`
}
// MIMEType returns the MIME type of this particular manifest list.
func (list *Schema2List) MIMEType() string {
return list.MediaType
}
// Instances returns a slice of digests of the manifests that this list knows of.
func (list *Schema2List) Instances() []digest.Digest {
results := make([]digest.Digest, len(list.Manifests))
for i, m := range list.Manifests {
results[i] = m.Digest
}
return results
}
// Instance returns the ListUpdate of a particular instance in the list.
func (list *Schema2List) Instance(instanceDigest digest.Digest) (ListUpdate, error) {
for _, manifest := range list.Manifests {
if manifest.Digest == instanceDigest {
return ListUpdate{
Digest: manifest.Digest,
Size: manifest.Size,
MediaType: manifest.MediaType,
}, nil
}
}
return ListUpdate{}, fmt.Errorf("unable to find instance %s passed to Schema2List.Instances", instanceDigest)
}
// UpdateInstances updates the sizes, digests, and media types of the manifests
// which the list catalogs.
func (list *Schema2List) UpdateInstances(updates []ListUpdate) error {
if len(updates) != len(list.Manifests) {
return fmt.Errorf("incorrect number of update entries passed to Schema2List.UpdateInstances: expected %d, got %d", len(list.Manifests), len(updates))
}
for i := range updates {
if err := updates[i].Digest.Validate(); err != nil {
return fmt.Errorf("update %d of %d passed to Schema2List.UpdateInstances contained an invalid digest: %w", i+1, len(updates), err)
}
list.Manifests[i].Digest = updates[i].Digest
if updates[i].Size < 0 {
return fmt.Errorf("update %d of %d passed to Schema2List.UpdateInstances had an invalid size (%d)", i+1, len(updates), updates[i].Size)
}
list.Manifests[i].Size = updates[i].Size
if updates[i].MediaType == "" {
return fmt.Errorf("update %d of %d passed to Schema2List.UpdateInstances had no media type (was %q)", i+1, len(updates), list.Manifests[i].MediaType)
}
list.Manifests[i].MediaType = updates[i].MediaType
}
return nil
}
// ChooseInstance parses blob as a schema2 manifest list, and returns the digest
// of the image which is appropriate for the current environment.
func (list *Schema2List) ChooseInstance(ctx *types.SystemContext) (digest.Digest, error) {
wantedPlatforms, err := platform.WantedPlatforms(ctx)
if err != nil {
return "", fmt.Errorf("getting platform information %#v: %w", ctx, err)
}
for _, wantedPlatform := range wantedPlatforms {
for _, d := range list.Manifests {
imagePlatform := imgspecv1.Platform{
Architecture: d.Platform.Architecture,
OS: d.Platform.OS,
OSVersion: d.Platform.OSVersion,
OSFeatures: dupStringSlice(d.Platform.OSFeatures),
Variant: d.Platform.Variant,
}
if platform.MatchesPlatform(imagePlatform, wantedPlatform) {
return d.Digest, nil
}
}
}
return "", fmt.Errorf("no image found in manifest list for architecture %s, variant %q, OS %s", wantedPlatforms[0].Architecture, wantedPlatforms[0].Variant, wantedPlatforms[0].OS)
}
// Serialize returns the list in a blob format.
// NOTE: Serialize() does not in general reproduce the original blob if this object was loaded from one, even if no modifications were made!
func (list *Schema2List) Serialize() ([]byte, error) {
buf, err := json.Marshal(list)
if err != nil {
return nil, fmt.Errorf("marshaling Schema2List %#v: %w", list, err)
}
return buf, nil
}
type Schema2List = manifest.Schema2ListPublic
// Schema2ListFromComponents creates a Schema2 manifest list instance from the
// supplied data.
func Schema2ListFromComponents(components []Schema2ManifestDescriptor) *Schema2List {
list := Schema2List{
SchemaVersion: 2,
MediaType: DockerV2ListMediaType,
Manifests: make([]Schema2ManifestDescriptor, len(components)),
}
for i, component := range components {
m := Schema2ManifestDescriptor{
Schema2Descriptor{
MediaType: component.MediaType,
Size: component.Size,
Digest: component.Digest,
URLs: dupStringSlice(component.URLs),
},
Schema2PlatformSpec{
Architecture: component.Platform.Architecture,
OS: component.Platform.OS,
OSVersion: component.Platform.OSVersion,
OSFeatures: dupStringSlice(component.Platform.OSFeatures),
Variant: component.Platform.Variant,
Features: dupStringSlice(component.Platform.Features),
},
}
list.Manifests[i] = m
}
return &list
return manifest.Schema2ListPublicFromComponents(components)
}
// Schema2ListClone creates a deep copy of the passed-in list.
func Schema2ListClone(list *Schema2List) *Schema2List {
return Schema2ListFromComponents(list.Manifests)
}
// ToOCI1Index returns the list encoded as an OCI1 index.
func (list *Schema2List) ToOCI1Index() (*OCI1Index, error) {
components := make([]imgspecv1.Descriptor, 0, len(list.Manifests))
for _, manifest := range list.Manifests {
converted := imgspecv1.Descriptor{
MediaType: manifest.MediaType,
Size: manifest.Size,
Digest: manifest.Digest,
URLs: dupStringSlice(manifest.URLs),
Platform: &imgspecv1.Platform{
OS: manifest.Platform.OS,
Architecture: manifest.Platform.Architecture,
OSFeatures: dupStringSlice(manifest.Platform.OSFeatures),
OSVersion: manifest.Platform.OSVersion,
Variant: manifest.Platform.Variant,
},
}
components = append(components, converted)
}
oci := OCI1IndexFromComponents(components, nil)
return oci, nil
}
// ToSchema2List returns the list encoded as a Schema2 list.
func (list *Schema2List) ToSchema2List() (*Schema2List, error) {
return Schema2ListClone(list), nil
return manifest.Schema2ListPublicClone(list)
}
// Schema2ListFromManifest creates a Schema2 manifest list instance from marshalled
// JSON, presumably generated by encoding a Schema2 manifest list.
func Schema2ListFromManifest(manifest []byte) (*Schema2List, error) {
list := Schema2List{
Manifests: []Schema2ManifestDescriptor{},
}
if err := json.Unmarshal(manifest, &list); err != nil {
return nil, fmt.Errorf("unmarshaling Schema2List %q: %w", string(manifest), err)
}
if err := validateUnambiguousManifestFormat(manifest, DockerV2ListMediaType,
allowedFieldManifests); err != nil {
return nil, err
}
return &list, nil
}
// Clone returns a deep copy of this list and its contents.
func (list *Schema2List) Clone() List {
return Schema2ListClone(list)
}
// ConvertToMIMEType converts the passed-in manifest list to a manifest
// list of the specified type.
func (list *Schema2List) ConvertToMIMEType(manifestMIMEType string) (List, error) {
switch normalized := NormalizedMIMEType(manifestMIMEType); normalized {
case DockerV2ListMediaType:
return list.Clone(), nil
case imgspecv1.MediaTypeImageIndex:
return list.ToOCI1Index()
case DockerV2Schema1MediaType, DockerV2Schema1SignedMediaType, imgspecv1.MediaTypeImageManifest, DockerV2Schema2MediaType:
return nil, fmt.Errorf("Can not convert manifest list to MIME type %q, which is not a list type", manifestMIMEType)
default:
// Note that this may not be reachable, NormalizedMIMEType has a default for unknown values.
return nil, fmt.Errorf("Unimplemented manifest list MIME type %s", manifestMIMEType)
}
func Schema2ListFromManifest(manifestBlob []byte) (*Schema2List, error) {
return manifest.Schema2ListPublicFromManifest(manifestBlob)
}

View File

@ -1,10 +1,7 @@
package manifest
import (
"fmt"
"github.com/containers/image/v5/types"
digest "github.com/opencontainers/go-digest"
"github.com/containers/image/v5/internal/manifest"
imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1"
)
@ -21,56 +18,14 @@ var (
// Callers can either use this abstract interface without understanding the details of the formats,
// or instantiate a specific implementation (e.g. manifest.OCI1Index) and access the public members
// directly.
type List interface {
// MIMEType returns the MIME type of this particular manifest list.
MIMEType() string
// Instances returns a list of the manifests that this list knows of, other than its own.
Instances() []digest.Digest
// Update information about the list's instances. The length of the passed-in slice must
// match the length of the list of instances which the list already contains, and every field
// must be specified.
UpdateInstances([]ListUpdate) error
// Instance returns the size and MIME type of a particular instance in the list.
Instance(digest.Digest) (ListUpdate, error)
// ChooseInstance selects which manifest is most appropriate for the platform described by the
// SystemContext, or for the current platform if the SystemContext doesn't specify any details.
ChooseInstance(ctx *types.SystemContext) (digest.Digest, error)
// Serialize returns the list in a blob format.
// NOTE: Serialize() does not in general reproduce the original blob if this object was loaded
// from, even if no modifications were made!
Serialize() ([]byte, error)
// ConvertToMIMEType returns the list rebuilt to the specified MIME type, or an error.
ConvertToMIMEType(mimeType string) (List, error)
// Clone returns a deep copy of this list and its contents.
Clone() List
}
type List = manifest.ListPublic
// ListUpdate includes the fields which a List's UpdateInstances() method will modify.
type ListUpdate struct {
Digest digest.Digest
Size int64
MediaType string
}
type ListUpdate = manifest.ListUpdate
// ListFromBlob parses a list of manifests.
func ListFromBlob(manifest []byte, manifestMIMEType string) (List, error) {
normalized := NormalizedMIMEType(manifestMIMEType)
switch normalized {
case DockerV2ListMediaType:
return Schema2ListFromManifest(manifest)
case imgspecv1.MediaTypeImageIndex:
return OCI1IndexFromManifest(manifest)
case DockerV2Schema1MediaType, DockerV2Schema1SignedMediaType, imgspecv1.MediaTypeImageManifest, DockerV2Schema2MediaType:
return nil, fmt.Errorf("Treating single images as manifest lists is not implemented")
}
return nil, fmt.Errorf("Unimplemented manifest list MIME type %s (normalized as %s)", manifestMIMEType, normalized)
func ListFromBlob(manifestBlob []byte, manifestMIMEType string) (List, error) {
return manifest.ListPublicFromBlob(manifestBlob, manifestMIMEType)
}
// ConvertListToMIMEType converts the passed-in manifest list to a manifest

View File

@ -1,10 +1,9 @@
package manifest
import (
"encoding/json"
"fmt"
internalManifest "github.com/containers/image/v5/internal/manifest"
"github.com/containers/image/v5/internal/manifest"
"github.com/containers/image/v5/types"
"github.com/containers/libtrust"
digest "github.com/opencontainers/go-digest"
@ -16,28 +15,28 @@ import (
// FIXME(runcom, mitr): should we have a mediatype pkg??
const (
// DockerV2Schema1MediaType MIME type represents Docker manifest schema 1
DockerV2Schema1MediaType = "application/vnd.docker.distribution.manifest.v1+json"
DockerV2Schema1MediaType = manifest.DockerV2Schema1MediaType
// DockerV2Schema1MediaType MIME type represents Docker manifest schema 1 with a JWS signature
DockerV2Schema1SignedMediaType = "application/vnd.docker.distribution.manifest.v1+prettyjws"
DockerV2Schema1SignedMediaType = manifest.DockerV2Schema1SignedMediaType
// DockerV2Schema2MediaType MIME type represents Docker manifest schema 2
DockerV2Schema2MediaType = "application/vnd.docker.distribution.manifest.v2+json"
DockerV2Schema2MediaType = manifest.DockerV2Schema2MediaType
// DockerV2Schema2ConfigMediaType is the MIME type used for schema 2 config blobs.
DockerV2Schema2ConfigMediaType = "application/vnd.docker.container.image.v1+json"
DockerV2Schema2ConfigMediaType = manifest.DockerV2Schema2ConfigMediaType
// DockerV2Schema2LayerMediaType is the MIME type used for schema 2 layers.
DockerV2Schema2LayerMediaType = "application/vnd.docker.image.rootfs.diff.tar.gzip"
DockerV2Schema2LayerMediaType = manifest.DockerV2Schema2LayerMediaType
// DockerV2SchemaLayerMediaTypeUncompressed is the mediaType used for uncompressed layers.
DockerV2SchemaLayerMediaTypeUncompressed = "application/vnd.docker.image.rootfs.diff.tar"
DockerV2SchemaLayerMediaTypeUncompressed = manifest.DockerV2SchemaLayerMediaTypeUncompressed
// DockerV2ListMediaType MIME type represents Docker manifest schema 2 list
DockerV2ListMediaType = "application/vnd.docker.distribution.manifest.list.v2+json"
DockerV2ListMediaType = manifest.DockerV2ListMediaType
// DockerV2Schema2ForeignLayerMediaType is the MIME type used for schema 2 foreign layers.
DockerV2Schema2ForeignLayerMediaType = "application/vnd.docker.image.rootfs.foreign.diff.tar"
DockerV2Schema2ForeignLayerMediaType = manifest.DockerV2Schema2ForeignLayerMediaType
// DockerV2Schema2ForeignLayerMediaType is the MIME type used for gzipped schema 2 foreign layers.
DockerV2Schema2ForeignLayerMediaTypeGzip = "application/vnd.docker.image.rootfs.foreign.diff.tar.gzip"
DockerV2Schema2ForeignLayerMediaTypeGzip = manifest.DockerV2Schema2ForeignLayerMediaTypeGzip
)
// NonImageArtifactError (detected via errors.As) is used when asking for an image-specific operation
// on an object which is not a “container image” in the standard sense (e.g. an OCI artifact)
type NonImageArtifactError = internalManifest.NonImageArtifactError
type NonImageArtifactError = manifest.NonImageArtifactError
// SupportedSchema2MediaType checks if the specified string is a supported Docker v2s2 media type.
func SupportedSchema2MediaType(m string) error {
@ -102,102 +101,21 @@ type LayerInfo struct {
// GuessMIMEType guesses MIME type of a manifest and returns it _if it is recognized_, or "" if unknown or unrecognized.
// FIXME? We should, in general, prefer out-of-band MIME type instead of blindly parsing the manifest,
// but we may not have such metadata available (e.g. when the manifest is a local file).
func GuessMIMEType(manifest []byte) string {
// A subset of manifest fields; the rest is silently ignored by json.Unmarshal.
// Also docker/distribution/manifest.Versioned.
meta := struct {
MediaType string `json:"mediaType"`
SchemaVersion int `json:"schemaVersion"`
Signatures interface{} `json:"signatures"`
}{}
if err := json.Unmarshal(manifest, &meta); err != nil {
return ""
}
switch meta.MediaType {
case DockerV2Schema2MediaType, DockerV2ListMediaType,
imgspecv1.MediaTypeImageManifest, imgspecv1.MediaTypeImageIndex: // A recognized type.
return meta.MediaType
}
// this is the only way the function can return DockerV2Schema1MediaType, and recognizing that is essential for stripping the JWS signatures = computing the correct manifest digest.
switch meta.SchemaVersion {
case 1:
if meta.Signatures != nil {
return DockerV2Schema1SignedMediaType
}
return DockerV2Schema1MediaType
case 2:
// Best effort to understand if this is an OCI image since mediaType
// wasn't in the manifest for OCI image-spec < 1.0.2.
// For docker v2s2 meta.MediaType should have been set. But given the data, this is our best guess.
ociMan := struct {
Config struct {
MediaType string `json:"mediaType"`
} `json:"config"`
}{}
if err := json.Unmarshal(manifest, &ociMan); err != nil {
return ""
}
switch ociMan.Config.MediaType {
case imgspecv1.MediaTypeImageConfig:
return imgspecv1.MediaTypeImageManifest
case DockerV2Schema2ConfigMediaType:
// This case should not happen since a Docker image
// must declare a top-level media type and
// `meta.MediaType` has already been checked.
return DockerV2Schema2MediaType
}
// Maybe an image index or an OCI artifact.
ociIndex := struct {
Manifests []imgspecv1.Descriptor `json:"manifests"`
}{}
if err := json.Unmarshal(manifest, &ociIndex); err != nil {
return ""
}
if len(ociIndex.Manifests) != 0 {
if ociMan.Config.MediaType == "" {
return imgspecv1.MediaTypeImageIndex
}
// FIXME: this is mixing media types of manifests and configs.
return ociMan.Config.MediaType
}
// It's most likely an OCI artifact with a custom config media
// type which is not (and cannot) be covered by the media-type
// checks cabove.
return imgspecv1.MediaTypeImageManifest
}
return ""
func GuessMIMEType(manifestBlob []byte) string {
return manifest.GuessMIMEType(manifestBlob)
}
// Digest returns the a digest of a docker manifest, with any necessary implied transformations like stripping v1s1 signatures.
func Digest(manifest []byte) (digest.Digest, error) {
if GuessMIMEType(manifest) == DockerV2Schema1SignedMediaType {
sig, err := libtrust.ParsePrettySignature(manifest, "signatures")
if err != nil {
return "", err
}
manifest, err = sig.Payload()
if err != nil {
// Coverage: This should never happen, libtrust's Payload() can fail only if joseBase64UrlDecode() fails, on a string
// that libtrust itself has josebase64UrlEncode()d
return "", err
}
}
return digest.FromBytes(manifest), nil
func Digest(manifestBlob []byte) (digest.Digest, error) {
return manifest.Digest(manifestBlob)
}
// MatchesDigest returns true iff the manifest matches expectedDigest.
// Error may be set if this returns false.
// Note that this is not doing ConstantTimeCompare; by the time we get here, the cryptographic signature must already have been verified,
// or we are not using a cryptographic channel and the attacker can modify the digest along with the manifest blob.
func MatchesDigest(manifest []byte, expectedDigest digest.Digest) (bool, error) {
// This should eventually support various digest types.
actualDigest, err := Digest(manifest)
if err != nil {
return false, err
}
return expectedDigest == actualDigest, nil
func MatchesDigest(manifestBlob []byte, expectedDigest digest.Digest) (bool, error) {
return manifest.MatchesDigest(manifestBlob, expectedDigest)
}
// AddDummyV2S1Signature adds an JWS signature with a temporary key (i.e. useless) to a v2s1 manifest.
@ -231,30 +149,7 @@ func MIMETypeSupportsEncryption(mimeType string) bool {
// NormalizedMIMEType returns the effective MIME type of a manifest MIME type returned by a server,
// centralizing various workarounds.
func NormalizedMIMEType(input string) string {
switch input {
// "application/json" is a valid v2s1 value per https://github.com/docker/distribution/blob/master/docs/spec/manifest-v2-1.md .
// This works for now, when nothing else seems to return "application/json"; if that were not true, the mapping/detection might
// need to happen within the ImageSource.
case "application/json":
return DockerV2Schema1SignedMediaType
case DockerV2Schema1MediaType, DockerV2Schema1SignedMediaType,
imgspecv1.MediaTypeImageManifest,
imgspecv1.MediaTypeImageIndex,
DockerV2Schema2MediaType,
DockerV2ListMediaType:
return input
default:
// If it's not a recognized manifest media type, or we have failed determining the type, we'll try one last time
// to deserialize using v2s1 as per https://github.com/docker/distribution/blob/master/manifests.go#L108
// and https://github.com/docker/distribution/blob/master/manifest/schema1/manifest.go#L50
//
// Crane registries can also return "text/plain", or pretty much anything else depending on a file extension “recognized” in the tag.
// This makes no real sense, but it happens
// because requests for manifests are
// redirected to a content distribution
// network which is configured that way. See https://bugzilla.redhat.com/show_bug.cgi?id=1389442
return DockerV2Schema1SignedMediaType
}
return manifest.NormalizedMIMEType(input)
}
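
After this refactor the public helpers above are thin wrappers over internal/manifest, but their observable behavior is unchanged. A short sketch of the public API they keep exposing:

package main

import (
    "fmt"

    "github.com/containers/image/v5/manifest"
)

func main() {
    blob := []byte(`{"schemaVersion": 2, "mediaType": "application/vnd.docker.distribution.manifest.v2+json"}`)

    fmt.Println(manifest.GuessMIMEType(blob)) // application/vnd.docker.distribution.manifest.v2+json

    d, err := manifest.Digest(blob)
    if err != nil {
        panic(err)
    }
    ok, _ := manifest.MatchesDigest(blob, d)
    fmt.Println(d, ok) // sha256:… true

    // Unknown or legacy values are normalized to the v2s1 signed type.
    fmt.Println(manifest.NormalizedMIMEType("application/json"))
}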
// FromBlob returns a Manifest instance for the specified manifest blob and the corresponding MIME type

View File

@ -5,6 +5,7 @@ import (
"fmt"
"strings"
"github.com/containers/image/v5/internal/manifest"
internalManifest "github.com/containers/image/v5/internal/manifest"
compressiontypes "github.com/containers/image/v5/pkg/compression/types"
"github.com/containers/image/v5/types"
@ -12,6 +13,7 @@ import (
"github.com/opencontainers/go-digest"
"github.com/opencontainers/image-spec/specs-go"
imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1"
"golang.org/x/exp/slices"
)
// BlobInfoFromOCI1Descriptor returns a types.BlobInfo based on the input OCI1 descriptor.
@ -49,13 +51,13 @@ func SupportedOCI1MediaType(m string) error {
}
// OCI1FromManifest creates an OCI1 manifest instance from a manifest blob.
func OCI1FromManifest(manifest []byte) (*OCI1, error) {
func OCI1FromManifest(manifestBlob []byte) (*OCI1, error) {
oci1 := OCI1{}
if err := json.Unmarshal(manifest, &oci1); err != nil {
if err := json.Unmarshal(manifestBlob, &oci1); err != nil {
return nil, err
}
if err := validateUnambiguousManifestFormat(manifest, imgspecv1.MediaTypeImageIndex,
allowedFieldConfig|allowedFieldLayers); err != nil {
if err := manifest.ValidateUnambiguousManifestFormat(manifestBlob, imgspecv1.MediaTypeImageIndex,
manifest.AllowedFieldConfig|manifest.AllowedFieldLayers); err != nil {
return nil, err
}
return &oci1, nil
@ -160,10 +162,8 @@ func (m *OCI1) UpdateLayerInfos(layerInfos []types.BlobInfo) error {
// getEncryptedMediaType will return the mediatype to its encrypted counterpart and return
// an error if the mediatype does not support encryption
func getEncryptedMediaType(mediatype string) (string, error) {
for _, s := range strings.Split(mediatype, "+")[1:] {
if s == "encrypted" {
return "", fmt.Errorf("unsupported mediaType: %v already encrypted", mediatype)
}
if slices.Contains(strings.Split(mediatype, "+")[1:], "encrypted") {
return "", fmt.Errorf("unsupported mediaType: %v already encrypted", mediatype)
}
unsuffixedMediatype := strings.Split(mediatype, "+")[0]
switch unsuffixedMediatype {
@ -178,7 +178,7 @@ func getEncryptedMediaType(mediatype string) (string, error) {
// an error if the mediatype does not support decryption
func getDecryptedMediaType(mediatype string) (string, error) {
if !strings.HasSuffix(mediatype, "+encrypted") {
return "", fmt.Errorf("unsupported mediaType to decrypt %v:", mediatype)
return "", fmt.Errorf("unsupported mediaType to decrypt: %v", mediatype)
}
return strings.TrimSuffix(mediatype, "+encrypted"), nil
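
getEncryptedMediaType and getDecryptedMediaType are unexported; the following is a standalone, simplified sketch of the "+encrypted" suffix convention they implement (the real helpers additionally restrict which base media types are accepted):

package main

import (
    "fmt"
    "strings"

    "golang.org/x/exp/slices"
)

// encryptMediaType appends "+encrypted", refusing media types that already carry it.
// Standalone sketch mirroring the unexported helpers above, not the c/image API.
func encryptMediaType(mt string) (string, error) {
    if slices.Contains(strings.Split(mt, "+")[1:], "encrypted") {
        return "", fmt.Errorf("%q is already encrypted", mt)
    }
    return mt + "+encrypted", nil
}

func decryptMediaType(mt string) (string, error) {
    if !strings.HasSuffix(mt, "+encrypted") {
        return "", fmt.Errorf("%q is not encrypted", mt)
    }
    return strings.TrimSuffix(mt, "+encrypted"), nil
}

func main() {
    enc, _ := encryptMediaType("application/vnd.oci.image.layer.v1.tar+gzip")
    fmt.Println(enc) // application/vnd.oci.image.layer.v1.tar+gzip+encrypted
    dec, _ := decryptMediaType(enc)
    fmt.Println(dec)
}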

View File

@ -1,232 +1,27 @@
package manifest
import (
"encoding/json"
"fmt"
"runtime"
platform "github.com/containers/image/v5/internal/pkg/platform"
"github.com/containers/image/v5/types"
"github.com/opencontainers/go-digest"
imgspec "github.com/opencontainers/image-spec/specs-go"
"github.com/containers/image/v5/internal/manifest"
imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1"
)
// OCI1Index is just an alias for the OCI index type, but one which we can
// provide methods for.
type OCI1Index struct {
imgspecv1.Index
}
// MIMEType returns the MIME type of this particular manifest index.
func (index *OCI1Index) MIMEType() string {
return imgspecv1.MediaTypeImageIndex
}
// Instances returns a slice of digests of the manifests that this index knows of.
func (index *OCI1Index) Instances() []digest.Digest {
results := make([]digest.Digest, len(index.Manifests))
for i, m := range index.Manifests {
results[i] = m.Digest
}
return results
}
// Instance returns the ListUpdate of a particular instance in the index.
func (index *OCI1Index) Instance(instanceDigest digest.Digest) (ListUpdate, error) {
for _, manifest := range index.Manifests {
if manifest.Digest == instanceDigest {
return ListUpdate{
Digest: manifest.Digest,
Size: manifest.Size,
MediaType: manifest.MediaType,
}, nil
}
}
return ListUpdate{}, fmt.Errorf("unable to find instance %s in OCI1Index", instanceDigest)
}
// UpdateInstances updates the sizes, digests, and media types of the manifests
// which the list catalogs.
func (index *OCI1Index) UpdateInstances(updates []ListUpdate) error {
if len(updates) != len(index.Manifests) {
return fmt.Errorf("incorrect number of update entries passed to OCI1Index.UpdateInstances: expected %d, got %d", len(index.Manifests), len(updates))
}
for i := range updates {
if err := updates[i].Digest.Validate(); err != nil {
return fmt.Errorf("update %d of %d passed to OCI1Index.UpdateInstances contained an invalid digest: %w", i+1, len(updates), err)
}
index.Manifests[i].Digest = updates[i].Digest
if updates[i].Size < 0 {
return fmt.Errorf("update %d of %d passed to OCI1Index.UpdateInstances had an invalid size (%d)", i+1, len(updates), updates[i].Size)
}
index.Manifests[i].Size = updates[i].Size
if updates[i].MediaType == "" {
return fmt.Errorf("update %d of %d passed to OCI1Index.UpdateInstances had no media type (was %q)", i+1, len(updates), index.Manifests[i].MediaType)
}
index.Manifests[i].MediaType = updates[i].MediaType
}
return nil
}
// ChooseInstance parses blob as an oci v1 manifest index, and returns the digest
// of the image which is appropriate for the current environment.
func (index *OCI1Index) ChooseInstance(ctx *types.SystemContext) (digest.Digest, error) {
wantedPlatforms, err := platform.WantedPlatforms(ctx)
if err != nil {
return "", fmt.Errorf("getting platform information %#v: %w", ctx, err)
}
for _, wantedPlatform := range wantedPlatforms {
for _, d := range index.Manifests {
if d.Platform == nil {
continue
}
imagePlatform := imgspecv1.Platform{
Architecture: d.Platform.Architecture,
OS: d.Platform.OS,
OSVersion: d.Platform.OSVersion,
OSFeatures: dupStringSlice(d.Platform.OSFeatures),
Variant: d.Platform.Variant,
}
if platform.MatchesPlatform(imagePlatform, wantedPlatform) {
return d.Digest, nil
}
}
}
for _, d := range index.Manifests {
if d.Platform == nil {
return d.Digest, nil
}
}
return "", fmt.Errorf("no image found in image index for architecture %s, variant %q, OS %s", wantedPlatforms[0].Architecture, wantedPlatforms[0].Variant, wantedPlatforms[0].OS)
}
// Serialize returns the index in a blob format.
// NOTE: Serialize() does not in general reproduce the original blob if this object was loaded from one, even if no modifications were made!
func (index *OCI1Index) Serialize() ([]byte, error) {
buf, err := json.Marshal(index)
if err != nil {
return nil, fmt.Errorf("marshaling OCI1Index %#v: %w", index, err)
}
return buf, nil
}
type OCI1Index = manifest.OCI1IndexPublic
// OCI1IndexFromComponents creates an OCI1 image index instance from the
// supplied data.
func OCI1IndexFromComponents(components []imgspecv1.Descriptor, annotations map[string]string) *OCI1Index {
index := OCI1Index{
imgspecv1.Index{
Versioned: imgspec.Versioned{SchemaVersion: 2},
MediaType: imgspecv1.MediaTypeImageIndex,
Manifests: make([]imgspecv1.Descriptor, len(components)),
Annotations: dupStringStringMap(annotations),
},
}
for i, component := range components {
var platform *imgspecv1.Platform
if component.Platform != nil {
platform = &imgspecv1.Platform{
Architecture: component.Platform.Architecture,
OS: component.Platform.OS,
OSVersion: component.Platform.OSVersion,
OSFeatures: dupStringSlice(component.Platform.OSFeatures),
Variant: component.Platform.Variant,
}
}
m := imgspecv1.Descriptor{
MediaType: component.MediaType,
Size: component.Size,
Digest: component.Digest,
URLs: dupStringSlice(component.URLs),
Annotations: dupStringStringMap(component.Annotations),
Platform: platform,
}
index.Manifests[i] = m
}
return &index
return manifest.OCI1IndexPublicFromComponents(components, annotations)
}
// OCI1IndexClone creates a deep copy of the passed-in index.
func OCI1IndexClone(index *OCI1Index) *OCI1Index {
return OCI1IndexFromComponents(index.Manifests, index.Annotations)
}
// ToOCI1Index returns the index encoded as an OCI1 index.
func (index *OCI1Index) ToOCI1Index() (*OCI1Index, error) {
return OCI1IndexClone(index), nil
}
// ToSchema2List returns the index encoded as a Schema2 list.
func (index *OCI1Index) ToSchema2List() (*Schema2List, error) {
components := make([]Schema2ManifestDescriptor, 0, len(index.Manifests))
for _, manifest := range index.Manifests {
platform := manifest.Platform
if platform == nil {
platform = &imgspecv1.Platform{
OS: runtime.GOOS,
Architecture: runtime.GOARCH,
}
}
converted := Schema2ManifestDescriptor{
Schema2Descriptor{
MediaType: manifest.MediaType,
Size: manifest.Size,
Digest: manifest.Digest,
URLs: dupStringSlice(manifest.URLs),
},
Schema2PlatformSpec{
OS: platform.OS,
Architecture: platform.Architecture,
OSFeatures: dupStringSlice(platform.OSFeatures),
OSVersion: platform.OSVersion,
Variant: platform.Variant,
},
}
components = append(components, converted)
}
s2 := Schema2ListFromComponents(components)
return s2, nil
return manifest.OCI1IndexPublicClone(index)
}
// OCI1IndexFromManifest creates an OCI1 manifest index instance from marshalled
// JSON, presumably generated by encoding an OCI1 manifest index.
func OCI1IndexFromManifest(manifest []byte) (*OCI1Index, error) {
index := OCI1Index{
Index: imgspecv1.Index{
Versioned: imgspec.Versioned{SchemaVersion: 2},
MediaType: imgspecv1.MediaTypeImageIndex,
Manifests: []imgspecv1.Descriptor{},
Annotations: make(map[string]string),
},
}
if err := json.Unmarshal(manifest, &index); err != nil {
return nil, fmt.Errorf("unmarshaling OCI1Index %q: %w", string(manifest), err)
}
if err := validateUnambiguousManifestFormat(manifest, imgspecv1.MediaTypeImageIndex,
allowedFieldManifests); err != nil {
return nil, err
}
return &index, nil
}
// Clone returns a deep copy of this list and its contents.
func (index *OCI1Index) Clone() List {
return OCI1IndexClone(index)
}
// ConvertToMIMEType converts the passed-in image index to a manifest list of
// the specified type.
func (index *OCI1Index) ConvertToMIMEType(manifestMIMEType string) (List, error) {
switch normalized := NormalizedMIMEType(manifestMIMEType); normalized {
case DockerV2ListMediaType:
return index.ToSchema2List()
case imgspecv1.MediaTypeImageIndex:
return index.Clone(), nil
case DockerV2Schema1MediaType, DockerV2Schema1SignedMediaType, imgspecv1.MediaTypeImageManifest, DockerV2Schema2MediaType:
return nil, fmt.Errorf("Can not convert image index to MIME type %q, which is not a list type", manifestMIMEType)
default:
// Note that this may not be reachable, NormalizedMIMEType has a default for unknown values.
return nil, fmt.Errorf("Unimplemented manifest MIME type %s", manifestMIMEType)
}
func OCI1IndexFromManifest(manifestBlob []byte) (*OCI1Index, error) {
return manifest.OCI1IndexPublicFromManifest(manifestBlob)
}

View File

@ -58,13 +58,7 @@ func splitPathAndImageWindows(reference string) (string, string) {
}
func splitPathAndImageNonWindows(reference string) (string, string) {
sep := strings.SplitN(reference, ":", 2)
path := sep[0]
var image string
if len(sep) == 2 {
image = sep[1]
}
path, image, _ := strings.Cut(reference, ":") // image is set to "" if there is no ":"
return path, image
}
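
strings.Cut(s, sep) returns the text before and after the first separator plus a found flag; several hunks in this commit (here, and the OpenShift and ostree references below) use it to replace strings.SplitN index juggling. For example:

package main

import (
    "fmt"
    "strings"
)

func main() {
    path, image, found := strings.Cut("/some/dir:mytag", ":")
    fmt.Println(path, image, found) // /some/dir mytag true

    path, image, found = strings.Cut("/some/dir", ":")
    fmt.Println(path, image == "", found) // /some/dir true false
}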

View File

@ -12,9 +12,9 @@ import (
"github.com/containers/image/v5/internal/imagedestination/impl"
"github.com/containers/image/v5/internal/imagedestination/stubs"
"github.com/containers/image/v5/internal/manifest"
"github.com/containers/image/v5/internal/private"
"github.com/containers/image/v5/internal/putblobdigest"
"github.com/containers/image/v5/manifest"
"github.com/containers/image/v5/types"
digest "github.com/opencontainers/go-digest"
imgspec "github.com/opencontainers/image-spec/specs-go"

View File

@ -12,8 +12,8 @@ import (
"github.com/containers/image/v5/internal/imagesource/impl"
"github.com/containers/image/v5/internal/imagesource/stubs"
"github.com/containers/image/v5/internal/manifest"
"github.com/containers/image/v5/internal/private"
"github.com/containers/image/v5/manifest"
"github.com/containers/image/v5/pkg/tlsclientconfig"
"github.com/containers/image/v5/types"
"github.com/docker/go-connections/tlsconfig"

View File

@ -100,7 +100,7 @@ func (ref ociReference) Transport() types.ImageTransport {
// StringWithinTransport returns a string representation of the reference, which MUST be such that
// reference.Transport().ParseReference(reference.StringWithinTransport()) returns an equivalent reference.
// NOTE: The returned string is not promised to be equal to the original input to ParseReference;
// e.g. default attribute values omitted by the user may be filled in in the return value, or vice versa.
// e.g. default attribute values omitted by the user may be filled in the return value, or vice versa.
// WARNING: Do not use the return value in the UI to describe an image, it does not contain the Transport().Name() prefix.
func (ref ociReference) StringWithinTransport() string {
return fmt.Sprintf("%s:%s", ref.dir, ref.image)

View File

@ -8,6 +8,7 @@ import (
"fmt"
"net"
"net/http"
"net/netip"
"net/url"
"os"
"path"
@ -19,6 +20,7 @@ import (
"github.com/containers/storage/pkg/homedir"
"github.com/imdario/mergo"
"github.com/sirupsen/logrus"
"golang.org/x/exp/slices"
"golang.org/x/net/http2"
"gopkg.in/yaml.v3"
)
@ -205,10 +207,7 @@ func (config *directClientConfig) ClientConfig() (*restConfig, error) {
if isConfigTransportTLS(*clientConfig) {
var err error
// REMOVED: Support for interactive fallback.
userAuthPartialConfig, err := getUserIdentificationPartialConfig(configAuthInfo)
if err != nil {
return nil, err
}
userAuthPartialConfig := getUserIdentificationPartialConfig(configAuthInfo)
if err = mergo.MergeWithOverwrite(clientConfig, userAuthPartialConfig); err != nil {
return nil, err
}
@ -255,7 +254,7 @@ func getServerIdentificationPartialConfig(configAuthInfo clientcmdAuthInfo, conf
// 2. configAuthInfo.auth-path (this file can contain information that conflicts with #1, and we want #1 to win the priority)
// 3. if there is not enough information to identify the user, load try the ~/.kubernetes_auth file
// 4. if there is not enough information to identify the user, prompt if possible
func getUserIdentificationPartialConfig(configAuthInfo clientcmdAuthInfo) (*restConfig, error) {
func getUserIdentificationPartialConfig(configAuthInfo clientcmdAuthInfo) *restConfig {
mergedConfig := &restConfig{}
// blindly overwrite existing values based on precedence
@ -274,7 +273,7 @@ func getUserIdentificationPartialConfig(configAuthInfo clientcmdAuthInfo) (*rest
}
// REMOVED: prompting for missing information.
return mergedConfig, nil
return mergedConfig
}
// ConfirmUsable is a modified copy of k8s.io/kubernetes/pkg/client/unversioned/clientcmd.DirectClientConfig.ConfirmUsable.
@ -873,11 +872,11 @@ func newProxierWithNoProxyCIDR(delegate func(req *http.Request) (*url.URL, error
noProxyEnv := os.Getenv("NO_PROXY")
noProxyRules := strings.Split(noProxyEnv, ",")
cidrs := []*net.IPNet{}
cidrs := []netip.Prefix{}
for _, noProxyRule := range noProxyRules {
_, cidr, _ := net.ParseCIDR(noProxyRule)
if cidr != nil {
cidrs = append(cidrs, cidr)
prefix, err := netip.ParsePrefix(noProxyRule)
if err == nil {
cidrs = append(cidrs, prefix)
}
}
@ -888,7 +887,7 @@ func newProxierWithNoProxyCIDR(delegate func(req *http.Request) (*url.URL, error
return func(req *http.Request) (*url.URL, error) {
host := req.URL.Host
// for some urls, the Host is already the host, not the host:port
if net.ParseIP(host) == nil {
if _, err := netip.ParseAddr(host); err != nil {
var err error
host, _, err = net.SplitHostPort(req.URL.Host)
if err != nil {
@ -896,15 +895,15 @@ func newProxierWithNoProxyCIDR(delegate func(req *http.Request) (*url.URL, error
}
}
ip := net.ParseIP(host)
if ip == nil {
ip, err := netip.ParseAddr(host)
if err != nil {
return delegate(req)
}
for _, cidr := range cidrs {
if cidr.Contains(ip) {
return nil, nil
}
if slices.ContainsFunc(cidrs, func(cidr netip.Prefix) bool {
return cidr.Contains(ip)
}) {
return nil, nil
}
return delegate(req)
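
The no-proxy matching above now parses rules with net/netip and checks membership with slices.ContainsFunc instead of net.ParseCIDR/ParseIP and a manual loop. A standalone sketch of the same check:

package main

import (
    "fmt"
    "net/netip"

    "golang.org/x/exp/slices"
)

func main() {
    noProxy := []string{"10.0.0.0/8", "192.168.1.0/24", "not-a-cidr"}

    var cidrs []netip.Prefix
    for _, rule := range noProxy {
        if prefix, err := netip.ParsePrefix(rule); err == nil {
            cidrs = append(cidrs, prefix) // invalid rules are silently skipped
        }
    }

    ip, err := netip.ParseAddr("10.1.2.3")
    if err != nil {
        fmt.Println("not an IP, would fall through to the delegate proxier")
        return
    }
    bypass := slices.ContainsFunc(cidrs, func(p netip.Prefix) bool { return p.Contains(ip) })
    fmt.Println(bypass) // true — 10.1.2.3 is inside 10.0.0.0/8
}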

View File

@ -146,11 +146,11 @@ func (c *openshiftClient) getImage(ctx context.Context, imageStreamImageName str
// convertDockerImageReference takes an image API DockerImageReference value and returns a reference we can actually use;
// currently OpenShift stores the cluster-internal service IPs here, which are unusable from the outside.
func (c *openshiftClient) convertDockerImageReference(ref string) (string, error) {
parts := strings.SplitN(ref, "/", 2)
if len(parts) != 2 {
_, repo, gotRepo := strings.Cut(ref, "/")
if !gotRepo {
return "", fmt.Errorf("Invalid format of docker reference %s: missing '/'", ref)
}
return reference.Domain(c.ref.dockerReference) + "/" + parts[1], nil
return reference.Domain(c.ref.dockerReference) + "/" + repo, nil
}
// These structs are subsets of github.com/openshift/origin/pkg/image/api/v1 and its dependencies.

View File

@ -17,10 +17,12 @@ import (
"github.com/containers/image/v5/internal/imagedestination/impl"
"github.com/containers/image/v5/internal/imagedestination/stubs"
"github.com/containers/image/v5/internal/private"
"github.com/containers/image/v5/internal/set"
"github.com/containers/image/v5/internal/signature"
"github.com/containers/image/v5/manifest"
"github.com/containers/image/v5/types"
"github.com/opencontainers/go-digest"
"golang.org/x/exp/slices"
)
type openshiftImageDestination struct {
@ -180,12 +182,11 @@ func (d *openshiftImageDestination) PutSignaturesWithFormat(ctx context.Context,
if err != nil {
return err
}
existingSigNames := map[string]struct{}{}
existingSigNames := set.New[string]()
for _, sig := range image.Signatures {
existingSigNames[sig.objectMeta.Name] = struct{}{}
existingSigNames.Add(sig.objectMeta.Name)
}
sigExists:
for _, newSigWithFormat := range signatures {
newSigSimple, ok := newSigWithFormat.(signature.SimpleSigning)
if !ok {
@ -193,10 +194,10 @@ sigExists:
}
newSig := newSigSimple.UntrustedSignature()
for _, existingSig := range image.Signatures {
if existingSig.Type == imageSignatureTypeAtomic && bytes.Equal(existingSig.Content, newSig) {
continue sigExists
}
if slices.ContainsFunc(image.Signatures, func(existingSig imageSignature) bool {
return existingSig.Type == imageSignatureTypeAtomic && bytes.Equal(existingSig.Content, newSig)
}) {
continue
}
// The API expect us to invent a new unique name. This is racy, but hopefully good enough.
@ -208,7 +209,7 @@ sigExists:
return fmt.Errorf("generating random signature len %d: %w", n, err)
}
signatureName = fmt.Sprintf("%s@%032x", imageStreamImageName, randBytes)
if _, ok := existingSigNames[signatureName]; !ok {
if !existingSigNames.Contains(signatureName) {
break
}
}

View File

@ -89,7 +89,7 @@ func (ref openshiftReference) Transport() types.ImageTransport {
// StringWithinTransport returns a string representation of the reference, which MUST be such that
// reference.Transport().ParseReference(reference.StringWithinTransport()) returns an equivalent reference.
// NOTE: The returned string is not promised to be equal to the original input to ParseReference;
// e.g. default attribute values omitted by the user may be filled in in the return value, or vice versa.
// e.g. default attribute values omitted by the user may be filled in the return value, or vice versa.
// WARNING: Do not use the return value in the UI to describe an image, it does not contain the Transport().Name() prefix.
func (ref openshiftReference) StringWithinTransport() string {
return reference.FamiliarString(ref.dockerReference)

View File

@ -75,12 +75,11 @@ type ostreeImageCloser struct {
func (t ostreeTransport) ParseReference(ref string) (types.ImageReference, error) {
var repo = ""
var image = ""
s := strings.SplitN(ref, "@/", 2)
if len(s) == 1 {
image, repo = s[0], defaultOSTreeRepo
image, repoPart, gotRepoPart := strings.Cut(ref, "@/")
if !gotRepoPart {
repo = defaultOSTreeRepo
} else {
image, repo = s[0], "/"+s[1]
repo = "/" + repoPart
}
return NewReference(image, repo)
@ -134,7 +133,7 @@ func (ref ostreeReference) Transport() types.ImageTransport {
// StringWithinTransport returns a string representation of the reference, which MUST be such that
// reference.Transport().ParseReference(reference.StringWithinTransport()) returns an equivalent reference.
// NOTE: The returned string is not promised to be equal to the original input to ParseReference;
// e.g. default attribute values omitted by the user may be filled in in the return value, or vice versa.
// e.g. default attribute values omitted by the user may be filled in the return value, or vice versa.
// WARNING: Do not use the return value in the UI to describe an image, it does not contain the Transport().Name() prefix.
func (ref ostreeReference) StringWithinTransport() string {
return fmt.Sprintf("%s@%s", ref.image, ref.repo)
@ -157,11 +156,11 @@ func (ref ostreeReference) PolicyConfigurationIdentity() string {
// It is STRONGLY recommended for the first element, if any, to be a prefix of PolicyConfigurationIdentity(),
// and each following element to be a prefix of the element preceding it.
func (ref ostreeReference) PolicyConfigurationNamespaces() []string {
s := strings.SplitN(ref.image, ":", 2)
if len(s) != 2 { // Coverage: Should never happen, NewReference above ensures ref.image has a :tag.
repo, _, gotTag := strings.Cut(ref.image, ":")
if !gotTag { // Coverage: Should never happen, NewReference above ensures ref.image has a :tag.
panic(fmt.Sprintf("Internal inconsistency: ref.image value %q does not have a :tag", ref.image))
}
name := s[0]
name := repo
res := []string{}
for {
res = append(res, fmt.Sprintf("%s:%s", ref.repo, name))

View File

@ -10,9 +10,9 @@ import (
"github.com/containers/image/v5/internal/image"
"github.com/containers/image/v5/internal/imagesource"
"github.com/containers/image/v5/internal/imagesource/impl"
"github.com/containers/image/v5/internal/manifest"
"github.com/containers/image/v5/internal/private"
"github.com/containers/image/v5/internal/signature"
"github.com/containers/image/v5/manifest"
"github.com/containers/image/v5/pkg/compression"
"github.com/containers/image/v5/transports"
"github.com/containers/image/v5/types"
@ -207,7 +207,7 @@ func streamChunksFromFile(streams chan io.ReadCloser, errs chan error, file io.R
break
}
s := signalCloseReader{
closed: make(chan interface{}),
closed: make(chan struct{}),
stream: io.LimitReader(file, int64(c.Length)),
}
streams <- s
@ -218,7 +218,7 @@ func streamChunksFromFile(streams chan io.ReadCloser, errs chan error, file io.R
}
type signalCloseReader struct {
closed chan interface{}
closed chan struct{}
stream io.Reader
}

View File

@ -6,6 +6,7 @@ import (
"time"
"github.com/containers/image/v5/internal/blobinfocache"
"github.com/containers/image/v5/internal/set"
"github.com/containers/image/v5/pkg/blobinfocache/internal/prioritize"
"github.com/containers/image/v5/types"
digest "github.com/opencontainers/go-digest"
@ -19,12 +20,12 @@ type locationKey struct {
blobDigest digest.Digest
}
// cache implements an in-memory-only BlobInfoCache
// cache implements an in-memory-only BlobInfoCache.
type cache struct {
mutex sync.Mutex
// The following fields can only be accessed with mutex held.
uncompressedDigests map[digest.Digest]digest.Digest
digestsByUncompressed map[digest.Digest]map[digest.Digest]struct{} // stores a set of digests for each uncompressed digest
digestsByUncompressed map[digest.Digest]*set.Set[digest.Digest] // stores a set of digests for each uncompressed digest
knownLocations map[locationKey]map[types.BICLocationReference]time.Time // stores last known existence time for each location reference
compressors map[digest.Digest]string // stores a compressor name, or blobinfocache.Unknown, for each digest
}
@ -44,7 +45,7 @@ func New() types.BlobInfoCache {
func new2() *cache {
return &cache{
uncompressedDigests: map[digest.Digest]digest.Digest{},
digestsByUncompressed: map[digest.Digest]map[digest.Digest]struct{}{},
digestsByUncompressed: map[digest.Digest]*set.Set[digest.Digest]{},
knownLocations: map[locationKey]map[types.BICLocationReference]time.Time{},
compressors: map[digest.Digest]string{},
}
@ -67,7 +68,7 @@ func (mem *cache) uncompressedDigestLocked(anyDigest digest.Digest) digest.Diges
// Presence in digestsByUncompressed implies that anyDigest must already refer to an uncompressed digest.
// This way we don't have to waste storage space with trivial (uncompressed, uncompressed) mappings
// when we already record a (compressed, uncompressed) pair.
if m, ok := mem.digestsByUncompressed[anyDigest]; ok && len(m) > 0 {
if s, ok := mem.digestsByUncompressed[anyDigest]; ok && !s.Empty() {
return anyDigest
}
return ""
@ -88,10 +89,10 @@ func (mem *cache) RecordDigestUncompressedPair(anyDigest digest.Digest, uncompre
anyDigestSet, ok := mem.digestsByUncompressed[uncompressed]
if !ok {
anyDigestSet = map[digest.Digest]struct{}{}
anyDigestSet = set.New[digest.Digest]()
mem.digestsByUncompressed[uncompressed] = anyDigestSet
}
anyDigestSet[anyDigest] = struct{}{} // Possibly writing the same struct{}{} presence marker again.
anyDigestSet.Add(anyDigest)
}
// RecordKnownLocation records that a blob with the specified digest exists within the specified (transport, scope) scope,
@ -171,10 +172,11 @@ func (mem *cache) candidateLocations(transport types.ImageTransport, scope types
var uncompressedDigest digest.Digest // = ""
if canSubstitute {
if uncompressedDigest = mem.uncompressedDigestLocked(primaryDigest); uncompressedDigest != "" {
otherDigests := mem.digestsByUncompressed[uncompressedDigest] // nil if not present in the map
for d := range otherDigests {
if d != primaryDigest && d != uncompressedDigest {
res = mem.appendReplacementCandidates(res, transport, scope, d, requireCompressionInfo)
if otherDigests, ok := mem.digestsByUncompressed[uncompressedDigest]; ok {
for _, d := range otherDigests.Values() {
if d != primaryDigest && d != uncompressedDigest {
res = mem.appendReplacementCandidates(res, transport, scope, d, requireCompressionInfo)
}
}
}
if uncompressedDigest != primaryDigest {
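Several files in this commit replace map[T]struct{} bookkeeping with the new internal/set package. Its implementation is not shown in this diff; a minimal sketch of a generics-based set exposing only the methods used here (New, NewWithValues, Add, Delete, Contains, Empty, Values) might look roughly like this:

package set

import "golang.org/x/exp/maps"

// Set is a minimal sketch; the real containers/image internal/set package may differ.
type Set[E comparable] struct {
	m map[E]struct{}
}

func New[E comparable]() *Set[E] {
	return &Set[E]{m: map[E]struct{}{}}
}

func NewWithValues[E comparable](values ...E) *Set[E] {
	s := New[E]()
	for _, v := range values {
		s.Add(v)
	}
	return s
}

func (s *Set[E]) Add(v E)           { s.m[v] = struct{}{} }
func (s *Set[E]) Delete(v E)        { delete(s.m, v) }
func (s *Set[E]) Contains(v E) bool { _, ok := s.m[v]; return ok }
func (s *Set[E]) Empty() bool       { return len(s.m) == 0 }
func (s *Set[E]) Values() []E       { return maps.Keys(s.m) }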

View File

@ -30,7 +30,7 @@ var (
// Zstd compression.
Zstd = internal.NewAlgorithm(types.ZstdAlgorithmName, types.ZstdAlgorithmName,
[]byte{0x28, 0xb5, 0x2f, 0xfd}, ZstdDecompressor, zstdCompressor)
// Zstd:chunked compression.
// ZstdChunked is a Zstd compression with chunk metadata which allows random access to individual files.
ZstdChunked = internal.NewAlgorithm(types.ZstdChunkedAlgorithmName, types.ZstdAlgorithmName, /* Note: InternalUnstableUndocumentedMIMEQuestionMark is not ZstdChunkedAlgorithmName */
nil, ZstdDecompressor, compressor.ZstdCompressor)

View File

@ -12,6 +12,7 @@ import (
"strings"
"github.com/containers/image/v5/docker/reference"
"github.com/containers/image/v5/internal/set"
"github.com/containers/image/v5/pkg/sysregistriesv2"
"github.com/containers/image/v5/types"
"github.com/containers/storage/pkg/homedir"
@ -139,10 +140,7 @@ func GetAllCredentials(sys *types.SystemContext) (map[string]types.DockerAuthCon
// possible sources, and then call `GetCredentials` on them. That
// prevents us from having to reverse engineer the logic in
// `GetCredentials`.
allKeys := make(map[string]bool)
addKey := func(s string) {
allKeys[s] = true
}
allKeys := set.New[string]()
// To use GetCredentials, we must at least convert the URL forms into host names.
// While we're at it, well also canonicalize docker.io to the standard format.
@ -166,14 +164,14 @@ func GetAllCredentials(sys *types.SystemContext) (map[string]types.DockerAuthCon
// direct mapping to a registry, so we can just
// walk the map.
for registry := range auths.CredHelpers {
addKey(registry)
allKeys.Add(registry)
}
for key := range auths.AuthConfigs {
key := normalizeAuthFileKey(key, path.legacyFormat)
if key == normalizedDockerIORegistry {
key = "docker.io"
}
addKey(key)
allKeys.Add(key)
}
}
// External helpers.
@ -188,7 +186,7 @@ func GetAllCredentials(sys *types.SystemContext) (map[string]types.DockerAuthCon
}
}
for registry := range creds {
addKey(registry)
allKeys.Add(registry)
}
}
}
@ -196,7 +194,7 @@ func GetAllCredentials(sys *types.SystemContext) (map[string]types.DockerAuthCon
// Now use `GetCredentials` to the specific auth configs for each
// previously listed registry.
authConfigs := make(map[string]types.DockerAuthConfig)
for key := range allKeys {
for _, key := range allKeys.Values() {
authConf, err := GetCredentials(sys, key)
if err != nil {
// Note: we rely on the logging in `GetCredentials`.
@ -394,17 +392,16 @@ func RemoveAuthentication(sys *types.SystemContext, key string) error {
if isNamespaced {
logrus.Debugf("Not removing credentials because namespaced keys are not supported for the credential helper: %s", helper)
return
} else {
err := deleteAuthFromCredHelper(helper, key)
if err == nil {
logrus.Debugf("Credentials for %q were deleted from credential helper %s", key, helper)
isLoggedIn = true
return
}
if credentials.IsErrCredentialsNotFoundMessage(err.Error()) {
logrus.Debugf("Not logged in to %s with credential helper %s", key, helper)
return
}
}
err := deleteAuthFromCredHelper(helper, key)
if err == nil {
logrus.Debugf("Credentials for %q were deleted from credential helper %s", key, helper)
isLoggedIn = true
return
}
if credentials.IsErrCredentialsNotFoundMessage(err.Error()) {
logrus.Debugf("Not logged in to %s with credential helper %s", key, helper)
return
}
multiErr = multierror.Append(multiErr, fmt.Errorf("removing credentials for %s from credential helper %s: %w", key, helper, err))
}
@ -759,8 +756,8 @@ func decodeDockerAuth(path, key string, conf dockerAuthConfig) (types.DockerAuth
return types.DockerAuthConfig{}, err
}
parts := strings.SplitN(string(decoded), ":", 2)
if len(parts) != 2 {
user, passwordPart, valid := strings.Cut(string(decoded), ":")
if !valid {
// if it's invalid just skip, as docker does
if len(decoded) > 0 { // Docker writes "auths": { "$host": {} } entries if a credential helper is used, dont warn about those
logrus.Warnf(`Error parsing the "auth" field of a credential entry %q in %q, missing semicolon`, key, path) // Dont include the text of decoded, because that might put secrets into a log.
@ -770,8 +767,7 @@ func decodeDockerAuth(path, key string, conf dockerAuthConfig) (types.DockerAuth
return types.DockerAuthConfig{}, nil
}
user := parts[0]
password := strings.Trim(parts[1], "\x00")
password := strings.Trim(passwordPart, "\x00")
return types.DockerAuthConfig{
Username: user,
Password: password,
@ -786,7 +782,7 @@ func normalizeAuthFileKey(key string, legacyFormat bool) string {
stripped = strings.TrimPrefix(stripped, "https://")
if legacyFormat || stripped != key {
stripped = strings.SplitN(stripped, "/", 2)[0]
stripped, _, _ = strings.Cut(stripped, "/")
}
return normalizeRegistry(stripped)

View File

@ -14,6 +14,7 @@ import (
"github.com/containers/storage/pkg/homedir"
"github.com/containers/storage/pkg/lockfile"
"github.com/sirupsen/logrus"
"golang.org/x/exp/maps"
)
// defaultShortNameMode is the default mode of registries.conf files if the
@ -308,9 +309,7 @@ func newShortNameAliasCache(path string, conf *shortNameAliasConf) (*shortNameAl
// updateWithConfigurationFrom updates c with configuration from updates.
// In case of conflict, updates is preferred.
func (c *shortNameAliasCache) updateWithConfigurationFrom(updates *shortNameAliasCache) {
for name, value := range updates.namedAliases {
c.namedAliases[name] = value
}
maps.Copy(c.namedAliases, updates.namedAliases)
}
func loadShortNameAliasConf(confPath string) (*shortNameAliasConf, *shortNameAliasCache, error) {
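The hand-written copy loop is replaced by maps.Copy from golang.org/x/exp/maps. A small standalone sketch of its behavior (later-copied keys overwrite existing ones, matching the "updates is preferred" rule stated above); the registry values are invented:

package main

import (
	"fmt"

	"golang.org/x/exp/maps"
)

func main() {
	dst := map[string]string{"fedora": "registry.fedoraproject.org/fedora"}
	src := map[string]string{
		"fedora": "quay.io/fedora/fedora",
		"ubi9":   "registry.access.redhat.com/ubi9",
	}

	// Copies every key/value from src into dst, overwriting on conflict.
	maps.Copy(dst, src)
	fmt.Println(dst["fedora"], dst["ubi9"])
}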

View File

@ -16,6 +16,7 @@ import (
"github.com/containers/storage/pkg/homedir"
"github.com/containers/storage/pkg/regexp"
"github.com/sirupsen/logrus"
"golang.org/x/exp/maps"
)
// systemRegistriesConfPath is the path to the system-wide registry
@ -1019,12 +1020,9 @@ func (c *parsedConfig) updateWithConfigurationFrom(updates *parsedConfig) {
// Go maps have a non-deterministic order when iterating the keys, so
// we dump them in a slice and sort it to enforce some order in
// Registries slice. Some consumers of c/image (e.g., CRI-O) log the
// the configuration where a non-deterministic order could easily cause
// configuration where a non-deterministic order could easily cause
// confusion.
prefixes := []string{}
for prefix := range registryMap {
prefixes = append(prefixes, prefix)
}
prefixes := maps.Keys(registryMap)
sort.Strings(prefixes)
c.partialV2.Registries = []Registry{}
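maps.Keys returns the keys in unspecified order, so the explicit sort.Strings call above is still what makes the Registries slice deterministic. A hedged, standalone sketch of the pattern with made-up data:

package main

import (
	"fmt"
	"sort"

	"golang.org/x/exp/maps"
)

func main() {
	registryMap := map[string]int{"quay.io": 1, "docker.io": 2, "registry.example.com": 3}

	prefixes := maps.Keys(registryMap) // key order is non-deterministic
	sort.Strings(prefixes)             // keep the result stable for consumers that log the config
	fmt.Println(prefixes)              // [docker.io quay.io registry.example.com]
}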

View File

@ -12,6 +12,7 @@ import (
"time"
"github.com/sirupsen/logrus"
"golang.org/x/exp/slices"
)
// SetupCertificates opens all .crt, .cert, and .key files in dir and appends / loads certs and key pairs as appropriate to tlsc
@ -80,12 +81,9 @@ func SetupCertificates(dir string, tlsc *tls.Config) error {
}
func hasFile(files []os.DirEntry, name string) bool {
for _, f := range files {
if f.Name() == name {
return true
}
}
return false
return slices.ContainsFunc(files, func(f os.DirEntry) bool {
return f.Name() == name
})
}
// NewTransport Creates a default transport
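hasFile now delegates to slices.ContainsFunc from golang.org/x/exp/slices; later hunks in this commit make the same move with slices.Contains. A brief illustrative sketch, not tied to this codebase:

package main

import (
	"fmt"
	"strings"

	"golang.org/x/exp/slices"
)

func main() {
	files := []string{"ca.crt", "client.cert", "client.key"}

	// Exact membership test.
	fmt.Println(slices.Contains(files, "ca.crt")) // true

	// Membership test with a predicate, replacing a hand-written loop.
	hasCert := slices.ContainsFunc(files, func(name string) bool {
		return strings.HasSuffix(name, ".cert")
	})
	fmt.Println(hasCert) // true
}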

View File

@ -87,7 +87,7 @@ func (ref sifReference) Transport() types.ImageTransport {
// StringWithinTransport returns a string representation of the reference, which MUST be such that
// reference.Transport().ParseReference(reference.StringWithinTransport()) returns an equivalent reference.
// NOTE: The returned string is not promised to be equal to the original input to ParseReference;
// e.g. default attribute values omitted by the user may be filled in in the return value, or vice versa.
// e.g. default attribute values omitted by the user may be filled in the return value, or vice versa.
// WARNING: Do not use the return value in the UI to describe an image, it does not contain the Transport().Name() prefix;
// instead, see transports.ImageName().
func (ref sifReference) StringWithinTransport() string {

View File

@ -12,6 +12,7 @@ import (
"github.com/containers/image/v5/signature/internal"
"github.com/sigstore/fulcio/pkg/certificate"
"github.com/sigstore/sigstore/pkg/cryptoutils"
"golang.org/x/exp/slices"
)
// fulcioTrustRoot contains policy allow validating Fulcio-issued certificates.
@ -109,7 +110,7 @@ func (f *fulcioTrustRoot) verifyFulcioCertificateAtTime(relevantTime time.Time,
//
// So, pragmatically, the ideal design seem to be to only do signatures from a trusted build system (which is, by definition,
// the arbiter of desired vs. malicious signatures) that maintains an audit log of performed signature operations; and that seems to
// make make the SCT (and all of Rekor apart from the trusted timestamp) unnecessary.
// make the SCT (and all of Rekor apart from the trusted timestamp) unnecessary.
// == Validate the recorded OIDC issuer
gotOIDCIssuer := false
@ -136,15 +137,7 @@ func (f *fulcioTrustRoot) verifyFulcioCertificateAtTime(relevantTime time.Time,
}
// == Validate the OIDC subject
foundEmail := false
// TO DO: Use slices.Contains after we update to Go 1.18
for _, certEmail := range untrustedCertificate.EmailAddresses {
if certEmail == f.subjectEmail {
foundEmail = true
break
}
}
if !foundEmail {
if !slices.Contains(untrustedCertificate.EmailAddresses, f.subjectEmail) {
return nil, internal.NewInvalidSignatureError(fmt.Sprintf("Required email %s not found (got %#v)",
f.subjectEmail,
untrustedCertificate.EmailAddresses))

View File

@ -5,6 +5,8 @@ import (
"encoding/json"
"fmt"
"io"
"github.com/containers/image/v5/internal/set"
)
// JSONFormatError is returned when JSON does not match expected format.
@ -20,8 +22,8 @@ func (err JSONFormatError) Error() string {
//
// The fieldResolver approach is useful for decoding the Policy.Transports map; using it for structs is a bit lazy,
// we could use reflection to automate this. Later?
func ParanoidUnmarshalJSONObject(data []byte, fieldResolver func(string) interface{}) error {
seenKeys := map[string]struct{}{}
func ParanoidUnmarshalJSONObject(data []byte, fieldResolver func(string) any) error {
seenKeys := set.New[string]()
dec := json.NewDecoder(bytes.NewReader(data))
t, err := dec.Token()
@ -45,10 +47,10 @@ func ParanoidUnmarshalJSONObject(data []byte, fieldResolver func(string) interfa
// Coverage: This should never happen, dec.Token() rejects non-string-literals in this state.
return JSONFormatError(fmt.Sprintf("Key string literal expected, got \"%s\"", t))
}
if _, ok := seenKeys[key]; ok {
if seenKeys.Contains(key) {
return JSONFormatError(fmt.Sprintf("Duplicate key \"%s\"", key))
}
seenKeys[key] = struct{}{}
seenKeys.Add(key)
valuePtr := fieldResolver(key)
if valuePtr == nil {
@ -68,11 +70,11 @@ func ParanoidUnmarshalJSONObject(data []byte, fieldResolver func(string) interfa
// ParanoidUnmarshalJSONObjectExactFields unmarshals data as a JSON object, but failing on the slightest unexpected aspect
// (including duplicated keys, unrecognized keys, and non-matching types). Each of the fields in exactFields
// must be present exactly once, and none other fields are accepted.
func ParanoidUnmarshalJSONObjectExactFields(data []byte, exactFields map[string]interface{}) error {
seenKeys := map[string]struct{}{}
if err := ParanoidUnmarshalJSONObject(data, func(key string) interface{} {
func ParanoidUnmarshalJSONObjectExactFields(data []byte, exactFields map[string]any) error {
seenKeys := set.New[string]()
if err := ParanoidUnmarshalJSONObject(data, func(key string) any {
if valuePtr, ok := exactFields[key]; ok {
seenKeys[key] = struct{}{}
seenKeys.Add(key)
return valuePtr
}
return nil
@ -80,7 +82,7 @@ func ParanoidUnmarshalJSONObjectExactFields(data []byte, exactFields map[string]
return err
}
for key := range exactFields {
if _, ok := seenKeys[key]; !ok {
if !seenKeys.Contains(key) {
return JSONFormatError(fmt.Sprintf(`Key "%s" missing in a JSON object`, key))
}
}
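The interface{} → any rewrites that run through the rest of this commit are purely cosmetic: any has been a predeclared alias for interface{} since Go 1.18, so map[string]any and map[string]interface{} are the identical type that these paranoid-unmarshal helpers accept. A tiny standalone illustration:

package main

import "fmt"

func main() {
	// any is a predeclared alias for interface{} (Go 1.18+); the two spellings
	// name the identical type, so the assignments below compile as-is.
	var asAny map[string]any = map[string]interface{}{"creator": "podman"}
	var asInterface map[string]interface{} = asAny
	fmt.Println(asAny["creator"], asInterface["creator"])
}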

View File

@ -28,7 +28,7 @@ type UntrustedRekorSET struct {
}
type UntrustedRekorPayload struct {
Body []byte // In cosign, this is an interface{}, but only a string works
Body []byte // In cosign, this is an any, but only a string works
IntegratedTime int64
LogIndex int64
LogID string
@ -51,7 +51,7 @@ func (s *UntrustedRekorSET) UnmarshalJSON(data []byte) error {
// strictUnmarshalJSON is UnmarshalJSON, except that it may return the internal JSONFormatError error type.
// Splitting it into a separate function allows us to do the JSONFormatError → InvalidSignatureError in a single place, the caller.
func (s *UntrustedRekorSET) strictUnmarshalJSON(data []byte) error {
return ParanoidUnmarshalJSONObjectExactFields(data, map[string]interface{}{
return ParanoidUnmarshalJSONObjectExactFields(data, map[string]any{
"SignedEntryTimestamp": &s.UntrustedSignedEntryTimestamp,
"Payload": &s.UntrustedPayload,
})
@ -63,7 +63,7 @@ var _ json.Marshaler = (*UntrustedRekorSET)(nil)
// MarshalJSON implements the json.Marshaler interface.
func (s UntrustedRekorSET) MarshalJSON() ([]byte, error) {
return json.Marshal(map[string]interface{}{
return json.Marshal(map[string]any{
"SignedEntryTimestamp": s.UntrustedSignedEntryTimestamp,
"Payload": s.UntrustedPayload,
})
@ -86,7 +86,7 @@ func (p *UntrustedRekorPayload) UnmarshalJSON(data []byte) error {
// strictUnmarshalJSON is UnmarshalJSON, except that it may return the internal JSONFormatError error type.
// Splitting it into a separate function allows us to do the JSONFormatError → InvalidSignatureError in a single place, the caller.
func (p *UntrustedRekorPayload) strictUnmarshalJSON(data []byte) error {
return ParanoidUnmarshalJSONObjectExactFields(data, map[string]interface{}{
return ParanoidUnmarshalJSONObjectExactFields(data, map[string]any{
"body": &p.Body,
"integratedTime": &p.IntegratedTime,
"logIndex": &p.LogIndex,
@ -100,7 +100,7 @@ var _ json.Marshaler = (*UntrustedRekorPayload)(nil)
// MarshalJSON implements the json.Marshaler interface.
func (p UntrustedRekorPayload) MarshalJSON() ([]byte, error) {
return json.Marshal(map[string]interface{}{
return json.Marshal(map[string]any{
"body": p.Body,
"integratedTime": p.IntegratedTime,
"logIndex": p.LogIndex,
@ -159,7 +159,7 @@ func VerifyRekorSET(publicKey *ecdsa.PublicKey, unverifiedRekorSET []byte, unver
}
hashedRekordV001Bytes, err := json.Marshal(hashedRekord.Spec)
if err != nil {
// Coverage: hashedRekord.Spec is an interface{} that was just unmarshaled,
// Coverage: hashedRekord.Spec is an any that was just unmarshaled,
// so this should never fail.
return time.Time{}, NewInvalidSignatureError(fmt.Sprintf("re-creating hashedrekord spec: %v", err))
}

View File

@ -55,19 +55,19 @@ func (s UntrustedSigstorePayload) MarshalJSON() ([]byte, error) {
if s.UntrustedDockerManifestDigest == "" || s.UntrustedDockerReference == "" {
return nil, errors.New("Unexpected empty signature content")
}
critical := map[string]interface{}{
critical := map[string]any{
"type": sigstoreSignatureType,
"image": map[string]string{"docker-manifest-digest": s.UntrustedDockerManifestDigest.String()},
"identity": map[string]string{"docker-reference": s.UntrustedDockerReference},
}
optional := map[string]interface{}{}
optional := map[string]any{}
if s.UntrustedCreatorID != nil {
optional["creator"] = *s.UntrustedCreatorID
}
if s.UntrustedTimestamp != nil {
optional["timestamp"] = *s.UntrustedTimestamp
}
signature := map[string]interface{}{
signature := map[string]any{
"critical": critical,
"optional": optional,
}
@ -92,7 +92,7 @@ func (s *UntrustedSigstorePayload) UnmarshalJSON(data []byte) error {
// Splitting it into a separate function allows us to do the JSONFormatError → InvalidSignatureError in a single place, the caller.
func (s *UntrustedSigstorePayload) strictUnmarshalJSON(data []byte) error {
var critical, optional json.RawMessage
if err := ParanoidUnmarshalJSONObjectExactFields(data, map[string]interface{}{
if err := ParanoidUnmarshalJSONObjectExactFields(data, map[string]any{
"critical": &critical,
"optional": &optional,
}); err != nil {
@ -104,7 +104,7 @@ func (s *UntrustedSigstorePayload) strictUnmarshalJSON(data []byte) error {
var gotCreatorID, gotTimestamp = false, false
// /usr/bin/cosign generates "optional": null if there are no user-specified annotations.
if !bytes.Equal(optional, []byte("null")) {
if err := ParanoidUnmarshalJSONObject(optional, func(key string) interface{} {
if err := ParanoidUnmarshalJSONObject(optional, func(key string) any {
switch key {
case "creator":
gotCreatorID = true
@ -113,7 +113,7 @@ func (s *UntrustedSigstorePayload) strictUnmarshalJSON(data []byte) error {
gotTimestamp = true
return &timestamp
default:
var ignore interface{}
var ignore any
return &ignore
}
}); err != nil {
@ -133,7 +133,7 @@ func (s *UntrustedSigstorePayload) strictUnmarshalJSON(data []byte) error {
var t string
var image, identity json.RawMessage
if err := ParanoidUnmarshalJSONObjectExactFields(critical, map[string]interface{}{
if err := ParanoidUnmarshalJSONObjectExactFields(critical, map[string]any{
"type": &t,
"image": &image,
"identity": &identity,
@ -145,14 +145,14 @@ func (s *UntrustedSigstorePayload) strictUnmarshalJSON(data []byte) error {
}
var digestString string
if err := ParanoidUnmarshalJSONObjectExactFields(image, map[string]interface{}{
if err := ParanoidUnmarshalJSONObjectExactFields(image, map[string]any{
"docker-manifest-digest": &digestString,
}); err != nil {
return err
}
s.UntrustedDockerManifestDigest = digest.Digest(digestString)
return ParanoidUnmarshalJSONObjectExactFields(identity, map[string]interface{}{
return ParanoidUnmarshalJSONObjectExactFields(identity, map[string]any{
"docker-reference": &s.UntrustedDockerReference,
})
}

View File

@ -104,7 +104,7 @@ var _ json.Unmarshaler = (*Policy)(nil)
func (p *Policy) UnmarshalJSON(data []byte) error {
*p = Policy{}
transports := policyTransportsMap{}
if err := internal.ParanoidUnmarshalJSONObject(data, func(key string) interface{} {
if err := internal.ParanoidUnmarshalJSONObject(data, func(key string) any {
switch key {
case "default":
return &p.Default
@ -135,7 +135,7 @@ func (m *policyTransportsMap) UnmarshalJSON(data []byte) error {
// We can't unmarshal directly into map values because it is not possible to take an address of a map value.
// So, use a temporary map of pointers-to-slices and convert.
tmpMap := map[string]*PolicyTransportScopes{}
if err := internal.ParanoidUnmarshalJSONObject(data, func(key string) interface{} {
if err := internal.ParanoidUnmarshalJSONObject(data, func(key string) any {
// transport can be nil
transport := transports.Get(key)
// internal.ParanoidUnmarshalJSONObject detects key duplication for us, check just to be safe.
@ -181,7 +181,7 @@ func (m *policyTransportScopesWithTransport) UnmarshalJSON(data []byte) error {
// We can't unmarshal directly into map values because it is not possible to take an address of a map value.
// So, use a temporary map of pointers-to-slices and convert.
tmpMap := map[string]*PolicyRequirements{}
if err := internal.ParanoidUnmarshalJSONObject(data, func(key string) interface{} {
if err := internal.ParanoidUnmarshalJSONObject(data, func(key string) any {
// internal.ParanoidUnmarshalJSONObject detects key duplication for us, check just to be safe.
if _, ok := tmpMap[key]; ok {
return nil
@ -271,7 +271,7 @@ var _ json.Unmarshaler = (*prInsecureAcceptAnything)(nil)
func (pr *prInsecureAcceptAnything) UnmarshalJSON(data []byte) error {
*pr = prInsecureAcceptAnything{}
var tmp prInsecureAcceptAnything
if err := internal.ParanoidUnmarshalJSONObjectExactFields(data, map[string]interface{}{
if err := internal.ParanoidUnmarshalJSONObjectExactFields(data, map[string]any{
"type": &tmp.Type,
}); err != nil {
return err
@ -301,7 +301,7 @@ var _ json.Unmarshaler = (*prReject)(nil)
func (pr *prReject) UnmarshalJSON(data []byte) error {
*pr = prReject{}
var tmp prReject
if err := internal.ParanoidUnmarshalJSONObjectExactFields(data, map[string]interface{}{
if err := internal.ParanoidUnmarshalJSONObjectExactFields(data, map[string]any{
"type": &tmp.Type,
}); err != nil {
return err
@ -384,7 +384,7 @@ func (pr *prSignedBy) UnmarshalJSON(data []byte) error {
var tmp prSignedBy
var gotKeyPath, gotKeyPaths, gotKeyData = false, false, false
var signedIdentity json.RawMessage
if err := internal.ParanoidUnmarshalJSONObject(data, func(key string) interface{} {
if err := internal.ParanoidUnmarshalJSONObject(data, func(key string) any {
switch key {
case "type":
return &tmp.Type
@ -495,7 +495,7 @@ func (pr *prSignedBaseLayer) UnmarshalJSON(data []byte) error {
*pr = prSignedBaseLayer{}
var tmp prSignedBaseLayer
var baseLayerIdentity json.RawMessage
if err := internal.ParanoidUnmarshalJSONObjectExactFields(data, map[string]interface{}{
if err := internal.ParanoidUnmarshalJSONObjectExactFields(data, map[string]any{
"type": &tmp.Type,
"baseLayerIdentity": &baseLayerIdentity,
}); err != nil {
@ -564,7 +564,7 @@ var _ json.Unmarshaler = (*prmMatchExact)(nil)
func (prm *prmMatchExact) UnmarshalJSON(data []byte) error {
*prm = prmMatchExact{}
var tmp prmMatchExact
if err := internal.ParanoidUnmarshalJSONObjectExactFields(data, map[string]interface{}{
if err := internal.ParanoidUnmarshalJSONObjectExactFields(data, map[string]any{
"type": &tmp.Type,
}); err != nil {
return err
@ -594,7 +594,7 @@ var _ json.Unmarshaler = (*prmMatchRepoDigestOrExact)(nil)
func (prm *prmMatchRepoDigestOrExact) UnmarshalJSON(data []byte) error {
*prm = prmMatchRepoDigestOrExact{}
var tmp prmMatchRepoDigestOrExact
if err := internal.ParanoidUnmarshalJSONObjectExactFields(data, map[string]interface{}{
if err := internal.ParanoidUnmarshalJSONObjectExactFields(data, map[string]any{
"type": &tmp.Type,
}); err != nil {
return err
@ -624,7 +624,7 @@ var _ json.Unmarshaler = (*prmMatchRepository)(nil)
func (prm *prmMatchRepository) UnmarshalJSON(data []byte) error {
*prm = prmMatchRepository{}
var tmp prmMatchRepository
if err := internal.ParanoidUnmarshalJSONObjectExactFields(data, map[string]interface{}{
if err := internal.ParanoidUnmarshalJSONObjectExactFields(data, map[string]any{
"type": &tmp.Type,
}); err != nil {
return err
@ -664,7 +664,7 @@ var _ json.Unmarshaler = (*prmExactReference)(nil)
func (prm *prmExactReference) UnmarshalJSON(data []byte) error {
*prm = prmExactReference{}
var tmp prmExactReference
if err := internal.ParanoidUnmarshalJSONObjectExactFields(data, map[string]interface{}{
if err := internal.ParanoidUnmarshalJSONObjectExactFields(data, map[string]any{
"type": &tmp.Type,
"dockerReference": &tmp.DockerReference,
}); err != nil {
@ -706,7 +706,7 @@ var _ json.Unmarshaler = (*prmExactRepository)(nil)
func (prm *prmExactRepository) UnmarshalJSON(data []byte) error {
*prm = prmExactRepository{}
var tmp prmExactRepository
if err := internal.ParanoidUnmarshalJSONObjectExactFields(data, map[string]interface{}{
if err := internal.ParanoidUnmarshalJSONObjectExactFields(data, map[string]any{
"type": &tmp.Type,
"dockerRepository": &tmp.DockerRepository,
}); err != nil {
@ -778,7 +778,7 @@ var _ json.Unmarshaler = (*prmRemapIdentity)(nil)
func (prm *prmRemapIdentity) UnmarshalJSON(data []byte) error {
*prm = prmRemapIdentity{}
var tmp prmRemapIdentity
if err := internal.ParanoidUnmarshalJSONObjectExactFields(data, map[string]interface{}{
if err := internal.ParanoidUnmarshalJSONObjectExactFields(data, map[string]any{
"type": &tmp.Type,
"prefix": &tmp.Prefix,
"signedPrefix": &tmp.SignedPrefix,

View File

@ -147,7 +147,7 @@ func (pr *prSigstoreSigned) UnmarshalJSON(data []byte) error {
var gotKeyPath, gotKeyData, gotFulcio, gotRekorPublicKeyPath, gotRekorPublicKeyData bool
var fulcio prSigstoreSignedFulcio
var signedIdentity json.RawMessage
if err := internal.ParanoidUnmarshalJSONObject(data, func(key string) interface{} {
if err := internal.ParanoidUnmarshalJSONObject(data, func(key string) any {
switch key {
case "type":
return &tmp.Type
@ -298,7 +298,7 @@ func (f *prSigstoreSignedFulcio) UnmarshalJSON(data []byte) error {
*f = prSigstoreSignedFulcio{}
var tmp prSigstoreSignedFulcio
var gotCAPath, gotCAData, gotOIDCIssuer, gotSubjectEmail bool // = false...
if err := internal.ParanoidUnmarshalJSONObject(data, func(key string) interface{} {
if err := internal.ParanoidUnmarshalJSONObject(data, func(key string) any {
switch key {
case "caPath":
gotCAPath = true

View File

@ -46,7 +46,7 @@ type PolicyRequirement interface {
// - sarRejected if the signature has not been verified;
// in that case error must be non-nil, and should be an PolicyRequirementError if evaluation
// succeeded but the result was rejection.
// - sarUnknown if if this PolicyRequirement does not deal with signatures.
// - sarUnknown if this PolicyRequirement does not deal with signatures.
// NOTE: sarUnknown should not be returned if this PolicyRequirement should make a decision but something failed.
// Returning sarUnknown and a non-nil error value is invalid.
// WARNING: This makes the signature contents acceptable for further processing,

View File

@ -12,6 +12,7 @@ import (
"github.com/containers/image/v5/internal/private"
"github.com/containers/image/v5/manifest"
digest "github.com/opencontainers/go-digest"
"golang.org/x/exp/slices"
)
func (pr *prSignedBy) isSignatureAuthorAccepted(ctx context.Context, image private.UnparsedImage, sig []byte) (signatureAcceptanceResult, *Signature, error) {
@ -67,10 +68,8 @@ func (pr *prSignedBy) isSignatureAuthorAccepted(ctx context.Context, image priva
signature, err := verifyAndExtractSignature(mech, sig, signatureAcceptanceRules{
validateKeyIdentity: func(keyIdentity string) error {
for _, trustedIdentity := range trustedIdentities {
if keyIdentity == trustedIdentity {
return nil
}
if slices.Contains(trustedIdentities, keyIdentity) {
return nil
}
// Coverage: We use a private GPG home directory and only import trusted keys, so this should
// not be reachable.

View File

@ -33,12 +33,12 @@ import (
// limitations under the License.
const (
// from sigstore/cosign/pkg/cosign.sigstorePrivateKeyPemType
// from sigstore/cosign/pkg/cosign.sigstorePrivateKeyPemType.
sigstorePrivateKeyPemType = "ENCRYPTED COSIGN PRIVATE KEY"
)
// from sigstore/cosign/pkg/cosign.loadPrivateKey
// FIXME: Do we need all of these key formats, and all of those
// FIXME: Do we need all of these key formats?
func loadPrivateKey(key []byte, pass []byte) (signature.SignerVerifier, error) {
// Decrypt first
p, _ := pem.Decode(key)

View File

@ -17,7 +17,7 @@ func leveledLoggerForLogrus(logger *logrus.Logger) retryablehttp.LeveledLogger {
}
// log is the actual conversion implementation
func (l *leveledLogger) log(level logrus.Level, msg string, keysAndValues []interface{}) {
func (l *leveledLogger) log(level logrus.Level, msg string, keysAndValues []any) {
fields := logrus.Fields{}
for i := 0; i < len(keysAndValues)-1; i += 2 {
key := keysAndValues[i]
@ -32,21 +32,21 @@ func (l *leveledLogger) log(level logrus.Level, msg string, keysAndValues []inte
}
// Debug implements retryablehttp.LeveledLogger
func (l *leveledLogger) Debug(msg string, keysAndValues ...interface{}) {
func (l *leveledLogger) Debug(msg string, keysAndValues ...any) {
l.log(logrus.DebugLevel, msg, keysAndValues)
}
// Error implements retryablehttp.LeveledLogger
func (l *leveledLogger) Error(msg string, keysAndValues ...interface{}) {
func (l *leveledLogger) Error(msg string, keysAndValues ...any) {
l.log(logrus.ErrorLevel, msg, keysAndValues)
}
// Info implements retryablehttp.LeveledLogger
func (l *leveledLogger) Info(msg string, keysAndValues ...interface{}) {
func (l *leveledLogger) Info(msg string, keysAndValues ...any) {
l.log(logrus.InfoLevel, msg, keysAndValues)
}
// Warn implements retryablehttp.LeveledLogger
func (l *leveledLogger) Warn(msg string, keysAndValues ...interface{}) {
func (l *leveledLogger) Warn(msg string, keysAndValues ...any) {
l.log(logrus.WarnLevel, msg, keysAndValues)
}
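The adapter above satisfies retryablehttp.LeveledLogger, whose methods receive alternating key/value pairs that the log helper folds into logrus fields. A rough usage sketch under stated assumptions: the package name is a placeholder, and since leveledLoggerForLogrus is unexported the caller must live alongside it.

package samepkg // hypothetical name; must be the same package as the adapter above

import (
	"github.com/hashicorp/go-retryablehttp"
	"github.com/sirupsen/logrus"
)

func newRetryingClient() *retryablehttp.Client {
	client := retryablehttp.NewClient()
	// Route the retry library's leveled logging through logrus; key/value pairs
	// such as ("attempt", 2) become logrus fields on the emitted entry.
	client.Logger = leveledLoggerForLogrus(logrus.StandardLogger())
	return client
}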

View File

@ -81,19 +81,19 @@ func (s untrustedSignature) MarshalJSON() ([]byte, error) {
if s.UntrustedDockerManifestDigest == "" || s.UntrustedDockerReference == "" {
return nil, errors.New("Unexpected empty signature content")
}
critical := map[string]interface{}{
critical := map[string]any{
"type": signatureType,
"image": map[string]string{"docker-manifest-digest": s.UntrustedDockerManifestDigest.String()},
"identity": map[string]string{"docker-reference": s.UntrustedDockerReference},
}
optional := map[string]interface{}{}
optional := map[string]any{}
if s.UntrustedCreatorID != nil {
optional["creator"] = *s.UntrustedCreatorID
}
if s.UntrustedTimestamp != nil {
optional["timestamp"] = *s.UntrustedTimestamp
}
signature := map[string]interface{}{
signature := map[string]any{
"critical": critical,
"optional": optional,
}
@ -118,7 +118,7 @@ func (s *untrustedSignature) UnmarshalJSON(data []byte) error {
// Splitting it into a separate function allows us to do the internal.JSONFormatError → InvalidSignatureError in a single place, the caller.
func (s *untrustedSignature) strictUnmarshalJSON(data []byte) error {
var critical, optional json.RawMessage
if err := internal.ParanoidUnmarshalJSONObjectExactFields(data, map[string]interface{}{
if err := internal.ParanoidUnmarshalJSONObjectExactFields(data, map[string]any{
"critical": &critical,
"optional": &optional,
}); err != nil {
@ -128,7 +128,7 @@ func (s *untrustedSignature) strictUnmarshalJSON(data []byte) error {
var creatorID string
var timestamp float64
var gotCreatorID, gotTimestamp = false, false
if err := internal.ParanoidUnmarshalJSONObject(optional, func(key string) interface{} {
if err := internal.ParanoidUnmarshalJSONObject(optional, func(key string) any {
switch key {
case "creator":
gotCreatorID = true
@ -137,7 +137,7 @@ func (s *untrustedSignature) strictUnmarshalJSON(data []byte) error {
gotTimestamp = true
return &timestamp
default:
var ignore interface{}
var ignore any
return &ignore
}
}); err != nil {
@ -156,7 +156,7 @@ func (s *untrustedSignature) strictUnmarshalJSON(data []byte) error {
var t string
var image, identity json.RawMessage
if err := internal.ParanoidUnmarshalJSONObjectExactFields(critical, map[string]interface{}{
if err := internal.ParanoidUnmarshalJSONObjectExactFields(critical, map[string]any{
"type": &t,
"image": &image,
"identity": &identity,
@ -168,14 +168,14 @@ func (s *untrustedSignature) strictUnmarshalJSON(data []byte) error {
}
var digestString string
if err := internal.ParanoidUnmarshalJSONObjectExactFields(image, map[string]interface{}{
if err := internal.ParanoidUnmarshalJSONObjectExactFields(image, map[string]any{
"docker-manifest-digest": &digestString,
}); err != nil {
return err
}
s.UntrustedDockerManifestDigest = digest.Digest(digestString)
return internal.ParanoidUnmarshalJSONObjectExactFields(identity, map[string]interface{}{
return internal.ParanoidUnmarshalJSONObjectExactFields(identity, map[string]any{
"docker-reference": &s.UntrustedDockerReference,
})
}

View File

@ -21,6 +21,7 @@ import (
"github.com/containers/image/v5/internal/imagedestination/stubs"
"github.com/containers/image/v5/internal/private"
"github.com/containers/image/v5/internal/putblobdigest"
"github.com/containers/image/v5/internal/set"
"github.com/containers/image/v5/internal/signature"
"github.com/containers/image/v5/internal/tmpdir"
"github.com/containers/image/v5/manifest"
@ -34,6 +35,7 @@ import (
digest "github.com/opencontainers/go-digest"
imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1"
"github.com/sirupsen/logrus"
"golang.org/x/exp/slices"
)
var (
@ -242,7 +244,7 @@ type zstdFetcher struct {
// GetBlobAt converts from chunked.GetBlobAt to BlobChunkAccessor.GetBlobAt.
func (f *zstdFetcher) GetBlobAt(chunks []chunked.ImageSourceChunk) (chan io.ReadCloser, chan error, error) {
var newChunks []private.ImageSourceChunk
newChunks := make([]private.ImageSourceChunk, 0, len(chunks))
for _, v := range chunks {
i := private.ImageSourceChunk{
Offset: v.Offset,
@ -795,14 +797,14 @@ func (s *storageImageDestination) Commit(ctx context.Context, unparsedToplevel t
// Add the non-layer blobs as data items. Since we only share layers, they should all be in files, so
// we just need to screen out the ones that are actually layers to get the list of non-layers.
dataBlobs := make(map[digest.Digest]struct{})
dataBlobs := set.New[digest.Digest]()
for blob := range s.filenames {
dataBlobs[blob] = struct{}{}
dataBlobs.Add(blob)
}
for _, layerBlob := range layerBlobs {
delete(dataBlobs, layerBlob.Digest)
dataBlobs.Delete(layerBlob.Digest)
}
for blob := range dataBlobs {
for _, blob := range dataBlobs.Values() {
v, err := os.ReadFile(s.filenames[blob])
if err != nil {
return fmt.Errorf("copying non-layer blob %q to image: %w", blob, err)
@ -883,9 +885,7 @@ func (s *storageImageDestination) PutManifest(ctx context.Context, manifestBlob
if err != nil {
return err
}
newBlob := make([]byte, len(manifestBlob))
copy(newBlob, manifestBlob)
s.manifest = newBlob
s.manifest = slices.Clone(manifestBlob)
s.manifestDigest = digest
return nil
}
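slices.Clone replaces the manual make-and-copy idiom for taking an independent copy of the manifest bytes. A standalone sketch of the equivalence, with an invented manifest:

package main

import (
	"fmt"

	"golang.org/x/exp/slices"
)

func main() {
	manifestBlob := []byte(`{"schemaVersion": 2}`)

	// Old idiom: allocate and copy by hand.
	oldCopy := make([]byte, len(manifestBlob))
	copy(oldCopy, manifestBlob)

	// New idiom: one call, same result, still an independent backing array.
	newCopy := slices.Clone(manifestBlob)

	manifestBlob[0] = 'X'
	fmt.Println(string(oldCopy) == string(newCopy)) // true; neither copy sees the mutation
}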

View File

@ -14,6 +14,7 @@ import (
"github.com/containers/storage"
digest "github.com/opencontainers/go-digest"
"github.com/sirupsen/logrus"
"golang.org/x/exp/slices"
)
// A storageReference holds an arbitrary name and/or an ID, which is a 32-byte
@ -52,17 +53,15 @@ func newReference(transport storageTransport, named reference.Named, id string)
// imageMatchesRepo returns true iff image.Names contains an element with the same repo as ref
func imageMatchesRepo(image *storage.Image, ref reference.Named) bool {
repo := ref.Name()
for _, name := range image.Names {
if named, err := reference.ParseNormalizedNamed(name); err == nil {
if named.Name() == repo {
return true
}
return slices.ContainsFunc(image.Names, func(name string) bool {
if named, err := reference.ParseNormalizedNamed(name); err == nil && named.Name() == repo {
return true
}
}
return false
return false
})
}
// multiArchImageMatchesSystemContext returns true if if the passed-in image both contains a
// multiArchImageMatchesSystemContext returns true if the passed-in image both contains a
// multi-arch manifest that matches the passed-in digest, and the image is the per-platform
// image instance that matches sys.
//
@ -170,11 +169,9 @@ func (s *storageReference) resolveImage(sys *types.SystemContext) (*storage.Imag
// sake of older consumers that don't know there's a whole list in there now.
if s.named != nil {
if digested, ok := s.named.(reference.Digested); ok {
for _, digest := range loadedImage.Digests {
if digest == digested.Digest() {
loadedImage.Digest = digest
break
}
digest := digested.Digest()
if slices.Contains(loadedImage.Digests, digest) {
loadedImage.Digest = digest
}
}
}
@ -207,10 +204,10 @@ func (s storageReference) StringWithinTransport() string {
}
res := "[" + s.transport.store.GraphDriverName() + "@" + s.transport.store.GraphRoot() + "+" + s.transport.store.RunRoot() + optionsList + "]"
if s.named != nil {
res = res + s.named.String()
res += s.named.String()
}
if s.id != "" {
res = res + "@" + s.id
res += "@" + s.id
}
return res
}
@ -218,10 +215,10 @@ func (s storageReference) StringWithinTransport() string {
func (s storageReference) PolicyConfigurationIdentity() string {
res := "[" + s.transport.store.GraphDriverName() + "@" + s.transport.store.GraphRoot() + "]"
if s.named != nil {
res = res + s.named.String()
res += s.named.String()
}
if s.id != "" {
res = res + "@" + s.id
res += "@" + s.id
}
return res
}

View File

@ -186,7 +186,7 @@ func (s *storageImageSource) getBlobAndLayerID(digest digest.Digest, layers []st
}
// GetManifest() reads the image's manifest.
func (s *storageImageSource) GetManifest(ctx context.Context, instanceDigest *digest.Digest) (manifestBlob []byte, MIMEType string, err error) {
func (s *storageImageSource) GetManifest(ctx context.Context, instanceDigest *digest.Digest) (manifestBlob []byte, mimeType string, err error) {
if instanceDigest != nil {
key := manifestBigDataKey(*instanceDigest)
blob, err := s.imageRef.transport.store.ImageBigData(s.image.ID, key)

View File

@ -234,35 +234,30 @@ func (s *storageTransport) ParseReference(reference string) (types.ImageReferenc
reference = reference[closeIndex+1:]
// Peel off a "driver@" from the start.
driverInfo := ""
driverSplit := strings.SplitN(storeSpec, "@", 2)
if len(driverSplit) != 2 {
driverPart1, driverPart2, gotDriver := strings.Cut(storeSpec, "@")
if !gotDriver {
storeSpec = driverPart1
if storeSpec == "" {
return nil, ErrInvalidReference
}
} else {
driverInfo = driverSplit[0]
driverInfo = driverPart1
if driverInfo == "" {
return nil, ErrInvalidReference
}
storeSpec = driverSplit[1]
storeSpec = driverPart2
if storeSpec == "" {
return nil, ErrInvalidReference
}
}
// Peel off a ":options" from the end.
var options []string
optionsSplit := strings.SplitN(storeSpec, ":", 2)
if len(optionsSplit) == 2 {
options = strings.Split(optionsSplit[1], ",")
storeSpec = optionsSplit[0]
storeSpec, optionsPart, gotOptions := strings.Cut(storeSpec, ":")
if gotOptions {
options = strings.Split(optionsPart, ",")
}
// Peel off a "+runroot" from the new end.
runRootInfo := ""
runRootSplit := strings.SplitN(storeSpec, "+", 2)
if len(runRootSplit) == 2 {
runRootInfo = runRootSplit[1]
storeSpec = runRootSplit[0]
}
storeSpec, runRootInfo, _ := strings.Cut(storeSpec, "+") // runRootInfo is "" if there is no "+"
// The rest is our graph root.
rootInfo := storeSpec
// Check that any paths are absolute paths.
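The store-specifier parsing above peels "driver@", ":options", and "+runroot" pieces off one by one; rewritten with strings.Cut, each peel is a single call. A simplified, standalone sketch of the same peeling technique on an invented specifier (without the validation the real ParseReference performs):

package main

import (
	"fmt"
	"strings"
)

func main() {
	storeSpec := "overlay@/var/lib/containers/storage+/run/containers/storage:mountopt=nodev"

	// Peel "driver@" off the front.
	driver, rest, hasDriver := strings.Cut(storeSpec, "@")
	if !hasDriver {
		driver, rest = "", storeSpec
	}

	// Peel ":options" off the end of what remains; options is "" when there is no ":".
	rest, options, _ := strings.Cut(rest, ":")

	// Peel "+runroot"; whatever is left is the graph root.
	graphRoot, runRoot, _ := strings.Cut(rest, "+")

	fmt.Println(driver, graphRoot, runRoot, options)
	// overlay /var/lib/containers/storage /run/containers/storage mountopt=nodev
}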

View File

@ -9,8 +9,8 @@ import (
"github.com/containers/image/v5/docker/reference"
"github.com/containers/image/v5/internal/image"
"github.com/containers/image/v5/types"
imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1"
"golang.org/x/exp/maps"
)
// ConfigUpdater is an interface that ImageReferences for "tarball" images also
@ -35,9 +35,7 @@ func (r *tarballReference) ConfigUpdate(config imgspecv1.Image, annotations map[
if r.annotations == nil {
r.annotations = make(map[string]string)
}
for k, v := range annotations {
r.annotations[k] = v
}
maps.Copy(r.annotations, annotations)
return nil
}
@ -73,7 +71,7 @@ func (r *tarballReference) NewImage(ctx context.Context, sys *types.SystemContex
func (r *tarballReference) DeleteImage(ctx context.Context, sys *types.SystemContext) error {
for _, filename := range r.filenames {
if err := os.Remove(filename); err != nil && !os.IsNotExist(err) {
return fmt.Errorf("error removing %q: %v", filename, err)
return fmt.Errorf("error removing %q: %w", filename, err)
}
}
return nil
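Switching fmt.Errorf from %v to %w keeps the original error in the chain, so callers can still match it with errors.Is or errors.As. A brief standalone illustration using an invented sentinel error:

package main

import (
	"errors"
	"fmt"
)

var errNotFound = errors.New("blob not found")

func lookup(digest string) error {
	// %w wraps errNotFound; %v would flatten it into plain text.
	return fmt.Errorf("error looking up %q: %w", digest, errNotFound)
}

func main() {
	err := lookup("sha256:abc")
	fmt.Println(errors.Is(err, errNotFound)) // true — the wrapped cause is still matchable
}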

View File

@ -18,6 +18,8 @@ import (
digest "github.com/opencontainers/go-digest"
imgspecs "github.com/opencontainers/image-spec/specs-go"
imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1"
"golang.org/x/exp/maps"
"golang.org/x/exp/slices"
)
type tarballImageSource struct {
@ -62,13 +64,13 @@ func (r *tarballReference) NewImageSource(ctx context.Context, sys *types.System
} else {
file, err = os.Open(filename)
if err != nil {
return nil, fmt.Errorf("error opening %q for reading: %v", filename, err)
return nil, fmt.Errorf("error opening %q for reading: %w", filename, err)
}
defer file.Close()
reader = file
fileinfo, err := file.Stat()
if err != nil {
return nil, fmt.Errorf("error reading size of %q: %v", filename, err)
return nil, fmt.Errorf("error reading size of %q: %w", filename, err)
}
blobSize = fileinfo.Size()
blobTime = fileinfo.ModTime()
@ -168,10 +170,6 @@ func (r *tarballReference) NewImageSource(ctx context.Context, sys *types.System
MediaType: blobTypes[i],
})
}
annotations := make(map[string]string)
for k, v := range r.annotations {
annotations[k] = v
}
manifest := imgspecv1.Manifest{
Versioned: imgspecs.Versioned{
SchemaVersion: 2,
@ -182,7 +180,7 @@ func (r *tarballReference) NewImageSource(ctx context.Context, sys *types.System
MediaType: imgspecv1.MediaTypeImageConfig,
},
Layers: layerDescriptors,
Annotations: annotations,
Annotations: maps.Clone(r.annotations),
}
// Encode the manifest.
@ -228,20 +226,19 @@ func (is *tarballImageSource) GetBlob(ctx context.Context, blobinfo types.BlobIn
return io.NopCloser(bytes.NewBuffer(is.config)), is.configSize, nil
}
// Maybe one of the layer blobs.
for i := range is.blobIDs {
if blobinfo.Digest == is.blobIDs[i] {
// We want to read that layer: open the file or memory block and hand it back.
if is.filenames[i] == "-" {
return io.NopCloser(bytes.NewBuffer(is.reference.stdin)), int64(len(is.reference.stdin)), nil
}
reader, err := os.Open(is.filenames[i])
if err != nil {
return nil, -1, fmt.Errorf("error opening %q: %v", is.filenames[i], err)
}
return reader, is.blobSizes[i], nil
}
i := slices.Index(is.blobIDs, blobinfo.Digest)
if i == -1 {
return nil, -1, fmt.Errorf("no blob with digest %q found", blobinfo.Digest.String())
}
return nil, -1, fmt.Errorf("no blob with digest %q found", blobinfo.Digest.String())
// We want to read that layer: open the file or memory block and hand it back.
if is.filenames[i] == "-" {
return io.NopCloser(bytes.NewBuffer(is.reference.stdin)), int64(len(is.reference.stdin)), nil
}
reader, err := os.Open(is.filenames[i])
if err != nil {
return nil, -1, fmt.Errorf("error opening %q: %v", is.filenames[i], err)
}
return reader, is.blobSizes[i], nil
}
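The layer lookup above turns a linear search into slices.Index plus an early return when the digest is absent (Index reports -1). A minimal standalone sketch with made-up digests:

package main

import (
	"fmt"

	"golang.org/x/exp/slices"
)

func main() {
	blobIDs := []string{"sha256:aaa", "sha256:bbb", "sha256:ccc"}

	if i := slices.Index(blobIDs, "sha256:bbb"); i != -1 {
		fmt.Println("found at", i) // found at 1
	}
	fmt.Println(slices.Index(blobIDs, "sha256:zzz")) // -1 when absent
}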
// GetManifest returns the image's manifest along with its MIME type (which may be empty when it can't be determined but the manifest is available).

View File

@ -25,24 +25,24 @@ import (
// ParseImageName converts a URL-like image name to a types.ImageReference.
func ParseImageName(imgName string) (types.ImageReference, error) {
// Keep this in sync with TransportFromImageName!
parts := strings.SplitN(imgName, ":", 2)
if len(parts) != 2 {
transportName, withinTransport, valid := strings.Cut(imgName, ":")
if !valid {
return nil, fmt.Errorf(`Invalid image name "%s", expected colon-separated transport:reference`, imgName)
}
transport := transports.Get(parts[0])
transport := transports.Get(transportName)
if transport == nil {
return nil, fmt.Errorf(`Invalid image name "%s", unknown transport "%s"`, imgName, parts[0])
return nil, fmt.Errorf(`Invalid image name "%s", unknown transport "%s"`, imgName, transportName)
}
return transport.ParseReference(parts[1])
return transport.ParseReference(withinTransport)
}
// TransportFromImageName converts an URL-like name to a types.ImageTransport or nil when
// the transport is unknown or when the input is invalid.
func TransportFromImageName(imageName string) types.ImageTransport {
// Keep this in sync with ParseImageName!
parts := strings.SplitN(imageName, ":", 2)
if len(parts) == 2 {
return transports.Get(parts[0])
transportName, _, valid := strings.Cut(imageName, ":")
if valid {
return transports.Get(transportName)
}
return nil
}

View File

@ -5,6 +5,7 @@ import (
"sort"
"sync"
"github.com/containers/image/v5/internal/set"
"github.com/containers/image/v5/types"
)
@ -66,22 +67,21 @@ func Register(t types.ImageTransport) {
// This is the generally recommended way to refer to images in the UI.
//
// NOTE: The returned string is not promised to be equal to the original input to ParseImageName;
// e.g. default attribute values omitted by the user may be filled in in the return value, or vice versa.
// e.g. default attribute values omitted by the user may be filled in the return value, or vice versa.
func ImageName(ref types.ImageReference) string {
return ref.Transport().Name() + ":" + ref.StringWithinTransport()
}
var deprecatedTransports = set.NewWithValues("atomic")
// ListNames returns a list of non deprecated transport names.
// Deprecated transports can be used, but are not presented to users.
func ListNames() []string {
kt.mu.Lock()
defer kt.mu.Unlock()
deprecated := map[string]bool{
"atomic": true,
}
var names []string
for _, transport := range kt.transports {
if !deprecated[transport.Name()] {
if !deprecatedTransports.Contains(transport.Name()) {
names = append(names, transport.Name())
}
}

View File

@ -11,7 +11,7 @@ import (
v1 "github.com/opencontainers/image-spec/specs-go/v1"
)
// ImageTransport is a top-level namespace for ways to to store/load an image.
// ImageTransport is a top-level namespace for ways to store/load an image.
// It should generally correspond to ImageSource/ImageDestination implementations.
//
// Note that ImageTransport is based on "ways the users refer to image storage", not necessarily on the underlying physical transport.
@ -48,7 +48,7 @@ type ImageReference interface {
// StringWithinTransport returns a string representation of the reference, which MUST be such that
// reference.Transport().ParseReference(reference.StringWithinTransport()) returns an equivalent reference.
// NOTE: The returned string is not promised to be equal to the original input to ParseReference;
// e.g. default attribute values omitted by the user may be filled in in the return value, or vice versa.
// e.g. default attribute values omitted by the user may be filled in the return value, or vice versa.
// WARNING: Do not use the return value in the UI to describe an image, it does not contain the Transport().Name() prefix;
// instead, see transports.ImageName().
StringWithinTransport() string
@ -138,7 +138,7 @@ type BlobInfo struct {
// or if it should be compressed or decompressed. The field defaults to
// preserve the original layer's compressedness.
// TODO: To remove together with CryptoOperation in re-design to remove
// field out out of BlobInfo.
// field out of BlobInfo.
CompressionOperation LayerCompression
// CompressionAlgorithm is used in Image.UpdateLayerInfos to set the correct
// MIME type for compressed layers (e.g., gzip or zstd). This field MUST be
@ -149,7 +149,7 @@ type BlobInfo struct {
// CryptoOperation is used in Image.UpdateLayerInfos to instruct
// whether the original layer was encrypted/decrypted
// TODO: To remove together with CompressionOperation in re-design to
// remove field out out of BlobInfo.
// remove field out of BlobInfo.
CryptoOperation LayerCrypto
// Before adding any fields to this struct, read the NOTE above.
}

File diff suppressed because it is too large

View File

@ -3,7 +3,7 @@ package api // import "github.com/docker/docker/api"
// Common constants for daemon and client.
const (
// DefaultVersion of Current REST API
DefaultVersion = "1.41"
DefaultVersion = "1.42"
// NoBaseImageSpecifier is the symbol used by the FROM
// command to specify that no base image is to be used.

File diff suppressed because it is too large

Some files were not shown because too many files have changed in this diff.