update c/image to latest

Something is broken with go get[1] but I was able to manually edit
go.mod and run make vendor.

[1] https://github.com/containers/podman/pull/17581#discussion_r1114369386

Signed-off-by: Paul Holzinger <pholzing@redhat.com>
This commit is contained in:
Paul Holzinger 2023-02-22 15:17:34 +01:00
parent bbdf080d86
commit c825f99b12
223 changed files with 14046 additions and 12156 deletions

View File

@ -8,7 +8,7 @@ require (
github.com/containerd/containerd v1.6.18
github.com/containernetworking/cni v1.1.2
github.com/containernetworking/plugins v1.2.0
github.com/containers/image/v5 v5.24.2
github.com/containers/image/v5 v5.24.3-0.20230221191738-7b43abfbc83d
github.com/containers/ocicrypt v1.1.7
github.com/containers/storage v1.45.3
github.com/coreos/go-systemd/v22 v22.5.0
@ -89,7 +89,7 @@ require (
github.com/klauspost/compress v1.15.15 // indirect
github.com/klauspost/pgzip v1.2.6-0.20220930104621-17e8dac29df8 // indirect
github.com/kr/fs v0.1.0 // indirect
github.com/letsencrypt/boulder v0.0.0-20221109233200-85aa52084eaf // indirect
github.com/letsencrypt/boulder v0.0.0-20230213213521-fdfea0d469b6 // indirect
github.com/mailru/easyjson v0.7.7 // indirect
github.com/manifoldco/promptui v0.9.0 // indirect
github.com/mattn/go-runewidth v0.0.14 // indirect
@ -105,28 +105,30 @@ require (
github.com/pkg/errors v0.9.1 // indirect
github.com/proglottis/gpgme v0.1.3 // indirect
github.com/rivo/uniseg v0.4.3 // indirect
github.com/sigstore/fulcio v1.0.0 // indirect
github.com/sigstore/fulcio v1.1.0 // indirect
github.com/sigstore/rekor v1.0.1 // indirect
github.com/sigstore/sigstore v1.5.1 // indirect
github.com/stefanberger/go-pkcs11uri v0.0.0-20201008174630-78d3cae3a980 // indirect
github.com/sylabs/sif/v2 v2.9.0 // indirect
github.com/sylabs/sif/v2 v2.9.2 // indirect
github.com/tchap/go-patricia v2.3.0+incompatible // indirect
github.com/theupdateframework/go-tuf v0.5.2-0.20221207161717-9cb61d6e65f5 // indirect
github.com/theupdateframework/go-tuf v0.5.2 // indirect
github.com/titanous/rocacheck v0.0.0-20171023193734-afe73141d399 // indirect
github.com/ulikunitz/xz v0.5.11 // indirect
github.com/vbatts/tar-split v0.11.2 // indirect
github.com/vbauerster/mpb/v7 v7.5.3 // indirect
github.com/vbauerster/mpb/v8 v8.2.0 // indirect
github.com/vishvananda/netns v0.0.0-20210104183010-2eb08e3e575f // indirect
go.mongodb.org/mongo-driver v1.11.1 // indirect
go.mozilla.org/pkcs7 v0.0.0-20210826202110-33d05740a352 // indirect
go.opencensus.io v0.24.0 // indirect
golang.org/x/exp v0.0.0-20230206171751-46f607a40771 // indirect
golang.org/x/mod v0.8.0 // indirect
golang.org/x/net v0.7.0 // indirect
golang.org/x/text v0.7.0 // indirect
golang.org/x/tools v0.6.0 // indirect
google.golang.org/genproto v0.0.0-20221227171554-f9683d7f8bef // indirect
google.golang.org/grpc v1.51.0 // indirect
google.golang.org/genproto v0.0.0-20230209215440-0dfe4f8abfcc // indirect
google.golang.org/grpc v1.53.0 // indirect
google.golang.org/protobuf v1.28.1 // indirect
gopkg.in/go-jose/go-jose.v2 v2.6.1 // indirect
gopkg.in/square/go-jose.v2 v2.6.0 // indirect
gopkg.in/yaml.v2 v2.4.0 // indirect
gopkg.in/yaml.v3 v3.0.1 // indirect

View File

@ -110,7 +110,7 @@ github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA
github.com/cespare/xxhash v1.1.0 h1:a6HrQnmkObjyL+Gs60czilIUGqrzKutQD6XZog3p+ko=
github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc=
github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
github.com/cespare/xxhash/v2 v2.1.2 h1:YRXhKfTDauu4ajMg1TPgFO5jnlC2HCbmLXMcTG5cbYE=
github.com/cespare/xxhash/v2 v2.2.0 h1:DC2CZ1Ep5Y4k3ZQ899DldepgrayRUGE6BBZ/cd9Cj44=
github.com/checkpoint-restore/go-criu/v5 v5.3.0/go.mod h1:E/eQpaFtUKGOOSEBZgmKAcn+zUUwWxqcaKZlF54wK8E=
github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI=
github.com/chzyer/logex v1.2.1 h1:XHDu3E6q+gdHgsdTPH6ImJMIp436vR6MPtH8gP05QzM=
@ -225,8 +225,8 @@ github.com/containernetworking/plugins v0.8.6/go.mod h1:qnw5mN19D8fIwkqW7oHHYDHV
github.com/containernetworking/plugins v0.9.1/go.mod h1:xP/idU2ldlzN6m4p5LmGiwRDjeJr6FLK6vuiUwoH7P8=
github.com/containernetworking/plugins v1.2.0 h1:SWgg3dQG1yzUo4d9iD8cwSVh1VqI+bP7mkPDoSfP9VU=
github.com/containernetworking/plugins v1.2.0/go.mod h1:/VjX4uHecW5vVimFa1wkG4s+r/s9qIfPdqlLF4TW8c4=
github.com/containers/image/v5 v5.24.2 h1:QcMsHBAXBPPnVYo6iEFarvaIpym7sBlwsGHPJlucxN0=
github.com/containers/image/v5 v5.24.2/go.mod h1:oss5F6ssGQz8ZtC79oY+fuzYA3m3zBek9tq9gmhuvHc=
github.com/containers/image/v5 v5.24.3-0.20230221191738-7b43abfbc83d h1:L2XOtiE36FrZ5D+0HYKaIAcFh4lUv1syBes4tCj/P98=
github.com/containers/image/v5 v5.24.3-0.20230221191738-7b43abfbc83d/go.mod h1:8Mrhb3iwzDVdzKRHTPiCb8ZVxurvRyPY6k0hQEVSAOI=
github.com/containers/libtrust v0.0.0-20230121012942-c1716e8a8d01 h1:Qzk5C6cYglewc+UyGf6lc8Mj2UaPTHy/iF2De0/77CA=
github.com/containers/libtrust v0.0.0-20230121012942-c1716e8a8d01/go.mod h1:9rfv8iPl1ZP7aqh9YA68wnZv2NUDbXdcdPHVz0pFbPY=
github.com/containers/ocicrypt v1.0.1/go.mod h1:MeJDzk1RJHv89LjsH0Sp5KTY3ZYkjXO/C+bKAeWFIrc=
@ -595,8 +595,8 @@ github.com/kr/pty v1.1.5/go.mod h1:9r2w37qlBe7rQ6e1fg1S/9xpWHSnaqNdHD3WcMdbPDA=
github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=
github.com/letsencrypt/boulder v0.0.0-20221109233200-85aa52084eaf h1:ndns1qx/5dL43g16EQkPV/i8+b3l5bYQwLeoSBe7tS8=
github.com/letsencrypt/boulder v0.0.0-20221109233200-85aa52084eaf/go.mod h1:aGkAgvWY/IUcVFfuly53REpfv5edu25oij+qHRFaraA=
github.com/letsencrypt/boulder v0.0.0-20230213213521-fdfea0d469b6 h1:unJdfS94Y3k85TKy+mvKzjW5R9rIC+Lv4KGbE7uNu0I=
github.com/letsencrypt/boulder v0.0.0-20230213213521-fdfea0d469b6/go.mod h1:PUgW5vI9ANEaV6qv9a6EKu8gAySgwf0xrzG9xIB/CK0=
github.com/linuxkit/virtsock v0.0.0-20201010232012-f8cee7dfc7a3/go.mod h1:3r6x7q95whyfWQpmGZTu3gk3v2YkMi05HEzl7Tf7YEo=
github.com/magiconair/properties v1.8.0/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ=
github.com/mailru/easyjson v0.0.0-20160728113105-d5b7844b561a/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc=
@ -614,7 +614,6 @@ github.com/marstr/guid v1.1.0/go.mod h1:74gB1z2wpxxInTG6yaqA7KrtM0NZ+RbrcqDvYHef
github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU=
github.com/mattn/go-isatty v0.0.4/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4=
github.com/mattn/go-runewidth v0.0.2/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU=
github.com/mattn/go-runewidth v0.0.13/go.mod h1:Jdepj2loyihRzMpdS35Xk/zdY8IAYHsh153qUoGf23w=
github.com/mattn/go-runewidth v0.0.14 h1:+xnbZSEeDbOIg5/mE6JF0w6n9duR1l3/WmbinWVwUuU=
github.com/mattn/go-runewidth v0.0.14/go.mod h1:Jdepj2loyihRzMpdS35Xk/zdY8IAYHsh153qUoGf23w=
github.com/mattn/go-shellwords v1.0.3/go.mod h1:3xCvwCdWdlDJUrvuMn7Wuy9eWs4pE8vqg+NOMyg4B2o=
@ -647,7 +646,7 @@ github.com/moby/sys/mountinfo v0.6.2 h1:BzJjoreD5BMFNmD9Rus6gdd1pLuecOFPt8wC+Vyg
github.com/moby/sys/mountinfo v0.6.2/go.mod h1:IJb6JQeOklcdMU9F5xQ8ZALD+CUr5VlGpwtX+VE0rpI=
github.com/moby/sys/symlink v0.1.0/go.mod h1:GGDODQmbFOjFsXvfLVn3+ZRxkch54RkSiGqsZeMYowQ=
github.com/moby/term v0.0.0-20200312100748-672ec06f55cd/go.mod h1:DdlQx2hp0Ss5/fLikoLlEeIYiATotOjgB//nb973jeo=
github.com/moby/term v0.0.0-20210619224110-3f7ff695adc6 h1:dcztxKSvZ4Id8iPpHERQBbIJfabdt4wUm5qy3wOL2Zc=
github.com/moby/term v0.0.0-20221120202655-abb19827d345 h1:J9c53/kxIH+2nTKBEfZYFMlhghtHpIHSXpm5VRGHSnU=
github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg=
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
@ -753,7 +752,7 @@ github.com/prometheus/client_golang v0.9.3/go.mod h1:/TN21ttK/J9q6uSwhBd54HahCDf
github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo=
github.com/prometheus/client_golang v1.1.0/go.mod h1:I1FGZT9+L76gKKOs5djB6ezCbFQP1xR9D75/vuwEF3g=
github.com/prometheus/client_golang v1.7.1/go.mod h1:PY5Wy2awLA44sXw4AOSfFBetzPP4j5+D6mVACh+pe2M=
github.com/prometheus/client_golang v1.13.0 h1:b71QUfeo5M8gq2+evJdTPfZhYMAU0uKPkyPJ7TPsloU=
github.com/prometheus/client_golang v1.14.0 h1:nJdhIvne2eSX/XRAFV9PcvFFRbrjbcTUj0VP62TMhnw=
github.com/prometheus/client_model v0.0.0-20171117100541-99fa1f4be8e5/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
@ -766,7 +765,7 @@ github.com/prometheus/common v0.4.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y8
github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4=
github.com/prometheus/common v0.6.0/go.mod h1:eBmuwkDJBwy6iBfxCBob6t6dR6ENT/y+J+Zk0j9GMYc=
github.com/prometheus/common v0.10.0/go.mod h1:Tlit/dnDKsSWFlCLTWaA1cyBgKHSMdTB80sz/V91rCo=
github.com/prometheus/common v0.37.0 h1:ccBbHCgIiT9uSoFY0vX8H3zsNR5eLt17/RQLUvn8pXE=
github.com/prometheus/common v0.39.0 h1:oOyhkDq05hPZKItWVBkJ6g6AtGxi+fy7F4JvUV8uhsI=
github.com/prometheus/procfs v0.0.0-20180125133057-cb4147076ac7/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
github.com/prometheus/procfs v0.0.0-20190507164030-5867b95ac084/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA=
@ -778,7 +777,7 @@ github.com/prometheus/procfs v0.0.8/go.mod h1:7Qr8sr6344vo1JqZ6HhLceV9o3AJ1Ff+Gx
github.com/prometheus/procfs v0.1.3/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU=
github.com/prometheus/procfs v0.2.0/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU=
github.com/prometheus/procfs v0.6.0/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA=
github.com/prometheus/procfs v0.8.0 h1:ODq8ZFEaYeCaZOJlZZdJA2AbQR98dSHSM1KW/You5mo=
github.com/prometheus/procfs v0.9.0 h1:wzCHvIvM5SxWqYvwgVL7yJY8Lz3PKn49KQtpgMYJfhI=
github.com/prometheus/tsdb v0.7.1/go.mod h1:qhTCs0VvXwvX/y3TZrWD7rabWM+ijKTux40TwIPHuXU=
github.com/rivo/uniseg v0.2.0/go.mod h1:J6wj4VEh+S6ZtnVlnTBMWIodfgj8LQOQFoIToxlJtxc=
github.com/rivo/uniseg v0.4.3 h1:utMvzDsuh3suAEnhH0RdHmoPbU648o6CvXxTx4SBMOw=
@ -800,8 +799,8 @@ github.com/seccomp/libseccomp-golang v0.10.0 h1:aA4bp+/Zzi0BnWZ2F1wgNBs5gTpm+na2
github.com/seccomp/libseccomp-golang v0.10.0/go.mod h1:JA8cRccbGaA1s33RQf7Y1+q9gHmZX1yB/z9WDN1C6fg=
github.com/sergi/go-diff v1.2.0 h1:XU+rvMAioB0UC3q1MFrIQy4Vo5/4VsRDQQXHsEya6xQ=
github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc=
github.com/sigstore/fulcio v1.0.0 h1:hBZW6qg9GXTtCX8jOg1hmyjYLrmsEKZGeMwAbW3XNEg=
github.com/sigstore/fulcio v1.0.0/go.mod h1:j4MzLxX/Be0rHYh3JF2dgMorkWGzEMHBqIHwFU8I/Rw=
github.com/sigstore/fulcio v1.1.0 h1:mzzJ05Ccu8Y2inyioklNvc8MpzlGHxu8YqNeTm0dHfU=
github.com/sigstore/fulcio v1.1.0/go.mod h1:zv1ZQTXZbUwQdRwajlQksc34pRas+2aZYpIZoQBNev8=
github.com/sigstore/rekor v1.0.1 h1:rcESXSNkAPRWFYZel9rarspdvneET60F2ngNkadi89c=
github.com/sigstore/rekor v1.0.1/go.mod h1:ecTKdZWGWqE1pl3U1m1JebQJLU/hSjD9vYHOmHQ7w4g=
github.com/sigstore/sigstore v1.5.1 h1:iUou0QJW8eQKMUkTXbFyof9ZOblDtfaW2Sn2+QI8Tcs=
@ -856,8 +855,8 @@ github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/
github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU=
github.com/stretchr/testify v1.8.1 h1:w7B6lhMri9wdJUVmEZPGGhZzrYTPvgJArz7wNPgYKsk=
github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4=
github.com/sylabs/sif/v2 v2.9.0 h1:q9K92j1QW4/QLOtKh9YZpJHrXav6x15AVhQGPVLcg+4=
github.com/sylabs/sif/v2 v2.9.0/go.mod h1:bRdFzcqif0eDjwx0isG4cgTFoKTQn/vfBXVSoP2rB2Y=
github.com/sylabs/sif/v2 v2.9.2 h1:i8YxBON4FOdqiIBX/bbY4IiHZTVJLlyA6yx9TJyQRyo=
github.com/sylabs/sif/v2 v2.9.2/go.mod h1:YSXiKUZTG7pcFpAMwQxdrVV4tVRuv1MBVBX3br1PkTg=
github.com/syndtr/gocapability v0.0.0-20170704070218-db04d3cc01c8/go.mod h1:hkRG7XYTFWNJGYcbNJQlaLq0fg1yr4J4t/NcTQtrfww=
github.com/syndtr/gocapability v0.0.0-20180916011248-d98352740cb2/go.mod h1:hkRG7XYTFWNJGYcbNJQlaLq0fg1yr4J4t/NcTQtrfww=
github.com/syndtr/gocapability v0.0.0-20200815063812-42c35b437635 h1:kdXcSzyDtseVEc4yCz2qF8ZrQvIDBJLl4S1c3GCXmoI=
@ -865,8 +864,8 @@ github.com/syndtr/gocapability v0.0.0-20200815063812-42c35b437635/go.mod h1:hkRG
github.com/tchap/go-patricia v2.2.6+incompatible/go.mod h1:bmLyhP68RS6kStMGxByiQ23RP/odRBOTVjwp2cDyi6I=
github.com/tchap/go-patricia v2.3.0+incompatible h1:GkY4dP3cEfEASBPPkWd+AmjYxhmDkqO9/zg7R0lSQRs=
github.com/tchap/go-patricia v2.3.0+incompatible/go.mod h1:bmLyhP68RS6kStMGxByiQ23RP/odRBOTVjwp2cDyi6I=
github.com/theupdateframework/go-tuf v0.5.2-0.20221207161717-9cb61d6e65f5 h1:s+Yvt6bzRwHljSE7j6DLBDcfpZEdBhrvLgOUmd8f7ZM=
github.com/theupdateframework/go-tuf v0.5.2-0.20221207161717-9cb61d6e65f5/go.mod h1:Le8NAjvDJK1vmLgpVYr4AR1Tqam/b/mTdQyTy37UJDA=
github.com/theupdateframework/go-tuf v0.5.2 h1:habfDzTmpbzBLIFGWa2ZpVhYvFBoK0C1onC3a4zuPRA=
github.com/theupdateframework/go-tuf v0.5.2/go.mod h1:SyMV5kg5n4uEclsyxXJZI2UxPFJNDc4Y+r7wv+MlvTA=
github.com/tidwall/pretty v1.0.0/go.mod h1:XNkn88O1ChpSDQmQeStsy+sBenx6DDtFZJxhVysOjyk=
github.com/tidwall/pretty v1.2.0 h1:RWIZEg2iJ8/g6fDDYzMpobmaoGh5OLl4AXtGUGPcqCs=
github.com/titanous/rocacheck v0.0.0-20171023193734-afe73141d399 h1:e/5i7d4oYZ+C1wj2THlRK+oAhjeS/TRQwMfkIuet3w0=
@ -885,8 +884,8 @@ github.com/urfave/cli v1.22.4/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtX
github.com/urfave/cli v1.22.9/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0=
github.com/vbatts/tar-split v0.11.2 h1:Via6XqJr0hceW4wff3QRzD5gAk/tatMw/4ZA7cTlIME=
github.com/vbatts/tar-split v0.11.2/go.mod h1:vV3ZuO2yWSVsz+pfFzDG/upWH1JhjOiEaWq6kXyQ3VI=
github.com/vbauerster/mpb/v7 v7.5.3 h1:BkGfmb6nMrrBQDFECR/Q7RkKCw7ylMetCb4079CGs4w=
github.com/vbauerster/mpb/v7 v7.5.3/go.mod h1:i+h4QY6lmLvBNK2ah1fSreiw3ajskRlBp9AhY/PnuOE=
github.com/vbauerster/mpb/v8 v8.2.0 h1:zaH0DaIcUoOeItZ/Yy567ZhaPUC3GMhUyHollQDgZvs=
github.com/vbauerster/mpb/v8 v8.2.0/go.mod h1:HEVcHNizbUIg0l4Qwhw0BDvg50zo3CMiWkbz1WUEQ94=
github.com/vishvananda/netlink v0.0.0-20181108222139-023a6dafdcdf/go.mod h1:+SR5DhBJrl6ZM7CoCKvpw5BKroDKQ+PJqOg65H/2ktk=
github.com/vishvananda/netlink v1.1.0/go.mod h1:cTgwzPIzzgDAYoQrMm0EdrjRUBkTqKYppBueQtXaqoE=
github.com/vishvananda/netlink v1.1.1-0.20201029203352-d40f9887b852/go.mod h1:twkDnbuQxJYemMlGd4JFIcuhgX83tXhKS2B/PRMpOho=
@ -979,6 +978,8 @@ golang.org/x/exp v0.0.0-20191227195350-da58074b4299/go.mod h1:2RIsYlXP63K8oxa1u0
golang.org/x/exp v0.0.0-20200119233911-0405dc783f0a/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4=
golang.org/x/exp v0.0.0-20200207192155-f17229e696bd/go.mod h1:J/WKrq2StrnmMY6+EHIKF9dgMWnmCNThgcyBT1FY9mM=
golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6/go.mod h1:3jZMyOhIsHpP37uCMkUooju7aAi5cS1Q23tOzKc+0MU=
golang.org/x/exp v0.0.0-20230206171751-46f607a40771 h1:xP7rWLUr1e1n2xkK5YB4LI0hPEy3LJC6Wk+D4pGlOJg=
golang.org/x/exp v0.0.0-20230206171751-46f607a40771/go.mod h1:CxIveKay+FTh1D0yPZemJVgC/95VzuuOLq5Qi4xnoYc=
golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js=
golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0=
golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
@ -1146,7 +1147,6 @@ golang.org/x/sys v0.0.0-20220310020820-b874c991c1a5/go.mod h1:oPkhp1MJrh7nUepCBc
golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220908164124-27713097b956/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220909162455-aba9fc2a8ff2/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.5.0 h1:MUK/U/4lj1t1oPg0HfuXDN/Z1wv31ZJ/YcPiGccS4DU=
golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw=
@ -1271,8 +1271,8 @@ google.golang.org/genproto v0.0.0-20200513103714-09dca8ec2884/go.mod h1:55QSHmfG
google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo=
google.golang.org/genproto v0.0.0-20200527145253-8367513e4ece/go.mod h1:jDfRM7FcilCzHH/e9qn6dsT145K34l5v+OpcnNgKAAA=
google.golang.org/genproto v0.0.0-20201110150050-8816d57aaa9a/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
google.golang.org/genproto v0.0.0-20221227171554-f9683d7f8bef h1:uQ2vjV/sHTsWSqdKeLqmwitzgvjMl7o4IdtHwUDXSJY=
google.golang.org/genproto v0.0.0-20221227171554-f9683d7f8bef/go.mod h1:RGgjbofJ8xD9Sq1VVhDM1Vok1vRONV+rg+CjzG4SZKM=
google.golang.org/genproto v0.0.0-20230209215440-0dfe4f8abfcc h1:ijGwO+0vL2hJt5gaygqP2j6PfflOBrRot0IczKbmtio=
google.golang.org/genproto v0.0.0-20230209215440-0dfe4f8abfcc/go.mod h1:RGgjbofJ8xD9Sq1VVhDM1Vok1vRONV+rg+CjzG4SZKM=
google.golang.org/grpc v0.0.0-20160317175043-d3ddb4469d5a/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw=
google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38=
@ -1291,8 +1291,8 @@ google.golang.org/grpc v1.33.1/go.mod h1:fr5YgcSWrqhRRxogOsw7RzIpsmvOZ6IcH4kBYTp
google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc=
google.golang.org/grpc v1.36.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU=
google.golang.org/grpc v1.40.0/go.mod h1:ogyxbiOoUXAkP+4+xa6PZSE9DZgIHtSpzjDTB9KAK34=
google.golang.org/grpc v1.51.0 h1:E1eGv1FTqoLIdnBCZufiSHgKjlqG6fKFf6pPWtMTh8U=
google.golang.org/grpc v1.51.0/go.mod h1:wgNDFcnuBGmxLKI/qn4T+m5BtEBYXJPvibbUPsAIPww=
google.golang.org/grpc v1.53.0 h1:LAv2ds7cmFV/XTS3XG1NneeENYrXGmorPxsBbptIjNc=
google.golang.org/grpc v1.53.0/go.mod h1:OnIrk0ipVdj4N5d9IUoFUx72/VlD7+jUsHwZgwSMQpw=
google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8=
google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0=
google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM=
@ -1322,6 +1322,8 @@ gopkg.in/cheggaaa/pb.v1 v1.0.25/go.mod h1:V/YB90LKu/1FcN3WVnfiiE5oMCibMjukxqG/qS
gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI=
gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys=
gopkg.in/gemnasium/logrus-airbrake-hook.v2 v2.1.2/go.mod h1:Xk6kEKp8OKb+X14hQBKWaSkCsqBpgog8nAV2xsGOxlo=
gopkg.in/go-jose/go-jose.v2 v2.6.1 h1:qEzJlIDmG9q5VO0M/o8tGS65QMHMS1w01TQJB1VPJ4U=
gopkg.in/go-jose/go-jose.v2 v2.6.1/go.mod h1:zzZDPkNNw/c9IE7Z9jr11mBZQhKQTMzoEEIoEdZlFBI=
gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw=
gopkg.in/natefinch/lumberjack.v2 v2.0.0/go.mod h1:l0ndWWf7gzL7RNwBG7wST/UCcT4T24xpD6X8LsfU/+k=
gopkg.in/resty.v1 v1.12.0/go.mod h1:mDo4pnntr5jdWRML875a/NmxYqAlA73dVijT2AXvQQo=
@ -1352,8 +1354,8 @@ gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
gotest.tools v2.2.0+incompatible h1:VsBPFP1AI068pPrMxtb/S8Zkgf9xEmTLJjfM+P5UIEo=
gotest.tools v2.2.0+incompatible/go.mod h1:DsYFclhRJ6vuDpmuTbkuFWG+y2sxOXAzmJt81HFBacw=
gotest.tools/v3 v3.0.2/go.mod h1:3SzNCllyD9/Y+b5r9JIKQ474KzkZyqLqEfYqMsX94Bk=
gotest.tools/v3 v3.0.3 h1:4AuOwCGf4lLR9u3YOe2awrHygurzhO/HeQ6laiA6Sx0=
gotest.tools/v3 v3.0.3/go.mod h1:Z7Lb0S5l+klDB31fvDQX8ss/FlKDxtlFlw3Oa8Ymbl8=
gotest.tools/v3 v3.4.0 h1:ZazjZUfuVeZGLAmlKKuyv3IKP5orXcwtOwDQH6YVr6o=
honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=

View File

@ -10,6 +10,7 @@ import (
compressiontypes "github.com/containers/image/v5/pkg/compression/types"
"github.com/containers/image/v5/types"
"github.com/sirupsen/logrus"
"golang.org/x/exp/maps"
)
// bpDetectCompressionStepData contains data that the copy pipeline needs about the “detect compression” step.
@ -199,7 +200,9 @@ func (ic *imageCopier) bpcDecompressCompressed(stream *sourceStream, detected bp
}
// bpcPreserveOriginal returns a *bpCompressionStepData for not changing the original blob.
func (ic *imageCopier) bpcPreserveOriginal(stream *sourceStream, detected bpDetectCompressionStepData,
// This does not change the sourceStream parameter; we include it for symmetry with other
// pipeline steps.
func (ic *imageCopier) bpcPreserveOriginal(_ *sourceStream, detected bpDetectCompressionStepData,
layerCompressionChangeSupported bool) *bpCompressionStepData {
logrus.Debugf("Using original blob without modification")
// Remember if the original blob was compressed, and if so how, so that if
@ -232,9 +235,7 @@ func (d *bpCompressionStepData) updateCompressionEdits(operation *types.LayerCom
if *annotations == nil {
*annotations = map[string]string{}
}
for k, v := range d.uploadedAnnotations {
(*annotations)[k] = v
}
maps.Copy(*annotations, d.uploadedAnnotations)
}
// recordValidatedBlobData updates b.blobInfoCache with data about the created uploadedInfo and the original srcInfo.

View File

@ -17,8 +17,10 @@ import (
"github.com/containers/image/v5/internal/image"
"github.com/containers/image/v5/internal/imagedestination"
"github.com/containers/image/v5/internal/imagesource"
internalManifest "github.com/containers/image/v5/internal/manifest"
"github.com/containers/image/v5/internal/pkg/platform"
"github.com/containers/image/v5/internal/private"
"github.com/containers/image/v5/internal/set"
"github.com/containers/image/v5/manifest"
"github.com/containers/image/v5/pkg/blobinfocache"
"github.com/containers/image/v5/pkg/compression"
@ -31,7 +33,8 @@ import (
digest "github.com/opencontainers/go-digest"
imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1"
"github.com/sirupsen/logrus"
"github.com/vbauerster/mpb/v7"
"github.com/vbauerster/mpb/v8"
"golang.org/x/exp/slices"
"golang.org/x/sync/semaphore"
"golang.org/x/term"
)
@ -143,7 +146,7 @@ type Options struct {
// Preserve digests, and fail if we cannot.
PreserveDigests bool
// manifest MIME type of image set by user. "" is default and means use the autodetection to the the manifest MIME type
// manifest MIME type of image set by user. "" is default and means use the autodetection to the manifest MIME type
ForceManifestMIMEType string
ImageListSelection ImageListSelection // set to either CopySystemImage (the default), CopyAllImages, or CopySpecificImages to control which instances we copy when the source reference is a list; ignored if the source reference is not a list
Instances []digest.Digest // if ImageListSelection is CopySpecificImages, copy only these instances and the list itself
@ -315,7 +318,7 @@ func Image(ctx context.Context, policyContext *signature.PolicyContext, destRef,
if err != nil {
return nil, fmt.Errorf("reading manifest for %s: %w", transports.ImageName(srcRef), err)
}
manifestList, err := manifest.ListFromBlob(mfest, manifestType)
manifestList, err := internalManifest.ListFromBlob(mfest, manifestType)
if err != nil {
return nil, fmt.Errorf("parsing primary manifest as list for %s: %w", transports.ImageName(srcRef), err)
}
@ -370,12 +373,7 @@ func supportsMultipleImages(dest types.ImageDestination) bool {
// Anything goes!
return true
}
for _, mtype := range mtypes {
if manifest.MIMETypeIsMultiImage(mtype) {
return true
}
}
return false
return slices.ContainsFunc(mtypes, manifest.MIMETypeIsMultiImage)
}
// compareImageDestinationManifestEqual compares the `src` and `dest` image manifests (reading the manifest from the
@ -420,11 +418,11 @@ func (c *copier) copyMultipleImages(ctx context.Context, policyContext *signatur
if err != nil {
return nil, fmt.Errorf("reading manifest list: %w", err)
}
originalList, err := manifest.ListFromBlob(manifestList, manifestType)
originalList, err := internalManifest.ListFromBlob(manifestList, manifestType)
if err != nil {
return nil, fmt.Errorf("parsing manifest list %q: %w", string(manifestList), err)
}
updatedList := originalList.Clone()
updatedList := originalList.CloneInternal()
sigs, err := c.sourceSignatures(ctx, unparsedToplevel, options,
"Getting image list signatures",
@ -491,15 +489,8 @@ func (c *copier) copyMultipleImages(ctx context.Context, policyContext *signatur
updates := make([]manifest.ListUpdate, len(instanceDigests))
instancesCopied := 0
for i, instanceDigest := range instanceDigests {
if options.ImageListSelection == CopySpecificImages {
skip := true
for _, instance := range options.Instances {
if instance == instanceDigest {
skip = false
break
}
}
if skip {
if options.ImageListSelection == CopySpecificImages &&
!slices.Contains(options.Instances, instanceDigest) {
update, err := updatedList.Instance(instanceDigest)
if err != nil {
return nil, err
@ -509,7 +500,6 @@ func (c *copier) copyMultipleImages(ctx context.Context, policyContext *signatur
updates[i] = update
continue
}
}
logrus.Debugf("Copying instance %s (%d/%d)", instanceDigest, i+1, len(instanceDigests))
c.Printf("Copying image %s (%d/%d)\n", instanceDigest, instancesCopied+1, imagesToCopy)
unparsedInstance := image.UnparsedInstance(c.rawSource, &instanceDigest)
@ -536,7 +526,7 @@ func (c *copier) copyMultipleImages(ctx context.Context, policyContext *signatur
c.Printf("Writing manifest list to image destination\n")
var errs []string
for _, thisListType := range append([]string{selectedListType}, otherManifestMIMETypeCandidates...) {
attemptedList := updatedList
var attemptedList internalManifest.ListPublic = updatedList
logrus.Debugf("Trying to use manifest list type %s…", thisListType)
@ -823,7 +813,7 @@ func (c *copier) copyOneImage(ctx context.Context, policyContext *signature.Poli
// has a built-in list of functions/methods (whatever object they are for)
// which have their format strings checked; for other names we would have
// to pass a parameter to every (go tool vet) invocation.
func (c *copier) Printf(format string, a ...interface{}) {
func (c *copier) Printf(format string, a ...any) {
fmt.Fprintf(c.reportWriter, format, a...)
}
@ -948,20 +938,20 @@ func (ic *imageCopier) copyLayers(ctx context.Context) error {
data[index] = cld
}
// Create layer Encryption map
encLayerBitmap := map[int]bool{}
// Decide which layers to encrypt
layersToEncrypt := set.New[int]()
var encryptAll bool
if ic.ociEncryptLayers != nil {
encryptAll = len(*ic.ociEncryptLayers) == 0
totalLayers := len(srcInfos)
for _, l := range *ic.ociEncryptLayers {
// if layer is negative, it is reverse indexed.
encLayerBitmap[(totalLayers+l)%totalLayers] = true
layersToEncrypt.Add((totalLayers + l) % totalLayers)
}
if encryptAll {
for i := 0; i < len(srcInfos); i++ {
encLayerBitmap[i] = true
layersToEncrypt.Add(i)
}
}
}
@ -980,7 +970,7 @@ func (ic *imageCopier) copyLayers(ctx context.Context) error {
return fmt.Errorf("copying layer: %w", err)
}
copyGroup.Add(1)
go copyLayerHelper(i, srcLayer, encLayerBitmap[i], progressPool, ic.c.rawSource.Reference().DockerReference())
go copyLayerHelper(i, srcLayer, layersToEncrypt.Contains(i), progressPool, ic.c.rawSource.Reference().DockerReference())
}
// A call to copyGroup.Wait() is done at this point by the defer above.
@ -1013,15 +1003,9 @@ func (ic *imageCopier) copyLayers(ctx context.Context) error {
// layerDigestsDiffer returns true iff the digests in a and b differ (ignoring sizes and possible other fields)
func layerDigestsDiffer(a, b []types.BlobInfo) bool {
if len(a) != len(b) {
return true
}
for i := range a {
if a[i].Digest != b[i].Digest {
return true
}
}
return false
return !slices.EqualFunc(a, b, func(a, b types.BlobInfo) bool {
return a.Digest == b.Digest
})
}
// copyUpdatedConfigAndManifest updates the image per ic.manifestUpdates, if necessary,

View File

@ -7,6 +7,8 @@ import (
"github.com/containers/image/v5/types"
"github.com/containers/ocicrypt"
imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1"
"golang.org/x/exp/maps"
"golang.org/x/exp/slices"
)
// isOciEncrypted returns a bool indicating if a mediatype is encrypted
@ -18,12 +20,9 @@ func isOciEncrypted(mediatype string) bool {
// isEncrypted checks if an image is encrypted
func isEncrypted(i types.Image) bool {
layers := i.LayerInfos()
for _, l := range layers {
if isOciEncrypted(l.MediaType) {
return true
}
}
return false
return slices.ContainsFunc(layers, func(l types.BlobInfo) bool {
return isOciEncrypted(l.MediaType)
})
}
// bpDecryptionStepData contains data that the copy pipeline needs about the decryption step.
@ -47,11 +46,9 @@ func (c *copier) blobPipelineDecryptionStep(stream *sourceStream, srcInfo types.
stream.reader = reader
stream.info.Digest = decryptedDigest
stream.info.Size = -1
for k := range stream.info.Annotations {
if strings.HasPrefix(k, "org.opencontainers.image.enc") {
delete(stream.info.Annotations, k)
}
}
maps.DeleteFunc(stream.info.Annotations, func(k string, _ string) bool {
return strings.HasPrefix(k, "org.opencontainers.image.enc")
})
return &bpDecryptionStepData{
decrypting: true,
}, nil
@ -122,8 +119,6 @@ func (d *bpEncryptionStepData) updateCryptoOperationAndAnnotations(operation *ty
if *annotations == nil {
*annotations = map[string]string{}
}
for k, v := range encryptAnnotations {
(*annotations)[k] = v
}
maps.Copy(*annotations, encryptAnnotations)
return nil
}

View File

@ -6,9 +6,11 @@ import (
"fmt"
"strings"
"github.com/containers/image/v5/internal/set"
"github.com/containers/image/v5/manifest"
"github.com/containers/image/v5/types"
"github.com/sirupsen/logrus"
"golang.org/x/exp/slices"
)
// preferredManifestMIMETypes lists manifest MIME types in order of our preference, if we can't use the original manifest and need to convert.
@ -19,7 +21,7 @@ var preferredManifestMIMETypes = []string{manifest.DockerV2Schema2MediaType, man
// orderedSet is a list of strings (MIME types or platform descriptors in our case), with each string appearing at most once.
type orderedSet struct {
list []string
included map[string]struct{}
included *set.Set[string]
}
// newOrderedSet creates a correctly initialized orderedSet.
@ -27,15 +29,15 @@ type orderedSet struct {
func newOrderedSet() *orderedSet {
return &orderedSet{
list: []string{},
included: map[string]struct{}{},
included: set.New[string](),
}
}
// append adds s to the end of os, only if it is not included already.
func (os *orderedSet) append(s string) {
if _, ok := os.included[s]; !ok {
if !os.included.Contains(s) {
os.list = append(os.list, s)
os.included[s] = struct{}{}
os.included.Add(s)
}
}
@ -80,10 +82,10 @@ func determineManifestConversion(in determineManifestConversionInputs) (manifest
otherMIMETypeCandidates: []string{},
}, nil
}
supportedByDest := map[string]struct{}{}
supportedByDest := set.New[string]()
for _, t := range destSupportedManifestMIMETypes {
if !in.requiresOCIEncryption || manifest.MIMETypeSupportsEncryption(t) {
supportedByDest[t] = struct{}{}
supportedByDest.Add(t)
}
}
@ -96,7 +98,7 @@ func determineManifestConversion(in determineManifestConversionInputs) (manifest
prioritizedTypes := newOrderedSet()
// First of all, prefer to keep the original manifest unmodified.
if _, ok := supportedByDest[srcType]; ok {
if supportedByDest.Contains(srcType) {
prioritizedTypes.append(srcType)
}
if in.cannotModifyManifestReason != "" {
@ -113,7 +115,7 @@ func determineManifestConversion(in determineManifestConversionInputs) (manifest
// Then use our list of preferred types.
for _, t := range preferredManifestMIMETypes {
if _, ok := supportedByDest[t]; ok {
if supportedByDest.Contains(t) {
prioritizedTypes.append(t)
}
}
@ -166,11 +168,8 @@ func (c *copier) determineListConversion(currentListMIMEType string, destSupport
prioritizedTypes := newOrderedSet()
// The first priority is the current type, if it's in the list, since that lets us avoid a
// conversion that isn't strictly necessary.
for _, t := range destSupportedMIMETypes {
if t == currentListMIMEType {
if slices.Contains(destSupportedMIMETypes, currentListMIMEType) {
prioritizedTypes.append(currentListMIMEType)
break
}
}
// Pick out the other list types that we support.
for _, t := range destSupportedMIMETypes {

View File

@ -7,8 +7,8 @@ import (
"github.com/containers/image/v5/internal/private"
"github.com/containers/image/v5/types"
"github.com/vbauerster/mpb/v7"
"github.com/vbauerster/mpb/v7/decor"
"github.com/vbauerster/mpb/v8"
"github.com/vbauerster/mpb/v8/decor"
)
// newProgressPool creates a *mpb.Progress.
@ -120,7 +120,7 @@ func (bar *progressBar) mark100PercentComplete() {
bar.SetCurrent(bar.originalSize) // This triggers the completion condition.
} else {
// -1 = unknown size
// 0 is somewhat of a a special case: Unlike c/image, where 0 is a definite known
// 0 is somewhat of a special case: Unlike c/image, where 0 is a definite known
// size (possible at least in theory), in mpb, zero-sized progress bars are treated
// as unknown size, in particular they are not configured to be marked as
// complete on bar.Current() reaching bar.total (because that would happen already

View File

@ -8,9 +8,9 @@ import (
"github.com/containers/image/v5/internal/imagesource/impl"
"github.com/containers/image/v5/internal/imagesource/stubs"
"github.com/containers/image/v5/internal/manifest"
"github.com/containers/image/v5/internal/private"
"github.com/containers/image/v5/internal/signature"
"github.com/containers/image/v5/manifest"
"github.com/containers/image/v5/types"
"github.com/opencontainers/go-digest"
)

View File

@ -89,7 +89,7 @@ func (ref dirReference) Transport() types.ImageTransport {
// StringWithinTransport returns a string representation of the reference, which MUST be such that
// reference.Transport().ParseReference(reference.StringWithinTransport()) returns an equivalent reference.
// NOTE: The returned string is not promised to be equal to the original input to ParseReference;
// e.g. default attribute values omitted by the user may be filled in in the return value, or vice versa.
// e.g. default attribute values omitted by the user may be filled in the return value, or vice versa.
// WARNING: Do not use the return value in the UI to describe an image, it does not contain the Transport().Name() prefix.
func (ref dirReference) StringWithinTransport() string {
return ref.path

View File

@ -9,7 +9,7 @@ import (
// ResolvePathToFullyExplicit returns the input path converted to an absolute, no-symlinks, cleaned up path.
// To do so, all elements of the input path must exist; as a special case, the final component may be
// a non-existent name (but not a symlink pointing to a non-existent name)
// This is intended as a a helper for implementations of types.ImageReference.PolicyConfigurationIdentity etc.
// This is intended as a helper for implementations of types.ImageReference.PolicyConfigurationIdentity etc.
func ResolvePathToFullyExplicit(path string) (string, error) {
switch _, err := os.Lstat(path); {
case err == nil:

View File

@ -1,8 +1,6 @@
package archive
import (
"context"
"github.com/containers/image/v5/docker/internal/tarfile"
"github.com/containers/image/v5/internal/private"
"github.com/containers/image/v5/types"
@ -15,7 +13,7 @@ type archiveImageSource struct {
// newImageSource returns a types.ImageSource for the specified image reference.
// The caller must call .Close() on the returned ImageSource.
func newImageSource(ctx context.Context, sys *types.SystemContext, ref archiveReference) (private.ImageSource, error) {
func newImageSource(sys *types.SystemContext, ref archiveReference) (private.ImageSource, error) {
var archive *tarfile.Reader
var closeArchive bool
if ref.archiveReader != nil {

View File

@ -62,24 +62,23 @@ func ParseReference(refString string) (types.ImageReference, error) {
return nil, fmt.Errorf("docker-archive reference %s isn't of the form <path>[:<reference>]", refString)
}
parts := strings.SplitN(refString, ":", 2)
path := parts[0]
path, tagOrIndex, gotTagOrIndex := strings.Cut(refString, ":")
var nt reference.NamedTagged
sourceIndex := -1
if len(parts) == 2 {
if gotTagOrIndex {
// A :tag or :@index was specified.
if len(parts[1]) > 0 && parts[1][0] == '@' {
i, err := strconv.Atoi(parts[1][1:])
if len(tagOrIndex) > 0 && tagOrIndex[0] == '@' {
i, err := strconv.Atoi(tagOrIndex[1:])
if err != nil {
return nil, fmt.Errorf("Invalid source index %s: %w", parts[1], err)
return nil, fmt.Errorf("Invalid source index %s: %w", tagOrIndex, err)
}
if i < 0 {
return nil, fmt.Errorf("Invalid source index @%d: must not be negative", i)
}
sourceIndex = i
} else {
ref, err := reference.ParseNormalizedNamed(parts[1])
ref, err := reference.ParseNormalizedNamed(tagOrIndex)
if err != nil {
return nil, fmt.Errorf("docker-archive parsing reference: %w", err)
}
@ -137,7 +136,7 @@ func (ref archiveReference) Transport() types.ImageTransport {
// StringWithinTransport returns a string representation of the reference, which MUST be such that
// reference.Transport().ParseReference(reference.StringWithinTransport()) returns an equivalent reference.
// NOTE: The returned string is not promised to be equal to the original input to ParseReference;
// e.g. default attribute values omitted by the user may be filled in in the return value, or vice versa.
// e.g. default attribute values omitted by the user may be filled in the return value, or vice versa.
// WARNING: Do not use the return value in the UI to describe an image, it does not contain the Transport().Name() prefix.
func (ref archiveReference) StringWithinTransport() string {
switch {
@ -191,7 +190,7 @@ func (ref archiveReference) NewImage(ctx context.Context, sys *types.SystemConte
// NewImageSource returns a types.ImageSource for this reference.
// The caller must call .Close() on the returned ImageSource.
func (ref archiveReference) NewImageSource(ctx context.Context, sys *types.SystemContext) (types.ImageSource, error) {
return newImageSource(ctx, sys, ref)
return newImageSource(sys, ref)
}
// NewImageDestination returns a types.ImageDestination for this reference.

View File

@ -19,7 +19,7 @@ type Writer struct {
archive *tarfile.Writer
writer io.Closer
// The following state can only be acccessed with the mutex held.
// The following state can only be accessed with the mutex held.
mutex sync.Mutex
hadCommit bool // At least one successful commit has happened
}

View File

@ -41,7 +41,7 @@ type bodyReader struct {
}
// newBodyReader creates a bodyReader for request path in c.
// firstBody is an already correctly opened body for the blob, returing the full blob from the start.
// firstBody is an already correctly opened body for the blob, returning the full blob from the start.
// If reading from firstBody fails, bodyReader may heuristically decide to resume.
func newBodyReader(ctx context.Context, c *dockerClient, path string, firstBody io.ReadCloser) (io.ReadCloser, error) {
logURL, err := c.resolveRequestURL(path)
@ -193,7 +193,7 @@ func (br *bodyReader) Read(p []byte) (int, error) {
return n, fmt.Errorf("%w (after reconnecting, fetching blob: %v)", originalErr, err)
}
logrus.Debugf("Succesfully reconnected to %s", redactedURL)
logrus.Debugf("Successfully reconnected to %s", redactedURL)
consumedBody = true
br.body = res.Body
br.lastRetryOffset = br.offset

View File

@ -118,7 +118,7 @@ func (ref daemonReference) Transport() types.ImageTransport {
// StringWithinTransport returns a string representation of the reference, which MUST be such that
// reference.Transport().ParseReference(reference.StringWithinTransport()) returns an equivalent reference.
// NOTE: The returned string is not promised to be equal to the original input to ParseReference;
// e.g. default attribute values omitted by the user may be filled in in the return value, or vice versa.
// e.g. default attribute values omitted by the user may be filled in the return value, or vice versa.
// WARNING: Do not use the return value in the UI to describe an image, it does not contain the Transport().Name() prefix;
// instead, see transports.ImageName().
func (ref daemonReference) StringWithinTransport() string {

View File

@ -448,8 +448,8 @@ func SearchRegistry(ctx context.Context, sys *types.SystemContext, registry, ima
if link == "" {
break
}
linkURLStr := strings.Trim(strings.Split(link, ";")[0], "<>")
linkURL, err := url.Parse(linkURLStr)
linkURLPart, _, _ := strings.Cut(link, ";")
linkURL, err := url.Parse(strings.Trim(linkURLPart, "<>"))
if err != nil {
return searchRes, err
}
@ -596,7 +596,7 @@ func (c *dockerClient) makeRequestToResolvedURL(ctx context.Context, method stri
case <-time.After(delay):
// Nothing
}
delay = delay * 2 // If the registry does not specify a delay, back off exponentially.
delay *= 2 // If the registry does not specify a delay, back off exponentially.
}
}
@ -985,7 +985,7 @@ func (c *dockerClient) getBlob(ctx context.Context, ref dockerReference, info ty
return reconnectingReader, blobSize, nil
}
// getOCIDescriptorContents returns the contents a blob spcified by descriptor in ref, which must fit within limit.
// getOCIDescriptorContents returns the contents a blob specified by descriptor in ref, which must fit within limit.
func (c *dockerClient) getOCIDescriptorContents(ctx context.Context, ref dockerReference, desc imgspecv1.Descriptor, maxSize int, cache types.BlobInfoCache) ([]byte, error) {
// Note that this copies all kinds of attachments: attestations, and whatever else is there,
// not just signatures. We leave the signature consumers to decide based on the MIME type.
@ -994,7 +994,7 @@ func (c *dockerClient) getOCIDescriptorContents(ctx context.Context, ref dockerR
return nil, err
}
defer reader.Close()
payload, err := iolimits.ReadAtMost(reader, iolimits.MaxSignatureBodySize)
payload, err := iolimits.ReadAtMost(reader, maxSize)
if err != nil {
return nil, fmt.Errorf("reading blob %s in %s: %w", desc.Digest.String(), ref.ref.Name(), err)
}

View File

@ -94,8 +94,8 @@ func GetRepositoryTags(ctx context.Context, sys *types.SystemContext, ref types.
break
}
linkURLStr := strings.Trim(strings.Split(link, ";")[0], "<>")
linkURL, err := url.Parse(linkURLStr)
linkURLPart, _, _ := strings.Cut(link, ";")
linkURL, err := url.Parse(strings.Trim(linkURLPart, "<>"))
if err != nil {
return tags, err
}

View File

@ -21,6 +21,7 @@ import (
"github.com/containers/image/v5/internal/iolimits"
"github.com/containers/image/v5/internal/private"
"github.com/containers/image/v5/internal/putblobdigest"
"github.com/containers/image/v5/internal/set"
"github.com/containers/image/v5/internal/signature"
"github.com/containers/image/v5/internal/streamdigest"
"github.com/containers/image/v5/internal/uploadreader"
@ -32,6 +33,8 @@ import (
"github.com/opencontainers/go-digest"
imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1"
"github.com/sirupsen/logrus"
"golang.org/x/exp/maps"
"golang.org/x/exp/slices"
)
type dockerImageDestination struct {
@ -731,24 +734,15 @@ func layerMatchesSigstoreSignature(layer imgspecv1.Descriptor, mimeType string,
// But right now we dont want to deal with corner cases like bad digest formats
// or unavailable algorithms; in the worst case we end up with duplicate signature
// entries.
layer.Digest.String() != digest.FromBytes(payloadBlob).String() {
layer.Digest.String() != digest.FromBytes(payloadBlob).String() ||
!maps.Equal(layer.Annotations, annotations) {
return false
}
if len(layer.Annotations) != len(annotations) {
return false
}
for k, v1 := range layer.Annotations {
if v2, ok := annotations[k]; !ok || v1 != v2 {
return false
}
}
// All annotations in layer exist in sig, and the number of annotations is the same, so all annotations
// in sig also exist in layer.
return true
}
// putBlobBytesAsOCI uploads a blob with the specified contents, and returns an appropriate
// OCI descriptior.
// OCI descriptor.
func (d *dockerImageDestination) putBlobBytesAsOCI(ctx context.Context, contents []byte, mimeType string, options private.PutBlobOptions) (imgspecv1.Descriptor, error) {
blobDigest := digest.FromBytes(contents)
info, err := d.PutBlobWithOptions(ctx, bytes.NewReader(contents),
@ -803,12 +797,11 @@ func (d *dockerImageDestination) putSignaturesToAPIExtension(ctx context.Context
if err != nil {
return err
}
existingSigNames := map[string]struct{}{}
existingSigNames := set.New[string]()
for _, sig := range existingSignatures.Signatures {
existingSigNames[sig.Name] = struct{}{}
existingSigNames.Add(sig.Name)
}
sigExists:
for _, newSigWithFormat := range signatures {
newSigSimple, ok := newSigWithFormat.(signature.SimpleSigning)
if !ok {
@ -816,10 +809,10 @@ sigExists:
}
newSig := newSigSimple.UntrustedSignature()
for _, existingSig := range existingSignatures.Signatures {
if existingSig.Version == extensionSignatureSchemaVersion && existingSig.Type == extensionSignatureTypeAtomic && bytes.Equal(existingSig.Content, newSig) {
continue sigExists
}
if slices.ContainsFunc(existingSignatures.Signatures, func(existingSig extensionSignature) bool {
return existingSig.Version == extensionSignatureSchemaVersion && existingSig.Type == extensionSignatureTypeAtomic && bytes.Equal(existingSig.Content, newSig)
}) {
continue
}
// The API expect us to invent a new unique name. This is racy, but hopefully good enough.
@ -831,7 +824,7 @@ sigExists:
return fmt.Errorf("generating random signature len %d: %w", n, err)
}
signatureName = fmt.Sprintf("%s@%032x", manifestDigest.String(), randBytes)
if _, ok := existingSigNames[signatureName]; !ok {
if !existingSigNames.Contains(signatureName) {
break
}
}

View File

@ -250,7 +250,7 @@ func splitHTTP200ResponseToPartial(streams chan io.ReadCloser, errs chan error,
currentOffset += toSkip
}
s := signalCloseReader{
closed: make(chan interface{}),
closed: make(chan struct{}),
stream: io.NopCloser(io.LimitReader(body, int64(c.Length))),
consumeStream: true,
}
@ -292,7 +292,7 @@ func handle206Response(streams chan io.ReadCloser, errs chan error, body io.Read
return
}
s := signalCloseReader{
closed: make(chan interface{}),
closed: make(chan struct{}),
stream: p,
}
streams <- s
@ -335,7 +335,7 @@ func parseMediaType(contentType string) (string, map[string]string, error) {
func (s *dockerImageSource) GetBlobAt(ctx context.Context, info types.BlobInfo, chunks []private.ImageSourceChunk) (chan io.ReadCloser, chan error, error) {
headers := make(map[string][]string)
var rangeVals []string
rangeVals := make([]string, 0, len(chunks))
for _, c := range chunks {
rangeVals = append(rangeVals, fmt.Sprintf("%d-%d", c.Offset, c.Offset+c.Length-1))
}
@ -766,7 +766,7 @@ func makeBufferedNetworkReader(stream io.ReadCloser, nBuffers, bufferSize uint)
}
type signalCloseReader struct {
closed chan interface{}
closed chan struct{}
stream io.ReadCloser
consumeStream bool
}

View File

@ -92,7 +92,7 @@ func (ref dockerReference) Transport() types.ImageTransport {
// StringWithinTransport returns a string representation of the reference, which MUST be such that
// reference.Transport().ParseReference(reference.StringWithinTransport()) returns an equivalent reference.
// NOTE: The returned string is not promised to be equal to the original input to ParseReference;
// e.g. default attribute values omitted by the user may be filled in in the return value, or vice versa.
// e.g. default attribute values omitted by the user may be filled in the return value, or vice versa.
// WARNING: Do not use the return value in the UI to describe an image, it does not contain the Transport().Name() prefix.
func (ref dockerReference) StringWithinTransport() string {
return "//" + reference.FamiliarString(ref.ref)

View File

@ -40,7 +40,7 @@ func httpResponseToError(res *http.Response, context string) error {
return ErrUnauthorizedForCredentials{Err: err}
default:
if context != "" {
context = context + ": "
context += ": "
}
return fmt.Errorf("%sinvalid status code from registry %d (%s)", context, res.StatusCode, http.StatusText(res.StatusCode))
}

View File

@ -13,10 +13,12 @@ import (
"time"
"github.com/containers/image/v5/docker/reference"
"github.com/containers/image/v5/internal/set"
"github.com/containers/image/v5/manifest"
"github.com/containers/image/v5/types"
"github.com/opencontainers/go-digest"
"github.com/sirupsen/logrus"
"golang.org/x/exp/slices"
)
// Writer allows creating a (docker save)-formatted tar archive containing one or more images.
@ -29,7 +31,7 @@ type Writer struct {
// Other state.
blobs map[digest.Digest]types.BlobInfo // list of already-sent blobs
repositories map[string]map[string]string
legacyLayers map[string]struct{} // A set of IDs of legacy layers that have been already sent.
legacyLayers *set.Set[string] // A set of IDs of legacy layers that have been already sent.
manifest []ManifestItem
manifestByConfig map[digest.Digest]int // A map from config digest to an entry index in manifest above.
}
@ -42,7 +44,7 @@ func NewWriter(dest io.Writer) *Writer {
tar: tar.NewWriter(dest),
blobs: make(map[digest.Digest]types.BlobInfo),
repositories: map[string]map[string]string{},
legacyLayers: map[string]struct{}{},
legacyLayers: set.New[string](),
manifestByConfig: map[digest.Digest]int{},
}
}
@ -89,7 +91,7 @@ func (w *Writer) recordBlobLocked(info types.BlobInfo) {
// ensureSingleLegacyLayerLocked writes legacy VERSION and configuration files for a single layer
// The caller must have locked the Writer.
func (w *Writer) ensureSingleLegacyLayerLocked(layerID string, layerDigest digest.Digest, configBytes []byte) error {
if _, ok := w.legacyLayers[layerID]; !ok {
if !w.legacyLayers.Contains(layerID) {
// Create a symlink for the legacy format, where there is one subdirectory per layer ("image").
// See also the comment in physicalLayerPath.
physicalLayerPath := w.physicalLayerPath(layerDigest)
@ -106,7 +108,7 @@ func (w *Writer) ensureSingleLegacyLayerLocked(layerID string, layerDigest diges
return fmt.Errorf("writing config json file: %w", err)
}
w.legacyLayers[layerID] = struct{}{}
w.legacyLayers.Add(layerID)
}
return nil
}
@ -117,7 +119,7 @@ func (w *Writer) writeLegacyMetadataLocked(layerDescriptors []manifest.Schema2De
lastLayerID := ""
for i, l := range layerDescriptors {
// The legacy format requires a config file per layer
layerConfig := make(map[string]interface{})
layerConfig := make(map[string]any)
// The root layer doesn't have any parent
if lastLayerID != "" {
@ -188,14 +190,9 @@ func checkManifestItemsMatch(a, b *ManifestItem) error {
if a.Config != b.Config {
return fmt.Errorf("Internal error: Trying to reuse ManifestItem values with configs %#v vs. %#v", a.Config, b.Config)
}
if len(a.Layers) != len(b.Layers) {
if !slices.Equal(a.Layers, b.Layers) {
return fmt.Errorf("Internal error: Trying to reuse ManifestItem values with layers %#v vs. %#v", a.Layers, b.Layers)
}
for i := range a.Layers {
if a.Layers[i] != b.Layers[i] {
return fmt.Errorf("Internal error: Trying to reuse ManifestItem values with layers[i] %#v vs. %#v", a.Layers[i], b.Layers[i])
}
}
// Ignore RepoTags, that will be built later.
// Ignore Parent and LayerSources, which we dont set to anything meaningful.
return nil
@ -229,9 +226,9 @@ func (w *Writer) ensureManifestItemLocked(layerDescriptors []manifest.Schema2Des
item = &w.manifest[i]
}
knownRepoTags := map[string]struct{}{}
knownRepoTags := set.New[string]()
for _, repoTag := range item.RepoTags {
knownRepoTags[repoTag] = struct{}{}
knownRepoTags.Add(repoTag)
}
for _, tag := range repoTags {
// For github.com/docker/docker consumers, this works just as well as
@ -252,9 +249,9 @@ func (w *Writer) ensureManifestItemLocked(layerDescriptors []manifest.Schema2Des
// analysis and explanation.
refString := fmt.Sprintf("%s:%s", tag.Name(), tag.Tag())
if _, ok := knownRepoTags[refString]; !ok {
if !knownRepoTags.Contains(refString) {
item.RepoTags = append(item.RepoTags, refString)
knownRepoTags[refString] = struct{}{}
knownRepoTags.Add(refString)
}
}
@ -337,7 +334,7 @@ func (t *tarFI) ModTime() time.Time {
func (t *tarFI) IsDir() bool {
return false
}
func (t *tarFI) Sys() interface{} {
func (t *tarFI) Sys() any {
return nil
}

View File

@ -104,7 +104,7 @@ func splitDockerDomain(name string) (domain, remainder string) {
}
// familiarizeName returns a shortened version of the name familiar
// to to the Docker UI. Familiar names have the default domain
// to the Docker UI. Familiar names have the default domain
// "docker.io" and "library/" repository prefix removed.
// For example, "docker.io/library/redis" will have the familiar
// name "redis" and "docker.io/dmcgowan/myapp" will be "dmcgowan/myapp".

View File

@ -8,8 +8,8 @@
// domain := domain-component ['.' domain-component]* [':' port-number]
// domain-component := /([a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9-]*[a-zA-Z0-9])/
// port-number := /[0-9]+/
// path-component := alpha-numeric [separator alpha-numeric]*
// alpha-numeric := /[a-z0-9]+/
// path-component := alphanumeric [separator alphanumeric]*
// alphanumeric := /[a-z0-9]+/
// separator := /[_.]|__|[-]*/
//
// tag := /[\w][\w.-]{0,127}/
@ -175,7 +175,7 @@ func splitDomain(name string) (string, string) {
// hostname and name string. If no valid hostname is
// found, the hostname is empty and the full value
// is returned as name
// DEPRECATED: Use Domain or Path
// Deprecated: Use Domain or Path
func SplitHostname(named Named) (string, string) {
if r, ok := named.(namedRepository); ok {
return r.Domain(), r.Path()

View File

@ -1,9 +1,10 @@
package reference
import (
storageRegexp "github.com/containers/storage/pkg/regexp"
"regexp"
"strings"
storageRegexp "github.com/containers/storage/pkg/regexp"
)
const (

View File

@ -13,9 +13,9 @@ import (
"github.com/containers/image/v5/internal/rootless"
"github.com/containers/image/v5/types"
"github.com/containers/storage/pkg/homedir"
"github.com/ghodss/yaml"
"github.com/opencontainers/go-digest"
"github.com/sirupsen/logrus"
"gopkg.in/yaml.v3"
)
// systemRegistriesDirPath is the path to registries.d, used for locating lookaside Docker signature storage.
@ -39,18 +39,18 @@ var defaultDockerDir = "/var/lib/containers/sigstore"
// registryConfiguration is one of the files in registriesDirPath configuring lookaside locations, or the result of merging them all.
// NOTE: Keep this in sync with docs/registries.d.md!
type registryConfiguration struct {
DefaultDocker *registryNamespace `json:"default-docker"`
DefaultDocker *registryNamespace `yaml:"default-docker"`
// The key is a namespace, using fully-expanded Docker reference format or parent namespaces (per dockerReference.PolicyConfiguration*),
Docker map[string]registryNamespace `json:"docker"`
Docker map[string]registryNamespace `yaml:"docker"`
}
// registryNamespace defines lookaside locations for a single namespace.
type registryNamespace struct {
Lookaside string `json:"lookaside"` // For reading, and if LookasideStaging is not present, for writing.
LookasideStaging string `json:"lookaside-staging"` // For writing only.
SigStore string `json:"sigstore"` // For compatibility, deprecated in favor of Lookaside.
SigStoreStaging string `json:"sigstore-staging"` // For compatibility, deprecated in favor of LookasideStaging.
UseSigstoreAttachments *bool `json:"use-sigstore-attachments,omitempty"`
Lookaside string `yaml:"lookaside"` // For reading, and if LookasideStaging is not present, for writing.
LookasideStaging string `yaml:"lookaside-staging"` // For writing only.
SigStore string `yaml:"sigstore"` // For compatibility, deprecated in favor of Lookaside.
SigStoreStaging string `yaml:"sigstore-staging"` // For compatibility, deprecated in favor of LookasideStaging.
UseSigstoreAttachments *bool `yaml:"use-sigstore-attachments,omitempty"`
}
// lookasideStorageBase is an "opaque" type representing a lookaside Docker signature storage.

View File

@ -149,7 +149,7 @@ func expectTokenOrQuoted(s string) (value string, rest string) {
p := make([]byte, len(s)-1)
j := copy(p, s[:i])
escape := true
for i = i + 1; i < len(s); i++ {
for i++; i < len(s); i++ {
b := s[i]
switch {
case escape:

View File

@ -4,7 +4,7 @@ import (
"context"
"fmt"
"github.com/containers/image/v5/manifest"
"github.com/containers/image/v5/internal/manifest"
"github.com/containers/image/v5/types"
)

View File

@ -381,7 +381,7 @@ func v1ConfigFromConfigJSON(configJSON []byte, v1ID, parentV1ID string, throwawa
delete(rawContents, "rootfs")
delete(rawContents, "history")
updates := map[string]interface{}{"id": v1ID}
updates := map[string]any{"id": v1ID}
if parentV1ID != "" {
updates["parent"] = parentV1ID
}

View File

@ -4,7 +4,7 @@ import (
"context"
"fmt"
"github.com/containers/image/v5/manifest"
"github.com/containers/image/v5/internal/manifest"
"github.com/containers/image/v5/types"
)

View File

@ -10,7 +10,7 @@ import (
)
// FromReference returns a types.ImageCloser implementation for the default instance reading from reference.
// If reference poitns to a manifest list, .Manifest() still returns the manifest list,
// If reference points to a manifest list, .Manifest() still returns the manifest list,
// but other methods transparently return data from an appropriate image instance.
//
// The caller must call .Close() on the returned ImageCloser.

View File

@ -0,0 +1,72 @@
package manifest
import (
"encoding/json"
"fmt"
)
// AllowedManifestFields is a bit mask of “essential” manifest fields that ValidateUnambiguousManifestFormat
// can expect to be present.
type AllowedManifestFields int

const (
	AllowedFieldConfig AllowedManifestFields = 1 << iota
	AllowedFieldFSLayers
	AllowedFieldHistory
	AllowedFieldLayers
	AllowedFieldManifests
	AllowedFieldFirstUnusedBit // Keep this at the end!
)

// ValidateUnambiguousManifestFormat rejects manifests (incl. multi-arch) that look like more than
// one kind we currently recognize, i.e. if they contain any of the known “essential” format fields
// other than the ones the caller specifically allows.
// expectedMIMEType is used only for diagnostics.
// NOTE: The caller should do the non-heuristic validations (e.g. check for any specified format
// identification/version, or other “magic numbers”) before calling this, to cleanly reject unambiguous
// data that just isn’t what was expected, as opposed to actually ambiguous data.
func ValidateUnambiguousManifestFormat(manifest []byte, expectedMIMEType string,
	allowed AllowedManifestFields) error {
	if allowed >= AllowedFieldFirstUnusedBit {
		return fmt.Errorf("internal error: invalid allowedManifestFields value %#v", allowed)
	}
	// Decode into a private struct, not just a map[string]any, because we want
	// to also reject case-insensitive matches (which would be used by Go when really decoding
	// the manifest).
	// (It is expected that as manifest formats are added or extended over time, more fields will be added
	// here.)
	var detected struct {
		Config    any `json:"config"`
		FSLayers  any `json:"fsLayers"`
		History   any `json:"history"`
		Layers    any `json:"layers"`
		Manifests any `json:"manifests"`
	}
	if err := json.Unmarshal(manifest, &detected); err != nil {
		// The caller was supposed to already validate version numbers, so this should not happen;
		// let’s not bother with making this error “nice”.
		return err
	}
	// Collect every “essential” field that is present but not explicitly allowed.
	unexpected := []string{}
	for _, field := range []struct {
		name string
		seen any
		bit  AllowedManifestFields
	}{
		{"config", detected.Config, AllowedFieldConfig},
		{"fsLayers", detected.FSLayers, AllowedFieldFSLayers},
		{"history", detected.History, AllowedFieldHistory},
		{"layers", detected.Layers, AllowedFieldLayers},
		{"manifests", detected.Manifests, AllowedFieldManifests},
	} {
		if field.seen != nil && (allowed&field.bit) == 0 {
			unexpected = append(unexpected, field.name)
		}
	}
	if len(unexpected) != 0 {
		return fmt.Errorf(`rejecting ambiguous manifest, unexpected fields %#v in supposedly %s`,
			unexpected, expectedMIMEType)
	}
	return nil
}

View File

@ -0,0 +1,15 @@
package manifest
import (
"github.com/opencontainers/go-digest"
)
// Schema2Descriptor is a “descriptor” in docker/distribution schema 2:
// a reference to a blob by MIME type, size and content digest.
//
// This is publicly visible as c/image/manifest.Schema2Descriptor.
type Schema2Descriptor struct {
	MediaType string        `json:"mediaType"`
	Size      int64         `json:"size"`
	Digest    digest.Digest `json:"digest"`
	URLs      []string      `json:"urls,omitempty"` // optional alternative locations the blob may be fetched from
}

View File

@ -0,0 +1,255 @@
package manifest
import (
"encoding/json"
"fmt"
platform "github.com/containers/image/v5/internal/pkg/platform"
"github.com/containers/image/v5/types"
"github.com/opencontainers/go-digest"
imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1"
"golang.org/x/exp/slices"
)
// Schema2PlatformSpec describes the platform which a particular manifest is
// specialized for.
// This is publicly visible as c/image/manifest.Schema2PlatformSpec.
type Schema2PlatformSpec struct {
	Architecture string   `json:"architecture"`
	OS           string   `json:"os"`
	OSVersion    string   `json:"os.version,omitempty"`  // e.g. a specific OS build; optional
	OSFeatures   []string `json:"os.features,omitempty"` // required OS features; optional
	Variant      string   `json:"variant,omitempty"`     // CPU variant (e.g. ARM variants); optional
	Features     []string `json:"features,omitempty"`    // removed in OCI
}
// Schema2ManifestDescriptor references a platform-specific manifest.
// This is publicly visible as c/image/manifest.Schema2ManifestDescriptor.
type Schema2ManifestDescriptor struct {
	Schema2Descriptor                     // the manifest blob itself: media type, size, digest, URLs
	Platform          Schema2PlatformSpec `json:"platform"` // platform the referenced manifest is specialized for
}
// Schema2ListPublic is a list of platform-specific manifests.
// This is publicly visible as c/image/manifest.Schema2List.
// Internal users should usually use Schema2List instead.
type Schema2ListPublic struct {
	SchemaVersion int                         `json:"schemaVersion"` // 2 for this format
	MediaType     string                      `json:"mediaType"`     // MIME type identifying the list format
	Manifests     []Schema2ManifestDescriptor `json:"manifests"`     // one entry per platform-specific manifest
}
// MIMEType returns the MIME type of this particular manifest list.
func (list *Schema2ListPublic) MIMEType() string {
	// The MediaType field is returned verbatim; no validation happens here.
	return list.MediaType
}
// Instances returns a slice of digests of the manifests that this list knows of.
func (list *Schema2ListPublic) Instances() []digest.Digest {
	digests := make([]digest.Digest, 0, len(list.Manifests))
	for _, desc := range list.Manifests {
		digests = append(digests, desc.Digest)
	}
	return digests
}
// Instance returns the ListUpdate of a particular instance in the list,
// or an error if instanceDigest does not identify any entry.
func (list *Schema2ListPublic) Instance(instanceDigest digest.Digest) (ListUpdate, error) {
	for i := range list.Manifests {
		desc := &list.Manifests[i]
		if desc.Digest != instanceDigest {
			continue
		}
		return ListUpdate{
			Digest:    desc.Digest,
			Size:      desc.Size,
			MediaType: desc.MediaType,
		}, nil
	}
	return ListUpdate{}, fmt.Errorf("unable to find instance %s passed to Schema2List.Instances", instanceDigest)
}
// UpdateInstances updates the sizes, digests, and media types of the manifests
// which the list catalogs.
// The number of updates must exactly match the number of catalogued manifests.
// NOTE: on a validation error, earlier entries (and earlier fields of the failing
// entry) may already have been modified in place.
func (list *Schema2ListPublic) UpdateInstances(updates []ListUpdate) error {
	if len(updates) != len(list.Manifests) {
		return fmt.Errorf("incorrect number of update entries passed to Schema2List.UpdateInstances: expected %d, got %d", len(list.Manifests), len(updates))
	}
	for i, update := range updates {
		target := &list.Manifests[i]
		// Each field is validated immediately before it is assigned, preserving
		// the original validate/assign ordering.
		if err := update.Digest.Validate(); err != nil {
			return fmt.Errorf("update %d of %d passed to Schema2List.UpdateInstances contained an invalid digest: %w", i+1, len(updates), err)
		}
		target.Digest = update.Digest
		if update.Size < 0 {
			return fmt.Errorf("update %d of %d passed to Schema2List.UpdateInstances had an invalid size (%d)", i+1, len(updates), update.Size)
		}
		target.Size = update.Size
		if update.MediaType == "" {
			// target.MediaType still holds the pre-update value here, which is what the message reports.
			return fmt.Errorf("update %d of %d passed to Schema2List.UpdateInstances had no media type (was %q)", i+1, len(updates), target.MediaType)
		}
		target.MediaType = update.MediaType
	}
	return nil
}
// ChooseInstance parses blob as a schema2 manifest list, and returns the digest
// of the image which is appropriate for the current environment.
func (list *Schema2ListPublic) ChooseInstance(ctx *types.SystemContext) (digest.Digest, error) {
	wantedPlatforms, err := platform.WantedPlatforms(ctx)
	if err != nil {
		return "", fmt.Errorf("getting platform information %#v: %w", ctx, err)
	}
	// Try each wanted platform in order; the first manifest matching any of
	// them wins.
	for _, wantedPlatform := range wantedPlatforms {
		for _, d := range list.Manifests {
			// OSFeatures is cloned so MatchesPlatform cannot alias this list's data.
			imagePlatform := imgspecv1.Platform{
				Architecture: d.Platform.Architecture,
				OS:           d.Platform.OS,
				OSVersion:    d.Platform.OSVersion,
				OSFeatures:   slices.Clone(d.Platform.OSFeatures),
				Variant:      d.Platform.Variant,
			}
			if platform.MatchesPlatform(imagePlatform, wantedPlatform) {
				return d.Digest, nil
			}
		}
	}
	return "", fmt.Errorf("no image found in manifest list for architecture %s, variant %q, OS %s", wantedPlatforms[0].Architecture, wantedPlatforms[0].Variant, wantedPlatforms[0].OS)
}
// Serialize returns the list in a blob format.
// NOTE: Serialize() does not in general reproduce the original blob if this
// object was loaded from one, even if no modifications were made!
func (list *Schema2ListPublic) Serialize() ([]byte, error) {
	encoded, err := json.Marshal(list)
	if err != nil {
		return nil, fmt.Errorf("marshaling Schema2List %#v: %w", list, err)
	}
	return encoded, nil
}
// Schema2ListPublicFromComponents creates a Schema2 manifest list instance from the
// supplied data.
// This is publicly visible as c/image/manifest.Schema2ListFromComponents.
func Schema2ListPublicFromComponents(components []Schema2ManifestDescriptor) *Schema2ListPublic {
	manifests := make([]Schema2ManifestDescriptor, 0, len(components))
	for _, component := range components {
		// Clone every slice field so the new list does not alias the caller's data.
		manifests = append(manifests, Schema2ManifestDescriptor{
			Schema2Descriptor{
				MediaType: component.MediaType,
				Size:      component.Size,
				Digest:    component.Digest,
				URLs:      slices.Clone(component.URLs),
			},
			Schema2PlatformSpec{
				Architecture: component.Platform.Architecture,
				OS:           component.Platform.OS,
				OSVersion:    component.Platform.OSVersion,
				OSFeatures:   slices.Clone(component.Platform.OSFeatures),
				Variant:      component.Platform.Variant,
				Features:     slices.Clone(component.Platform.Features),
			},
		})
	}
	return &Schema2ListPublic{
		SchemaVersion: 2,
		MediaType:     DockerV2ListMediaType,
		Manifests:     manifests,
	}
}
// Schema2ListPublicClone creates a deep copy of the passed-in list.
// This is publicly visible as c/image/manifest.Schema2ListClone.
func Schema2ListPublicClone(list *Schema2ListPublic) *Schema2ListPublic {
	// Schema2ListPublicFromComponents deep-copies every slice field of each descriptor.
	return Schema2ListPublicFromComponents(list.Manifests)
}

// ToOCI1Index returns the list encoded as an OCI1 index.
func (list *Schema2ListPublic) ToOCI1Index() (*OCI1IndexPublic, error) {
	components := make([]imgspecv1.Descriptor, 0, len(list.Manifests))
	for _, manifest := range list.Manifests {
		// Slice fields are cloned so the new index does not alias this list.
		// Note: the schema2-specific Platform.Features field has no counterpart
		// in the OCI descriptor built here and is dropped.
		converted := imgspecv1.Descriptor{
			MediaType: manifest.MediaType,
			Size:      manifest.Size,
			Digest:    manifest.Digest,
			URLs:      slices.Clone(manifest.URLs),
			Platform: &imgspecv1.Platform{
				OS:           manifest.Platform.OS,
				Architecture: manifest.Platform.Architecture,
				OSFeatures:   slices.Clone(manifest.Platform.OSFeatures),
				OSVersion:    manifest.Platform.OSVersion,
				Variant:      manifest.Platform.Variant,
			},
		}
		components = append(components, converted)
	}
	oci := OCI1IndexPublicFromComponents(components, nil)
	return oci, nil
}
// ToSchema2List returns the list encoded as a Schema2 list.
// The receiver already is a Schema2 list, so this is just a deep copy.
func (list *Schema2ListPublic) ToSchema2List() (*Schema2ListPublic, error) {
	return Schema2ListPublicClone(list), nil
}

// Schema2ListPublicFromManifest creates a Schema2 manifest list instance from marshalled
// JSON, presumably generated by encoding a Schema2 manifest list.
// This is publicly visible as c/image/manifest.Schema2ListFromManifest.
func Schema2ListPublicFromManifest(manifest []byte) (*Schema2ListPublic, error) {
	list := Schema2ListPublic{
		Manifests: []Schema2ManifestDescriptor{},
	}
	if err := json.Unmarshal(manifest, &list); err != nil {
		return nil, fmt.Errorf("unmarshaling Schema2List %q: %w", string(manifest), err)
	}
	// Reject blobs that also carry fields of other manifest formats
	// (see ValidateUnambiguousManifestFormat).
	if err := ValidateUnambiguousManifestFormat(manifest, DockerV2ListMediaType,
		AllowedFieldManifests); err != nil {
		return nil, err
	}
	return &list, nil
}

// Clone returns a deep copy of this list and its contents.
func (list *Schema2ListPublic) Clone() ListPublic {
	return Schema2ListPublicClone(list)
}

// ConvertToMIMEType converts the passed-in manifest list to a manifest
// list of the specified type.
func (list *Schema2ListPublic) ConvertToMIMEType(manifestMIMEType string) (ListPublic, error) {
	switch normalized := NormalizedMIMEType(manifestMIMEType); normalized {
	case DockerV2ListMediaType:
		// Same format: a deep copy suffices.
		return list.Clone(), nil
	case imgspecv1.MediaTypeImageIndex:
		return list.ToOCI1Index()
	case DockerV2Schema1MediaType, DockerV2Schema1SignedMediaType, imgspecv1.MediaTypeImageManifest, DockerV2Schema2MediaType:
		return nil, fmt.Errorf("Can not convert manifest list to MIME type %q, which is not a list type", manifestMIMEType)
	default:
		// Note that this may not be reachable, NormalizedMIMEType has a default for unknown values.
		return nil, fmt.Errorf("Unimplemented manifest list MIME type %s", manifestMIMEType)
	}
}
// Schema2List is a list of platform-specific manifests.
// It extends Schema2ListPublic with the methods of the internal List interface.
type Schema2List struct {
	Schema2ListPublic
}

// schema2ListFromPublic wraps a public list in the internal Schema2List type.
func schema2ListFromPublic(public *Schema2ListPublic) *Schema2List {
	return &Schema2List{*public}
}

// CloneInternal returns a deep copy of this list and its contents.
func (index *Schema2List) CloneInternal() List {
	return schema2ListFromPublic(Schema2ListPublicClone(&index.Schema2ListPublic))
}

// Clone returns a deep copy of this list and its contents.
func (index *Schema2List) Clone() ListPublic {
	return index.CloneInternal()
}

// Schema2ListFromManifest creates a Schema2 manifest list instance from marshalled
// JSON, presumably generated by encoding a Schema2 manifest list.
func Schema2ListFromManifest(manifest []byte) (*Schema2List, error) {
	public, err := Schema2ListPublicFromManifest(manifest)
	if err != nil {
		return nil, err
	}
	return schema2ListFromPublic(public), nil
}

View File

@ -2,6 +2,10 @@ package manifest
import "fmt"
// FIXME: This is a duplicate of c/image/manifestDockerV2Schema2ConfigMediaType.
// Deduplicate that, depending on outcome of https://github.com/containers/image/pull/1791 .
const dockerV2Schema2ConfigMediaType = "application/vnd.docker.container.image.v1+json"
// NonImageArtifactError (detected via errors.As) is used when asking for an image-specific operation
// on an object which is not a “container image” in the standard sense (e.g. an OCI artifact)
//
@ -28,5 +32,9 @@ func NewNonImageArtifactError(mimeType string) error {
}
func (e NonImageArtifactError) Error() string {
// Special-case these invalid mixed images, which show up from time to time:
if e.mimeType == dockerV2Schema2ConfigMediaType {
return fmt.Sprintf("invalid mixed OCI image with Docker v2s2 config (%q)", e.mimeType)
}
return fmt.Sprintf("unsupported image-specific operation on artifact with type %q", e.mimeType)
}

View File

@ -0,0 +1,86 @@
package manifest
import (
"fmt"
"github.com/containers/image/v5/types"
digest "github.com/opencontainers/go-digest"
imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1"
)
// ListPublic is a subset of List which is a part of the public API;
// so no methods can be added, removed or changed.
//
// Internal users should usually use List instead.
type ListPublic interface {
	// MIMEType returns the MIME type of this particular manifest list.
	MIMEType() string
	// Instances returns a list of the manifests that this list knows of, other than its own.
	Instances() []digest.Digest
	// Update information about the list's instances. The length of the passed-in slice must
	// match the length of the list of instances which the list already contains, and every field
	// must be specified.
	UpdateInstances([]ListUpdate) error
	// Instance returns the size and MIME type of a particular instance in the list.
	Instance(digest.Digest) (ListUpdate, error)
	// ChooseInstance selects which manifest is most appropriate for the platform described by the
	// SystemContext, or for the current platform if the SystemContext doesn't specify any details.
	ChooseInstance(ctx *types.SystemContext) (digest.Digest, error)
	// Serialize returns the list in a blob format.
	// NOTE: Serialize() does not in general reproduce the original blob if this object was loaded
	// from one, even if no modifications were made!
	Serialize() ([]byte, error)
	// ConvertToMIMEType returns the list rebuilt to the specified MIME type, or an error.
	ConvertToMIMEType(mimeType string) (ListPublic, error)
	// Clone returns a deep copy of this list and its contents.
	Clone() ListPublic
}

// List is an interface for parsing, modifying lists of image manifests.
// Callers can either use this abstract interface without understanding the details of the formats,
// or instantiate a specific implementation (e.g. manifest.OCI1Index) and access the public members
// directly.
type List interface {
	ListPublic
	// CloneInternal returns a deep copy of this list and its contents.
	// It is like ListPublic.Clone, but the result preserves the internal List type.
	CloneInternal() List
}

// ListUpdate includes the fields which a List's UpdateInstances() method will modify.
// This is publicly visible as c/image/manifest.ListUpdate.
type ListUpdate struct {
	Digest    digest.Digest // Digest of the referenced manifest.
	Size      int64         // Size of the referenced manifest, in bytes.
	MediaType string        // MIME type of the referenced manifest.
}
// ListPublicFromBlob parses a list of manifests.
// This is publicly visible as c/image/manifest.ListFromBlob.
func ListPublicFromBlob(manifest []byte, manifestMIMEType string) (ListPublic, error) {
	// A List satisfies ListPublic, so the internal parser's results can be
	// returned directly.
	return ListFromBlob(manifest, manifestMIMEType)
}
// ListFromBlob parses a list of manifests.
func ListFromBlob(manifest []byte, manifestMIMEType string) (List, error) {
	switch normalized := NormalizedMIMEType(manifestMIMEType); normalized {
	case DockerV2ListMediaType:
		return Schema2ListFromManifest(manifest)
	case imgspecv1.MediaTypeImageIndex:
		return OCI1IndexFromManifest(manifest)
	case DockerV2Schema1MediaType, DockerV2Schema1SignedMediaType, imgspecv1.MediaTypeImageManifest, DockerV2Schema2MediaType:
		// Single-image manifests are not lists.
		return nil, fmt.Errorf("Treating single images as manifest lists is not implemented")
	default:
		return nil, fmt.Errorf("Unimplemented manifest list MIME type %s (normalized as %s)", manifestMIMEType, normalized)
	}
}

View File

@ -0,0 +1,167 @@
package manifest
import (
"encoding/json"
"github.com/containers/libtrust"
digest "github.com/opencontainers/go-digest"
imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1"
)
// FIXME: Should we just use docker/distribution and docker/docker implementations directly?

// FIXME(runcom, mitr): should we have a mediatype pkg??

const (
	// DockerV2Schema1MediaType MIME type represents Docker manifest schema 1
	DockerV2Schema1MediaType = "application/vnd.docker.distribution.manifest.v1+json"
	// DockerV2Schema1SignedMediaType MIME type represents Docker manifest schema 1 with a JWS signature
	DockerV2Schema1SignedMediaType = "application/vnd.docker.distribution.manifest.v1+prettyjws"
	// DockerV2Schema2MediaType MIME type represents Docker manifest schema 2
	DockerV2Schema2MediaType = "application/vnd.docker.distribution.manifest.v2+json"
	// DockerV2Schema2ConfigMediaType is the MIME type used for schema 2 config blobs.
	DockerV2Schema2ConfigMediaType = "application/vnd.docker.container.image.v1+json"
	// DockerV2Schema2LayerMediaType is the MIME type used for schema 2 layers.
	DockerV2Schema2LayerMediaType = "application/vnd.docker.image.rootfs.diff.tar.gzip"
	// DockerV2SchemaLayerMediaTypeUncompressed is the mediaType used for uncompressed layers.
	DockerV2SchemaLayerMediaTypeUncompressed = "application/vnd.docker.image.rootfs.diff.tar"
	// DockerV2ListMediaType MIME type represents Docker manifest schema 2 list
	DockerV2ListMediaType = "application/vnd.docker.distribution.manifest.list.v2+json"
	// DockerV2Schema2ForeignLayerMediaType is the MIME type used for schema 2 foreign layers.
	DockerV2Schema2ForeignLayerMediaType = "application/vnd.docker.image.rootfs.foreign.diff.tar"
	// DockerV2Schema2ForeignLayerMediaTypeGzip is the MIME type used for gzipped schema 2 foreign layers.
	DockerV2Schema2ForeignLayerMediaTypeGzip = "application/vnd.docker.image.rootfs.foreign.diff.tar.gzip"
)
// GuessMIMEType guesses MIME type of a manifest and returns it _if it is recognized_, or "" if unknown or unrecognized.
// FIXME? We should, in general, prefer out-of-band MIME type instead of blindly parsing the manifest,
// but we may not have such metadata available (e.g. when the manifest is a local file).
// This is publicly visible as c/image/manifest.GuessMIMEType.
func GuessMIMEType(manifest []byte) string {
	// A subset of manifest fields; the rest is silently ignored by json.Unmarshal.
	// Also docker/distribution/manifest.Versioned.
	meta := struct {
		MediaType     string `json:"mediaType"`
		SchemaVersion int    `json:"schemaVersion"`
		Signatures    any    `json:"signatures"`
	}{}
	if err := json.Unmarshal(manifest, &meta); err != nil {
		return ""
	}

	// An explicit, recognized mediaType wins over all heuristics below.
	switch meta.MediaType {
	case DockerV2Schema2MediaType, DockerV2ListMediaType,
		imgspecv1.MediaTypeImageManifest, imgspecv1.MediaTypeImageIndex: // A recognized type.
		return meta.MediaType
	}
	// this is the only way the function can return DockerV2Schema1MediaType, and recognizing that is essential for stripping the JWS signatures = computing the correct manifest digest.
	switch meta.SchemaVersion {
	case 1:
		if meta.Signatures != nil {
			return DockerV2Schema1SignedMediaType
		}
		return DockerV2Schema1MediaType
	case 2:
		// Best effort to understand if this is an OCI image since mediaType
		// wasn't in the manifest for OCI image-spec < 1.0.2.
		// For docker v2s2 meta.MediaType should have been set. But given the data, this is our best guess.
		ociMan := struct {
			Config struct {
				MediaType string `json:"mediaType"`
			} `json:"config"`
		}{}
		if err := json.Unmarshal(manifest, &ociMan); err != nil {
			return ""
		}
		switch ociMan.Config.MediaType {
		case imgspecv1.MediaTypeImageConfig:
			return imgspecv1.MediaTypeImageManifest
		case DockerV2Schema2ConfigMediaType:
			// This case should not happen since a Docker image
			// must declare a top-level media type and
			// `meta.MediaType` has already been checked.
			return DockerV2Schema2MediaType
		}
		// Maybe an image index or an OCI artifact.
		ociIndex := struct {
			Manifests []imgspecv1.Descriptor `json:"manifests"`
		}{}
		if err := json.Unmarshal(manifest, &ociIndex); err != nil {
			return ""
		}
		if len(ociIndex.Manifests) != 0 {
			if ociMan.Config.MediaType == "" {
				return imgspecv1.MediaTypeImageIndex
			}
			// FIXME: this is mixing media types of manifests and configs.
			return ociMan.Config.MediaType
		}
		// It's most likely an OCI artifact with a custom config media
		// type which is not (and cannot) be covered by the media-type
		// checks above.
		return imgspecv1.MediaTypeImageManifest
	}
	return ""
}
// Digest returns the digest of a docker manifest, with any necessary implied transformations like stripping v1s1 signatures.
// This is publicly visible as c/image/manifest.Digest.
func Digest(manifest []byte) (digest.Digest, error) {
	if GuessMIMEType(manifest) == DockerV2Schema1SignedMediaType {
		// v1s1 signed manifests are digested over the payload with the JWS
		// signatures stripped, not over the raw blob.
		sig, err := libtrust.ParsePrettySignature(manifest, "signatures")
		if err != nil {
			return "", err
		}
		manifest, err = sig.Payload()
		if err != nil {
			// Coverage: This should never happen, libtrust's Payload() can fail only if joseBase64UrlDecode() fails, on a string
			// that libtrust itself has joseBase64UrlEncode()d
			return "", err
		}
	}

	return digest.FromBytes(manifest), nil
}

// MatchesDigest returns true iff the manifest matches expectedDigest.
// Error may be set if this returns false.
// Note that this is not doing ConstantTimeCompare; by the time we get here, the cryptographic signature must already have been verified,
// or we are not using a cryptographic channel and the attacker can modify the digest along with the manifest blob.
// This is publicly visible as c/image/manifest.MatchesDigest.
func MatchesDigest(manifest []byte, expectedDigest digest.Digest) (bool, error) {
	// This should eventually support various digest types.
	actualDigest, err := Digest(manifest)
	if err != nil {
		return false, err
	}
	return expectedDigest == actualDigest, nil
}
// NormalizedMIMEType returns the effective MIME type of a manifest MIME type returned by a server,
// centralizing various workarounds.
// This is publicly visible as c/image/manifest.NormalizedMIMEType.
func NormalizedMIMEType(input string) string {
	switch input {
	// "application/json" is a valid v2s1 value per https://github.com/docker/distribution/blob/master/docs/spec/manifest-v2-1.md .
	// This works for now, when nothing else seems to return "application/json"; if that were not true, the mapping/detection might
	// need to happen within the ImageSource.
	case "application/json":
		return DockerV2Schema1SignedMediaType
	case DockerV2Schema1MediaType, DockerV2Schema1SignedMediaType,
		imgspecv1.MediaTypeImageManifest,
		imgspecv1.MediaTypeImageIndex,
		DockerV2Schema2MediaType,
		DockerV2ListMediaType:
		return input
	default:
		// If it's not a recognized manifest media type, or we have failed determining the type, we'll try one last time
		// to deserialize using v2s1 as per https://github.com/docker/distribution/blob/master/manifests.go#L108
		// and https://github.com/docker/distribution/blob/master/manifest/schema1/manifest.go#L50
		//
		// Crane registries can also return "text/plain", or pretty much anything else depending on a file extension
		// “recognized” in the tag. This makes no real sense, but it happens because requests for manifests are
		// redirected to a content distribution network which is configured that way.
		// See https://bugzilla.redhat.com/show_bug.cgi?id=1389442
		return DockerV2Schema1SignedMediaType
	}
}

View File

@ -0,0 +1,265 @@
package manifest
import (
"encoding/json"
"fmt"
"runtime"
platform "github.com/containers/image/v5/internal/pkg/platform"
"github.com/containers/image/v5/types"
"github.com/opencontainers/go-digest"
imgspec "github.com/opencontainers/image-spec/specs-go"
imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1"
"golang.org/x/exp/maps"
"golang.org/x/exp/slices"
)
// OCI1IndexPublic is just an alias for the OCI index type, but one which we can
// provide methods for.
// This is publicly visible as c/image/manifest.OCI1Index.
// Internal users should usually use OCI1Index instead.
type OCI1IndexPublic struct {
	imgspecv1.Index
}

// MIMEType returns the MIME type of this particular manifest index.
// For OCI indices this is always imgspecv1.MediaTypeImageIndex.
func (index *OCI1IndexPublic) MIMEType() string {
	return imgspecv1.MediaTypeImageIndex
}
// Instances returns a slice of digests of the manifests that this index knows of.
func (index *OCI1IndexPublic) Instances() []digest.Digest {
	digests := make([]digest.Digest, 0, len(index.Manifests))
	for _, descriptor := range index.Manifests {
		digests = append(digests, descriptor.Digest)
	}
	return digests
}
// Instance returns the ListUpdate of a particular instance in the index.
// It returns an error if instanceDigest is not present in the index.
func (index *OCI1IndexPublic) Instance(instanceDigest digest.Digest) (ListUpdate, error) {
	for _, manifest := range index.Manifests {
		if manifest.Digest == instanceDigest {
			return ListUpdate{
				Digest:    manifest.Digest,
				Size:      manifest.Size,
				MediaType: manifest.MediaType,
			}, nil
		}
	}
	return ListUpdate{}, fmt.Errorf("unable to find instance %s in OCI1Index", instanceDigest)
}
// UpdateInstances updates the sizes, digests, and media types of the manifests
// which the list catalogs.
// The i-th update is applied to the i-th manifest entry, so updates must have
// exactly one entry per existing manifest.
// NOTE: on a validation error, earlier entries (and the digest of the failing
// entry) may already have been applied, leaving the index partially updated.
func (index *OCI1IndexPublic) UpdateInstances(updates []ListUpdate) error {
	if len(updates) != len(index.Manifests) {
		return fmt.Errorf("incorrect number of update entries passed to OCI1Index.UpdateInstances: expected %d, got %d", len(index.Manifests), len(updates))
	}
	for i := range updates {
		if err := updates[i].Digest.Validate(); err != nil {
			return fmt.Errorf("update %d of %d passed to OCI1Index.UpdateInstances contained an invalid digest: %w", i+1, len(updates), err)
		}
		index.Manifests[i].Digest = updates[i].Digest
		if updates[i].Size < 0 {
			return fmt.Errorf("update %d of %d passed to OCI1Index.UpdateInstances had an invalid size (%d)", i+1, len(updates), updates[i].Size)
		}
		index.Manifests[i].Size = updates[i].Size
		if updates[i].MediaType == "" {
			return fmt.Errorf("update %d of %d passed to OCI1Index.UpdateInstances had no media type (was %q)", i+1, len(updates), index.Manifests[i].MediaType)
		}
		index.Manifests[i].MediaType = updates[i].MediaType
	}
	return nil
}
// ChooseInstance parses blob as an oci v1 manifest index, and returns the digest
// of the image which is appropriate for the current environment.
func (index *OCI1IndexPublic) ChooseInstance(ctx *types.SystemContext) (digest.Digest, error) {
	wantedPlatforms, err := platform.WantedPlatforms(ctx)
	if err != nil {
		return "", fmt.Errorf("getting platform information %#v: %w", ctx, err)
	}
	// Try each wanted platform in order; the first manifest matching any of
	// them wins.
	for _, wantedPlatform := range wantedPlatforms {
		for _, d := range index.Manifests {
			// Entries without platform data cannot be matched here; they are
			// only considered by the fallback loop below.
			if d.Platform == nil {
				continue
			}
			// OSFeatures is cloned so MatchesPlatform cannot alias the index's data.
			imagePlatform := imgspecv1.Platform{
				Architecture: d.Platform.Architecture,
				OS:           d.Platform.OS,
				OSVersion:    d.Platform.OSVersion,
				OSFeatures:   slices.Clone(d.Platform.OSFeatures),
				Variant:      d.Platform.Variant,
			}
			if platform.MatchesPlatform(imagePlatform, wantedPlatform) {
				return d.Digest, nil
			}
		}
	}
	// Fallback: use the first manifest that does not declare a platform at all.
	for _, d := range index.Manifests {
		if d.Platform == nil {
			return d.Digest, nil
		}
	}
	return "", fmt.Errorf("no image found in image index for architecture %s, variant %q, OS %s", wantedPlatforms[0].Architecture, wantedPlatforms[0].Variant, wantedPlatforms[0].OS)
}
// Serialize returns the index in a blob format.
// NOTE: Serialize() does not in general reproduce the original blob if this
// object was loaded from one, even if no modifications were made!
func (index *OCI1IndexPublic) Serialize() ([]byte, error) {
	encoded, err := json.Marshal(index)
	if err != nil {
		return nil, fmt.Errorf("marshaling OCI1Index %#v: %w", index, err)
	}
	return encoded, nil
}
// OCI1IndexPublicFromComponents creates an OCI1 image index instance from the
// supplied data.
// This is publicly visible as c/image/manifest.OCI1IndexFromComponents.
func OCI1IndexPublicFromComponents(components []imgspecv1.Descriptor, annotations map[string]string) *OCI1IndexPublic {
	index := OCI1IndexPublic{
		imgspecv1.Index{
			Versioned:   imgspec.Versioned{SchemaVersion: 2},
			MediaType:   imgspecv1.MediaTypeImageIndex,
			Manifests:   make([]imgspecv1.Descriptor, len(components)),
			Annotations: maps.Clone(annotations),
		},
	}
	for i, component := range components {
		// A nil Platform stays nil; otherwise it is deep-copied, including OSFeatures.
		var platform *imgspecv1.Platform
		if component.Platform != nil {
			platform = &imgspecv1.Platform{
				Architecture: component.Platform.Architecture,
				OS:           component.Platform.OS,
				OSVersion:    component.Platform.OSVersion,
				OSFeatures:   slices.Clone(component.Platform.OSFeatures),
				Variant:      component.Platform.Variant,
			}
		}
		// Clone every slice/map field so the index does not alias the caller's data.
		m := imgspecv1.Descriptor{
			MediaType:   component.MediaType,
			Size:        component.Size,
			Digest:      component.Digest,
			URLs:        slices.Clone(component.URLs),
			Annotations: maps.Clone(component.Annotations),
			Platform:    platform,
		}
		index.Manifests[i] = m
	}
	return &index
}
// OCI1IndexPublicClone creates a deep copy of the passed-in index.
// This is publicly visible as c/image/manifest.OCI1IndexClone.
func OCI1IndexPublicClone(index *OCI1IndexPublic) *OCI1IndexPublic {
	// OCI1IndexPublicFromComponents deep-copies the descriptors and annotations.
	return OCI1IndexPublicFromComponents(index.Manifests, index.Annotations)
}

// ToOCI1Index returns the index encoded as an OCI1 index.
// The receiver already is an OCI1 index, so this is just a deep copy.
func (index *OCI1IndexPublic) ToOCI1Index() (*OCI1IndexPublic, error) {
	return OCI1IndexPublicClone(index), nil
}

// ToSchema2List returns the index encoded as a Schema2 list.
// Note: index-level and descriptor-level OCI annotations are not carried over
// (the Schema2 descriptor built here has no field for them).
func (index *OCI1IndexPublic) ToSchema2List() (*Schema2ListPublic, error) {
	components := make([]Schema2ManifestDescriptor, 0, len(index.Manifests))
	for _, manifest := range index.Manifests {
		platform := manifest.Platform
		if platform == nil {
			// Entries without platform data default to the current runtime's OS/architecture.
			platform = &imgspecv1.Platform{
				OS:           runtime.GOOS,
				Architecture: runtime.GOARCH,
			}
		}
		converted := Schema2ManifestDescriptor{
			Schema2Descriptor{
				MediaType: manifest.MediaType,
				Size:      manifest.Size,
				Digest:    manifest.Digest,
				URLs:      slices.Clone(manifest.URLs),
			},
			Schema2PlatformSpec{
				OS:           platform.OS,
				Architecture: platform.Architecture,
				OSFeatures:   slices.Clone(platform.OSFeatures),
				OSVersion:    platform.OSVersion,
				Variant:      platform.Variant,
			},
		}
		components = append(components, converted)
	}
	s2 := Schema2ListPublicFromComponents(components)
	return s2, nil
}
// OCI1IndexPublicFromManifest creates an OCI1 manifest index instance from marshalled
// JSON, presumably generated by encoding a OCI1 manifest index.
// This is publicly visible as c/image/manifest.OCI1IndexFromManifest.
func OCI1IndexPublicFromManifest(manifest []byte) (*OCI1IndexPublic, error) {
	// Pre-filled defaults; json.Unmarshal overwrites any field present in the blob.
	index := OCI1IndexPublic{
		Index: imgspecv1.Index{
			Versioned:   imgspec.Versioned{SchemaVersion: 2},
			MediaType:   imgspecv1.MediaTypeImageIndex,
			Manifests:   []imgspecv1.Descriptor{},
			Annotations: make(map[string]string),
		},
	}
	if err := json.Unmarshal(manifest, &index); err != nil {
		return nil, fmt.Errorf("unmarshaling OCI1Index %q: %w", string(manifest), err)
	}
	// Reject blobs that also carry fields of other manifest formats
	// (see ValidateUnambiguousManifestFormat).
	if err := ValidateUnambiguousManifestFormat(manifest, imgspecv1.MediaTypeImageIndex,
		AllowedFieldManifests); err != nil {
		return nil, err
	}
	return &index, nil
}

// Clone returns a deep copy of this list and its contents.
func (index *OCI1IndexPublic) Clone() ListPublic {
	return OCI1IndexPublicClone(index)
}

// ConvertToMIMEType converts the passed-in image index to a manifest list of
// the specified type.
func (index *OCI1IndexPublic) ConvertToMIMEType(manifestMIMEType string) (ListPublic, error) {
	switch normalized := NormalizedMIMEType(manifestMIMEType); normalized {
	case DockerV2ListMediaType:
		return index.ToSchema2List()
	case imgspecv1.MediaTypeImageIndex:
		// Same format: a deep copy suffices.
		return index.Clone(), nil
	case DockerV2Schema1MediaType, DockerV2Schema1SignedMediaType, imgspecv1.MediaTypeImageManifest, DockerV2Schema2MediaType:
		return nil, fmt.Errorf("Can not convert image index to MIME type %q, which is not a list type", manifestMIMEType)
	default:
		// Note that this may not be reachable, NormalizedMIMEType has a default for unknown values.
		return nil, fmt.Errorf("Unimplemented manifest MIME type %s", manifestMIMEType)
	}
}
// OCI1Index extends OCI1IndexPublic with the methods of the internal List
// interface, notably CloneInternal.
type OCI1Index struct {
	OCI1IndexPublic
}

// oci1IndexFromPublic wraps a public index in the internal OCI1Index type.
func oci1IndexFromPublic(public *OCI1IndexPublic) *OCI1Index {
	return &OCI1Index{*public}
}

// CloneInternal returns a deep copy of this list and its contents.
func (index *OCI1Index) CloneInternal() List {
	return oci1IndexFromPublic(OCI1IndexPublicClone(&index.OCI1IndexPublic))
}

// Clone returns a deep copy of this list and its contents.
func (index *OCI1Index) Clone() ListPublic {
	return index.CloneInternal()
}

// OCI1IndexFromManifest creates a OCI1 manifest list instance from marshalled
// JSON, presumably generated by encoding a OCI1 manifest list.
func OCI1IndexFromManifest(manifest []byte) (*OCI1Index, error) {
	public, err := OCI1IndexPublicFromManifest(manifest)
	if err != nil {
		return nil, err
	}
	return oci1IndexFromPublic(public), nil
}

View File

@ -25,6 +25,7 @@ import (
"github.com/containers/image/v5/types"
imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1"
"golang.org/x/exp/slices"
)
// For Linux, the kernel has already detected the ABI, ISA and Features.
@ -152,13 +153,9 @@ func WantedPlatforms(ctx *types.SystemContext) ([]imgspecv1.Platform, error) {
if wantedVariant != "" {
// If the user requested a specific variant, we'll walk down
// the list from most to least compatible.
if compatibility[wantedArch] != nil {
variantOrder := compatibility[wantedArch]
for i, v := range variantOrder {
if wantedVariant == v {
if variantOrder := compatibility[wantedArch]; variantOrder != nil {
if i := slices.Index(variantOrder, wantedVariant); i != -1 {
variants = variantOrder[i:]
break
}
}
}
if variants == nil {

View File

@ -0,0 +1,46 @@
package set
import "golang.org/x/exp/maps"
// FIXME:
// - This should be in a public library somewhere

// Set is a set of comparable elements, implemented as a map with empty struct
// values.
// The zero value is not usable; construct sets with New or NewWithValues.
// NOTE(review): Add/Delete use value receivers while Contains/Empty/Values use
// pointer receivers; both work because the map header is shared, but the
// inconsistency is worth unifying.
type Set[E comparable] struct {
	m map[E]struct{}
}

// New returns an empty set.
func New[E comparable]() *Set[E] {
	return &Set[E]{
		m: map[E]struct{}{},
	}
}

// NewWithValues returns a set containing exactly the listed values.
func NewWithValues[E comparable](values ...E) *Set[E] {
	s := New[E]()
	for _, v := range values {
		s.Add(v)
	}
	return s
}

// Add inserts v into the set; adding an already-present value is a no-op.
func (s Set[E]) Add(v E) {
	s.m[v] = struct{}{} // Possibly writing the same struct{}{} presence marker again.
}

// Delete removes v from the set; deleting an absent value is a no-op.
func (s Set[E]) Delete(v E) {
	delete(s.m, v)
}

// Contains reports whether v is a member of the set.
func (s *Set[E]) Contains(v E) bool {
	_, ok := s.m[v]
	return ok
}

// Empty reports whether the set has no members.
func (s *Set[E]) Empty() bool {
	return len(s.m) == 0
}

// Values returns the members of the set, in unspecified order.
func (s *Set[E]) Values() []E {
	return maps.Keys(s.m)
}

View File

@ -24,7 +24,7 @@ type Signature interface {
blobChunk() ([]byte, error)
}
// BlobChunk returns a representation of sig as a []byte, suitable for long-term storage.
// Blob returns a representation of sig as a []byte, suitable for long-term storage.
func Blob(sig Signature) ([]byte, error) {
chunk, err := sig.blobChunk()
if err != nil {
@ -66,22 +66,20 @@ func FromBlob(blob []byte) (Signature, error) {
// The newer format: binary 0, format name, newline, data
case 0x00:
blob = blob[1:]
newline := bytes.IndexByte(blob, '\n')
if newline == -1 {
formatBytes, blobChunk, foundNewline := bytes.Cut(blob, []byte{'\n'})
if !foundNewline {
return nil, fmt.Errorf("invalid signature format, missing newline")
}
formatBytes := blob[:newline]
for _, b := range formatBytes {
if b < 32 || b >= 0x7F {
return nil, fmt.Errorf("invalid signature format, non-ASCII byte %#x", b)
}
}
blobChunk := blob[newline+1:]
switch {
case bytes.Equal(formatBytes, []byte(SimpleSigningFormat)):
return SimpleSigningFromBlob(blobChunk), nil
case bytes.Equal(formatBytes, []byte(SigstoreFormat)):
return SigstoreFromBlobChunk(blobChunk)
return sigstoreFromBlobChunk(blobChunk)
default:
return nil, fmt.Errorf("unrecognized signature format %q", string(formatBytes))
}
@ -102,10 +100,3 @@ func UnsupportedFormatError(sig Signature) error {
return fmt.Errorf("unsupported, and unrecognized, signature format %q", string(formatID))
}
}
// copyByteSlice returns a guaranteed-fresh copy of a byte slice
// Use this to make sure the underlying data is not shared and cant be unexpectedly modified.
func copyByteSlice(s []byte) []byte {
res := []byte{}
return append(res, s...)
}

View File

@ -1,6 +1,11 @@
package signature
import "encoding/json"
import (
"encoding/json"
"golang.org/x/exp/maps"
"golang.org/x/exp/slices"
)
const (
// from sigstore/cosign/pkg/types.SimpleSigningMediaType
@ -40,13 +45,13 @@ type sigstoreJSONRepresentation struct {
func SigstoreFromComponents(untrustedMimeType string, untrustedPayload []byte, untrustedAnnotations map[string]string) Sigstore {
return Sigstore{
untrustedMIMEType: untrustedMimeType,
untrustedPayload: copyByteSlice(untrustedPayload),
untrustedAnnotations: copyStringMap(untrustedAnnotations),
untrustedPayload: slices.Clone(untrustedPayload),
untrustedAnnotations: maps.Clone(untrustedAnnotations),
}
}
// SigstoreFromBlobChunk converts a Sigstore signature, as returned by Sigstore.blobChunk, into a Sigstore object.
func SigstoreFromBlobChunk(blobChunk []byte) (Sigstore, error) {
// sigstoreFromBlobChunk converts a Sigstore signature, as returned by Sigstore.blobChunk, into a Sigstore object.
func sigstoreFromBlobChunk(blobChunk []byte) (Sigstore, error) {
var v sigstoreJSONRepresentation
if err := json.Unmarshal(blobChunk, &v); err != nil {
return Sigstore{}, err
@ -74,17 +79,9 @@ func (s Sigstore) UntrustedMIMEType() string {
return s.untrustedMIMEType
}
func (s Sigstore) UntrustedPayload() []byte {
return copyByteSlice(s.untrustedPayload)
return slices.Clone(s.untrustedPayload)
}
func (s Sigstore) UntrustedAnnotations() map[string]string {
return copyStringMap(s.untrustedAnnotations)
}
func copyStringMap(m map[string]string) map[string]string {
res := map[string]string{}
for k, v := range m {
res[k] = v
}
return res
return maps.Clone(s.untrustedAnnotations)
}

View File

@ -1,5 +1,7 @@
package signature
import "golang.org/x/exp/slices"
// SimpleSigning is a “simple signing” signature.
type SimpleSigning struct {
untrustedSignature []byte
@ -8,7 +10,7 @@ type SimpleSigning struct {
// SimpleSigningFromBlob converts a “simple signing” signature into a SimpleSigning object.
func SimpleSigningFromBlob(blobChunk []byte) SimpleSigning {
return SimpleSigning{
untrustedSignature: copyByteSlice(blobChunk),
untrustedSignature: slices.Clone(blobChunk),
}
}
@ -19,9 +21,9 @@ func (s SimpleSigning) FormatID() FormatID {
// blobChunk returns a representation of signature as a []byte, suitable for long-term storage.
// Almost everyone should use signature.Blob() instead.
func (s SimpleSigning) blobChunk() ([]byte, error) {
return copyByteSlice(s.untrustedSignature), nil
return slices.Clone(s.untrustedSignature), nil
}
func (s SimpleSigning) UntrustedSignature() []byte {
return copyByteSlice(s.untrustedSignature)
return slices.Clone(s.untrustedSignature)
}

View File

@ -11,7 +11,7 @@ import (
// The net/http package uses a separate goroutine to upload data to a HTTP connection,
// and it is possible for the server to return a response (typically an error) before consuming
// the full body of the request. In that case http.Client.Do can return with an error while
// the body is still being read — regardless of of the cancellation, if any, of http.Request.Context().
// the body is still being read — regardless of the cancellation, if any, of http.Request.Context().
//
// As a result, any data used/updated by the io.Reader() provided as the request body may be
// used/updated even after http.Client.Do returns, causing races.

View File

@ -1,7 +1,6 @@
package manifest
import (
"encoding/json"
"fmt"
compressiontypes "github.com/containers/image/v5/pkg/compression/types"
@ -9,96 +8,6 @@ import (
"github.com/sirupsen/logrus"
)
// dupStringSlice returns a deep copy of a slice of strings, or nil if the
// source slice is empty.
func dupStringSlice(list []string) []string {
if len(list) == 0 {
return nil
}
dup := make([]string, len(list))
copy(dup, list)
return dup
}
// dupStringStringMap returns a deep copy of a map[string]string, or nil if the
// passed-in map is nil or has no keys.
func dupStringStringMap(m map[string]string) map[string]string {
if len(m) == 0 {
return nil
}
result := make(map[string]string)
for k, v := range m {
result[k] = v
}
return result
}
// allowedManifestFields is a bit mask of “essential” manifest fields that validateUnambiguousManifestFormat
// can expect to be present.
type allowedManifestFields int
const (
allowedFieldConfig allowedManifestFields = 1 << iota
allowedFieldFSLayers
allowedFieldHistory
allowedFieldLayers
allowedFieldManifests
allowedFieldFirstUnusedBit // Keep this at the end!
)
// validateUnambiguousManifestFormat rejects manifests (incl. multi-arch) that look like more than
// one kind we currently recognize, i.e. if they contain any of the known “essential” format fields
// other than the ones the caller specifically allows.
// expectedMIMEType is used only for diagnostics.
// NOTE: The caller should do the non-heuristic validations (e.g. check for any specified format
// identification/version, or other “magic numbers”) before calling this, to cleanly reject unambiguous
// data that just isnt what was expected, as opposed to actually ambiguous data.
func validateUnambiguousManifestFormat(manifest []byte, expectedMIMEType string,
allowed allowedManifestFields) error {
if allowed >= allowedFieldFirstUnusedBit {
return fmt.Errorf("internal error: invalid allowedManifestFields value %#v", allowed)
}
// Use a private type to decode, not just a map[string]interface{}, because we want
// to also reject case-insensitive matches (which would be used by Go when really decoding
// the manifest).
// (It is expected that as manifest formats are added or extended over time, more fields will be added
// here.)
detectedFields := struct {
Config interface{} `json:"config"`
FSLayers interface{} `json:"fsLayers"`
History interface{} `json:"history"`
Layers interface{} `json:"layers"`
Manifests interface{} `json:"manifests"`
}{}
if err := json.Unmarshal(manifest, &detectedFields); err != nil {
// The caller was supposed to already validate version numbers, so this should not happen;
// lets not bother with making this error “nice”.
return err
}
unexpected := []string{}
// Sadly this isnt easy to automate in Go, without reflection. So, copy&paste.
if detectedFields.Config != nil && (allowed&allowedFieldConfig) == 0 {
unexpected = append(unexpected, "config")
}
if detectedFields.FSLayers != nil && (allowed&allowedFieldFSLayers) == 0 {
unexpected = append(unexpected, "fsLayers")
}
if detectedFields.History != nil && (allowed&allowedFieldHistory) == 0 {
unexpected = append(unexpected, "history")
}
if detectedFields.Layers != nil && (allowed&allowedFieldLayers) == 0 {
unexpected = append(unexpected, "layers")
}
if detectedFields.Manifests != nil && (allowed&allowedFieldManifests) == 0 {
unexpected = append(unexpected, "manifests")
}
if len(unexpected) != 0 {
return fmt.Errorf(`rejecting ambiguous manifest, unexpected fields %#v in supposedly %s`,
unexpected, expectedMIMEType)
}
return nil
}
// layerInfosToStrings converts a list of layer infos, presumably obtained from a Manifest.LayerInfos()
// method call, into a format suitable for inclusion in a types.ImageInspectInfo structure.
func layerInfosToStrings(infos []LayerInfo) []string {

View File

@ -8,10 +8,13 @@ import (
"time"
"github.com/containers/image/v5/docker/reference"
"github.com/containers/image/v5/internal/manifest"
"github.com/containers/image/v5/internal/set"
"github.com/containers/image/v5/types"
"github.com/containers/storage/pkg/regexp"
"github.com/docker/docker/api/types/versions"
"github.com/opencontainers/go-digest"
"golang.org/x/exp/slices"
)
// Schema1FSLayers is an entry of the "fsLayers" array in docker/distribution schema 1.
@ -53,16 +56,16 @@ type Schema1V1Compatibility struct {
// Schema1FromManifest creates a Schema1 manifest instance from a manifest blob.
// (NOTE: The instance is not necessary a literal representation of the original blob,
// layers with duplicate IDs are eliminated.)
func Schema1FromManifest(manifest []byte) (*Schema1, error) {
func Schema1FromManifest(manifestBlob []byte) (*Schema1, error) {
s1 := Schema1{}
if err := json.Unmarshal(manifest, &s1); err != nil {
if err := json.Unmarshal(manifestBlob, &s1); err != nil {
return nil, err
}
if s1.SchemaVersion != 1 {
return nil, fmt.Errorf("unsupported schema version %d", s1.SchemaVersion)
}
if err := validateUnambiguousManifestFormat(manifest, DockerV2Schema1SignedMediaType,
allowedFieldFSLayers|allowedFieldHistory); err != nil {
if err := manifest.ValidateUnambiguousManifestFormat(manifestBlob, DockerV2Schema1SignedMediaType,
manifest.AllowedFieldFSLayers|manifest.AllowedFieldHistory); err != nil {
return nil, err
}
if err := s1.initialize(); err != nil {
@ -183,22 +186,22 @@ func (m *Schema1) fixManifestLayers() error {
return errors.New("Invalid parent ID in the base layer of the image")
}
// check general duplicates to error instead of a deadlock
idmap := make(map[string]struct{})
idmap := set.New[string]()
var lastID string
for _, img := range m.ExtractedV1Compatibility {
// skip IDs that appear after each other, we handle those later
if _, exists := idmap[img.ID]; img.ID != lastID && exists {
if img.ID != lastID && idmap.Contains(img.ID) {
return fmt.Errorf("ID %+v appears multiple times in manifest", img.ID)
}
lastID = img.ID
idmap[lastID] = struct{}{}
idmap.Add(lastID)
}
// backwards loop so that we keep the remaining indexes after removing items
for i := len(m.ExtractedV1Compatibility) - 2; i >= 0; i-- {
if m.ExtractedV1Compatibility[i].ID == m.ExtractedV1Compatibility[i+1].ID { // repeated ID. remove and continue
m.FSLayers = append(m.FSLayers[:i], m.FSLayers[i+1:]...)
m.History = append(m.History[:i], m.History[i+1:]...)
m.ExtractedV1Compatibility = append(m.ExtractedV1Compatibility[:i], m.ExtractedV1Compatibility[i+1:]...)
m.FSLayers = slices.Delete(m.FSLayers, i, i+1)
m.History = slices.Delete(m.History, i, i+1)
m.ExtractedV1Compatibility = slices.Delete(m.ExtractedV1Compatibility, i, i+1)
} else if m.ExtractedV1Compatibility[i].Parent != m.ExtractedV1Compatibility[i+1].ID {
return fmt.Errorf("Invalid parent ID. Expected %v, got %v", m.ExtractedV1Compatibility[i+1].ID, m.ExtractedV1Compatibility[i].Parent)
}

View File

@ -5,6 +5,7 @@ import (
"fmt"
"time"
"github.com/containers/image/v5/internal/manifest"
compressiontypes "github.com/containers/image/v5/pkg/compression/types"
"github.com/containers/image/v5/pkg/strslice"
"github.com/containers/image/v5/types"
@ -12,12 +13,7 @@ import (
)
// Schema2Descriptor is a “descriptor” in docker/distribution schema 2.
type Schema2Descriptor struct {
MediaType string `json:"mediaType"`
Size int64 `json:"size"`
Digest digest.Digest `json:"digest"`
URLs []string `json:"urls,omitempty"`
}
type Schema2Descriptor = manifest.Schema2Descriptor
// BlobInfoFromSchema2Descriptor returns a types.BlobInfo based on the input schema 2 descriptor.
func BlobInfoFromSchema2Descriptor(desc Schema2Descriptor) types.BlobInfo {
@ -159,13 +155,13 @@ type Schema2Image struct {
}
// Schema2FromManifest creates a Schema2 manifest instance from a manifest blob.
func Schema2FromManifest(manifest []byte) (*Schema2, error) {
func Schema2FromManifest(manifestBlob []byte) (*Schema2, error) {
s2 := Schema2{}
if err := json.Unmarshal(manifest, &s2); err != nil {
if err := json.Unmarshal(manifestBlob, &s2); err != nil {
return nil, err
}
if err := validateUnambiguousManifestFormat(manifest, DockerV2Schema2MediaType,
allowedFieldConfig|allowedFieldLayers); err != nil {
if err := manifest.ValidateUnambiguousManifestFormat(manifestBlob, DockerV2Schema2MediaType,
manifest.AllowedFieldConfig|manifest.AllowedFieldLayers); err != nil {
return nil, err
}
// Check manifest's and layers' media types.

View File

@ -1,220 +1,32 @@
package manifest
import (
"encoding/json"
"fmt"
platform "github.com/containers/image/v5/internal/pkg/platform"
"github.com/containers/image/v5/types"
"github.com/opencontainers/go-digest"
imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1"
"github.com/containers/image/v5/internal/manifest"
)
// Schema2PlatformSpec describes the platform which a particular manifest is
// specialized for.
type Schema2PlatformSpec struct {
Architecture string `json:"architecture"`
OS string `json:"os"`
OSVersion string `json:"os.version,omitempty"`
OSFeatures []string `json:"os.features,omitempty"`
Variant string `json:"variant,omitempty"`
Features []string `json:"features,omitempty"` // removed in OCI
}
type Schema2PlatformSpec = manifest.Schema2PlatformSpec
// Schema2ManifestDescriptor references a platform-specific manifest.
type Schema2ManifestDescriptor struct {
Schema2Descriptor
Platform Schema2PlatformSpec `json:"platform"`
}
type Schema2ManifestDescriptor = manifest.Schema2ManifestDescriptor
// Schema2List is a list of platform-specific manifests.
type Schema2List struct {
SchemaVersion int `json:"schemaVersion"`
MediaType string `json:"mediaType"`
Manifests []Schema2ManifestDescriptor `json:"manifests"`
}
// MIMEType returns the MIME type of this particular manifest list.
func (list *Schema2List) MIMEType() string {
return list.MediaType
}
// Instances returns a slice of digests of the manifests that this list knows of.
func (list *Schema2List) Instances() []digest.Digest {
results := make([]digest.Digest, len(list.Manifests))
for i, m := range list.Manifests {
results[i] = m.Digest
}
return results
}
// Instance returns the ListUpdate of a particular instance in the list.
func (list *Schema2List) Instance(instanceDigest digest.Digest) (ListUpdate, error) {
for _, manifest := range list.Manifests {
if manifest.Digest == instanceDigest {
return ListUpdate{
Digest: manifest.Digest,
Size: manifest.Size,
MediaType: manifest.MediaType,
}, nil
}
}
return ListUpdate{}, fmt.Errorf("unable to find instance %s passed to Schema2List.Instances", instanceDigest)
}
// UpdateInstances updates the sizes, digests, and media types of the manifests
// which the list catalogs.
func (list *Schema2List) UpdateInstances(updates []ListUpdate) error {
if len(updates) != len(list.Manifests) {
return fmt.Errorf("incorrect number of update entries passed to Schema2List.UpdateInstances: expected %d, got %d", len(list.Manifests), len(updates))
}
for i := range updates {
if err := updates[i].Digest.Validate(); err != nil {
return fmt.Errorf("update %d of %d passed to Schema2List.UpdateInstances contained an invalid digest: %w", i+1, len(updates), err)
}
list.Manifests[i].Digest = updates[i].Digest
if updates[i].Size < 0 {
return fmt.Errorf("update %d of %d passed to Schema2List.UpdateInstances had an invalid size (%d)", i+1, len(updates), updates[i].Size)
}
list.Manifests[i].Size = updates[i].Size
if updates[i].MediaType == "" {
return fmt.Errorf("update %d of %d passed to Schema2List.UpdateInstances had no media type (was %q)", i+1, len(updates), list.Manifests[i].MediaType)
}
list.Manifests[i].MediaType = updates[i].MediaType
}
return nil
}
// ChooseInstance parses blob as a schema2 manifest list, and returns the digest
// of the image which is appropriate for the current environment.
func (list *Schema2List) ChooseInstance(ctx *types.SystemContext) (digest.Digest, error) {
wantedPlatforms, err := platform.WantedPlatforms(ctx)
if err != nil {
return "", fmt.Errorf("getting platform information %#v: %w", ctx, err)
}
for _, wantedPlatform := range wantedPlatforms {
for _, d := range list.Manifests {
imagePlatform := imgspecv1.Platform{
Architecture: d.Platform.Architecture,
OS: d.Platform.OS,
OSVersion: d.Platform.OSVersion,
OSFeatures: dupStringSlice(d.Platform.OSFeatures),
Variant: d.Platform.Variant,
}
if platform.MatchesPlatform(imagePlatform, wantedPlatform) {
return d.Digest, nil
}
}
}
return "", fmt.Errorf("no image found in manifest list for architecture %s, variant %q, OS %s", wantedPlatforms[0].Architecture, wantedPlatforms[0].Variant, wantedPlatforms[0].OS)
}
// Serialize returns the list in a blob format.
// NOTE: Serialize() does not in general reproduce the original blob if this object was loaded from one, even if no modifications were made!
func (list *Schema2List) Serialize() ([]byte, error) {
buf, err := json.Marshal(list)
if err != nil {
return nil, fmt.Errorf("marshaling Schema2List %#v: %w", list, err)
}
return buf, nil
}
type Schema2List = manifest.Schema2ListPublic
// Schema2ListFromComponents creates a Schema2 manifest list instance from the
// supplied data.
func Schema2ListFromComponents(components []Schema2ManifestDescriptor) *Schema2List {
list := Schema2List{
SchemaVersion: 2,
MediaType: DockerV2ListMediaType,
Manifests: make([]Schema2ManifestDescriptor, len(components)),
}
for i, component := range components {
m := Schema2ManifestDescriptor{
Schema2Descriptor{
MediaType: component.MediaType,
Size: component.Size,
Digest: component.Digest,
URLs: dupStringSlice(component.URLs),
},
Schema2PlatformSpec{
Architecture: component.Platform.Architecture,
OS: component.Platform.OS,
OSVersion: component.Platform.OSVersion,
OSFeatures: dupStringSlice(component.Platform.OSFeatures),
Variant: component.Platform.Variant,
Features: dupStringSlice(component.Platform.Features),
},
}
list.Manifests[i] = m
}
return &list
return manifest.Schema2ListPublicFromComponents(components)
}
// Schema2ListClone creates a deep copy of the passed-in list.
func Schema2ListClone(list *Schema2List) *Schema2List {
return Schema2ListFromComponents(list.Manifests)
}
// ToOCI1Index returns the list encoded as an OCI1 index.
func (list *Schema2List) ToOCI1Index() (*OCI1Index, error) {
components := make([]imgspecv1.Descriptor, 0, len(list.Manifests))
for _, manifest := range list.Manifests {
converted := imgspecv1.Descriptor{
MediaType: manifest.MediaType,
Size: manifest.Size,
Digest: manifest.Digest,
URLs: dupStringSlice(manifest.URLs),
Platform: &imgspecv1.Platform{
OS: manifest.Platform.OS,
Architecture: manifest.Platform.Architecture,
OSFeatures: dupStringSlice(manifest.Platform.OSFeatures),
OSVersion: manifest.Platform.OSVersion,
Variant: manifest.Platform.Variant,
},
}
components = append(components, converted)
}
oci := OCI1IndexFromComponents(components, nil)
return oci, nil
}
// ToSchema2List returns the list encoded as a Schema2 list.
func (list *Schema2List) ToSchema2List() (*Schema2List, error) {
return Schema2ListClone(list), nil
return manifest.Schema2ListPublicClone(list)
}
// Schema2ListFromManifest creates a Schema2 manifest list instance from marshalled
// JSON, presumably generated by encoding a Schema2 manifest list.
func Schema2ListFromManifest(manifest []byte) (*Schema2List, error) {
list := Schema2List{
Manifests: []Schema2ManifestDescriptor{},
}
if err := json.Unmarshal(manifest, &list); err != nil {
return nil, fmt.Errorf("unmarshaling Schema2List %q: %w", string(manifest), err)
}
if err := validateUnambiguousManifestFormat(manifest, DockerV2ListMediaType,
allowedFieldManifests); err != nil {
return nil, err
}
return &list, nil
}
// Clone returns a deep copy of this list and its contents.
func (list *Schema2List) Clone() List {
return Schema2ListClone(list)
}
// ConvertToMIMEType converts the passed-in manifest list to a manifest
// list of the specified type.
func (list *Schema2List) ConvertToMIMEType(manifestMIMEType string) (List, error) {
switch normalized := NormalizedMIMEType(manifestMIMEType); normalized {
case DockerV2ListMediaType:
return list.Clone(), nil
case imgspecv1.MediaTypeImageIndex:
return list.ToOCI1Index()
case DockerV2Schema1MediaType, DockerV2Schema1SignedMediaType, imgspecv1.MediaTypeImageManifest, DockerV2Schema2MediaType:
return nil, fmt.Errorf("Can not convert manifest list to MIME type %q, which is not a list type", manifestMIMEType)
default:
// Note that this may not be reachable, NormalizedMIMEType has a default for unknown values.
return nil, fmt.Errorf("Unimplemented manifest list MIME type %s", manifestMIMEType)
}
func Schema2ListFromManifest(manifestBlob []byte) (*Schema2List, error) {
return manifest.Schema2ListPublicFromManifest(manifestBlob)
}

View File

@ -1,10 +1,7 @@
package manifest
import (
"fmt"
"github.com/containers/image/v5/types"
digest "github.com/opencontainers/go-digest"
"github.com/containers/image/v5/internal/manifest"
imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1"
)
@ -21,56 +18,14 @@ var (
// Callers can either use this abstract interface without understanding the details of the formats,
// or instantiate a specific implementation (e.g. manifest.OCI1Index) and access the public members
// directly.
type List interface {
// MIMEType returns the MIME type of this particular manifest list.
MIMEType() string
// Instances returns a list of the manifests that this list knows of, other than its own.
Instances() []digest.Digest
// Update information about the list's instances. The length of the passed-in slice must
// match the length of the list of instances which the list already contains, and every field
// must be specified.
UpdateInstances([]ListUpdate) error
// Instance returns the size and MIME type of a particular instance in the list.
Instance(digest.Digest) (ListUpdate, error)
// ChooseInstance selects which manifest is most appropriate for the platform described by the
// SystemContext, or for the current platform if the SystemContext doesn't specify any details.
ChooseInstance(ctx *types.SystemContext) (digest.Digest, error)
// Serialize returns the list in a blob format.
// NOTE: Serialize() does not in general reproduce the original blob if this object was loaded
// from, even if no modifications were made!
Serialize() ([]byte, error)
// ConvertToMIMEType returns the list rebuilt to the specified MIME type, or an error.
ConvertToMIMEType(mimeType string) (List, error)
// Clone returns a deep copy of this list and its contents.
Clone() List
}
type List = manifest.ListPublic
// ListUpdate includes the fields which a List's UpdateInstances() method will modify.
type ListUpdate struct {
Digest digest.Digest
Size int64
MediaType string
}
type ListUpdate = manifest.ListUpdate
// ListFromBlob parses a list of manifests.
func ListFromBlob(manifest []byte, manifestMIMEType string) (List, error) {
normalized := NormalizedMIMEType(manifestMIMEType)
switch normalized {
case DockerV2ListMediaType:
return Schema2ListFromManifest(manifest)
case imgspecv1.MediaTypeImageIndex:
return OCI1IndexFromManifest(manifest)
case DockerV2Schema1MediaType, DockerV2Schema1SignedMediaType, imgspecv1.MediaTypeImageManifest, DockerV2Schema2MediaType:
return nil, fmt.Errorf("Treating single images as manifest lists is not implemented")
}
return nil, fmt.Errorf("Unimplemented manifest list MIME type %s (normalized as %s)", manifestMIMEType, normalized)
func ListFromBlob(manifestBlob []byte, manifestMIMEType string) (List, error) {
return manifest.ListPublicFromBlob(manifestBlob, manifestMIMEType)
}
// ConvertListToMIMEType converts the passed-in manifest list to a manifest

View File

@ -1,10 +1,9 @@
package manifest
import (
"encoding/json"
"fmt"
internalManifest "github.com/containers/image/v5/internal/manifest"
"github.com/containers/image/v5/internal/manifest"
"github.com/containers/image/v5/types"
"github.com/containers/libtrust"
digest "github.com/opencontainers/go-digest"
@ -16,28 +15,28 @@ import (
// FIXME(runcom, mitr): should we have a mediatype pkg??
const (
// DockerV2Schema1MediaType MIME type represents Docker manifest schema 1
DockerV2Schema1MediaType = "application/vnd.docker.distribution.manifest.v1+json"
DockerV2Schema1MediaType = manifest.DockerV2Schema1MediaType
// DockerV2Schema1MediaType MIME type represents Docker manifest schema 1 with a JWS signature
DockerV2Schema1SignedMediaType = "application/vnd.docker.distribution.manifest.v1+prettyjws"
DockerV2Schema1SignedMediaType = manifest.DockerV2Schema1SignedMediaType
// DockerV2Schema2MediaType MIME type represents Docker manifest schema 2
DockerV2Schema2MediaType = "application/vnd.docker.distribution.manifest.v2+json"
DockerV2Schema2MediaType = manifest.DockerV2Schema2MediaType
// DockerV2Schema2ConfigMediaType is the MIME type used for schema 2 config blobs.
DockerV2Schema2ConfigMediaType = "application/vnd.docker.container.image.v1+json"
DockerV2Schema2ConfigMediaType = manifest.DockerV2Schema2ConfigMediaType
// DockerV2Schema2LayerMediaType is the MIME type used for schema 2 layers.
DockerV2Schema2LayerMediaType = "application/vnd.docker.image.rootfs.diff.tar.gzip"
DockerV2Schema2LayerMediaType = manifest.DockerV2Schema2LayerMediaType
// DockerV2SchemaLayerMediaTypeUncompressed is the mediaType used for uncompressed layers.
DockerV2SchemaLayerMediaTypeUncompressed = "application/vnd.docker.image.rootfs.diff.tar"
DockerV2SchemaLayerMediaTypeUncompressed = manifest.DockerV2SchemaLayerMediaTypeUncompressed
// DockerV2ListMediaType MIME type represents Docker manifest schema 2 list
DockerV2ListMediaType = "application/vnd.docker.distribution.manifest.list.v2+json"
DockerV2ListMediaType = manifest.DockerV2ListMediaType
// DockerV2Schema2ForeignLayerMediaType is the MIME type used for schema 2 foreign layers.
DockerV2Schema2ForeignLayerMediaType = "application/vnd.docker.image.rootfs.foreign.diff.tar"
DockerV2Schema2ForeignLayerMediaType = manifest.DockerV2Schema2ForeignLayerMediaType
// DockerV2Schema2ForeignLayerMediaType is the MIME type used for gzipped schema 2 foreign layers.
DockerV2Schema2ForeignLayerMediaTypeGzip = "application/vnd.docker.image.rootfs.foreign.diff.tar.gzip"
DockerV2Schema2ForeignLayerMediaTypeGzip = manifest.DockerV2Schema2ForeignLayerMediaTypeGzip
)
// NonImageArtifactError (detected via errors.As) is used when asking for an image-specific operation
// on an object which is not a “container image” in the standard sense (e.g. an OCI artifact)
type NonImageArtifactError = internalManifest.NonImageArtifactError
type NonImageArtifactError = manifest.NonImageArtifactError
// SupportedSchema2MediaType checks if the specified string is a supported Docker v2s2 media type.
func SupportedSchema2MediaType(m string) error {
@ -102,102 +101,21 @@ type LayerInfo struct {
// GuessMIMEType guesses MIME type of a manifest and returns it _if it is recognized_, or "" if unknown or unrecognized.
// FIXME? We should, in general, prefer out-of-band MIME type instead of blindly parsing the manifest,
// but we may not have such metadata available (e.g. when the manifest is a local file).
func GuessMIMEType(manifest []byte) string {
// A subset of manifest fields; the rest is silently ignored by json.Unmarshal.
// Also docker/distribution/manifest.Versioned.
meta := struct {
MediaType string `json:"mediaType"`
SchemaVersion int `json:"schemaVersion"`
Signatures interface{} `json:"signatures"`
}{}
if err := json.Unmarshal(manifest, &meta); err != nil {
return ""
}
switch meta.MediaType {
case DockerV2Schema2MediaType, DockerV2ListMediaType,
imgspecv1.MediaTypeImageManifest, imgspecv1.MediaTypeImageIndex: // A recognized type.
return meta.MediaType
}
// this is the only way the function can return DockerV2Schema1MediaType, and recognizing that is essential for stripping the JWS signatures = computing the correct manifest digest.
switch meta.SchemaVersion {
case 1:
if meta.Signatures != nil {
return DockerV2Schema1SignedMediaType
}
return DockerV2Schema1MediaType
case 2:
// Best effort to understand if this is an OCI image since mediaType
// wasn't in the manifest for OCI image-spec < 1.0.2.
// For docker v2s2 meta.MediaType should have been set. But given the data, this is our best guess.
ociMan := struct {
Config struct {
MediaType string `json:"mediaType"`
} `json:"config"`
}{}
if err := json.Unmarshal(manifest, &ociMan); err != nil {
return ""
}
switch ociMan.Config.MediaType {
case imgspecv1.MediaTypeImageConfig:
return imgspecv1.MediaTypeImageManifest
case DockerV2Schema2ConfigMediaType:
// This case should not happen since a Docker image
// must declare a top-level media type and
// `meta.MediaType` has already been checked.
return DockerV2Schema2MediaType
}
// Maybe an image index or an OCI artifact.
ociIndex := struct {
Manifests []imgspecv1.Descriptor `json:"manifests"`
}{}
if err := json.Unmarshal(manifest, &ociIndex); err != nil {
return ""
}
if len(ociIndex.Manifests) != 0 {
if ociMan.Config.MediaType == "" {
return imgspecv1.MediaTypeImageIndex
}
// FIXME: this is mixing media types of manifests and configs.
return ociMan.Config.MediaType
}
// It's most likely an OCI artifact with a custom config media
// type which is not (and cannot) be covered by the media-type
// checks cabove.
return imgspecv1.MediaTypeImageManifest
}
return ""
func GuessMIMEType(manifestBlob []byte) string {
return manifest.GuessMIMEType(manifestBlob)
}
// Digest returns the a digest of a docker manifest, with any necessary implied transformations like stripping v1s1 signatures.
func Digest(manifest []byte) (digest.Digest, error) {
if GuessMIMEType(manifest) == DockerV2Schema1SignedMediaType {
sig, err := libtrust.ParsePrettySignature(manifest, "signatures")
if err != nil {
return "", err
}
manifest, err = sig.Payload()
if err != nil {
// Coverage: This should never happen, libtrust's Payload() can fail only if joseBase64UrlDecode() fails, on a string
// that libtrust itself has josebase64UrlEncode()d
return "", err
}
}
return digest.FromBytes(manifest), nil
func Digest(manifestBlob []byte) (digest.Digest, error) {
return manifest.Digest(manifestBlob)
}
// MatchesDigest returns true iff the manifest matches expectedDigest.
// Error may be set if this returns false.
// Note that this is not doing ConstantTimeCompare; by the time we get here, the cryptographic signature must already have been verified,
// or we are not using a cryptographic channel and the attacker can modify the digest along with the manifest blob.
func MatchesDigest(manifest []byte, expectedDigest digest.Digest) (bool, error) {
// This should eventually support various digest types.
actualDigest, err := Digest(manifest)
if err != nil {
return false, err
}
return expectedDigest == actualDigest, nil
func MatchesDigest(manifestBlob []byte, expectedDigest digest.Digest) (bool, error) {
return manifest.MatchesDigest(manifestBlob, expectedDigest)
}
// AddDummyV2S1Signature adds an JWS signature with a temporary key (i.e. useless) to a v2s1 manifest.
@ -231,30 +149,7 @@ func MIMETypeSupportsEncryption(mimeType string) bool {
// NormalizedMIMEType returns the effective MIME type of a manifest MIME type returned by a server,
// centralizing various workarounds.
func NormalizedMIMEType(input string) string {
switch input {
// "application/json" is a valid v2s1 value per https://github.com/docker/distribution/blob/master/docs/spec/manifest-v2-1.md .
// This works for now, when nothing else seems to return "application/json"; if that were not true, the mapping/detection might
// need to happen within the ImageSource.
case "application/json":
return DockerV2Schema1SignedMediaType
case DockerV2Schema1MediaType, DockerV2Schema1SignedMediaType,
imgspecv1.MediaTypeImageManifest,
imgspecv1.MediaTypeImageIndex,
DockerV2Schema2MediaType,
DockerV2ListMediaType:
return input
default:
// If it's not a recognized manifest media type, or we have failed determining the type, we'll try one last time
// to deserialize using v2s1 as per https://github.com/docker/distribution/blob/master/manifests.go#L108
// and https://github.com/docker/distribution/blob/master/manifest/schema1/manifest.go#L50
//
// Crane registries can also return "text/plain", or pretty much anything else depending on a file extension “recognized” in the tag.
// This makes no real sense, but it happens
// because requests for manifests are
// redirected to a content distribution
// network which is configured that way. See https://bugzilla.redhat.com/show_bug.cgi?id=1389442
return DockerV2Schema1SignedMediaType
}
return manifest.NormalizedMIMEType(input)
}
// FromBlob returns a Manifest instance for the specified manifest blob and the corresponding MIME type

View File

@ -5,13 +5,14 @@ import (
"fmt"
"strings"
internalManifest "github.com/containers/image/v5/internal/manifest"
"github.com/containers/image/v5/internal/manifest"
compressiontypes "github.com/containers/image/v5/pkg/compression/types"
"github.com/containers/image/v5/types"
ociencspec "github.com/containers/ocicrypt/spec"
"github.com/opencontainers/go-digest"
"github.com/opencontainers/image-spec/specs-go"
imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1"
"golang.org/x/exp/slices"
)
// BlobInfoFromOCI1Descriptor returns a types.BlobInfo based on the input OCI1 descriptor.
@ -49,13 +50,13 @@ func SupportedOCI1MediaType(m string) error {
}
// OCI1FromManifest creates an OCI1 manifest instance from a manifest blob.
func OCI1FromManifest(manifest []byte) (*OCI1, error) {
func OCI1FromManifest(manifestBlob []byte) (*OCI1, error) {
oci1 := OCI1{}
if err := json.Unmarshal(manifest, &oci1); err != nil {
if err := json.Unmarshal(manifestBlob, &oci1); err != nil {
return nil, err
}
if err := validateUnambiguousManifestFormat(manifest, imgspecv1.MediaTypeImageIndex,
allowedFieldConfig|allowedFieldLayers); err != nil {
if err := manifest.ValidateUnambiguousManifestFormat(manifestBlob, imgspecv1.MediaTypeImageIndex,
manifest.AllowedFieldConfig|manifest.AllowedFieldLayers); err != nil {
return nil, err
}
return &oci1, nil
@ -160,11 +161,9 @@ func (m *OCI1) UpdateLayerInfos(layerInfos []types.BlobInfo) error {
// getEncryptedMediaType will return the mediatype to its encrypted counterpart and return
// an error if the mediatype does not support encryption
func getEncryptedMediaType(mediatype string) (string, error) {
for _, s := range strings.Split(mediatype, "+")[1:] {
if s == "encrypted" {
if slices.Contains(strings.Split(mediatype, "+")[1:], "encrypted") {
return "", fmt.Errorf("unsupported mediaType: %v already encrypted", mediatype)
}
}
unsuffixedMediatype := strings.Split(mediatype, "+")[0]
switch unsuffixedMediatype {
case DockerV2Schema2LayerMediaType, imgspecv1.MediaTypeImageLayer, imgspecv1.MediaTypeImageLayerNonDistributable:
@ -178,7 +177,7 @@ func getEncryptedMediaType(mediatype string) (string, error) {
// an error if the mediatype does not support decryption
func getDecryptedMediaType(mediatype string) (string, error) {
if !strings.HasSuffix(mediatype, "+encrypted") {
return "", fmt.Errorf("unsupported mediaType to decrypt %v:", mediatype)
return "", fmt.Errorf("unsupported mediaType to decrypt: %v", mediatype)
}
return strings.TrimSuffix(mediatype, "+encrypted"), nil
@ -197,7 +196,7 @@ func (m *OCI1) Inspect(configGetter func(types.BlobInfo) ([]byte, error)) (*type
// Most software calling this without human intervention is going to expect the values to be realistic and relevant,
// and is probably better served by failing; we can always re-visit that later if we fail now, but
// if we started returning some data for OCI artifacts now, we couldnt start failing in this function later.
return nil, internalManifest.NewNonImageArtifactError(m.Config.MediaType)
return nil, manifest.NewNonImageArtifactError(m.Config.MediaType)
}
config, err := configGetter(m.ConfigInfo())
@ -248,7 +247,7 @@ func (m *OCI1) ImageID([]digest.Digest) (string, error) {
// (The only known caller of ImageID is storage/storageImageDestination.computeID,
// which cant work with non-image artifacts.)
if m.Config.MediaType != imgspecv1.MediaTypeImageConfig {
return "", internalManifest.NewNonImageArtifactError(m.Config.MediaType)
return "", manifest.NewNonImageArtifactError(m.Config.MediaType)
}
if err := m.Config.Digest.Validate(); err != nil {

View File

@ -1,232 +1,27 @@
package manifest
import (
"encoding/json"
"fmt"
"runtime"
platform "github.com/containers/image/v5/internal/pkg/platform"
"github.com/containers/image/v5/types"
"github.com/opencontainers/go-digest"
imgspec "github.com/opencontainers/image-spec/specs-go"
"github.com/containers/image/v5/internal/manifest"
imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1"
)
// OCI1Index is just an alias for the OCI index type, but one which we can
// provide methods for.
type OCI1Index struct {
imgspecv1.Index
}
// MIMEType returns the MIME type of this particular manifest index.
func (index *OCI1Index) MIMEType() string {
return imgspecv1.MediaTypeImageIndex
}
// Instances returns a slice of digests of the manifests that this index knows of.
func (index *OCI1Index) Instances() []digest.Digest {
results := make([]digest.Digest, len(index.Manifests))
for i, m := range index.Manifests {
results[i] = m.Digest
}
return results
}
// Instance returns the ListUpdate of a particular instance in the index.
func (index *OCI1Index) Instance(instanceDigest digest.Digest) (ListUpdate, error) {
for _, manifest := range index.Manifests {
if manifest.Digest == instanceDigest {
return ListUpdate{
Digest: manifest.Digest,
Size: manifest.Size,
MediaType: manifest.MediaType,
}, nil
}
}
return ListUpdate{}, fmt.Errorf("unable to find instance %s in OCI1Index", instanceDigest)
}
// UpdateInstances updates the sizes, digests, and media types of the manifests
// which the list catalogs.
func (index *OCI1Index) UpdateInstances(updates []ListUpdate) error {
if len(updates) != len(index.Manifests) {
return fmt.Errorf("incorrect number of update entries passed to OCI1Index.UpdateInstances: expected %d, got %d", len(index.Manifests), len(updates))
}
for i := range updates {
if err := updates[i].Digest.Validate(); err != nil {
return fmt.Errorf("update %d of %d passed to OCI1Index.UpdateInstances contained an invalid digest: %w", i+1, len(updates), err)
}
index.Manifests[i].Digest = updates[i].Digest
if updates[i].Size < 0 {
return fmt.Errorf("update %d of %d passed to OCI1Index.UpdateInstances had an invalid size (%d)", i+1, len(updates), updates[i].Size)
}
index.Manifests[i].Size = updates[i].Size
if updates[i].MediaType == "" {
return fmt.Errorf("update %d of %d passed to OCI1Index.UpdateInstances had no media type (was %q)", i+1, len(updates), index.Manifests[i].MediaType)
}
index.Manifests[i].MediaType = updates[i].MediaType
}
return nil
}
// ChooseInstance parses blob as an oci v1 manifest index, and returns the digest
// of the image which is appropriate for the current environment.
func (index *OCI1Index) ChooseInstance(ctx *types.SystemContext) (digest.Digest, error) {
wantedPlatforms, err := platform.WantedPlatforms(ctx)
if err != nil {
return "", fmt.Errorf("getting platform information %#v: %w", ctx, err)
}
for _, wantedPlatform := range wantedPlatforms {
for _, d := range index.Manifests {
if d.Platform == nil {
continue
}
imagePlatform := imgspecv1.Platform{
Architecture: d.Platform.Architecture,
OS: d.Platform.OS,
OSVersion: d.Platform.OSVersion,
OSFeatures: dupStringSlice(d.Platform.OSFeatures),
Variant: d.Platform.Variant,
}
if platform.MatchesPlatform(imagePlatform, wantedPlatform) {
return d.Digest, nil
}
}
}
for _, d := range index.Manifests {
if d.Platform == nil {
return d.Digest, nil
}
}
return "", fmt.Errorf("no image found in image index for architecture %s, variant %q, OS %s", wantedPlatforms[0].Architecture, wantedPlatforms[0].Variant, wantedPlatforms[0].OS)
}
// Serialize returns the index in a blob format.
// NOTE: Serialize() does not in general reproduce the original blob if this object was loaded from one, even if no modifications were made!
func (index *OCI1Index) Serialize() ([]byte, error) {
buf, err := json.Marshal(index)
if err != nil {
return nil, fmt.Errorf("marshaling OCI1Index %#v: %w", index, err)
}
return buf, nil
}
type OCI1Index = manifest.OCI1IndexPublic
// OCI1IndexFromComponents creates an OCI1 image index instance from the
// supplied data.
func OCI1IndexFromComponents(components []imgspecv1.Descriptor, annotations map[string]string) *OCI1Index {
index := OCI1Index{
imgspecv1.Index{
Versioned: imgspec.Versioned{SchemaVersion: 2},
MediaType: imgspecv1.MediaTypeImageIndex,
Manifests: make([]imgspecv1.Descriptor, len(components)),
Annotations: dupStringStringMap(annotations),
},
}
for i, component := range components {
var platform *imgspecv1.Platform
if component.Platform != nil {
platform = &imgspecv1.Platform{
Architecture: component.Platform.Architecture,
OS: component.Platform.OS,
OSVersion: component.Platform.OSVersion,
OSFeatures: dupStringSlice(component.Platform.OSFeatures),
Variant: component.Platform.Variant,
}
}
m := imgspecv1.Descriptor{
MediaType: component.MediaType,
Size: component.Size,
Digest: component.Digest,
URLs: dupStringSlice(component.URLs),
Annotations: dupStringStringMap(component.Annotations),
Platform: platform,
}
index.Manifests[i] = m
}
return &index
return manifest.OCI1IndexPublicFromComponents(components, annotations)
}
// OCI1IndexClone creates a deep copy of the passed-in index.
func OCI1IndexClone(index *OCI1Index) *OCI1Index {
return OCI1IndexFromComponents(index.Manifests, index.Annotations)
}
// ToOCI1Index returns the index encoded as an OCI1 index.
func (index *OCI1Index) ToOCI1Index() (*OCI1Index, error) {
return OCI1IndexClone(index), nil
}
// ToSchema2List returns the index encoded as a Schema2 list.
func (index *OCI1Index) ToSchema2List() (*Schema2List, error) {
components := make([]Schema2ManifestDescriptor, 0, len(index.Manifests))
for _, manifest := range index.Manifests {
platform := manifest.Platform
if platform == nil {
platform = &imgspecv1.Platform{
OS: runtime.GOOS,
Architecture: runtime.GOARCH,
}
}
converted := Schema2ManifestDescriptor{
Schema2Descriptor{
MediaType: manifest.MediaType,
Size: manifest.Size,
Digest: manifest.Digest,
URLs: dupStringSlice(manifest.URLs),
},
Schema2PlatformSpec{
OS: platform.OS,
Architecture: platform.Architecture,
OSFeatures: dupStringSlice(platform.OSFeatures),
OSVersion: platform.OSVersion,
Variant: platform.Variant,
},
}
components = append(components, converted)
}
s2 := Schema2ListFromComponents(components)
return s2, nil
return manifest.OCI1IndexPublicClone(index)
}
// OCI1IndexFromManifest creates an OCI1 manifest index instance from marshalled
// JSON, presumably generated by encoding a OCI1 manifest index.
func OCI1IndexFromManifest(manifest []byte) (*OCI1Index, error) {
index := OCI1Index{
Index: imgspecv1.Index{
Versioned: imgspec.Versioned{SchemaVersion: 2},
MediaType: imgspecv1.MediaTypeImageIndex,
Manifests: []imgspecv1.Descriptor{},
Annotations: make(map[string]string),
},
}
if err := json.Unmarshal(manifest, &index); err != nil {
return nil, fmt.Errorf("unmarshaling OCI1Index %q: %w", string(manifest), err)
}
if err := validateUnambiguousManifestFormat(manifest, imgspecv1.MediaTypeImageIndex,
allowedFieldManifests); err != nil {
return nil, err
}
return &index, nil
}
// Clone returns a deep copy of this list and its contents.
func (index *OCI1Index) Clone() List {
return OCI1IndexClone(index)
}
// ConvertToMIMEType converts the passed-in image index to a manifest list of
// the specified type.
func (index *OCI1Index) ConvertToMIMEType(manifestMIMEType string) (List, error) {
switch normalized := NormalizedMIMEType(manifestMIMEType); normalized {
case DockerV2ListMediaType:
return index.ToSchema2List()
case imgspecv1.MediaTypeImageIndex:
return index.Clone(), nil
case DockerV2Schema1MediaType, DockerV2Schema1SignedMediaType, imgspecv1.MediaTypeImageManifest, DockerV2Schema2MediaType:
return nil, fmt.Errorf("Can not convert image index to MIME type %q, which is not a list type", manifestMIMEType)
default:
// Note that this may not be reachable, NormalizedMIMEType has a default for unknown values.
return nil, fmt.Errorf("Unimplemented manifest MIME type %s", manifestMIMEType)
}
func OCI1IndexFromManifest(manifestBlob []byte) (*OCI1Index, error) {
return manifest.OCI1IndexPublicFromManifest(manifestBlob)
}

View File

@ -58,13 +58,7 @@ func splitPathAndImageWindows(reference string) (string, string) {
}
func splitPathAndImageNonWindows(reference string) (string, string) {
sep := strings.SplitN(reference, ":", 2)
path := sep[0]
var image string
if len(sep) == 2 {
image = sep[1]
}
path, image, _ := strings.Cut(reference, ":") // image is set to "" if there is no ":"
return path, image
}

View File

@ -12,9 +12,9 @@ import (
"github.com/containers/image/v5/internal/imagedestination/impl"
"github.com/containers/image/v5/internal/imagedestination/stubs"
"github.com/containers/image/v5/internal/manifest"
"github.com/containers/image/v5/internal/private"
"github.com/containers/image/v5/internal/putblobdigest"
"github.com/containers/image/v5/manifest"
"github.com/containers/image/v5/types"
digest "github.com/opencontainers/go-digest"
imgspec "github.com/opencontainers/image-spec/specs-go"

View File

@ -12,8 +12,8 @@ import (
"github.com/containers/image/v5/internal/imagesource/impl"
"github.com/containers/image/v5/internal/imagesource/stubs"
"github.com/containers/image/v5/internal/manifest"
"github.com/containers/image/v5/internal/private"
"github.com/containers/image/v5/manifest"
"github.com/containers/image/v5/pkg/tlsclientconfig"
"github.com/containers/image/v5/types"
"github.com/docker/go-connections/tlsconfig"

View File

@ -100,7 +100,7 @@ func (ref ociReference) Transport() types.ImageTransport {
// StringWithinTransport returns a string representation of the reference, which MUST be such that
// reference.Transport().ParseReference(reference.StringWithinTransport()) returns an equivalent reference.
// NOTE: The returned string is not promised to be equal to the original input to ParseReference;
// e.g. default attribute values omitted by the user may be filled in in the return value, or vice versa.
// e.g. default attribute values omitted by the user may be filled in the return value, or vice versa.
// WARNING: Do not use the return value in the UI to describe an image, it does not contain the Transport().Name() prefix.
func (ref ociReference) StringWithinTransport() string {
return fmt.Sprintf("%s:%s", ref.dir, ref.image)

View File

@ -3,11 +3,12 @@ package openshift
import (
"crypto/tls"
"crypto/x509"
"encoding/json"
"encoding/base64"
"errors"
"fmt"
"net"
"net/http"
"net/netip"
"net/url"
"os"
"path"
@ -17,10 +18,10 @@ import (
"time"
"github.com/containers/storage/pkg/homedir"
"github.com/ghodss/yaml"
"github.com/imdario/mergo"
"github.com/sirupsen/logrus"
"golang.org/x/net/http2"
"golang.org/x/exp/slices"
"gopkg.in/yaml.v3"
)
// restTLSClientConfig is a modified copy of k8s.io/kubernetes/pkg/client/restclient.TLSClientConfig.
@ -205,15 +206,12 @@ func (config *directClientConfig) ClientConfig() (*restConfig, error) {
if isConfigTransportTLS(*clientConfig) {
var err error
// REMOVED: Support for interactive fallback.
userAuthPartialConfig, err := getUserIdentificationPartialConfig(configAuthInfo)
if err != nil {
return nil, err
}
userAuthPartialConfig := getUserIdentificationPartialConfig(configAuthInfo)
if err = mergo.MergeWithOverwrite(clientConfig, userAuthPartialConfig); err != nil {
return nil, err
}
serverAuthPartialConfig, err := getServerIdentificationPartialConfig(configAuthInfo, configClusterInfo)
serverAuthPartialConfig, err := getServerIdentificationPartialConfig(configClusterInfo)
if err != nil {
return nil, err
}
@ -232,7 +230,7 @@ func (config *directClientConfig) ClientConfig() (*restConfig, error) {
// 1. configClusterInfo (the final result of command line flags and merged .kubeconfig files)
// 2. configAuthInfo.auth-path (this file can contain information that conflicts with #1, and we want #1 to win the priority)
// 3. load the ~/.kubernetes_auth file as a default
func getServerIdentificationPartialConfig(configAuthInfo clientcmdAuthInfo, configClusterInfo clientcmdCluster) (*restConfig, error) {
func getServerIdentificationPartialConfig(configClusterInfo clientcmdCluster) (*restConfig, error) {
mergedConfig := &restConfig{}
// configClusterInfo holds the information identify the server provided by .kubeconfig
@ -255,7 +253,7 @@ func getServerIdentificationPartialConfig(configAuthInfo clientcmdAuthInfo, conf
// 2. configAuthInfo.auth-path (this file can contain information that conflicts with #1, and we want #1 to win the priority)
// 3. if there is not enough information to identify the user, load try the ~/.kubernetes_auth file
// 4. if there is not enough information to identify the user, prompt if possible
func getUserIdentificationPartialConfig(configAuthInfo clientcmdAuthInfo) (*restConfig, error) {
func getUserIdentificationPartialConfig(configAuthInfo clientcmdAuthInfo) *restConfig {
mergedConfig := &restConfig{}
// blindly overwrite existing values based on precedence
@ -274,7 +272,7 @@ func getUserIdentificationPartialConfig(configAuthInfo clientcmdAuthInfo) (*rest
}
// REMOVED: prompting for missing information.
return mergedConfig, nil
return mergedConfig
}
// ConfirmUsable is a modified copy of k8s.io/kubernetes/pkg/client/unversioned/clientcmd.DirectClientConfig.ConfirmUsable.
@ -672,11 +670,7 @@ func load(data []byte) (*clientcmdConfig, error) {
return config, nil
}
// Note: This does absolutely no kind/version checking or conversions.
data, err := yaml.YAMLToJSON(data)
if err != nil {
return nil, err
}
if err := json.Unmarshal(data, config); err != nil {
if err := yaml.Unmarshal(data, config); err != nil {
return nil, err
}
return config, nil
@ -877,11 +871,11 @@ func newProxierWithNoProxyCIDR(delegate func(req *http.Request) (*url.URL, error
noProxyEnv := os.Getenv("NO_PROXY")
noProxyRules := strings.Split(noProxyEnv, ",")
cidrs := []*net.IPNet{}
cidrs := []netip.Prefix{}
for _, noProxyRule := range noProxyRules {
_, cidr, _ := net.ParseCIDR(noProxyRule)
if cidr != nil {
cidrs = append(cidrs, cidr)
prefix, err := netip.ParsePrefix(noProxyRule)
if err == nil {
cidrs = append(cidrs, prefix)
}
}
@ -892,7 +886,7 @@ func newProxierWithNoProxyCIDR(delegate func(req *http.Request) (*url.URL, error
return func(req *http.Request) (*url.URL, error) {
host := req.URL.Host
// for some urls, the Host is already the host, not the host:port
if net.ParseIP(host) == nil {
if _, err := netip.ParseAddr(host); err != nil {
var err error
host, _, err = net.SplitHostPort(req.URL.Host)
if err != nil {
@ -900,16 +894,16 @@ func newProxierWithNoProxyCIDR(delegate func(req *http.Request) (*url.URL, error
}
}
ip := net.ParseIP(host)
if ip == nil {
ip, err := netip.ParseAddr(host)
if err != nil {
return delegate(req)
}
for _, cidr := range cidrs {
if cidr.Contains(ip) {
if slices.ContainsFunc(cidrs, func(cidr netip.Prefix) bool {
return cidr.Contains(ip)
}) {
return nil, nil
}
}
return delegate(req)
}
@ -936,14 +930,14 @@ func tlsCacheGet(config *restConfig) (http.RoundTripper, error) {
Proxy: newProxierWithNoProxyCIDR(http.ProxyFromEnvironment),
TLSHandshakeTimeout: 10 * time.Second,
TLSClientConfig: tlsConfig,
Dial: (&net.Dialer{
DialContext: (&net.Dialer{
Timeout: 30 * time.Second,
KeepAlive: 30 * time.Second,
}).Dial,
}).DialContext,
}
// Allow clients to disable http2 if needed.
if s := os.Getenv("DISABLE_HTTP2"); len(s) == 0 {
_ = http2.ConfigureTransport(t)
t.ForceAttemptHTTP2 = true
}
return t, nil
}
@ -1057,20 +1051,20 @@ func (c *restConfig) HasCertAuth() bool {
// IMPORTANT if you add fields to this struct, please update IsConfigEmpty()
type clientcmdConfig struct {
// Clusters is a map of referenceable names to cluster configs
Clusters clustersMap `json:"clusters"`
Clusters clustersMap `yaml:"clusters"`
// AuthInfos is a map of referenceable names to user configs
AuthInfos authInfosMap `json:"users"`
AuthInfos authInfosMap `yaml:"users"`
// Contexts is a map of referenceable names to context configs
Contexts contextsMap `json:"contexts"`
Contexts contextsMap `yaml:"contexts"`
// CurrentContext is the name of the context that you would like to use by default
CurrentContext string `json:"current-context"`
CurrentContext string `yaml:"current-context"`
}
type clustersMap map[string]*clientcmdCluster
func (m *clustersMap) UnmarshalJSON(data []byte) error {
func (m *clustersMap) UnmarshalYAML(value *yaml.Node) error {
var a []v1NamedCluster
if err := json.Unmarshal(data, &a); err != nil {
if err := value.Decode(&a); err != nil {
return err
}
for _, e := range a {
@ -1082,9 +1076,9 @@ func (m *clustersMap) UnmarshalJSON(data []byte) error {
type authInfosMap map[string]*clientcmdAuthInfo
func (m *authInfosMap) UnmarshalJSON(data []byte) error {
func (m *authInfosMap) UnmarshalYAML(value *yaml.Node) error {
var a []v1NamedAuthInfo
if err := json.Unmarshal(data, &a); err != nil {
if err := value.Decode(&a); err != nil {
return err
}
for _, e := range a {
@ -1096,9 +1090,9 @@ func (m *authInfosMap) UnmarshalJSON(data []byte) error {
type contextsMap map[string]*clientcmdContext
func (m *contextsMap) UnmarshalJSON(data []byte) error {
func (m *contextsMap) UnmarshalYAML(value *yaml.Node) error {
var a []v1NamedContext
if err := json.Unmarshal(data, &a); err != nil {
if err := value.Decode(&a); err != nil {
return err
}
for _, e := range a {
@ -1118,19 +1112,32 @@ func clientcmdNewConfig() *clientcmdConfig {
}
}
// yamlBinaryAsBase64String is a []byte that can be stored in yaml as a !!str, not a !!binary
type yamlBinaryAsBase64String []byte
func (bin *yamlBinaryAsBase64String) UnmarshalText(text []byte) error {
res := make([]byte, base64.StdEncoding.DecodedLen(len(text)))
n, err := base64.StdEncoding.Decode(res, text)
if err != nil {
return err
}
*bin = res[:n]
return nil
}
// clientcmdCluster is a modified copy of k8s.io/kubernetes/pkg/client/unversioned/clientcmd/api.Cluster.
// Cluster contains information about how to communicate with a kubernetes cluster
type clientcmdCluster struct {
// LocationOfOrigin indicates where this object came from. It is used for round tripping config post-merge, but never serialized.
LocationOfOrigin string
// Server is the address of the kubernetes cluster (https://hostname:port).
Server string `json:"server"`
Server string `yaml:"server"`
// InsecureSkipTLSVerify skips the validity check for the server's certificate. This will make your HTTPS connections insecure.
InsecureSkipTLSVerify bool `json:"insecure-skip-tls-verify,omitempty"`
InsecureSkipTLSVerify bool `yaml:"insecure-skip-tls-verify,omitempty"`
// CertificateAuthority is the path to a cert file for the certificate authority.
CertificateAuthority string `json:"certificate-authority,omitempty"`
CertificateAuthority string `yaml:"certificate-authority,omitempty"`
// CertificateAuthorityData contains PEM-encoded certificate authority certificates. Overrides CertificateAuthority
CertificateAuthorityData []byte `json:"certificate-authority-data,omitempty"`
CertificateAuthorityData yamlBinaryAsBase64String `yaml:"certificate-authority-data,omitempty"`
}
// clientcmdAuthInfo is a modified copy of k8s.io/kubernetes/pkg/client/unversioned/clientcmd/api.AuthInfo.
@ -1139,19 +1146,19 @@ type clientcmdAuthInfo struct {
// LocationOfOrigin indicates where this object came from. It is used for round tripping config post-merge, but never serialized.
LocationOfOrigin string
// ClientCertificate is the path to a client cert file for TLS.
ClientCertificate string `json:"client-certificate,omitempty"`
ClientCertificate string `yaml:"client-certificate,omitempty"`
// ClientCertificateData contains PEM-encoded data from a client cert file for TLS. Overrides ClientCertificate
ClientCertificateData []byte `json:"client-certificate-data,omitempty"`
ClientCertificateData yamlBinaryAsBase64String `yaml:"client-certificate-data,omitempty"`
// ClientKey is the path to a client key file for TLS.
ClientKey string `json:"client-key,omitempty"`
ClientKey string `yaml:"client-key,omitempty"`
// ClientKeyData contains PEM-encoded data from a client key file for TLS. Overrides ClientKey
ClientKeyData []byte `json:"client-key-data,omitempty"`
ClientKeyData yamlBinaryAsBase64String `yaml:"client-key-data,omitempty"`
// Token is the bearer token for authentication to the kubernetes cluster.
Token string `json:"token,omitempty"`
Token string `yaml:"token,omitempty"`
// Username is the username for basic authentication to the kubernetes cluster.
Username string `json:"username,omitempty"`
Username string `yaml:"username,omitempty"`
// Password is the password for basic authentication to the kubernetes cluster.
Password string `json:"password,omitempty"`
Password string `yaml:"password,omitempty"`
}
// clientcmdContext is a modified copy of k8s.io/kubernetes/pkg/client/unversioned/clientcmd/api.Context.
@ -1160,36 +1167,36 @@ type clientcmdContext struct {
// LocationOfOrigin indicates where this object came from. It is used for round tripping config post-merge, but never serialized.
LocationOfOrigin string
// Cluster is the name of the cluster for this context
Cluster string `json:"cluster"`
Cluster string `yaml:"cluster"`
// AuthInfo is the name of the authInfo for this context
AuthInfo string `json:"user"`
AuthInfo string `yaml:"user"`
// Namespace is the default namespace to use on unspecified requests
Namespace string `json:"namespace,omitempty"`
Namespace string `yaml:"namespace,omitempty"`
}
// v1NamedCluster is a modified copy of k8s.io/kubernetes/pkg/client/unversioned/clientcmd/api.v1.NamedCluster.
// NamedCluster relates nicknames to cluster information
type v1NamedCluster struct {
// Name is the nickname for this Cluster
Name string `json:"name"`
Name string `yaml:"name"`
// Cluster holds the cluster information
Cluster clientcmdCluster `json:"cluster"`
Cluster clientcmdCluster `yaml:"cluster"`
}
// v1NamedContext is a modified copy of k8s.io/kubernetes/pkg/client/unversioned/clientcmd/api.v1.NamedContext.
// NamedContext relates nicknames to context information
type v1NamedContext struct {
// Name is the nickname for this Context
Name string `json:"name"`
Name string `yaml:"name"`
// Context holds the context information
Context clientcmdContext `json:"context"`
Context clientcmdContext `yaml:"context"`
}
// v1NamedAuthInfo is a modified copy of k8s.io/kubernetes/pkg/client/unversioned/clientcmd/api.v1.NamedAuthInfo.
// NamedAuthInfo relates nicknames to auth information
type v1NamedAuthInfo struct {
// Name is the nickname for this AuthInfo
Name string `json:"name"`
Name string `yaml:"name"`
// AuthInfo holds the auth information
AuthInfo clientcmdAuthInfo `json:"user"`
AuthInfo clientcmdAuthInfo `yaml:"user"`
}

View File

@ -146,11 +146,11 @@ func (c *openshiftClient) getImage(ctx context.Context, imageStreamImageName str
// convertDockerImageReference takes an image API DockerImageReference value and returns a reference we can actually use;
// currently OpenShift stores the cluster-internal service IPs here, which are unusable from the outside.
func (c *openshiftClient) convertDockerImageReference(ref string) (string, error) {
parts := strings.SplitN(ref, "/", 2)
if len(parts) != 2 {
_, repo, gotRepo := strings.Cut(ref, "/")
if !gotRepo {
return "", fmt.Errorf("Invalid format of docker reference %s: missing '/'", ref)
}
return reference.Domain(c.ref.dockerReference) + "/" + parts[1], nil
return reference.Domain(c.ref.dockerReference) + "/" + repo, nil
}
// These structs are subsets of github.com/openshift/origin/pkg/image/api/v1 and its dependencies.

View File

@ -17,10 +17,12 @@ import (
"github.com/containers/image/v5/internal/imagedestination/impl"
"github.com/containers/image/v5/internal/imagedestination/stubs"
"github.com/containers/image/v5/internal/private"
"github.com/containers/image/v5/internal/set"
"github.com/containers/image/v5/internal/signature"
"github.com/containers/image/v5/manifest"
"github.com/containers/image/v5/types"
"github.com/opencontainers/go-digest"
"golang.org/x/exp/slices"
)
type openshiftImageDestination struct {
@ -180,12 +182,11 @@ func (d *openshiftImageDestination) PutSignaturesWithFormat(ctx context.Context,
if err != nil {
return err
}
existingSigNames := map[string]struct{}{}
existingSigNames := set.New[string]()
for _, sig := range image.Signatures {
existingSigNames[sig.objectMeta.Name] = struct{}{}
existingSigNames.Add(sig.objectMeta.Name)
}
sigExists:
for _, newSigWithFormat := range signatures {
newSigSimple, ok := newSigWithFormat.(signature.SimpleSigning)
if !ok {
@ -193,10 +194,10 @@ sigExists:
}
newSig := newSigSimple.UntrustedSignature()
for _, existingSig := range image.Signatures {
if existingSig.Type == imageSignatureTypeAtomic && bytes.Equal(existingSig.Content, newSig) {
continue sigExists
}
if slices.ContainsFunc(image.Signatures, func(existingSig imageSignature) bool {
return existingSig.Type == imageSignatureTypeAtomic && bytes.Equal(existingSig.Content, newSig)
}) {
continue
}
// The API expect us to invent a new unique name. This is racy, but hopefully good enough.
@ -208,7 +209,7 @@ sigExists:
return fmt.Errorf("generating random signature len %d: %w", n, err)
}
signatureName = fmt.Sprintf("%s@%032x", imageStreamImageName, randBytes)
if _, ok := existingSigNames[signatureName]; !ok {
if !existingSigNames.Contains(signatureName) {
break
}
}

View File

@ -89,7 +89,7 @@ func (ref openshiftReference) Transport() types.ImageTransport {
// StringWithinTransport returns a string representation of the reference, which MUST be such that
// reference.Transport().ParseReference(reference.StringWithinTransport()) returns an equivalent reference.
// NOTE: The returned string is not promised to be equal to the original input to ParseReference;
// e.g. default attribute values omitted by the user may be filled in in the return value, or vice versa.
// e.g. default attribute values omitted by the user may be filled in the return value, or vice versa.
// WARNING: Do not use the return value in the UI to describe an image, it does not contain the Transport().Name() prefix.
func (ref openshiftReference) StringWithinTransport() string {
return reference.FamiliarString(ref.dockerReference)

View File

@ -69,7 +69,7 @@ type manifestSchema struct {
}
type ostreeImageDestination struct {
compat impl.Compat
impl.Compat
impl.PropertyMethodsInitialize
stubs.NoPutBlobPartialInitialize
stubs.AlwaysSupportsSignatures

View File

@ -75,12 +75,11 @@ type ostreeImageCloser struct {
func (t ostreeTransport) ParseReference(ref string) (types.ImageReference, error) {
var repo = ""
var image = ""
s := strings.SplitN(ref, "@/", 2)
if len(s) == 1 {
image, repo = s[0], defaultOSTreeRepo
image, repoPart, gotRepoPart := strings.Cut(ref, "@/")
if !gotRepoPart {
repo = defaultOSTreeRepo
} else {
image, repo = s[0], "/"+s[1]
repo = "/" + repoPart
}
return NewReference(image, repo)
@ -134,7 +133,7 @@ func (ref ostreeReference) Transport() types.ImageTransport {
// StringWithinTransport returns a string representation of the reference, which MUST be such that
// reference.Transport().ParseReference(reference.StringWithinTransport()) returns an equivalent reference.
// NOTE: The returned string is not promised to be equal to the original input to ParseReference;
// e.g. default attribute values omitted by the user may be filled in in the return value, or vice versa.
// e.g. default attribute values omitted by the user may be filled in the return value, or vice versa.
// WARNING: Do not use the return value in the UI to describe an image, it does not contain the Transport().Name() prefix.
func (ref ostreeReference) StringWithinTransport() string {
return fmt.Sprintf("%s@%s", ref.image, ref.repo)
@ -157,11 +156,11 @@ func (ref ostreeReference) PolicyConfigurationIdentity() string {
// It is STRONGLY recommended for the first element, if any, to be a prefix of PolicyConfigurationIdentity(),
// and each following element to be a prefix of the element preceding it.
func (ref ostreeReference) PolicyConfigurationNamespaces() []string {
s := strings.SplitN(ref.image, ":", 2)
if len(s) != 2 { // Coverage: Should never happen, NewReference above ensures ref.image has a :tag.
repo, _, gotTag := strings.Cut(ref.image, ":")
if !gotTag { // Coverage: Should never happen, NewReference above ensures ref.image has a :tag.
panic(fmt.Sprintf("Internal inconsistency: ref.image value %q does not have a :tag", ref.image))
}
name := s[0]
name := repo
res := []string{}
for {
res = append(res, fmt.Sprintf("%s:%s", ref.repo, name))

View File

@ -6,6 +6,7 @@ import (
"time"
"github.com/containers/image/v5/internal/blobinfocache"
"github.com/containers/image/v5/internal/set"
"github.com/containers/image/v5/pkg/blobinfocache/internal/prioritize"
"github.com/containers/image/v5/types"
digest "github.com/opencontainers/go-digest"
@ -19,12 +20,12 @@ type locationKey struct {
blobDigest digest.Digest
}
// cache implements an in-memory-only BlobInfoCache
// cache implements an in-memory-only BlobInfoCache.
type cache struct {
mutex sync.Mutex
// The following fields can only be accessed with mutex held.
uncompressedDigests map[digest.Digest]digest.Digest
digestsByUncompressed map[digest.Digest]map[digest.Digest]struct{} // stores a set of digests for each uncompressed digest
digestsByUncompressed map[digest.Digest]*set.Set[digest.Digest] // stores a set of digests for each uncompressed digest
knownLocations map[locationKey]map[types.BICLocationReference]time.Time // stores last known existence time for each location reference
compressors map[digest.Digest]string // stores a compressor name, or blobinfocache.Unknown, for each digest
}
@ -44,7 +45,7 @@ func New() types.BlobInfoCache {
func new2() *cache {
return &cache{
uncompressedDigests: map[digest.Digest]digest.Digest{},
digestsByUncompressed: map[digest.Digest]map[digest.Digest]struct{}{},
digestsByUncompressed: map[digest.Digest]*set.Set[digest.Digest]{},
knownLocations: map[locationKey]map[types.BICLocationReference]time.Time{},
compressors: map[digest.Digest]string{},
}
@ -67,7 +68,7 @@ func (mem *cache) uncompressedDigestLocked(anyDigest digest.Digest) digest.Diges
// Presence in digestsByUncompressed implies that anyDigest must already refer to an uncompressed digest.
// This way we don't have to waste storage space with trivial (uncompressed, uncompressed) mappings
// when we already record a (compressed, uncompressed) pair.
if m, ok := mem.digestsByUncompressed[anyDigest]; ok && len(m) > 0 {
if s, ok := mem.digestsByUncompressed[anyDigest]; ok && !s.Empty() {
return anyDigest
}
return ""
@ -88,10 +89,10 @@ func (mem *cache) RecordDigestUncompressedPair(anyDigest digest.Digest, uncompre
anyDigestSet, ok := mem.digestsByUncompressed[uncompressed]
if !ok {
anyDigestSet = map[digest.Digest]struct{}{}
anyDigestSet = set.New[digest.Digest]()
mem.digestsByUncompressed[uncompressed] = anyDigestSet
}
anyDigestSet[anyDigest] = struct{}{} // Possibly writing the same struct{}{} presence marker again.
anyDigestSet.Add(anyDigest)
}
// RecordKnownLocation records that a blob with the specified digest exists within the specified (transport, scope) scope,
@ -171,12 +172,13 @@ func (mem *cache) candidateLocations(transport types.ImageTransport, scope types
var uncompressedDigest digest.Digest // = ""
if canSubstitute {
if uncompressedDigest = mem.uncompressedDigestLocked(primaryDigest); uncompressedDigest != "" {
otherDigests := mem.digestsByUncompressed[uncompressedDigest] // nil if not present in the map
for d := range otherDigests {
if otherDigests, ok := mem.digestsByUncompressed[uncompressedDigest]; ok {
for _, d := range otherDigests.Values() {
if d != primaryDigest && d != uncompressedDigest {
res = mem.appendReplacementCandidates(res, transport, scope, d, requireCompressionInfo)
}
}
}
if uncompressedDigest != primaryDigest {
res = mem.appendReplacementCandidates(res, transport, scope, uncompressedDigest, requireCompressionInfo)
}

View File

@ -30,7 +30,7 @@ var (
// Zstd compression.
Zstd = internal.NewAlgorithm(types.ZstdAlgorithmName, types.ZstdAlgorithmName,
[]byte{0x28, 0xb5, 0x2f, 0xfd}, ZstdDecompressor, zstdCompressor)
// Zstd:chunked compression.
// ZstdChunked is a Zstd compression with chunk metadata which allows random access to individual files.
ZstdChunked = internal.NewAlgorithm(types.ZstdChunkedAlgorithmName, types.ZstdAlgorithmName, /* Note: InternalUnstableUndocumentedMIMEQuestionMark is not ZstdChunkedAlgorithmName */
nil, ZstdDecompressor, compressor.ZstdCompressor)

View File

@ -12,6 +12,7 @@ import (
"strings"
"github.com/containers/image/v5/docker/reference"
"github.com/containers/image/v5/internal/set"
"github.com/containers/image/v5/pkg/sysregistriesv2"
"github.com/containers/image/v5/types"
"github.com/containers/storage/pkg/homedir"
@ -139,10 +140,7 @@ func GetAllCredentials(sys *types.SystemContext) (map[string]types.DockerAuthCon
// possible sources, and then call `GetCredentials` on them. That
// prevents us from having to reverse engineer the logic in
// `GetCredentials`.
allKeys := make(map[string]bool)
addKey := func(s string) {
allKeys[s] = true
}
allKeys := set.New[string]()
// To use GetCredentials, we must at least convert the URL forms into host names.
// While we're at it, we'll also canonicalize docker.io to the standard format.
@ -166,14 +164,14 @@ func GetAllCredentials(sys *types.SystemContext) (map[string]types.DockerAuthCon
// direct mapping to a registry, so we can just
// walk the map.
for registry := range auths.CredHelpers {
addKey(registry)
allKeys.Add(registry)
}
for key := range auths.AuthConfigs {
key := normalizeAuthFileKey(key, path.legacyFormat)
if key == normalizedDockerIORegistry {
key = "docker.io"
}
addKey(key)
allKeys.Add(key)
}
}
// External helpers.
@ -188,7 +186,7 @@ func GetAllCredentials(sys *types.SystemContext) (map[string]types.DockerAuthCon
}
}
for registry := range creds {
addKey(registry)
allKeys.Add(registry)
}
}
}
@ -196,7 +194,7 @@ func GetAllCredentials(sys *types.SystemContext) (map[string]types.DockerAuthCon
// Now use `GetCredentials` to the specific auth configs for each
// previously listed registry.
authConfigs := make(map[string]types.DockerAuthConfig)
for key := range allKeys {
for _, key := range allKeys.Values() {
authConf, err := GetCredentials(sys, key)
if err != nil {
// Note: we rely on the logging in `GetCredentials`.
@ -394,7 +392,7 @@ func RemoveAuthentication(sys *types.SystemContext, key string) error {
if isNamespaced {
logrus.Debugf("Not removing credentials because namespaced keys are not supported for the credential helper: %s", helper)
return
} else {
}
err := deleteAuthFromCredHelper(helper, key)
if err == nil {
logrus.Debugf("Credentials for %q were deleted from credential helper %s", key, helper)
@ -405,7 +403,6 @@ func RemoveAuthentication(sys *types.SystemContext, key string) error {
logrus.Debugf("Not logged in to %s with credential helper %s", key, helper)
return
}
}
multiErr = multierror.Append(multiErr, fmt.Errorf("removing credentials for %s from credential helper %s: %w", key, helper, err))
}
@ -759,8 +756,8 @@ func decodeDockerAuth(path, key string, conf dockerAuthConfig) (types.DockerAuth
return types.DockerAuthConfig{}, err
}
parts := strings.SplitN(string(decoded), ":", 2)
if len(parts) != 2 {
user, passwordPart, valid := strings.Cut(string(decoded), ":")
if !valid {
// if it's invalid just skip, as docker does
if len(decoded) > 0 { // Docker writes "auths": { "$host": {} } entries if a credential helper is used, don't warn about those
logrus.Warnf(`Error parsing the "auth" field of a credential entry %q in %q, missing semicolon`, key, path) // Don't include the text of decoded, because that might put secrets into a log.
@ -770,8 +767,7 @@ func decodeDockerAuth(path, key string, conf dockerAuthConfig) (types.DockerAuth
return types.DockerAuthConfig{}, nil
}
user := parts[0]
password := strings.Trim(parts[1], "\x00")
password := strings.Trim(passwordPart, "\x00")
return types.DockerAuthConfig{
Username: user,
Password: password,
@ -786,7 +782,7 @@ func normalizeAuthFileKey(key string, legacyFormat bool) string {
stripped = strings.TrimPrefix(stripped, "https://")
if legacyFormat || stripped != key {
stripped = strings.SplitN(stripped, "/", 2)[0]
stripped, _, _ = strings.Cut(stripped, "/")
}
return normalizeRegistry(stripped)

View File

@ -14,6 +14,7 @@ import (
"github.com/containers/storage/pkg/homedir"
"github.com/containers/storage/pkg/lockfile"
"github.com/sirupsen/logrus"
"golang.org/x/exp/maps"
)
// defaultShortNameMode is the default mode of registries.conf files if the
@ -308,9 +309,7 @@ func newShortNameAliasCache(path string, conf *shortNameAliasConf) (*shortNameAl
// updateWithConfigurationFrom updates c with configuration from updates.
// In case of conflict, updates is preferred.
func (c *shortNameAliasCache) updateWithConfigurationFrom(updates *shortNameAliasCache) {
for name, value := range updates.namedAliases {
c.namedAliases[name] = value
}
maps.Copy(c.namedAliases, updates.namedAliases)
}
func loadShortNameAliasConf(confPath string) (*shortNameAliasConf, *shortNameAliasCache, error) {

View File

@ -16,6 +16,7 @@ import (
"github.com/containers/storage/pkg/homedir"
"github.com/containers/storage/pkg/regexp"
"github.com/sirupsen/logrus"
"golang.org/x/exp/maps"
)
// systemRegistriesConfPath is the path to the system-wide registry
@ -1019,12 +1020,9 @@ func (c *parsedConfig) updateWithConfigurationFrom(updates *parsedConfig) {
// Go maps have a non-deterministic order when iterating the keys, so
// we dump them in a slice and sort it to enforce some order in
// Registries slice. Some consumers of c/image (e.g., CRI-O) log the
// the configuration where a non-deterministic order could easily cause
// configuration where a non-deterministic order could easily cause
// confusion.
prefixes := []string{}
for prefix := range registryMap {
prefixes = append(prefixes, prefix)
}
prefixes := maps.Keys(registryMap)
sort.Strings(prefixes)
c.partialV2.Registries = []Registry{}

View File

@ -12,6 +12,7 @@ import (
"time"
"github.com/sirupsen/logrus"
"golang.org/x/exp/slices"
)
// SetupCertificates opens all .crt, .cert, and .key files in dir and appends / loads certs and key pairs as appropriate to tlsc
@ -80,12 +81,9 @@ func SetupCertificates(dir string, tlsc *tls.Config) error {
}
func hasFile(files []os.DirEntry, name string) bool {
for _, f := range files {
if f.Name() == name {
return true
}
}
return false
return slices.ContainsFunc(files, func(f os.DirEntry) bool {
return f.Name() == name
})
}
// NewTransport Creates a default transport
@ -93,7 +91,6 @@ func NewTransport() *http.Transport {
direct := &net.Dialer{
Timeout: 30 * time.Second,
KeepAlive: 30 * time.Second,
DualStack: true,
}
tr := &http.Transport{
Proxy: http.ProxyFromEnvironment,

View File

@ -87,7 +87,7 @@ func (ref sifReference) Transport() types.ImageTransport {
// StringWithinTransport returns a string representation of the reference, which MUST be such that
// reference.Transport().ParseReference(reference.StringWithinTransport()) returns an equivalent reference.
// NOTE: The returned string is not promised to be equal to the original input to ParseReference;
// e.g. default attribute values omitted by the user may be filled in in the return value, or vice versa.
// e.g. default attribute values omitted by the user may be filled in the return value, or vice versa.
// WARNING: Do not use the return value in the UI to describe an image, it does not contain the Transport().Name() prefix;
// instead, see transports.ImageName().
func (ref sifReference) StringWithinTransport() string {

View File

@ -12,6 +12,7 @@ import (
"github.com/containers/image/v5/signature/internal"
"github.com/sigstore/fulcio/pkg/certificate"
"github.com/sigstore/sigstore/pkg/cryptoutils"
"golang.org/x/exp/slices"
)
// fulcioTrustRoot contains policy allow validating Fulcio-issued certificates.
@ -104,12 +105,12 @@ func (f *fulcioTrustRoot) verifyFulcioCertificateAtTime(relevantTime time.Time,
// log of approved Fulcio invocations, and it's not clear where that would come from, especially human users manually
// logging in using OpenID are not going to maintain a record of those actions.
//
// Also, the SCT does not help reveal _what_ was maliciously signed, nor does it protect against malicous signatures
// Also, the SCT does not help reveal _what_ was maliciously signed, nor does it protect against malicious signatures
// by correctly-issued certificates.
//
// So, pragmatically, the ideal design seems to be to only do signatures from a trusted build system (which is, by definition,
// the arbiter of desired vs. malicious signatures) that maintains an audit log of performed signature operations; and that seems to
// make make the SCT (and all of Rekor apart from the trusted timestamp) unnecessary.
// make the SCT (and all of Rekor apart from the trusted timestamp) unnecessary.
// == Validate the recorded OIDC issuer
gotOIDCIssuer := false
@ -136,15 +137,7 @@ func (f *fulcioTrustRoot) verifyFulcioCertificateAtTime(relevantTime time.Time,
}
// == Validate the OIDC subject
foundEmail := false
// TO DO: Use slices.Contains after we update to Go 1.18
for _, certEmail := range untrustedCertificate.EmailAddresses {
if certEmail == f.subjectEmail {
foundEmail = true
break
}
}
if !foundEmail {
if !slices.Contains(untrustedCertificate.EmailAddresses, f.subjectEmail) {
return nil, internal.NewInvalidSignatureError(fmt.Sprintf("Required email %s not found (got %#v)",
f.subjectEmail,
untrustedCertificate.EmailAddresses))

View File

@ -5,6 +5,8 @@ import (
"encoding/json"
"fmt"
"io"
"github.com/containers/image/v5/internal/set"
)
// JSONFormatError is returned when JSON does not match expected format.
@ -20,8 +22,8 @@ func (err JSONFormatError) Error() string {
//
// The fieldResolver approach is useful for decoding the Policy.Transports map; using it for structs is a bit lazy,
// we could use reflection to automate this. Later?
func ParanoidUnmarshalJSONObject(data []byte, fieldResolver func(string) interface{}) error {
seenKeys := map[string]struct{}{}
func ParanoidUnmarshalJSONObject(data []byte, fieldResolver func(string) any) error {
seenKeys := set.New[string]()
dec := json.NewDecoder(bytes.NewReader(data))
t, err := dec.Token()
@ -45,10 +47,10 @@ func ParanoidUnmarshalJSONObject(data []byte, fieldResolver func(string) interfa
// Coverage: This should never happen, dec.Token() rejects non-string-literals in this state.
return JSONFormatError(fmt.Sprintf("Key string literal expected, got \"%s\"", t))
}
if _, ok := seenKeys[key]; ok {
if seenKeys.Contains(key) {
return JSONFormatError(fmt.Sprintf("Duplicate key \"%s\"", key))
}
seenKeys[key] = struct{}{}
seenKeys.Add(key)
valuePtr := fieldResolver(key)
if valuePtr == nil {
@ -68,11 +70,11 @@ func ParanoidUnmarshalJSONObject(data []byte, fieldResolver func(string) interfa
// ParanoidUnmarshalJSONObjectExactFields unmarshals data as a JSON object, but failing on the slightest unexpected aspect
// (including duplicated keys, unrecognized keys, and non-matching types). Each of the fields in exactFields
// must be present exactly once, and none other fields are accepted.
func ParanoidUnmarshalJSONObjectExactFields(data []byte, exactFields map[string]interface{}) error {
seenKeys := map[string]struct{}{}
if err := ParanoidUnmarshalJSONObject(data, func(key string) interface{} {
func ParanoidUnmarshalJSONObjectExactFields(data []byte, exactFields map[string]any) error {
seenKeys := set.New[string]()
if err := ParanoidUnmarshalJSONObject(data, func(key string) any {
if valuePtr, ok := exactFields[key]; ok {
seenKeys[key] = struct{}{}
seenKeys.Add(key)
return valuePtr
}
return nil
@ -80,7 +82,7 @@ func ParanoidUnmarshalJSONObjectExactFields(data []byte, exactFields map[string]
return err
}
for key := range exactFields {
if _, ok := seenKeys[key]; !ok {
if !seenKeys.Contains(key) {
return JSONFormatError(fmt.Sprintf(`Key "%s" missing in a JSON object`, key))
}
}

View File

@ -28,7 +28,7 @@ type UntrustedRekorSET struct {
}
type UntrustedRekorPayload struct {
Body []byte // In cosign, this is an interface{}, but only a string works
Body []byte // In cosign, this is an any, but only a string works
IntegratedTime int64
LogIndex int64
LogID string
@ -51,7 +51,7 @@ func (s *UntrustedRekorSET) UnmarshalJSON(data []byte) error {
// strictUnmarshalJSON is UnmarshalJSON, except that it may return the internal JSONFormatError error type.
// Splitting it into a separate function allows us to do the JSONFormatError → InvalidSignatureError in a single place, the caller.
func (s *UntrustedRekorSET) strictUnmarshalJSON(data []byte) error {
return ParanoidUnmarshalJSONObjectExactFields(data, map[string]interface{}{
return ParanoidUnmarshalJSONObjectExactFields(data, map[string]any{
"SignedEntryTimestamp": &s.UntrustedSignedEntryTimestamp,
"Payload": &s.UntrustedPayload,
})
@ -63,7 +63,7 @@ var _ json.Marshaler = (*UntrustedRekorSET)(nil)
// MarshalJSON implements the json.Marshaler interface.
func (s UntrustedRekorSET) MarshalJSON() ([]byte, error) {
return json.Marshal(map[string]interface{}{
return json.Marshal(map[string]any{
"SignedEntryTimestamp": s.UntrustedSignedEntryTimestamp,
"Payload": s.UntrustedPayload,
})
@ -86,7 +86,7 @@ func (p *UntrustedRekorPayload) UnmarshalJSON(data []byte) error {
// strictUnmarshalJSON is UnmarshalJSON, except that it may return the internal JSONFormatError error type.
// Splitting it into a separate function allows us to do the JSONFormatError → InvalidSignatureError in a single place, the caller.
func (p *UntrustedRekorPayload) strictUnmarshalJSON(data []byte) error {
return ParanoidUnmarshalJSONObjectExactFields(data, map[string]interface{}{
return ParanoidUnmarshalJSONObjectExactFields(data, map[string]any{
"body": &p.Body,
"integratedTime": &p.IntegratedTime,
"logIndex": &p.LogIndex,
@ -100,7 +100,7 @@ var _ json.Marshaler = (*UntrustedRekorPayload)(nil)
// MarshalJSON implements the json.Marshaler interface.
func (p UntrustedRekorPayload) MarshalJSON() ([]byte, error) {
return json.Marshal(map[string]interface{}{
return json.Marshal(map[string]any{
"body": p.Body,
"integratedTime": p.IntegratedTime,
"logIndex": p.LogIndex,
@ -159,7 +159,7 @@ func VerifyRekorSET(publicKey *ecdsa.PublicKey, unverifiedRekorSET []byte, unver
}
hashedRekordV001Bytes, err := json.Marshal(hashedRekord.Spec)
if err != nil {
// Coverage: hashedRekord.Spec is an interface{} that was just unmarshaled,
// Coverage: hashedRekord.Spec is an any that was just unmarshaled,
// so this should never fail.
return time.Time{}, NewInvalidSignatureError(fmt.Sprintf("re-creating hashedrekord spec: %v", err))
}

View File

@ -21,14 +21,14 @@ const (
// UntrustedSigstorePayload is a parsed content of a sigstore signature payload (not the full signature)
type UntrustedSigstorePayload struct {
UntrustedDockerManifestDigest digest.Digest
UntrustedDockerReference string // FIXME: more precise type?
UntrustedCreatorID *string
untrustedDockerManifestDigest digest.Digest
untrustedDockerReference string // FIXME: more precise type?
untrustedCreatorID *string
// This is intentionally an int64; the native JSON float64 type would allow to represent _some_ sub-second precision,
// but not nearly enough (with current timestamp values, a single unit in the last place is on the order of hundreds of nanoseconds).
// So, this is explicitly an int64, and we reject fractional values. If we did need more precise timestamps eventually,
// we would add another field, UntrustedTimestampNS int64.
UntrustedTimestamp *int64
untrustedTimestamp *int64
}
// NewUntrustedSigstorePayload returns an UntrustedSigstorePayload object with
@ -39,10 +39,10 @@ func NewUntrustedSigstorePayload(dockerManifestDigest digest.Digest, dockerRefer
creatorID := "containers/image " + version.Version
timestamp := time.Now().Unix()
return UntrustedSigstorePayload{
UntrustedDockerManifestDigest: dockerManifestDigest,
UntrustedDockerReference: dockerReference,
UntrustedCreatorID: &creatorID,
UntrustedTimestamp: &timestamp,
untrustedDockerManifestDigest: dockerManifestDigest,
untrustedDockerReference: dockerReference,
untrustedCreatorID: &creatorID,
untrustedTimestamp: &timestamp,
}
}
@ -52,22 +52,22 @@ var _ json.Marshaler = (*UntrustedSigstorePayload)(nil)
// MarshalJSON implements the json.Marshaler interface.
func (s UntrustedSigstorePayload) MarshalJSON() ([]byte, error) {
if s.UntrustedDockerManifestDigest == "" || s.UntrustedDockerReference == "" {
if s.untrustedDockerManifestDigest == "" || s.untrustedDockerReference == "" {
return nil, errors.New("Unexpected empty signature content")
}
critical := map[string]interface{}{
critical := map[string]any{
"type": sigstoreSignatureType,
"image": map[string]string{"docker-manifest-digest": s.UntrustedDockerManifestDigest.String()},
"identity": map[string]string{"docker-reference": s.UntrustedDockerReference},
"image": map[string]string{"docker-manifest-digest": s.untrustedDockerManifestDigest.String()},
"identity": map[string]string{"docker-reference": s.untrustedDockerReference},
}
optional := map[string]interface{}{}
if s.UntrustedCreatorID != nil {
optional["creator"] = *s.UntrustedCreatorID
optional := map[string]any{}
if s.untrustedCreatorID != nil {
optional["creator"] = *s.untrustedCreatorID
}
if s.UntrustedTimestamp != nil {
optional["timestamp"] = *s.UntrustedTimestamp
if s.untrustedTimestamp != nil {
optional["timestamp"] = *s.untrustedTimestamp
}
signature := map[string]interface{}{
signature := map[string]any{
"critical": critical,
"optional": optional,
}
@ -92,7 +92,7 @@ func (s *UntrustedSigstorePayload) UnmarshalJSON(data []byte) error {
// Splitting it into a separate function allows us to do the JSONFormatError → InvalidSignatureError in a single place, the caller.
func (s *UntrustedSigstorePayload) strictUnmarshalJSON(data []byte) error {
var critical, optional json.RawMessage
if err := ParanoidUnmarshalJSONObjectExactFields(data, map[string]interface{}{
if err := ParanoidUnmarshalJSONObjectExactFields(data, map[string]any{
"critical": &critical,
"optional": &optional,
}); err != nil {
@ -104,7 +104,7 @@ func (s *UntrustedSigstorePayload) strictUnmarshalJSON(data []byte) error {
var gotCreatorID, gotTimestamp = false, false
// /usr/bin/cosign generates "optional": null if there are no user-specified annotations.
if !bytes.Equal(optional, []byte("null")) {
if err := ParanoidUnmarshalJSONObject(optional, func(key string) interface{} {
if err := ParanoidUnmarshalJSONObject(optional, func(key string) any {
switch key {
case "creator":
gotCreatorID = true
@ -113,7 +113,7 @@ func (s *UntrustedSigstorePayload) strictUnmarshalJSON(data []byte) error {
gotTimestamp = true
return &timestamp
default:
var ignore interface{}
var ignore any
return &ignore
}
}); err != nil {
@ -121,19 +121,19 @@ func (s *UntrustedSigstorePayload) strictUnmarshalJSON(data []byte) error {
}
}
if gotCreatorID {
s.UntrustedCreatorID = &creatorID
s.untrustedCreatorID = &creatorID
}
if gotTimestamp {
intTimestamp := int64(timestamp)
if float64(intTimestamp) != timestamp {
return NewInvalidSignatureError("Field optional.timestamp is not is not an integer")
}
s.UntrustedTimestamp = &intTimestamp
s.untrustedTimestamp = &intTimestamp
}
var t string
var image, identity json.RawMessage
if err := ParanoidUnmarshalJSONObjectExactFields(critical, map[string]interface{}{
if err := ParanoidUnmarshalJSONObjectExactFields(critical, map[string]any{
"type": &t,
"image": &image,
"identity": &identity,
@ -145,15 +145,15 @@ func (s *UntrustedSigstorePayload) strictUnmarshalJSON(data []byte) error {
}
var digestString string
if err := ParanoidUnmarshalJSONObjectExactFields(image, map[string]interface{}{
if err := ParanoidUnmarshalJSONObjectExactFields(image, map[string]any{
"docker-manifest-digest": &digestString,
}); err != nil {
return err
}
s.UntrustedDockerManifestDigest = digest.Digest(digestString)
s.untrustedDockerManifestDigest = digest.Digest(digestString)
return ParanoidUnmarshalJSONObjectExactFields(identity, map[string]interface{}{
"docker-reference": &s.UntrustedDockerReference,
return ParanoidUnmarshalJSONObjectExactFields(identity, map[string]any{
"docker-reference": &s.untrustedDockerReference,
})
}
@ -191,10 +191,10 @@ func VerifySigstorePayload(publicKey crypto.PublicKey, unverifiedPayload []byte,
if err := json.Unmarshal(unverifiedPayload, &unmatchedPayload); err != nil {
return nil, NewInvalidSignatureError(err.Error())
}
if err := rules.ValidateSignedDockerManifestDigest(unmatchedPayload.UntrustedDockerManifestDigest); err != nil {
if err := rules.ValidateSignedDockerManifestDigest(unmatchedPayload.untrustedDockerManifestDigest); err != nil {
return nil, err
}
if err := rules.ValidateSignedDockerReference(unmatchedPayload.UntrustedDockerReference); err != nil {
if err := rules.ValidateSignedDockerReference(unmatchedPayload.untrustedDockerReference); err != nil {
return nil, err
}
// SigstorePayloadAcceptanceRules have accepted this value.

View File

@ -104,7 +104,7 @@ var _ json.Unmarshaler = (*Policy)(nil)
func (p *Policy) UnmarshalJSON(data []byte) error {
*p = Policy{}
transports := policyTransportsMap{}
if err := internal.ParanoidUnmarshalJSONObject(data, func(key string) interface{} {
if err := internal.ParanoidUnmarshalJSONObject(data, func(key string) any {
switch key {
case "default":
return &p.Default
@ -135,7 +135,7 @@ func (m *policyTransportsMap) UnmarshalJSON(data []byte) error {
// We can't unmarshal directly into map values because it is not possible to take an address of a map value.
// So, use a temporary map of pointers-to-slices and convert.
tmpMap := map[string]*PolicyTransportScopes{}
if err := internal.ParanoidUnmarshalJSONObject(data, func(key string) interface{} {
if err := internal.ParanoidUnmarshalJSONObject(data, func(key string) any {
// transport can be nil
transport := transports.Get(key)
// internal.ParanoidUnmarshalJSONObject detects key duplication for us, check just to be safe.
@ -181,7 +181,7 @@ func (m *policyTransportScopesWithTransport) UnmarshalJSON(data []byte) error {
// We can't unmarshal directly into map values because it is not possible to take an address of a map value.
// So, use a temporary map of pointers-to-slices and convert.
tmpMap := map[string]*PolicyRequirements{}
if err := internal.ParanoidUnmarshalJSONObject(data, func(key string) interface{} {
if err := internal.ParanoidUnmarshalJSONObject(data, func(key string) any {
// internal.ParanoidUnmarshalJSONObject detects key duplication for us, check just to be safe.
if _, ok := tmpMap[key]; ok {
return nil
@ -271,7 +271,7 @@ var _ json.Unmarshaler = (*prInsecureAcceptAnything)(nil)
func (pr *prInsecureAcceptAnything) UnmarshalJSON(data []byte) error {
*pr = prInsecureAcceptAnything{}
var tmp prInsecureAcceptAnything
if err := internal.ParanoidUnmarshalJSONObjectExactFields(data, map[string]interface{}{
if err := internal.ParanoidUnmarshalJSONObjectExactFields(data, map[string]any{
"type": &tmp.Type,
}); err != nil {
return err
@ -301,7 +301,7 @@ var _ json.Unmarshaler = (*prReject)(nil)
func (pr *prReject) UnmarshalJSON(data []byte) error {
*pr = prReject{}
var tmp prReject
if err := internal.ParanoidUnmarshalJSONObjectExactFields(data, map[string]interface{}{
if err := internal.ParanoidUnmarshalJSONObjectExactFields(data, map[string]any{
"type": &tmp.Type,
}); err != nil {
return err
@ -384,7 +384,7 @@ func (pr *prSignedBy) UnmarshalJSON(data []byte) error {
var tmp prSignedBy
var gotKeyPath, gotKeyPaths, gotKeyData = false, false, false
var signedIdentity json.RawMessage
if err := internal.ParanoidUnmarshalJSONObject(data, func(key string) interface{} {
if err := internal.ParanoidUnmarshalJSONObject(data, func(key string) any {
switch key {
case "type":
return &tmp.Type
@ -495,7 +495,7 @@ func (pr *prSignedBaseLayer) UnmarshalJSON(data []byte) error {
*pr = prSignedBaseLayer{}
var tmp prSignedBaseLayer
var baseLayerIdentity json.RawMessage
if err := internal.ParanoidUnmarshalJSONObjectExactFields(data, map[string]interface{}{
if err := internal.ParanoidUnmarshalJSONObjectExactFields(data, map[string]any{
"type": &tmp.Type,
"baseLayerIdentity": &baseLayerIdentity,
}); err != nil {
@ -564,7 +564,7 @@ var _ json.Unmarshaler = (*prmMatchExact)(nil)
func (prm *prmMatchExact) UnmarshalJSON(data []byte) error {
*prm = prmMatchExact{}
var tmp prmMatchExact
if err := internal.ParanoidUnmarshalJSONObjectExactFields(data, map[string]interface{}{
if err := internal.ParanoidUnmarshalJSONObjectExactFields(data, map[string]any{
"type": &tmp.Type,
}); err != nil {
return err
@ -594,7 +594,7 @@ var _ json.Unmarshaler = (*prmMatchRepoDigestOrExact)(nil)
func (prm *prmMatchRepoDigestOrExact) UnmarshalJSON(data []byte) error {
*prm = prmMatchRepoDigestOrExact{}
var tmp prmMatchRepoDigestOrExact
if err := internal.ParanoidUnmarshalJSONObjectExactFields(data, map[string]interface{}{
if err := internal.ParanoidUnmarshalJSONObjectExactFields(data, map[string]any{
"type": &tmp.Type,
}); err != nil {
return err
@ -624,7 +624,7 @@ var _ json.Unmarshaler = (*prmMatchRepository)(nil)
func (prm *prmMatchRepository) UnmarshalJSON(data []byte) error {
*prm = prmMatchRepository{}
var tmp prmMatchRepository
if err := internal.ParanoidUnmarshalJSONObjectExactFields(data, map[string]interface{}{
if err := internal.ParanoidUnmarshalJSONObjectExactFields(data, map[string]any{
"type": &tmp.Type,
}); err != nil {
return err
@ -664,7 +664,7 @@ var _ json.Unmarshaler = (*prmExactReference)(nil)
func (prm *prmExactReference) UnmarshalJSON(data []byte) error {
*prm = prmExactReference{}
var tmp prmExactReference
if err := internal.ParanoidUnmarshalJSONObjectExactFields(data, map[string]interface{}{
if err := internal.ParanoidUnmarshalJSONObjectExactFields(data, map[string]any{
"type": &tmp.Type,
"dockerReference": &tmp.DockerReference,
}); err != nil {
@ -706,7 +706,7 @@ var _ json.Unmarshaler = (*prmExactRepository)(nil)
func (prm *prmExactRepository) UnmarshalJSON(data []byte) error {
*prm = prmExactRepository{}
var tmp prmExactRepository
if err := internal.ParanoidUnmarshalJSONObjectExactFields(data, map[string]interface{}{
if err := internal.ParanoidUnmarshalJSONObjectExactFields(data, map[string]any{
"type": &tmp.Type,
"dockerRepository": &tmp.DockerRepository,
}); err != nil {
@ -778,7 +778,7 @@ var _ json.Unmarshaler = (*prmRemapIdentity)(nil)
func (prm *prmRemapIdentity) UnmarshalJSON(data []byte) error {
*prm = prmRemapIdentity{}
var tmp prmRemapIdentity
if err := internal.ParanoidUnmarshalJSONObjectExactFields(data, map[string]interface{}{
if err := internal.ParanoidUnmarshalJSONObjectExactFields(data, map[string]any{
"type": &tmp.Type,
"prefix": &tmp.Prefix,
"signedPrefix": &tmp.SignedPrefix,

View File

@ -147,7 +147,7 @@ func (pr *prSigstoreSigned) UnmarshalJSON(data []byte) error {
var gotKeyPath, gotKeyData, gotFulcio, gotRekorPublicKeyPath, gotRekorPublicKeyData bool
var fulcio prSigstoreSignedFulcio
var signedIdentity json.RawMessage
if err := internal.ParanoidUnmarshalJSONObject(data, func(key string) interface{} {
if err := internal.ParanoidUnmarshalJSONObject(data, func(key string) any {
switch key {
case "type":
return &tmp.Type
@ -298,7 +298,7 @@ func (f *prSigstoreSignedFulcio) UnmarshalJSON(data []byte) error {
*f = prSigstoreSignedFulcio{}
var tmp prSigstoreSignedFulcio
var gotCAPath, gotCAData, gotOIDCIssuer, gotSubjectEmail bool // = false...
if err := internal.ParanoidUnmarshalJSONObject(data, func(key string) interface{} {
if err := internal.ParanoidUnmarshalJSONObject(data, func(key string) any {
switch key {
case "caPath":
gotCAPath = true

View File

@ -46,7 +46,7 @@ type PolicyRequirement interface {
// - sarRejected if the signature has not been verified;
// in that case error must be non-nil, and should be an PolicyRequirementError if evaluation
// succeeded but the result was rejection.
// - sarUnknown if if this PolicyRequirement does not deal with signatures.
// - sarUnknown if this PolicyRequirement does not deal with signatures.
// NOTE: sarUnknown should not be returned if this PolicyRequirement should make a decision but something failed.
// Returning sarUnknown and a non-nil error value is invalid.
// WARNING: This makes the signature contents acceptable for further processing,

View File

@ -12,6 +12,7 @@ import (
"github.com/containers/image/v5/internal/private"
"github.com/containers/image/v5/manifest"
digest "github.com/opencontainers/go-digest"
"golang.org/x/exp/slices"
)
func (pr *prSignedBy) isSignatureAuthorAccepted(ctx context.Context, image private.UnparsedImage, sig []byte) (signatureAcceptanceResult, *Signature, error) {
@ -67,11 +68,9 @@ func (pr *prSignedBy) isSignatureAuthorAccepted(ctx context.Context, image priva
signature, err := verifyAndExtractSignature(mech, sig, signatureAcceptanceRules{
validateKeyIdentity: func(keyIdentity string) error {
for _, trustedIdentity := range trustedIdentities {
if keyIdentity == trustedIdentity {
if slices.Contains(trustedIdentities, keyIdentity) {
return nil
}
}
// Coverage: We use a private GPG home directory and only import trusted keys, so this should
// not be reachable.
return PolicyRequirementError(fmt.Sprintf("Signature by key %s is not accepted", keyIdentity))

View File

@ -33,12 +33,12 @@ import (
// limitations under the License.
const (
// from sigstore/cosign/pkg/cosign.sigstorePrivateKeyPemType
// from sigstore/cosign/pkg/cosign.sigstorePrivateKeyPemType.
sigstorePrivateKeyPemType = "ENCRYPTED COSIGN PRIVATE KEY"
)
// from sigstore/cosign/pkg/cosign.loadPrivateKey
// FIXME: Do we need all of these key formats, and all of those
// FIXME: Do we need all of these key formats?
func loadPrivateKey(key []byte, pass []byte) (signature.SignerVerifier, error) {
// Decrypt first
p, _ := pem.Decode(key)

View File

@ -31,14 +31,14 @@ type Signature struct {
// untrustedSignature is a parsed content of a signature.
type untrustedSignature struct {
UntrustedDockerManifestDigest digest.Digest
UntrustedDockerReference string // FIXME: more precise type?
UntrustedCreatorID *string
untrustedDockerManifestDigest digest.Digest
untrustedDockerReference string // FIXME: more precise type?
untrustedCreatorID *string
// This is intentionally an int64; the native JSON float64 type would allow to represent _some_ sub-second precision,
// but not nearly enough (with current timestamp values, a single unit in the last place is on the order of hundreds of nanoseconds).
// So, this is explicitly an int64, and we reject fractional values. If we did need more precise timestamps eventually,
// we would add another field, UntrustedTimestampNS int64.
UntrustedTimestamp *int64
untrustedTimestamp *int64
}
// UntrustedSignatureInformation is information available in an untrusted signature.
@ -65,10 +65,10 @@ func newUntrustedSignature(dockerManifestDigest digest.Digest, dockerReference s
creatorID := "atomic " + version.Version
timestamp := time.Now().Unix()
return untrustedSignature{
UntrustedDockerManifestDigest: dockerManifestDigest,
UntrustedDockerReference: dockerReference,
UntrustedCreatorID: &creatorID,
UntrustedTimestamp: &timestamp,
untrustedDockerManifestDigest: dockerManifestDigest,
untrustedDockerReference: dockerReference,
untrustedCreatorID: &creatorID,
untrustedTimestamp: &timestamp,
}
}
@ -78,22 +78,22 @@ var _ json.Marshaler = (*untrustedSignature)(nil)
// MarshalJSON implements the json.Marshaler interface.
func (s untrustedSignature) MarshalJSON() ([]byte, error) {
if s.UntrustedDockerManifestDigest == "" || s.UntrustedDockerReference == "" {
if s.untrustedDockerManifestDigest == "" || s.untrustedDockerReference == "" {
return nil, errors.New("Unexpected empty signature content")
}
critical := map[string]interface{}{
critical := map[string]any{
"type": signatureType,
"image": map[string]string{"docker-manifest-digest": s.UntrustedDockerManifestDigest.String()},
"identity": map[string]string{"docker-reference": s.UntrustedDockerReference},
"image": map[string]string{"docker-manifest-digest": s.untrustedDockerManifestDigest.String()},
"identity": map[string]string{"docker-reference": s.untrustedDockerReference},
}
optional := map[string]interface{}{}
if s.UntrustedCreatorID != nil {
optional["creator"] = *s.UntrustedCreatorID
optional := map[string]any{}
if s.untrustedCreatorID != nil {
optional["creator"] = *s.untrustedCreatorID
}
if s.UntrustedTimestamp != nil {
optional["timestamp"] = *s.UntrustedTimestamp
if s.untrustedTimestamp != nil {
optional["timestamp"] = *s.untrustedTimestamp
}
signature := map[string]interface{}{
signature := map[string]any{
"critical": critical,
"optional": optional,
}
@ -118,7 +118,7 @@ func (s *untrustedSignature) UnmarshalJSON(data []byte) error {
// Splitting it into a separate function allows us to do the internal.JSONFormatError → InvalidSignatureError in a single place, the caller.
func (s *untrustedSignature) strictUnmarshalJSON(data []byte) error {
var critical, optional json.RawMessage
if err := internal.ParanoidUnmarshalJSONObjectExactFields(data, map[string]interface{}{
if err := internal.ParanoidUnmarshalJSONObjectExactFields(data, map[string]any{
"critical": &critical,
"optional": &optional,
}); err != nil {
@ -128,7 +128,7 @@ func (s *untrustedSignature) strictUnmarshalJSON(data []byte) error {
var creatorID string
var timestamp float64
var gotCreatorID, gotTimestamp = false, false
if err := internal.ParanoidUnmarshalJSONObject(optional, func(key string) interface{} {
if err := internal.ParanoidUnmarshalJSONObject(optional, func(key string) any {
switch key {
case "creator":
gotCreatorID = true
@ -137,26 +137,26 @@ func (s *untrustedSignature) strictUnmarshalJSON(data []byte) error {
gotTimestamp = true
return &timestamp
default:
var ignore interface{}
var ignore any
return &ignore
}
}); err != nil {
return err
}
if gotCreatorID {
s.UntrustedCreatorID = &creatorID
s.untrustedCreatorID = &creatorID
}
if gotTimestamp {
intTimestamp := int64(timestamp)
if float64(intTimestamp) != timestamp {
return internal.NewInvalidSignatureError("Field optional.timestamp is not is not an integer")
}
s.UntrustedTimestamp = &intTimestamp
s.untrustedTimestamp = &intTimestamp
}
var t string
var image, identity json.RawMessage
if err := internal.ParanoidUnmarshalJSONObjectExactFields(critical, map[string]interface{}{
if err := internal.ParanoidUnmarshalJSONObjectExactFields(critical, map[string]any{
"type": &t,
"image": &image,
"identity": &identity,
@ -168,15 +168,15 @@ func (s *untrustedSignature) strictUnmarshalJSON(data []byte) error {
}
var digestString string
if err := internal.ParanoidUnmarshalJSONObjectExactFields(image, map[string]interface{}{
if err := internal.ParanoidUnmarshalJSONObjectExactFields(image, map[string]any{
"docker-manifest-digest": &digestString,
}); err != nil {
return err
}
s.UntrustedDockerManifestDigest = digest.Digest(digestString)
s.untrustedDockerManifestDigest = digest.Digest(digestString)
return internal.ParanoidUnmarshalJSONObjectExactFields(identity, map[string]interface{}{
"docker-reference": &s.UntrustedDockerReference,
return internal.ParanoidUnmarshalJSONObjectExactFields(identity, map[string]any{
"docker-reference": &s.untrustedDockerReference,
})
}
@ -229,16 +229,16 @@ func verifyAndExtractSignature(mech SigningMechanism, unverifiedSignature []byte
if err := json.Unmarshal(signed, &unmatchedSignature); err != nil {
return nil, internal.NewInvalidSignatureError(err.Error())
}
if err := rules.validateSignedDockerManifestDigest(unmatchedSignature.UntrustedDockerManifestDigest); err != nil {
if err := rules.validateSignedDockerManifestDigest(unmatchedSignature.untrustedDockerManifestDigest); err != nil {
return nil, err
}
if err := rules.validateSignedDockerReference(unmatchedSignature.UntrustedDockerReference); err != nil {
if err := rules.validateSignedDockerReference(unmatchedSignature.untrustedDockerReference); err != nil {
return nil, err
}
// signatureAcceptanceRules have accepted this value.
return &Signature{
DockerManifestDigest: unmatchedSignature.UntrustedDockerManifestDigest,
DockerReference: unmatchedSignature.UntrustedDockerReference,
DockerManifestDigest: unmatchedSignature.untrustedDockerManifestDigest,
DockerReference: unmatchedSignature.untrustedDockerReference,
}, nil
}
@ -269,14 +269,14 @@ func GetUntrustedSignatureInformationWithoutVerifying(untrustedSignatureBytes []
}
var timestamp *time.Time // = nil
if untrustedDecodedContents.UntrustedTimestamp != nil {
ts := time.Unix(*untrustedDecodedContents.UntrustedTimestamp, 0)
if untrustedDecodedContents.untrustedTimestamp != nil {
ts := time.Unix(*untrustedDecodedContents.untrustedTimestamp, 0)
timestamp = &ts
}
return &UntrustedSignatureInformation{
UntrustedDockerManifestDigest: untrustedDecodedContents.UntrustedDockerManifestDigest,
UntrustedDockerReference: untrustedDecodedContents.UntrustedDockerReference,
UntrustedCreatorID: untrustedDecodedContents.UntrustedCreatorID,
UntrustedDockerManifestDigest: untrustedDecodedContents.untrustedDockerManifestDigest,
UntrustedDockerReference: untrustedDecodedContents.untrustedDockerReference,
UntrustedCreatorID: untrustedDecodedContents.untrustedCreatorID,
UntrustedTimestamp: timestamp,
UntrustedShortKeyIdentifier: shortKeyIdentifier,
}, nil

View File

@ -21,6 +21,7 @@ import (
"github.com/containers/image/v5/internal/imagedestination/stubs"
"github.com/containers/image/v5/internal/private"
"github.com/containers/image/v5/internal/putblobdigest"
"github.com/containers/image/v5/internal/set"
"github.com/containers/image/v5/internal/signature"
"github.com/containers/image/v5/internal/tmpdir"
"github.com/containers/image/v5/manifest"
@ -34,6 +35,7 @@ import (
digest "github.com/opencontainers/go-digest"
imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1"
"github.com/sirupsen/logrus"
"golang.org/x/exp/slices"
)
var (
@ -157,7 +159,7 @@ func (s *storageImageDestination) computeNextBlobCacheFile() string {
// to any other readers for download using the supplied digest.
// If stream.Read() at any time, ESPECIALLY at end of input, returns an error, PutBlob MUST 1) fail, and 2) delete any data stored so far.
func (s *storageImageDestination) PutBlobWithOptions(ctx context.Context, stream io.Reader, blobinfo types.BlobInfo, options private.PutBlobOptions) (types.BlobInfo, error) {
info, err := s.putBlobToPendingFile(ctx, stream, blobinfo, &options)
info, err := s.putBlobToPendingFile(stream, blobinfo, &options)
if err != nil {
return info, err
}
@ -171,7 +173,7 @@ func (s *storageImageDestination) PutBlobWithOptions(ctx context.Context, stream
// putBlobToPendingFile implements ImageDestination.PutBlobWithOptions, storing stream into an on-disk file.
// The caller must arrange the blob to be eventually committed using s.commitLayer().
func (s *storageImageDestination) putBlobToPendingFile(ctx context.Context, stream io.Reader, blobinfo types.BlobInfo, options *private.PutBlobOptions) (types.BlobInfo, error) {
func (s *storageImageDestination) putBlobToPendingFile(stream io.Reader, blobinfo types.BlobInfo, options *private.PutBlobOptions) (types.BlobInfo, error) {
// Stores a layer or data blob in our temporary directory, checking that any information
// in the blobinfo matches the incoming data.
errorBlobInfo := types.BlobInfo{
@ -201,7 +203,7 @@ func (s *storageImageDestination) putBlobToPendingFile(ctx context.Context, stre
diffID := digest.Canonical.Digester()
// Copy the data to the file.
// TODO: This can take quite some time, and should ideally be cancellable using ctx.Done().
// TODO: This can take quite some time, and should ideally be cancellable using context.Context.
_, err = io.Copy(diffID.Hash(), decompressed)
decompressed.Close()
if err != nil {
@ -242,7 +244,7 @@ type zstdFetcher struct {
// GetBlobAt converts from chunked.GetBlobAt to BlobChunkAccessor.GetBlobAt.
func (f *zstdFetcher) GetBlobAt(chunks []chunked.ImageSourceChunk) (chan io.ReadCloser, chan error, error) {
var newChunks []private.ImageSourceChunk
newChunks := make([]private.ImageSourceChunk, 0, len(chunks))
for _, v := range chunks {
i := private.ImageSourceChunk{
Offset: v.Offset,
@ -300,7 +302,7 @@ func (s *storageImageDestination) PutBlobPartial(ctx context.Context, chunkAcces
// reflected in the manifest that will be written.
// If the transport can not reuse the requested blob, TryReusingBlob returns (false, {}, nil); it returns a non-nil error only on an unexpected failure.
func (s *storageImageDestination) TryReusingBlobWithOptions(ctx context.Context, blobinfo types.BlobInfo, options private.TryReusingBlobOptions) (bool, types.BlobInfo, error) {
reused, info, err := s.tryReusingBlobAsPending(ctx, blobinfo, &options)
reused, info, err := s.tryReusingBlobAsPending(blobinfo, &options)
if err != nil || !reused || options.LayerIndex == nil {
return reused, info, err
}
@ -310,7 +312,7 @@ func (s *storageImageDestination) TryReusingBlobWithOptions(ctx context.Context,
// tryReusingBlobAsPending implements TryReusingBlobWithOptions, filling s.blobDiffIDs and other metadata.
// The caller must arrange the blob to be eventually committed using s.commitLayer().
func (s *storageImageDestination) tryReusingBlobAsPending(ctx context.Context, blobinfo types.BlobInfo, options *private.TryReusingBlobOptions) (bool, types.BlobInfo, error) {
func (s *storageImageDestination) tryReusingBlobAsPending(blobinfo types.BlobInfo, options *private.TryReusingBlobOptions) (bool, types.BlobInfo, error) {
// lock the entire method as it executes fairly quickly
s.lock.Lock()
defer s.lock.Unlock()
@ -795,14 +797,14 @@ func (s *storageImageDestination) Commit(ctx context.Context, unparsedToplevel t
// Add the non-layer blobs as data items. Since we only share layers, they should all be in files, so
// we just need to screen out the ones that are actually layers to get the list of non-layers.
dataBlobs := make(map[digest.Digest]struct{})
dataBlobs := set.New[digest.Digest]()
for blob := range s.filenames {
dataBlobs[blob] = struct{}{}
dataBlobs.Add(blob)
}
for _, layerBlob := range layerBlobs {
delete(dataBlobs, layerBlob.Digest)
dataBlobs.Delete(layerBlob.Digest)
}
for blob := range dataBlobs {
for _, blob := range dataBlobs.Values() {
v, err := os.ReadFile(s.filenames[blob])
if err != nil {
return fmt.Errorf("copying non-layer blob %q to image: %w", blob, err)
@ -883,9 +885,7 @@ func (s *storageImageDestination) PutManifest(ctx context.Context, manifestBlob
if err != nil {
return err
}
newBlob := make([]byte, len(manifestBlob))
copy(newBlob, manifestBlob)
s.manifest = newBlob
s.manifest = slices.Clone(manifestBlob)
s.manifestDigest = digest
return nil
}

View File

@ -43,7 +43,7 @@ func (s *storageImageCloser) Size() (int64, error) {
// newImage creates an image that also knows its size
func newImage(ctx context.Context, sys *types.SystemContext, s storageReference) (types.ImageCloser, error) {
src, err := newImageSource(ctx, sys, s)
src, err := newImageSource(sys, s)
if err != nil {
return nil, err
}

View File

@ -14,6 +14,7 @@ import (
"github.com/containers/storage"
digest "github.com/opencontainers/go-digest"
"github.com/sirupsen/logrus"
"golang.org/x/exp/slices"
)
// A storageReference holds an arbitrary name and/or an ID, which is a 32-byte
@ -52,17 +53,15 @@ func newReference(transport storageTransport, named reference.Named, id string)
// imageMatchesRepo returns true iff image.Names contains an element with the same repo as ref
func imageMatchesRepo(image *storage.Image, ref reference.Named) bool {
repo := ref.Name()
for _, name := range image.Names {
if named, err := reference.ParseNormalizedNamed(name); err == nil {
if named.Name() == repo {
return slices.ContainsFunc(image.Names, func(name string) bool {
if named, err := reference.ParseNormalizedNamed(name); err == nil && named.Name() == repo {
return true
}
}
}
return false
})
}
// multiArchImageMatchesSystemContext returns true if if the passed-in image both contains a
// multiArchImageMatchesSystemContext returns true if the passed-in image both contains a
// multi-arch manifest that matches the passed-in digest, and the image is the per-platform
// image instance that matches sys.
//
@ -170,11 +169,9 @@ func (s *storageReference) resolveImage(sys *types.SystemContext) (*storage.Imag
// sake of older consumers that don't know there's a whole list in there now.
if s.named != nil {
if digested, ok := s.named.(reference.Digested); ok {
for _, digest := range loadedImage.Digests {
if digest == digested.Digest() {
digest := digested.Digest()
if slices.Contains(loadedImage.Digests, digest) {
loadedImage.Digest = digest
break
}
}
}
}
@ -207,10 +204,10 @@ func (s storageReference) StringWithinTransport() string {
}
res := "[" + s.transport.store.GraphDriverName() + "@" + s.transport.store.GraphRoot() + "+" + s.transport.store.RunRoot() + optionsList + "]"
if s.named != nil {
res = res + s.named.String()
res += s.named.String()
}
if s.id != "" {
res = res + "@" + s.id
res += "@" + s.id
}
return res
}
@ -218,10 +215,10 @@ func (s storageReference) StringWithinTransport() string {
func (s storageReference) PolicyConfigurationIdentity() string {
res := "[" + s.transport.store.GraphDriverName() + "@" + s.transport.store.GraphRoot() + "]"
if s.named != nil {
res = res + s.named.String()
res += s.named.String()
}
if s.id != "" {
res = res + "@" + s.id
res += "@" + s.id
}
return res
}
@ -280,7 +277,7 @@ func (s storageReference) DeleteImage(ctx context.Context, sys *types.SystemCont
}
func (s storageReference) NewImageSource(ctx context.Context, sys *types.SystemContext) (types.ImageSource, error) {
return newImageSource(ctx, sys, s)
return newImageSource(sys, s)
}
func (s storageReference) NewImageDestination(ctx context.Context, sys *types.SystemContext) (types.ImageDestination, error) {

View File

@ -44,7 +44,7 @@ type storageImageSource struct {
}
// newImageSource sets up an image for reading.
func newImageSource(ctx context.Context, sys *types.SystemContext, imageRef storageReference) (*storageImageSource, error) {
func newImageSource(sys *types.SystemContext, imageRef storageReference) (*storageImageSource, error) {
// First, locate the image.
img, err := imageRef.resolveImage(sys)
if err != nil {
@ -186,7 +186,7 @@ func (s *storageImageSource) getBlobAndLayerID(digest digest.Digest, layers []st
}
// GetManifest() reads the image's manifest.
func (s *storageImageSource) GetManifest(ctx context.Context, instanceDigest *digest.Digest) (manifestBlob []byte, MIMEType string, err error) {
func (s *storageImageSource) GetManifest(ctx context.Context, instanceDigest *digest.Digest) (manifestBlob []byte, mimeType string, err error) {
if instanceDigest != nil {
key := manifestBigDataKey(*instanceDigest)
blob, err := s.imageRef.transport.store.ImageBigData(s.image.ID, key)

View File

@ -234,35 +234,30 @@ func (s *storageTransport) ParseReference(reference string) (types.ImageReferenc
reference = reference[closeIndex+1:]
// Peel off a "driver@" from the start.
driverInfo := ""
driverSplit := strings.SplitN(storeSpec, "@", 2)
if len(driverSplit) != 2 {
driverPart1, driverPart2, gotDriver := strings.Cut(storeSpec, "@")
if !gotDriver {
storeSpec = driverPart1
if storeSpec == "" {
return nil, ErrInvalidReference
}
} else {
driverInfo = driverSplit[0]
driverInfo = driverPart1
if driverInfo == "" {
return nil, ErrInvalidReference
}
storeSpec = driverSplit[1]
storeSpec = driverPart2
if storeSpec == "" {
return nil, ErrInvalidReference
}
}
// Peel off a ":options" from the end.
var options []string
optionsSplit := strings.SplitN(storeSpec, ":", 2)
if len(optionsSplit) == 2 {
options = strings.Split(optionsSplit[1], ",")
storeSpec = optionsSplit[0]
storeSpec, optionsPart, gotOptions := strings.Cut(storeSpec, ":")
if gotOptions {
options = strings.Split(optionsPart, ",")
}
// Peel off a "+runroot" from the new end.
runRootInfo := ""
runRootSplit := strings.SplitN(storeSpec, "+", 2)
if len(runRootSplit) == 2 {
runRootInfo = runRootSplit[1]
storeSpec = runRootSplit[0]
}
storeSpec, runRootInfo, _ := strings.Cut(storeSpec, "+") // runRootInfo is "" if there is no "+"
// The rest is our graph root.
rootInfo := storeSpec
// Check that any paths are absolute paths.

View File

@ -9,8 +9,8 @@ import (
"github.com/containers/image/v5/docker/reference"
"github.com/containers/image/v5/internal/image"
"github.com/containers/image/v5/types"
imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1"
"golang.org/x/exp/maps"
)
// ConfigUpdater is an interface that ImageReferences for "tarball" images also
@ -35,9 +35,7 @@ func (r *tarballReference) ConfigUpdate(config imgspecv1.Image, annotations map[
if r.annotations == nil {
r.annotations = make(map[string]string)
}
for k, v := range annotations {
r.annotations[k] = v
}
maps.Copy(r.annotations, annotations)
return nil
}
@ -73,7 +71,7 @@ func (r *tarballReference) NewImage(ctx context.Context, sys *types.SystemContex
func (r *tarballReference) DeleteImage(ctx context.Context, sys *types.SystemContext) error {
for _, filename := range r.filenames {
if err := os.Remove(filename); err != nil && !os.IsNotExist(err) {
return fmt.Errorf("error removing %q: %v", filename, err)
return fmt.Errorf("error removing %q: %w", filename, err)
}
}
return nil

View File

@ -18,6 +18,8 @@ import (
digest "github.com/opencontainers/go-digest"
imgspecs "github.com/opencontainers/image-spec/specs-go"
imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1"
"golang.org/x/exp/maps"
"golang.org/x/exp/slices"
)
type tarballImageSource struct {
@ -62,13 +64,13 @@ func (r *tarballReference) NewImageSource(ctx context.Context, sys *types.System
} else {
file, err = os.Open(filename)
if err != nil {
return nil, fmt.Errorf("error opening %q for reading: %v", filename, err)
return nil, fmt.Errorf("error opening %q for reading: %w", filename, err)
}
defer file.Close()
reader = file
fileinfo, err := file.Stat()
if err != nil {
return nil, fmt.Errorf("error reading size of %q: %v", filename, err)
return nil, fmt.Errorf("error reading size of %q: %w", filename, err)
}
blobSize = fileinfo.Size()
blobTime = fileinfo.ModTime()
@ -168,10 +170,6 @@ func (r *tarballReference) NewImageSource(ctx context.Context, sys *types.System
MediaType: blobTypes[i],
})
}
annotations := make(map[string]string)
for k, v := range r.annotations {
annotations[k] = v
}
manifest := imgspecv1.Manifest{
Versioned: imgspecs.Versioned{
SchemaVersion: 2,
@ -182,7 +180,7 @@ func (r *tarballReference) NewImageSource(ctx context.Context, sys *types.System
MediaType: imgspecv1.MediaTypeImageConfig,
},
Layers: layerDescriptors,
Annotations: annotations,
Annotations: maps.Clone(r.annotations),
}
// Encode the manifest.
@ -228,8 +226,10 @@ func (is *tarballImageSource) GetBlob(ctx context.Context, blobinfo types.BlobIn
return io.NopCloser(bytes.NewBuffer(is.config)), is.configSize, nil
}
// Maybe one of the layer blobs.
for i := range is.blobIDs {
if blobinfo.Digest == is.blobIDs[i] {
i := slices.Index(is.blobIDs, blobinfo.Digest)
if i == -1 {
return nil, -1, fmt.Errorf("no blob with digest %q found", blobinfo.Digest.String())
}
// We want to read that layer: open the file or memory block and hand it back.
if is.filenames[i] == "-" {
return io.NopCloser(bytes.NewBuffer(is.reference.stdin)), int64(len(is.reference.stdin)), nil
@ -239,9 +239,6 @@ func (is *tarballImageSource) GetBlob(ctx context.Context, blobinfo types.BlobIn
return nil, -1, fmt.Errorf("error opening %q: %v", is.filenames[i], err)
}
return reader, is.blobSizes[i], nil
}
}
return nil, -1, fmt.Errorf("no blob with digest %q found", blobinfo.Digest.String())
}
// GetManifest returns the image's manifest along with its MIME type (which may be empty when it can't be determined but the manifest is available).

View File

@ -25,24 +25,24 @@ import (
// ParseImageName converts a URL-like image name to a types.ImageReference.
func ParseImageName(imgName string) (types.ImageReference, error) {
// Keep this in sync with TransportFromImageName!
parts := strings.SplitN(imgName, ":", 2)
if len(parts) != 2 {
transportName, withinTransport, valid := strings.Cut(imgName, ":")
if !valid {
return nil, fmt.Errorf(`Invalid image name "%s", expected colon-separated transport:reference`, imgName)
}
transport := transports.Get(parts[0])
transport := transports.Get(transportName)
if transport == nil {
return nil, fmt.Errorf(`Invalid image name "%s", unknown transport "%s"`, imgName, parts[0])
return nil, fmt.Errorf(`Invalid image name "%s", unknown transport "%s"`, imgName, transportName)
}
return transport.ParseReference(parts[1])
return transport.ParseReference(withinTransport)
}
// TransportFromImageName converts an URL-like name to a types.ImageTransport or nil when
// the transport is unknown or when the input is invalid.
func TransportFromImageName(imageName string) types.ImageTransport {
// Keep this in sync with ParseImageName!
parts := strings.SplitN(imageName, ":", 2)
if len(parts) == 2 {
return transports.Get(parts[0])
transportName, _, valid := strings.Cut(imageName, ":")
if valid {
return transports.Get(transportName)
}
return nil
}

View File

@ -5,6 +5,7 @@ import (
"sort"
"sync"
"github.com/containers/image/v5/internal/set"
"github.com/containers/image/v5/types"
)
@ -66,22 +67,21 @@ func Register(t types.ImageTransport) {
// This is the generally recommended way to refer to images in the UI.
//
// NOTE: The returned string is not promised to be equal to the original input to ParseImageName;
// e.g. default attribute values omitted by the user may be filled in in the return value, or vice versa.
// e.g. default attribute values omitted by the user may be filled in the return value, or vice versa.
func ImageName(ref types.ImageReference) string {
return ref.Transport().Name() + ":" + ref.StringWithinTransport()
}
var deprecatedTransports = set.NewWithValues("atomic")
// ListNames returns a list of non deprecated transport names.
// Deprecated transports can be used, but are not presented to users.
func ListNames() []string {
kt.mu.Lock()
defer kt.mu.Unlock()
deprecated := map[string]bool{
"atomic": true,
}
var names []string
for _, transport := range kt.transports {
if !deprecated[transport.Name()] {
if !deprecatedTransports.Contains(transport.Name()) {
names = append(names, transport.Name())
}
}

View File

@ -11,7 +11,7 @@ import (
v1 "github.com/opencontainers/image-spec/specs-go/v1"
)
// ImageTransport is a top-level namespace for ways to to store/load an image.
// ImageTransport is a top-level namespace for ways to store/load an image.
// It should generally correspond to ImageSource/ImageDestination implementations.
//
// Note that ImageTransport is based on "ways the users refer to image storage", not necessarily on the underlying physical transport.
@ -48,7 +48,7 @@ type ImageReference interface {
// StringWithinTransport returns a string representation of the reference, which MUST be such that
// reference.Transport().ParseReference(reference.StringWithinTransport()) returns an equivalent reference.
// NOTE: The returned string is not promised to be equal to the original input to ParseReference;
// e.g. default attribute values omitted by the user may be filled in in the return value, or vice versa.
// e.g. default attribute values omitted by the user may be filled in the return value, or vice versa.
// WARNING: Do not use the return value in the UI to describe an image, it does not contain the Transport().Name() prefix;
// instead, see transports.ImageName().
StringWithinTransport() string
@ -138,7 +138,7 @@ type BlobInfo struct {
// or if it should be compressed or decompressed. The field defaults to
// preserve the original layer's compressedness.
// TODO: To remove together with CryptoOperation in re-design to remove
// field out out of BlobInfo.
// field out of BlobInfo.
CompressionOperation LayerCompression
// CompressionAlgorithm is used in Image.UpdateLayerInfos to set the correct
// MIME type for compressed layers (e.g., gzip or zstd). This field MUST be
@ -149,7 +149,7 @@ type BlobInfo struct {
// CryptoOperation is used in Image.UpdateLayerInfos to instruct
// whether the original layer was encrypted/decrypted
// TODO: To remove together with CompressionOperation in re-design to
// remove field out out of BlobInfo.
// remove field out of BlobInfo.
CryptoOperation LayerCrypto
// Before adding any fields to this struct, read the NOTE above.
}

View File

@ -8,10 +8,10 @@ const (
// VersionMinor is for functionality in a backwards-compatible manner
VersionMinor = 24
// VersionPatch is for backwards-compatible bug fixes
VersionPatch = 2
VersionPatch = 3
// VersionDev indicates development branch. Releases will be empty string.
VersionDev = ""
VersionDev = "-dev"
)
// Version is the specification version that the package types support.

View File

@ -1,5 +1,7 @@
package core
import "fmt"
func newChallenge(challengeType AcmeChallenge, token string) Challenge {
return Challenge{
Type: challengeType,
@ -25,3 +27,19 @@ func DNSChallenge01(token string) Challenge {
func TLSALPNChallenge01(token string) Challenge {
return newChallenge(ChallengeTypeTLSALPN01, token)
}
// NewChallenge constructs a random challenge of the given kind. It returns an
// error if the challenge type is unrecognized. If token is empty a random token
// will be generated, otherwise the provided token is used.
func NewChallenge(kind AcmeChallenge, token string) (Challenge, error) {
	// Dispatch table mapping each known challenge type to its constructor.
	ctors := map[AcmeChallenge]func(string) Challenge{
		ChallengeTypeHTTP01:    HTTPChallenge01,
		ChallengeTypeDNS01:     DNSChallenge01,
		ChallengeTypeTLSALPN01: TLSALPNChallenge01,
	}
	ctor, ok := ctors[kind]
	if !ok {
		return Challenge{}, fmt.Errorf("unrecognized challenge type %q", kind)
	}
	return ctor(token), nil
}

View File

@ -7,7 +7,8 @@ import (
// PolicyAuthority defines the public interface for the Boulder PA
// TODO(#5891): Move this interface to a more appropriate location.
type PolicyAuthority interface {
WillingToIssueWildcards(identifiers []identifier.ACMEIdentifier) error
ChallengesFor(domain identifier.ACMEIdentifier) ([]Challenge, error)
ChallengeTypeEnabled(t AcmeChallenge) bool
WillingToIssueWildcards([]identifier.ACMEIdentifier) error
ChallengesFor(identifier.ACMEIdentifier) ([]Challenge, error)
ChallengeTypeEnabled(AcmeChallenge) bool
CheckAuthz(*Authorization) error
}

View File

@ -2,7 +2,6 @@ package core
import (
"crypto"
"crypto/x509"
"encoding/base64"
"encoding/json"
"fmt"
@ -12,7 +11,7 @@ import (
"time"
"golang.org/x/crypto/ocsp"
"gopkg.in/square/go-jose.v2"
"gopkg.in/go-jose/go-jose.v2"
"github.com/letsencrypt/boulder/identifier"
"github.com/letsencrypt/boulder/probs"
@ -53,7 +52,6 @@ const (
type AcmeChallenge string
// These types are the available challenges
// TODO(#5009): Make this a custom type as well.
const (
ChallengeTypeHTTP01 = AcmeChallenge("http-01")
ChallengeTypeDNS01 = AcmeChallenge("dns-01")
@ -87,44 +85,10 @@ var OCSPStatusToInt = map[OCSPStatus]int{
// DNSPrefix is attached to DNS names in DNS challenges
const DNSPrefix = "_acme-challenge"
// CertificateRequest is just a CSR
//
// This data is unmarshalled from JSON by way of RawCertificateRequest, which
// represents the actual structure received from the client.
type CertificateRequest struct {
CSR *x509.CertificateRequest // The CSR
Bytes []byte // The original bytes of the CSR, for logging.
}
type RawCertificateRequest struct {
CSR JSONBuffer `json:"csr"` // The encoded CSR
}
// UnmarshalJSON provides an implementation for decoding CertificateRequest objects.
func (cr *CertificateRequest) UnmarshalJSON(data []byte) error {
var raw RawCertificateRequest
err := json.Unmarshal(data, &raw)
if err != nil {
return err
}
csr, err := x509.ParseCertificateRequest(raw.CSR)
if err != nil {
return err
}
cr.CSR = csr
cr.Bytes = raw.CSR
return nil
}
// MarshalJSON provides an implementation for encoding CertificateRequest objects.
func (cr CertificateRequest) MarshalJSON() ([]byte, error) {
return json.Marshal(RawCertificateRequest{
CSR: cr.CSR.Raw,
})
}
// Registration objects represent non-public metadata attached
// to account keys.
type Registration struct {
@ -373,9 +337,6 @@ type Authorization struct {
// slice and the order of these challenges may not be predictable.
Challenges []Challenge `json:"challenges,omitempty" db:"-"`
// This field is deprecated. It's filled in by WFE for the ACMEv1 API.
Combinations [][]int `json:"combinations,omitempty" db:"combinations"`
// Wildcard is a Boulder-specific Authorization field that indicates the
// authorization was created as a result of an order containing a name with
// a `*.`wildcard prefix. This will help convey to users that an
@ -399,38 +360,25 @@ func (authz *Authorization) FindChallengeByStringID(id string) int {
// SolvedBy will look through the Authorizations challenges, returning the type
// of the *first* challenge it finds with Status: valid, or an error if no
// challenge is valid.
func (authz *Authorization) SolvedBy() (*AcmeChallenge, error) {
func (authz *Authorization) SolvedBy() (AcmeChallenge, error) {
if len(authz.Challenges) == 0 {
return nil, fmt.Errorf("Authorization has no challenges")
return "", fmt.Errorf("Authorization has no challenges")
}
for _, chal := range authz.Challenges {
if chal.Status == StatusValid {
return &chal.Type, nil
return chal.Type, nil
}
}
return nil, fmt.Errorf("Authorization not solved by any challenge")
return "", fmt.Errorf("Authorization not solved by any challenge")
}
// JSONBuffer fields get encoded and decoded JOSE-style, in base64url encoding
// with stripped padding.
type JSONBuffer []byte
// URL-safe base64 encode that strips padding
func base64URLEncode(data []byte) string {
var result = base64.URLEncoding.EncodeToString(data)
return strings.TrimRight(result, "=")
}
// URL-safe base64 decoder that adds padding
func base64URLDecode(data string) ([]byte, error) {
var missing = (4 - len(data)%4) % 4
data += strings.Repeat("=", missing)
return base64.URLEncoding.DecodeString(data)
}
// MarshalJSON encodes a JSONBuffer for transmission.
func (jb JSONBuffer) MarshalJSON() (result []byte, err error) {
return json.Marshal(base64URLEncode(jb))
return json.Marshal(base64.RawURLEncoding.EncodeToString(jb))
}
// UnmarshalJSON decodes a JSONBuffer to an object.
@ -440,7 +388,7 @@ func (jb *JSONBuffer) UnmarshalJSON(data []byte) (err error) {
if err != nil {
return err
}
*jb, err = base64URLDecode(str)
*jb, err = base64.RawURLEncoding.DecodeString(strings.TrimRight(str, "="))
return
}

File diff suppressed because it is too large Load Diff

View File

@ -1,101 +0,0 @@
syntax = "proto3";
package core;
option go_package = "github.com/letsencrypt/boulder/core/proto";
// Challenge is the wire form of an ACME challenge. Field numbers are
// non-contiguous; presumably removed fields' numbers are not reused — TODO
// confirm against this file's revision history.
message Challenge {
int64 id = 1;
// type is the ACME challenge type string, e.g. "http-01" or "dns-01".
string type = 2;
string status = 6;
string uri = 9;
string token = 3;
string keyAuthorization = 5;
repeated ValidationRecord validationrecords = 10;
// error holds the problem document produced when validation failed.
ProblemDetails error = 7;
// NOTE(review): units for validated are not stated here; other timestamp
// fields in this file are Unix nanoseconds — confirm before relying on it.
int64 validated = 11;
}
// ValidationRecord describes one network attempt made while validating a
// challenge: the host contacted, the addresses resolved and used, and the URL.
message ValidationRecord {
string hostname = 1;
string port = 2;
repeated bytes addressesResolved = 3; // net.IP.MarshalText()
bytes addressUsed = 4; // net.IP.MarshalText()
repeated string authorities = 5;
string url = 6;
// A list of addresses tried before the address used (see
// core/objects.go and the comment on the ValidationRecord structure
// definition for more information.)
repeated bytes addressesTried = 7; // net.IP.MarshalText()
}
message ProblemDetails {
string problemType = 1;
string detail = 2;
int32 httpStatus = 3;
}
message Certificate {
int64 registrationID = 1;
string serial = 2;
string digest = 3;
bytes der = 4;
int64 issued = 5; // Unix timestamp (nanoseconds)
int64 expires = 6; // Unix timestamp (nanoseconds)
}
message CertificateStatus {
string serial = 1;
reserved 2; // previously subscriberApproved
string status = 3;
int64 ocspLastUpdated = 4;
int64 revokedDate = 5;
int64 revokedReason = 6;
int64 lastExpirationNagSent = 7;
bytes ocspResponse = 8;
int64 notAfter = 9;
bool isExpired = 10;
int64 issuerID = 11;
}
message Registration {
int64 id = 1;
bytes key = 2;
repeated string contact = 3;
bool contactsPresent = 4;
string agreement = 5;
bytes initialIP = 6;
int64 createdAt = 7; // Unix timestamp (nanoseconds)
string status = 8;
}
message Authorization {
string id = 1;
string identifier = 2;
int64 registrationID = 3;
string status = 4;
int64 expires = 5; // Unix timestamp (nanoseconds)
repeated core.Challenge challenges = 6;
reserved 7; // previously combinations
reserved 8; // previously v2
}
message Order {
int64 id = 1;
int64 registrationID = 2;
int64 expires = 3;
ProblemDetails error = 4;
string certificateSerial = 5;
reserved 6; // previously authorizations, deprecated in favor of v2Authorizations
string status = 7;
repeated string names = 8;
bool beganProcessing = 9;
int64 created = 10;
repeated int64 v2Authorizations = 11;
}
message CRLEntry {
string serial = 1;
int32 reason = 2;
int64 revokedAt = 3; // Unix timestamp (nanoseconds)
}

View File

@ -23,7 +23,7 @@ import (
"time"
"unicode"
jose "gopkg.in/square/go-jose.v2"
jose "gopkg.in/go-jose/go-jose.v2"
)
const Unspecified = "Unspecified"

View File

@ -1,56 +0,0 @@
// Code generated by "stringer -type=FeatureFlag"; DO NOT EDIT.

package features

import "strconv"

// Compile-time guard: if any FeatureFlag constant value changes, one of the
// array indexes below becomes an "invalid array index" compile error,
// signalling that the stringer must be regenerated.
func _() {
	// An "invalid array index" compiler error signifies that the constant values have changed.
	// Re-run the stringer command to generate them again.
	var x [1]struct{}
	_ = x[unused-0]
	_ = x[PrecertificateRevocation-1]
	_ = x[StripDefaultSchemePort-2]
	_ = x[NonCFSSLSigner-3]
	_ = x[StoreIssuerInfo-4]
	_ = x[StreamlineOrderAndAuthzs-5]
	_ = x[V1DisableNewValidations-6]
	_ = x[ExpirationMailerDontLookTwice-7]
	_ = x[OldTLSInbound-8]
	_ = x[OldTLSOutbound-9]
	_ = x[ROCSPStage1-10]
	_ = x[ROCSPStage2-11]
	_ = x[ROCSPStage3-12]
	_ = x[CAAValidationMethods-13]
	_ = x[CAAAccountURI-14]
	_ = x[EnforceMultiVA-15]
	_ = x[MultiVAFullResults-16]
	_ = x[MandatoryPOSTAsGET-17]
	_ = x[AllowV1Registration-18]
	_ = x[StoreRevokerInfo-19]
	_ = x[RestrictRSAKeySizes-20]
	_ = x[FasterNewOrdersRateLimit-21]
	_ = x[ECDSAForAll-22]
	_ = x[ServeRenewalInfo-23]
	_ = x[GetAuthzReadOnly-24]
	_ = x[GetAuthzUseIndex-25]
	_ = x[CheckFailedAuthorizationsFirst-26]
	_ = x[AllowReRevocation-27]
	_ = x[MozRevocationReasons-28]
	_ = x[SHA1CSRs-29]
	_ = x[AllowUnrecognizedFeatures-30]
	_ = x[RejectDuplicateCSRExtensions-31]
	_ = x[ROCSPStage6-32]
	_ = x[ROCSPStage7-33]
}

// _FeatureFlag_name concatenates every flag name; _FeatureFlag_index holds
// the starting offset of each name within it (one extra entry marks the end).
const _FeatureFlag_name = "unusedPrecertificateRevocationStripDefaultSchemePortNonCFSSLSignerStoreIssuerInfoStreamlineOrderAndAuthzsV1DisableNewValidationsExpirationMailerDontLookTwiceOldTLSInboundOldTLSOutboundROCSPStage1ROCSPStage2ROCSPStage3CAAValidationMethodsCAAAccountURIEnforceMultiVAMultiVAFullResultsMandatoryPOSTAsGETAllowV1RegistrationStoreRevokerInfoRestrictRSAKeySizesFasterNewOrdersRateLimitECDSAForAllServeRenewalInfoGetAuthzReadOnlyGetAuthzUseIndexCheckFailedAuthorizationsFirstAllowReRevocationMozRevocationReasonsSHA1CSRsAllowUnrecognizedFeaturesRejectDuplicateCSRExtensionsROCSPStage6ROCSPStage7"

var _FeatureFlag_index = [...]uint16{0, 6, 30, 52, 66, 81, 105, 128, 157, 170, 184, 195, 206, 217, 237, 250, 264, 282, 300, 319, 335, 354, 378, 389, 405, 421, 437, 467, 484, 504, 512, 537, 565, 576, 587}

// String returns the declared name of the flag, or "FeatureFlag(n)" for a
// value outside the generated range.
func (i FeatureFlag) String() string {
	if i < 0 || i >= FeatureFlag(len(_FeatureFlag_index)-1) {
		return "FeatureFlag(" + strconv.FormatInt(int64(i), 10) + ")"
	}
	return _FeatureFlag_name[_FeatureFlag_index[i]:_FeatureFlag_index[i+1]]
}

View File

@ -1,203 +0,0 @@
//go:generate stringer -type=FeatureFlag
package features
import (
"fmt"
"strings"
"sync"
)
// FeatureFlag identifies one boolean feature toggle. Declaration order is
// load-bearing: the generated stringer indexes names by these iota values,
// so flags must not be reordered without regenerating it.
type FeatureFlag int

const (
	unused FeatureFlag = iota // unused is used for testing
	// Deprecated features, these can be removed once stripped from production configs
	PrecertificateRevocation
	StripDefaultSchemePort
	NonCFSSLSigner
	StoreIssuerInfo
	StreamlineOrderAndAuthzs
	V1DisableNewValidations
	ExpirationMailerDontLookTwice
	OldTLSInbound
	OldTLSOutbound
	ROCSPStage1
	ROCSPStage2
	ROCSPStage3

	// Currently in-use features
	// Check CAA and respect validationmethods parameter.
	CAAValidationMethods
	// Check CAA and respect accounturi parameter.
	CAAAccountURI
	// EnforceMultiVA causes the VA to block on remote VA PerformValidation
	// requests in order to make a valid/invalid decision with the results.
	EnforceMultiVA
	// MultiVAFullResults will cause the main VA to wait for all of the remote VA
	// results, not just the threshold required to make a decision.
	MultiVAFullResults
	// MandatoryPOSTAsGET forbids legacy unauthenticated GET requests for ACME
	// resources.
	MandatoryPOSTAsGET
	// Allow creation of new registrations in ACMEv1.
	AllowV1Registration
	// StoreRevokerInfo enables storage of the revoker and a bool indicating if the row
	// was checked for extant unrevoked certificates in the blockedKeys table.
	StoreRevokerInfo
	// RestrictRSAKeySizes enables restriction of acceptable RSA public key moduli to
	// the common sizes (2048, 3072, and 4096 bits).
	RestrictRSAKeySizes
	// FasterNewOrdersRateLimit enables use of a separate table for counting the
	// new orders rate limit.
	FasterNewOrdersRateLimit
	// ECDSAForAll enables all accounts, regardless of their presence in the CA's
	// ecdsaAllowedAccounts config value, to get issuance from ECDSA issuers.
	ECDSAForAll
	// ServeRenewalInfo exposes the renewalInfo endpoint in the directory and for
	// GET requests. WARNING: This feature is a draft and highly unstable.
	ServeRenewalInfo
	// GetAuthzReadOnly causes the SA to use its read-only database connection
	// (which is generally pointed at a replica rather than the primary db) when
	// querying the authz2 table.
	GetAuthzReadOnly
	// GetAuthzUseIndex causes the SA to use to add a USE INDEX hint when it
	// queries the authz2 table.
	GetAuthzUseIndex
	// Check the failed authorization limit before doing authz reuse.
	CheckFailedAuthorizationsFirst
	// AllowReRevocation causes the RA to allow the revocation reason of an
	// already-revoked certificate to be updated to `keyCompromise` from any
	// other reason if that compromise is demonstrated by making the second
	// revocation request signed by the certificate keypair.
	AllowReRevocation
	// MozRevocationReasons causes the RA to enforce the following upcoming
	// Mozilla policies regarding revocation:
	// - A subscriber can request that their certificate be revoked with reason
	//   keyCompromise, even without demonstrating that compromise at the time.
	//   However, the cert's pubkey will not be added to the blocked keys list.
	// - When an applicant other than the original subscriber requests that a
	//   certificate be revoked (by demonstrating control over all names in it),
	//   the cert will be revoked with reason cessationOfOperation, regardless of
	//   what revocation reason they request.
	// - When anyone requests that a certificate be revoked by signing the request
	//   with the certificate's keypair, the cert will be revoked with reason
	//   keyCompromise, regardless of what revocation reason they request.
	MozRevocationReasons
	// SHA1CSRs controls whether the /acme/finalize endpoint rejects CSRs that
	// are self-signed using SHA1.
	SHA1CSRs
	// AllowUnrecognizedFeatures is internal to the features package: if true,
	// skip error when unrecognized feature flag names are passed.
	AllowUnrecognizedFeatures
	// RejectDuplicateCSRExtensions enables verification that submitted CSRs do
	// not contain duplicate extensions. This behavior will be on by default in
	// go1.19.
	RejectDuplicateCSRExtensions
	// ROCSPStage6 disables writing full OCSP Responses to MariaDB during
	// (pre)certificate issuance and during revocation. Because Stage 4 involved
	// disabling ocsp-updater, this means that no ocsp response bytes will be
	// written to the database anymore.
	ROCSPStage6
	// ROCSPStage7 disables generating OCSP responses during issuance and
	// revocation. This affects codepaths in both the RA (revocation) and the CA
	// (precert "birth certificates").
	ROCSPStage7
)
// List of features and their default value, protected by fMu.
// NOTE(review): every FeatureFlag constant must have an entry here —
// Enabled panics when asked about a flag missing from this map.
var features = map[FeatureFlag]bool{
	unused:                         false,
	CAAValidationMethods:           false,
	CAAAccountURI:                  false,
	EnforceMultiVA:                 false,
	MultiVAFullResults:             false,
	MandatoryPOSTAsGET:             false,
	AllowV1Registration:            true,
	V1DisableNewValidations:        false,
	PrecertificateRevocation:       false,
	StripDefaultSchemePort:         false,
	StoreIssuerInfo:                false,
	StoreRevokerInfo:               false,
	RestrictRSAKeySizes:            false,
	FasterNewOrdersRateLimit:       false,
	NonCFSSLSigner:                 false,
	ECDSAForAll:                    false,
	StreamlineOrderAndAuthzs:       false,
	ServeRenewalInfo:               false,
	GetAuthzReadOnly:               false,
	GetAuthzUseIndex:               false,
	CheckFailedAuthorizationsFirst: false,
	AllowReRevocation:              false,
	MozRevocationReasons:           false,
	OldTLSOutbound:                 true,
	OldTLSInbound:                  true,
	SHA1CSRs:                       true,
	AllowUnrecognizedFeatures:      false,
	ExpirationMailerDontLookTwice:  false,
	RejectDuplicateCSRExtensions:   false,
	ROCSPStage1:                    false,
	ROCSPStage2:                    false,
	ROCSPStage3:                    false,
	ROCSPStage6:                    false,
	ROCSPStage7:                    false,
}
// fMu guards all reads and writes of the features map.
var fMu = new(sync.RWMutex)
// initial records each flag's default value so Reset can restore it.
var initial = map[FeatureFlag]bool{}
// nameToFeature maps a flag's String() name back to its value; built in init.
var nameToFeature = make(map[string]FeatureFlag, len(features))
// init snapshots the defaults into initial and indexes every flag by its
// stringer name so Set can resolve names to FeatureFlag values.
func init() {
	for f, v := range features {
		nameToFeature[f.String()] = f
		initial[f] = v
	}
}
// Set accepts a map of feature names to the enabled/disabled state each
// should take. Unrecognized names are collected and reported as an error,
// unless the AllowUnrecognizedFeatures flag is itself enabled, in which
// case they are silently ignored.
func Set(featureSet map[string]bool) error {
	fMu.Lock()
	defer fMu.Unlock()
	var unknown []string
	for name, enabled := range featureSet {
		flag, ok := nameToFeature[name]
		if !ok {
			unknown = append(unknown, name)
			continue
		}
		features[flag] = enabled
	}
	if len(unknown) == 0 || features[AllowUnrecognizedFeatures] {
		return nil
	}
	return fmt.Errorf("unrecognized feature flag names: %s",
		strings.Join(unknown, ", "))
}
// Enabled reports whether the given feature is currently enabled. It panics
// when passed a flag that has no entry in the features map.
func Enabled(n FeatureFlag) bool {
	fMu.RLock()
	defer fMu.RUnlock()
	enabled, known := features[n]
	if !known {
		panic(fmt.Sprintf("feature '%s' doesn't exist", n.String()))
	}
	return enabled
}
// Reset restores every feature flag to the default value recorded at
// package-init time.
func Reset() {
	fMu.Lock()
	defer fMu.Unlock()
	for flag, def := range initial {
		features[flag] = def
	}
}

View File

@ -13,9 +13,6 @@ import (
"github.com/letsencrypt/boulder/core"
berrors "github.com/letsencrypt/boulder/errors"
"github.com/letsencrypt/boulder/features"
sapb "github.com/letsencrypt/boulder/sa/proto"
"google.golang.org/grpc"
"github.com/titanous/rocacheck"
)
@ -68,10 +65,12 @@ func badKey(msg string, args ...interface{}) error {
return fmt.Errorf("%w%s", ErrBadKey, fmt.Errorf(msg, args...))
}
// BlockedKeyCheckFunc is used to pass in the sa.BlockedKey method to KeyPolicy,
// rather than storing a full sa.SQLStorageAuthority. This makes testing
// BlockedKeyCheckFunc is used to pass in the sa.BlockedKey functionality to KeyPolicy,
// rather than storing a full sa.SQLStorageAuthority. This allows external
// users who dont want to import all of boulder/sa, and makes testing
// significantly simpler.
type BlockedKeyCheckFunc func(context.Context, *sapb.KeyBlockedRequest, ...grpc.CallOption) (*sapb.Exists, error)
// On success, the function returns a boolean which is true if the key is blocked.
type BlockedKeyCheckFunc func(ctx context.Context, keyHash []byte) (bool, error)
// KeyPolicy determines which types of key may be used with various boulder
// operations.
@ -82,7 +81,7 @@ type KeyPolicy struct {
weakRSAList *WeakRSAKeys
blockedList *blockedKeys
fermatRounds int
dbCheck BlockedKeyCheckFunc
blockedCheck BlockedKeyCheckFunc
}
// NewKeyPolicy returns a KeyPolicy that allows RSA, ECDSA256 and ECDSA384.
@ -97,7 +96,7 @@ func NewKeyPolicy(config *Config, bkc BlockedKeyCheckFunc) (KeyPolicy, error) {
AllowRSA: true,
AllowECDSANISTP256: true,
AllowECDSANISTP384: true,
dbCheck: bkc,
blockedCheck: bkc,
}
if config.WeakKeyFile != "" {
keyList, err := LoadWeakRSASuffixes(config.WeakKeyFile)
@ -142,15 +141,15 @@ func (policy *KeyPolicy) GoodKey(ctx context.Context, key crypto.PublicKey) erro
return badKey("public key is forbidden")
}
}
if policy.dbCheck != nil {
if policy.blockedCheck != nil {
digest, err := core.KeyDigest(key)
if err != nil {
return badKey("%w", err)
}
exists, err := policy.dbCheck(ctx, &sapb.KeyBlockedRequest{KeyHash: digest[:]})
exists, err := policy.blockedCheck(ctx, digest[:])
if err != nil {
return err
} else if exists.Exists {
} else if exists {
return badKey("public key is forbidden")
}
}
@ -275,6 +274,12 @@ func (policy *KeyPolicy) goodCurve(c elliptic.Curve) (err error) {
}
}
// Baseline Requirements, Section 6.1.5 requires key size >= 2048 and a multiple
// of 8 bits: https://github.com/cabforum/servercert/blob/main/docs/BR.md#615-key-sizes
// Baseline Requirements, Section 6.1.1.3 requires that we reject any keys which
// have a known method to easily compute their private key, such as Debian Weak
// Keys. Our enforcement mechanism relies on enumerating all Debian Weak Keys at
// common key sizes, so we restrict all issuance to those common key sizes.
var acceptableRSAKeySizes = map[int]bool{
2048: true,
3072: true,
@ -290,28 +295,13 @@ func (policy *KeyPolicy) goodKeyRSA(key *rsa.PublicKey) (err error) {
return badKey("key is on a known weak RSA key list")
}
// Baseline Requirements Appendix A
// Modulus must be >= 2048 bits and <= 4096 bits
modulus := key.N
// See comment on acceptableRSAKeySizes above.
modulusBitLen := modulus.BitLen()
if features.Enabled(features.RestrictRSAKeySizes) {
if !acceptableRSAKeySizes[modulusBitLen] {
return badKey("key size not supported: %d", modulusBitLen)
}
} else {
const maxKeySize = 4096
if modulusBitLen < 2048 {
return badKey("key too small: %d", modulusBitLen)
}
if modulusBitLen > maxKeySize {
return badKey("key too large: %d > %d", modulusBitLen, maxKeySize)
}
// Bit lengths that are not a multiple of 8 may cause problems on some
// client implementations.
if modulusBitLen%8 != 0 {
return badKey("key length wasn't a multiple of 8: %d", modulusBitLen)
}
}
// Rather than support arbitrary exponents, which significantly increases
// the size of the key space we allow, we restrict E to the defacto standard

File diff suppressed because it is too large Load Diff

Some files were not shown because too many files have changed in this diff Show More