Merge pull request #21844 from rhatdan/image

Vendor in latest containers/(image,storage)
openshift-merge-bot[bot] 2024-02-27 21:11:38 +00:00, committed by GitHub
commit fcce681ea1
GPG Key ID: B5690EEEBB952194 (no known key found for this signature in database)
52 changed files with 1520 additions and 545 deletions

go.mod (17 changed lines)

@@ -14,11 +14,11 @@ require (
github.com/containers/common v0.57.1-0.20240207210145-1eeaf97594e9
github.com/containers/conmon v2.0.20+incompatible
github.com/containers/gvisor-tap-vsock v0.7.3
-github.com/containers/image/v5 v5.29.2-0.20240130233108-e66a1ade2efc
+github.com/containers/image/v5 v5.29.3-0.20240227090231-5bef5e1e1506
github.com/containers/libhvee v0.6.1-0.20240225143609-c1bda9d3838c
github.com/containers/ocicrypt v1.1.9
github.com/containers/psgo v1.9.0
-github.com/containers/storage v1.52.1-0.20240202181245-1419a5980565
+github.com/containers/storage v1.52.1-0.20240227082351-387d7c8480b0
github.com/containers/winquit v1.1.0
github.com/coreos/go-systemd/v22 v22.5.1-0.20231103132048-7d375ecc2b09
github.com/coreos/stream-metadata-go v0.4.4
@@ -44,7 +44,7 @@ require (
github.com/klauspost/compress v1.17.7
github.com/linuxkit/virtsock v0.0.0-20220523201153-1a23e78aa7a2
github.com/mattn/go-shellwords v1.0.12
-github.com/mattn/go-sqlite3 v1.14.21
+github.com/mattn/go-sqlite3 v1.14.22
github.com/mdlayher/vsock v1.2.1
github.com/moby/sys/user v0.1.0
github.com/moby/term v0.5.0
@@ -54,7 +54,7 @@ require (
github.com/opencontainers/go-digest v1.0.0
github.com/opencontainers/image-spec v1.1.0
github.com/opencontainers/runc v1.1.12
-github.com/opencontainers/runtime-spec v1.1.1-0.20230823135140-4fec88fd00a4
+github.com/opencontainers/runtime-spec v1.2.0
github.com/opencontainers/runtime-tools v0.9.1-0.20230914150019-408c51e934dc
github.com/opencontainers/selinux v1.11.0
github.com/openshift/imagebuilder v1.2.6
@@ -69,7 +69,7 @@ require (
github.com/vbauerster/mpb/v8 v8.7.2
github.com/vishvananda/netlink v1.2.1-beta.2
go.etcd.io/bbolt v1.3.9
-golang.org/x/exp v0.0.0-20231226003508-02704c960a9b
+golang.org/x/exp v0.0.0-20240119083558-1b970713d09a
golang.org/x/net v0.21.0
golang.org/x/sync v0.6.0
golang.org/x/sys v0.17.0
@@ -86,7 +86,7 @@ require (
require (
dario.cat/mergo v1.0.0 // indirect
github.com/Azure/go-ansiterm v0.0.0-20230124172434-306776ec8161 // indirect
-github.com/Microsoft/hcsshim v0.12.0-rc.2 // indirect
+github.com/Microsoft/hcsshim v0.12.0-rc.3 // indirect
github.com/VividCortex/ewma v1.2.0 // indirect
github.com/acarl005/stripansi v0.0.0-20180116102854-5a71ef0e047d // indirect
github.com/aead/serpent v0.0.0-20160714141033-fba169763ea6 // indirect
@@ -97,6 +97,7 @@ require (
github.com/chzyer/readline v1.5.1 // indirect
github.com/containerd/cgroups/v3 v3.0.3 // indirect
github.com/containerd/containerd v1.7.13 // indirect
+github.com/containerd/errdefs v0.1.0 // indirect
github.com/containerd/log v0.1.0 // indirect
github.com/containerd/stargz-snapshotter/estargz v0.15.1 // indirect
github.com/containerd/typeurl/v2 v2.1.1 // indirect
@@ -210,9 +211,9 @@ require (
go.opentelemetry.io/otel/sdk v1.21.0 // indirect
go.opentelemetry.io/otel/trace v1.22.0 // indirect
golang.org/x/arch v0.7.0 // indirect
-golang.org/x/crypto v0.19.0 // indirect
+golang.org/x/crypto v0.20.0 // indirect
golang.org/x/mod v0.14.0 // indirect
-golang.org/x/oauth2 v0.16.0 // indirect
+golang.org/x/oauth2 v0.17.0 // indirect
golang.org/x/time v0.3.0 // indirect
golang.org/x/tools v0.17.0 // indirect
google.golang.org/appengine v1.6.8 // indirect
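The requirements bumped above are Go pseudo-versions (base release, UTC timestamp, commit prefix). For reference, golang.org/x/mod/semver orders them the same way the go command does; a minimal sketch using the two containers/image strings from this hunk:

```go
package main

import (
	"fmt"

	"golang.org/x/mod/semver"
)

func main() {
	before := "v5.29.2-0.20240130233108-e66a1ade2efc" // containers/image before this PR
	after := "v5.29.3-0.20240227090231-5bef5e1e1506"  // containers/image after this PR
	fmt.Println(semver.Compare(before, after))        // -1: the new pseudo-version sorts higher
}
```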

go.sum (36 changed lines)

@@ -10,8 +10,8 @@ github.com/BurntSushi/toml v1.3.2 h1:o7IhLm0Msx3BaB+n3Ag7L8EVlByGnpq14C4YWiu/gL8
github.com/BurntSushi/toml v1.3.2/go.mod h1:CxXYINrC8qIiEnFrOxCa7Jy5BFHlXnUU2pbicEuybxQ=
github.com/Microsoft/go-winio v0.6.1 h1:9/kr64B9VUZrLm5YYwbGtUJnMgqWVOdUAXu6Migciow=
github.com/Microsoft/go-winio v0.6.1/go.mod h1:LRdKpFKfdobln8UmuiYcKPot9D2v6svN5+sAH+4kjUM=
-github.com/Microsoft/hcsshim v0.12.0-rc.2 h1:gfKebjq3Mq17Ys+4cjE8vc2h6tZVeqCGb9a7vBVqpAk=
-github.com/Microsoft/hcsshim v0.12.0-rc.2/go.mod h1:G2TZhBED5frlh/hsuxV5CDh/ylkSFknPAMPpQg9owQw=
+github.com/Microsoft/hcsshim v0.12.0-rc.3 h1:5GNGrobGs/sN/0nFO21W9k4lFn+iXXZAE8fCZbmdRak=
+github.com/Microsoft/hcsshim v0.12.0-rc.3/go.mod h1:WuNfcaYNaw+KpCEsZCIM6HCEmu0c5HfXpi+dDSmveP0=
github.com/Netflix/go-expect v0.0.0-20220104043353-73e0943537d2 h1:+vx7roKuyA63nhn5WAunQHLTznkw5W8b1Xc0dNjp83s=
github.com/PuerkitoBio/purell v1.1.1/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0=
github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE=
@@ -62,6 +62,8 @@ github.com/containerd/cgroups/v3 v3.0.3 h1:S5ByHZ/h9PMe5IOQoN7E+nMc2UcLEM/V48DGD
github.com/containerd/cgroups/v3 v3.0.3/go.mod h1:8HBe7V3aWGLFPd/k03swSIsGjZhHI2WzJmticMgVuz0=
github.com/containerd/containerd v1.7.13 h1:wPYKIeGMN8vaggSKuV1X0wZulpMz4CrgEsZdaCyB6Is=
github.com/containerd/containerd v1.7.13/go.mod h1:zT3up6yTRfEUa6+GsITYIJNgSVL9NQ4x4h1RPzk0Wu4=
+github.com/containerd/errdefs v0.1.0 h1:m0wCRBiu1WJT/Fr+iOoQHMQS/eP5myQ8lCv4Dz5ZURM=
+github.com/containerd/errdefs v0.1.0/go.mod h1:YgWiiHtLmSeBrvpw+UfPijzbLaB77mEG1WwJTDETIV0=
github.com/containerd/log v0.1.0 h1:TCJt7ioM2cr/tfR8GPbGf9/VRAX8D2B4PjzCpfX540I=
github.com/containerd/log v0.1.0/go.mod h1:VRRf09a7mHDIRezVKTRCrOq78v577GXq3bSa3EhrzVo=
github.com/containerd/stargz-snapshotter/estargz v0.15.1 h1:eXJjw9RbkLFgioVaTG+G/ZW/0kEe2oEKCdS/ZxIyoCU=
@@ -80,8 +82,8 @@ github.com/containers/conmon v2.0.20+incompatible h1:YbCVSFSCqFjjVwHTPINGdMX1F6J
github.com/containers/conmon v2.0.20+incompatible/go.mod h1:hgwZ2mtuDrppv78a/cOBNiCm6O0UMWGx1mu7P00nu5I=
github.com/containers/gvisor-tap-vsock v0.7.3 h1:yORnf15sP+sLFhxLNLgmB5/lOhldn9dRMHx/tmYtSOQ=
github.com/containers/gvisor-tap-vsock v0.7.3/go.mod h1:NI1fLMtKXQZoDrrOeqryGz7x7j/XSFWRmQILva7Fu9c=
-github.com/containers/image/v5 v5.29.2-0.20240130233108-e66a1ade2efc h1:3I5+mrrG7Fuv4aA13t1hAMQcjN3rTAQInfbxa5P+XH4=
-github.com/containers/image/v5 v5.29.2-0.20240130233108-e66a1ade2efc/go.mod h1:oMMRA6avp1Na54lVPCj/OvcfXDMLlzfy3H7xeRiWmmI=
+github.com/containers/image/v5 v5.29.3-0.20240227090231-5bef5e1e1506 h1:Wdkl9fdxXYp2zaiw0GWC7fF8DSqD72B5jhILY0qK/sU=
+github.com/containers/image/v5 v5.29.3-0.20240227090231-5bef5e1e1506/go.mod h1:2/7sa5zJsx3Gl4v2MHkBrSMxsQePJcx9EDehbxmxlKE=
github.com/containers/libhvee v0.6.1-0.20240225143609-c1bda9d3838c h1:C80Xw6cDHkx0zMJk/Qkczcz/1OOVEF9+6iHuEZbD47k=
github.com/containers/libhvee v0.6.1-0.20240225143609-c1bda9d3838c/go.mod h1:zX7HGsRwCxBOpzc8Jvwq2aEaECsb2q5/l5HqB9n7UPc=
github.com/containers/libtrust v0.0.0-20230121012942-c1716e8a8d01 h1:Qzk5C6cYglewc+UyGf6lc8Mj2UaPTHy/iF2De0/77CA=
@@ -92,8 +94,8 @@ github.com/containers/ocicrypt v1.1.9 h1:2Csfba4jse85Raxk5HIyEk8OwZNjRvfkhEGijOj
github.com/containers/ocicrypt v1.1.9/go.mod h1:dTKx1918d8TDkxXvarscpNVY+lyPakPNFN4jwA9GBys=
github.com/containers/psgo v1.9.0 h1:eJ74jzSaCHnWt26OlKZROSyUyRcGDf+gYBdXnxrMW4g=
github.com/containers/psgo v1.9.0/go.mod h1:0YoluUm43Mz2UnBIh1P+6V6NWcbpTL5uRtXyOcH0B5A=
-github.com/containers/storage v1.52.1-0.20240202181245-1419a5980565 h1:Gcirfx2DNoayB/+ypLgl5+ABzIPPDAoncs1qgZHHQHE=
-github.com/containers/storage v1.52.1-0.20240202181245-1419a5980565/go.mod h1:2E/QBqWVcJXwumP7nVUrampwRNL4XKjHL/aQya7ZdhI=
+github.com/containers/storage v1.52.1-0.20240227082351-387d7c8480b0 h1:n5UoWBdTeOGN5rlV49ZqS8E4vgo8Z6LxFFgbCDNaU+c=
+github.com/containers/storage v1.52.1-0.20240227082351-387d7c8480b0/go.mod h1:mFA6QpUoT9qTa3q2DD1CvSo3Az3syNkw1P9X+4nUYdY=
github.com/containers/winquit v1.1.0 h1:jArun04BNDQvt2W0Y78kh9TazN2EIEMG5Im6/JY7+pE=
github.com/containers/winquit v1.1.0/go.mod h1:PsPeZlnbkmGGIToMPHF1zhWjBUkd8aHjMOr/vFcPxw8=
github.com/coreos/go-oidc/v3 v3.9.0 h1:0J/ogVOd4y8P0f0xUh8l9t07xRP/d8tccvjHl2dcsSo=
@@ -127,7 +129,7 @@ github.com/disiqueira/gotree/v3 v3.0.2 h1:ik5iuLQQoufZBNPY518dXhiO5056hyNBIK9lWh
github.com/disiqueira/gotree/v3 v3.0.2/go.mod h1:ZuyjE4+mUQZlbpkI24AmruZKhg3VHEgPLDY8Qk+uUu8=
github.com/distribution/reference v0.5.0 h1:/FUIFXtfc/x2gpa5/VGfiGLuOIdYa1t65IKK2OFGvA0=
github.com/distribution/reference v0.5.0/go.mod h1:BbU0aIcezP1/5jX/8MP0YiH4SdvB5Y4f/wlDRiLyi3E=
-github.com/docker/cli v25.0.1+incompatible h1:mFpqnrS6Hsm3v1k7Wa/BO23oz0k121MTbTO1lpcGSkU=
+github.com/docker/cli v25.0.3+incompatible h1:KLeNs7zws74oFuVhgZQ5ONGZiXUUdgsdy6/EsX/6284=
github.com/docker/distribution v2.8.3+incompatible h1:AtKxIZ36LoNK51+Z6RpzLpddBirtxJnzDrHLEKxTAYk=
github.com/docker/distribution v2.8.3+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w=
github.com/docker/docker v25.0.3+incompatible h1:D5fy/lYmY7bvZa0XTZ5/UJPljor41F+vdyJG5luQLfQ=
@@ -393,8 +395,8 @@ github.com/mattn/go-runewidth v0.0.15 h1:UNAjwbU9l54TA3KzvqLGxwWjHmMgBUVhBiTjelZ
github.com/mattn/go-runewidth v0.0.15/go.mod h1:Jdepj2loyihRzMpdS35Xk/zdY8IAYHsh153qUoGf23w=
github.com/mattn/go-shellwords v1.0.12 h1:M2zGm7EW6UQJvDeQxo4T51eKPurbeFbe8WtebGE2xrk=
github.com/mattn/go-shellwords v1.0.12/go.mod h1:EZzvwXDESEeg03EKmM+RmDnNOPKG4lLtQsUlTZDWQ8Y=
-github.com/mattn/go-sqlite3 v1.14.21 h1:IXocQLOykluc3xPE0Lvy8FtggMz1G+U3mEjg+0zGizc=
-github.com/mattn/go-sqlite3 v1.14.21/go.mod h1:Uh1q+B4BYcTPb+yiD3kU8Ct7aC0hY9fxUwlHK0RXw+Y=
+github.com/mattn/go-sqlite3 v1.14.22 h1:2gZY6PC6kBnID23Tichd1K+Z0oS6nE/XwU+Vz/5o4kU=
+github.com/mattn/go-sqlite3 v1.14.22/go.mod h1:Uh1q+B4BYcTPb+yiD3kU8Ct7aC0hY9fxUwlHK0RXw+Y=
github.com/matttproud/golang_protobuf_extensions v1.0.4 h1:mmDVorXM7PCGKw94cs5zkfA9PSy5pEvNWRP0ET0TIVo=
github.com/mdlayher/packet v1.1.2 h1:3Up1NG6LZrsgDVn6X4L9Ge/iyRyxFEFD9o6Pr3Q1nQY=
github.com/mdlayher/socket v0.4.1 h1:eM9y2/jlbs1M615oshPQOHZzj6R6wMT7bX5NPiQvn2U=
@@ -453,8 +455,8 @@ github.com/opencontainers/image-spec v1.1.0 h1:8SG7/vwALn54lVB/0yZ/MMwhFrPYtpEHQ
github.com/opencontainers/image-spec v1.1.0/go.mod h1:W4s4sFTMaBeK1BQLXbG4AdM2szdn85PY75RI83NrTrM=
github.com/opencontainers/runc v1.1.1-0.20240131200429-02120488a4c0 h1:NwSQ/5rex97Rum/xZOMjlDQbbZ8YJKOTihf9sxqHxtE=
github.com/opencontainers/runc v1.1.1-0.20240131200429-02120488a4c0/go.mod h1:tBsQqk9ETVlXxzXjk2Xh/1VjxC/U3Gaq5ps/rC/cadE=
-github.com/opencontainers/runtime-spec v1.1.1-0.20230823135140-4fec88fd00a4 h1:EctkgBjZ1y4q+sibyuuIgiKpa0QSd2elFtSSdNvBVow=
-github.com/opencontainers/runtime-spec v1.1.1-0.20230823135140-4fec88fd00a4/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0=
+github.com/opencontainers/runtime-spec v1.2.0 h1:z97+pHb3uELt/yiAWD691HNHQIF07bE7dzrbT927iTk=
+github.com/opencontainers/runtime-spec v1.2.0/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0=
github.com/opencontainers/runtime-tools v0.9.1-0.20230914150019-408c51e934dc h1:d2hUh5O6MRBvStV55MQ8we08t42zSTqBbscoQccWmMc=
github.com/opencontainers/runtime-tools v0.9.1-0.20230914150019-408c51e934dc/go.mod h1:8tx1helyqhUC65McMm3x7HmOex8lO2/v9zPuxmKHurs=
github.com/opencontainers/selinux v1.11.0 h1:+5Zbo97w3Lbmb3PeqQtpmTkMwsW5nRI3YaLpt7tQ7oU=
@@ -641,11 +643,11 @@ golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPh
golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
golang.org/x/crypto v0.0.0-20220622213112-05595931fe9d/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4=
golang.org/x/crypto v0.1.0/go.mod h1:RecgLatLF4+eUMCP1PoPZQb+cVrJcOPbHkTkbkB9sbw=
-golang.org/x/crypto v0.19.0 h1:ENy+Az/9Y1vSrlrvBSyna3PITt4tiZLf7sgCjZBX7Wo=
-golang.org/x/crypto v0.19.0/go.mod h1:Iy9bg/ha4yyC70EfRS8jz+B6ybOBKMaSxLj6P6oBDfU=
+golang.org/x/crypto v0.20.0 h1:jmAMJJZXr5KiCw05dfYK9QnqaqKLYXijU23lsEdcQqg=
+golang.org/x/crypto v0.20.0/go.mod h1:Xwo95rrVNIoSMx9wa1JroENMToLWn3RNVrTBpLHgZPQ=
golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
-golang.org/x/exp v0.0.0-20231226003508-02704c960a9b h1:kLiC65FbiHWFAOu+lxwNPujcsl8VYyTYYEZnsOO1WK4=
-golang.org/x/exp v0.0.0-20231226003508-02704c960a9b/go.mod h1:iRJReGqOEeBhDZGkGbynYwcHlctCvnjTYIamk7uXpHI=
+golang.org/x/exp v0.0.0-20240119083558-1b970713d09a h1:Q8/wZp0KX97QFTc2ywcOE0YRjZPVIx+MXInMzdvQqcA=
+golang.org/x/exp v0.0.0-20240119083558-1b970713d09a/go.mod h1:idGWGoKP1toJGkd5/ig9ZLuPcZBC3ewk7SzmH0uou08=
golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU=
golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
@@ -676,8 +678,8 @@ golang.org/x/net v0.1.0/go.mod h1:Cx3nUiGt4eDBEyega/BKRp+/AlGL8hYe7U9odMt2Cco=
golang.org/x/net v0.21.0 h1:AQyQV4dYCvJ7vGmJyKki9+PBdyvhkSd8EIx/qb0AYv4=
golang.org/x/net v0.21.0/go.mod h1:bIjVDfnllIU7BJ2DNgfnXvpSvtn8VRwhlsaeUTyUS44=
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
-golang.org/x/oauth2 v0.16.0 h1:aDkGMBSYxElaoP81NpoUoz2oo2R2wHdZpGToUxfyQrQ=
-golang.org/x/oauth2 v0.16.0/go.mod h1:hqZ+0LWXsiVoZpeld6jVt06P3adbS2Uu911W1SsJv2o=
+golang.org/x/oauth2 v0.17.0 h1:6m3ZPmLEFdVxKKWnKq4VqZ60gutO35zm+zrAHVmHyDQ=
+golang.org/x/oauth2 v0.17.0/go.mod h1:OzPDGQiuQMguemayvdylqddI7qcD9lnSDb+1FiwQ5HA=
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
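Each go.sum pair above records two base64 SHA-256 digests: the h1: of the module's zipped file tree and a digest of its go.mod alone. A hedged sketch of recomputing an h1: value with golang.org/x/mod (the zip path is a hypothetical module-cache file):

```go
package main

import (
	"fmt"
	"log"

	"golang.org/x/mod/sumdb/dirhash"
)

func main() {
	// Hypothetical path to a module zip from the local module cache;
	// go.sum "h1:" entries are dirhash.Hash1 digests of exactly such zips.
	h, err := dirhash.HashZip("image@v5.29.3-0.20240227090231-5bef5e1e1506.zip", dirhash.Hash1)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(h) // e.g. "h1:Wdkl9fdxXYp2zaiw0GWC7fF8DSqD72B5jhILY0qK/sU="
}
```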

@@ -29,12 +29,23 @@ ifeq "$(DEV_BUILD)" "1"
DELTA_TARGET=out/delta-dev.tar.gz
endif
ifeq "$(SNP_BUILD)" "1"
DELTA_TARGET=out/delta-snp.tar.gz
endif
# The link aliases for gcstools
GCS_TOOLS=\
generichook \
install-drivers
-.PHONY: all always rootfs test
+# Common path prefix.
+PATH_PREFIX:=
+# These have PATH_PREFIX prepended to obtain the full path in recipies e.g. $(PATH_PREFIX)/$(VMGS_TOOL)
+VMGS_TOOL:=
+IGVM_TOOL:=
+KERNEL_PATH:=
+.PHONY: all always rootfs test snp simple
.DEFAULT_GOAL := all
@@ -49,9 +60,58 @@ test:
rootfs: out/rootfs.vhd
-out/rootfs.vhd: out/rootfs.tar.gz bin/cmd/tar2ext4
+snp: out/kernelinitrd.vmgs out/rootfs.hash.vhd out/rootfs.vhd out/v2056.vmgs
+simple: out/simple.vmgs snp
+%.vmgs: %.bin
+	rm -f $@
+	# du -BM returns the size of the bin file in M, eg 7M. The sed command replaces the M with *1024*1024 and then bc does the math to convert to bytes
+	$(PATH_PREFIX)/$(VMGS_TOOL) create --filepath $@ --filesize `du -BM $< | sed "s/M.*/*1024*1024/" | bc`
+	$(PATH_PREFIX)/$(VMGS_TOOL) write --filepath $@ --datapath $< -i=8
+# Simplest debug UVM used to test changes to the linux kernel. No dmverity protection. Boots an initramdisk rather than directly booting a vhd disk.
+out/simple.bin: out/initrd.img $(PATH_PREFIX)/$(KERNEL_PATH) boot/startup_simple.sh
+	rm -f $@
+	python3 $(PATH_PREFIX)/$(IGVM_TOOL) -o $@ -kernel $(PATH_PREFIX)/$(KERNEL_PATH) -append "8250_core.nr_uarts=0 panic=-1 debug loglevel=7 rdinit=/startup_simple.sh" -rdinit out/initrd.img -vtl 0
+ROOTFS_DEVICE:=/dev/sda
+VERITY_DEVICE:=/dev/sdb
+# Debug build for use with uvmtester. UVM with dm-verity protected vhd disk mounted directly via the kernel command line. Ignores corruption in dm-verity protected disk. (Use dmesg to see if dm-verity is ignoring data corruption.)
+out/v2056.bin: out/rootfs.vhd out/rootfs.hash.vhd $(PATH_PREFIX)/$(KERNEL_PATH) out/rootfs.hash.datasectors out/rootfs.hash.datablocksize out/rootfs.hash.hashblocksize out/rootfs.hash.datablocks out/rootfs.hash.rootdigest out/rootfs.hash.salt boot/startup_v2056.sh
+	rm -f $@
+	python3 $(PATH_PREFIX)/$(IGVM_TOOL) -o $@ -kernel $(PATH_PREFIX)/$(KERNEL_PATH) -append "8250_core.nr_uarts=0 panic=-1 debug loglevel=7 root=/dev/dm-0 dm-mod.create=\"dmverity,,,ro,0 $(shell cat out/rootfs.hash.datasectors) verity 1 $(ROOTFS_DEVICE) $(VERITY_DEVICE) $(shell cat out/rootfs.hash.datablocksize) $(shell cat out/rootfs.hash.hashblocksize) $(shell cat out/rootfs.hash.datablocks) 0 sha256 $(shell cat out/rootfs.hash.rootdigest) $(shell cat out/rootfs.hash.salt) 1 ignore_corruption\" init=/startup_v2056.sh" -vtl 0
+# Full UVM with dm-verity protected vhd disk mounted directly via the kernel command line.
+out/kernelinitrd.bin: out/rootfs.vhd out/rootfs.hash.vhd out/rootfs.hash.datasectors out/rootfs.hash.datablocksize out/rootfs.hash.hashblocksize out/rootfs.hash.datablocks out/rootfs.hash.rootdigest out/rootfs.hash.salt $(PATH_PREFIX)/$(KERNEL_PATH) boot/startup.sh
+	rm -f $@
+	python3 $(PATH_PREFIX)/$(IGVM_TOOL) -o $@ -kernel $(PATH_PREFIX)/$(KERNEL_PATH) -append "8250_core.nr_uarts=0 panic=-1 debug loglevel=7 root=/dev/dm-0 dm-mod.create=\"dmverity,,,ro,0 $(shell cat out/rootfs.hash.datasectors) verity 1 $(ROOTFS_DEVICE) $(VERITY_DEVICE) $(shell cat out/rootfs.hash.datablocksize) $(shell cat out/rootfs.hash.hashblocksize) $(shell cat out/rootfs.hash.datablocks) 0 sha256 $(shell cat out/rootfs.hash.rootdigest) $(shell cat out/rootfs.hash.salt)\" init=/startup.sh" -vtl 0
+# Rule to make a vhd from a file. This is used to create the rootfs.hash.vhd from rootfs.hash.
+%.vhd: % bin/cmd/tar2ext4
+	./bin/cmd/tar2ext4 -only-vhd -i $< -o $@
+# Rule to make a vhd from an ext4 file. This is used to create the rootfs.vhd from rootfs.ext4.
+%.vhd: %.ext4 bin/cmd/tar2ext4
+	./bin/cmd/tar2ext4 -only-vhd -i $< -o $@
+%.hash %.hash.info %.hash.datablocks %.hash.rootdigest %hash.datablocksize %.hash.datasectors %.hash.hashblocksize: %.ext4 %.hash.salt
+	veritysetup format --no-superblock --salt $(shell cat out/rootfs.hash.salt) $< $*.hash > $*.hash.info
+	# Retrieve info required by dm-verity at boot time
+	# Get the blocksize of rootfs
+	cat $*.hash.info | awk '/^Root hash:/{ print $$3 }' > $*.hash.rootdigest
+	cat $*.hash.info | awk '/^Salt:/{ print $$2 }' > $*.hash.salt
+	cat $*.hash.info | awk '/^Data block size:/{ print $$4 }' > $*.hash.datablocksize
+	cat $*.hash.info | awk '/^Hash block size:/{ print $$4 }' > $*.hash.hashblocksize
+	cat $*.hash.info | awk '/^Data blocks:/{ print $$3 }' > $*.hash.datablocks
+	echo $$(( $$(cat $*.hash.datablocks) * $$(cat $*.hash.datablocksize) / 512 )) > $*.hash.datasectors
+out/rootfs.hash.salt:
+	hexdump -vn32 -e'8/4 "%08X" 1 "\n"' /dev/random > $@
+out/rootfs.ext4: out/rootfs.tar.gz bin/cmd/tar2ext4
	gzip -f -d ./out/rootfs.tar.gz
-	bin/cmd/tar2ext4 -vhd -i ./out/rootfs.tar -o $@
+	./bin/cmd/tar2ext4 -i ./out/rootfs.tar -o $@
out/rootfs.tar.gz: out/initrd.img
	rm -rf rootfs-conv
@@ -74,6 +134,20 @@ out/delta-dev.tar.gz: out/delta.tar.gz bin/internal/tools/snp-report
	tar -zcf $@ -C rootfs-dev .
	rm -rf rootfs-dev
+out/delta-snp.tar.gz: out/delta.tar.gz bin/internal/tools/snp-report boot/startup_v2056.sh boot/startup_simple.sh boot/startup.sh
+	rm -rf rootfs-snp
+	mkdir rootfs-snp
+	tar -xzf out/delta.tar.gz -C rootfs-snp
+	cp boot/startup_v2056.sh rootfs-snp/startup_v2056.sh
+	cp boot/startup_simple.sh rootfs-snp/startup_simple.sh
+	cp boot/startup.sh rootfs-snp/startup.sh
+	cp bin/internal/tools/snp-report rootfs-snp/bin/
+	chmod a+x rootfs-snp/startup_v2056.sh
+	chmod a+x rootfs-snp/startup_simple.sh
+	chmod a+x rootfs-snp/startup.sh
+	tar -zcf $@ -C rootfs-snp .
+	rm -rf rootfs-snp
out/delta.tar.gz: bin/init bin/vsockexec bin/cmd/gcs bin/cmd/gcstools bin/cmd/hooks/wait-paths Makefile
	@mkdir -p out
	rm -rf rootfs
@@ -94,7 +168,10 @@ out/delta.tar.gz: bin/init bin/vsockexec bin/cmd/gcs bin/cmd/gcstools bin/cmd/ho
	tar -zcf $@ -C rootfs .
	rm -rf rootfs
-bin/cmd/gcs bin/cmd/gcstools bin/cmd/hooks/wait-paths bin/cmd/tar2ext4 bin/internal/tools/snp-report:
+out/containerd-shim-runhcs-v1.exe:
+	GOOS=windows $(GO_BUILD) -o $@ $(SRCROOT)/cmd/containerd-shim-runhcs-v1
+bin/cmd/gcs bin/cmd/gcstools bin/cmd/hooks/wait-paths bin/cmd/tar2ext4 bin/internal/tools/snp-report bin/cmd/dmverity-vhd:
	@mkdir -p $(dir $@)
	GOOS=linux $(GO_BUILD) -o $@ $(SRCROOT)/$(@:bin/%=%)
@@ -108,4 +185,4 @@ bin/init: init/init.o vsockexec/vsock.o
%.o: %.c
	@mkdir -p $(dir $@)
-	$(CC) $(CFLAGS) $(CPPFLAGS) -c -o $@ $<
\ No newline at end of file
+	$(CC) $(CFLAGS) $(CPPFLAGS) -c -o $@ $<
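The %.hash.datasectors recipe above converts the dm-verity data size into 512-byte sectors, the unit that device-mapper tables (and therefore the dm-mod.create= argument in the kernel command lines) count in. The same arithmetic as a standalone sketch, with hypothetical block counts of the kind veritysetup format reports:

```go
package main

import "fmt"

func main() {
	// Hypothetical example values read from veritysetup's output:
	dataBlocks := int64(25600)   // "Data blocks:"
	dataBlockSize := int64(4096) // "Data block size:"

	// Device-mapper tables are expressed in 512-byte sectors.
	sectors := dataBlocks * dataBlockSize / 512
	fmt.Println(sectors) // 204800
}
```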

@@ -6,7 +6,7 @@ import (
	"net"
	"os"
-	"github.com/containerd/containerd/errdefs"
+	"github.com/containerd/errdefs"
	"google.golang.org/grpc/codes"
	"google.golang.org/grpc/status"
)
@@ -16,7 +16,7 @@ import (
func toStatusCode(err error) codes.Code {
	// checks if err implements GRPCStatus() *"google.golang.org/grpc/status".Status,
-	// wraps an error defined in "github.com/containerd/containerd/errdefs", or is a
+	// wraps an error defined in "github.com/containerd/errdefs", or is a
	// context timeout or cancelled error
	if s, ok := status.FromError(errdefs.ToGRPC(err)); ok {
		return s.Code()
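The hunk above only swaps the import path; the standalone github.com/containerd/errdefs module keeps the API of the old github.com/containerd/containerd/errdefs package. A minimal sketch of the pattern toStatusCode builds on (the wrapped message is a made-up example):

```go
package main

import (
	"fmt"

	"github.com/containerd/errdefs"
	"google.golang.org/grpc/status"
)

func main() {
	err := fmt.Errorf("no such task: %w", errdefs.ErrNotFound) // illustrative error
	if s, ok := status.FromError(errdefs.ToGRPC(err)); ok {
		fmt.Println(s.Code()) // NotFound
	}
}
```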

vendor/github.com/containerd/errdefs/LICENSE (new vendored file, 191 lines)

@@ -0,0 +1,191 @@
Apache License
Version 2.0, January 2004
https://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
Copyright The containerd Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
https://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.

vendor/github.com/containerd/errdefs/README.md (new vendored file, 13 lines)

@@ -0,0 +1,13 @@
# errdefs
A Go package for defining and checking common containerd errors.
## Project details
**errdefs** is a containerd sub-project, licensed under the [Apache 2.0 license](./LICENSE).
As a containerd sub-project, you will find the:
* [Project governance](https://github.com/containerd/project/blob/main/GOVERNANCE.md),
* [Maintainers](https://github.com/containerd/project/blob/main/MAINTAINERS),
* and [Contributing guidelines](https://github.com/containerd/project/blob/main/CONTRIBUTING.md)
information in our [`containerd/project`](https://github.com/containerd/project) repository.

vendor/github.com/containerd/errdefs/errors.go (new vendored file, 92 lines)

@@ -0,0 +1,92 @@
/*
Copyright The containerd Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Package errdefs defines the common errors used throughout containerd
// packages.
//
// Use with fmt.Errorf to add context to an error.
//
// To detect an error class, use the IsXXX functions to tell whether an error
// is of a certain type.
//
// The functions ToGRPC and FromGRPC can be used to map server-side and
// client-side errors to the correct types.
package errdefs
import (
"context"
"errors"
)
// Definitions of common error types used throughout containerd. All containerd
// errors returned by most packages will map into one of these errors classes.
// Packages should return errors of these types when they want to instruct a
// client to take a particular action.
//
// For the most part, we just try to provide local grpc errors. Most conditions
// map very well to those defined by grpc.
var (
ErrUnknown = errors.New("unknown") // used internally to represent a missed mapping.
ErrInvalidArgument = errors.New("invalid argument")
ErrNotFound = errors.New("not found")
ErrAlreadyExists = errors.New("already exists")
ErrFailedPrecondition = errors.New("failed precondition")
ErrUnavailable = errors.New("unavailable")
ErrNotImplemented = errors.New("not implemented") // represents not supported and unimplemented
)
// IsInvalidArgument returns true if the error is due to an invalid argument
func IsInvalidArgument(err error) bool {
return errors.Is(err, ErrInvalidArgument)
}
// IsNotFound returns true if the error is due to a missing object
func IsNotFound(err error) bool {
return errors.Is(err, ErrNotFound)
}
// IsAlreadyExists returns true if the error is due to an already existing
// metadata item
func IsAlreadyExists(err error) bool {
return errors.Is(err, ErrAlreadyExists)
}
// IsFailedPrecondition returns true if an operation could not proceed to the
// lack of a particular condition
func IsFailedPrecondition(err error) bool {
return errors.Is(err, ErrFailedPrecondition)
}
// IsUnavailable returns true if the error is due to a resource being unavailable
func IsUnavailable(err error) bool {
return errors.Is(err, ErrUnavailable)
}
// IsNotImplemented returns true if the error is due to not being implemented
func IsNotImplemented(err error) bool {
return errors.Is(err, ErrNotImplemented)
}
// IsCanceled returns true if the error is due to `context.Canceled`.
func IsCanceled(err error) bool {
return errors.Is(err, context.Canceled)
}
// IsDeadlineExceeded returns true if the error is due to
// `context.DeadlineExceeded`.
func IsDeadlineExceeded(err error) bool {
return errors.Is(err, context.DeadlineExceeded)
}
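A minimal usage sketch for the helpers above (the container ID is a made-up example): wrapping a sentinel with %w keeps it in the error chain, so both errors.Is and the IsXXX predicates recognize it.

```go
package main

import (
	"errors"
	"fmt"

	"github.com/containerd/errdefs"
)

func lookup(id string) error {
	// %w keeps errdefs.ErrNotFound in the error chain.
	return fmt.Errorf("container %q: %w", id, errdefs.ErrNotFound)
}

func main() {
	err := lookup("example-container")
	fmt.Println(errdefs.IsNotFound(err))             // true
	fmt.Println(errors.Is(err, errdefs.ErrNotFound)) // true
}
```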

vendor/github.com/containerd/errdefs/grpc.go (new vendored file, 147 lines)

@@ -0,0 +1,147 @@
/*
Copyright The containerd Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package errdefs
import (
"context"
"fmt"
"strings"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/status"
)
// ToGRPC will attempt to map the backend containerd error into a grpc error,
// using the original error message as a description.
//
// Further information may be extracted from certain errors depending on their
// type.
//
// If the error is unmapped, the original error will be returned to be handled
// by the regular grpc error handling stack.
func ToGRPC(err error) error {
if err == nil {
return nil
}
if isGRPCError(err) {
// error has already been mapped to grpc
return err
}
switch {
case IsInvalidArgument(err):
return status.Errorf(codes.InvalidArgument, err.Error())
case IsNotFound(err):
return status.Errorf(codes.NotFound, err.Error())
case IsAlreadyExists(err):
return status.Errorf(codes.AlreadyExists, err.Error())
case IsFailedPrecondition(err):
return status.Errorf(codes.FailedPrecondition, err.Error())
case IsUnavailable(err):
return status.Errorf(codes.Unavailable, err.Error())
case IsNotImplemented(err):
return status.Errorf(codes.Unimplemented, err.Error())
case IsCanceled(err):
return status.Errorf(codes.Canceled, err.Error())
case IsDeadlineExceeded(err):
return status.Errorf(codes.DeadlineExceeded, err.Error())
}
return err
}
// ToGRPCf maps the error to grpc error codes, assembling the formatting string
// and combining it with the target error string.
//
// This is equivalent to errdefs.ToGRPC(fmt.Errorf("%s: %w", fmt.Sprintf(format, args...), err))
func ToGRPCf(err error, format string, args ...interface{}) error {
return ToGRPC(fmt.Errorf("%s: %w", fmt.Sprintf(format, args...), err))
}
// FromGRPC returns the underlying error from a grpc service based on the grpc error code
func FromGRPC(err error) error {
if err == nil {
return nil
}
var cls error // divide these into error classes, becomes the cause
switch code(err) {
case codes.InvalidArgument:
cls = ErrInvalidArgument
case codes.AlreadyExists:
cls = ErrAlreadyExists
case codes.NotFound:
cls = ErrNotFound
case codes.Unavailable:
cls = ErrUnavailable
case codes.FailedPrecondition:
cls = ErrFailedPrecondition
case codes.Unimplemented:
cls = ErrNotImplemented
case codes.Canceled:
cls = context.Canceled
case codes.DeadlineExceeded:
cls = context.DeadlineExceeded
default:
cls = ErrUnknown
}
msg := rebaseMessage(cls, err)
if msg != "" {
err = fmt.Errorf("%s: %w", msg, cls)
} else {
err = cls
}
return err
}
// rebaseMessage removes the repeats for an error at the end of an error
// string. This will happen when taking an error over grpc then remapping it.
//
// Effectively, we just remove the string of cls from the end of err if it
// appears there.
func rebaseMessage(cls error, err error) string {
desc := errDesc(err)
clss := cls.Error()
if desc == clss {
return ""
}
return strings.TrimSuffix(desc, ": "+clss)
}
func isGRPCError(err error) bool {
_, ok := status.FromError(err)
return ok
}
func code(err error) codes.Code {
if s, ok := status.FromError(err); ok {
return s.Code()
}
return codes.Unknown
}
func errDesc(err error) string {
if s, ok := status.FromError(err); ok {
return s.Message()
}
return err.Error()
}
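A round-trip sketch of the two mapping directions defined above (the wrapped message is arbitrary): ToGRPC picks the status code from the error class, and FromGRPC re-attaches the class as the cause after rebaseMessage strips the duplicated suffix.

```go
package main

import (
	"fmt"

	"github.com/containerd/errdefs"
	"google.golang.org/grpc/status"
)

func main() {
	err := fmt.Errorf("image example: %w", errdefs.ErrNotFound)

	grpcErr := errdefs.ToGRPC(err) // codes.NotFound, message "image example: not found"
	s, _ := status.FromError(grpcErr)
	fmt.Println(s.Code(), s.Message())

	back := errdefs.FromGRPC(grpcErr) // "image example" re-wrapped around ErrNotFound
	fmt.Println(errdefs.IsNotFound(back)) // true
}
```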

@@ -70,7 +70,8 @@ func blobPipelineDetectCompressionStep(stream *sourceStream, srcInfo types.BlobI
// bpCompressionStepData contains data that the copy pipeline needs about the compression step.
type bpCompressionStepData struct {
-	operation              types.LayerCompression      // Operation to use for updating the blob metadata.
+	operation              bpcOperation                // What we are actually doing
+	uploadedOperation      types.LayerCompression      // Operation to use for updating the blob metadata (matching the end state, not necessarily what we do)
	uploadedAlgorithm      *compressiontypes.Algorithm // An algorithm parameter for the compressionOperation edits.
	uploadedAnnotations    map[string]string           // Annotations that should be set on the uploaded blob. WARNING: This is only set after the srcStream.reader is fully consumed.
	srcCompressorName      string                      // Compressor name to record in the blob info cache for the source blob.
@@ -78,6 +79,18 @@ type bpCompressionStepData struct {
	closers                []io.Closer                 // Objects to close after the upload is done, if any.
}
+type bpcOperation int
+const (
+	bpcOpInvalid              bpcOperation = iota
+	bpcOpPreserveOpaque       // We are preserving something where compression is not applicable
+	bpcOpPreserveCompressed   // We are preserving a compressed, and decompressible, layer
+	bpcOpPreserveUncompressed // We are preserving an uncompressed, and compressible, layer
+	bpcOpCompressUncompressed // We are compressing uncompressed data
+	bpcOpRecompressCompressed // We are recompressing compressed data
+	bpcOpDecompressCompressed // We are decompressing compressed data
+)
// blobPipelineCompressionStep updates *stream to compress and/or decompress it.
// srcInfo is primarily used for error messages.
// Returns data for other steps; the caller should eventually call updateCompressionEdits and perhaps recordValidatedBlobData,
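The point of the new field split is that several distinct internal operations collapse onto one types.LayerCompression value in the uploaded metadata. An illustrative, simplified mirror (names taken from the diff, types reduced to toy stand-ins):

```go
package main

import "fmt"

// Toy stand-ins for the vendored types, for illustration only.
type layerCompression int

const (
	preserveOriginal layerCompression = iota
	compress
	decompress
)

type bpcOperation int

const (
	bpcOpInvalid bpcOperation = iota
	bpcOpPreserveOpaque
	bpcOpPreserveCompressed
	bpcOpPreserveUncompressed
	bpcOpCompressUncompressed
	bpcOpRecompressCompressed
	bpcOpDecompressCompressed
)

// uploadedOperation collapses the internal operation onto the metadata value,
// mirroring what the hunks below assign to the uploadedOperation field.
func uploadedOperation(op bpcOperation) layerCompression {
	switch op {
	case bpcOpPreserveOpaque, bpcOpPreserveCompressed, bpcOpRecompressCompressed:
		return preserveOriginal // three different internal cases, one metadata value
	case bpcOpCompressUncompressed:
		return compress
	case bpcOpPreserveUncompressed, bpcOpDecompressCompressed:
		return decompress
	default:
		panic("invalid operation")
	}
}

func main() {
	fmt.Println(uploadedOperation(bpcOpRecompressCompressed) == uploadedOperation(bpcOpPreserveCompressed)) // true
}
```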
@@ -112,10 +125,11 @@ func (ic *imageCopier) blobPipelineCompressionStep(stream *sourceStream, canModi
// bpcPreserveEncrypted checks if the input is encrypted, and returns a *bpCompressionStepData if so.
func (ic *imageCopier) bpcPreserveEncrypted(stream *sourceStream, _ bpDetectCompressionStepData) (*bpCompressionStepData, error) {
	if isOciEncrypted(stream.info.MediaType) {
+		// We can’t do anything with an encrypted blob unless decrypted.
		logrus.Debugf("Using original blob without modification for encrypted blob")
-		// PreserveOriginal due to any compression not being able to be done on an encrypted blob unless decrypted
		return &bpCompressionStepData{
-			operation:              types.PreserveOriginal,
+			operation:              bpcOpPreserveOpaque,
+			uploadedOperation:      types.PreserveOriginal,
			uploadedAlgorithm:      nil,
			srcCompressorName:      internalblobinfocache.UnknownCompression,
			uploadedCompressorName: internalblobinfocache.UnknownCompression,
@@ -143,7 +157,8 @@ func (ic *imageCopier) bpcCompressUncompressed(stream *sourceStream, detected bp
		Size:   -1,
	}
	return &bpCompressionStepData{
-		operation:              types.Compress,
+		operation:              bpcOpCompressUncompressed,
+		uploadedOperation:      types.Compress,
		uploadedAlgorithm:      uploadedAlgorithm,
		uploadedAnnotations:    annotations,
		srcCompressorName:      detected.srcCompressorName,
@@ -182,7 +197,8 @@ func (ic *imageCopier) bpcRecompressCompressed(stream *sourceStream, detected bp
	}
	succeeded = true
	return &bpCompressionStepData{
-		operation:              types.PreserveOriginal,
+		operation:              bpcOpRecompressCompressed,
+		uploadedOperation:      types.PreserveOriginal,
		uploadedAlgorithm:      ic.compressionFormat,
		uploadedAnnotations:    annotations,
		srcCompressorName:      detected.srcCompressorName,
@@ -208,7 +224,8 @@ func (ic *imageCopier) bpcDecompressCompressed(stream *sourceStream, detected bp
		Size:   -1,
	}
	return &bpCompressionStepData{
-		operation:              types.Decompress,
+		operation:              bpcOpDecompressCompressed,
+		uploadedOperation:      types.Decompress,
		uploadedAlgorithm:      nil,
		srcCompressorName:      detected.srcCompressorName,
		uploadedCompressorName: internalblobinfocache.Uncompressed,
@@ -232,14 +249,26 @@ func (ic *imageCopier) bpcPreserveOriginal(_ *sourceStream, detected bpDetectCom
	// But don’t touch blobs in objects where we can’t change compression,
	// so that src.UpdatedImage() doesn’t fail; assume that for such blobs
	// LayerInfosForCopy() should not be making any changes in the first place.
+	var bpcOp bpcOperation
+	var uploadedOp types.LayerCompression
	var algorithm *compressiontypes.Algorithm
-	if layerCompressionChangeSupported && detected.isCompressed {
+	switch {
+	case !layerCompressionChangeSupported:
+		bpcOp = bpcOpPreserveOpaque
+		uploadedOp = types.PreserveOriginal
+		algorithm = nil
+	case detected.isCompressed:
+		bpcOp = bpcOpPreserveCompressed
+		uploadedOp = types.PreserveOriginal
		algorithm = &detected.format
-	} else {
+	default:
+		bpcOp = bpcOpPreserveUncompressed
+		uploadedOp = types.Decompress
		algorithm = nil
	}
	return &bpCompressionStepData{
-		operation:              types.PreserveOriginal,
+		operation:              bpcOp,
+		uploadedOperation:      uploadedOp,
		uploadedAlgorithm:      algorithm,
		srcCompressorName:      detected.srcCompressorName,
		uploadedCompressorName: detected.srcCompressorName,
@@ -248,7 +277,7 @@ func (ic *imageCopier) bpcPreserveOriginal(_ *sourceStream, detected bpDetectCom
// updateCompressionEdits sets *operation, *algorithm and updates *annotations, if necessary.
func (d *bpCompressionStepData) updateCompressionEdits(operation *types.LayerCompression, algorithm **compressiontypes.Algorithm, annotations *map[string]string) {
-	*operation = d.operation
+	*operation = d.uploadedOperation
	// If we can modify the layer's blob, set the desired algorithm for it to be set in the manifest.
	*algorithm = d.uploadedAlgorithm
	if *annotations == nil {
@@ -257,7 +286,8 @@ func (d *bpCompressionStepData) updateCompressionEdits(operation *types.LayerCom
	maps.Copy(*annotations, d.uploadedAnnotations)
}
-// recordValidatedBlobData updates b.blobInfoCache with data about the created uploadedInfo adnd the original srcInfo.
+// recordValidatedBlobData updates b.blobInfoCache with data about the created uploadedInfo (as returned by PutBlob)
+// and the original srcInfo (which the caller guarantees has been validated).
// This must ONLY be called if all data has been validated by OUR code, and is not coming from third parties.
func (d *bpCompressionStepData) recordValidatedDigestData(c *copier, uploadedInfo types.BlobInfo, srcInfo types.BlobInfo,
	encryptionStep *bpEncryptionStepData, decryptionStep *bpDecryptionStepData) error {
@@ -268,17 +298,26 @@ func (d *bpCompressionStepData) recordValidatedDigestData(c *copier, uploadedInf
	// in the blob info cache (which would probably be necessary for any more complex logic),
	// and the simplicity is attractive.
	if !encryptionStep.encrypting && !decryptionStep.decrypting {
-		// If d.operation != types.PreserveOriginal, we now have two reliable digest values:
+		// If d.operation != bpcOpPreserve*, we now have two reliable digest values:
		// srcinfo.Digest describes the pre-d.operation input, verified by digestingReader
		// uploadedInfo.Digest describes the post-d.operation output, computed by PutBlob
-		// (because stream.info.Digest == "", this must have been computed afresh).
+		// (because we set stream.info.Digest == "", this must have been computed afresh).
		switch d.operation {
-		case types.PreserveOriginal:
-			break // Do nothing, we have only one digest and we might not have even verified it.
-		case types.Compress:
+		case bpcOpPreserveOpaque:
+			// No useful information
+		case bpcOpCompressUncompressed:
			c.blobInfoCache.RecordDigestUncompressedPair(uploadedInfo.Digest, srcInfo.Digest)
-		case types.Decompress:
+		case bpcOpDecompressCompressed:
			c.blobInfoCache.RecordDigestUncompressedPair(srcInfo.Digest, uploadedInfo.Digest)
+		case bpcOpRecompressCompressed, bpcOpPreserveCompressed:
+			// We know one or two compressed digests. BlobInfoCache associates compression variants via the uncompressed digest,
+			// and we don’t know that one.
+			// That also means that repeated copies with the same recompression don’t identify reuse opportunities (unless
+			// RecordDigestUncompressedPair was called for both compressed variants for some other reason).
+		case bpcOpPreserveUncompressed:
+			c.blobInfoCache.RecordDigestUncompressedPair(srcInfo.Digest, srcInfo.Digest)
+		case bpcOpInvalid:
+			fallthrough
		default:
			return fmt.Errorf("Internal error: Unexpected d.operation value %#v", d.operation)
		}
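An illustrative toy version of what the switch above records: the blob info cache keys compression variants by the uncompressed digest, which is why preserve-uncompressed can self-map while recompressed or preserved-compressed blobs record nothing (their uncompressed digest is unknown).

```go
package main

import "fmt"

// Toy stand-in for the blob info cache: any digest -> its uncompressed digest.
type toyCache map[string]string

func (c toyCache) RecordDigestUncompressedPair(anyDigest, uncompressed string) {
	c[anyDigest] = uncompressed
}

func main() {
	c := toyCache{}
	// bpcOpCompressUncompressed: uploaded (compressed) -> source (uncompressed); digests are made up.
	c.RecordDigestUncompressedPair("sha256:compressed-example", "sha256:uncompressed-example")
	// bpcOpPreserveUncompressed: an uncompressed blob is its own uncompressed form.
	c.RecordDigestUncompressedPair("sha256:uncompressed-example", "sha256:uncompressed-example")
	// bpcOpRecompressCompressed / bpcOpPreserveCompressed: nothing recorded.
	fmt.Println(c)
}
```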
@@ -286,7 +325,7 @@ func (d *bpCompressionStepData) recordValidatedDigestData(c *copier, uploadedInf
	if d.uploadedCompressorName != "" && d.uploadedCompressorName != internalblobinfocache.UnknownCompression {
		if d.uploadedCompressorName != compressiontypes.ZstdChunkedAlgorithmName {
			// HACK: Don’t record zstd:chunked algorithms.
-			// There is already a similar hack in internal/imagedestination/impl/helpers.BlobMatchesRequiredCompression,
+			// There is already a similar hack in internal/imagedestination/impl/helpers.CandidateMatchesTryReusingBlobOptions,
			// and that one prevents reusing zstd:chunked blobs, so recording the algorithm here would be mostly harmless.
			//
			// We skip that here anyway to work around the inability of blobPipelineDetectCompressionStep to differentiate

@@ -82,18 +82,11 @@ func determineManifestConversion(in determineManifestConversionInputs) (manifest
	if in.forceManifestMIMEType != "" {
		destSupportedManifestMIMETypes = []string{in.forceManifestMIMEType}
	}
-	restrictiveCompressionRequired := in.requestedCompressionFormat != nil && !internalManifest.CompressionAlgorithmIsUniversallySupported(*in.requestedCompressionFormat)
	if len(destSupportedManifestMIMETypes) == 0 {
-		if (!in.requiresOCIEncryption || manifest.MIMETypeSupportsEncryption(srcType)) &&
-			(!restrictiveCompressionRequired || internalManifest.MIMETypeSupportsCompressionAlgorithm(srcType, *in.requestedCompressionFormat)) {
-			return manifestConversionPlan{ // Anything goes; just use the original as is, do not try any conversions.
-				preferredMIMEType:       srcType,
-				otherMIMETypeCandidates: []string{},
-			}, nil
-		}
		destSupportedManifestMIMETypes = allManifestMIMETypes
	}
+	restrictiveCompressionRequired := in.requestedCompressionFormat != nil && !internalManifest.CompressionAlgorithmIsUniversallySupported(*in.requestedCompressionFormat)
	supportedByDest := set.New[string]()
	for _, t := range destSupportedManifestMIMETypes {
		if in.requiresOCIEncryption && !manifest.MIMETypeSupportsEncryption(t) {

@@ -38,6 +38,7 @@ type instanceCopy struct {
	// Fields which can be used by callers when operation
	// is `instanceCopyClone`
+	cloneArtifactType       string
	cloneCompressionVariant OptionCompressionVariant
	clonePlatform           *imgspecv1.Platform
	cloneAnnotations        map[string]string
@@ -142,6 +143,7 @@ func prepareInstanceCopies(list internalManifest.List, instanceDigests []digest.
		res = append(res, instanceCopy{
			op:                      instanceCopyClone,
			sourceDigest:            instanceDigest,
+			cloneArtifactType:       instanceDetails.ReadOnly.ArtifactType,
			cloneCompressionVariant: compressionVariant,
			clonePlatform:           instanceDetails.ReadOnly.Platform,
			cloneAnnotations:        maps.Clone(instanceDetails.ReadOnly.Annotations),
@@ -268,6 +270,7 @@ func (c *copier) copyMultipleImages(ctx context.Context) (copiedManifest []byte,
			AddDigest:                updated.manifestDigest,
			AddSize:                  int64(len(updated.manifest)),
			AddMediaType:             updated.manifestMIMEType,
+			AddArtifactType:          instance.cloneArtifactType,
			AddPlatform:              instance.clonePlatform,
			AddAnnotations:           instance.cloneAnnotations,
			AddCompressionAlgorithms: updated.compressionAlgorithms,

@@ -4,6 +4,7 @@ import (
	"context"
	"fmt"
	"io"
+	"time"
	"github.com/containers/image/v5/internal/private"
	"github.com/containers/image/v5/types"
@@ -148,13 +149,14 @@ type blobChunkAccessorProxy struct {
// The readers must be fully consumed, in the order they are returned, before blocking
// to read the next chunk.
func (s *blobChunkAccessorProxy) GetBlobAt(ctx context.Context, info types.BlobInfo, chunks []private.ImageSourceChunk) (chan io.ReadCloser, chan error, error) {
+	start := time.Now()
	rc, errs, err := s.wrapped.GetBlobAt(ctx, info, chunks)
	if err == nil {
		total := int64(0)
		for _, c := range chunks {
			total += int64(c.Length)
		}
-		s.bar.IncrInt64(total)
+		s.bar.EwmaIncrInt64(total, time.Since(start))
	}
	return rc, errs, err
}
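The change above times each GetBlobAt call and hands the duration to the bar's EWMA state, so speed/ETA decorators weight recent chunks more heavily than old ones. A hedged, self-contained sketch of the same pattern with mpb/v8 (the reader is a stand-in for a blob fetch):

```go
package main

import (
	"io"
	"strings"
	"time"

	"github.com/vbauerster/mpb/v8"
	"github.com/vbauerster/mpb/v8/decor"
)

func main() {
	data := strings.NewReader(strings.Repeat("x", 1<<20)) // stand-in for a blob
	p := mpb.New()
	bar := p.AddBar(int64(data.Len()),
		mpb.AppendDecorators(decor.EwmaETA(decor.ET_STYLE_GO, 30)),
	)

	buf := make([]byte, 32*1024)
	for {
		start := time.Now()
		n, err := data.Read(buf)
		if n > 0 {
			// Same pattern as the diff: increment together with the observed duration.
			bar.EwmaIncrInt64(int64(n), time.Since(start))
		}
		if err == io.EOF {
			break
		}
	}
	p.Wait()
}
```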

@@ -33,6 +33,7 @@ type imageCopier struct {
	c                          *copier
	manifestUpdates            *types.ManifestUpdateOptions
	src                        *image.SourcedImage
+	manifestConversionPlan     manifestConversionPlan
	diffIDsAreNeeded           bool
	cannotModifyManifestReason string // The reason the manifest cannot be modified, or an empty string if it can
	canSubstituteBlobs         bool
@@ -136,7 +137,7 @@ func (c *copier) copySingleImage(ctx context.Context, unparsedImage *image.Unpar
		c:               c,
		manifestUpdates: &types.ManifestUpdateOptions{InformationOnly: types.ManifestUpdateInformation{Destination: c.dest}},
		src:             src,
-		// diffIDsAreNeeded is computed later
+		// manifestConversionPlan and diffIDsAreNeeded are computed later
		cannotModifyManifestReason:    cannotModifyManifestReason,
		requireCompressionFormatMatch: opts.requireCompressionFormatMatch,
	}
@@ -164,7 +165,7 @@ func (c *copier) copySingleImage(ctx context.Context, unparsedImage *image.Unpar
	destRequiresOciEncryption := (isEncrypted(src) && ic.c.options.OciDecryptConfig == nil) || c.options.OciEncryptLayers != nil
-	manifestConversionPlan, err := determineManifestConversion(determineManifestConversionInputs{
+	ic.manifestConversionPlan, err = determineManifestConversion(determineManifestConversionInputs{
		srcMIMEType:                    ic.src.ManifestMIMEType,
		destSupportedManifestMIMETypes: ic.c.dest.SupportedManifestMIMETypes(),
		forceManifestMIMEType:          c.options.ForceManifestMIMEType,
@@ -179,8 +180,8 @@ func (c *copier) copySingleImage(ctx context.Context, unparsedImage *image.Unpar
	// code that calls copyUpdatedConfigAndManifest, so that other parts of the copy code
	// (e.g. the UpdatedImageNeedsLayerDiffIDs check just below) can make decisions based
	// on the expected destination format.
-	if manifestConversionPlan.preferredMIMETypeNeedsConversion {
-		ic.manifestUpdates.ManifestMIMEType = manifestConversionPlan.preferredMIMEType
+	if ic.manifestConversionPlan.preferredMIMETypeNeedsConversion {
+		ic.manifestUpdates.ManifestMIMEType = ic.manifestConversionPlan.preferredMIMEType
	}
	// If src.UpdatedImageNeedsLayerDiffIDs(ic.manifestUpdates) will be true, it needs to be true by the time we get here.
@@ -219,11 +220,11 @@ func (c *copier) copySingleImage(ctx context.Context, unparsedImage *image.Unpar
	manifestBytes, manifestDigest, err := ic.copyUpdatedConfigAndManifest(ctx, targetInstance)
	wipResult := copySingleImageResult{
		manifest:         manifestBytes,
-		manifestMIMEType: manifestConversionPlan.preferredMIMEType,
+		manifestMIMEType: ic.manifestConversionPlan.preferredMIMEType,
		manifestDigest:   manifestDigest,
	}
	if err != nil {
-		logrus.Debugf("Writing manifest using preferred type %s failed: %v", manifestConversionPlan.preferredMIMEType, err)
+		logrus.Debugf("Writing manifest using preferred type %s failed: %v", ic.manifestConversionPlan.preferredMIMEType, err)
		// … if it fails, and the failure is either because the manifest is rejected by the registry, or
		// because we failed to create a manifest of the specified type because the specific manifest type
		// doesn't support the type of compression we're trying to use (e.g. docker v2s2 and zstd), we may
@@ -232,13 +233,13 @@ func (c *copier) copySingleImage(ctx context.Context, unparsedImage *image.Unpar
		var manifestLayerCompressionIncompatibilityError manifest.ManifestLayerCompressionIncompatibilityError
		isManifestRejected := errors.As(err, &manifestTypeRejectedError)
		isCompressionIncompatible := errors.As(err, &manifestLayerCompressionIncompatibilityError)
-		if (!isManifestRejected && !isCompressionIncompatible) || len(manifestConversionPlan.otherMIMETypeCandidates) == 0 {
+		if (!isManifestRejected && !isCompressionIncompatible) || len(ic.manifestConversionPlan.otherMIMETypeCandidates) == 0 {
			// We don’t have other options.
			// In principle the code below would handle this as well, but the resulting error message is fairly ugly.
			// Don’t bother the user with MIME types if we have no choice.
			return copySingleImageResult{}, err
		}
-		// If the original MIME type is acceptable, determineManifestConversion always uses it as manifestConversionPlan.preferredMIMEType.
+		// If the original MIME type is acceptable, determineManifestConversion always uses it as ic.manifestConversionPlan.preferredMIMEType.
		// So if we are here, we will definitely be trying to convert the manifest.
		// With ic.cannotModifyManifestReason != "", that would just be a string of repeated failures for the same reason,
		// so let’s bail out early and with a better error message.
@@ -247,8 +248,8 @@ func (c *copier) copySingleImage(ctx context.Context, unparsedImage *image.Unpar
	}
	// errs is a list of errors when trying various manifest types. Also serves as an "upload succeeded" flag when set to nil.
-	errs := []string{fmt.Sprintf("%s(%v)", manifestConversionPlan.preferredMIMEType, err)}
-	for _, manifestMIMEType := range manifestConversionPlan.otherMIMETypeCandidates {
+	errs := []string{fmt.Sprintf("%s(%v)", ic.manifestConversionPlan.preferredMIMEType, err)}
+	for _, manifestMIMEType := range ic.manifestConversionPlan.otherMIMETypeCandidates {
		logrus.Debugf("Trying to use manifest type %s…", manifestMIMEType)
		ic.manifestUpdates.ManifestMIMEType = manifestMIMEType
		attemptedManifest, attemptedManifestDigest, err := ic.copyUpdatedConfigAndManifest(ctx, targetInstance)
@@ -382,7 +383,7 @@ func (ic *imageCopier) compareImageDestinationManifestEqual(ctx context.Context,
	compressionAlgos := set.New[string]()
	for _, srcInfo := range ic.src.LayerInfos() {
-		if c := compressionAlgorithmFromMIMEType(srcInfo); c != nil {
+		if _, c := compressionEditsFromMIMEType(srcInfo); c != nil {
			compressionAlgos.Add(c.Name())
		}
	}
@@ -635,17 +636,22 @@ type diffIDResult struct {
	err     error
}
-func compressionAlgorithmFromMIMEType(srcInfo types.BlobInfo) *compressiontypes.Algorithm {
+// compressionEditsFromMIMEType returns a (CompressionOperation, CompressionAlgorithm) value pair suitable
+// for types.BlobInfo, based on a MIME type of srcInfo.
+func compressionEditsFromMIMEType(srcInfo types.BlobInfo) (types.LayerCompression, *compressiontypes.Algorithm) {
	// This MIME type → compression mapping belongs in manifest-specific code in our manifest
	// package (but we should preferably replace/change UpdatedImage instead of productizing
	// this workaround).
	switch srcInfo.MediaType {
	case manifest.DockerV2Schema2LayerMediaType, imgspecv1.MediaTypeImageLayerGzip:
-		return &compression.Gzip
+		return types.PreserveOriginal, &compression.Gzip
	case imgspecv1.MediaTypeImageLayerZstd:
-		return &compression.Zstd
+		return types.PreserveOriginal, &compression.Zstd
+	case manifest.DockerV2SchemaLayerMediaTypeUncompressed, imgspecv1.MediaTypeImageLayer:
+		return types.Decompress, nil
+	default:
+		return types.PreserveOriginal, nil
	}
-	return nil
}
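A reduced illustration of the mapping the renamed helper now encodes; the media-type strings are the literal values behind the manifest/imgspecv1 constants, and the toy return values stand in for (types.LayerCompression, *compressiontypes.Algorithm):

```go
package main

import "fmt"

// Toy mirror of compressionEditsFromMIMEType: MIME type -> (operation, algorithm).
func edits(mediaType string) (op, algo string) {
	switch mediaType {
	case "application/vnd.docker.image.rootfs.diff.tar.gzip",
		"application/vnd.oci.image.layer.v1.tar+gzip":
		return "PreserveOriginal", "gzip"
	case "application/vnd.oci.image.layer.v1.tar+zstd":
		return "PreserveOriginal", "zstd"
	case "application/vnd.docker.image.rootfs.diff.tar",
		"application/vnd.oci.image.layer.v1.tar":
		return "Decompress", "" // uncompressed layers now get an explicit operation too
	default:
		return "PreserveOriginal", ""
	}
}

func main() {
	fmt.Println(edits("application/vnd.oci.image.layer.v1.tar"))
}
```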
// copyLayer copies a layer with srcInfo (with known Digest and Annotations and possibly known Size) in src to dest, perhaps (de/re/)compressing it,
@@ -659,8 +665,8 @@ type diffIDResult struct {
	// which uses the compression information to compute the updated MediaType values.
	// (Sadly UpdatedImage() is documented to not update MediaTypes from
	// ManifestUpdateOptions.LayerInfos[].MediaType, so we are doing it indirectly.)
-	if srcInfo.CompressionAlgorithm == nil {
-		srcInfo.CompressionAlgorithm = compressionAlgorithmFromMIMEType(srcInfo)
+	if srcInfo.CompressionOperation == types.PreserveOriginal && srcInfo.CompressionAlgorithm == nil {
+		srcInfo.CompressionOperation, srcInfo.CompressionAlgorithm = compressionEditsFromMIMEType(srcInfo)
	}
	ic.c.printCopyInfo("blob", srcInfo)
@ -683,34 +689,33 @@ func (ic *imageCopier) copyLayer(ctx context.Context, srcInfo types.BlobInfo, to
logrus.Debugf("Checking if we can reuse blob %s: general substitution = %v, compression for MIME type %q = %v",
srcInfo.Digest, ic.canSubstituteBlobs, srcInfo.MediaType, canChangeLayerCompression)
canSubstitute := ic.canSubstituteBlobs && ic.src.CanChangeLayerCompression(srcInfo.MediaType)
// TODO: at this point we don't know whether or not a blob we end up reusing is compressed using an algorithm
// that is acceptable for use on layers in the manifest that we'll be writing later, so if we end up reusing
// a blob that's compressed with e.g. zstd, but we're only allowed to write a v2s2 manifest, this will cause
// a failure when we eventually try to update the manifest with the digest and MIME type of the reused blob.
// Fixing that will probably require passing more information to TryReusingBlob() than the current version of
// the ImageDestination interface lets us pass in.
var requiredCompression *compressiontypes.Algorithm
var originalCompression *compressiontypes.Algorithm
if ic.requireCompressionFormatMatch {
requiredCompression = ic.compressionFormat
originalCompression = srcInfo.CompressionAlgorithm
}
var tocDigest digest.Digest
// Check if we have a chunked layer in storage that's based on that blob. These layers are stored by their TOC digest.
tocDigest, err := chunkedToc.GetTOCDigest(srcInfo.Annotations)
d, err := chunkedToc.GetTOCDigest(srcInfo.Annotations)
if err != nil {
return types.BlobInfo{}, "", err
}
if d != nil {
tocDigest = *d
}
reused, reusedBlob, err := ic.c.dest.TryReusingBlobWithOptions(ctx, srcInfo, private.TryReusingBlobOptions{
Cache: ic.c.blobInfoCache,
CanSubstitute: canSubstitute,
EmptyLayer: emptyLayer,
LayerIndex: &layerIndex,
SrcRef: srcRef,
RequiredCompression: requiredCompression,
OriginalCompression: originalCompression,
TOCDigest: tocDigest,
Cache: ic.c.blobInfoCache,
CanSubstitute: canSubstitute,
EmptyLayer: emptyLayer,
LayerIndex: &layerIndex,
SrcRef: srcRef,
PossibleManifestFormats: append([]string{ic.manifestConversionPlan.preferredMIMEType}, ic.manifestConversionPlan.otherMIMETypeCandidates...),
RequiredCompression: requiredCompression,
OriginalCompression: srcInfo.CompressionAlgorithm,
TOCDigest: tocDigest,
})
if err != nil {
return types.BlobInfo{}, "", fmt.Errorf("trying to reuse blob %s at destination: %w", srcInfo.Digest, err)
@ -718,7 +723,11 @@ func (ic *imageCopier) copyLayer(ctx context.Context, srcInfo types.BlobInfo, to
if reused {
logrus.Debugf("Skipping blob %s (already present):", srcInfo.Digest)
func() { // A scope for defer
bar := ic.c.createProgressBar(pool, false, types.BlobInfo{Digest: reusedBlob.Digest, Size: 0}, "blob", "skipped: already exists")
label := "skipped: already exists"
if reusedBlob.MatchedByTOCDigest {
label = "skipped: already exists (found by TOC)"
}
bar := ic.c.createProgressBar(pool, false, types.BlobInfo{Digest: reusedBlob.Digest, Size: 0}, "blob", label)
defer bar.Abort(false)
bar.mark100PercentComplete()
}()
@ -751,7 +760,10 @@ func (ic *imageCopier) copyLayer(ctx context.Context, srcInfo types.BlobInfo, to
wrapped: ic.c.rawSource,
bar: bar,
}
uploadedBlob, err := ic.c.dest.PutBlobPartial(ctx, &proxy, srcInfo, ic.c.blobInfoCache)
uploadedBlob, err := ic.c.dest.PutBlobPartial(ctx, &proxy, srcInfo, private.PutBlobPartialOptions{
Cache: ic.c.blobInfoCache,
LayerIndex: layerIndex,
})
if err == nil {
if srcInfo.Size != -1 {
refill := srcInfo.Size - bar.Current()


@ -190,7 +190,7 @@ func (d *dirImageDestination) PutBlobWithOptions(ctx context.Context, stream io.
// If the blob has been successfully reused, returns (true, info, nil).
// If the transport can not reuse the requested blob, TryReusingBlob returns (false, {}, nil); it returns a non-nil error only on an unexpected failure.
func (d *dirImageDestination) TryReusingBlobWithOptions(ctx context.Context, info types.BlobInfo, options private.TryReusingBlobOptions) (bool, private.ReusedBlob, error) {
if !impl.OriginalBlobMatchesRequiredCompression(options) {
if !impl.OriginalCandidateMatchesTryReusingBlobOptions(options) {
return false, private.ReusedBlob{}, nil
}
if info.Digest == "" {


@ -27,6 +27,7 @@ import (
"github.com/containers/image/v5/internal/uploadreader"
"github.com/containers/image/v5/manifest"
"github.com/containers/image/v5/pkg/blobinfocache/none"
compressiontypes "github.com/containers/image/v5/pkg/compression/types"
"github.com/containers/image/v5/types"
"github.com/docker/distribution/registry/api/errcode"
v2 "github.com/docker/distribution/registry/api/v2"
@ -311,6 +312,13 @@ func (d *dockerImageDestination) tryReusingExactBlob(ctx context.Context, info t
return false, private.ReusedBlob{}, nil
}
func optionalCompressionName(algo *compressiontypes.Algorithm) string {
if algo != nil {
return algo.Name()
}
return "nil"
}
// TryReusingBlobWithOptions checks whether the transport already contains, or can efficiently reuse, a blob, and if so, applies it to the current destination
// (e.g. if the blob is a filesystem layer, this signifies that the changes it describes need to be applied again when composing a filesystem tree).
// info.Digest must not be empty.
@ -321,7 +329,7 @@ func (d *dockerImageDestination) TryReusingBlobWithOptions(ctx context.Context,
return false, private.ReusedBlob{}, errors.New("Can not check for a blob with unknown digest")
}
if impl.OriginalBlobMatchesRequiredCompression(options) {
if impl.OriginalCandidateMatchesTryReusingBlobOptions(options) {
// First, check whether the blob happens to already exist at the destination.
haveBlob, reusedInfo, err := d.tryReusingExactBlob(ctx, info, options.Cache)
if err != nil {
@ -331,11 +339,8 @@ func (d *dockerImageDestination) TryReusingBlobWithOptions(ctx context.Context,
return true, reusedInfo, nil
}
} else {
requiredCompression := "nil"
if options.OriginalCompression != nil {
requiredCompression = options.OriginalCompression.Name()
}
logrus.Debugf("Ignoring exact blob match case due to compression mismatch ( %s vs %s )", options.RequiredCompression.Name(), requiredCompression)
logrus.Debugf("Ignoring exact blob match, compression %s does not match required %s or MIME types %#v",
optionalCompressionName(options.OriginalCompression), optionalCompressionName(options.RequiredCompression), options.PossibleManifestFormats)
}
// Then try reusing blobs from other locations.
@ -355,15 +360,13 @@ func (d *dockerImageDestination) TryReusingBlobWithOptions(ctx context.Context,
continue
}
}
if !impl.BlobMatchesRequiredCompression(options, compressionAlgorithm) {
requiredCompression := "nil"
if compressionAlgorithm != nil {
requiredCompression = compressionAlgorithm.Name()
}
if !impl.CandidateMatchesTryReusingBlobOptions(options, compressionAlgorithm) {
if !candidate.UnknownLocation {
logrus.Debugf("Ignoring candidate blob %s as reuse candidate due to compression mismatch ( %s vs %s ) in %s", candidate.Digest.String(), options.RequiredCompression.Name(), requiredCompression, candidateRepo.Name())
logrus.Debugf("Ignoring candidate blob %s in %s, compression %s does not match required %s or MIME types %#v", candidate.Digest.String(), candidateRepo.Name(),
optionalCompressionName(compressionAlgorithm), optionalCompressionName(options.RequiredCompression), options.PossibleManifestFormats)
} else {
logrus.Debugf("Ignoring candidate blob %s as reuse candidate due to compression mismatch ( %s vs %s ) with no location match, checking current repo", candidate.Digest.String(), options.RequiredCompression.Name(), requiredCompression)
logrus.Debugf("Ignoring candidate blob %s with no known location, compression %s does not match required %s or MIME types %#v", candidate.Digest.String(),
optionalCompressionName(compressionAlgorithm), optionalCompressionName(options.RequiredCompression), options.PossibleManifestFormats)
}
continue
}


@ -129,7 +129,7 @@ func (d *Destination) PutBlobWithOptions(ctx context.Context, stream io.Reader,
// If the blob has been successfully reused, returns (true, info, nil).
// If the transport can not reuse the requested blob, TryReusingBlob returns (false, {}, nil); it returns a non-nil error only on an unexpected failure.
func (d *Destination) TryReusingBlobWithOptions(ctx context.Context, info types.BlobInfo, options private.TryReusingBlobOptions) (bool, private.ReusedBlob, error) {
if !impl.OriginalBlobMatchesRequiredCompression(options) {
if !impl.OriginalCandidateMatchesTryReusingBlobOptions(options) {
return false, private.ReusedBlob{}, nil
}
if err := d.archive.lock(); err != nil {


@ -1,25 +1,41 @@
package impl
import (
"github.com/containers/image/v5/internal/manifest"
"github.com/containers/image/v5/internal/private"
compression "github.com/containers/image/v5/pkg/compression/types"
"golang.org/x/exp/slices"
)
// BlobMatchesRequiredCompression validates if compression is required by the caller while selecting a blob, if it is required
// CandidateMatchesTryReusingBlobOptions validates if compression is required by the caller while selecting a blob, if it is required
// then function performs a match against the compression requested by the caller and compression of existing blob
// (which can be nil to represent uncompressed or unknown)
func BlobMatchesRequiredCompression(options private.TryReusingBlobOptions, candidateCompression *compression.Algorithm) bool {
if options.RequiredCompression == nil {
return true // no requirement imposed
func CandidateMatchesTryReusingBlobOptions(options private.TryReusingBlobOptions, candidateCompression *compression.Algorithm) bool {
if options.RequiredCompression != nil {
if options.RequiredCompression.Name() == compression.ZstdChunkedAlgorithmName {
// HACK: Never match when the caller asks for zstd:chunked, because we don't record the annotations required to use the chunked blobs.
// The caller must re-compress to build those annotations.
return false
}
if candidateCompression == nil || (options.RequiredCompression.Name() != candidateCompression.Name()) {
return false
}
}
if options.RequiredCompression.Name() == compression.ZstdChunkedAlgorithmName {
// HACK: Never match when the caller asks for zstd:chunked, because we don't record the annotations required to use the chunked blobs.
// The caller must re-compress to build those annotations.
return false
// For candidateCompression == nil, we can't tell the difference between “uncompressed” and “unknown”;
// and “uncompressed” is acceptable in all known formats (well, it seems to work in practice for schema1),
// so don't impose any restrictions if candidateCompression == nil
if options.PossibleManifestFormats != nil && candidateCompression != nil {
if !slices.ContainsFunc(options.PossibleManifestFormats, func(mt string) bool {
return manifest.MIMETypeSupportsCompressionAlgorithm(mt, *candidateCompression)
}) {
return false
}
}
return candidateCompression != nil && (options.RequiredCompression.Name() == candidateCompression.Name())
return true
}
func OriginalBlobMatchesRequiredCompression(opts private.TryReusingBlobOptions) bool {
return BlobMatchesRequiredCompression(opts, opts.OriginalCompression)
func OriginalCandidateMatchesTryReusingBlobOptions(opts private.TryReusingBlobOptions) bool {
return CandidateMatchesTryReusingBlobOptions(opts, opts.OriginalCompression)
}
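A quick illustration of the new filter (a sketch, not part of the diff; impl and private are internal packages, so this only compiles from within the containers/image module, while manifest.DockerV2Schema2MediaType and compression.Zstd are the real exported names):

package main

import (
	"fmt"

	"github.com/containers/image/v5/internal/imagedestination/impl"
	"github.com/containers/image/v5/internal/private"
	"github.com/containers/image/v5/manifest"
	"github.com/containers/image/v5/pkg/compression"
)

func main() {
	opts := private.TryReusingBlobOptions{
		// Only a v2s2 manifest may be written; v2s2 has no zstd layer MIME type.
		PossibleManifestFormats: []string{manifest.DockerV2Schema2MediaType},
	}
	zstd := compression.Zstd
	fmt.Println(impl.CandidateMatchesTryReusingBlobOptions(opts, &zstd)) // false: the format filter rejects zstd
	fmt.Println(impl.CandidateMatchesTryReusingBlobOptions(opts, nil))  // true: nil means "uncompressed or unknown"
}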


@ -4,7 +4,6 @@ import (
"context"
"fmt"
"github.com/containers/image/v5/internal/blobinfocache"
"github.com/containers/image/v5/internal/private"
"github.com/containers/image/v5/types"
)
@ -39,7 +38,7 @@ func (stub NoPutBlobPartialInitialize) SupportsPutBlobPartial() bool {
// It is available only if SupportsPutBlobPartial().
// Even if SupportsPutBlobPartial() returns true, the call can fail, in which case the caller
// should fall back to PutBlobWithOptions.
func (stub NoPutBlobPartialInitialize) PutBlobPartial(ctx context.Context, chunkAccessor private.BlobChunkAccessor, srcInfo types.BlobInfo, cache blobinfocache.BlobInfoCache2) (private.UploadedBlob, error) {
func (stub NoPutBlobPartialInitialize) PutBlobPartial(ctx context.Context, chunkAccessor private.BlobChunkAccessor, srcInfo types.BlobInfo, options private.PutBlobPartialOptions) (private.UploadedBlob, error) {
return private.UploadedBlob{}, fmt.Errorf("internal error: PutBlobPartial is not supported by the %q transport", stub.transportName)
}


@ -73,6 +73,7 @@ type ListUpdate struct {
Platform *imgspecv1.Platform
Annotations map[string]string
CompressionAlgorithmNames []string
ArtifactType string
}
}
@ -101,6 +102,7 @@ type ListEdit struct {
AddDigest digest.Digest
AddSize int64
AddMediaType string
AddArtifactType string
AddPlatform *imgspecv1.Platform
AddAnnotations map[string]string
AddCompressionAlgorithms []compression.Algorithm
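For context, a hedged sketch of how a caller could read the newly-surfaced ArtifactType (idx and instanceDigest are hypothetical; internal/manifest is not importable from outside the c/image module):

// idx is an internal/manifest.List; instanceDigest identifies one entry in it.
update, err := idx.Instance(instanceDigest)
if err != nil {
	return err
}
fmt.Printf("artifact type: %q\n", update.ReadOnly.ArtifactType) // "" when the descriptor carries none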


@ -61,6 +61,7 @@ func (index *OCI1IndexPublic) Instance(instanceDigest digest.Digest) (ListUpdate
ret.ReadOnly.Platform = manifest.Platform
ret.ReadOnly.Annotations = manifest.Annotations
ret.ReadOnly.CompressionAlgorithmNames = annotationsToCompressionAlgorithmNames(manifest.Annotations)
ret.ReadOnly.ArtifactType = manifest.ArtifactType
return ret, nil
}
}
@ -103,7 +104,7 @@ func addCompressionAnnotations(compressionAlgorithms []compression.Algorithm, an
}
for _, algo := range compressionAlgorithms {
switch algo.Name() {
case compression.ZstdAlgorithmName:
case compression.ZstdAlgorithmName, compression.ZstdChunkedAlgorithmName: // Should this use InternalUnstableUndocumentedMIMEQuestionMark() ?
(*annotationsMap)[OCI1InstanceAnnotationCompressionZSTD] = OCI1InstanceAnnotationCompressionZSTDValue
default:
continue
@ -157,11 +158,13 @@ func (index *OCI1IndexPublic) editInstances(editInstances []ListEdit) error {
}
addCompressionAnnotations(editInstance.AddCompressionAlgorithms, &annotations)
addedEntries = append(addedEntries, imgspecv1.Descriptor{
MediaType: editInstance.AddMediaType,
Size: editInstance.AddSize,
Digest: editInstance.AddDigest,
Platform: editInstance.AddPlatform,
Annotations: annotations})
MediaType: editInstance.AddMediaType,
ArtifactType: editInstance.AddArtifactType,
Size: editInstance.AddSize,
Digest: editInstance.AddDigest,
Platform: editInstance.AddPlatform,
Annotations: annotations,
})
default:
return fmt.Errorf("internal error: invalid operation: %d", editInstance.ListOperation)
}
@ -299,12 +302,13 @@ func OCI1IndexPublicFromComponents(components []imgspecv1.Descriptor, annotation
platform = &platformCopy
}
m := imgspecv1.Descriptor{
MediaType: component.MediaType,
Size: component.Size,
Digest: component.Digest,
URLs: slices.Clone(component.URLs),
Annotations: maps.Clone(component.Annotations),
Platform: platform,
MediaType: component.MediaType,
ArtifactType: component.ArtifactType,
Size: component.Size,
Digest: component.Digest,
URLs: slices.Clone(component.URLs),
Annotations: maps.Clone(component.Annotations),
Platform: platform,
}
index.Manifests[i] = m
}


@ -55,7 +55,7 @@ type ImageDestinationInternalOnly interface {
// It is available only if SupportsPutBlobPartial().
// Even if SupportsPutBlobPartial() returns true, the call can fail, in which case the caller
// should fall back to PutBlobWithOptions.
PutBlobPartial(ctx context.Context, chunkAccessor BlobChunkAccessor, srcInfo types.BlobInfo, cache blobinfocache.BlobInfoCache2) (UploadedBlob, error)
PutBlobPartial(ctx context.Context, chunkAccessor BlobChunkAccessor, srcInfo types.BlobInfo, options PutBlobPartialOptions) (UploadedBlob, error)
// TryReusingBlobWithOptions checks whether the transport already contains, or can efficiently reuse, a blob, and if so, applies it to the current destination
// (e.g. if the blob is a filesystem layer, this signifies that the changes it describes need to be applied again when composing a filesystem tree).
@ -100,6 +100,12 @@ type PutBlobOptions struct {
LayerIndex *int // If the blob is a layer, a zero-based index of the layer within the image; nil otherwise.
}
// PutBlobPartialOptions are used in PutBlobPartial.
type PutBlobPartialOptions struct {
Cache blobinfocache.BlobInfoCache2 // Cache to use and/or update.
LayerIndex int // A zero-based index of the layer within the image (PutBlobPartial is only called with layer-like blobs, not configs)
}
// TryReusingBlobOptions are used in TryReusingBlobWithOptions.
type TryReusingBlobOptions struct {
Cache blobinfocache.BlobInfoCache2 // Cache to use and/or update.
@ -112,12 +118,13 @@ type TryReusingBlobOptions struct {
// Transports, OTOH, MUST support these fields being zero-valued for types.ImageDestination callers
// if they use internal/imagedestination/impl.Compat;
// in that case, they will all be consistently zero-valued.
RequiredCompression *compression.Algorithm // If set, reuse blobs with a matching algorithm as per implementations in internal/imagedestination/impl.helpers.go
OriginalCompression *compression.Algorithm // Must be set if RequiredCompression is set; can be set to nil to indicate “uncompressed” or “unknown”.
EmptyLayer bool // True if the blob is an "empty"/"throwaway" layer, and may not necessarily be physically represented.
LayerIndex *int // If the blob is a layer, a zero-based index of the layer within the image; nil otherwise.
SrcRef reference.Named // A reference to the source image that contains the input blob.
TOCDigest *digest.Digest // If specified, the blob can be looked up in the destination also by its TOC digest.
EmptyLayer bool // True if the blob is an "empty"/"throwaway" layer, and may not necessarily be physically represented.
LayerIndex *int // If the blob is a layer, a zero-based index of the layer within the image; nil otherwise.
SrcRef reference.Named // A reference to the source image that contains the input blob.
PossibleManifestFormats []string // A set of possible manifest formats; at least one should support the reused layer blob.
RequiredCompression *compression.Algorithm // If set, reuse blobs with a matching algorithm as per implementations in internal/imagedestination/impl.helpers.go
OriginalCompression *compression.Algorithm // May be nil to indicate “uncompressed” or “unknown”.
TOCDigest digest.Digest // If specified, the blob can be looked up in the destination also by its TOC digest.
}
// ReusedBlob is information about a blob reused in a destination.
@ -129,6 +136,8 @@ type ReusedBlob struct {
// a differently-compressed blob.
CompressionOperation types.LayerCompression // Compress/Decompress, matching the reused blob; PreserveOriginal if N/A
CompressionAlgorithm *compression.Algorithm // Algorithm if compressed, nil if decompressed or N/A
MatchedByTOCDigest bool // Whether the layer was reused/matched by TOC digest. Used only for UI purposes.
}
// ImageSourceChunk is a portion of a blob.


@ -9,7 +9,6 @@ import (
compressiontypes "github.com/containers/image/v5/pkg/compression/types"
"github.com/containers/image/v5/types"
ociencspec "github.com/containers/ocicrypt/spec"
chunkedToc "github.com/containers/storage/pkg/chunked/toc"
"github.com/opencontainers/go-digest"
"github.com/opencontainers/image-spec/specs-go"
imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1"
@ -260,44 +259,9 @@ func (m *OCI1) ImageID(diffIDs []digest.Digest) (string, error) {
if err := m.Config.Digest.Validate(); err != nil {
return "", err
}
// If there is any layer that is using partial content, we calculate the image ID
// in a different way since the diffID cannot be validated as for regular pulled images.
for _, layer := range m.Layers {
toc, err := chunkedToc.GetTOCDigest(layer.Annotations)
if err != nil {
return "", fmt.Errorf("error looking up annotation for layer %q: %w", layer.Digest, err)
}
if toc != nil {
return m.calculateImageIDForPartialImage(diffIDs)
}
}
return m.Config.Digest.Hex(), nil
}
func (m *OCI1) calculateImageIDForPartialImage(diffIDs []digest.Digest) (string, error) {
newID := digest.Canonical.Digester()
for i, layer := range m.Layers {
diffID := diffIDs[i]
_, err := newID.Hash().Write([]byte(diffID.Hex()))
if err != nil {
return "", fmt.Errorf("error writing diffID %q: %w", diffID, err)
}
toc, err := chunkedToc.GetTOCDigest(layer.Annotations)
if err != nil {
return "", fmt.Errorf("error looking up annotation for layer %q: %w", layer.Digest, err)
}
if toc != nil {
_, err = newID.Hash().Write([]byte(toc.Hex()))
if err != nil {
return "", fmt.Errorf("error writing TOC %q: %w", toc, err)
}
}
}
return newID.Digest().Hex(), nil
}
// CanChangeLayerCompression returns true if we can compress/decompress layers with mimeType in the current image
// (and the code can handle that).
// NOTE: Even if this returns true, the relevant format might not accept all compression algorithms; the set of accepted


@ -6,7 +6,6 @@ import (
"io"
"os"
"github.com/containers/image/v5/internal/blobinfocache"
"github.com/containers/image/v5/internal/imagedestination"
"github.com/containers/image/v5/internal/imagedestination/impl"
"github.com/containers/image/v5/internal/private"
@ -120,8 +119,8 @@ func (d *ociArchiveImageDestination) PutBlobWithOptions(ctx context.Context, str
// It is available only if SupportsPutBlobPartial().
// Even if SupportsPutBlobPartial() returns true, the call can fail, in which case the caller
// should fall back to PutBlobWithOptions.
func (d *ociArchiveImageDestination) PutBlobPartial(ctx context.Context, chunkAccessor private.BlobChunkAccessor, srcInfo types.BlobInfo, cache blobinfocache.BlobInfoCache2) (private.UploadedBlob, error) {
return d.unpackedDest.PutBlobPartial(ctx, chunkAccessor, srcInfo, cache)
func (d *ociArchiveImageDestination) PutBlobPartial(ctx context.Context, chunkAccessor private.BlobChunkAccessor, srcInfo types.BlobInfo, options private.PutBlobPartialOptions) (private.UploadedBlob, error) {
return d.unpackedDest.PutBlobPartial(ctx, chunkAccessor, srcInfo, options)
}
// TryReusingBlobWithOptions checks whether the transport already contains, or can efficiently reuse, a blob, and if so, applies it to the current destination


@ -173,7 +173,7 @@ func (d *ociImageDestination) PutBlobWithOptions(ctx context.Context, stream io.
// If the blob has been successfully reused, returns (true, info, nil).
// If the transport can not reuse the requested blob, TryReusingBlob returns (false, {}, nil); it returns a non-nil error only on an unexpected failure.
func (d *ociImageDestination) TryReusingBlobWithOptions(ctx context.Context, info types.BlobInfo, options private.TryReusingBlobOptions) (bool, private.ReusedBlob, error) {
if !impl.OriginalBlobMatchesRequiredCompression(options) {
if !impl.OriginalCandidateMatchesTryReusingBlobOptions(options) {
return false, private.ReusedBlob{}, nil
}
if info.Digest == "" {


@ -12,7 +12,6 @@ import (
"github.com/containers/image/v5/docker"
"github.com/containers/image/v5/docker/reference"
"github.com/containers/image/v5/internal/blobinfocache"
"github.com/containers/image/v5/internal/imagedestination"
"github.com/containers/image/v5/internal/imagedestination/impl"
"github.com/containers/image/v5/internal/imagedestination/stubs"
@ -128,8 +127,8 @@ func (d *openshiftImageDestination) PutBlobWithOptions(ctx context.Context, stre
// It is available only if SupportsPutBlobPartial().
// Even if SupportsPutBlobPartial() returns true, the call can fail, in which case the caller
// should fall back to PutBlobWithOptions.
func (d *openshiftImageDestination) PutBlobPartial(ctx context.Context, chunkAccessor private.BlobChunkAccessor, srcInfo types.BlobInfo, cache blobinfocache.BlobInfoCache2) (private.UploadedBlob, error) {
return d.docker.PutBlobPartial(ctx, chunkAccessor, srcInfo, cache)
func (d *openshiftImageDestination) PutBlobPartial(ctx context.Context, chunkAccessor private.BlobChunkAccessor, srcInfo types.BlobInfo, options private.PutBlobPartialOptions) (private.UploadedBlob, error) {
return d.docker.PutBlobPartial(ctx, chunkAccessor, srcInfo, options)
}
// TryReusingBlobWithOptions checks whether the transport already contains, or can efficiently reuse, a blob, and if so, applies it to the current destination


@ -335,7 +335,7 @@ func (d *ostreeImageDestination) importConfig(repo *otbuiltin.Repo, blob *blobTo
// reflected in the manifest that will be written.
// If the transport can not reuse the requested blob, TryReusingBlob returns (false, {}, nil); it returns a non-nil error only on an unexpected failure.
func (d *ostreeImageDestination) TryReusingBlobWithOptions(ctx context.Context, info types.BlobInfo, options private.TryReusingBlobOptions) (bool, private.ReusedBlob, error) {
if !impl.OriginalBlobMatchesRequiredCompression(options) {
if !impl.OriginalCandidateMatchesTryReusingBlobOptions(options) {
return false, private.ReusedBlob{}, nil
}
if d.repo == nil {


@ -9,7 +9,6 @@ import (
"path/filepath"
"sync"
"github.com/containers/image/v5/internal/blobinfocache"
"github.com/containers/image/v5/internal/imagedestination"
"github.com/containers/image/v5/internal/imagedestination/impl"
"github.com/containers/image/v5/internal/private"
@ -227,8 +226,8 @@ func (d *blobCacheDestination) SupportsPutBlobPartial() bool {
// It is available only if SupportsPutBlobPartial().
// Even if SupportsPutBlobPartial() returns true, the call can fail, in which case the caller
// should fall back to PutBlobWithOptions.
func (d *blobCacheDestination) PutBlobPartial(ctx context.Context, chunkAccessor private.BlobChunkAccessor, srcInfo types.BlobInfo, cache blobinfocache.BlobInfoCache2) (private.UploadedBlob, error) {
return d.destination.PutBlobPartial(ctx, chunkAccessor, srcInfo, cache)
func (d *blobCacheDestination) PutBlobPartial(ctx context.Context, chunkAccessor private.BlobChunkAccessor, srcInfo types.BlobInfo, options private.PutBlobPartialOptions) (private.UploadedBlob, error) {
return d.destination.PutBlobPartial(ctx, chunkAccessor, srcInfo, options)
}
// TryReusingBlobWithOptions checks whether the transport already contains, or can efficiently reuse, a blob, and if so, applies it to the current destination
@ -237,7 +236,7 @@ func (d *blobCacheDestination) PutBlobPartial(ctx context.Context, chunkAccessor
// If the blob has been successfully reused, returns (true, info, nil).
// If the transport can not reuse the requested blob, TryReusingBlob returns (false, {}, nil); it returns a non-nil error only on an unexpected failure.
func (d *blobCacheDestination) TryReusingBlobWithOptions(ctx context.Context, info types.BlobInfo, options private.TryReusingBlobOptions) (bool, private.ReusedBlob, error) {
if !impl.OriginalBlobMatchesRequiredCompression(options) {
if !impl.OriginalCandidateMatchesTryReusingBlobOptions(options) {
return false, private.ReusedBlob{}, nil
}
present, reusedInfo, err := d.destination.TryReusingBlobWithOptions(ctx, info, options)


@ -219,6 +219,10 @@ func VerifyRekorSET(publicKey *ecdsa.PublicKey, unverifiedRekorSET []byte, unver
if hashedRekordV001.Data.Hash.Algorithm == nil {
return time.Time{}, NewInvalidSignatureError(`Missing "data.hash.algorithm" field in hashedrekord`)
}
// FIXME: Rekor 1.3.5 has added SHA-384 and SHA-512 as recognized values.
// Eventually we should support them as well; doing that cleanly would require updating to Rekor 1.3.5, which requires Go 1.21.
// Short-term, Cosign (as of 2024-02 and Cosign 2.2.3) only produces and accepts SHA-256, so right now that's not a compatibility
// issue.
if *hashedRekordV001.Data.Hash.Algorithm != models.HashedrekordV001SchemaDataHashAlgorithmSha256 {
return time.Time{}, NewInvalidSignatureError(fmt.Sprintf(`Unexpected "data.hash.algorithm" value %#v`, *hashedRekordV001.Data.Hash.Algorithm))
}


@ -16,7 +16,6 @@ import (
"sync/atomic"
"github.com/containers/image/v5/docker/reference"
"github.com/containers/image/v5/internal/blobinfocache"
"github.com/containers/image/v5/internal/imagedestination/impl"
"github.com/containers/image/v5/internal/imagedestination/stubs"
"github.com/containers/image/v5/internal/private"
@ -55,41 +54,61 @@ type storageImageDestination struct {
stubs.ImplementsPutBlobPartial
stubs.AlwaysSupportsSignatures
imageRef storageReference
directory string // Temporary directory where we store blobs until Commit() time
nextTempFileID atomic.Int32 // A counter that we use for computing filenames to assign to blobs
manifest []byte // Manifest contents, temporary
manifestDigest digest.Digest // Valid if len(manifest) != 0
signatures []byte // Signature contents, temporary
signatureses map[digest.Digest][]byte // Instance signature contents, temporary
SignatureSizes []int `json:"signature-sizes,omitempty"` // List of sizes of each signature slice
SignaturesSizes map[digest.Digest][]int `json:"signatures-sizes,omitempty"` // Sizes of each manifest's signature slice
imageRef storageReference
directory string // Temporary directory where we store blobs until Commit() time
nextTempFileID atomic.Int32 // A counter that we use for computing filenames to assign to blobs
manifest []byte // Manifest contents, temporary
manifestDigest digest.Digest // Valid if len(manifest) != 0
untrustedDiffIDValues []digest.Digest // From config's RootFS.DiffIDs, valid if not nil
signatures []byte // Signature contents, temporary
signatureses map[digest.Digest][]byte // Instance signature contents, temporary
metadata storageImageMetadata // Metadata contents being built
// A storage destination may be used concurrently. Accesses are
// serialized via a mutex. Please refer to the individual comments
// below for details.
lock sync.Mutex
// Mapping from layer (by index) to the associated ID in the storage.
// It's protected *implicitly* since `commitLayer()`, at any given
// time, can only be executed by *one* goroutine. Please refer to
// `queueOrCommit()` for further details on how the single-caller
// guarantee is implemented.
indexToStorageID map[int]*string
// All accesses to below data are protected by `lock` which is made
// *explicit* in the code.
uncompressedOrTocDigest map[digest.Digest]digest.Digest // Mapping from layer blobsums to their corresponding DiffIDs or TOC IDs.
fileSizes map[digest.Digest]int64 // Mapping from layer blobsums to their sizes
filenames map[digest.Digest]string // Mapping from layer blobsums to names of files we used to hold them
currentIndex int // The index of the layer to be committed (i.e., lower indices have already been committed)
indexToAddedLayerInfo map[int]addedLayerInfo // Mapping from layer (by index) to blob to add to the image
blobAdditionalLayer map[digest.Digest]storage.AdditionalLayer // Mapping from layer blobsums to their corresponding additional layer
diffOutputs map[digest.Digest]*graphdriver.DriverWithDifferOutput // Mapping from digest to differ output
indexToStorageID map[int]string
// A storage destination may be used concurrently, due to HasThreadSafePutBlob.
lock sync.Mutex // Protects lockProtected
lockProtected storageImageDestinationLockProtected
}
// storageImageDestinationLockProtected contains storageImageDestination data which might be
// accessed concurrently, due to HasThreadSafePutBlob.
// _During the concurrent TryReusingBlob/PutBlob/* calls_ (but not necessarily during the final Commit)
// users must hold storageImageDestination.lock.
type storageImageDestinationLockProtected struct {
currentIndex int // The index of the layer to be committed (i.e., lower indices have already been committed)
indexToAddedLayerInfo map[int]addedLayerInfo // Mapping from layer (by index) to blob to add to the image
// In general, a layer is identified either by (compressed) digest, or by TOC digest.
// When creating a layer, the c/storage layer metadata and image IDs must _only_ be based on trusted values
// we have computed ourselves. (Layer reuse can then look up against such trusted values, but it might not
// recompute those values for incoming layers; the point of the reuse is that we don't need to consume the incoming layer.)
// Layer identification: For a layer, at least one of indexToTOCDigest and blobDiffIDs must be available before commitLayer is called.
// The presence of an indexToTOCDigest is what decides how the layer is identified, i.e. which fields must be trusted.
blobDiffIDs map[digest.Digest]digest.Digest // Mapping from layer blobsums to their corresponding DiffIDs
indexToTOCDigest map[int]digest.Digest // Mapping from layer index to a TOC Digest, IFF the layer was created/found/reused by TOC digest
// Layer data: Before commitLayer is called, either at least one of (diffOutputs, blobAdditionalLayer, filenames)
// should be available; or indexToTOCDigest/blobDiffIDs should be enough to locate an existing c/storage layer.
// They are looked up in the order they are mentioned above.
diffOutputs map[int]*graphdriver.DriverWithDifferOutput // Mapping from layer index to a partially-pulled layer intermediate data
blobAdditionalLayer map[digest.Digest]storage.AdditionalLayer // Mapping from layer blobsums to their corresponding additional layer
// Mapping from layer blobsums to names of files we used to hold them. If set, fileSizes and blobDiffIDs must also be set.
filenames map[digest.Digest]string
// Mapping from layer blobsums to their sizes. If set, filenames and blobDiffIDs must also be set.
fileSizes map[digest.Digest]int64
}
// addedLayerInfo records data about a layer to use in this image.
type addedLayerInfo struct {
digest digest.Digest
emptyLayer bool // The layer is an “empty”/“throwaway” one, and may or may not be physically represented in various transport / storage systems. false if the manifest type does not have the concept.
digest digest.Digest // Mandatory, the digest of the layer.
emptyLayer bool // The layer is an “empty”/“throwaway” one, and may or may not be physically represented in various transport / storage systems. false if the manifest type does not have the concept.
}
// newImageDestination sets us up to write a new image, caching blobs in a temporary directory until
@ -117,18 +136,23 @@ func newImageDestination(sys *types.SystemContext, imageRef storageReference) (*
HasThreadSafePutBlob: true,
}),
imageRef: imageRef,
directory: directory,
signatureses: make(map[digest.Digest][]byte),
uncompressedOrTocDigest: make(map[digest.Digest]digest.Digest),
blobAdditionalLayer: make(map[digest.Digest]storage.AdditionalLayer),
fileSizes: make(map[digest.Digest]int64),
filenames: make(map[digest.Digest]string),
SignatureSizes: []int{},
SignaturesSizes: make(map[digest.Digest][]int),
indexToStorageID: make(map[int]*string),
indexToAddedLayerInfo: make(map[int]addedLayerInfo),
diffOutputs: make(map[digest.Digest]*graphdriver.DriverWithDifferOutput),
imageRef: imageRef,
directory: directory,
signatureses: make(map[digest.Digest][]byte),
metadata: storageImageMetadata{
SignatureSizes: []int{},
SignaturesSizes: make(map[digest.Digest][]int),
},
indexToStorageID: make(map[int]string),
lockProtected: storageImageDestinationLockProtected{
indexToAddedLayerInfo: make(map[int]addedLayerInfo),
blobDiffIDs: make(map[digest.Digest]digest.Digest),
indexToTOCDigest: make(map[int]digest.Digest),
diffOutputs: make(map[int]*graphdriver.DriverWithDifferOutput),
blobAdditionalLayer: make(map[digest.Digest]storage.AdditionalLayer),
filenames: make(map[digest.Digest]string),
fileSizes: make(map[digest.Digest]int64),
},
}
dest.Compat = impl.AddCompat(dest)
return dest, nil
@ -142,12 +166,13 @@ func (s *storageImageDestination) Reference() types.ImageReference {
// Close cleans up the temporary directory and additional layer store handlers.
func (s *storageImageDestination) Close() error {
for _, al := range s.blobAdditionalLayer {
// This is outside of the scope of HasThreadSafePutBlob, so we don't need to hold s.lock.
for _, al := range s.lockProtected.blobAdditionalLayer {
al.Release()
}
for _, v := range s.diffOutputs {
for _, v := range s.lockProtected.diffOutputs {
if v.Target != "" {
_ = s.imageRef.transport.store.CleanupStagingDirectory(v.Target)
_ = s.imageRef.transport.store.CleanupStagedLayer(v)
}
}
return os.RemoveAll(s.directory)
@ -227,9 +252,9 @@ func (s *storageImageDestination) putBlobToPendingFile(stream io.Reader, blobinf
// Record information about the blob.
s.lock.Lock()
s.uncompressedOrTocDigest[blobDigest] = diffID.Digest()
s.fileSizes[blobDigest] = counter.Count
s.filenames[blobDigest] = filename
s.lockProtected.blobDiffIDs[blobDigest] = diffID.Digest()
s.lockProtected.fileSizes[blobDigest] = counter.Count
s.lockProtected.filenames[blobDigest] = filename
s.lock.Unlock()
// This is safe because we have just computed diffID, and blobDigest was either computed
// by us, or validated by the caller (usually copy.digestingReader).
@ -269,7 +294,7 @@ func (f *zstdFetcher) GetBlobAt(chunks []chunked.ImageSourceChunk) (chan io.Read
// It is available only if SupportsPutBlobPartial().
// Even if SupportsPutBlobPartial() returns true, the call can fail, in which case the caller
// should fall back to PutBlobWithOptions.
func (s *storageImageDestination) PutBlobPartial(ctx context.Context, chunkAccessor private.BlobChunkAccessor, srcInfo types.BlobInfo, cache blobinfocache.BlobInfoCache2) (private.UploadedBlob, error) {
func (s *storageImageDestination) PutBlobPartial(ctx context.Context, chunkAccessor private.BlobChunkAccessor, srcInfo types.BlobInfo, options private.PutBlobPartialOptions) (private.UploadedBlob, error) {
fetcher := zstdFetcher{
chunkAccessor: chunkAccessor,
ctx: ctx,
@ -286,13 +311,25 @@ func (s *storageImageDestination) PutBlobPartial(ctx context.Context, chunkAcces
return private.UploadedBlob{}, err
}
if out.TOCDigest == "" && out.UncompressedDigest == "" {
return private.UploadedBlob{}, errors.New("internal error: ApplyDiffWithDiffer succeeded with neither TOCDigest nor UncompressedDigest set")
}
blobDigest := srcInfo.Digest
s.lock.Lock()
s.uncompressedOrTocDigest[blobDigest] = blobDigest
s.fileSizes[blobDigest] = 0
s.filenames[blobDigest] = ""
s.diffOutputs[blobDigest] = out
if out.UncompressedDigest != "" {
// The computation of UncompressedDigest means the whole layer has been consumed; while doing that, chunked.GetDiffer is
// responsible for ensuring blobDigest has been validated.
s.lockProtected.blobDiffIDs[blobDigest] = out.UncompressedDigest
} else {
// Don't identify layers by TOC if UncompressedDigest is available.
// - Using UncompressedDigest allows image reuse with non-partially-pulled layers
// - If UncompressedDigest has been computed, that means the layer was read completely, and the TOC has been created from scratch.
// That TOC is quite unlikely to match with any other TOC value.
s.lockProtected.indexToTOCDigest[options.LayerIndex] = out.TOCDigest
}
s.lockProtected.diffOutputs[options.LayerIndex] = out
s.lock.Unlock()
return private.UploadedBlob{
@ -307,7 +344,7 @@ func (s *storageImageDestination) PutBlobPartial(ctx context.Context, chunkAcces
// If the blob has been successfully reused, returns (true, info, nil).
// If the transport can not reuse the requested blob, TryReusingBlob returns (false, {}, nil); it returns a non-nil error only on an unexpected failure.
func (s *storageImageDestination) TryReusingBlobWithOptions(ctx context.Context, blobinfo types.BlobInfo, options private.TryReusingBlobOptions) (bool, private.ReusedBlob, error) {
if !impl.OriginalBlobMatchesRequiredCompression(options) {
if !impl.OriginalCandidateMatchesTryReusingBlobOptions(options) {
return false, private.ReusedBlob{}, nil
}
reused, info, err := s.tryReusingBlobAsPending(blobinfo.Digest, blobinfo.Size, &options)
@ -321,68 +358,79 @@ func (s *storageImageDestination) TryReusingBlobWithOptions(ctx context.Context,
})
}
// tryReusingBlobAsPending implements TryReusingBlobWithOptions for (digest, size or -1), filling s.uncompressedOrTocDigest and other metadata.
// tryReusingBlobAsPending implements TryReusingBlobWithOptions for (blobDigest, size or -1), filling s.blobDiffIDs and other metadata.
// The caller must arrange the blob to be eventually committed using s.commitLayer().
func (s *storageImageDestination) tryReusingBlobAsPending(digest digest.Digest, size int64, options *private.TryReusingBlobOptions) (bool, private.ReusedBlob, error) {
func (s *storageImageDestination) tryReusingBlobAsPending(blobDigest digest.Digest, size int64, options *private.TryReusingBlobOptions) (bool, private.ReusedBlob, error) {
// lock the entire method as it executes fairly quickly
s.lock.Lock()
defer s.lock.Unlock()
if options.SrcRef != nil {
// Check if we have the layer in the underlying additional layer store.
aLayer, err := s.imageRef.transport.store.LookupAdditionalLayer(digest, options.SrcRef.String())
aLayer, err := s.imageRef.transport.store.LookupAdditionalLayer(blobDigest, options.SrcRef.String())
if err != nil && !errors.Is(err, storage.ErrLayerUnknown) {
return false, private.ReusedBlob{}, fmt.Errorf(`looking for compressed layers with digest %q and labels: %w`, digest, err)
return false, private.ReusedBlob{}, fmt.Errorf(`looking for compressed layers with digest %q and labels: %w`, blobDigest, err)
} else if err == nil {
// Record the uncompressed value so that we can use it to calculate layer IDs.
s.uncompressedOrTocDigest[digest] = aLayer.UncompressedDigest()
s.blobAdditionalLayer[digest] = aLayer
s.lockProtected.blobDiffIDs[blobDigest] = aLayer.UncompressedDigest()
s.lockProtected.blobAdditionalLayer[blobDigest] = aLayer
return true, private.ReusedBlob{
Digest: digest,
Digest: blobDigest,
Size: aLayer.CompressedSize(),
}, nil
}
}
if digest == "" {
if blobDigest == "" {
return false, private.ReusedBlob{}, errors.New(`Can not check for a blob with unknown digest`)
}
if err := digest.Validate(); err != nil {
if err := blobDigest.Validate(); err != nil {
return false, private.ReusedBlob{}, fmt.Errorf("Can not check for a blob with invalid digest: %w", err)
}
if options.TOCDigest != "" {
if err := options.TOCDigest.Validate(); err != nil {
return false, private.ReusedBlob{}, fmt.Errorf("Can not check for a blob with invalid digest: %w", err)
}
}
// Check if we have a wasn't-compressed layer in storage that's based on that blob.
// Check if we've already cached it in a file.
if size, ok := s.fileSizes[digest]; ok {
if size, ok := s.lockProtected.fileSizes[blobDigest]; ok {
// s.lockProtected.blobDiffIDs is set either by putBlobToPendingFile or in createNewLayer when creating the
// filenames/fileSizes entry.
return true, private.ReusedBlob{
Digest: digest,
Digest: blobDigest,
Size: size,
}, nil
}
// Check if we have a wasn't-compressed layer in storage that's based on that blob.
layers, err := s.imageRef.transport.store.LayersByUncompressedDigest(digest)
layers, err := s.imageRef.transport.store.LayersByUncompressedDigest(blobDigest)
if err != nil && !errors.Is(err, storage.ErrLayerUnknown) {
return false, private.ReusedBlob{}, fmt.Errorf(`looking for layers with digest %q: %w`, digest, err)
return false, private.ReusedBlob{}, fmt.Errorf(`looking for layers with digest %q: %w`, blobDigest, err)
}
if len(layers) > 0 {
// Save this for completeness.
s.uncompressedOrTocDigest[digest] = layers[0].UncompressedDigest
s.lockProtected.blobDiffIDs[blobDigest] = blobDigest
return true, private.ReusedBlob{
Digest: digest,
Digest: blobDigest,
Size: layers[0].UncompressedSize,
}, nil
}
// Check if we have a was-compressed layer in storage that's based on that blob.
layers, err = s.imageRef.transport.store.LayersByCompressedDigest(digest)
layers, err = s.imageRef.transport.store.LayersByCompressedDigest(blobDigest)
if err != nil && !errors.Is(err, storage.ErrLayerUnknown) {
return false, private.ReusedBlob{}, fmt.Errorf(`looking for compressed layers with digest %q: %w`, digest, err)
return false, private.ReusedBlob{}, fmt.Errorf(`looking for compressed layers with digest %q: %w`, blobDigest, err)
}
if len(layers) > 0 {
// Record the uncompressed value so that we can use it to calculate layer IDs.
s.uncompressedOrTocDigest[digest] = layers[0].UncompressedDigest
// LayersByCompressedDigest only finds layers which were created from a full layer blob, and extracting that
// always sets UncompressedDigest.
diffID := layers[0].UncompressedDigest
if diffID == "" {
return false, private.ReusedBlob{}, fmt.Errorf("internal error: compressed layer %q (for compressed digest %q) does not have an uncompressed digest", layers[0].ID, blobDigest.String())
}
s.lockProtected.blobDiffIDs[blobDigest] = diffID
return true, private.ReusedBlob{
Digest: digest,
Digest: blobDigest,
Size: layers[0].CompressedSize,
}, nil
}
@ -391,23 +439,23 @@ func (s *storageImageDestination) tryReusingBlobAsPending(digest digest.Digest,
// Because we must return the size, which is unknown for unavailable compressed blobs, the returned BlobInfo refers to the
// uncompressed layer, and that can happen only if options.CanSubstitute, or if the incoming manifest already specifies the size.
if options.CanSubstitute || size != -1 {
if uncompressedDigest := options.Cache.UncompressedDigest(digest); uncompressedDigest != "" && uncompressedDigest != digest {
if uncompressedDigest := options.Cache.UncompressedDigest(blobDigest); uncompressedDigest != "" && uncompressedDigest != blobDigest {
layers, err := s.imageRef.transport.store.LayersByUncompressedDigest(uncompressedDigest)
if err != nil && !errors.Is(err, storage.ErrLayerUnknown) {
return false, private.ReusedBlob{}, fmt.Errorf(`looking for layers with digest %q: %w`, uncompressedDigest, err)
}
if len(layers) > 0 {
if size != -1 {
s.uncompressedOrTocDigest[digest] = layers[0].UncompressedDigest
s.lockProtected.blobDiffIDs[blobDigest] = uncompressedDigest
return true, private.ReusedBlob{
Digest: digest,
Digest: blobDigest,
Size: size,
}, nil
}
if !options.CanSubstitute {
return false, private.ReusedBlob{}, fmt.Errorf("Internal error: options.CanSubstitute was expected to be true for blob with digest %s", digest)
return false, private.ReusedBlob{}, fmt.Errorf("Internal error: options.CanSubstitute was expected to be true for blob with digest %s", blobDigest)
}
s.uncompressedOrTocDigest[uncompressedDigest] = layers[0].UncompressedDigest
s.lockProtected.blobDiffIDs[uncompressedDigest] = uncompressedDigest
return true, private.ReusedBlob{
Digest: uncompressedDigest,
Size: layers[0].UncompressedSize,
@ -416,23 +464,30 @@ func (s *storageImageDestination) tryReusingBlobAsPending(digest digest.Digest,
}
}
tocDigest := digest
if options.TOCDigest != nil {
tocDigest = *options.TOCDigest
}
if options.TOCDigest != "" && options.LayerIndex != nil {
// Check if we have a chunked layer in storage with the same TOC digest.
layers, err := s.imageRef.transport.store.LayersByTOCDigest(options.TOCDigest)
// Check if we have a chunked layer in storage with the same TOC digest.
layers, err = s.imageRef.transport.store.LayersByTOCDigest(tocDigest)
if err != nil && !errors.Is(err, storage.ErrLayerUnknown) {
return false, private.ReusedBlob{}, fmt.Errorf(`looking for layers with TOC digest %q: %w`, tocDigest, err)
}
if len(layers) > 0 {
// Save this for completeness.
s.uncompressedOrTocDigest[digest] = layers[0].TOCDigest
return true, private.ReusedBlob{
Digest: layers[0].TOCDigest,
Size: layers[0].UncompressedSize,
}, nil
if err != nil && !errors.Is(err, storage.ErrLayerUnknown) {
return false, private.ReusedBlob{}, fmt.Errorf(`looking for layers with TOC digest %q: %w`, options.TOCDigest, err)
}
if len(layers) > 0 {
if size != -1 {
s.lockProtected.indexToTOCDigest[*options.LayerIndex] = options.TOCDigest
return true, private.ReusedBlob{
Digest: blobDigest,
Size: size,
MatchedByTOCDigest: true,
}, nil
} else if options.CanSubstitute && layers[0].UncompressedDigest != "" {
s.lockProtected.indexToTOCDigest[*options.LayerIndex] = options.TOCDigest
return true, private.ReusedBlob{
Digest: layers[0].UncompressedDigest,
Size: layers[0].UncompressedSize,
MatchedByTOCDigest: true,
}, nil
}
}
}
// Nope, we don't have it.
@ -444,6 +499,8 @@ func (s *storageImageDestination) tryReusingBlobAsPending(digest digest.Digest,
// that since we don't have a recommendation, a random ID should be used if one needs
// to be allocated.
func (s *storageImageDestination) computeID(m manifest.Manifest) string {
// This is outside of the scope of HasThreadSafePutBlob, so we don't need to hold s.lock.
// Build the diffID list. We need the decompressed sums that we've been calculating to
// fill in the DiffIDs. It's expected (but not enforced by us) that the number of
// diffIDs corresponds to the number of non-EmptyLayer entries in the history.
@ -457,28 +514,59 @@ func (s *storageImageDestination) computeID(m manifest.Manifest) string {
continue
}
blobSum := m.FSLayers[i].BlobSum
diffID, ok := s.uncompressedOrTocDigest[blobSum]
diffID, ok := s.lockProtected.blobDiffIDs[blobSum]
if !ok {
// this can, in principle, legitimately happen when a layer is reused by TOC.
logrus.Infof("error looking up diffID for layer %q", blobSum.String())
return ""
}
diffIDs = append([]digest.Digest{diffID}, diffIDs...)
}
case *manifest.Schema2:
case *manifest.Schema2, *manifest.OCI1:
// We know the ID calculation doesn't actually use the diffIDs, so we don't need to populate
// the diffID list.
case *manifest.OCI1:
for _, l := range m.Layers {
diffIDs = append(diffIDs, l.Digest)
}
default:
return ""
}
id, err := m.ImageID(diffIDs)
// We want to use the same ID for “the same” images, but without risking unwanted sharing / malicious image corruption.
//
// Traditionally that means the same ~config digest, as computed by m.ImageID;
// but if we pull a layer by TOC, we verify the layer against neither the (compressed) blob digest in the manifest,
// nor against the config's RootFS.DiffIDs. We don't really want to do either, to allow partial layer pulls where we never see
// most of the data.
//
// So, if a layer is pulled by TOC (and we do validate against the TOC), the fact that we used the TOC, and the value of the TOC,
// must enter into the image ID computation.
// But for images where no TOC was used, continue to use IDs computed the traditional way, to maximize image reuse on upgrades,
// and to introduce the changed behavior only when partial pulls are used.
//
// Note that it's not 100% guaranteed that an image pulled by TOC uses an OCI manifest; consider
// (skopeo copy --format v2s2 docker://…/zstd-chunked-image containers-storage:… ). So this is not happening only in the OCI case above.
ordinaryImageID, err := m.ImageID(diffIDs)
if err != nil {
return ""
}
return id
tocIDInput := ""
hasLayerPulledByTOC := false
for i := range m.LayerInfos() {
layerValue := "" // An empty string is not a valid digest, so this is unambiguous with the TOC case.
tocDigest, ok := s.lockProtected.indexToTOCDigest[i] // "" if not a TOC
if ok {
hasLayerPulledByTOC = true
layerValue = tocDigest.String()
}
tocIDInput += layerValue + "|" // "|" can not be present in a TOC digest, so this is an unambiguous separator.
}
if !hasLayerPulledByTOC {
return ordinaryImageID
}
// ordinaryImageID is a digest of a config, which is a JSON value.
// To avoid the risk of collisions, start the input with @ so that the input is not a valid JSON.
tocImageID := digest.FromString("@With TOC:" + tocIDInput).Hex()
logrus.Debugf("Ordinary storage image ID %s; a layer was looked up by TOC, so using image ID %s", ordinaryImageID, tocImageID)
return tocImageID
}
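To make the separator scheme concrete, here is a tiny self-contained sketch (made-up digest value, not part of the diff) that reproduces the ID input for a three-layer image where only layer index 1 was pulled by TOC:

package main

import (
	"fmt"

	"github.com/opencontainers/go-digest"
)

func main() {
	// Per-layer values: "" for layers identified the traditional way, the TOC digest otherwise.
	perLayer := []string{"", "sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef", ""}
	input := ""
	for _, v := range perLayer {
		input += v + "|" // "|" cannot appear in a digest, so the concatenation stays unambiguous
	}
	// The "@" prefix makes the input invalid JSON, so it cannot collide with a config-based ID.
	fmt.Println(digest.FromString("@With TOC:" + input).Hex())
}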
// getConfigBlob exists only to let us retrieve the configuration blob so that the manifest package can dig
@ -491,7 +579,7 @@ func (s *storageImageDestination) getConfigBlob(info types.BlobInfo) ([]byte, er
return nil, fmt.Errorf("invalid digest supplied when reading blob: %w", err)
}
// Assume it's a file, since we're only calling this from a place that expects to read files.
if filename, ok := s.filenames[info.Digest]; ok {
if filename, ok := s.lockProtected.filenames[info.Digest]; ok {
contents, err2 := os.ReadFile(filename)
if err2 != nil {
return nil, fmt.Errorf(`reading blob from file %q: %w`, filename, err2)
@ -525,17 +613,17 @@ func (s *storageImageDestination) queueOrCommit(index int, info addedLayerInfo)
// caller is the "worker" routine committing layers. All other routines
// can continue pulling and queuing in layers.
s.lock.Lock()
s.indexToAddedLayerInfo[index] = info
s.lockProtected.indexToAddedLayerInfo[index] = info
// We're still waiting for at least one previous/parent layer to be
// committed, so there's nothing to do.
if index != s.currentIndex {
if index != s.lockProtected.currentIndex {
s.lock.Unlock()
return nil
}
for {
info, ok := s.indexToAddedLayerInfo[index]
info, ok := s.lockProtected.indexToAddedLayerInfo[index]
if !ok {
break
}
@ -550,25 +638,30 @@ func (s *storageImageDestination) queueOrCommit(index int, info addedLayerInfo)
// Set the index at the very end to make sure that only one routine
// enters stage 2).
s.currentIndex = index
s.lockProtected.currentIndex = index
s.lock.Unlock()
return nil
}
// getDiffIDOrTOCDigest returns the diffID for the specified digest or the digest for the TOC, if known.
func (s *storageImageDestination) getDiffIDOrTOCDigest(uncompressedDigest digest.Digest) (digest.Digest, bool) {
// singleLayerIDComponent returns a single layer's input to computing a layer (chain) ID,
// and an indication whether the input already has the shape of a layer ID.
// It returns ("", false) if the layer is not found at all (which should never happen).
func (s *storageImageDestination) singleLayerIDComponent(layerIndex int, blobDigest digest.Digest) (string, bool) {
s.lock.Lock()
defer s.lock.Unlock()
if d, found := s.diffOutputs[uncompressedDigest]; found {
return d.TOCDigest, found
if d, found := s.lockProtected.indexToTOCDigest[layerIndex]; found {
return "@TOC=" + d.Hex(), false // "@" is not a valid start of a digest.Digest, so this is unambiguous.
}
d, found := s.uncompressedOrTocDigest[uncompressedDigest]
return d, found
if d, found := s.lockProtected.blobDiffIDs[blobDigest]; found {
return d.Hex(), true // This looks like chain IDs, and it uses the traditional value.
}
return "", false
}
// commitLayer commits the specified layer with the given index to the storage.
// size can usually be -1; it can be provided if the layer is not known to be already present in uncompressedOrTocDigest.
// size can usually be -1; it can be provided if the layer is not known to be already present in blobDiffIDs.
//
// If the layer cannot be committed yet, the function returns (true, nil).
//
@ -586,29 +679,38 @@ func (s *storageImageDestination) commitLayer(index int, info addedLayerInfo, si
// Start with an empty string or the previous layer ID. Note that
// `s.indexToStorageID` can only be accessed by *one* goroutine at any
// given time. Hence, we don't need to lock accesses.
var lastLayer string
if prev := s.indexToStorageID[index-1]; prev != nil {
lastLayer = *prev
var parentLayer string
if index != 0 {
prev, ok := s.indexToStorageID[index-1]
if !ok {
return false, fmt.Errorf("Internal error: commitLayer called with previous layer %d not committed yet", index-1)
}
parentLayer = prev
}
// Carry over the previous ID for empty non-base layers.
if info.emptyLayer {
s.indexToStorageID[index] = &lastLayer
s.indexToStorageID[index] = parentLayer
return false, nil
}
// Check if there's already a layer with the ID that we'd give to the result of applying
// this layer blob to its parent, if it has one, or the blob's hex value otherwise.
// The diffIDOrTOCDigest refers either to the DiffID or the digest of the TOC.
diffIDOrTOCDigest, haveDiffIDOrTOCDigest := s.getDiffIDOrTOCDigest(info.digest)
if !haveDiffIDOrTOCDigest {
// Check if it's elsewhere and the caller just forgot to pass it to us in a PutBlob(),
// or to even check if we had it.
// Use none.NoCache to avoid a repeated DiffID lookup in the BlobInfoCache; a caller
// The layerID refers either to the DiffID or the digest of the TOC.
layerIDComponent, layerIDComponentStandalone := s.singleLayerIDComponent(index, info.digest)
if layerIDComponent == "" {
// Check if it's elsewhere and the caller just forgot to pass it to us in a PutBlob() / TryReusingBlob() / …
//
// Use none.NoCache to avoid a repeated DiffID lookup in the BlobInfoCache: a caller
// that relies on using a blob digest that has never been seen by the store had better call
// TryReusingBlob; not calling PutBlob already violates the documented API, so there's only
// so far we are going to accommodate that (if we should be doing that at all).
logrus.Debugf("looking for diffID or TOC digest for blob %+v", info.digest)
//
// We are also ignoring lookups by TOC, and other non-trivial situations.
// Those can only happen using the c/image/internal/private API,
// so those internal callers should be fixed to follow the API instead of expanding this fallback.
logrus.Debugf("looking for diffID for blob=%+v", info.digest)
// Use tryReusingBlobAsPending, not the top-level TryReusingBlobWithOptions, to prevent recursion via queueOrCommit.
has, _, err := s.tryReusingBlobAsPending(info.digest, size, &private.TryReusingBlobOptions{
Cache: none.NoCache,
@ -618,115 +720,140 @@ func (s *storageImageDestination) commitLayer(index int, info addedLayerInfo, si
return false, fmt.Errorf("checking for a layer based on blob %q: %w", info.digest.String(), err)
}
if !has {
return false, fmt.Errorf("error determining uncompressed digest or TOC digest for blob %q", info.digest.String())
return false, fmt.Errorf("error determining uncompressed digest for blob %q", info.digest.String())
}
diffIDOrTOCDigest, haveDiffIDOrTOCDigest = s.getDiffIDOrTOCDigest(info.digest)
if !haveDiffIDOrTOCDigest {
return false, fmt.Errorf("we have blob %q, but don't know its uncompressed or TOC digest", info.digest.String())
layerIDComponent, layerIDComponentStandalone = s.singleLayerIDComponent(index, info.digest)
if layerIDComponent == "" {
return false, fmt.Errorf("we have blob %q, but don't know its layer ID", info.digest.String())
}
}
id := diffIDOrTOCDigest.Hex()
if lastLayer != "" {
id = digest.Canonical.FromBytes([]byte(lastLayer + "+" + diffIDOrTOCDigest.Hex())).Hex()
id := layerIDComponent
if !layerIDComponentStandalone || parentLayer != "" {
id = digest.Canonical.FromString(parentLayer + "+" + layerIDComponent).Hex()
}
if layer, err2 := s.imageRef.transport.store.Layer(id); layer != nil && err2 == nil {
// There's already a layer that should have the right contents, just reuse it.
lastLayer = layer.ID
s.indexToStorageID[index] = &lastLayer
s.indexToStorageID[index] = layer.ID
return false, nil
}
layer, err := s.createNewLayer(index, info.digest, parentLayer, id)
if err != nil {
return false, err
}
if layer == nil {
return true, nil
}
s.indexToStorageID[index] = layer.ID
return false, nil
}
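// Illustrative sketch (not part of this diff): how a caller consumes
// commitLayer's (bool, error) contract. A true first return value means the
// layer cannot be committed yet (typically because the manifest is not yet
// known), not a failure; the queueOrCommit machinery in this file retries
// such layers after PutManifest runs. exampleCommit is hypothetical.
func exampleCommit(s *storageImageDestination, index int, info addedLayerInfo) error {
	stillPending, err := s.commitLayer(index, info, -1) // -1: size not needed here
	if err != nil {
		return err
	}
	if stillPending {
		// Hypothetical: remember (index, info) and call commitLayer again
		// once the manifest is available.
	}
	return nil
}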
// createNewLayer creates a new layer newLayerID for (index, layerDigest) on top of parentLayer (which may be "").
// If the layer cannot be committed yet, the function returns (nil, nil).
func (s *storageImageDestination) createNewLayer(index int, layerDigest digest.Digest, parentLayer, newLayerID string) (*storage.Layer, error) {
s.lock.Lock()
diffOutput, ok := s.diffOutputs[info.digest]
diffOutput, ok := s.lockProtected.diffOutputs[index]
s.lock.Unlock()
if ok {
if s.manifest == nil {
logrus.Debugf("Skipping commit for TOC=%q, manifest not yet available", id)
return true, nil
var untrustedUncompressedDigest digest.Digest
if diffOutput.UncompressedDigest == "" {
d, err := s.untrustedLayerDiffID(index)
if err != nil {
return nil, err
}
if d == "" {
logrus.Debugf("Skipping commit for layer %q, manifest not yet available", newLayerID)
return nil, nil
}
untrustedUncompressedDigest = d
}
man, err := manifest.FromBlob(s.manifest, manifest.GuessMIMEType(s.manifest))
if err != nil {
return false, fmt.Errorf("parsing manifest: %w", err)
}
cb, err := s.getConfigBlob(man.ConfigInfo())
if err != nil {
return false, err
}
// retrieve the expected uncompressed digest from the config blob.
configOCI := &imgspecv1.Image{}
if err := json.Unmarshal(cb, configOCI); err != nil {
return false, err
}
if index >= len(configOCI.RootFS.DiffIDs) {
return false, fmt.Errorf("index %d out of range for configOCI.RootFS.DiffIDs", index)
}
layer, err := s.imageRef.transport.store.CreateLayer(id, lastLayer, nil, "", false, nil)
if err != nil {
return false, err
}
// let the storage layer know what was the original uncompressed layer.
flags := make(map[string]interface{})
flags[expectedLayerDiffIDFlag] = configOCI.RootFS.DiffIDs[index]
logrus.Debugf("Setting uncompressed digest to %q for layer %q", configOCI.RootFS.DiffIDs[index], id)
options := &graphdriver.ApplyDiffWithDifferOpts{
Flags: flags,
if untrustedUncompressedDigest != "" {
flags[expectedLayerDiffIDFlag] = untrustedUncompressedDigest
logrus.Debugf("Setting uncompressed digest to %q for layer %q", untrustedUncompressedDigest, newLayerID)
}
if err := s.imageRef.transport.store.ApplyDiffFromStagingDirectory(layer.ID, diffOutput.Target, diffOutput, options); err != nil {
_ = s.imageRef.transport.store.Delete(layer.ID)
return false, err
}
args := storage.ApplyStagedLayerOptions{
ID: newLayerID,
ParentLayer: parentLayer,
s.indexToStorageID[index] = &layer.ID
return false, nil
DiffOutput: diffOutput,
DiffOptions: &graphdriver.ApplyDiffWithDifferOpts{
Flags: flags,
},
}
layer, err := s.imageRef.transport.store.ApplyStagedLayer(args)
if err != nil && !errors.Is(err, storage.ErrDuplicateID) {
return nil, fmt.Errorf("failed to put layer using a partial pull: %w", err)
}
return layer, nil
}
s.lock.Lock()
al, ok := s.blobAdditionalLayer[info.digest]
al, ok := s.lockProtected.blobAdditionalLayer[layerDigest]
s.lock.Unlock()
if ok {
layer, err := al.PutAs(id, lastLayer, nil)
layer, err := al.PutAs(newLayerID, parentLayer, nil)
if err != nil && !errors.Is(err, storage.ErrDuplicateID) {
return false, fmt.Errorf("failed to put layer from digest and labels: %w", err)
return nil, fmt.Errorf("failed to put layer from digest and labels: %w", err)
}
lastLayer = layer.ID
s.indexToStorageID[index] = &lastLayer
return false, nil
return layer, nil
}
// Check if we previously cached a file with that blob's contents. If we didn't,
// then we need to read the desired contents from a layer.
var trustedUncompressedDigest, trustedOriginalDigest digest.Digest // For storage.LayerOptions
s.lock.Lock()
filename, ok := s.filenames[info.digest]
tocDigest := s.lockProtected.indexToTOCDigest[index] // "" if not set
optionalDiffID := s.lockProtected.blobDiffIDs[layerDigest] // "" if not set
filename, gotFilename := s.lockProtected.filenames[layerDigest]
s.lock.Unlock()
if !ok {
// Try to find the layer with contents matching that blobsum.
layer := ""
layers, err2 := s.imageRef.transport.store.LayersByUncompressedDigest(diffIDOrTOCDigest)
if err2 == nil && len(layers) > 0 {
layer = layers[0].ID
} else {
layers, err2 = s.imageRef.transport.store.LayersByCompressedDigest(info.digest)
if gotFilename && tocDigest == "" {
// If tocDigest != "", if we now happen to find a layerDigest match, the newLayerID has already been computed as TOC-based,
// and we don't know the relationship of the layerDigest and TOC digest.
// We could recompute newLayerID to be DiffID-based and use the file, but such a within-image layer
// reuse is expected to be pretty rare; instead, ignore the unexpected file match and proceed to the
// originally-planned TOC match.
// Because tocDigest == "", optionaldiffID must have been set; and even if it werent, PutLayer will recompute the digest from the stream.
trustedUncompressedDigest = optionalDiffID
trustedOriginalDigest = layerDigest // The code setting .filenames[layerDigest] is responsible for the contents matching.
} else {
// Try to find the layer with contents matching the data we use.
var layer *storage.Layer // = nil
if tocDigest != "" {
layers, err2 := s.imageRef.transport.store.LayersByTOCDigest(tocDigest)
if err2 == nil && len(layers) > 0 {
layer = layers[0].ID
layer = &layers[0]
} else {
return nil, fmt.Errorf("locating layer for TOC digest %q: %w", tocDigest, err2)
}
} else {
// Because tocDigest == "", optionaldiffID must have been set
layers, err2 := s.imageRef.transport.store.LayersByUncompressedDigest(optionalDiffID)
if err2 == nil && len(layers) > 0 {
layer = &layers[0]
} else {
layers, err2 = s.imageRef.transport.store.LayersByCompressedDigest(layerDigest)
if err2 == nil && len(layers) > 0 {
layer = &layers[0]
}
}
if layer == nil {
return nil, fmt.Errorf("locating layer for blob %q: %w", layerDigest, err2)
}
}
if layer == "" {
return false, fmt.Errorf("locating layer for blob %q: %w", info.digest, err2)
}
// Read the layer's contents.
noCompression := archive.Uncompressed
diffOptions := &storage.DiffOptions{
Compression: &noCompression,
}
diff, err2 := s.imageRef.transport.store.Diff("", layer, diffOptions)
diff, err2 := s.imageRef.transport.store.Diff("", layer.ID, diffOptions)
if err2 != nil {
return false, fmt.Errorf("reading layer %q for blob %q: %w", layer, info.digest, err2)
return nil, fmt.Errorf("reading layer %q for blob %q: %w", layer.ID, layerDigest, err2)
}
// Copy the layer diff to a file. Diff() takes a lock that it holds
// until the ReadCloser that it returns is closed, and PutLayer() wants
@ -736,41 +863,112 @@ func (s *storageImageDestination) commitLayer(index int, info addedLayerInfo, si
file, err := os.OpenFile(filename, os.O_CREATE|os.O_TRUNC|os.O_WRONLY|os.O_EXCL, 0o600)
if err != nil {
diff.Close()
return false, fmt.Errorf("creating temporary file %q: %w", filename, err)
return nil, fmt.Errorf("creating temporary file %q: %w", filename, err)
}
// Copy the data to the file.
// TODO: This can take quite some time, and should ideally be cancellable using
// ctx.Done().
_, err = io.Copy(file, diff)
fileSize, err := io.Copy(file, diff)
diff.Close()
file.Close()
if err != nil {
return false, fmt.Errorf("storing blob to file %q: %w", filename, err)
return nil, fmt.Errorf("storing blob to file %q: %w", filename, err)
}
if optionalDiffID == "" && layer.UncompressedDigest != "" {
optionalDiffID = layer.UncompressedDigest
}
// The stream we have is uncompressed, so this digest matches the contents of the stream.
// If tocDigest != "", trustedUncompressedDigest might still be ""; in that case PutLayer will compute the value from the stream.
trustedUncompressedDigest = optionalDiffID
// FIXME? trustedOriginalDigest could be set to layerDigest IF tocDigest == "" (otherwise layerDigest is untrusted).
// But for c/storage to reasonably use it (as a CompressedDigest value), we should also ensure the CompressedSize of the created
// layer is correct, and the API does not currently make it possible (.CompressedSize is set from the input stream).
//
// We can legitimately set storage.LayerOptions.OriginalDigest to "",
// but that would just result in PutLayer computing the digest of the input stream == optionalDiffID.
// So, instead, set .OriginalDigest to the value we know already, to avoid that digest computation.
trustedOriginalDigest = optionalDiffID
// Allow using the already-collected layer contents without extracting the layer again.
//
// This only matches against the uncompressed digest.
// We don't have the original compressed data here to trivially set filenames[layerDigest].
// In particular we can't achieve the correct Layer.CompressedSize value with the current c/storage API.
// Within-image layer reuse is probably very rare, for now we prefer to avoid that complexity.
if trustedUncompressedDigest != "" {
s.lock.Lock()
s.lockProtected.blobDiffIDs[trustedUncompressedDigest] = trustedUncompressedDigest
s.lockProtected.filenames[trustedUncompressedDigest] = filename
s.lockProtected.fileSizes[trustedUncompressedDigest] = fileSize
s.lock.Unlock()
}
// Make sure that we can find this file later, should we need the layer's
// contents again.
s.lock.Lock()
s.filenames[info.digest] = filename
s.lock.Unlock()
}
// Read the cached blob and use it as a diff.
file, err := os.Open(filename)
if err != nil {
return false, fmt.Errorf("opening file %q: %w", filename, err)
return nil, fmt.Errorf("opening file %q: %w", filename, err)
}
defer file.Close()
// Build the new layer using the diff, regardless of where it came from.
// TODO: This can take quite some time, and should ideally be cancellable using ctx.Done().
layer, _, err := s.imageRef.transport.store.PutLayer(id, lastLayer, nil, "", false, &storage.LayerOptions{
OriginalDigest: info.digest,
UncompressedDigest: diffIDOrTOCDigest,
layer, _, err := s.imageRef.transport.store.PutLayer(newLayerID, parentLayer, nil, "", false, &storage.LayerOptions{
OriginalDigest: trustedOriginalDigest,
UncompressedDigest: trustedUncompressedDigest,
}, file)
if err != nil && !errors.Is(err, storage.ErrDuplicateID) {
return false, fmt.Errorf("adding layer with blob %q: %w", info.digest, err)
return nil, fmt.Errorf("adding layer with blob %q: %w", layerDigest, err)
}
return layer, nil
}
// untrustedLayerDiffID returns a DiffID value for layerIndex from the images config.
// If the value is not yet available (but it can be available after s.manifest is set), it returns ("", nil).
// WARNING: We don't validate the DiffID value against the layer contents; it must not be used for any deduplication.
func (s *storageImageDestination) untrustedLayerDiffID(layerIndex int) (digest.Digest, error) {
// At this point, we are either inside the multi-threaded scope of HasThreadSafePutBlob, and
// nothing is writing to s.manifest yet, or PutManifest has been called and s.manifest != nil.
// Either way this function does not need the protection of s.lock.
if s.manifest == nil {
logrus.Debugf("Skipping commit for layer %d, manifest not yet available", layerIndex)
return "", nil
}
s.indexToStorageID[index] = &layer.ID
return false, nil
if s.untrustedDiffIDValues == nil {
mt := manifest.GuessMIMEType(s.manifest)
if mt != imgspecv1.MediaTypeImageManifest {
// We could, in principle, build an ImageSource, support arbitrary image formats using image.FromUnparsedImage,
// and then use types.Image.OCIConfig so that we can parse the image.
//
// In practice, this should, right now, only matter for pulls of OCI images (this code path implies that a layer has annotations),
// while converting to a non-OCI format, using a manual (skopeo copy) or something similar, not (podman pull).
// So it is not implemented yet.
return "", fmt.Errorf("determining DiffID for manifest type %q is not yet supported", mt)
}
man, err := manifest.FromBlob(s.manifest, mt)
if err != nil {
return "", fmt.Errorf("parsing manifest: %w", err)
}
cb, err := s.getConfigBlob(man.ConfigInfo())
if err != nil {
return "", err
}
// retrieve the expected uncompressed digest from the config blob.
configOCI := &imgspecv1.Image{}
if err := json.Unmarshal(cb, configOCI); err != nil {
return "", err
}
s.untrustedDiffIDValues = slices.Clone(configOCI.RootFS.DiffIDs)
if s.untrustedDiffIDValues == nil { // Unlikely but possible in theory…
s.untrustedDiffIDValues = []digest.Digest{}
}
}
if layerIndex >= len(s.untrustedDiffIDValues) {
return "", fmt.Errorf("image config has only %d DiffID values, but a layer with index %d exists", len(s.untrustedDiffIDValues), layerIndex)
}
return s.untrustedDiffIDValues[layerIndex], nil
}
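// Illustrative sketch (not part of this diff): the config data that
// untrustedLayerDiffID parses. rootfs.diff_ids in an OCI image config is an
// ordered list of uncompressed-layer digests, one per non-empty layer; the
// digest values below are hypothetical placeholders.
func exampleDiffIDs() ([]digest.Digest, error) {
	configJSON := []byte(`{"rootfs":{"type":"layers","diff_ids":[
		"sha256:e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855",
		"sha256:5f70bf18a086007016e948b04aed3b82103a36bea41755b6cddfaf10ace3c6ef"]}}`)
	configOCI := &imgspecv1.Image{}
	if err := json.Unmarshal(configJSON, configOCI); err != nil {
		return nil, err
	}
	// DiffIDs[i] is the untrusted DiffID of layer index i.
	return configOCI.RootFS.DiffIDs, nil
}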
// Commit marks the process of storing the image as successful and asks for the image to be persisted.
@ -781,6 +979,8 @@ func (s *storageImageDestination) commitLayer(index int, info addedLayerInfo, si
// - Uploaded data MAY be visible to others before Commit() is called
// - Uploaded data MAY be removed or MAY remain around if Close() is called without Commit() (i.e. rollback is allowed but not guaranteed)
func (s *storageImageDestination) Commit(ctx context.Context, unparsedToplevel types.UnparsedImage) error {
// This function is outside of the scope of HasThreadSafePutBlob, so we don't need to hold s.lock.
if len(s.manifest) == 0 {
return errors.New("Internal error: storageImageDestination.Commit() called without PutManifest()")
}
@ -827,12 +1027,12 @@ func (s *storageImageDestination) Commit(ctx context.Context, unparsedToplevel t
}
}
var lastLayer string
if len(layerBlobs) > 0 { // Can happen when using caches
prev := s.indexToStorageID[len(layerBlobs)-1]
if prev == nil {
if len(layerBlobs) > 0 { // Zero-layer images rarely make sense, but they are technically possible, and may happen for non-image artifacts.
prev, ok := s.indexToStorageID[len(layerBlobs)-1]
if !ok {
return fmt.Errorf("Internal error: storageImageDestination.Commit(): previous layer %d hasn't been committed (lastLayer == nil)", len(layerBlobs)-1)
}
lastLayer = *prev
lastLayer = prev
}
// If one of those blobs was a configuration blob, then we can try to dig out the date when the image
@ -846,14 +1046,14 @@ func (s *storageImageDestination) Commit(ctx context.Context, unparsedToplevel t
// Set up to save the non-layer blobs as data items. Since we only share layers, they should all be in files, so
// we just need to screen out the ones that are actually layers to get the list of non-layers.
dataBlobs := set.New[digest.Digest]()
for blob := range s.filenames {
for blob := range s.lockProtected.filenames {
dataBlobs.Add(blob)
}
for _, layerBlob := range layerBlobs {
dataBlobs.Delete(layerBlob.Digest)
}
for _, blob := range dataBlobs.Values() {
v, err := os.ReadFile(s.filenames[blob])
v, err := os.ReadFile(s.lockProtected.filenames[blob])
if err != nil {
return fmt.Errorf("copying non-layer blob %q to image: %w", blob, err)
}
@ -906,7 +1106,7 @@ func (s *storageImageDestination) Commit(ctx context.Context, unparsedToplevel t
}
// Set up to save our metadata.
metadata, err := json.Marshal(s)
metadata, err := json.Marshal(s.metadata)
if err != nil {
return fmt.Errorf("encoding metadata for image: %w", err)
}
@ -1011,7 +1211,7 @@ func (s *storageImageDestination) PutSignaturesWithFormat(ctx context.Context, s
}
if instanceDigest == nil {
s.signatures = sigblob
s.SignatureSizes = sizes
s.metadata.SignatureSizes = sizes
if len(s.manifest) > 0 {
manifestDigest := s.manifestDigest
instanceDigest = &manifestDigest
@ -1019,7 +1219,7 @@ func (s *storageImageDestination) PutSignaturesWithFormat(ctx context.Context, s
}
if instanceDigest != nil {
s.signatureses[*instanceDigest] = sigblob
s.SignaturesSizes[*instanceDigest] = sizes
s.metadata.SignaturesSizes[*instanceDigest] = sizes
}
return nil
}


@ -18,11 +18,6 @@ var (
ErrNoSuchImage = storage.ErrNotAnImage
)
type storageImageCloser struct {
types.ImageCloser
size int64
}
// manifestBigDataKey returns a key suitable for recording a manifest with the specified digest using storage.Store.ImageBigData and related functions.
// If a specific manifest digest is explicitly requested by the user, the key returned by this function should be used preferably;
// for compatibility, if a manifest is not available under this key, check also storage.ImageDigestBigDataKey
@ -36,6 +31,17 @@ func signatureBigDataKey(digest digest.Digest) string {
return "signature-" + digest.Encoded()
}
// storageImageMetadata is stored, as JSON, in storage.Image.Metadata
type storageImageMetadata struct {
SignatureSizes []int `json:"signature-sizes,omitempty"` // List of sizes of each signature slice
SignaturesSizes map[digest.Digest][]int `json:"signatures-sizes,omitempty"` // Sizes of each manifest's signature slice
}
type storageImageCloser struct {
types.ImageCloser
size int64
}
// Size() returns the previously-computed size of the image, with no error.
func (s *storageImageCloser) Size() (int64, error) {
return s.size, nil


@ -29,16 +29,6 @@ import (
"github.com/sirupsen/logrus"
)
// getBlobMutexProtected is a struct to hold the state of the getBlobMutex mutex.
type getBlobMutexProtected struct {
// digestToLayerID is a lookup map from the layer digest (either the uncompressed digest or the TOC digest) to the
// layer ID in the store.
digestToLayerID map[digest.Digest]string
// layerPosition stores where we are in reading a blob's layers
layerPosition map[digest.Digest]int
}
type storageImageSource struct {
impl.Compat
impl.PropertyMethodsInitialize
@ -47,13 +37,25 @@ type storageImageSource struct {
imageRef storageReference
image *storage.Image
systemContext *types.SystemContext // SystemContext used in GetBlob() to create temporary files
cachedManifest []byte // A cached copy of the manifest, if already known, or nil
getBlobMutex sync.Mutex // Mutex to sync state for parallel GetBlob executions (it guards layerPosition and digestToLayerID)
metadata storageImageMetadata
cachedManifest []byte // A cached copy of the manifest, if already known, or nil
getBlobMutex sync.Mutex // Mutex to sync state for parallel GetBlob executions
getBlobMutexProtected getBlobMutexProtected
SignatureSizes []int `json:"signature-sizes,omitempty"` // List of sizes of each signature slice
SignaturesSizes map[digest.Digest][]int `json:"signatures-sizes,omitempty"` // List of sizes of each signature slice
}
// getBlobMutexProtected contains storageImageSource data protected by getBlobMutex.
type getBlobMutexProtected struct {
// digestToLayerID is a lookup map from a possibly-untrusted uncompressed layer digest (as returned by LayerInfosForCopy) to the
// layer ID in the store.
digestToLayerID map[digest.Digest]string
// layerPosition stores where we are in reading a blob's layers
layerPosition map[digest.Digest]int
}
// expectedLayerDiffIDFlag is a per-layer flag containing an UNTRUSTED uncompressed digest of the layer.
// It is set when pulling a layer by TOC; later, this value is used with digestToLayerID
// to allow identifying the layer — and the consumer is expected to verify the blob returned by GetBlob against the digest.
const expectedLayerDiffIDFlag = "expected-layer-diffid"
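// Illustrative sketch (not part of this diff): the consumer side of the
// expectedLayerDiffIDFlag round trip, mirroring what LayerInfosForCopy does
// below. The untrusted DiffID recorded in the layer flag is parsed and mapped
// to the layer ID so that GetBlob can later locate the layer directly.
// exampleFlagRoundTrip is a hypothetical helper.
func exampleFlagRoundTrip(s *storageImageSource, layer *storage.Layer) error {
	raw, ok := layer.Flags[expectedLayerDiffIDFlag].(string)
	if !ok {
		return fmt.Errorf("layer %q has no %q flag", layer.ID, expectedLayerDiffIDFlag)
	}
	untrustedDiffID, err := digest.Parse(raw)
	if err != nil {
		return err
	}
	s.getBlobMutex.Lock()
	s.getBlobMutexProtected.digestToLayerID[untrustedDiffID] = layer.ID
	s.getBlobMutex.Unlock()
	return nil
}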
// newImageSource sets up an image for reading.
@ -71,11 +73,13 @@ func newImageSource(sys *types.SystemContext, imageRef storageReference) (*stora
}),
NoGetBlobAtInitialize: stubs.NoGetBlobAt(imageRef),
imageRef: imageRef,
systemContext: sys,
image: img,
SignatureSizes: []int{},
SignaturesSizes: make(map[digest.Digest][]int),
imageRef: imageRef,
systemContext: sys,
image: img,
metadata: storageImageMetadata{
SignatureSizes: []int{},
SignaturesSizes: make(map[digest.Digest][]int),
},
getBlobMutexProtected: getBlobMutexProtected{
digestToLayerID: make(map[digest.Digest]string),
layerPosition: make(map[digest.Digest]int),
@ -83,7 +87,7 @@ func newImageSource(sys *types.SystemContext, imageRef storageReference) (*stora
}
image.Compat = impl.AddCompat(image)
if img.Metadata != "" {
if err := json.Unmarshal([]byte(img.Metadata), image); err != nil {
if err := json.Unmarshal([]byte(img.Metadata), &image.metadata); err != nil {
return nil, fmt.Errorf("decoding metadata for source image: %w", err)
}
}
@ -118,8 +122,9 @@ func (s *storageImageSource) GetBlob(ctx context.Context, info types.BlobInfo, c
var layers []storage.Layer
// If the digest was overridden by LayerInfosForCopy, then we need to use the TOC digest
// to retrieve it from the storage.
// This lookup path is strictly necessary for layers identified by TOC digest
// (where LayersByUncompressedDigest might not find our layer);
// for other layers it is an optimization to avoid the cost of the LayersByUncompressedDigest call.
s.getBlobMutex.Lock()
layerID, found := s.getBlobMutexProtected.digestToLayerID[digest]
s.getBlobMutex.Unlock()
@ -297,29 +302,29 @@ func (s *storageImageSource) LayerInfosForCopy(ctx context.Context, instanceDige
if err != nil {
return nil, fmt.Errorf("reading layer %q in image %q: %w", layerID, s.image.ID, err)
}
if layer.UncompressedDigest == "" && layer.TOCDigest == "" {
return nil, fmt.Errorf("uncompressed digest and TOC digest for layer %q is unknown", layerID)
}
if layer.UncompressedSize < 0 {
return nil, fmt.Errorf("uncompressed size for layer %q is unknown", layerID)
}
blobDigest := layer.UncompressedDigest
if layer.TOCDigest != "" {
if blobDigest == "" {
if layer.TOCDigest == "" {
return nil, fmt.Errorf("uncompressed digest and TOC digest for layer %q is unknown", layerID)
}
if layer.Flags == nil || layer.Flags[expectedLayerDiffIDFlag] == nil {
return nil, fmt.Errorf("TOC digest %q for layer %q is present but %q flag is not set", layer.TOCDigest, layerID, expectedLayerDiffIDFlag)
}
if expectedDigest, ok := layer.Flags[expectedLayerDiffIDFlag].(string); ok {
// if the layer is stored by its TOC, report the expected diffID as the layer Digest
// but store the TOC digest so we can later retrieve it from the storage.
blobDigest, err = digest.Parse(expectedDigest)
if err != nil {
return nil, fmt.Errorf("parsing expected diffID %q for layer %q: %w", expectedDigest, layerID, err)
}
} else {
expectedDigest, ok := layer.Flags[expectedLayerDiffIDFlag].(string)
if !ok {
return nil, fmt.Errorf("TOC digest %q for layer %q is present but %q flag is not a string", layer.TOCDigest, layerID, expectedLayerDiffIDFlag)
}
// If the layer is stored by its TOC, report the expected diffID as the layer Digest;
// the generic code is responsible for validating the digest.
// We can locate the layer without further c/storage help using s.getBlobMutexProtected.digestToLayerID.
blobDigest, err = digest.Parse(expectedDigest)
if err != nil {
return nil, fmt.Errorf("parsing expected diffID %q for layer %q: %w", expectedDigest, layerID, err)
}
}
s.getBlobMutex.Lock()
s.getBlobMutexProtected.digestToLayerID[blobDigest] = layer.ID
@ -375,11 +380,11 @@ func buildLayerInfosForCopy(manifestInfos []manifest.LayerInfo, physicalInfos []
func (s *storageImageSource) GetSignaturesWithFormat(ctx context.Context, instanceDigest *digest.Digest) ([]signature.Signature, error) {
var offset int
signatureBlobs := []byte{}
signatureSizes := s.SignatureSizes
signatureSizes := s.metadata.SignatureSizes
key := "signatures"
instance := "default instance"
if instanceDigest != nil {
signatureSizes = s.SignaturesSizes[*instanceDigest]
signatureSizes = s.metadata.SignaturesSizes[*instanceDigest]
key = signatureBigDataKey(*instanceDigest)
instance = instanceDigest.Encoded()
}
@ -425,7 +430,7 @@ func (s *storageImageSource) getSize() (int64, error) {
sum += bigSize
}
// Add the signature sizes.
for _, sigSize := range s.SignatureSizes {
for _, sigSize := range s.metadata.SignatureSizes {
sum += int64(sigSize)
}
// Walk the layer list.


@ -135,8 +135,8 @@ type BlobInfo struct {
// CompressionOperation is used in Image.UpdateLayerInfos to instruct
// whether the original layer's "compressed or not" should be preserved,
// possibly while changing the compression algorithm from one to another,
// or if it should be compressed or decompressed. The field defaults to
// preserve the original layer's compressedness.
// or if it should be changed to compressed or decompressed.
// The field defaults to preserve the original layer's compressedness.
// TODO: To remove together with CryptoOperation in re-design to remove
// field out of BlobInfo.
CompressionOperation LayerCompression


@ -249,8 +249,8 @@ type DriverWithDiffer interface {
// ApplyDiffWithDiffer applies the changes using the callback function.
// If id is empty, then a staging directory is created. The staging directory is guaranteed to be usable with ApplyDiffFromStagingDirectory.
ApplyDiffWithDiffer(id, parent string, options *ApplyDiffWithDifferOpts, differ Differ) (output DriverWithDifferOutput, err error)
// ApplyDiffFromStagingDirectory applies the changes using the specified staging directory.
ApplyDiffFromStagingDirectory(id, parent, stagingDirectory string, diffOutput *DriverWithDifferOutput, options *ApplyDiffWithDifferOpts) error
// ApplyDiffFromStagingDirectory applies the changes using the diffOutput target directory.
ApplyDiffFromStagingDirectory(id, parent string, diffOutput *DriverWithDifferOutput, options *ApplyDiffWithDifferOpts) error
// CleanupStagingDirectory cleans up the staging directory. It can be used to clean up the staging directory on errors
CleanupStagingDirectory(stagingDirectory string) error
// DifferTarget gets the location where files are stored for the layer.


@ -296,7 +296,7 @@ func isNetworkFileSystem(fsMagic graphdriver.FsMagic) bool {
// a bunch of network file systems...
case graphdriver.FsMagicNfsFs, graphdriver.FsMagicSmbFs, graphdriver.FsMagicAcfs,
graphdriver.FsMagicAfs, graphdriver.FsMagicCephFs, graphdriver.FsMagicCIFS,
graphdriver.FsMagicFHGFSFs, graphdriver.FsMagicGPFS, graphdriver.FsMagicIBRIX,
graphdriver.FsMagicGPFS, graphdriver.FsMagicIBRIX,
graphdriver.FsMagicKAFS, graphdriver.FsMagicLUSTRE, graphdriver.FsMagicNCP,
graphdriver.FsMagicNFSD, graphdriver.FsMagicOCFS2, graphdriver.FsMagicPANFS,
graphdriver.FsMagicPRLFS, graphdriver.FsMagicSMB2, graphdriver.FsMagicSNFS,
@ -2124,7 +2124,8 @@ func (d *Driver) ApplyDiffWithDiffer(id, parent string, options *graphdriver.App
}
// ApplyDiffFromStagingDirectory applies the changes using the specified staging directory.
func (d *Driver) ApplyDiffFromStagingDirectory(id, parent, stagingDirectory string, diffOutput *graphdriver.DriverWithDifferOutput, options *graphdriver.ApplyDiffWithDifferOpts) error {
func (d *Driver) ApplyDiffFromStagingDirectory(id, parent string, diffOutput *graphdriver.DriverWithDifferOutput, options *graphdriver.ApplyDiffWithDifferOpts) error {
stagingDirectory := diffOutput.Target
if filepath.Dir(stagingDirectory) != d.getStagingDir() {
return fmt.Errorf("%q is not a staging directory", stagingDirectory)
}


@ -4,6 +4,7 @@
package overlay
import (
"fmt"
"path"
"github.com/containers/storage/pkg/directory"
@ -15,3 +16,15 @@ import (
func (d *Driver) ReadWriteDiskUsage(id string) (*directory.DiskUsage, error) {
return directory.Usage(path.Join(d.dir(id), "diff"))
}
func getComposeFsHelper() (string, error) {
return "", fmt.Errorf("composefs not supported on this build")
}
func mountComposefsBlob(dataDir, mountPoint string) error {
return fmt.Errorf("composefs not supported on this build")
}
func generateComposeFsBlob(verityDigests map[string]string, toc interface{}, composefsDir string) error {
return fmt.Errorf("composefs not supported on this build")
}


@ -181,6 +181,13 @@ type DiffOptions struct {
Compression *archive.Compression
}
// stagedLayerOptions are the options passed to .create to populate a staged
// layer
type stagedLayerOptions struct {
DiffOutput *drivers.DriverWithDifferOutput
DiffOptions *drivers.ApplyDiffWithDifferOpts
}
// roLayerStore wraps a graph driver, adding the ability to refer to layers by
// name, and keeping track of parent-child relationships, along with a list of
// all known layers.
@ -267,7 +274,7 @@ type rwLayerStore interface {
// underlying drivers do not themselves distinguish between writeable
// and read-only layers. Returns the new layer structure and the size of the
// diff which was applied to its parent to initialize its contents.
create(id string, parent *Layer, names []string, mountLabel string, options map[string]string, moreOptions *LayerOptions, writeable bool, diff io.Reader) (*Layer, int64, error)
create(id string, parent *Layer, names []string, mountLabel string, options map[string]string, moreOptions *LayerOptions, writeable bool, diff io.Reader, slo *stagedLayerOptions) (*Layer, int64, error)
// updateNames modifies names associated with a layer based on (op, names).
updateNames(id string, names []string, op updateNameOperation) error
@ -312,8 +319,8 @@ type rwLayerStore interface {
// CleanupStagingDirectory cleans up the staging directory. It can be used to clean up the staging directory on errors
CleanupStagingDirectory(stagingDirectory string) error
// ApplyDiffFromStagingDirectory uses stagingDirectory to create the diff.
ApplyDiffFromStagingDirectory(id, stagingDirectory string, diffOutput *drivers.DriverWithDifferOutput, options *drivers.ApplyDiffWithDifferOpts) error
// applyDiffFromStagingDirectory uses diffOutput.Target to create the diff.
applyDiffFromStagingDirectory(id string, diffOutput *drivers.DriverWithDifferOutput, options *drivers.ApplyDiffWithDifferOpts) error
// DifferTarget gets the location where files are stored for the layer.
DifferTarget(id string) (string, error)
@ -1232,7 +1239,7 @@ func (r *layerStore) PutAdditionalLayer(id string, parentLayer *Layer, names []s
}
// Requires startWriting.
func (r *layerStore) create(id string, parentLayer *Layer, names []string, mountLabel string, options map[string]string, moreOptions *LayerOptions, writeable bool, diff io.Reader) (layer *Layer, size int64, err error) {
func (r *layerStore) create(id string, parentLayer *Layer, names []string, mountLabel string, options map[string]string, moreOptions *LayerOptions, writeable bool, diff io.Reader, slo *stagedLayerOptions) (layer *Layer, size int64, err error) {
if moreOptions == nil {
moreOptions = &LayerOptions{}
}
@ -1426,6 +1433,11 @@ func (r *layerStore) create(id string, parentLayer *Layer, names []string, mount
cleanupFailureContext = "applying layer diff"
return nil, -1, err
}
} else if slo != nil {
if err := r.applyDiffFromStagingDirectory(layer.ID, slo.DiffOutput, slo.DiffOptions); err != nil {
cleanupFailureContext = "applying staged directory diff"
return nil, -1, err
}
} else {
// applyDiffWithOptions() would have updated r.bycompressedsum
// and r.byuncompressedsum for us, but if we used a template
@ -2286,7 +2298,7 @@ func (r *layerStore) applyDiffWithOptions(to string, layerOptions *LayerOptions,
if layerOptions != nil && layerOptions.UncompressedDigest != "" &&
layerOptions.UncompressedDigest.Algorithm() == digest.Canonical {
uncompressedDigest = layerOptions.UncompressedDigest
} else {
} else if compression != archive.Uncompressed {
uncompressedDigester = digest.Canonical.Digester()
}
@ -2365,10 +2377,17 @@ func (r *layerStore) applyDiffWithOptions(to string, layerOptions *LayerOptions,
if uncompressedDigester != nil {
uncompressedDigest = uncompressedDigester.Digest()
}
if uncompressedDigest == "" && compression == archive.Uncompressed {
uncompressedDigest = compressedDigest
}
updateDigestMap(&r.bycompressedsum, layer.CompressedDigest, compressedDigest, layer.ID)
layer.CompressedDigest = compressedDigest
layer.CompressedSize = compressedCounter.Count
if layerOptions != nil && layerOptions.OriginalDigest != "" && layerOptions.OriginalSize != nil {
layer.CompressedSize = *layerOptions.OriginalSize
} else {
layer.CompressedSize = compressedCounter.Count
}
updateDigestMap(&r.byuncompressedsum, layer.UncompressedDigest, uncompressedDigest, layer.ID)
layer.UncompressedDigest = uncompressedDigest
layer.UncompressedSize = uncompressedCounter.Count
@ -2407,7 +2426,7 @@ func (r *layerStore) DifferTarget(id string) (string, error) {
}
// Requires startWriting.
func (r *layerStore) ApplyDiffFromStagingDirectory(id, stagingDirectory string, diffOutput *drivers.DriverWithDifferOutput, options *drivers.ApplyDiffWithDifferOpts) error {
func (r *layerStore) applyDiffFromStagingDirectory(id string, diffOutput *drivers.DriverWithDifferOutput, options *drivers.ApplyDiffWithDifferOpts) error {
ddriver, ok := r.driver.(drivers.DriverWithDiffer)
if !ok {
return ErrNotSupported
@ -2426,7 +2445,7 @@ func (r *layerStore) ApplyDiffFromStagingDirectory(id, stagingDirectory string,
}
}
err := ddriver.ApplyDiffFromStagingDirectory(layer.ID, layer.Parent, stagingDirectory, diffOutput, options)
err := ddriver.ApplyDiffFromStagingDirectory(layer.ID, layer.Parent, diffOutput, options)
if err != nil {
return err
}
@ -2446,6 +2465,10 @@ func (r *layerStore) ApplyDiffFromStagingDirectory(id, stagingDirectory string,
layer.Flags[k] = v
}
}
if err = r.saveFor(layer); err != nil {
return err
}
if len(diffOutput.TarSplit) != 0 {
tsdata := bytes.Buffer{}
compressor, err := pgzip.NewWriterLevel(&tsdata, pgzip.BestSpeed)
@ -2475,9 +2498,6 @@ func (r *layerStore) ApplyDiffFromStagingDirectory(id, stagingDirectory string,
return err
}
}
if err = r.saveFor(layer); err != nil {
return err
}
return err
}


@ -339,12 +339,43 @@ func (compression *Compression) Extension() string {
return ""
}
// nosysFileInfo hides the system-dependent info of the wrapped FileInfo to
// prevent tar.FileInfoHeader from introspecting it and potentially calling into
// glibc.
type nosysFileInfo struct {
os.FileInfo
}
func (fi nosysFileInfo) Sys() interface{} {
// A Sys value of type *tar.Header is safe as it is system-independent.
// The tar.FileInfoHeader function copies the fields into the returned
// header without performing any OS lookups.
if sys, ok := fi.FileInfo.Sys().(*tar.Header); ok {
return sys
}
return nil
}
// sysStatOverride, if non-nil, populates hdr from system-dependent fields of fi.
var sysStatOverride func(fi os.FileInfo, hdr *tar.Header) error
func fileInfoHeaderNoLookups(fi os.FileInfo, link string) (*tar.Header, error) {
if sysStatOverride == nil {
return tar.FileInfoHeader(fi, link)
}
hdr, err := tar.FileInfoHeader(nosysFileInfo{fi}, link)
if err != nil {
return nil, err
}
return hdr, sysStatOverride(fi, hdr)
}
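// Illustrative sketch (not part of this diff): what the indirection above
// buys. Building a header via fileInfoHeaderNoLookups never triggers
// user/group name resolution (which archive/tar would otherwise do via
// os/user, potentially calling into glibc NSS); numeric ownership is filled
// in by the platform-specific sysStatOverride instead. exampleHeader is a
// hypothetical helper.
func exampleHeader(path string) (*tar.Header, error) {
	fi, err := os.Lstat(path)
	if err != nil {
		return nil, err
	}
	hdr, err := fileInfoHeaderNoLookups(fi, "")
	if err != nil {
		return nil, err
	}
	// hdr.Uid/hdr.Gid are numeric; hdr.Uname/hdr.Gname stay empty here.
	return hdr, nil
}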
// FileInfoHeader creates a populated Header from fi.
// Compared to archive pkg this function fills in more information.
// Also, regardless of Go version, this function fills file type bits (e.g. hdr.Mode |= modeISDIR),
// which archive/tar no longer sets since Go 1.9.
func FileInfoHeader(name string, fi os.FileInfo, link string) (*tar.Header, error) {
hdr, err := tar.FileInfoHeader(fi, link)
hdr, err := fileInfoHeaderNoLookups(fi, link)
if err != nil {
return nil, err
}


@ -15,6 +15,31 @@ import (
"golang.org/x/sys/unix"
)
func init() {
sysStatOverride = statUnix
}
// statUnix populates hdr from system-dependent fields of fi without performing
// any OS lookups.
// Adapted from Moby.
func statUnix(fi os.FileInfo, hdr *tar.Header) error {
s, ok := fi.Sys().(*syscall.Stat_t)
if !ok {
return nil
}
hdr.Uid = int(s.Uid)
hdr.Gid = int(s.Gid)
if s.Mode&unix.S_IFBLK != 0 ||
s.Mode&unix.S_IFCHR != 0 {
hdr.Devmajor = int64(unix.Major(uint64(s.Rdev))) //nolint: unconvert
hdr.Devminor = int64(unix.Minor(uint64(s.Rdev))) //nolint: unconvert
}
return nil
}
// fixVolumePathPrefix does platform specific processing to ensure that if
// the path being passed in is not in a volume path format, convert it to one.
func fixVolumePathPrefix(srcPath string) string {


@ -257,8 +257,8 @@ func readZstdChunkedManifest(blobStream ImageSourceSeekable, blobSize int64, ann
return decodedBlob, decodedTarSplit, int64(footerData.Offset), err
}
func decodeAndValidateBlob(blob []byte, lengthUncompressed uint64, expectedUncompressedChecksum string) ([]byte, error) {
d, err := digest.Parse(expectedUncompressedChecksum)
func decodeAndValidateBlob(blob []byte, lengthUncompressed uint64, expectedCompressedChecksum string) ([]byte, error) {
d, err := digest.Parse(expectedCompressedChecksum)
if err != nil {
return nil, err
}


@ -241,6 +241,10 @@ func GetDiffer(ctx context.Context, store storage.Store, blobDigest digest.Diges
return nil, err
}
if !parseBooleanPullOption(&storeOpts, "enable_partial_images", true) {
return nil, errors.New("enable_partial_images not configured")
}
_, hasZstdChunkedTOC := annotations[internal.ManifestChecksumKey]
_, hasEstargzTOC := annotations[estargz.TOCJSONDigestAnnotation]
@ -1612,7 +1616,6 @@ func (c *chunkedDiffer) ApplyDiff(dest string, options *archive.TarOptions, diff
stream := c.stream
var uncompressedDigest digest.Digest
tocDigest := c.tocDigest
if c.convertToZstdChunked {
fd, err := unix.Open(dest, unix.O_TMPFILE|unix.O_RDWR|unix.O_CLOEXEC, 0o600)
@ -1668,8 +1671,7 @@ func (c *chunkedDiffer) ApplyDiff(dest string, options *archive.TarOptions, diff
// the file was generated by us and the digest for each file was already computed, no need to validate it again.
c.skipValidation = true
// since we retrieved the whole file and it was validated, do not use the TOC digest, but set the uncompressed digest.
tocDigest = ""
// since we retrieved the whole file and it was validated, set the uncompressed digest.
uncompressedDigest = diffID
}
@ -1699,14 +1701,10 @@ func (c *chunkedDiffer) ApplyDiff(dest string, options *archive.TarOptions, diff
Artifacts: map[string]interface{}{
tocKey: toc,
},
TOCDigest: tocDigest,
TOCDigest: c.tocDigest,
UncompressedDigest: uncompressedDigest,
}
if !parseBooleanPullOption(c.storeOpts, "enable_partial_images", false) {
return output, errors.New("enable_partial_images not configured")
}
// When the hard links deduplication is used, file attributes are ignored because setting them
// modifies the source file as well.
useHardLinks := parseBooleanPullOption(c.storeOpts, "use_hard_links", false)


@ -1,6 +1,8 @@
package toc
import (
"errors"
"github.com/containers/storage/pkg/chunked/internal"
digest "github.com/opencontainers/go-digest"
)
@ -16,19 +18,24 @@ const tocJSONDigestAnnotation = "containerd.io/snapshot/stargz/toc.digest"
// table of contents (TOC) from the image's annotations.
// This is an experimental feature and may be changed/removed in the future.
func GetTOCDigest(annotations map[string]string) (*digest.Digest, error) {
if contentDigest, ok := annotations[tocJSONDigestAnnotation]; ok {
d, err := digest.Parse(contentDigest)
d1, ok1 := annotations[tocJSONDigestAnnotation]
d2, ok2 := annotations[internal.ManifestChecksumKey]
switch {
case ok1 && ok2:
return nil, errors.New("both zstd:chunked and eStargz TOC found")
case ok1:
d, err := digest.Parse(d1)
if err != nil {
return nil, err
}
return &d, nil
}
if contentDigest, ok := annotations[internal.ManifestChecksumKey]; ok {
d, err := digest.Parse(contentDigest)
case ok2:
d, err := digest.Parse(d2)
if err != nil {
return nil, err
}
return &d, nil
default:
return nil, nil
}
return nil, nil
}
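// Illustrative sketch (not part of this diff): a hypothetical caller
// distinguishing the three possible outcomes of GetTOCDigest.
func exampleTOC(annotations map[string]string) {
	d, err := GetTOCDigest(annotations)
	switch {
	case err != nil:
		// conflicting zstd:chunked and eStargz annotations were present
	case d == nil:
		// no TOC: the layer cannot be partially pulled
	default:
		_ = *d // the TOC digest to fetch and validate against
	}
}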


@ -17,5 +17,5 @@ func EnableVerity(description string, fd int) error {
// MeasureVerity measures and returns the verity digest for the file represented by 'fd'.
// The 'description' parameter is a human-readable description of the file.
func MeasureVerity(description string, fd int) (string, error) {
return fmt.Errorf("fs-verity is not supported on this platform")
return "", fmt.Errorf("fs-verity is not supported on this platform")
}


@ -59,7 +59,7 @@ additionalimagestores = [
# can deduplicate pulling of content, disk storage of content and can allow the
# kernel to use less memory when running containers.
# containers/storage supports three keys
# containers/storage supports four keys
# * enable_partial_images="true" | "false"
# Tells containers/storage to look for files previously pulled in storage
# rather than always pulling them from the container registry.
@ -70,7 +70,12 @@ additionalimagestores = [
# Tells containers/storage where an ostree repository exists that might have
# previously pulled content which can be used when attempting to avoid
# pulling content from the container registry
pull_options = {enable_partial_images = "false", use_hard_links = "false", ostree_repos=""}
# * convert_images = "false" | "true"
# If set to true, containers/storage will convert images to a
# format compatible with partial pulls in order to take advantage
# of local deduplication and hard linking. It is an expensive
# operation so it is not enabled by default.
pull_options = {enable_partial_images = "true", use_hard_links = "false", ostree_repos=""}
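# Illustrative example (not part of this diff): the same options with the new
# convert_images key enabled, so images that were not built for partial pulls
# are converted on pull and can still benefit from deduplication and hard
# linking. Kept commented out, since only one pull_options line may be active.
# pull_options = {enable_partial_images = "true", use_hard_links = "true", convert_images = "true", ostree_repos=""}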
# Remap-UIDs/GIDs is the mapping from UIDs/GIDs as they should appear inside of
# a container, to the UIDs/GIDs as they should appear outside of the container,


@ -71,6 +71,19 @@ type metadataStore interface {
rwMetadataStore
}
// ApplyStagedLayerOptions contains options to pass to ApplyStagedLayer
type ApplyStagedLayerOptions struct {
ID string // Mandatory
ParentLayer string // Optional
Names []string // Optional
MountLabel string // Optional
Writeable bool // Optional
LayerOptions *LayerOptions // Optional
DiffOutput *drivers.DriverWithDifferOutput // Mandatory
DiffOptions *drivers.ApplyDiffWithDifferOpts // Mandatory
}
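// Illustrative sketch (not part of this diff): migrating from the deprecated
// ApplyDiffFromStagingDirectory flow to ApplyStagedLayer. diffOutput is the
// result of a prior ApplyDiffWithDiffer call with an empty id (staging mode);
// newLayerID and parentID are hypothetical values.
func exampleApplyStaged(store Store, newLayerID, parentID string, diffOutput *drivers.DriverWithDifferOutput) (*Layer, error) {
	layer, err := store.ApplyStagedLayer(ApplyStagedLayerOptions{
		ID:          newLayerID,
		ParentLayer: parentID,
		DiffOutput:  diffOutput,
		DiffOptions: &drivers.ApplyDiffWithDifferOpts{},
	})
	if err != nil {
		_ = store.CleanupStagedLayer(diffOutput) // best-effort cleanup on failure
		return nil, err
	}
	return layer, nil
}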
// An roBigDataStore wraps up the read-only big-data related methods of the
// various types of file-based lookaside stores that we implement.
type roBigDataStore interface {
@ -318,11 +331,21 @@ type Store interface {
ApplyDiffWithDiffer(to string, options *drivers.ApplyDiffWithDifferOpts, differ drivers.Differ) (*drivers.DriverWithDifferOutput, error)
// ApplyDiffFromStagingDirectory uses stagingDirectory to create the diff.
// Deprecated: it will be removed soon. Use ApplyStagedLayer instead.
ApplyDiffFromStagingDirectory(to, stagingDirectory string, diffOutput *drivers.DriverWithDifferOutput, options *drivers.ApplyDiffWithDifferOpts) error
// CleanupStagingDirectory cleans up the staging directory. It can be used to clean up the staging directory on errors
// Deprecated: it will be removed soon. Use CleanupStagedLayer instead.
CleanupStagingDirectory(stagingDirectory string) error
// ApplyStagedLayer combines the functions of CreateLayer and ApplyDiffFromStagingDirectory,
// marking the layer for automatic removal if applying the diff fails
// for any reason.
ApplyStagedLayer(args ApplyStagedLayerOptions) (*Layer, error)
// CleanupStagedLayer cleans up the staging directory. It can be used to clean up the staging directory on errors
CleanupStagedLayer(diffOutput *drivers.DriverWithDifferOutput) error
// DifferTarget gets the path to the differ target.
DifferTarget(id string) (string, error)
@ -580,10 +603,19 @@ type LayerOptions struct {
// initialize this layer. If set, it should be a child of the layer
// which we want to use as the parent of the new layer.
TemplateLayer string
// OriginalDigest specifies a digest of the tarstream (diff), if one is
// OriginalDigest specifies a digest of the (possibly-compressed) tarstream (diff), if one is
// provided along with these LayerOptions, and reliably known by the caller.
// The digest might not be exactly the digest of the provided tarstream
// (e.g. the digest might be of a compressed representation, while providing
// an uncompressed one); in that case the caller is responsible for the two matching.
// Use the default "" if this field is not applicable or the value is not known.
OriginalDigest digest.Digest
// OriginalSize specifies a size of the (possibly-compressed) tarstream corresponding
// to OriginalDigest.
// If the digest does not match the provided tarstream, OriginalSize must match OriginalDigest,
// not the tarstream.
// Use nil if not applicable or not known.
OriginalSize *int64
// UncompressedDigest specifies a digest of the uncompressed version (“DiffID”)
// of the tarstream (diff), if one is provided along with these LayerOptions,
// and reliably known by the caller.
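// Illustrative sketch (not part of this diff): supplying an uncompressed
// stream while recording the compressed blob's digest and size, per the
// OriginalDigest/OriginalSize contract above. All parameter values are
// hypothetical.
func examplePutLayer(s Store, id, parent string, diffID, compressedDigest digest.Digest, compressedSize int64, uncompressed io.Reader) (*Layer, error) {
	layer, _, err := s.PutLayer(id, parent, nil, "", false, &LayerOptions{
		OriginalDigest:     compressedDigest, // digest of the compressed representation
		OriginalSize:       &compressedSize,  // corresponds to OriginalDigest, not the stream
		UncompressedDigest: diffID,
	}, uncompressed)
	return layer, err
}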
@ -1412,8 +1444,7 @@ func (s *store) canUseShifting(uidmap, gidmap []idtools.IDMap) bool {
return true
}
func (s *store) PutLayer(id, parent string, names []string, mountLabel string, writeable bool, lOptions *LayerOptions, diff io.Reader) (*Layer, int64, error) {
var parentLayer *Layer
func (s *store) putLayer(id, parent string, names []string, mountLabel string, writeable bool, lOptions *LayerOptions, diff io.Reader, slo *stagedLayerOptions) (*Layer, int64, error) {
rlstore, rlstores, err := s.bothLayerStoreKinds()
if err != nil {
return nil, -1, err
@ -1426,6 +1457,8 @@ func (s *store) PutLayer(id, parent string, names []string, mountLabel string, w
return nil, -1, err
}
defer s.containerStore.stopWriting()
var parentLayer *Layer
var options LayerOptions
if lOptions != nil {
options = *lOptions
@ -1485,6 +1518,7 @@ func (s *store) PutLayer(id, parent string, names []string, mountLabel string, w
}
layerOptions := LayerOptions{
OriginalDigest: options.OriginalDigest,
OriginalSize: options.OriginalSize,
UncompressedDigest: options.UncompressedDigest,
Flags: options.Flags,
}
@ -1498,7 +1532,11 @@ func (s *store) PutLayer(id, parent string, names []string, mountLabel string, w
GIDMap: copyIDMap(gidMap),
}
}
return rlstore.create(id, parentLayer, names, mountLabel, nil, &layerOptions, writeable, diff)
return rlstore.create(id, parentLayer, names, mountLabel, nil, &layerOptions, writeable, diff, slo)
}
func (s *store) PutLayer(id, parent string, names []string, mountLabel string, writeable bool, lOptions *LayerOptions, diff io.Reader) (*Layer, int64, error) {
return s.putLayer(id, parent, names, mountLabel, writeable, lOptions, diff, nil)
}
func (s *store) CreateLayer(id, parent string, names []string, mountLabel string, writeable bool, options *LayerOptions) (*Layer, error) {
@ -1708,7 +1746,7 @@ func (s *store) imageTopLayerForMapping(image *Image, ristore roImageStore, rlst
}
}
layerOptions.TemplateLayer = layer.ID
mappedLayer, _, err := rlstore.create("", parentLayer, nil, layer.MountLabel, nil, &layerOptions, false, nil)
mappedLayer, _, err := rlstore.create("", parentLayer, nil, layer.MountLabel, nil, &layerOptions, false, nil, nil)
if err != nil {
return nil, fmt.Errorf("creating an ID-mapped copy of layer %q: %w", layer.ID, err)
}
@ -1879,7 +1917,7 @@ func (s *store) CreateContainer(id string, names []string, image, layer, metadat
options.Flags[mountLabelFlag] = mountLabel
}
clayer, _, err := rlstore.create(layer, imageTopLayer, nil, mlabel, options.StorageOpt, layerOptions, true, nil)
clayer, _, err := rlstore.create(layer, imageTopLayer, nil, mlabel, options.StorageOpt, layerOptions, true, nil, nil)
if err != nil {
return nil, err
}
@ -2944,15 +2982,28 @@ func (s *store) Diff(from, to string, options *DiffOptions) (io.ReadCloser, erro
}
func (s *store) ApplyDiffFromStagingDirectory(to, stagingDirectory string, diffOutput *drivers.DriverWithDifferOutput, options *drivers.ApplyDiffWithDifferOpts) error {
if stagingDirectory != diffOutput.Target {
return fmt.Errorf("invalid value for staging directory, it must be the same as the differ target directory")
}
_, err := writeToLayerStore(s, func(rlstore rwLayerStore) (struct{}, error) {
if !rlstore.Exists(to) {
return struct{}{}, ErrLayerUnknown
}
return struct{}{}, rlstore.ApplyDiffFromStagingDirectory(to, stagingDirectory, diffOutput, options)
return struct{}{}, rlstore.applyDiffFromStagingDirectory(to, diffOutput, options)
})
return err
}
func (s *store) ApplyStagedLayer(args ApplyStagedLayerOptions) (*Layer, error) {
slo := stagedLayerOptions{
DiffOutput: args.DiffOutput,
DiffOptions: args.DiffOptions,
}
layer, _, err := s.putLayer(args.ID, args.ParentLayer, args.Names, args.MountLabel, args.Writeable, args.LayerOptions, nil, &slo)
return layer, err
}
func (s *store) CleanupStagingDirectory(stagingDirectory string) error {
_, err := writeToLayerStore(s, func(rlstore rwLayerStore) (struct{}, error) {
return struct{}{}, rlstore.CleanupStagingDirectory(stagingDirectory)
@ -2960,6 +3011,13 @@ func (s *store) CleanupStagingDirectory(stagingDirectory string) error {
return err
}
func (s *store) CleanupStagedLayer(diffOutput *drivers.DriverWithDifferOutput) error {
_, err := writeToLayerStore(s, func(rlstore rwLayerStore) (struct{}, error) {
return struct{}{}, rlstore.CleanupStagingDirectory(diffOutput.Target)
})
return err
}
func (s *store) ApplyDiffWithDiffer(to string, options *drivers.ApplyDiffWithDifferOpts, differ drivers.Differ) (*drivers.DriverWithDifferOutput, error) {
return writeToLayerStore(s, func(rlstore rwLayerStore) (*drivers.DriverWithDifferOutput, error) {
if to != "" && !rlstore.Exists(to) {


@ -175,7 +175,7 @@ outer:
// We need to create a temporary layer so we can mount it and lookup the
// maximum IDs used.
clayer, _, err := rlstore.create("", topLayer, nil, "", nil, layerOptions, false, nil)
clayer, _, err := rlstore.create("", topLayer, nil, "", nil, layerOptions, false, nil, nil)
if err != nil {
return 0, err
}


@ -875,14 +875,16 @@ func (c *SQLiteConn) exec(ctx context.Context, query string, args []driver.Named
// consume the number of arguments used in the current
// statement and append all named arguments not
// contained therein
stmtArgs = append(stmtArgs, args[start:start+na]...)
for i := range args {
if (i < start || i >= na) && args[i].Name != "" {
stmtArgs = append(stmtArgs, args[i])
if len(args[start:start+na]) > 0 {
stmtArgs = append(stmtArgs, args[start:start+na]...)
for i := range args {
if (i < start || i >= na) && args[i].Name != "" {
stmtArgs = append(stmtArgs, args[i])
}
}
for i := range stmtArgs {
stmtArgs[i].Ordinal = i + 1
}
}
for i := range stmtArgs {
stmtArgs[i].Ordinal = i + 1
}
res, err = s.(*SQLiteStmt).exec(ctx, stmtArgs)
if err != nil && err != driver.ErrSkip {
@ -1906,6 +1908,7 @@ func (s *SQLiteStmt) Close() error {
if rv != C.SQLITE_OK {
return s.c.lastError()
}
s.c = nil
runtime.SetFinalizer(s, nil)
return nil
}
@ -2011,6 +2014,7 @@ func (s *SQLiteStmt) query(ctx context.Context, args []driver.NamedValue) (drive
closed: false,
ctx: ctx,
}
runtime.SetFinalizer(rows, (*SQLiteRows).Close)
return rows, nil
}
@ -2056,6 +2060,7 @@ func (s *SQLiteStmt) exec(ctx context.Context, args []driver.NamedValue) (driver
err error
}
resultCh := make(chan result)
defer close(resultCh)
go func() {
r, err := s.execSync(args)
resultCh <- result{r, err}
@ -2122,6 +2127,8 @@ func (rc *SQLiteRows) Close() error {
return rc.s.c.lastError()
}
rc.s.mu.Unlock()
rc.s = nil
runtime.SetFinalizer(rc, nil)
return nil
}
@ -2168,6 +2175,7 @@ func (rc *SQLiteRows) Next(dest []driver.Value) error {
return rc.nextSyncLocked(dest)
}
resultCh := make(chan error)
defer close(resultCh)
go func() {
resultCh <- rc.nextSyncLocked(dest)
}()


@ -187,6 +187,10 @@ type Hook struct {
type Hooks struct {
// Prestart is Deprecated. Prestart is a list of hooks to be run before the container process is executed.
// It is called in the Runtime Namespace
//
// Deprecated: use [Hooks.CreateRuntime], [Hooks.CreateContainer], and
// [Hooks.StartContainer] instead, which allow more granular hook control
// during the create and start phase.
Prestart []Hook `json:"prestart,omitempty"`
// CreateRuntime is a list of hooks to be run after the container has been created but before pivot_root or any equivalent operation has been called
// It is called in the Runtime Namespace
@ -371,6 +375,12 @@ type LinuxMemory struct {
// Total memory limit (memory + swap).
Swap *int64 `json:"swap,omitempty"`
// Kernel memory limit (in bytes).
//
// Deprecated: kernel-memory limits are not supported in cgroups v2, and
// were obsoleted in [kernel v5.4]. This field should no longer be used,
// as it may be ignored by runtimes.
//
// [kernel v5.4]: https://github.com/torvalds/linux/commit/0158115f702b0ba208ab0
Kernel *int64 `json:"kernel,omitempty"`
// Kernel memory limit for tcp (in bytes)
KernelTCP *int64 `json:"kernelTCP,omitempty"`


@ -6,12 +6,12 @@ const (
// VersionMajor is for an API incompatible changes
VersionMajor = 1
// VersionMinor is for functionality in a backwards-compatible manner
VersionMinor = 1
VersionMinor = 2
// VersionPatch is for backwards-compatible bug fixes
VersionPatch = 0
// VersionDev indicates development branch. Releases will be empty string.
VersionDev = "+dev"
VersionDev = ""
)
// Version is the specification version that the package types support.


@ -19,15 +19,14 @@
#define POLY1305_MUL(h0, h1, h2, r0, r1, t0, t1, t2, t3, t4, t5) \
MULLD r0, h0, t0; \
MULLD r0, h1, t4; \
MULHDU r0, h0, t1; \
MULLD r0, h1, t4; \
MULHDU r0, h1, t5; \
ADDC t4, t1, t1; \
MULLD r0, h2, t2; \
ADDZE t5; \
MULHDU r1, h0, t4; \
MULLD r1, h0, h0; \
ADD t5, t2, t2; \
ADDE t5, t2, t2; \
ADDC h0, t1, t1; \
MULLD h2, r1, t3; \
ADDZE t4, h0; \
@ -37,13 +36,11 @@
ADDE t5, t3, t3; \
ADDC h0, t2, t2; \
MOVD $-4, t4; \
MOVD t0, h0; \
MOVD t1, h1; \
ADDZE t3; \
ANDCC $3, t2, h2; \
AND t2, t4, t0; \
RLDICL $0, t2, $62, h2; \
AND t2, t4, h0; \
ADDC t0, h0, h0; \
ADDE t3, h1, h1; \
ADDE t3, t1, h1; \
SLD $62, t3, t4; \
SRD $2, t2; \
ADDZE h2; \
@ -75,6 +72,7 @@ TEXT ·update(SB), $0-32
loop:
POLY1305_ADD(R4, R8, R9, R10, R20, R21, R22)
PCALIGN $16
multiply:
POLY1305_MUL(R8, R9, R10, R11, R12, R16, R17, R18, R14, R20, R21)
ADD $-16, R5


@ -279,21 +279,22 @@ func getOIDFromHashAlgorithm(target crypto.Hash) asn1.ObjectIdentifier {
// This is the exposed reflection of the internal OCSP structures.
// The status values that can be expressed in OCSP. See RFC 6960.
// These are used for the Response.Status field.
const (
// Good means that the certificate is valid.
Good = iota
Good = 0
// Revoked means that the certificate has been deliberately revoked.
Revoked
Revoked = 1
// Unknown means that the OCSP responder doesn't know about the certificate.
Unknown
Unknown = 2
// ServerFailed is unused and was never used (see
// https://go-review.googlesource.com/#/c/18944). ParseResponse will
// return a ResponseError when an error response is parsed.
ServerFailed
ServerFailed = 3
)
// The enumerated reasons for revoking a certificate. See RFC 5280.
const (
Unspecified = 0
KeyCompromise = 1


@ -209,25 +209,37 @@ func Insert[S ~[]E, E any](s S, i int, v ...E) S {
return s
}
// Delete removes the elements s[i:j] from s, returning the modified slice.
// Delete panics if s[i:j] is not a valid slice of s.
// Delete is O(len(s)-j), so if many items must be deleted, it is better to
// make a single call deleting them all together than to delete one at a time.
// Delete might not modify the elements s[len(s)-(j-i):len(s)]. If those
// elements contain pointers you might consider zeroing those elements so that
// objects they reference can be garbage collected.
func Delete[S ~[]E, E any](s S, i, j int) S {
_ = s[i:j] // bounds check
// clearSlice sets all elements up to the length of s to the zero value of E.
// We may use the builtin clear func instead, and remove clearSlice, when upgrading
// to Go 1.21+.
func clearSlice[S ~[]E, E any](s S) {
var zero E
for i := range s {
s[i] = zero
}
}
return append(s[:i], s[j:]...)
// Delete removes the elements s[i:j] from s, returning the modified slice.
// Delete panics if j > len(s) or s[i:j] is not a valid slice of s.
// Delete is O(len(s)-i), so if many items must be deleted, it is better to
// make a single call deleting them all together than to delete one at a time.
// Delete zeroes the elements s[len(s)-(j-i):len(s)].
func Delete[S ~[]E, E any](s S, i, j int) S {
_ = s[i:j:len(s)] // bounds check
if i == j {
return s
}
oldlen := len(s)
s = append(s[:i], s[j:]...)
clearSlice(s[len(s):oldlen]) // zero/nil out the obsolete elements, for GC
return s
}
// DeleteFunc removes any elements from s for which del returns true,
// returning the modified slice.
// When DeleteFunc removes m elements, it might not modify the elements
// s[len(s)-m:len(s)]. If those elements contain pointers you might consider
// zeroing those elements so that objects they reference can be garbage
// collected.
// DeleteFunc zeroes the elements between the new length and the original length.
func DeleteFunc[S ~[]E, E any](s S, del func(E) bool) S {
i := IndexFunc(s, del)
if i == -1 {
@ -240,11 +252,13 @@ func DeleteFunc[S ~[]E, E any](s S, del func(E) bool) S {
i++
}
}
clearSlice(s[i:]) // zero/nil out the obsolete elements, for GC
return s[:i]
}
// Replace replaces the elements s[i:j] by the given v, and returns the
// modified slice. Replace panics if s[i:j] is not a valid slice of s.
// When len(v) < (j-i), Replace zeroes the elements between the new length and the original length.
func Replace[S ~[]E, E any](s S, i, j int, v ...E) S {
_ = s[i:j] // verify that i:j is a valid subslice
@ -272,6 +286,7 @@ func Replace[S ~[]E, E any](s S, i, j int, v ...E) S {
if i+len(v) != j {
copy(r[i+len(v):], s[j:])
}
clearSlice(s[tot:]) // zero/nil out the obsolete elements, for GC
return r
}
@ -345,9 +360,7 @@ func Clone[S ~[]E, E any](s S) S {
// This is like the uniq command found on Unix.
// Compact modifies the contents of the slice s and returns the modified slice,
// which may have a smaller length.
// When Compact discards m elements in total, it might not modify the elements
// s[len(s)-m:len(s)]. If those elements contain pointers you might consider
// zeroing those elements so that objects they reference can be garbage collected.
// Compact zeroes the elements between the new length and the original length.
func Compact[S ~[]E, E comparable](s S) S {
if len(s) < 2 {
return s
@ -361,11 +374,13 @@ func Compact[S ~[]E, E comparable](s S) S {
i++
}
}
clearSlice(s[i:]) // zero/nil out the obsolete elements, for GC
return s[:i]
}
// CompactFunc is like [Compact] but uses an equality function to compare elements.
// For runs of elements that compare equal, CompactFunc keeps the first one.
// CompactFunc zeroes the elements between the new length and the original length.
func CompactFunc[S ~[]E, E any](s S, eq func(E, E) bool) S {
if len(s) < 2 {
return s
@ -379,6 +394,7 @@ func CompactFunc[S ~[]E, E any](s S, eq func(E, E) bool) S {
i++
}
}
clearSlice(s[i:]) // zero/nil out the obsolete elements, for GC
return s[:i]
}
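The new clearSlice calls change observable behavior: the obsolete tail of the backing array is now zeroed so removed elements no longer pin their referents. A small demonstration with pointer elements; the ptr helper is illustrative:

package main

import (
	"fmt"

	"golang.org/x/exp/slices"
)

// ptr is a small illustrative helper for taking the address of a literal.
func ptr(v int) *int { return &v }

func main() {
	s := []*int{ptr(1), ptr(2), ptr(3), ptr(4)}
	s = slices.Delete(s, 1, 3) // drop s[1:3]; len goes 4 -> 2, cap stays 4
	fmt.Println(*s[0], *s[1])  // 1 4
	// The obsolete tail of the backing array has been zeroed, so the
	// removed pointers no longer keep their referents reachable.
	tail := s[:cap(s)]
	fmt.Println(tail[2], tail[3]) // <nil> <nil>
}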

19
vendor/modules.txt vendored
View File

@ -18,7 +18,7 @@ github.com/Microsoft/go-winio/internal/socket
github.com/Microsoft/go-winio/internal/stringbuffer
github.com/Microsoft/go-winio/pkg/guid
github.com/Microsoft/go-winio/vhd
# github.com/Microsoft/hcsshim v0.12.0-rc.2
# github.com/Microsoft/hcsshim v0.12.0-rc.3
## explicit; go 1.18
github.com/Microsoft/hcsshim
github.com/Microsoft/hcsshim/computestorage
@ -113,6 +113,9 @@ github.com/containerd/containerd/errdefs
github.com/containerd/containerd/log
github.com/containerd/containerd/pkg/userns
github.com/containerd/containerd/platforms
# github.com/containerd/errdefs v0.1.0
## explicit; go 1.20
github.com/containerd/errdefs
# github.com/containerd/log v0.1.0
## explicit; go 1.20
github.com/containerd/log
@ -240,7 +243,7 @@ github.com/containers/conmon/runner/config
# github.com/containers/gvisor-tap-vsock v0.7.3
## explicit; go 1.20
github.com/containers/gvisor-tap-vsock/pkg/types
# github.com/containers/image/v5 v5.29.2-0.20240130233108-e66a1ade2efc
# github.com/containers/image/v5 v5.29.3-0.20240227090231-5bef5e1e1506
## explicit; go 1.19
github.com/containers/image/v5/copy
github.com/containers/image/v5/directory
@ -350,7 +353,7 @@ github.com/containers/psgo/internal/dev
github.com/containers/psgo/internal/host
github.com/containers/psgo/internal/proc
github.com/containers/psgo/internal/process
# github.com/containers/storage v1.52.1-0.20240202181245-1419a5980565
# github.com/containers/storage v1.52.1-0.20240227082351-387d7c8480b0
## explicit; go 1.20
github.com/containers/storage
github.com/containers/storage/drivers
@ -777,7 +780,7 @@ github.com/mattn/go-runewidth
# github.com/mattn/go-shellwords v1.0.12
## explicit; go 1.13
github.com/mattn/go-shellwords
# github.com/mattn/go-sqlite3 v1.14.21
# github.com/mattn/go-sqlite3 v1.14.22
## explicit; go 1.19
github.com/mattn/go-sqlite3
# github.com/mdlayher/socket v0.4.1
@ -893,7 +896,7 @@ github.com/opencontainers/runc/libcontainer/devices
github.com/opencontainers/runc/libcontainer/user
github.com/opencontainers/runc/libcontainer/userns
github.com/opencontainers/runc/libcontainer/utils
# github.com/opencontainers/runtime-spec v1.1.1-0.20230823135140-4fec88fd00a4
# github.com/opencontainers/runtime-spec v1.2.0
## explicit
github.com/opencontainers/runtime-spec/specs-go
# github.com/opencontainers/runtime-tools v0.9.1-0.20230914150019-408c51e934dc
@ -1148,7 +1151,7 @@ go.opentelemetry.io/otel/trace/embedded
# golang.org/x/arch v0.7.0
## explicit; go 1.18
golang.org/x/arch/x86/x86asm
# golang.org/x/crypto v0.19.0
# golang.org/x/crypto v0.20.0
## explicit; go 1.18
golang.org/x/crypto/argon2
golang.org/x/crypto/blake2b
@ -1179,7 +1182,7 @@ golang.org/x/crypto/ssh/internal/bcrypt_pbkdf
golang.org/x/crypto/ssh/knownhosts
golang.org/x/crypto/twofish
golang.org/x/crypto/xts
# golang.org/x/exp v0.0.0-20231226003508-02704c960a9b
# golang.org/x/exp v0.0.0-20240119083558-1b970713d09a
## explicit; go 1.20
golang.org/x/exp/constraints
golang.org/x/exp/maps
@ -1204,7 +1207,7 @@ golang.org/x/net/internal/socks
golang.org/x/net/internal/timeseries
golang.org/x/net/proxy
golang.org/x/net/trace
# golang.org/x/oauth2 v0.16.0
# golang.org/x/oauth2 v0.17.0
## explicit; go 1.18
golang.org/x/oauth2
golang.org/x/oauth2/internal