vendor latest c/{common,image,storage}

To prepare for 5.4.0-rc1.

Signed-off-by: Paul Holzinger <pholzing@redhat.com>
Paul Holzinger 2025-01-21 19:02:43 +01:00
parent dbed85889c
commit b6f1364319
182 changed files with 14830 additions and 11060 deletions

go.mod

@@ -14,14 +14,14 @@ require (
github.com/checkpoint-restore/go-criu/v7 v7.2.0
github.com/containernetworking/plugins v1.5.1
github.com/containers/buildah v1.38.1-0.20241119213149-52437ef15d33
github.com/containers/common v0.61.1-0.20250106142059-514bf04d8e6a
github.com/containers/common v0.61.1-0.20250120135258-06628cb958e9
github.com/containers/conmon v2.0.20+incompatible
github.com/containers/gvisor-tap-vsock v0.8.1
github.com/containers/image/v5 v5.33.1-0.20250107140133-43c2a741242f
github.com/containers/image/v5 v5.33.1-0.20250116221711-317a9885aed9
github.com/containers/libhvee v0.9.0
github.com/containers/ocicrypt v1.2.1
github.com/containers/psgo v1.9.0
github.com/containers/storage v1.56.0
github.com/containers/storage v1.56.2-0.20250121150636-c2cdd500e4ef
github.com/containers/winquit v1.1.0
github.com/coreos/go-systemd/v22 v22.5.1-0.20231103132048-7d375ecc2b09
github.com/crc-org/crc/v2 v2.45.0
@@ -29,7 +29,7 @@ require (
github.com/cyphar/filepath-securejoin v0.3.6
github.com/digitalocean/go-qemu v0.0.0-20230711162256-2e3d0186973e
github.com/docker/distribution v2.8.3+incompatible
github.com/docker/docker v27.4.1+incompatible
github.com/docker/docker v27.5.0+incompatible
github.com/docker/go-connections v0.5.0
github.com/docker/go-plugins-helpers v0.0.0-20240701071450-45e2431495c8
github.com/docker/go-units v0.5.0
@@ -103,12 +103,12 @@ require (
github.com/containerd/errdefs/pkg v0.3.0 // indirect
github.com/containerd/log v0.1.0 // indirect
github.com/containerd/platforms v0.2.1 // indirect
github.com/containerd/stargz-snapshotter/estargz v0.15.1 // indirect
github.com/containerd/stargz-snapshotter/estargz v0.16.3 // indirect
github.com/containerd/typeurl/v2 v2.2.3 // indirect
github.com/containernetworking/cni v1.2.3 // indirect
github.com/containers/libtrust v0.0.0-20230121012942-c1716e8a8d01 // indirect
github.com/containers/luksy v0.0.0-20241007190014-e2530d691420 // indirect
github.com/coreos/go-oidc/v3 v3.11.0 // indirect
github.com/coreos/go-oidc/v3 v3.12.0 // indirect
github.com/coreos/go-systemd v0.0.0-20190719114852-fd7a80b32e1f // indirect
github.com/cyberphone/json-canonicalization v0.0.0-20231217050601-ba74d44ecf5f // indirect
github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect
@@ -195,15 +195,15 @@ require (
github.com/secure-systems-lab/go-securesystemslib v0.9.0 // indirect
github.com/segmentio/ksuid v1.0.4 // indirect
github.com/sigstore/fulcio v1.6.4 // indirect
github.com/sigstore/rekor v1.3.6 // indirect
github.com/sigstore/sigstore v1.8.11 // indirect
github.com/sigstore/rekor v1.3.8 // indirect
github.com/sigstore/sigstore v1.8.12 // indirect
github.com/skeema/knownhosts v1.3.0 // indirect
github.com/skratchdot/open-golang v0.0.0-20200116055534-eef842397966 // indirect
github.com/smallstep/pkcs7 v0.1.1 // indirect
github.com/stefanberger/go-pkcs11uri v0.0.0-20230803200340-78284954bff6 // indirect
github.com/sylabs/sif/v2 v2.20.2 // indirect
github.com/syndtr/gocapability v0.0.0-20200815063812-42c35b437635 // indirect
github.com/tchap/go-patricia/v2 v2.3.1 // indirect
github.com/tchap/go-patricia/v2 v2.3.2 // indirect
github.com/titanous/rocacheck v0.0.0-20171023193734-afe73141d399 // indirect
github.com/tklauser/go-sysconf v0.3.12 // indirect
github.com/tklauser/numcpus v0.6.1 // indirect
@@ -216,17 +216,17 @@ require (
github.com/yusufpapurcu/wmi v1.2.4 // indirect
go.mongodb.org/mongo-driver v1.14.0 // indirect
go.opencensus.io v0.24.0 // indirect
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.53.0 // indirect
go.opentelemetry.io/otel v1.28.0 // indirect
go.opentelemetry.io/otel/metric v1.28.0 // indirect
go.opentelemetry.io/otel/trace v1.28.0 // indirect
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.54.0 // indirect
go.opentelemetry.io/otel v1.31.0 // indirect
go.opentelemetry.io/otel/metric v1.31.0 // indirect
go.opentelemetry.io/otel/trace v1.31.0 // indirect
golang.org/x/arch v0.8.0 // indirect
golang.org/x/mod v0.22.0 // indirect
golang.org/x/oauth2 v0.25.0 // indirect
golang.org/x/time v0.6.0 // indirect
golang.org/x/time v0.9.0 // indirect
golang.org/x/tools v0.28.0 // indirect
google.golang.org/genproto/googleapis/rpc v0.0.0-20240903143218-8af14fe29dc1 // indirect
google.golang.org/grpc v1.68.1 // indirect
google.golang.org/genproto/googleapis/rpc v0.0.0-20250102185135-69823020774d // indirect
google.golang.org/grpc v1.69.4 // indirect
gopkg.in/natefinch/lumberjack.v2 v2.2.1 // indirect
gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 // indirect
tags.cncf.io/container-device-interface/specs-go v0.8.0 // indirect

go.sum

@@ -68,8 +68,8 @@ github.com/containerd/log v0.1.0 h1:TCJt7ioM2cr/tfR8GPbGf9/VRAX8D2B4PjzCpfX540I=
github.com/containerd/log v0.1.0/go.mod h1:VRRf09a7mHDIRezVKTRCrOq78v577GXq3bSa3EhrzVo=
github.com/containerd/platforms v0.2.1 h1:zvwtM3rz2YHPQsF2CHYM8+KtB5dvhISiXh5ZpSBQv6A=
github.com/containerd/platforms v0.2.1/go.mod h1:XHCb+2/hzowdiut9rkudds9bE5yJ7npe7dG/wG+uFPw=
github.com/containerd/stargz-snapshotter/estargz v0.15.1 h1:eXJjw9RbkLFgioVaTG+G/ZW/0kEe2oEKCdS/ZxIyoCU=
github.com/containerd/stargz-snapshotter/estargz v0.15.1/go.mod h1:gr2RNwukQ/S9Nv33Lt6UC7xEx58C+LHRdoqbEKjz1Kk=
github.com/containerd/stargz-snapshotter/estargz v0.16.3 h1:7evrXtoh1mSbGj/pfRccTampEyKpjpOnS3CyiV1Ebr8=
github.com/containerd/stargz-snapshotter/estargz v0.16.3/go.mod h1:uyr4BfYfOj3G9WBVE8cOlQmXAbPN9VEQpBBeJIuOipU=
github.com/containerd/typeurl/v2 v2.2.3 h1:yNA/94zxWdvYACdYO8zofhrTVuQY73fFU1y++dYSw40=
github.com/containerd/typeurl/v2 v2.2.3/go.mod h1:95ljDnPfD3bAbDJRugOiShd/DlAAsxGtUBhJxIn7SCk=
github.com/containernetworking/cni v1.2.3 h1:hhOcjNVUQTnzdRJ6alC5XF+wd9mfGIUaj8FuJbEslXM=
@@ -78,14 +78,14 @@ github.com/containernetworking/plugins v1.5.1 h1:T5ji+LPYjjgW0QM+KyrigZbLsZ8jaX+
github.com/containernetworking/plugins v1.5.1/go.mod h1:MIQfgMayGuHYs0XdNudf31cLLAC+i242hNm6KuDGqCM=
github.com/containers/buildah v1.38.1-0.20241119213149-52437ef15d33 h1:Ih6KuyByK7ZGGzkS0M5rVBPLWIyeDvdL5klhsKBo8vA=
github.com/containers/buildah v1.38.1-0.20241119213149-52437ef15d33/go.mod h1:RxIuKhwTpRl3ma4d4BF6QzSSeg9zNNvo/xhYJOKeDQs=
github.com/containers/common v0.61.1-0.20250106142059-514bf04d8e6a h1:unXpSi99gyu4xKy3AIRMNqAjKTreqiZwXQkuoueeHEg=
github.com/containers/common v0.61.1-0.20250106142059-514bf04d8e6a/go.mod h1:3DaNw8RIl5ugAk5v4Xby9KMnwG23F9BGqCEnyjp+9Pc=
github.com/containers/common v0.61.1-0.20250120135258-06628cb958e9 h1:aiup0MIiAi2Xnv15vApAPqgy4/49ZGkYOpevDgGHfxg=
github.com/containers/common v0.61.1-0.20250120135258-06628cb958e9/go.mod h1:1S+/XhAEOwMGePCUqoYYh1iZo9fU1IpuIwVzCCIdBVU=
github.com/containers/conmon v2.0.20+incompatible h1:YbCVSFSCqFjjVwHTPINGdMX1F6JXHGTUje2ZYobNrkg=
github.com/containers/conmon v2.0.20+incompatible/go.mod h1:hgwZ2mtuDrppv78a/cOBNiCm6O0UMWGx1mu7P00nu5I=
github.com/containers/gvisor-tap-vsock v0.8.1 h1:88qkOjGMF9NmyoVG/orUw73mdwj3z4aOwEbRS01hF78=
github.com/containers/gvisor-tap-vsock v0.8.1/go.mod h1:gjdY4JBWnynrXsxT8+OM7peEOd4FCZpoOWjSadHva0g=
github.com/containers/image/v5 v5.33.1-0.20250107140133-43c2a741242f h1:QbsNC5PejcUiq01uksekZ2cqCUhhQQa0oB9LMk/eXPk=
github.com/containers/image/v5 v5.33.1-0.20250107140133-43c2a741242f/go.mod h1:aUBwvcAgHNVsrU1uoei3H+RNAtJVnz65GRKAPUk5a0g=
github.com/containers/image/v5 v5.33.1-0.20250116221711-317a9885aed9 h1:oAYA8USA2AL4LS+SK9RGw3e6Lv/BEFI7+2Z7B9Bcjs0=
github.com/containers/image/v5 v5.33.1-0.20250116221711-317a9885aed9/go.mod h1:eWqddLtRxT+IYU/063W8rk2HzSS7LotGkROGNJ243CA=
github.com/containers/libhvee v0.9.0 h1:5UxJMka1lDfxTeITA25Pd8QVVttJAG43eQS1Getw1tc=
github.com/containers/libhvee v0.9.0/go.mod h1:p44VJd8jMIx3SRN1eM6PxfCEwXQE0lJ0dQppCAlzjPQ=
github.com/containers/libtrust v0.0.0-20230121012942-c1716e8a8d01 h1:Qzk5C6cYglewc+UyGf6lc8Mj2UaPTHy/iF2De0/77CA=
@@ -96,12 +96,12 @@ github.com/containers/ocicrypt v1.2.1 h1:0qIOTT9DoYwcKmxSt8QJt+VzMY18onl9jUXsxpV
github.com/containers/ocicrypt v1.2.1/go.mod h1:aD0AAqfMp0MtwqWgHM1bUwe1anx0VazI108CRrSKINQ=
github.com/containers/psgo v1.9.0 h1:eJ74jzSaCHnWt26OlKZROSyUyRcGDf+gYBdXnxrMW4g=
github.com/containers/psgo v1.9.0/go.mod h1:0YoluUm43Mz2UnBIh1P+6V6NWcbpTL5uRtXyOcH0B5A=
github.com/containers/storage v1.56.0 h1:DZ9KSkj6M2tvj/4bBoaJu3QDHRl35BwsZ4kmLJS97ZI=
github.com/containers/storage v1.56.0/go.mod h1:c6WKowcAlED/DkWGNuL9bvGYqIWCVy7isRMdCSKWNjk=
github.com/containers/storage v1.56.2-0.20250121150636-c2cdd500e4ef h1:mzC7dl6SRdqyMd22kLuljhJTdoqqd4gW8m4LTNetBCo=
github.com/containers/storage v1.56.2-0.20250121150636-c2cdd500e4ef/go.mod h1:KbGwnyB0b3cwwiPuAiB9XqSYfsEhRb/ALIPgfqpmLLA=
github.com/containers/winquit v1.1.0 h1:jArun04BNDQvt2W0Y78kh9TazN2EIEMG5Im6/JY7+pE=
github.com/containers/winquit v1.1.0/go.mod h1:PsPeZlnbkmGGIToMPHF1zhWjBUkd8aHjMOr/vFcPxw8=
github.com/coreos/go-oidc/v3 v3.11.0 h1:Ia3MxdwpSw702YW0xgfmP1GVCMA9aEFWu12XUZ3/OtI=
github.com/coreos/go-oidc/v3 v3.11.0/go.mod h1:gE3LgjOgFoHi9a4ce4/tJczr0Ai2/BoDhf0r5lltWI0=
github.com/coreos/go-oidc/v3 v3.12.0 h1:sJk+8G2qq94rDI6ehZ71Bol3oUHy63qNYmkiSjrc/Jo=
github.com/coreos/go-oidc/v3 v3.12.0/go.mod h1:gE3LgjOgFoHi9a4ce4/tJczr0Ai2/BoDhf0r5lltWI0=
github.com/coreos/go-systemd v0.0.0-20190719114852-fd7a80b32e1f h1:JOrtw2xFKzlg+cbHpyrpLDmnN1HqhBfnX7WDiW7eG2c=
github.com/coreos/go-systemd v0.0.0-20190719114852-fd7a80b32e1f/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4=
github.com/coreos/go-systemd/v22 v22.5.1-0.20231103132048-7d375ecc2b09 h1:OoRAFlvDGCUqDLampLQjk0yeeSGdF9zzst/3G9IkBbc=
@@ -129,12 +129,12 @@ github.com/disiqueira/gotree/v3 v3.0.2 h1:ik5iuLQQoufZBNPY518dXhiO5056hyNBIK9lWh
github.com/disiqueira/gotree/v3 v3.0.2/go.mod h1:ZuyjE4+mUQZlbpkI24AmruZKhg3VHEgPLDY8Qk+uUu8=
github.com/distribution/reference v0.6.0 h1:0IXCQ5g4/QMHHkarYzh5l+u8T3t73zM5QvfrDyIgxBk=
github.com/distribution/reference v0.6.0/go.mod h1:BbU0aIcezP1/5jX/8MP0YiH4SdvB5Y4f/wlDRiLyi3E=
github.com/docker/cli v27.4.1+incompatible h1:VzPiUlRJ/xh+otB75gva3r05isHMo5wXDfPRi5/b4hI=
github.com/docker/cli v27.4.1+incompatible/go.mod h1:JLrzqnKDaYBop7H2jaqPtU4hHvMKP+vjCwu2uszcLI8=
github.com/docker/cli v27.5.0+incompatible h1:aMphQkcGtpHixwwhAXJT1rrK/detk2JIvDaFkLctbGM=
github.com/docker/cli v27.5.0+incompatible/go.mod h1:JLrzqnKDaYBop7H2jaqPtU4hHvMKP+vjCwu2uszcLI8=
github.com/docker/distribution v2.8.3+incompatible h1:AtKxIZ36LoNK51+Z6RpzLpddBirtxJnzDrHLEKxTAYk=
github.com/docker/distribution v2.8.3+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w=
github.com/docker/docker v27.4.1+incompatible h1:ZJvcY7gfwHn1JF48PfbyXg7Jyt9ZCWDW+GGXOIxEwp4=
github.com/docker/docker v27.4.1+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
github.com/docker/docker v27.5.0+incompatible h1:um++2NcQtGRTz5eEgO6aJimo6/JxrTXC941hd05JO6U=
github.com/docker/docker v27.5.0+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
github.com/docker/docker-credential-helpers v0.8.2 h1:bX3YxiGzFP5sOXWc3bTPEXdEaZSeVMrFgOr3T+zrFAo=
github.com/docker/docker-credential-helpers v0.8.2/go.mod h1:P3ci7E3lwkZg6XiHdRKft1KckHiO9a2rNtyFbZ/ry9M=
github.com/docker/go-connections v0.5.0 h1:USnMq7hx7gwdVZq1L49hLXaFtUdTADjXGp+uj1Br63c=
@@ -426,8 +426,8 @@ github.com/power-devops/perfstat v0.0.0-20210106213030-5aafc221ea8c h1:ncq/mPwQF
github.com/power-devops/perfstat v0.0.0-20210106213030-5aafc221ea8c/go.mod h1:OmDBASR4679mdNQnz2pUhc2G8CO2JrUAVFDRBDP/hJE=
github.com/proglottis/gpgme v0.1.4 h1:3nE7YNA70o2aLjcg63tXMOhPD7bplfE5CBdV+hLAm2M=
github.com/proglottis/gpgme v0.1.4/go.mod h1:5LoXMgpE4bttgwwdv9bLs/vwqv3qV7F4glEEZ7mRKrM=
github.com/prometheus/client_golang v1.20.2 h1:5ctymQzZlyOON1666svgwn3s6IKWgfbjsejTMiXIyjg=
github.com/prometheus/client_golang v1.20.2/go.mod h1:PIEt8X02hGcP8JWbeHyeZ53Y/jReSnHgO035n//V5WE=
github.com/prometheus/client_golang v1.20.5 h1:cxppBPuYhUnsO6yo/aoRol4L7q7UFfdm+bR9r+8l63Y=
github.com/prometheus/client_golang v1.20.5/go.mod h1:PIEt8X02hGcP8JWbeHyeZ53Y/jReSnHgO035n//V5WE=
github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
github.com/prometheus/client_model v0.6.1 h1:ZKSh/rekM+n3CeS952MLRAdFwIKqeY8b62p8ais2e9E=
github.com/prometheus/client_model v0.6.1/go.mod h1:OrxVMOVHjw3lKMa8+x6HeMGkHMQyHDk9E3jmP2AmGiY=
@@ -457,10 +457,10 @@ github.com/shirou/gopsutil/v4 v4.24.12 h1:qvePBOk20e0IKA1QXrIIU+jmk+zEiYVVx06WjB
github.com/shirou/gopsutil/v4 v4.24.12/go.mod h1:DCtMPAad2XceTeIAbGyVfycbYQNBGk2P8cvDi7/VN9o=
github.com/sigstore/fulcio v1.6.4 h1:d86obfxUAG3Y6CYwOx1pdwCZwKmROB6w6927pKOVIRY=
github.com/sigstore/fulcio v1.6.4/go.mod h1:Y6bn3i3KGhXpaHsAtYP3Z4Np0+VzCo1fLv8Ci6mbPDs=
github.com/sigstore/rekor v1.3.6 h1:QvpMMJVWAp69a3CHzdrLelqEqpTM3ByQRt5B5Kspbi8=
github.com/sigstore/rekor v1.3.6/go.mod h1:JDTSNNMdQ/PxdsS49DJkJ+pRJCO/83nbR5p3aZQteXc=
github.com/sigstore/sigstore v1.8.11 h1:tEqeQqbT+awtM87ec9KEeSUxT/AFvJNawneYJyAkFrQ=
github.com/sigstore/sigstore v1.8.11/go.mod h1:fdrFQosxCQ4wTL5H1NrZcQkqQ72AQbPjtpcL2QOGKV0=
github.com/sigstore/rekor v1.3.8 h1:B8kJI8mpSIXova4Jxa6vXdJyysRxFGsEsLKBDl0rRjA=
github.com/sigstore/rekor v1.3.8/go.mod h1:/dHFYKSuxEygfDRnEwyJ+ZD6qoVYNXQdi1mJrKvKWsI=
github.com/sigstore/sigstore v1.8.12 h1:S8xMVZbE2z9ZBuQUEG737pxdLjnbOIcFi5v9UFfkJFc=
github.com/sigstore/sigstore v1.8.12/go.mod h1:+PYQAa8rfw0QdPpBcT+Gl3egKD9c+TUgAlF12H3Nmjo=
github.com/sirupsen/logrus v1.9.3 h1:dueUQJ1C2q9oE3F7wvmSGAaVtTmUizReu6fjN8uqzbQ=
github.com/sirupsen/logrus v1.9.3/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ=
github.com/skeema/knownhosts v1.3.0 h1:AM+y0rI04VksttfwjkSTNQorvGqmwATnvnAHpSgc0LY=
@@ -493,8 +493,8 @@ github.com/sylabs/sif/v2 v2.20.2 h1:HGEPzauCHhIosw5o6xmT3jczuKEuaFzSfdjAsH33vYw=
github.com/sylabs/sif/v2 v2.20.2/go.mod h1:WyYryGRaR4Wp21SAymm5pK0p45qzZCSRiZMFvUZiuhc=
github.com/syndtr/gocapability v0.0.0-20200815063812-42c35b437635 h1:kdXcSzyDtseVEc4yCz2qF8ZrQvIDBJLl4S1c3GCXmoI=
github.com/syndtr/gocapability v0.0.0-20200815063812-42c35b437635/go.mod h1:hkRG7XYTFWNJGYcbNJQlaLq0fg1yr4J4t/NcTQtrfww=
github.com/tchap/go-patricia/v2 v2.3.1 h1:6rQp39lgIYZ+MHmdEq4xzuk1t7OdC35z/xm0BGhTkes=
github.com/tchap/go-patricia/v2 v2.3.1/go.mod h1:VZRHKAb53DLaG+nA9EaYYiaEx6YztwDlLElMsnSHD4k=
github.com/tchap/go-patricia/v2 v2.3.2 h1:xTHFutuitO2zqKAQ5rCROYgUb7Or/+IC3fts9/Yc7nM=
github.com/tchap/go-patricia/v2 v2.3.2/go.mod h1:VZRHKAb53DLaG+nA9EaYYiaEx6YztwDlLElMsnSHD4k=
github.com/titanous/rocacheck v0.0.0-20171023193734-afe73141d399 h1:e/5i7d4oYZ+C1wj2THlRK+oAhjeS/TRQwMfkIuet3w0=
github.com/titanous/rocacheck v0.0.0-20171023193734-afe73141d399/go.mod h1:LdwHTNJT99C5fTAzDz0ud328OgXz+gierycbcIx2fRs=
github.com/tklauser/go-sysconf v0.3.12 h1:0QaGUFOdQaIVdPgfITYzaTegZvdCjmYO52cSFAEVmqU=
@@ -549,20 +549,22 @@ go.mongodb.org/mongo-driver v1.14.0 h1:P98w8egYRjYe3XDjxhYJagTokP/H6HzlsnojRgZRd
go.mongodb.org/mongo-driver v1.14.0/go.mod h1:Vzb0Mk/pa7e6cWw85R4F/endUC3u0U9jGcNU603k65c=
go.opencensus.io v0.24.0 h1:y73uSU6J157QMP2kn2r30vwW1A2W2WFwSCGnAVxeaD0=
go.opencensus.io v0.24.0/go.mod h1:vNK8G9p7aAivkbmorf4v+7Hgx+Zs0yY+0fOtgBfjQKo=
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.53.0 h1:4K4tsIXefpVJtvA/8srF4V4y0akAoPHkIslgAkjixJA=
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.53.0/go.mod h1:jjdQuTGVsXV4vSs+CJ2qYDeDPf9yIJV23qlIzBm73Vg=
go.opentelemetry.io/otel v1.28.0 h1:/SqNcYk+idO0CxKEUOtKQClMK/MimZihKYMruSMViUo=
go.opentelemetry.io/otel v1.28.0/go.mod h1:q68ijF8Fc8CnMHKyzqL6akLO46ePnjkgfIMIjUIX9z4=
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.54.0 h1:TT4fX+nBOA/+LUkobKGW1ydGcn+G3vRw9+g5HwCphpk=
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.54.0/go.mod h1:L7UH0GbB0p47T4Rri3uHjbpCFYrVrwc1I25QhNPiGK8=
go.opentelemetry.io/otel v1.31.0 h1:NsJcKPIW0D0H3NgzPDHmo0WW6SptzPdqg/L1zsIm2hY=
go.opentelemetry.io/otel v1.31.0/go.mod h1:O0C14Yl9FgkjqcCZAsE053C13OaddMYr/hz6clDkEJE=
go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.27.0 h1:R9DE4kQ4k+YtfLI2ULwX82VtNQ2J8yZmA7ZIF/D+7Mc=
go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.27.0/go.mod h1:OQFyQVrDlbe+R7xrEyDr/2Wr67Ol0hRUgsfA+V5A95s=
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.21.0 h1:digkEZCJWobwBqMwC0cwCq8/wkkRy/OowZg5OArWZrM=
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.21.0/go.mod h1:/OpE/y70qVkndM0TrxT4KBoN3RsFZP0QaofcfYrj76I=
go.opentelemetry.io/otel/metric v1.28.0 h1:f0HGvSl1KRAU1DLgLGFjrwVyismPlnuU6JD6bOeuA5Q=
go.opentelemetry.io/otel/metric v1.28.0/go.mod h1:Fb1eVBFZmLVTMb6PPohq3TO9IIhUisDsbJoL/+uQW4s=
go.opentelemetry.io/otel/sdk v1.28.0 h1:b9d7hIry8yZsgtbmM0DKyPWMMUMlK9NEKuIG4aBqWyE=
go.opentelemetry.io/otel/sdk v1.28.0/go.mod h1:oYj7ClPUA7Iw3m+r7GeEjz0qckQRJK2B8zjcZEfu7Pg=
go.opentelemetry.io/otel/trace v1.28.0 h1:GhQ9cUuQGmNDd5BTCP2dAvv75RdMxEfTmYejp+lkx9g=
go.opentelemetry.io/otel/trace v1.28.0/go.mod h1:jPyXzNPg6da9+38HEwElrQiHlVMTnVfM3/yv2OlIHaI=
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.28.0 h1:j9+03ymgYhPKmeXGk5Zu+cIZOlVzd9Zv7QIiyItjFBU=
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.28.0/go.mod h1:Y5+XiUG4Emn1hTfciPzGPJaSI+RpDts6BnCIir0SLqk=
go.opentelemetry.io/otel/metric v1.31.0 h1:FSErL0ATQAmYHUIzSezZibnyVlft1ybhy4ozRPcF2fE=
go.opentelemetry.io/otel/metric v1.31.0/go.mod h1:C3dEloVbLuYoX41KpmAhOqNriGbA+qqH6PQ5E5mUfnY=
go.opentelemetry.io/otel/sdk v1.31.0 h1:xLY3abVHYZ5HSfOg3l2E5LUj2Cwva5Y7yGxnSW9H5Gk=
go.opentelemetry.io/otel/sdk v1.31.0/go.mod h1:TfRbMdhvxIIr/B2N2LQW2S5v9m3gOQ/08KsbbO5BPT0=
go.opentelemetry.io/otel/sdk/metric v1.31.0 h1:i9hxxLJF/9kkvfHppyLL55aW7iIJz4JjxTeYusH7zMc=
go.opentelemetry.io/otel/sdk/metric v1.31.0/go.mod h1:CRInTMVvNhUKgSAMbKyTMxqOBC0zgyxzW55lZzX43Y8=
go.opentelemetry.io/otel/trace v1.31.0 h1:ffjsj1aRouKewfr85U2aGagJ46+MvodynlQ1HYdmJys=
go.opentelemetry.io/otel/trace v1.31.0/go.mod h1:TXZkRk7SM2ZQLtR6eoAWQFIHPvzQ06FJAsO1tJg480A=
go.opentelemetry.io/proto/otlp v1.2.0 h1:pVeZGk7nXDC9O2hncA6nHldxEjm6LByfA2aN8IOkz94=
go.opentelemetry.io/proto/otlp v1.2.0/go.mod h1:gGpR8txAl5M03pDhMC79G6SdqNV26naRm/KDsgaHD8A=
go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto=
@@ -687,8 +689,8 @@ golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU=
golang.org/x/text v0.15.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU=
golang.org/x/text v0.21.0 h1:zyQAAkrwaneQ066sspRyJaG9VNi/YJ1NfzcGB3hZ/qo=
golang.org/x/text v0.21.0/go.mod h1:4IBbMaMmOPCJ8SecivzSH54+73PCFmPWxNTLm+vZkEQ=
golang.org/x/time v0.6.0 h1:eTDhh4ZXt5Qf0augr54TN6suAUudPcawVZeIAPU7D4U=
golang.org/x/time v0.6.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM=
golang.org/x/time v0.9.0 h1:EsRrnYcQiGH+5FfbgvV4AP7qEZstoyrHB0DzarOQ4ZY=
golang.org/x/time v0.9.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM=
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY=
@@ -713,18 +715,18 @@ google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7
google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc=
google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc=
google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo=
google.golang.org/genproto v0.0.0-20240823204242-4ba0660f739c h1:TYOEhrQMrNDTAd2rX9m+WgGr8Ku6YNuj1D7OX6rWSok=
google.golang.org/genproto/googleapis/api v0.0.0-20240903143218-8af14fe29dc1 h1:hjSy6tcFQZ171igDaN5QHOw2n6vx40juYbC/x67CEhc=
google.golang.org/genproto/googleapis/api v0.0.0-20240903143218-8af14fe29dc1/go.mod h1:qpvKtACPCQhAdu3PyQgV4l3LMXZEtft7y8QcarRsp9I=
google.golang.org/genproto/googleapis/rpc v0.0.0-20240903143218-8af14fe29dc1 h1:pPJltXNxVzT4pK9yD8vR9X75DaWYYmLGMsEvBfFQZzQ=
google.golang.org/genproto/googleapis/rpc v0.0.0-20240903143218-8af14fe29dc1/go.mod h1:UqMtugtsSgubUsoxbuAoiCXvqvErP7Gf0so0mK9tHxU=
google.golang.org/genproto v0.0.0-20241118233622-e639e219e697 h1:ToEetK57OidYuqD4Q5w+vfEnPvPpuTwedCNVohYJfNk=
google.golang.org/genproto/googleapis/api v0.0.0-20241209162323-e6fa225c2576 h1:CkkIfIt50+lT6NHAVoRYEyAvQGFM7xEwXUUywFvEb3Q=
google.golang.org/genproto/googleapis/api v0.0.0-20241209162323-e6fa225c2576/go.mod h1:1R3kvZ1dtP3+4p4d3G8uJ8rFk/fWlScl38vanWACI08=
google.golang.org/genproto/googleapis/rpc v0.0.0-20250102185135-69823020774d h1:xJJRGY7TJcvIlpSrN3K6LAWgNFUILlO+OMAqtg9aqnw=
google.golang.org/genproto/googleapis/rpc v0.0.0-20250102185135-69823020774d/go.mod h1:3ENsm/5D1mzDyhpzeRi1NR784I0BcofWBoSc5QqqMK4=
google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg=
google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY=
google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk=
google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc=
google.golang.org/grpc v1.68.1 h1:oI5oTa11+ng8r8XMMN7jAOmWfPZWbYpCFaMUTACxkM0=
google.golang.org/grpc v1.68.1/go.mod h1:+q1XYFJjShcqn0QZHvCyeR4CXPA+llXIeUIfIe00waw=
google.golang.org/grpc v1.69.4 h1:MF5TftSMkd8GLw/m0KM6V8CMOCY6NZ1NQDPGFgbTt4A=
google.golang.org/grpc v1.69.4/go.mod h1:vyjdE6jLBI76dgpDojsFGNaHlxdjXN9ghpnd2o7JGZ4=
google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8=
google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0=
google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM=


@@ -26,12 +26,13 @@ import (
"archive/tar"
"bytes"
"compress/gzip"
"crypto/rand"
"crypto/sha256"
"encoding/json"
"errors"
"fmt"
"io"
"math/rand"
"math/big"
"os"
"path/filepath"
"reflect"
@@ -45,10 +46,6 @@ import (
digest "github.com/opencontainers/go-digest"
)
func init() {
rand.Seed(time.Now().UnixNano())
}
// TestingController is Compression with some helper methods necessary for testing.
type TestingController interface {
Compression
@@ -920,9 +917,11 @@ func checkVerifyInvalidTOCEntryFail(filename string) check {
}
if sampleEntry == nil {
t.Fatalf("TOC must contain at least one regfile or chunk entry other than the rewrite target")
return
}
if targetEntry == nil {
t.Fatalf("rewrite target not found")
return
}
targetEntry.Offset = sampleEntry.Offset
},
@@ -2291,7 +2290,11 @@ var runes = []rune("1234567890abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWX
func randomContents(n int) string {
b := make([]rune, n)
for i := range b {
b[i] = runes[rand.Intn(len(runes))]
bi, err := rand.Int(rand.Reader, big.NewInt(int64(len(runes))))
if err != nil {
panic(err)
}
b[i] = runes[int(bi.Int64())]
}
return string(b)
}
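
The rewritten test helper draws each index from crypto/rand instead of seeding the global math/rand generator, which is also why the init() seeding could be deleted. A standalone sketch of the same pattern (alphabet shortened for brevity):

package main

import (
	"crypto/rand"
	"fmt"
	"math/big"
)

var runes = []rune("1234567890abcdefghijklmnopqrstuvwxyz")

func randomContents(n int) string {
	b := make([]rune, n)
	for i := range b {
		// rand.Int returns a uniform value in [0, len(runes)); it only
		// fails if the system entropy source is unavailable.
		bi, err := rand.Int(rand.Reader, big.NewInt(int64(len(runes))))
		if err != nil {
			panic(err)
		}
		b[i] = runes[int(bi.Int64())]
	}
	return string(b)
}

func main() { fmt.Println(randomContents(16)) }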


@@ -14,6 +14,7 @@ import (
"github.com/containers/common/libnetwork/types"
"github.com/containers/common/pkg/capabilities"
"github.com/containers/storage/pkg/fileutils"
"github.com/containers/storage/pkg/homedir"
"github.com/containers/storage/pkg/unshare"
units "github.com/docker/go-units"
selinux "github.com/opencontainers/selinux/go-selinux"
@@ -740,7 +741,13 @@ func (c *Config) CheckCgroupsAndAdjustConfig() {
session, found := os.LookupEnv("DBUS_SESSION_BUS_ADDRESS")
if !found {
sessionAddr := filepath.Join(os.Getenv("XDG_RUNTIME_DIR"), "bus")
xdgRuntimeDir := os.Getenv("XDG_RUNTIME_DIR")
if xdgRuntimeDir == "" {
if dir, err := homedir.GetRuntimeDir(); err == nil {
xdgRuntimeDir = dir
}
}
sessionAddr := filepath.Join(xdgRuntimeDir, "bus")
if err := fileutils.Exists(sessionAddr); err == nil {
sessionAddr, err = filepath.EvalSymlinks(sessionAddr)
if err == nil {
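
The fix stops assuming $XDG_RUNTIME_DIR is exported: when it is empty, the session bus path is derived from homedir.GetRuntimeDir() (typically /run/user/$UID). A minimal sketch of the resulting lookup, using the same containers/storage helper:

package main

import (
	"fmt"
	"os"
	"path/filepath"

	"github.com/containers/storage/pkg/homedir"
)

// sessionBusPath resolves the conventional rootless D-Bus socket path,
// falling back to homedir.GetRuntimeDir() when XDG_RUNTIME_DIR is unset.
func sessionBusPath() (string, error) {
	dir := os.Getenv("XDG_RUNTIME_DIR")
	if dir == "" {
		d, err := homedir.GetRuntimeDir()
		if err != nil {
			return "", err
		}
		dir = d
	}
	return filepath.Join(dir, "bus"), nil
}

func main() {
	p, err := sessionBusPath()
	fmt.Println(p, err)
}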


@@ -46,7 +46,6 @@ var (
"/proc/scsi",
"/proc/timer_list",
"/proc/timer_stats",
"/sys/dev/block",
"/sys/devices/virtual/powercap",
"/sys/firmware",
"/sys/fs/selinux",


@@ -385,15 +385,12 @@ func (s *storageImageDestination) PutBlobPartial(ctx context.Context, chunkAcces
if out.UncompressedDigest != "" {
s.lockProtected.indexToDiffID[options.LayerIndex] = out.UncompressedDigest
if out.TOCDigest != "" {
s.lockProtected.indexToTOCDigest[options.LayerIndex] = out.TOCDigest
options.Cache.RecordTOCUncompressedPair(out.TOCDigest, out.UncompressedDigest)
}
// Don't set indexToTOCDigest on this path:
// - Using UncompressedDigest allows image reuse with non-partially-pulled layers, so we want to set indexToDiffID.
// - If UncompressedDigest has been computed, that means the layer was read completely, and the TOC has been created from scratch.
// That TOC is quite unlikely to match any other TOC value.
// The computation of UncompressedDigest means the whole layer has been consumed; while doing that, chunked.GetDiffer is
// responsible for ensuring blobDigest has been validated.
// If the whole layer has been consumed, chunked.GetDiffer is responsible for ensuring blobDigest has been validated.
if out.CompressedDigest != "" {
if out.CompressedDigest != blobDigest {
return fmt.Errorf("internal error: PrepareStagedLayer returned CompressedDigest %q not matching expected %q",
out.CompressedDigest, blobDigest)
@@ -402,6 +399,7 @@ func (s *storageImageDestination) PutBlobPartial(ctx context.Context, chunkAcces
// We trust PrepareStagedLayer to validate or create both values correctly.
s.lockProtected.blobDiffIDs[blobDigest] = out.UncompressedDigest
options.Cache.RecordDigestUncompressedPair(out.CompressedDigest, out.UncompressedDigest)
}
} else {
// Use diffID for layer identity if it is known.
if uncompressedDigest := options.Cache.UncompressedDigestForTOC(out.TOCDigest); uncompressedDigest != "" {
@@ -566,6 +564,11 @@ func (s *storageImageDestination) tryReusingBlobAsPending(blobDigest digest.Dige
return false, private.ReusedBlob{}, fmt.Errorf(`looking for layers with TOC digest %q: %w`, options.TOCDigest, err)
}
if found, reused := reusedBlobFromLayerLookup(layers, blobDigest, size, options); found {
if uncompressedDigest == "" && layers[0].UncompressedDigest != "" {
// Determine an uncompressed digest if at all possible, to use a traditional image ID
// and to maximize image reuse.
uncompressedDigest = layers[0].UncompressedDigest
}
if uncompressedDigest != "" {
s.lockProtected.indexToDiffID[*options.LayerIndex] = uncompressedDigest
}
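
The comments in these hunks boil down to a preference order between possible layer identities: a known uncompressed digest (diffID) is always preferred, because it also matches layers that were pulled non-partially, and the TOC digest is only a fallback. A toy sketch of that ordering (names hypothetical, not c/image API):

package main

import (
	"errors"
	"fmt"
)

// layerIdentity mirrors the preference expressed above: use the diffID
// when known, fall back to the TOC digest, and fail otherwise.
func layerIdentity(diffID, tocDigest string) (string, error) {
	if diffID != "" {
		return "diffid:" + diffID, nil
	}
	if tocDigest != "" {
		return "toc:" + tocDigest, nil
	}
	return "", errors.New("no identity available for layer")
}

func main() {
	fmt.Println(layerIdentity("sha256:abc", "sha256:toc"))
	fmt.Println(layerIdentity("", "sha256:toc"))
}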


@@ -17,13 +17,13 @@ env:
####
#### Cache-image names to test with (double-quotes around names are critical)
###
FEDORA_NAME: "fedora-39"
FEDORA_NAME: "fedora-41"
DEBIAN_NAME: "debian-13"
# GCE project where images live
IMAGE_PROJECT: "libpod-218412"
# VM Image built in containers/automation_images
IMAGE_SUFFIX: "c20241010t105554z-f40f39d13"
IMAGE_SUFFIX: "c20250107t132430z-f41f40d13"
FEDORA_CACHE_IMAGE_NAME: "fedora-${IMAGE_SUFFIX}"
DEBIAN_CACHE_IMAGE_NAME: "debian-${IMAGE_SUFFIX}"


@@ -35,7 +35,7 @@ TESTFLAGS := $(shell $(GO) test -race $(BUILDFLAGS) ./pkg/stringutils 2>&1 > /de
# N/B: This value is managed by Renovate, manual changes are
# possible, as long as they don't disturb the formatting
# (i.e. DO NOT ADD A 'v' prefix!)
GOLANGCI_LINT_VERSION := 1.61.0
GOLANGCI_LINT_VERSION := 1.63.4
default all: local-binary docs local-validate local-cross ## validate all checks, build and cross-build\nbinaries and docs


@@ -1 +1 @@
1.56.0
1.57.0-dev


@@ -80,7 +80,7 @@ type CheckOptions struct {
// layer to the contents that we'd expect it to have to ignore certain
// discrepancies
type checkIgnore struct {
ownership, timestamps, permissions bool
ownership, timestamps, permissions, filetype bool
}
// CheckMost returns a CheckOptions with mostly just "quick" checks enabled.
@@ -139,8 +139,10 @@ func (s *store) Check(options *CheckOptions) (CheckReport, error) {
if strings.Contains(o, "ignore_chown_errors=true") {
ignore.ownership = true
}
if strings.HasPrefix(o, "force_mask=") {
if strings.Contains(o, "force_mask=") {
ignore.ownership = true
ignore.permissions = true
ignore.filetype = true
}
}
for o := range s.pullOptions {
@@ -833,7 +835,7 @@ func (s *store) Repair(report CheckReport, options *RepairOptions) []error {
// compareFileInfo returns a string summarizing what's different between the two checkFileInfos
func compareFileInfo(a, b checkFileInfo, idmap *idtools.IDMappings, ignore checkIgnore) string {
var comparison []string
if a.typeflag != b.typeflag {
if a.typeflag != b.typeflag && !ignore.filetype {
comparison = append(comparison, fmt.Sprintf("filetype:%v→%v", a.typeflag, b.typeflag))
}
if idmap != nil && !idmap.Empty() {
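
The switch from strings.HasPrefix to strings.Contains matters because entries in graphOptions normally carry a driver prefix, so a bare "force_mask=" prefix never matched. A small demonstration (option strings illustrative):

package main

import (
	"fmt"
	"strings"
)

func main() {
	opts := []string{"overlay.mountopt=nodev", "overlay.force_mask=700"}
	for _, o := range opts {
		// HasPrefix is false for "overlay.force_mask=700", which is why
		// the check above was loosened to Contains.
		fmt.Println(o, strings.HasPrefix(o, "force_mask="), strings.Contains(o, "force_mask="))
	}
}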


@@ -776,3 +776,8 @@ func (a *Driver) UpdateLayerIDMap(id string, toContainer, toHost *idtools.IDMapp
func (a *Driver) SupportsShifting() bool {
return false
}
// Dedup performs deduplication of the driver's storage.
func (d *Driver) Dedup(req graphdriver.DedupArgs) (graphdriver.DedupResult, error) {
return graphdriver.DedupResult{}, nil
}


@@ -673,3 +673,8 @@ func (d *Driver) ListLayers() ([]string, error) {
func (d *Driver) AdditionalImageStores() []string {
return nil
}
// Dedup performs deduplication of the driver's storage.
func (d *Driver) Dedup(req graphdriver.DedupArgs) (graphdriver.DedupResult, error) {
return graphdriver.DedupResult{}, nil
}


@@ -83,7 +83,7 @@ func (c *platformChowner) LChown(path string, info os.FileInfo, toHost, toContai
}
if uid != int(st.Uid) || gid != int(st.Gid) {
capability, err := system.Lgetxattr(path, "security.capability")
if err != nil && !errors.Is(err, system.EOPNOTSUPP) && err != system.ErrNotSupportedPlatform {
if err != nil && !errors.Is(err, system.ENOTSUP) && err != system.ErrNotSupportedPlatform {
return fmt.Errorf("%s: %w", os.Args[0], err)
}


@@ -101,7 +101,7 @@ func (c *platformChowner) LChown(path string, info os.FileInfo, toHost, toContai
}
if uid != int(st.Uid) || gid != int(st.Gid) {
cap, err := system.Lgetxattr(path, "security.capability")
if err != nil && !errors.Is(err, system.EOPNOTSUPP) && !errors.Is(err, system.EOVERFLOW) && err != system.ErrNotSupportedPlatform {
if err != nil && !errors.Is(err, system.ENOTSUP) && !errors.Is(err, system.EOVERFLOW) && err != system.ErrNotSupportedPlatform {
return fmt.Errorf("%s: %w", os.Args[0], err)
}


@@ -106,7 +106,7 @@ func legacyCopy(srcFile io.Reader, dstFile io.Writer) error {
func copyXattr(srcPath, dstPath, attr string) error {
data, err := system.Lgetxattr(srcPath, attr)
if err != nil && !errors.Is(err, unix.EOPNOTSUPP) {
if err != nil && !errors.Is(err, system.ENOTSUP) {
return err
}
if data != nil {
@@ -279,7 +279,7 @@ func doCopyXattrs(srcPath, dstPath string) error {
}
xattrs, err := system.Llistxattr(srcPath)
if err != nil && !errors.Is(err, unix.EOPNOTSUPP) {
if err != nil && !errors.Is(err, system.ENOTSUP) {
return err
}
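
These call sites standardize on system.ENOTSUP rather than mixing it with EOPNOTSUPP. On Linux the two names are the same errno value, so the errors.Is checks behave identically; a quick Linux-only sketch:

package main

import (
	"errors"
	"fmt"

	"golang.org/x/sys/unix"
)

func main() {
	// On Linux both names are errno 95, so either sentinel matches;
	// standardizing on one name keeps the checks uniform.
	fmt.Println(unix.ENOTSUP == unix.EOPNOTSUPP)          // true on Linux
	fmt.Println(errors.Is(unix.EOPNOTSUPP, unix.ENOTSUP)) // true on Linux
}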


@@ -8,6 +8,7 @@ import (
"path/filepath"
"strings"
"github.com/containers/storage/internal/dedup"
"github.com/containers/storage/pkg/archive"
"github.com/containers/storage/pkg/directory"
"github.com/containers/storage/pkg/fileutils"
@@ -81,6 +82,23 @@ type ApplyDiffWithDifferOpts struct {
Flags map[string]interface{}
}
// DedupArgs contains the information to perform storage deduplication.
type DedupArgs struct {
// Layers is the list of layers to deduplicate.
Layers []string
// Options that are passed directly to the pkg/dedup.DedupDirs function.
Options dedup.DedupOptions
}
// DedupResult contains the result of the Dedup() call.
type DedupResult struct {
// Deduped represents the total number of bytes saved by deduplication.
// This value accounts also for all previously deduplicated data, not only the savings
// from the last run.
Deduped uint64
}
// InitFunc initializes the storage driver.
type InitFunc func(homedir string, options Options) (Driver, error)
@@ -139,6 +157,8 @@ type ProtoDriver interface {
// AdditionalImageStores returns additional image stores supported by the driver
// This API is experimental and can be changed without bumping the major version number.
AdditionalImageStores() []string
// Dedup performs deduplication of the driver's storage.
Dedup(DedupArgs) (DedupResult, error)
}
// DiffDriver is the interface to use to implement graph diffs
@@ -211,8 +231,8 @@ const (
// DifferOutputFormatDir means the output is a directory and it will
// keep the original layout.
DifferOutputFormatDir = iota
// DifferOutputFormatFlat will store the files by their checksum, in the form
// checksum[0:2]/checksum[2:]
// DifferOutputFormatFlat will store the files by their checksum, per
// pkg/chunked/internal/composefs.RegularFilePathForValidatedDigest.
DifferOutputFormatFlat
)
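
Together, DedupArgs, DedupResult, and the new ProtoDriver.Dedup method give callers a uniform entry point regardless of driver. A sketch of how store code might invoke it, written as if inside containers/storage (internal/dedup is not importable from other modules):

package storage

import (
	graphdriver "github.com/containers/storage/drivers"
	"github.com/containers/storage/internal/dedup"
)

// dedupLayers drives the new interface method for a set of layer IDs.
func dedupLayers(drv graphdriver.Driver, layers []string) (uint64, error) {
	res, err := drv.Dedup(graphdriver.DedupArgs{
		Layers:  layers,
		Options: dedup.DedupOptions{HashMethod: dedup.DedupHashSHA256},
	})
	if err != nil {
		return 0, err
	}
	// Deduped is cumulative: it also counts bytes shared by earlier runs.
	return res.Deduped, nil
}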


@@ -10,7 +10,6 @@ import (
"github.com/containers/storage/pkg/archive"
"github.com/containers/storage/pkg/system"
"golang.org/x/sys/unix"
)
func scanForMountProgramIndicators(home string) (detected bool, err error) {
@@ -28,7 +27,7 @@ func scanForMountProgramIndicators(home string) (detected bool, err error) {
}
if d.IsDir() {
xattrs, err := system.Llistxattr(path)
if err != nil && !errors.Is(err, unix.EOPNOTSUPP) {
if err != nil && !errors.Is(err, system.ENOTSUP) {
return err
}
for _, xattr := range xattrs {


@@ -1,4 +1,4 @@
//go:build linux && cgo
//go:build linux
package overlay
@@ -27,7 +27,7 @@ var (
composeFsHelperErr error
// skipMountViaFile is used to avoid trying to mount EROFS directly via the file if we already know the current kernel
// does not support it. Mounting directly via a file will be supported in kernel 6.12.
// does not support it. Mounting directly via a file is supported from Linux 6.12.
skipMountViaFile atomic.Bool
)


@@ -22,6 +22,7 @@ import (
graphdriver "github.com/containers/storage/drivers"
"github.com/containers/storage/drivers/overlayutils"
"github.com/containers/storage/drivers/quota"
"github.com/containers/storage/internal/dedup"
"github.com/containers/storage/pkg/archive"
"github.com/containers/storage/pkg/chrootarchive"
"github.com/containers/storage/pkg/directory"
@@ -1096,6 +1097,7 @@ func (d *Driver) create(id, parent string, opts *graphdriver.CreateOpts, readOnl
}
if d.options.forceMask != nil {
st.Mode |= os.ModeDir
if err := idtools.SetContainersOverrideXattr(diff, st); err != nil {
return err
}
@@ -2740,3 +2742,22 @@ func getMappedMountRoot(path string) string {
}
return dirName
}
// Dedup performs deduplication of the driver's storage.
func (d *Driver) Dedup(req graphdriver.DedupArgs) (graphdriver.DedupResult, error) {
var dirs []string
for _, layer := range req.Layers {
dir, _, inAdditionalStore := d.dir2(layer, false)
if inAdditionalStore {
continue
}
if err := fileutils.Exists(dir); err == nil {
dirs = append(dirs, filepath.Join(dir, "diff"))
}
}
r, err := dedup.DedupDirs(dirs, req.Options)
if err != nil {
return graphdriver.DedupResult{}, err
}
return graphdriver.DedupResult{Deduped: r.Deduped}, nil
}


@@ -1,23 +0,0 @@
//go:build linux && !cgo
package overlay
import (
"fmt"
)
func openComposefsMount(dataDir string) (int, error) {
return 0, fmt.Errorf("composefs not supported on this build")
}
func getComposeFsHelper() (string, error) {
return "", fmt.Errorf("composefs not supported on this build")
}
func mountComposefsBlob(dataDir, mountPoint string) error {
return fmt.Errorf("composefs not supported on this build")
}
func generateComposeFsBlob(verityDigests map[string]string, toc interface{}, composefsDir string) error {
return fmt.Errorf("composefs not supported on this build")
}


@@ -1,4 +1,4 @@
//go:build !exclude_graphdriver_overlay && linux && cgo
//go:build !exclude_graphdriver_overlay && linux
package register


@@ -10,6 +10,7 @@ import (
"strings"
graphdriver "github.com/containers/storage/drivers"
"github.com/containers/storage/internal/dedup"
"github.com/containers/storage/pkg/archive"
"github.com/containers/storage/pkg/directory"
"github.com/containers/storage/pkg/fileutils"
@@ -348,3 +349,19 @@ func (d *Driver) Diff(id string, idMappings *idtools.IDMappings, parent string,
func (d *Driver) DiffSize(id string, idMappings *idtools.IDMappings, parent string, parentMappings *idtools.IDMappings, mountLabel string) (size int64, err error) {
return d.naiveDiff.DiffSize(id, idMappings, parent, parentMappings, mountLabel)
}
// Dedup performs deduplication of the driver's storage.
func (d *Driver) Dedup(req graphdriver.DedupArgs) (graphdriver.DedupResult, error) {
var dirs []string
for _, layer := range req.Layers {
dir := d.dir2(layer, false)
if err := fileutils.Exists(dir); err == nil {
dirs = append(dirs, dir)
}
}
r, err := dedup.DedupDirs(dirs, req.Options)
if err != nil {
return graphdriver.DedupResult{}, err
}
return graphdriver.DedupResult{Deduped: r.Deduped}, nil
}


@@ -975,6 +975,11 @@ func (d *Driver) AdditionalImageStores() []string {
return nil
}
// Dedup performs deduplication of the driver's storage.
func (d *Driver) Dedup(req graphdriver.DedupArgs) (graphdriver.DedupResult, error) {
return graphdriver.DedupResult{}, nil
}
// UpdateLayerIDMap changes ownerships in the layer's filesystem tree from
// matching those in toContainer to matching those in toHost.
func (d *Driver) UpdateLayerIDMap(id string, toContainer, toHost *idtools.IDMappings, mountLabel string) error {


@@ -511,3 +511,8 @@ func (d *Driver) ListLayers() ([]string, error) {
func (d *Driver) AdditionalImageStores() []string {
return nil
}
// Dedup performs deduplication of the driver's storage.
func (d *Driver) Dedup(req graphdriver.DedupArgs) (graphdriver.DedupResult, error) {
return graphdriver.DedupResult{}, nil
}


@@ -0,0 +1,163 @@
package dedup
import (
"crypto/sha256"
"encoding/binary"
"errors"
"fmt"
"hash/crc64"
"io/fs"
"sync"
"github.com/opencontainers/selinux/pkg/pwalkdir"
"github.com/sirupsen/logrus"
)
var notSupported = errors.New("reflinks are not supported on this platform")
const (
DedupHashInvalid DedupHashMethod = iota
DedupHashCRC
DedupHashFileSize
DedupHashSHA256
)
type DedupHashMethod int
type DedupOptions struct {
// HashMethod is the hash function to use to find identical files
HashMethod DedupHashMethod
}
type DedupResult struct {
// Deduped represents the total number of bytes saved by deduplication.
// This value accounts also for all previously deduplicated data, not only the savings
// from the last run.
Deduped uint64
}
func getFileChecksum(hashMethod DedupHashMethod, path string, info fs.FileInfo) (string, error) {
switch hashMethod {
case DedupHashInvalid:
return "", fmt.Errorf("invalid hash method: %v", hashMethod)
case DedupHashFileSize:
return fmt.Sprintf("%v", info.Size()), nil
case DedupHashSHA256:
return readAllFile(path, info, func(buf []byte) (string, error) {
h := sha256.New()
if _, err := h.Write(buf); err != nil {
return "", err
}
return string(h.Sum(nil)), nil
})
case DedupHashCRC:
return readAllFile(path, info, func(buf []byte) (string, error) {
c := crc64.New(crc64.MakeTable(crc64.ECMA))
if _, err := c.Write(buf); err != nil {
return "", err
}
bufRet := make([]byte, 8)
binary.BigEndian.PutUint64(bufRet, c.Sum64())
return string(bufRet), nil
})
default:
return "", fmt.Errorf("unknown hash method: %v", hashMethod)
}
}
type pathsLocked struct {
paths []string
lock sync.Mutex
}
func DedupDirs(dirs []string, options DedupOptions) (DedupResult, error) {
res := DedupResult{}
hashToPaths := make(map[string]*pathsLocked)
lock := sync.Mutex{} // protects `hashToPaths` and `res`
dedup, err := newDedupFiles()
if err != nil {
return res, err
}
for _, dir := range dirs {
logrus.Debugf("Deduping directory %s", dir)
if err := pwalkdir.Walk(dir, func(path string, d fs.DirEntry, err error) error {
if err != nil {
return err
}
if !d.Type().IsRegular() {
return nil
}
info, err := d.Info()
if err != nil {
return err
}
size := uint64(info.Size())
if size == 0 {
// do not bother with empty files
return nil
}
// the file was already deduplicated
if visited, err := dedup.isFirstVisitOf(info); err != nil {
return err
} else if visited {
return nil
}
h, err := getFileChecksum(options.HashMethod, path, info)
if err != nil {
return err
}
lock.Lock()
item, foundItem := hashToPaths[h]
if !foundItem {
item = &pathsLocked{paths: []string{path}}
hashToPaths[h] = item
lock.Unlock()
return nil
}
item.lock.Lock()
lock.Unlock()
dedupBytes, err := func() (uint64, error) { // function to have a scope for the defer statement
defer item.lock.Unlock()
var dedupBytes uint64
for _, src := range item.paths {
deduped, err := dedup.dedup(src, path, info)
if err == nil && deduped > 0 {
logrus.Debugf("Deduped %q -> %q (%d bytes)", src, path, deduped)
dedupBytes += deduped
break
}
logrus.Debugf("Failed to deduplicate: %v", err)
if errors.Is(err, notSupported) {
return dedupBytes, err
}
}
if dedupBytes == 0 {
item.paths = append(item.paths, path)
}
return dedupBytes, nil
}()
if err != nil {
return err
}
lock.Lock()
res.Deduped += dedupBytes
lock.Unlock()
return nil
}); err != nil {
// if reflinks are not supported, return immediately without errors
if errors.Is(err, notSupported) {
return res, nil
}
return res, err
}
}
return res, nil
}
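
DedupDirs uses hand-over-hand locking: the global lock guarding hashToPaths is held only long enough to find or create the per-hash entry, and the entry's own lock is taken before the global one is released, so concurrent walkers contend per hash rather than on the whole map. The pattern in isolation:

package main

import (
	"fmt"
	"sync"
)

type entry struct {
	mu    sync.Mutex
	paths []string
}

type registry struct {
	mu sync.Mutex
	m  map[string]*entry
}

// visit looks up the entry for key under the global lock. The first caller
// for a key creates the entry and gets nil back; later callers get the
// entry back with its own lock already held, acquired before the global
// lock is dropped, mirroring the walk above.
func (r *registry) visit(key, path string) *entry {
	r.mu.Lock()
	e, ok := r.m[key]
	if !ok {
		r.m[key] = &entry{paths: []string{path}}
		r.mu.Unlock()
		return nil
	}
	e.mu.Lock()
	r.mu.Unlock()
	return e
}

func main() {
	r := &registry{m: map[string]*entry{}}
	fmt.Println(r.visit("h1", "/a") == nil) // true: first visitor
	if e := r.visit("h1", "/b"); e != nil {
		e.paths = append(e.paths, "/b") // safe: e.mu is held
		e.mu.Unlock()
	}
}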


@@ -0,0 +1,139 @@
package dedup
import (
"errors"
"fmt"
"io"
"io/fs"
"os"
"sync"
"syscall"
"golang.org/x/sys/unix"
)
type deviceInodePair struct {
dev uint64
ino uint64
}
type dedupFiles struct {
lock sync.Mutex
visitedInodes map[deviceInodePair]struct{}
}
func newDedupFiles() (*dedupFiles, error) {
return &dedupFiles{
visitedInodes: make(map[deviceInodePair]struct{}),
}, nil
}
func (d *dedupFiles) recordInode(dev, ino uint64) (bool, error) {
d.lock.Lock()
defer d.lock.Unlock()
di := deviceInodePair{
dev: dev,
ino: ino,
}
_, visited := d.visitedInodes[di]
d.visitedInodes[di] = struct{}{}
return visited, nil
}
// isFirstVisitOf records that the file is being processed. Returns true if the file was already visited.
func (d *dedupFiles) isFirstVisitOf(fi fs.FileInfo) (bool, error) {
st, ok := fi.Sys().(*syscall.Stat_t)
if !ok {
return false, fmt.Errorf("unable to get raw syscall.Stat_t data")
}
return d.recordInode(uint64(st.Dev), st.Ino)
}
// dedup deduplicates the file at src path to dst path
func (d *dedupFiles) dedup(src, dst string, fiDst fs.FileInfo) (uint64, error) {
srcFile, err := os.OpenFile(src, os.O_RDONLY, 0)
if err != nil {
return 0, fmt.Errorf("failed to open source file: %w", err)
}
defer srcFile.Close()
dstFile, err := os.OpenFile(dst, os.O_WRONLY, 0)
if err != nil {
return 0, fmt.Errorf("failed to open destination file: %w", err)
}
defer dstFile.Close()
stSrc, err := srcFile.Stat()
if err != nil {
return 0, fmt.Errorf("failed to stat source file: %w", err)
}
sSrc, ok := stSrc.Sys().(*syscall.Stat_t)
if !ok {
return 0, fmt.Errorf("unable to get raw syscall.Stat_t data")
}
sDest, ok := fiDst.Sys().(*syscall.Stat_t)
if !ok {
return 0, fmt.Errorf("unable to get raw syscall.Stat_t data")
}
if sSrc.Dev == sDest.Dev && sSrc.Ino == sDest.Ino {
// same inode, we are dealing with a hard link, no need to deduplicate
return 0, nil
}
value := unix.FileDedupeRange{
Src_offset: 0,
Src_length: uint64(stSrc.Size()),
Info: []unix.FileDedupeRangeInfo{
{
Dest_fd: int64(dstFile.Fd()),
Dest_offset: 0,
},
},
}
err = unix.IoctlFileDedupeRange(int(srcFile.Fd()), &value)
if err == nil {
return uint64(value.Info[0].Bytes_deduped), nil
}
if errors.Is(err, unix.ENOTSUP) {
return 0, notSupported
}
return 0, fmt.Errorf("failed to clone file %q: %w", src, err)
}
func readAllFile(path string, info fs.FileInfo, fn func([]byte) (string, error)) (string, error) {
size := info.Size()
if size == 0 {
return fn(nil)
}
file, err := os.Open(path)
if err != nil {
return "", err
}
defer file.Close()
if size < 4096 {
// small file, read it all
data := make([]byte, size)
_, err = io.ReadFull(file, data)
if err != nil {
return "", err
}
return fn(data)
}
mmap, err := unix.Mmap(int(file.Fd()), 0, int(size), unix.PROT_READ, unix.MAP_PRIVATE)
if err != nil {
return "", fmt.Errorf("failed to mmap file: %w", err)
}
defer func() {
_ = unix.Munmap(mmap)
}()
_ = unix.Madvise(mmap, unix.MADV_SEQUENTIAL)
return fn(mmap)
}
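
The dedup method relies on the Linux FIDEDUPERANGE ioctl, which only merges extents after the kernel itself has verified the byte ranges are identical, so a hash collision in the grouping above cannot corrupt data. A minimal standalone example (paths are placeholders; both files must hold identical content):

package main

import (
	"fmt"
	"os"

	"golang.org/x/sys/unix"
)

func main() {
	src, err := os.Open("/tmp/a")
	if err != nil {
		panic(err)
	}
	defer src.Close()
	dst, err := os.OpenFile("/tmp/b", os.O_WRONLY, 0)
	if err != nil {
		panic(err)
	}
	defer dst.Close()

	st, err := src.Stat()
	if err != nil {
		panic(err)
	}
	req := unix.FileDedupeRange{
		Src_offset: 0,
		Src_length: uint64(st.Size()),
		Info:       []unix.FileDedupeRangeInfo{{Dest_fd: int64(dst.Fd()), Dest_offset: 0}},
	}
	// The kernel compares the ranges itself and refuses to merge extents
	// that differ, so this cannot be tricked by a checksum collision.
	if err := unix.IoctlFileDedupeRange(int(src.Fd()), &req); err != nil {
		panic(err) // unix.ENOTSUP on filesystems without reflink support
	}
	fmt.Println("bytes deduped:", req.Info[0].Bytes_deduped)
}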


@@ -0,0 +1,27 @@
//go:build !linux
package dedup
import (
"io/fs"
)
type dedupFiles struct{}
func newDedupFiles() (*dedupFiles, error) {
return nil, notSupported
}
// isFirstVisitOf records that the file is being processed. Returns true if the file was already visited.
func (d *dedupFiles) isFirstVisitOf(fi fs.FileInfo) (bool, error) {
return false, notSupported
}
// dedup deduplicates the file at src path to dst path
func (d *dedupFiles) dedup(src, dst string, fiDst fs.FileInfo) (uint64, error) {
return 0, notSupported
}
func readAllFile(path string, info fs.FileInfo, fn func([]byte) (string, error)) (string, error) {
return "", notSupported
}


@@ -336,6 +336,9 @@ type rwLayerStore interface {
// Clean up unreferenced layers
GarbageCollect() error
// Dedup deduplicates layers in the store.
dedup(drivers.DedupArgs) (drivers.DedupResult, error)
}
type multipleLockFile struct {
@@ -913,12 +916,22 @@ func (r *layerStore) load(lockedForWriting bool) (bool, error) {
// user of this storage area marked for deletion but didn't manage to
// actually delete.
var incompleteDeletionErrors error // = nil
var layersToDelete []*Layer
for _, layer := range r.layers {
if layer.Flags == nil {
layer.Flags = make(map[string]interface{})
}
if layerHasIncompleteFlag(layer) {
logrus.Warnf("Found incomplete layer %#v, deleting it", layer.ID)
// Important: Do not call r.deleteInternal() here. It modifies r.layers
// which causes unexpected side effects while iterating over r.layers here.
// The range loop has no idea that the underlying elements were shifted
// around.
layersToDelete = append(layersToDelete, layer)
}
}
// Now actually delete the layers
for _, layer := range layersToDelete {
logrus.Warnf("Found incomplete layer %q, deleting it", layer.ID)
err := r.deleteInternal(layer.ID)
if err != nil {
// Don't return the error immediately, because deleteInternal does not saveLayers();
@@ -929,7 +942,6 @@ func (r *layerStore) load(lockedForWriting bool) (bool, error) {
}
modifiedLocations |= layerLocation(layer)
}
}
if err := r.saveLayers(modifiedLocations); err != nil {
return false, err
}
@@ -2592,6 +2604,11 @@ func (r *layerStore) LayersByTOCDigest(d digest.Digest) ([]Layer, error) {
return r.layersByDigestMap(r.bytocsum, d)
}
// Requires startWriting.
func (r *layerStore) dedup(req drivers.DedupArgs) (drivers.DedupResult, error) {
return r.driver.Dedup(req)
}
func closeAll(closes ...func() error) (rErr error) {
for _, f := range closes {
if err := f(); err != nil {
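
The collect-then-delete rewrite above avoids a classic Go pitfall: removing elements from a slice while ranging over it shifts the survivors into indices the loop has already passed, so some are silently skipped. Distilled:

package main

import "fmt"

func isIncomplete(l string) bool { return l == "incomplete1" || l == "incomplete2" }

func main() {
	layers := []string{"a", "incomplete1", "incomplete2", "b"}

	// Buggy shape: deleting during the range. Removing "incomplete1"
	// slides "incomplete2" into an index the loop has already visited,
	// so it survives.
	buggy := append([]string(nil), layers...)
	for i, l := range buggy {
		if isIncomplete(l) {
			buggy = append(buggy[:i], buggy[i+1:]...)
		}
	}
	fmt.Println(buggy) // [a incomplete2 b]

	// Fixed shape, as in the change above: collect first, delete after.
	var toDelete []string
	for _, l := range layers {
		if isIncomplete(l) {
			toDelete = append(toDelete, l)
		}
	}
	fmt.Println(toDelete) // [incomplete1 incomplete2]
}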


@@ -78,6 +78,7 @@ const (
windows = "windows"
darwin = "darwin"
freebsd = "freebsd"
linux = "linux"
)
var xattrsToIgnore = map[string]interface{}{
@ -427,7 +428,7 @@ func readSecurityXattrToTarHeader(path string, hdr *tar.Header) error {
}
for _, xattr := range []string{"security.capability", "security.ima"} {
capability, err := system.Lgetxattr(path, xattr)
if err != nil && !errors.Is(err, system.EOPNOTSUPP) && err != system.ErrNotSupportedPlatform {
if err != nil && !errors.Is(err, system.ENOTSUP) && err != system.ErrNotSupportedPlatform {
return fmt.Errorf("failed to read %q attribute from %q: %w", xattr, path, err)
}
if capability != nil {
@@ -440,7 +441,7 @@
// readUserXattrToTarHeader reads user.* xattr from filesystem to a tar header
func readUserXattrToTarHeader(path string, hdr *tar.Header) error {
xattrs, err := system.Llistxattr(path)
if err != nil && !errors.Is(err, system.EOPNOTSUPP) && err != system.ErrNotSupportedPlatform {
if err != nil && !errors.Is(err, system.ENOTSUP) && err != system.ErrNotSupportedPlatform {
return err
}
for _, key := range xattrs {
@@ -655,12 +656,20 @@ func createTarFile(path, extractDir string, hdr *tar.Header, reader io.Reader, L
// so use hdrInfo.Mode() (they differ for e.g. setuid bits)
hdrInfo := hdr.FileInfo()
typeFlag := hdr.Typeflag
mask := hdrInfo.Mode()
// update also the implementation of ForceMask in pkg/chunked
if forceMask != nil {
mask = *forceMask
// If we have a forceMask, force the real type to either be a directory,
// a link, or a regular file.
if typeFlag != tar.TypeDir && typeFlag != tar.TypeSymlink && typeFlag != tar.TypeLink {
typeFlag = tar.TypeReg
}
}
switch hdr.Typeflag {
switch typeFlag {
case tar.TypeDir:
// Create directory unless it exists as a directory already.
// In that case we just want to merge the two
@@ -728,16 +737,6 @@ func createTarFile(path, extractDir string, hdr *tar.Header, reader io.Reader, L
return fmt.Errorf("unhandled tar header type %d", hdr.Typeflag)
}
if forceMask != nil && (hdr.Typeflag != tar.TypeSymlink || runtime.GOOS == "darwin") {
value := idtools.Stat{
IDs: idtools.IDPair{UID: hdr.Uid, GID: hdr.Gid},
Mode: hdrInfo.Mode() & 0o7777,
}
if err := idtools.SetContainersOverrideXattr(path, value); err != nil {
return err
}
}
// Lchown is not supported on Windows.
if Lchown && runtime.GOOS != windows {
if chownOpts == nil {
@@ -793,18 +792,30 @@ func createTarFile(path, extractDir string, hdr *tar.Header, reader io.Reader, L
continue
}
if err := system.Lsetxattr(path, xattrKey, []byte(value), 0); err != nil {
if errors.Is(err, syscall.ENOTSUP) || (inUserns && errors.Is(err, syscall.EPERM)) {
// We ignore errors here because not all graphdrivers support
// xattrs *cough* old versions of AUFS *cough*. However only
// ENOTSUP should be emitted in that case, otherwise we still
// bail. We also ignore EPERM errors if we are running in a
// user namespace.
if errors.Is(err, system.ENOTSUP) || (inUserns && errors.Is(err, syscall.EPERM)) {
// Ignore specific error cases:
// - ENOTSUP: Expected for graphdrivers lacking extended attribute support:
// - Legacy AUFS versions
// - FreeBSD with unsupported namespaces (trusted, security)
// - EPERM: Expected when operating within a user namespace
// All other errors will cause a failure.
errs = append(errs, err.Error())
continue
}
return err
}
}
if forceMask != nil && (typeFlag == tar.TypeReg || typeFlag == tar.TypeDir || runtime.GOOS == "darwin") {
value := idtools.Stat{
IDs: idtools.IDPair{UID: hdr.Uid, GID: hdr.Gid},
Mode: hdrInfo.Mode(),
Major: int(hdr.Devmajor),
Minor: int(hdr.Devminor),
}
if err := idtools.SetContainersOverrideXattr(path, value); err != nil {
return err
}
}
// We defer setting flags on directories until the end of
@@ -1149,11 +1160,11 @@ loop:
}
if options.ForceMask != nil {
value := idtools.Stat{Mode: 0o755}
value := idtools.Stat{Mode: os.ModeDir | os.FileMode(0o755)}
if rootHdr != nil {
value.IDs.UID = rootHdr.Uid
value.IDs.GID = rootHdr.Gid
value.Mode = os.FileMode(rootHdr.Mode)
value.Mode = os.ModeDir | os.FileMode(rootHdr.Mode)
}
if err := idtools.SetContainersOverrideXattr(dest, value); err != nil {
return err
@@ -1379,7 +1390,7 @@ func remapIDs(readIDMappings, writeIDMappings *idtools.IDMappings, chownOpts *id
uid, gid = hdr.Uid, hdr.Gid
if xstat, ok := hdr.PAXRecords[PaxSchilyXattr+idtools.ContainersOverrideXattr]; ok {
attrs := strings.Split(string(xstat), ":")
if len(attrs) == 3 {
if len(attrs) >= 3 {
val, err := strconv.ParseUint(attrs[0], 10, 32)
if err != nil {
uid = int(val)
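
The relaxed len(attrs) >= 3 check lets readers accept the extended uid:gid:mode[:major:minor] encoding that SetContainersOverrideXattr now writes for device nodes, while remaining compatible with the old three-field form. A sketch of a tolerant parser in the same shape (helper name hypothetical; this assumes decimal IDs and an octal mode field):

package main

import (
	"fmt"
	"strconv"
	"strings"
)

// parseOverrideStat parses "uid:gid:mode[:major:minor]" as carried in the
// user.containers.override_stat xattr; accepting len >= 3 tolerates
// writers that append extra fields.
func parseOverrideStat(v string) (uid, gid int, mode uint64, err error) {
	attrs := strings.Split(v, ":")
	if len(attrs) < 3 {
		return 0, 0, 0, fmt.Errorf("malformed override stat %q", v)
	}
	u, err := strconv.ParseUint(attrs[0], 10, 32)
	if err != nil {
		return 0, 0, 0, err
	}
	g, err := strconv.ParseUint(attrs[1], 10, 32)
	if err != nil {
		return 0, 0, 0, err
	}
	m, err := strconv.ParseUint(attrs[2], 8, 32)
	if err != nil {
		return 0, 0, 0, err
	}
	return int(u), int(g), m, nil
}

func main() {
	fmt.Println(parseOverrideStat("0:0:755"))
	fmt.Println(parseOverrideStat("1000:1000:644:8:1"))
}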


@@ -270,6 +270,7 @@ type FileInfo struct {
capability []byte
added bool
xattrs map[string]string
target string
}
// LookUp looks up the file information of a file.
@@ -336,6 +337,7 @@ func (info *FileInfo) addChanges(oldInfo *FileInfo, changes *[]Change) {
// back mtime
if statDifferent(oldStat, oldInfo, newStat, info) ||
!bytes.Equal(oldChild.capability, newChild.capability) ||
oldChild.target != newChild.target ||
!reflect.DeepEqual(oldChild.xattrs, newChild.xattrs) {
change := Change{
Path: newChild.path(),
@@ -390,6 +392,7 @@ func newRootFileInfo(idMappings *idtools.IDMappings) *FileInfo {
name: string(os.PathSeparator),
idMappings: idMappings,
children: make(map[string]*FileInfo),
target: "",
}
return root
}


@@ -79,6 +79,7 @@ func walkchunk(path string, fi os.FileInfo, dir string, root *FileInfo) error {
children: make(map[string]*FileInfo),
parent: parent,
idMappings: root.idMappings,
target: "",
}
cpath := filepath.Join(dir, path)
stat, err := system.FromStatT(fi.Sys().(*syscall.Stat_t))
@@ -87,11 +88,11 @@ func walkchunk(path string, fi os.FileInfo, dir string, root *FileInfo) error {
}
info.stat = stat
info.capability, err = system.Lgetxattr(cpath, "security.capability") // lgetxattr(2): fs access
if err != nil && !errors.Is(err, system.EOPNOTSUPP) {
if err != nil && !errors.Is(err, system.ENOTSUP) {
return err
}
xattrs, err := system.Llistxattr(cpath)
if err != nil && !errors.Is(err, system.EOPNOTSUPP) {
if err != nil && !errors.Is(err, system.ENOTSUP) {
return err
}
for _, key := range xattrs {
@@ -110,6 +111,12 @@ func walkchunk(path string, fi os.FileInfo, dir string, root *FileInfo) error {
info.xattrs[key] = string(value)
}
}
if fi.Mode()&os.ModeSymlink != 0 {
info.target, err = os.Readlink(cpath)
if err != nil {
return err
}
}
parent.children[info.name] = info
return nil
}
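
Recording the link target closes a blind spot: a retargeted symlink keeps its mode, and its size only reflects the target's length, so a same-length retarget could previously look unchanged to a plain stat comparison. The target is read the usual way:

package main

import (
	"fmt"
	"os"
)

func main() {
	// Example path: any symlink will do.
	target, err := os.Readlink("/etc/localtime")
	if err != nil {
		fmt.Println("readlink:", err)
		return
	}
	fmt.Println("points at:", target)
}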


@@ -16,7 +16,7 @@ import (
storage "github.com/containers/storage"
graphdriver "github.com/containers/storage/drivers"
"github.com/containers/storage/pkg/chunked/internal"
"github.com/containers/storage/pkg/chunked/internal/minimal"
"github.com/containers/storage/pkg/ioutils"
"github.com/docker/go-units"
jsoniter "github.com/json-iterator/go"
@@ -710,7 +710,7 @@ func prepareCacheFile(manifest []byte, format graphdriver.DifferOutputFormat) ([
switch format {
case graphdriver.DifferOutputFormatDir:
case graphdriver.DifferOutputFormatFlat:
entries, err = makeEntriesFlat(entries)
entries, err = makeEntriesFlat(entries, nil)
if err != nil {
return nil, err
}
@@ -848,12 +848,12 @@ func (c *layersCache) findFileInOtherLayers(file *fileMetadata, useHardLinks boo
return "", "", nil
}
func (c *layersCache) findChunkInOtherLayers(chunk *internal.FileMetadata) (string, string, int64, error) {
func (c *layersCache) findChunkInOtherLayers(chunk *minimal.FileMetadata) (string, string, int64, error) {
return c.findDigestInternal(chunk.ChunkDigest)
}
func unmarshalToc(manifest []byte) (*internal.TOC, error) {
var toc internal.TOC
func unmarshalToc(manifest []byte) (*minimal.TOC, error) {
var toc minimal.TOC
iter := jsoniter.ParseBytes(jsoniter.ConfigFastest, manifest)
@@ -864,7 +864,7 @@ func unmarshalToc(manifest []byte) (*internal.TOC, error) {
case "entries":
for iter.ReadArray() {
var m internal.FileMetadata
var m minimal.FileMetadata
for field := iter.ReadObject(); field != ""; field = iter.ReadObject() {
switch strings.ToLower(field) {
case "type":


@@ -4,18 +4,18 @@ import (
"io"
"github.com/containers/storage/pkg/chunked/compressor"
"github.com/containers/storage/pkg/chunked/internal"
"github.com/containers/storage/pkg/chunked/internal/minimal"
)
const (
TypeReg = internal.TypeReg
TypeChunk = internal.TypeChunk
TypeLink = internal.TypeLink
TypeChar = internal.TypeChar
TypeBlock = internal.TypeBlock
TypeDir = internal.TypeDir
TypeFifo = internal.TypeFifo
TypeSymlink = internal.TypeSymlink
TypeReg = minimal.TypeReg
TypeChunk = minimal.TypeChunk
TypeLink = minimal.TypeLink
TypeChar = minimal.TypeChar
TypeBlock = minimal.TypeBlock
TypeDir = minimal.TypeDir
TypeFifo = minimal.TypeFifo
TypeSymlink = minimal.TypeSymlink
)
// ZstdCompressor is a CompressorFunc for the zstd compression algorithm.


@@ -10,7 +10,7 @@ import (
"strconv"
"time"
"github.com/containers/storage/pkg/chunked/internal"
"github.com/containers/storage/pkg/chunked/internal/minimal"
"github.com/klauspost/compress/zstd"
"github.com/klauspost/pgzip"
digest "github.com/opencontainers/go-digest"
@@ -20,6 +20,12 @@ import (
expMaps "golang.org/x/exp/maps"
)
const (
// maxTocSize is the maximum size of a blob that we will attempt to process.
// It is used to prevent DoS attacks from layers that embed a very large TOC file.
maxTocSize = (1 << 20) * 50
)
var typesToTar = map[string]byte{
TypeReg: tar.TypeReg,
TypeLink: tar.TypeLink,
@@ -44,25 +50,21 @@ func readEstargzChunkedManifest(blobStream ImageSourceSeekable, blobSize int64,
if blobSize <= footerSize {
return nil, 0, errors.New("blob too small")
}
chunk := ImageSourceChunk{
Offset: uint64(blobSize - footerSize),
Length: uint64(footerSize),
}
parts, errs, err := blobStream.GetBlobAt([]ImageSourceChunk{chunk})
footer := make([]byte, footerSize)
streamsOrErrors, err := getBlobAt(blobStream, ImageSourceChunk{Offset: uint64(blobSize - footerSize), Length: uint64(footerSize)})
if err != nil {
return nil, 0, err
}
var reader io.ReadCloser
select {
case r := <-parts:
reader = r
case err := <-errs:
return nil, 0, err
for soe := range streamsOrErrors {
if soe.stream != nil {
_, err = io.ReadFull(soe.stream, footer)
_ = soe.stream.Close()
}
if soe.err != nil && err == nil {
err = soe.err
}
defer reader.Close()
footer := make([]byte, footerSize)
if _, err := io.ReadFull(reader, footer); err != nil {
return nil, 0, err
}
/* Read the ToC offset:
@ -81,31 +83,25 @@ func readEstargzChunkedManifest(blobStream ImageSourceSeekable, blobSize int64,
size := int64(blobSize - footerSize - tocOffset)
// set a reasonable limit
if size > (1<<20)*50 {
if size > maxTocSize {
return nil, 0, errors.New("manifest too big")
}
chunk = ImageSourceChunk{
Offset: uint64(tocOffset),
Length: uint64(size),
}
parts, errs, err = blobStream.GetBlobAt([]ImageSourceChunk{chunk})
streamsOrErrors, err = getBlobAt(blobStream, ImageSourceChunk{Offset: uint64(tocOffset), Length: uint64(size)})
if err != nil {
return nil, 0, err
}
var tocReader io.ReadCloser
select {
case r := <-parts:
tocReader = r
case err := <-errs:
return nil, 0, err
}
defer tocReader.Close()
var manifestUncompressed []byte
r, err := pgzip.NewReader(tocReader)
for soe := range streamsOrErrors {
if soe.stream != nil {
err1 := func() error {
defer soe.stream.Close()
r, err := pgzip.NewReader(soe.stream)
if err != nil {
return nil, 0, err
return err
}
defer r.Close()
@ -113,16 +109,28 @@ func readEstargzChunkedManifest(blobStream ImageSourceSeekable, blobSize int64,
header, err := aTar.Next()
if err != nil {
return nil, 0, err
return err
}
// set a reasonable limit
if header.Size > (1<<20)*50 {
return nil, 0, errors.New("manifest too big")
if header.Size > maxTocSize {
return errors.New("manifest too big")
}
manifestUncompressed := make([]byte, header.Size)
manifestUncompressed = make([]byte, header.Size)
if _, err := io.ReadFull(aTar, manifestUncompressed); err != nil {
return nil, 0, err
return err
}
return nil
}()
if err == nil {
err = err1
}
} else if err == nil {
err = soe.err
}
}
if manifestUncompressed == nil {
return nil, 0, errors.New("manifest not found")
}
manifestDigester := digest.Canonical.Digester()
@ -140,10 +148,10 @@ func readEstargzChunkedManifest(blobStream ImageSourceSeekable, blobSize int64,
// readZstdChunkedManifest reads the zstd:chunked manifest from the seekable stream blobStream.
// Returns (manifest blob, parsed manifest, tar-split blob or nil, manifest offset).
func readZstdChunkedManifest(blobStream ImageSourceSeekable, tocDigest digest.Digest, annotations map[string]string) ([]byte, *internal.TOC, []byte, int64, error) {
offsetMetadata := annotations[internal.ManifestInfoKey]
func readZstdChunkedManifest(blobStream ImageSourceSeekable, tocDigest digest.Digest, annotations map[string]string) (_ []byte, _ *minimal.TOC, _ []byte, _ int64, retErr error) {
offsetMetadata := annotations[minimal.ManifestInfoKey]
if offsetMetadata == "" {
return nil, nil, nil, 0, fmt.Errorf("%q annotation missing", internal.ManifestInfoKey)
return nil, nil, nil, 0, fmt.Errorf("%q annotation missing", minimal.ManifestInfoKey)
}
var manifestChunk ImageSourceChunk
var manifestLengthUncompressed, manifestType uint64
@ -153,21 +161,21 @@ func readZstdChunkedManifest(blobStream ImageSourceSeekable, tocDigest digest.Di
// The tarSplit… values are valid if tarSplitChunk.Offset > 0
var tarSplitChunk ImageSourceChunk
var tarSplitLengthUncompressed uint64
if tarSplitInfoKeyAnnotation, found := annotations[internal.TarSplitInfoKey]; found {
if tarSplitInfoKeyAnnotation, found := annotations[minimal.TarSplitInfoKey]; found {
if _, err := fmt.Sscanf(tarSplitInfoKeyAnnotation, "%d:%d:%d", &tarSplitChunk.Offset, &tarSplitChunk.Length, &tarSplitLengthUncompressed); err != nil {
return nil, nil, nil, 0, err
}
}
if manifestType != internal.ManifestTypeCRFS {
if manifestType != minimal.ManifestTypeCRFS {
return nil, nil, nil, 0, errors.New("invalid manifest type")
}
// set a reasonable limit
if manifestChunk.Length > (1<<20)*50 {
if manifestChunk.Length > maxTocSize {
return nil, nil, nil, 0, errors.New("manifest too big")
}
if manifestLengthUncompressed > (1<<20)*50 {
if manifestLengthUncompressed > maxTocSize {
return nil, nil, nil, 0, errors.New("manifest too big")
}
@ -175,26 +183,31 @@ func readZstdChunkedManifest(blobStream ImageSourceSeekable, tocDigest digest.Di
if tarSplitChunk.Offset > 0 {
chunks = append(chunks, tarSplitChunk)
}
parts, errs, err := blobStream.GetBlobAt(chunks)
streamsOrErrors, err := getBlobAt(blobStream, chunks...)
if err != nil {
return nil, nil, nil, 0, err
}
readBlob := func(len uint64) ([]byte, error) {
var reader io.ReadCloser
select {
case r := <-parts:
reader = r
case err := <-errs:
return nil, err
defer func() {
err := ensureAllBlobsDone(streamsOrErrors)
if retErr == nil {
retErr = err
}
}()
readBlob := func(len uint64) ([]byte, error) {
soe, ok := <-streamsOrErrors
if !ok {
return nil, errors.New("stream closed")
}
if soe.err != nil {
return nil, soe.err
}
defer soe.stream.Close()
blob := make([]byte, len)
if _, err := io.ReadFull(reader, blob); err != nil {
reader.Close()
return nil, err
}
if err := reader.Close(); err != nil {
if _, err := io.ReadFull(soe.stream, blob); err != nil {
return nil, err
}
return blob, nil
@ -217,7 +230,7 @@ func readZstdChunkedManifest(blobStream ImageSourceSeekable, tocDigest digest.Di
var decodedTarSplit []byte = nil
if toc.TarSplitDigest != "" {
if tarSplitChunk.Offset <= 0 {
return nil, nil, nil, 0, fmt.Errorf("TOC requires a tar-split, but the %s annotation does not describe a position", internal.TarSplitInfoKey)
return nil, nil, nil, 0, fmt.Errorf("TOC requires a tar-split, but the %s annotation does not describe a position", minimal.TarSplitInfoKey)
}
tarSplit, err := readBlob(tarSplitChunk.Length)
if err != nil {
@ -247,11 +260,11 @@ func readZstdChunkedManifest(blobStream ImageSourceSeekable, tocDigest digest.Di
}
// ensureTOCMatchesTarSplit validates that toc and tarSplit contain _exactly_ the same entries.
func ensureTOCMatchesTarSplit(toc *internal.TOC, tarSplit []byte) error {
pendingFiles := map[string]*internal.FileMetadata{} // Name -> an entry in toc.Entries
func ensureTOCMatchesTarSplit(toc *minimal.TOC, tarSplit []byte) error {
pendingFiles := map[string]*minimal.FileMetadata{} // Name -> an entry in toc.Entries
for i := range toc.Entries {
e := &toc.Entries[i]
if e.Type != internal.TypeChunk {
if e.Type != minimal.TypeChunk {
if _, ok := pendingFiles[e.Name]; ok {
return fmt.Errorf("TOC contains duplicate entries for path %q", e.Name)
}
@ -266,7 +279,7 @@ func ensureTOCMatchesTarSplit(toc *internal.TOC, tarSplit []byte) error {
return fmt.Errorf("tar-split contains an entry for %q missing in TOC", hdr.Name)
}
delete(pendingFiles, hdr.Name)
expected, err := internal.NewFileMetadata(hdr)
expected, err := minimal.NewFileMetadata(hdr)
if err != nil {
return fmt.Errorf("determining expected metadata for %q: %w", hdr.Name, err)
}
@ -347,8 +360,8 @@ func ensureTimePointersMatch(a, b *time.Time) error {
// ensureFileMetadataAttributesMatch ensures that a and b match in file attributes (it ignores entries relevant to locating data
// in the tar stream or matching contents)
func ensureFileMetadataAttributesMatch(a, b *internal.FileMetadata) error {
// Keep this in sync with internal.FileMetadata!
func ensureFileMetadataAttributesMatch(a, b *minimal.FileMetadata) error {
// Keep this in sync with minimal.FileMetadata!
if a.Type != b.Type {
return fmt.Errorf("mismatch of Type: %q != %q", a.Type, b.Type)


@ -9,7 +9,7 @@ import (
"bytes"
"io"
"github.com/containers/storage/pkg/chunked/internal"
"github.com/containers/storage/pkg/chunked/internal/minimal"
"github.com/containers/storage/pkg/ioutils"
"github.com/klauspost/compress/zstd"
"github.com/opencontainers/go-digest"
@ -213,7 +213,7 @@ func newTarSplitData(level int) (*tarSplitData, error) {
compressed := bytes.NewBuffer(nil)
digester := digest.Canonical.Digester()
zstdWriter, err := internal.ZstdWriterWithLevel(io.MultiWriter(compressed, digester.Hash()), level)
zstdWriter, err := minimal.ZstdWriterWithLevel(io.MultiWriter(compressed, digester.Hash()), level)
if err != nil {
return nil, err
}
@ -254,7 +254,7 @@ func writeZstdChunkedStream(destFile io.Writer, outMetadata map[string]string, r
buf := make([]byte, 4096)
zstdWriter, err := internal.ZstdWriterWithLevel(dest, level)
zstdWriter, err := minimal.ZstdWriterWithLevel(dest, level)
if err != nil {
return err
}
@ -276,7 +276,7 @@ func writeZstdChunkedStream(destFile io.Writer, outMetadata map[string]string, r
return offset, nil
}
var metadata []internal.FileMetadata
var metadata []minimal.FileMetadata
for {
hdr, err := tr.Next()
if err != nil {
@ -341,9 +341,9 @@ func writeZstdChunkedStream(destFile io.Writer, outMetadata map[string]string, r
chunkSize := rcReader.WrittenOut - lastChunkOffset
if chunkSize > 0 {
chunkType := internal.ChunkTypeData
chunkType := minimal.ChunkTypeData
if rcReader.IsLastChunkZeros {
chunkType = internal.ChunkTypeZeros
chunkType = minimal.ChunkTypeZeros
}
chunks = append(chunks, chunk{
@ -368,17 +368,17 @@ func writeZstdChunkedStream(destFile io.Writer, outMetadata map[string]string, r
}
}
mainEntry, err := internal.NewFileMetadata(hdr)
mainEntry, err := minimal.NewFileMetadata(hdr)
if err != nil {
return err
}
mainEntry.Digest = checksum
mainEntry.Offset = startOffset
mainEntry.EndOffset = lastOffset
entries := []internal.FileMetadata{mainEntry}
entries := []minimal.FileMetadata{mainEntry}
for i := 1; i < len(chunks); i++ {
entries = append(entries, internal.FileMetadata{
Type: internal.TypeChunk,
entries = append(entries, minimal.FileMetadata{
Type: minimal.TypeChunk,
Name: hdr.Name,
ChunkOffset: chunks[i].ChunkOffset,
})
@ -424,13 +424,13 @@ func writeZstdChunkedStream(destFile io.Writer, outMetadata map[string]string, r
}
tarSplitData.zstd = nil
ts := internal.TarSplitData{
ts := minimal.TarSplitData{
Data: tarSplitData.compressed.Bytes(),
Digest: tarSplitData.digester.Digest(),
UncompressedSize: tarSplitData.uncompressedCounter.Count,
}
return internal.WriteZstdChunkedManifest(dest, outMetadata, uint64(dest.Count), &ts, metadata, level)
return minimal.WriteZstdChunkedManifest(dest, outMetadata, uint64(dest.Count), &ts, metadata, level)
}
type zstdChunkedWriter struct {


@ -9,10 +9,11 @@ import (
"io"
"path/filepath"
"reflect"
"strings"
"time"
"github.com/containers/storage/pkg/chunked/internal"
"github.com/containers/storage/pkg/chunked/internal/minimal"
storagePath "github.com/containers/storage/pkg/chunked/internal/path"
"github.com/opencontainers/go-digest"
"golang.org/x/sys/unix"
)
@ -85,17 +86,17 @@ func escapedOptional(val []byte, escape int) string {
func getStMode(mode uint32, typ string) (uint32, error) {
switch typ {
case internal.TypeReg, internal.TypeLink:
case minimal.TypeReg, minimal.TypeLink:
mode |= unix.S_IFREG
case internal.TypeChar:
case minimal.TypeChar:
mode |= unix.S_IFCHR
case internal.TypeBlock:
case minimal.TypeBlock:
mode |= unix.S_IFBLK
case internal.TypeDir:
case minimal.TypeDir:
mode |= unix.S_IFDIR
case internal.TypeFifo:
case minimal.TypeFifo:
mode |= unix.S_IFIFO
case internal.TypeSymlink:
case minimal.TypeSymlink:
mode |= unix.S_IFLNK
default:
return 0, fmt.Errorf("unknown type %s", typ)
@ -103,24 +104,14 @@ func getStMode(mode uint32, typ string) (uint32, error) {
return mode, nil
}
func sanitizeName(name string) string {
path := filepath.Clean(name)
if path == "." {
path = "/"
} else if path[0] != '/' {
path = "/" + path
}
return path
}
func dumpNode(out io.Writer, added map[string]*internal.FileMetadata, links map[string]int, verityDigests map[string]string, entry *internal.FileMetadata) error {
path := sanitizeName(entry.Name)
func dumpNode(out io.Writer, added map[string]*minimal.FileMetadata, links map[string]int, verityDigests map[string]string, entry *minimal.FileMetadata) error {
path := storagePath.CleanAbsPath(entry.Name)
parent := filepath.Dir(path)
if _, found := added[parent]; !found && path != "/" {
parentEntry := &internal.FileMetadata{
parentEntry := &minimal.FileMetadata{
Name: parent,
Type: internal.TypeDir,
Type: minimal.TypeDir,
Mode: 0o755,
}
if err := dumpNode(out, added, links, verityDigests, parentEntry); err != nil {
@ -143,7 +134,7 @@ func dumpNode(out io.Writer, added map[string]*internal.FileMetadata, links map[
nlinks := links[entry.Name] + links[entry.Linkname] + 1
link := ""
if entry.Type == internal.TypeLink {
if entry.Type == minimal.TypeLink {
link = "@"
}
@ -169,16 +160,21 @@ func dumpNode(out io.Writer, added map[string]*internal.FileMetadata, links map[
var payload string
if entry.Linkname != "" {
if entry.Type == internal.TypeSymlink {
if entry.Type == minimal.TypeSymlink {
payload = entry.Linkname
} else {
payload = sanitizeName(entry.Linkname)
payload = storagePath.CleanAbsPath(entry.Linkname)
}
} else {
if len(entry.Digest) > 10 {
d := strings.Replace(entry.Digest, "sha256:", "", 1)
payload = d[:2] + "/" + d[2:]
} else if entry.Digest != "" {
d, err := digest.Parse(entry.Digest)
if err != nil {
return fmt.Errorf("invalid digest %q for %q: %w", entry.Digest, entry.Name, err)
}
path, err := storagePath.RegularFilePathForValidatedDigest(d)
if err != nil {
return fmt.Errorf("determining physical file path for %q: %w", entry.Name, err)
}
payload = path
}
if _, err := fmt.Fprint(out, escapedOptional([]byte(payload), ESCAPE_LONE_DASH)); err != nil {
@ -219,7 +215,7 @@ func dumpNode(out io.Writer, added map[string]*internal.FileMetadata, links map[
// GenerateDump generates a dump of the TOC in the same format as `composefs-info dump`
func GenerateDump(tocI interface{}, verityDigests map[string]string) (io.Reader, error) {
toc, ok := tocI.(*internal.TOC)
toc, ok := tocI.(*minimal.TOC)
if !ok {
return nil, fmt.Errorf("invalid TOC type")
}
@ -235,21 +231,21 @@ func GenerateDump(tocI interface{}, verityDigests map[string]string) (io.Reader,
}()
links := make(map[string]int)
added := make(map[string]*internal.FileMetadata)
added := make(map[string]*minimal.FileMetadata)
for _, e := range toc.Entries {
if e.Linkname == "" {
continue
}
if e.Type == internal.TypeSymlink {
if e.Type == minimal.TypeSymlink {
continue
}
links[e.Linkname] = links[e.Linkname] + 1
}
if len(toc.Entries) == 0 {
root := &internal.FileMetadata{
root := &minimal.FileMetadata{
Name: "/",
Type: internal.TypeDir,
Type: minimal.TypeDir,
Mode: 0o755,
}
@ -261,7 +257,7 @@ func GenerateDump(tocI interface{}, verityDigests map[string]string) (io.Reader,
}
for _, e := range toc.Entries {
if e.Type == internal.TypeChunk {
if e.Type == minimal.TypeChunk {
continue
}
if err := dumpNode(w, added, links, verityDigests, &e); err != nil {


@ -15,7 +15,8 @@ import (
driversCopy "github.com/containers/storage/drivers/copy"
"github.com/containers/storage/pkg/archive"
"github.com/containers/storage/pkg/chunked/internal"
"github.com/containers/storage/pkg/chunked/internal/minimal"
storagePath "github.com/containers/storage/pkg/chunked/internal/path"
securejoin "github.com/cyphar/filepath-securejoin"
"github.com/vbatts/tar-split/archive/tar"
"golang.org/x/sys/unix"
@ -34,14 +35,14 @@ func procPathForFd(fd int) string {
return fmt.Sprintf("/proc/self/fd/%d", fd)
}
// fileMetadata is a wrapper around internal.FileMetadata with additional private fields that
// fileMetadata is a wrapper around minimal.FileMetadata with additional private fields that
// are not part of the TOC document.
// Type: TypeChunk entries are stored in Chunks, the primary [fileMetadata] entries never use TypeChunk.
type fileMetadata struct {
internal.FileMetadata
minimal.FileMetadata
// chunks stores the TypeChunk entries relevant to this entry when FileMetadata.Type == TypeReg.
chunks []*internal.FileMetadata
chunks []*minimal.FileMetadata
// skipSetAttrs is set when the file attributes must not be
// modified, e.g. it is a hard link from a different source,
@ -49,10 +50,37 @@ type fileMetadata struct {
skipSetAttrs bool
}
// splitPath takes a file path as input and returns two components: dir and base.
// Unlike filepath.Split(), this function handles some edge cases:
// if the path refers to a file in the root directory, the returned dir is "/",
// and the returned base is never empty, never contains a slash, and is never
// "..".
func splitPath(path string) (string, string, error) {
path = storagePath.CleanAbsPath(path)
dir, base := filepath.Split(path)
if base == "" {
base = "."
}
// Remove trailing slashes from dir, but make sure that "/" is preserved.
dir = strings.TrimSuffix(dir, "/")
if dir == "" {
dir = "/"
}
if strings.Contains(base, "/") {
// This should never happen, but be safe as the base is passed to *at syscalls.
return "", "", fmt.Errorf("internal error: splitPath(%q) contains a slash", path)
}
return dir, base, nil
}
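To make those edge cases concrete, here is a self-contained sketch (an editor's illustration, not part of the vendored code) that mirrors splitPath, substituting filepath.Clean("/"+path) for storagePath.CleanAbsPath and omitting the internal-error check:

package main

import (
    "fmt"
    "path/filepath"
    "strings"
)

// splitPathSketch mirrors the splitPath helper above: it cleans the path into
// an absolute form, then splits it so that dir is never empty ("/" for the
// root directory) and base is never empty and never contains a slash.
func splitPathSketch(path string) (dir, base string) {
    path = filepath.Clean("/" + path) // stand-in for storagePath.CleanAbsPath
    dir, base = filepath.Split(path)
    if base == "" {
        base = "."
    }
    // Remove trailing slashes from dir, but keep "/" for the root directory.
    dir = strings.TrimSuffix(dir, "/")
    if dir == "" {
        dir = "/"
    }
    return dir, base
}

func main() {
    for _, p := range []string{"/usr/bin/ls", "ls", "/", "a/../b"} {
        dir, base := splitPathSketch(p)
        fmt.Printf("%q -> dir=%q base=%q\n", p, dir, base)
    }
}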
func doHardLink(dirfd, srcFd int, destFile string) error {
destDir, destBase := filepath.Split(destFile)
destDir, destBase, err := splitPath(destFile)
if err != nil {
return err
}
destDirFd := dirfd
if destDir != "" && destDir != "." {
if destDir != "/" {
f, err := openOrCreateDirUnderRoot(dirfd, destDir, 0)
if err != nil {
return err
@ -72,7 +100,7 @@ func doHardLink(dirfd, srcFd int, destFile string) error {
return nil
}
err := doLink()
err = doLink()
// if the destination exists, unlink it first and try again
if err != nil && os.IsExist(err) {
@ -281,8 +309,11 @@ func openFileUnderRootFallback(dirfd int, name string, flags uint64, mode os.Fil
// If O_NOFOLLOW is specified in the flags, then resolve only the parent directory and use the
// last component as the path to openat().
if hasNoFollow {
dirName, baseName := filepath.Split(name)
if dirName != "" && dirName != "." {
dirName, baseName, err := splitPath(name)
if err != nil {
return -1, err
}
if dirName != "/" {
newRoot, err := securejoin.SecureJoin(root, dirName)
if err != nil {
return -1, err
@ -409,7 +440,8 @@ func openOrCreateDirUnderRoot(dirfd int, name string, mode os.FileMode) (*os.Fil
if errors.Is(err, unix.ENOENT) {
parent := filepath.Dir(name)
if parent != "" {
// do not create the root directory, it should always exist
if parent != name {
pDir, err2 := openOrCreateDirUnderRoot(dirfd, parent, mode)
if err2 != nil {
return nil, err
@ -448,9 +480,12 @@ func appendHole(fd int, name string, size int64) error {
}
func safeMkdir(dirfd int, mode os.FileMode, name string, metadata *fileMetadata, options *archive.TarOptions) error {
parent, base := filepath.Split(name)
parent, base, err := splitPath(name)
if err != nil {
return err
}
parentFd := dirfd
if parent != "" && parent != "." {
if parent != "/" {
parentFile, err := openOrCreateDirUnderRoot(dirfd, parent, 0)
if err != nil {
return err
@ -506,9 +541,12 @@ func safeLink(dirfd int, mode os.FileMode, metadata *fileMetadata, options *arch
}
func safeSymlink(dirfd int, metadata *fileMetadata) error {
destDir, destBase := filepath.Split(metadata.Name)
destDir, destBase, err := splitPath(metadata.Name)
if err != nil {
return err
}
destDirFd := dirfd
if destDir != "" && destDir != "." {
if destDir != "/" {
f, err := openOrCreateDirUnderRoot(dirfd, destDir, 0)
if err != nil {
return err
@ -542,9 +580,12 @@ func (d whiteoutHandler) Setxattr(path, name string, value []byte) error {
}
func (d whiteoutHandler) Mknod(path string, mode uint32, dev int) error {
dir, base := filepath.Split(path)
dir, base, err := splitPath(path)
if err != nil {
return err
}
dirfd := d.Dirfd
if dir != "" && dir != "." {
if dir != "/" {
dir, err := openOrCreateDirUnderRoot(d.Dirfd, dir, 0)
if err != nil {
return err


@ -1,4 +1,4 @@
package internal
package minimal
// NOTE: This is used from github.com/containers/image by callers that
// don't otherwise use containers/storage, so don't make this depend on any


@ -0,0 +1,27 @@
package path
import (
"fmt"
"path/filepath"
"github.com/opencontainers/go-digest"
)
// CleanAbsPath removes any ".." and "." from the path
// and ensures it starts with a "/". If the path refers to the root
// directory, it returns "/".
func CleanAbsPath(path string) string {
return filepath.Clean("/" + path)
}
// RegularFilePathForValidatedDigest returns the path used in the composefs backing store for a
// regular file with the provided content digest.
//
// The caller MUST ensure d is a valid digest (in particular, that it contains no path separators or .. entries)
func RegularFilePathForValidatedDigest(d digest.Digest) (string, error) {
if algo := d.Algorithm(); algo != digest.SHA256 {
return "", fmt.Errorf("unexpected digest algorithm %q", algo)
}
e := d.Encoded()
return e[0:2] + "/" + e[2:], nil
}
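Both helpers are easy to exercise; a hedged, runnable sketch that inlines their logic (the expected outputs follow directly from the definitions above):

package main

import (
    "fmt"
    "path/filepath"

    digest "github.com/opencontainers/go-digest"
)

func main() {
    // CleanAbsPath is filepath.Clean("/" + path):
    fmt.Println(filepath.Clean("/" + "a/../b/./c")) // "/b/c"
    fmt.Println(filepath.Clean("/" + ""))           // "/"

    // RegularFilePathForValidatedDigest fans a sha256 digest out into a
    // two-level layout, with the first two hex characters as the directory:
    d := digest.FromString("hello") // sha256:2cf24dba…
    e := d.Encoded()
    fmt.Println(e[0:2] + "/" + e[2:]) // "2c/f24dba5fb0a3…"
}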


@ -2,6 +2,7 @@ package chunked
import (
archivetar "archive/tar"
"bytes"
"context"
"encoding/base64"
"errors"
@ -22,17 +23,21 @@ import (
graphdriver "github.com/containers/storage/drivers"
"github.com/containers/storage/pkg/archive"
"github.com/containers/storage/pkg/chunked/compressor"
"github.com/containers/storage/pkg/chunked/internal"
"github.com/containers/storage/pkg/chunked/internal/minimal"
path "github.com/containers/storage/pkg/chunked/internal/path"
"github.com/containers/storage/pkg/chunked/toc"
"github.com/containers/storage/pkg/fsverity"
"github.com/containers/storage/pkg/idtools"
"github.com/containers/storage/pkg/system"
securejoin "github.com/cyphar/filepath-securejoin"
jsoniter "github.com/json-iterator/go"
"github.com/klauspost/compress/zstd"
"github.com/klauspost/pgzip"
digest "github.com/opencontainers/go-digest"
"github.com/sirupsen/logrus"
"github.com/vbatts/tar-split/archive/tar"
"github.com/vbatts/tar-split/tar/asm"
tsStorage "github.com/vbatts/tar-split/tar/storage"
"golang.org/x/sys/unix"
)
@ -59,7 +64,7 @@ type compressedFileType int
type chunkedDiffer struct {
stream ImageSourceSeekable
manifest []byte
toc *internal.TOC // The parsed contents of manifest, or nil if not yet available
toc *minimal.TOC // The parsed contents of manifest, or nil if not yet available
tarSplit []byte
layersCache *layersCache
tocOffset int64
@ -92,7 +97,7 @@ type chunkedDiffer struct {
blobSize int64
uncompressedTarSize int64 // -1 if unknown
pullOptions map[string]string
pullOptions pullOptions
useFsVerity graphdriver.DifferFsVerity
fsVerityDigests map[string]string
@ -108,6 +113,42 @@ type chunkedLayerData struct {
Format graphdriver.DifferOutputFormat `json:"format"`
}
// pullOptions contains parsed data from storage.Store.PullOptions.
// TODO: ideally this should be parsed along with the rest of the config file into StoreOptions directly
// (and then storage.Store.PullOptions would need to be somehow simulated).
type pullOptions struct {
enablePartialImages bool // enable_partial_images
convertImages bool // convert_images
useHardLinks bool // use_hard_links
insecureAllowUnpredictableImageContents bool // insecure_allow_unpredictable_image_contents
ostreeRepos []string // ostree_repos
}
func parsePullOptions(store storage.Store) pullOptions {
options := store.PullOptions()
res := pullOptions{}
for _, e := range []struct {
dest *bool
name string
defaultValue bool
}{
{&res.enablePartialImages, "enable_partial_images", false},
{&res.convertImages, "convert_images", false},
{&res.useHardLinks, "use_hard_links", false},
{&res.insecureAllowUnpredictableImageContents, "insecure_allow_unpredictable_image_contents", false},
} {
if value, ok := options[e.name]; ok {
*e.dest = strings.ToLower(value) == "true"
} else {
*e.dest = e.defaultValue
}
}
res.ostreeRepos = strings.Split(options["ostree_repos"], ":")
return res
}
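A minimal, self-contained sketch of the same key handling, using a plain map as a stand-in for storage.Store.PullOptions() (illustrative only):

package main

import (
    "fmt"
    "strings"
)

func main() {
    // Hypothetical storage.conf pull options, as parsePullOptions would see them.
    options := map[string]string{
        "enable_partial_images": "true",
        "ostree_repos":          "/repo1:/repo2",
    }
    // Booleans: a key that is present and (case-insensitively) "true" enables the
    // option; an absent key falls back to the default (currently false for all).
    enable := strings.ToLower(options["enable_partial_images"]) == "true"
    convert := strings.ToLower(options["convert_images"]) == "true"
    // ostree_repos is colon-separated; note that an unset key yields []string{""}.
    repos := strings.Split(options["ostree_repos"], ":")
    fmt.Println(enable, convert, repos) // true false [/repo1 /repo2]
}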
func (c *chunkedDiffer) convertTarToZstdChunked(destDirectory string, payload *os.File) (int64, *seekableFile, digest.Digest, map[string]string, error) {
diff, err := archive.DecompressStream(payload)
if err != nil {
@ -147,22 +188,21 @@ func (c *chunkedDiffer) convertTarToZstdChunked(destDirectory string, payload *o
// If it returns an error that implements IsErrFallbackToOrdinaryLayerDownload, the caller can
// retry the operation with a different method.
func GetDiffer(ctx context.Context, store storage.Store, blobDigest digest.Digest, blobSize int64, annotations map[string]string, iss ImageSourceSeekable) (graphdriver.Differ, error) {
pullOptions := store.PullOptions()
pullOptions := parsePullOptions(store)
if !parseBooleanPullOption(pullOptions, "enable_partial_images", false) {
// If convertImages is set, the two options disagree whether fallback is permissible.
if !pullOptions.enablePartialImages {
// If pullOptions.convertImages is set, the two options disagree whether fallback is permissible.
// Right now, we enable it, but that's not a promise; rather, such a configuration should ideally be rejected.
return nil, newErrFallbackToOrdinaryLayerDownload(errors.New("partial images are disabled"))
}
// convertImages also serves as a “must not fallback to non-partial pull” option (?!)
convertImages := parseBooleanPullOption(pullOptions, "convert_images", false)
// pullOptions.convertImages also serves as a “must not fallback to non-partial pull” option (?!)
graphDriver, err := store.GraphDriver()
if err != nil {
return nil, err
}
if _, partialSupported := graphDriver.(graphdriver.DriverWithDiffer); !partialSupported {
if convertImages {
if pullOptions.convertImages {
return nil, fmt.Errorf("graph driver %s does not support partial pull but convert_images requires that", graphDriver.String())
}
return nil, newErrFallbackToOrdinaryLayerDownload(fmt.Errorf("graph driver %s does not support partial pull", graphDriver.String()))
@ -174,7 +214,7 @@ func GetDiffer(ctx context.Context, store storage.Store, blobDigest digest.Diges
return nil, err
}
// If convert_images is enabled, always attempt to convert it instead of returning an error or falling back to a different method.
if convertImages {
if pullOptions.convertImages {
logrus.Debugf("Created differ to convert blob %q", blobDigest)
return makeConvertFromRawDiffer(store, blobDigest, blobSize, iss, pullOptions)
}
@ -186,10 +226,10 @@ func GetDiffer(ctx context.Context, store storage.Store, blobDigest digest.Diges
// getProperDiffer is an implementation detail of GetDiffer.
// It returns a “proper” differ (not a convert_images one) if possible.
// On error, the second parameter is true if a fallback to an alternative (either the makeConvertFromRawDiffer differ, or a non-partial pull)
// On error, the second return value is true if a fallback to an alternative (either the makeConvertFromRawDiffer differ, or a non-partial pull)
// is permissible.
func getProperDiffer(store storage.Store, blobDigest digest.Digest, blobSize int64, annotations map[string]string, iss ImageSourceSeekable, pullOptions map[string]string) (graphdriver.Differ, bool, error) {
zstdChunkedTOCDigestString, hasZstdChunkedTOC := annotations[internal.ManifestChecksumKey]
func getProperDiffer(store storage.Store, blobDigest digest.Digest, blobSize int64, annotations map[string]string, iss ImageSourceSeekable, pullOptions pullOptions) (graphdriver.Differ, bool, error) {
zstdChunkedTOCDigestString, hasZstdChunkedTOC := annotations[minimal.ManifestChecksumKey]
estargzTOCDigestString, hasEstargzTOC := annotations[estargz.TOCJSONDigestAnnotation]
switch {
@ -201,12 +241,10 @@ func getProperDiffer(store storage.Store, blobDigest digest.Digest, blobSize int
if err != nil {
return nil, false, err
}
differ, err := makeZstdChunkedDiffer(store, blobSize, zstdChunkedTOCDigest, annotations, iss, pullOptions)
differ, canFallback, err := makeZstdChunkedDiffer(store, blobSize, zstdChunkedTOCDigest, annotations, iss, pullOptions)
if err != nil {
logrus.Debugf("Could not create zstd:chunked differ for blob %q: %v", blobDigest, err)
// If the error is a bad request to the server, then signal to the caller that it can try a different method.
var badRequestErr ErrBadRequest
return nil, errors.As(err, &badRequestErr), err
return nil, canFallback, err
}
logrus.Debugf("Created zstd:chunked differ for blob %q", blobDigest)
return differ, false, nil
@ -216,26 +254,23 @@ func getProperDiffer(store storage.Store, blobDigest digest.Digest, blobSize int
if err != nil {
return nil, false, err
}
differ, err := makeEstargzChunkedDiffer(store, blobSize, estargzTOCDigest, iss, pullOptions)
differ, canFallback, err := makeEstargzChunkedDiffer(store, blobSize, estargzTOCDigest, iss, pullOptions)
if err != nil {
logrus.Debugf("Could not create estargz differ for blob %q: %v", blobDigest, err)
// If the error is a bad request to the server, then signal to the caller that it can try a different method.
var badRequestErr ErrBadRequest
return nil, errors.As(err, &badRequestErr), err
return nil, canFallback, err
}
logrus.Debugf("Created eStargz differ for blob %q", blobDigest)
return differ, false, nil
default: // no TOC
convertImages := parseBooleanPullOption(pullOptions, "convert_images", false)
if !convertImages {
if !pullOptions.convertImages {
return nil, true, errors.New("no TOC found and convert_images is not configured")
}
return nil, true, errors.New("no TOC found")
}
}
func makeConvertFromRawDiffer(store storage.Store, blobDigest digest.Digest, blobSize int64, iss ImageSourceSeekable, pullOptions map[string]string) (*chunkedDiffer, error) {
func makeConvertFromRawDiffer(store storage.Store, blobDigest digest.Digest, blobSize int64, iss ImageSourceSeekable, pullOptions pullOptions) (*chunkedDiffer, error) {
layersCache, err := getLayersCache(store)
if err != nil {
return nil, err
@ -254,22 +289,31 @@ func makeConvertFromRawDiffer(store storage.Store, blobDigest digest.Digest, blo
}, nil
}
func makeZstdChunkedDiffer(store storage.Store, blobSize int64, tocDigest digest.Digest, annotations map[string]string, iss ImageSourceSeekable, pullOptions map[string]string) (*chunkedDiffer, error) {
// makeZstdChunkedDiffer sets up a chunkedDiffer for a zstd:chunked layer.
//
// On error, the second return value is true if a fallback to an alternative (either the makeConvertFromRawDiffer differ, or a non-partial pull)
// is permissible.
func makeZstdChunkedDiffer(store storage.Store, blobSize int64, tocDigest digest.Digest, annotations map[string]string, iss ImageSourceSeekable, pullOptions pullOptions) (*chunkedDiffer, bool, error) {
manifest, toc, tarSplit, tocOffset, err := readZstdChunkedManifest(iss, tocDigest, annotations)
if err != nil {
return nil, fmt.Errorf("read zstd:chunked manifest: %w", err)
// If the error is a bad request to the server, then signal to the caller that it can try a different method.
var badRequestErr ErrBadRequest
return nil, errors.As(err, &badRequestErr), fmt.Errorf("read zstd:chunked manifest: %w", err)
}
var uncompressedTarSize int64 = -1
if tarSplit != nil {
uncompressedTarSize, err = tarSizeFromTarSplit(tarSplit)
if err != nil {
return nil, fmt.Errorf("computing size from tar-split: %w", err)
return nil, false, fmt.Errorf("computing size from tar-split: %w", err)
}
} else if !pullOptions.insecureAllowUnpredictableImageContents { // With no tar-split, we can't compute the traditional UncompressedDigest.
return nil, true, fmt.Errorf("zstd:chunked layers without tar-split data don't support partial pulls with guaranteed consistency with non-partial pulls")
}
layersCache, err := getLayersCache(store)
if err != nil {
return nil, err
return nil, false, err
}
return &chunkedDiffer{
@ -286,17 +330,27 @@ func makeZstdChunkedDiffer(store storage.Store, blobSize int64, tocDigest digest
stream: iss,
tarSplit: tarSplit,
tocOffset: tocOffset,
}, nil
}, false, nil
}
func makeEstargzChunkedDiffer(store storage.Store, blobSize int64, tocDigest digest.Digest, iss ImageSourceSeekable, pullOptions map[string]string) (*chunkedDiffer, error) {
// makeEstargzChunkedDiffer sets up a chunkedDiffer for an estargz layer.
//
// On error, the second return value is true if a fallback to an alternative (either the makeConvertFromRawDiffer differ, or a non-partial pull)
// is permissible.
func makeEstargzChunkedDiffer(store storage.Store, blobSize int64, tocDigest digest.Digest, iss ImageSourceSeekable, pullOptions pullOptions) (*chunkedDiffer, bool, error) {
if !pullOptions.insecureAllowUnpredictableImageContents { // With no tar-split, we can't compute the traditional UncompressedDigest.
return nil, true, fmt.Errorf("estargz layers don't support partial pulls with guaranteed consistency with non-partial pulls")
}
manifest, tocOffset, err := readEstargzChunkedManifest(iss, blobSize, tocDigest)
if err != nil {
return nil, fmt.Errorf("read zstd:chunked manifest: %w", err)
// If the error is a bad request to the server, then signal to the caller that it can try a different method.
var badRequestErr ErrBadRequest
return nil, errors.As(err, &badRequestErr), fmt.Errorf("read zstd:chunked manifest: %w", err)
}
layersCache, err := getLayersCache(store)
if err != nil {
return nil, err
return nil, false, err
}
return &chunkedDiffer{
@ -311,7 +365,7 @@ func makeEstargzChunkedDiffer(store storage.Store, blobSize int64, tocDigest dig
pullOptions: pullOptions,
stream: iss,
tocOffset: tocOffset,
}, nil
}, false, nil
}
func makeCopyBuffer() []byte {
@ -391,7 +445,7 @@ func canDedupFileWithHardLink(file *fileMetadata, fd int, s os.FileInfo) bool {
}
// fill only the attributes used by canDedupMetadataWithHardLink.
otherFile := fileMetadata{
FileMetadata: internal.FileMetadata{
FileMetadata: minimal.FileMetadata{
UID: int(st.Uid),
GID: int(st.Gid),
Mode: int64(st.Mode),
@ -735,7 +789,12 @@ func (d *destinationFile) Close() (Err error) {
}
}
return setFileAttrs(d.dirfd, d.file, os.FileMode(d.metadata.Mode), d.metadata, d.options, false)
mode := os.FileMode(d.metadata.Mode)
if d.options.ForceMask != nil {
mode = *d.options.ForceMask
}
return setFileAttrs(d.dirfd, d.file, mode, d.metadata, d.options, false)
}
func closeDestinationFiles(files chan *destinationFile, errors chan error) {
@ -1038,13 +1097,6 @@ type hardLinkToCreate struct {
metadata *fileMetadata
}
func parseBooleanPullOption(pullOptions map[string]string, name string, def bool) bool {
if value, ok := pullOptions[name]; ok {
return strings.ToLower(value) == "true"
}
return def
}
type findAndCopyFileOptions struct {
useHardLinks bool
ostreeRepos []string
@ -1111,10 +1163,13 @@ func (c *chunkedDiffer) findAndCopyFile(dirfd int, r *fileMetadata, copyOptions
return false, nil
}
func makeEntriesFlat(mergedEntries []fileMetadata) ([]fileMetadata, error) {
// makeEntriesFlat collects regular-file entries from mergedEntries, and produces a new list
// where each file content is only represented once, and uses composefs.RegularFilePathForValidatedDigest for its name.
// If flatPathNameMap is not nil, this function writes to it a mapping from filepath.Clean(originalName) to the composefs name.
func makeEntriesFlat(mergedEntries []fileMetadata, flatPathNameMap map[string]string) ([]fileMetadata, error) {
var new []fileMetadata
hashes := make(map[string]string)
knownFlatPaths := make(map[string]struct{})
for i := range mergedEntries {
if mergedEntries[i].Type != TypeReg {
continue
@ -1124,16 +1179,22 @@ func makeEntriesFlat(mergedEntries []fileMetadata) ([]fileMetadata, error) {
}
digest, err := digest.Parse(mergedEntries[i].Digest)
if err != nil {
return nil, err
return nil, fmt.Errorf("invalid digest %q for %q: %w", mergedEntries[i].Digest, mergedEntries[i].Name, err)
}
path, err := path.RegularFilePathForValidatedDigest(digest)
if err != nil {
return nil, fmt.Errorf("determining physical file path for %q: %w", mergedEntries[i].Name, err)
}
if flatPathNameMap != nil {
flatPathNameMap[filepath.Clean(mergedEntries[i].Name)] = path
}
d := digest.Encoded()
if hashes[d] != "" {
if _, known := knownFlatPaths[path]; known {
continue
}
hashes[d] = d
knownFlatPaths[path] = struct{}{}
mergedEntries[i].Name = fmt.Sprintf("%s/%s", d[0:2], d[2:])
mergedEntries[i].Name = path
mergedEntries[i].skipSetAttrs = true
new = append(new, mergedEntries[i])
@ -1141,44 +1202,140 @@ func makeEntriesFlat(mergedEntries []fileMetadata) ([]fileMetadata, error) {
return new, nil
}
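A standalone sketch (hypothetical entry name; the digest is computed from a sample string) of the renaming and the flatPathNameMap record this produces:

package main

import (
    "fmt"
    "path/filepath"

    digest "github.com/opencontainers/go-digest"
)

func main() {
    name := "./etc/passwd"          // hypothetical TOC entry name
    d := digest.FromString("hello") // stand-in for the entry's content digest
    e := d.Encoded()
    flat := e[0:2] + "/" + e[2:] // what RegularFilePathForValidatedDigest returns
    // makeEntriesFlat renames the entry to flat and, when a map is supplied,
    // records the cleaned original name so the tar stream can be rebuilt later:
    fmt.Printf("flatPathNameMap[%q] = %q\n", filepath.Clean(name), flat)
}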
func (c *chunkedDiffer) copyAllBlobToFile(destination *os.File) (digest.Digest, error) {
var payload io.ReadCloser
var streams chan io.ReadCloser
var errs chan error
var err error
type streamOrErr struct {
stream io.ReadCloser
err error
}
chunksToRequest := []ImageSourceChunk{
{
Offset: 0,
Length: uint64(c.blobSize),
},
// ensureAllBlobsDone ensures that all blobs are closed and returns the first error encountered.
func ensureAllBlobsDone(streamsOrErrors chan streamOrErr) (retErr error) {
for soe := range streamsOrErrors {
if soe.stream != nil {
_ = soe.stream.Close()
} else if retErr == nil {
retErr = soe.err
}
}
return
}
streams, errs, err = c.stream.GetBlobAt(chunksToRequest)
// getBlobAtConverterGoroutine reads from the streams and errs channels, then sends
// either a stream or an error to the stream channel. The stream channel is closed when
// there are no more streams and errors to read.
// It ensures that no more than maxStreams streams are returned, and that every item from the
// streams and errs channels is consumed.
func getBlobAtConverterGoroutine(stream chan streamOrErr, streams chan io.ReadCloser, errs chan error, maxStreams int) {
tooManyStreams := false
streamsSoFar := 0
err := errors.New("unexpected error in getBlobAtConverterGoroutine")
defer func() {
if err != nil {
stream <- streamOrErr{err: err}
}
close(stream)
}()
loop:
for {
select {
case p, ok := <-streams:
if !ok {
streams = nil
break loop
}
if streamsSoFar >= maxStreams {
tooManyStreams = true
_ = p.Close()
continue
}
streamsSoFar++
stream <- streamOrErr{stream: p}
case err, ok := <-errs:
if !ok {
errs = nil
break loop
}
stream <- streamOrErr{err: err}
}
}
if streams != nil {
for p := range streams {
if streamsSoFar >= maxStreams {
tooManyStreams = true
_ = p.Close()
continue
}
streamsSoFar++
stream <- streamOrErr{stream: p}
}
}
if errs != nil {
for err := range errs {
stream <- streamOrErr{err: err}
}
}
if tooManyStreams {
stream <- streamOrErr{err: fmt.Errorf("too many streams returned, got more than %d", maxStreams)}
}
err = nil
}
// getBlobAt provides a much more convenient way to consume data returned by ImageSourceSeekable.GetBlobAt.
// GetBlobAt returns two channels, forcing a caller to `select` on both of them — and in Go, reading a closed channel
// always succeeds in select.
// Instead, getBlobAt provides a single channel with all events, which can be consumed conveniently using `range`.
func getBlobAt(is ImageSourceSeekable, chunksToRequest ...ImageSourceChunk) (chan streamOrErr, error) {
streams, errs, err := is.GetBlobAt(chunksToRequest)
if err != nil {
return nil, err
}
stream := make(chan streamOrErr)
go getBlobAtConverterGoroutine(stream, streams, errs, len(chunksToRequest))
return stream, nil
}
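An illustrative in-package consumer of getBlobAt; consumeChunks is hypothetical and not part of the vendored code, but shows the intended pattern: range over one channel, close every stream, keep the first error.

// consumeChunks is illustrative only: it fetches the requested ranges and
// drains them in order, reporting the first failure.
func consumeChunks(is ImageSourceSeekable, chunks ...ImageSourceChunk) error {
    streamsOrErrors, err := getBlobAt(is, chunks...)
    if err != nil {
        return err
    }
    var retErr error
    for soe := range streamsOrErrors {
        if soe.stream != nil {
            _, err := io.Copy(io.Discard, soe.stream) // process the chunk here
            _ = soe.stream.Close()
            if retErr == nil {
                retErr = err
            }
        } else if retErr == nil {
            retErr = soe.err
        }
    }
    return retErr
}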
func (c *chunkedDiffer) copyAllBlobToFile(destination *os.File) (digest.Digest, error) {
streamsOrErrors, err := getBlobAt(c.stream, ImageSourceChunk{Offset: 0, Length: uint64(c.blobSize)})
if err != nil {
return "", err
}
select {
case p := <-streams:
payload = p
case err := <-errs:
return "", err
}
if payload == nil {
return "", errors.New("invalid stream returned")
}
defer payload.Close()
originalRawDigester := digest.Canonical.Digester()
r := io.TeeReader(payload, originalRawDigester.Hash())
for soe := range streamsOrErrors {
if soe.stream != nil {
r := io.TeeReader(soe.stream, originalRawDigester.Hash())
// copy the entire tarball and compute its digest
_, err = io.CopyBuffer(destination, r, c.copyBuffer)
_ = soe.stream.Close()
}
if soe.err != nil && err == nil {
err = soe.err
}
}
return originalRawDigester.Digest(), err
}
func typeToOsMode(typ string) (os.FileMode, error) {
switch typ {
case TypeReg, TypeLink:
return 0, nil
case TypeSymlink:
return os.ModeSymlink, nil
case TypeDir:
return os.ModeDir, nil
case TypeChar:
return os.ModeDevice | os.ModeCharDevice, nil
case TypeBlock:
return os.ModeDevice, nil
case TypeFifo:
return os.ModeNamedPipe, nil
}
return 0, fmt.Errorf("unknown file type %q", typ)
}
func (c *chunkedDiffer) ApplyDiff(dest string, options *archive.TarOptions, differOpts *graphdriver.DifferOptions) (graphdriver.DriverWithDifferOutput, error) {
defer c.layersCache.release()
defer func() {
@ -1298,13 +1455,6 @@ func (c *chunkedDiffer) ApplyDiff(dest string, options *archive.TarOptions, diff
Size: c.uncompressedTarSize,
}
// When the hard links deduplication is used, file attributes are ignored because setting them
// modifies the source file as well.
useHardLinks := parseBooleanPullOption(c.pullOptions, "use_hard_links", false)
// List of OSTree repositories to use for deduplication
ostreeRepos := strings.Split(c.pullOptions["ostree_repos"], ":")
whiteoutConverter := archive.GetWhiteoutConverter(options.WhiteoutFormat, options.WhiteoutData)
var missingParts []missingPart
@ -1325,7 +1475,7 @@ func (c *chunkedDiffer) ApplyDiff(dest string, options *archive.TarOptions, diff
if err == nil {
value := idtools.Stat{
IDs: idtools.IDPair{UID: int(uid), GID: int(gid)},
Mode: os.FileMode(mode),
Mode: os.ModeDir | os.FileMode(mode),
}
if err := idtools.SetContainersOverrideXattr(dest, value); err != nil {
return output, err
@ -1337,16 +1487,20 @@ func (c *chunkedDiffer) ApplyDiff(dest string, options *archive.TarOptions, diff
if err != nil {
return output, &fs.PathError{Op: "open", Path: dest, Err: err}
}
defer unix.Close(dirfd)
dirFile := os.NewFile(uintptr(dirfd), dest)
defer dirFile.Close()
var flatPathNameMap map[string]string // = nil
if differOpts != nil && differOpts.Format == graphdriver.DifferOutputFormatFlat {
mergedEntries, err = makeEntriesFlat(mergedEntries)
flatPathNameMap = map[string]string{}
mergedEntries, err = makeEntriesFlat(mergedEntries, flatPathNameMap)
if err != nil {
return output, err
}
createdDirs := make(map[string]struct{})
for _, e := range mergedEntries {
d := e.Name[0:2]
// This hard-codes an assumption that RegularFilePathForValidatedDigest creates paths with exactly one directory component.
d := filepath.Dir(e.Name)
if _, found := createdDirs[d]; !found {
if err := unix.Mkdirat(dirfd, d, 0o755); err != nil {
return output, &fs.PathError{Op: "mkdirat", Path: d, Err: err}
@ -1363,8 +1517,10 @@ func (c *chunkedDiffer) ApplyDiff(dest string, options *archive.TarOptions, diff
missingPartsSize, totalChunksSize := int64(0), int64(0)
copyOptions := findAndCopyFileOptions{
useHardLinks: useHardLinks,
ostreeRepos: ostreeRepos,
// When the hard links deduplication is used, file attributes are ignored because setting them
// modifies the source file as well.
useHardLinks: c.pullOptions.useHardLinks,
ostreeRepos: c.pullOptions.ostreeRepos, // List of OSTree repositories to use for deduplication
options: options,
}
@ -1408,13 +1564,6 @@ func (c *chunkedDiffer) ApplyDiff(dest string, options *archive.TarOptions, diff
filesToWaitFor := 0
for i := range mergedEntries {
r := &mergedEntries[i]
if options.ForceMask != nil {
value := idtools.FormatContainersOverrideXattr(r.UID, r.GID, int(r.Mode))
if r.Xattrs == nil {
r.Xattrs = make(map[string]string)
}
r.Xattrs[idtools.ContainersOverrideXattr] = base64.StdEncoding.EncodeToString([]byte(value))
}
mode := os.FileMode(r.Mode)
@ -1423,10 +1572,37 @@ func (c *chunkedDiffer) ApplyDiff(dest string, options *archive.TarOptions, diff
return output, err
}
r.Name = filepath.Clean(r.Name)
size := r.Size
// when changing this logic, update also the implementation of ForceMask in pkg/archive
if options.ForceMask != nil {
mode = *options.ForceMask
// special files will be stored as regular files
if t != tar.TypeDir && t != tar.TypeSymlink && t != tar.TypeReg && t != tar.TypeLink {
t = tar.TypeReg
size = 0
}
// if the entry will be stored as a directory or a regular file, store the original
// owner and mode in an xattr.
if t == tar.TypeDir || t == tar.TypeReg {
typeMode, err := typeToOsMode(r.Type)
if err != nil {
return output, err
}
value := idtools.FormatContainersOverrideXattrDevice(r.UID, r.GID, typeMode|fs.FileMode(r.Mode), int(r.Devmajor), int(r.Devminor))
if r.Xattrs == nil {
r.Xattrs = make(map[string]string)
}
r.Xattrs[idtools.ContainersOverrideXattr] = base64.StdEncoding.EncodeToString([]byte(value))
}
}
r.Name = path.CleanAbsPath(r.Name)
// do not modify the value of symlinks
if r.Linkname != "" && t != tar.TypeSymlink {
r.Linkname = filepath.Clean(r.Linkname)
r.Linkname = path.CleanAbsPath(r.Linkname)
}
if whiteoutConverter != nil {
@ -1434,8 +1610,8 @@ func (c *chunkedDiffer) ApplyDiff(dest string, options *archive.TarOptions, diff
Typeflag: t,
Name: r.Name,
Linkname: r.Linkname,
Size: r.Size,
Mode: r.Mode,
Size: size,
Mode: int64(mode),
Uid: r.UID,
Gid: r.GID,
}
@ -1454,7 +1630,7 @@ func (c *chunkedDiffer) ApplyDiff(dest string, options *archive.TarOptions, diff
switch t {
case tar.TypeReg:
// Create directly empty files.
if r.Size == 0 {
if size == 0 {
// Used to have a scope for cleanup.
createEmptyFile := func() error {
file, err := openFileUnderRoot(dirfd, r.Name, newFileFlags, 0)
@ -1474,7 +1650,7 @@ func (c *chunkedDiffer) ApplyDiff(dest string, options *archive.TarOptions, diff
}
case tar.TypeDir:
if r.Name == "" || r.Name == "." {
if r.Name == "/" {
output.RootDirMode = &mode
}
if err := safeMkdir(dirfd, mode, r.Name, r, options); err != nil {
@ -1509,7 +1685,7 @@ func (c *chunkedDiffer) ApplyDiff(dest string, options *archive.TarOptions, diff
return output, fmt.Errorf("invalid type %q", t)
}
totalChunksSize += r.Size
totalChunksSize += size
if t == tar.TypeReg {
index := i
@ -1572,7 +1748,7 @@ func (c *chunkedDiffer) ApplyDiff(dest string, options *archive.TarOptions, diff
}
switch chunk.ChunkType {
case internal.ChunkTypeData:
case minimal.ChunkTypeData:
root, path, offset, err := c.layersCache.findChunkInOtherLayers(chunk)
if err != nil {
return output, err
@ -1585,7 +1761,7 @@ func (c *chunkedDiffer) ApplyDiff(dest string, options *archive.TarOptions, diff
Offset: offset,
}
}
case internal.ChunkTypeZeros:
case minimal.ChunkTypeZeros:
missingPartsSize -= size
mp.Hole = true
// Mark all chunks belonging to the missing part as holes
@ -1609,6 +1785,39 @@ func (c *chunkedDiffer) ApplyDiff(dest string, options *archive.TarOptions, diff
}
}
// To ensure that consumers of the layer who decompress and read the full tar stream,
// and consumers who consume the data via the TOC, both see exactly the same data and metadata,
// compute the UncompressedDigest.
// c/image will then ensure that this value matches the value in the image config's RootFS.DiffID, i.e. the image must commit
// to one UncompressedDigest value for each layer, and that will avoid the ambiguity (in consumers who validate layers against DiffID).
//
// c/image also uses the UncompressedDigest as a layer ID, allowing it to use the traditional layer and image IDs.
//
// This is, sadly, quite costly: Up to now we might have had to write, and digest, only the new/modified files.
// Here we need to read, and digest, the whole layer, even if almost all of it was already present locally.
// So, really specialized (EXTREMELY RARE) users can opt out of this check using insecureAllowUnpredictableImageContents.
//
// Layers without a tar-split (estargz layers and old zstd:chunked layers) can't produce an UncompressedDigest that
// matches the expected RootFS.DiffID; we always fall back to full pulls, again unless the user opts out
// via insecureAllowUnpredictableImageContents.
if output.UncompressedDigest == "" {
switch {
case c.pullOptions.insecureAllowUnpredictableImageContents:
// Oh well. Skip the costly digest computation.
case output.TarSplit != nil:
metadata := tsStorage.NewJSONUnpacker(bytes.NewReader(output.TarSplit))
fg := newStagedFileGetter(dirFile, flatPathNameMap)
digester := digest.Canonical.Digester()
if err := asm.WriteOutputTarStream(fg, metadata, digester.Hash()); err != nil {
return output, fmt.Errorf("digesting staged uncompressed stream: %w", err)
}
output.UncompressedDigest = digester.Digest()
default:
// We are checking for this earlier in GetDiffer, so this should not be reachable.
return output, fmt.Errorf(`internal error: layer's UncompressedDigest is unknown and "insecure_allow_unpredictable_image_contents" is not set`)
}
}
if totalChunksSize > 0 {
logrus.Debugf("Missing %d bytes out of %d (%.2f %%)", missingPartsSize, totalChunksSize, float32(missingPartsSize*100.0)/float32(totalChunksSize))
}
@ -1618,7 +1827,7 @@ func (c *chunkedDiffer) ApplyDiff(dest string, options *archive.TarOptions, diff
return output, nil
}
func mustSkipFile(fileType compressedFileType, e internal.FileMetadata) bool {
func mustSkipFile(fileType compressedFileType, e minimal.FileMetadata) bool {
// ignore the metadata files for the estargz format.
if fileType != fileTypeEstargz {
return false
@ -1631,7 +1840,7 @@ func mustSkipFile(fileType compressedFileType, e internal.FileMetadata) bool {
return false
}
func (c *chunkedDiffer) mergeTocEntries(fileType compressedFileType, entries []internal.FileMetadata) ([]fileMetadata, error) {
func (c *chunkedDiffer) mergeTocEntries(fileType compressedFileType, entries []minimal.FileMetadata) ([]fileMetadata, error) {
countNextChunks := func(start int) int {
count := 0
for _, e := range entries[start:] {
@ -1668,7 +1877,7 @@ func (c *chunkedDiffer) mergeTocEntries(fileType compressedFileType, entries []i
if e.Type == TypeReg {
nChunks := countNextChunks(i + 1)
e.chunks = make([]*internal.FileMetadata, nChunks+1)
e.chunks = make([]*minimal.FileMetadata, nChunks+1)
for j := 0; j <= nChunks; j++ {
// we need a copy here, otherwise we override the
// .Size later
@ -1703,7 +1912,7 @@ func (c *chunkedDiffer) mergeTocEntries(fileType compressedFileType, entries []i
// validateChunkChecksum checks if the file at $root/$path[offset:chunk.ChunkSize] has the
// same digest as chunk.ChunkDigest
func validateChunkChecksum(chunk *internal.FileMetadata, root, path string, offset int64, copyBuffer []byte) bool {
func validateChunkChecksum(chunk *minimal.FileMetadata, root, path string, offset int64, copyBuffer []byte) bool {
parentDirfd, err := unix.Open(root, unix.O_PATH|unix.O_CLOEXEC, 0)
if err != nil {
return false
@ -1734,3 +1943,33 @@ func validateChunkChecksum(chunk *internal.FileMetadata, root, path string, offs
return digester.Digest() == digest
}
// newStagedFileGetter returns an object usable as storage.FileGetter for rootDir.
// If flatPathNameMap is not nil, it must be used to map logical file names into the backing file paths.
func newStagedFileGetter(rootDir *os.File, flatPathNameMap map[string]string) *stagedFileGetter {
return &stagedFileGetter{
rootDir: rootDir,
flatPathNameMap: flatPathNameMap,
}
}
type stagedFileGetter struct {
rootDir *os.File
flatPathNameMap map[string]string // nil, or a map from filepath.Clean()ed tar file names to expected on-filesystem names
}
func (fg *stagedFileGetter) Get(filename string) (io.ReadCloser, error) {
if fg.flatPathNameMap != nil {
path, ok := fg.flatPathNameMap[filepath.Clean(filename)]
if !ok {
return nil, fmt.Errorf("no path mapping exists for tar entry %q", filename)
}
filename = path
}
pathFD, err := securejoin.OpenatInRoot(fg.rootDir, filename)
if err != nil {
return nil, err
}
defer pathFD.Close()
return securejoin.Reopen(pathFD, unix.O_RDONLY)
}
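An illustrative in-package helper (readStagedEntry is hypothetical) showing how one logical tar entry would be read through the getter, with the flat-layout mapping applied transparently:

// readStagedEntry is illustrative only; it is not part of the vendored code.
func readStagedEntry(rootDir *os.File, flatPathNameMap map[string]string, name string) ([]byte, error) {
    fg := newStagedFileGetter(rootDir, flatPathNameMap)
    rc, err := fg.Get(name) // remaps name when flatPathNameMap != nil
    if err != nil {
        return nil, err
    }
    defer rc.Close()
    return io.ReadAll(rc)
}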


@ -3,7 +3,7 @@ package toc
import (
"errors"
"github.com/containers/storage/pkg/chunked/internal"
"github.com/containers/storage/pkg/chunked/internal/minimal"
digest "github.com/opencontainers/go-digest"
)
@ -19,7 +19,7 @@ const tocJSONDigestAnnotation = "containerd.io/snapshot/stargz/toc.digest"
// This is an experimental feature and may be changed/removed in the future.
func GetTOCDigest(annotations map[string]string) (*digest.Digest, error) {
d1, ok1 := annotations[tocJSONDigestAnnotation]
d2, ok2 := annotations[internal.ManifestChecksumKey]
d2, ok2 := annotations[minimal.ManifestChecksumKey]
switch {
case ok1 && ok2:
return nil, errors.New("both zstd:chunked and eStargz TOC found")


@ -4,6 +4,7 @@ import (
"bufio"
"errors"
"fmt"
"io/fs"
"os"
"os/user"
"runtime"
@ -371,25 +372,64 @@ func checkChownErr(err error, name string, uid, gid int) error {
type Stat struct {
IDs IDPair
Mode os.FileMode
Major int
Minor int
}
// FormatContainersOverrideXattr will format the given uid, gid, and mode into a string
// that can be used as the value for the ContainersOverrideXattr xattr.
func FormatContainersOverrideXattr(uid, gid, mode int) string {
return fmt.Sprintf("%d:%d:0%o", uid, gid, mode&0o7777)
return FormatContainersOverrideXattrDevice(uid, gid, fs.FileMode(mode), 0, 0)
}
// FormatContainersOverrideXattrDevice will format the given uid, gid, and mode into a string
// that can be used as the value for the ContainersOverrideXattr xattr. For devices, it also
// needs the major and minor numbers.
func FormatContainersOverrideXattrDevice(uid, gid int, mode fs.FileMode, major, minor int) string {
typ := ""
switch mode & os.ModeType {
case os.ModeDir:
typ = "dir"
case os.ModeSymlink:
typ = "symlink"
case os.ModeNamedPipe:
typ = "pipe"
case os.ModeSocket:
typ = "socket"
case os.ModeDevice:
typ = fmt.Sprintf("block-%d-%d", major, minor)
case os.ModeDevice | os.ModeCharDevice:
typ = fmt.Sprintf("char-%d-%d", major, minor)
default:
typ = "file"
}
unixMode := mode & os.ModePerm
if mode&os.ModeSetuid != 0 {
unixMode |= 0o4000
}
if mode&os.ModeSetgid != 0 {
unixMode |= 0o2000
}
if mode&os.ModeSticky != 0 {
unixMode |= 0o1000
}
return fmt.Sprintf("%d:%d:%04o:%s", uid, gid, unixMode, typ)
}
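Two worked examples of the resulting "uid:gid:mode:type" encoding (the call is the exported API added above; the inputs are hypothetical):

package main

import (
    "fmt"
    "io/fs"

    "github.com/containers/storage/pkg/idtools"
)

func main() {
    // A root-owned directory, rwxr-xr-x:
    fmt.Println(idtools.FormatContainersOverrideXattrDevice(0, 0, fs.ModeDir|0o755, 0, 0))
    // -> "0:0:0755:dir"

    // A /dev/null-like character device (major 1, minor 3), rw-------:
    fmt.Println(idtools.FormatContainersOverrideXattrDevice(0, 0, fs.ModeDevice|fs.ModeCharDevice|0o600, 1, 3))
    // -> "0:0:0600:char-1-3"
}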
// GetContainersOverrideXattr will get and decode ContainersOverrideXattr.
func GetContainersOverrideXattr(path string) (Stat, error) {
var stat Stat
xstat, err := system.Lgetxattr(path, ContainersOverrideXattr)
if err != nil {
return stat, err
return Stat{}, err
}
return parseOverrideXattr(xstat) // This will fail if (xstat, err) == (nil, nil), i.e. the xattr does not exist.
}
func parseOverrideXattr(xstat []byte) (Stat, error) {
var stat Stat
attrs := strings.Split(string(xstat), ":")
if len(attrs) != 3 {
return stat, fmt.Errorf("The number of clons in %s does not equal to 3",
if len(attrs) < 3 {
return stat, fmt.Errorf("The number of parts in %s is less than 3",
ContainersOverrideXattr)
}
@ -397,47 +437,105 @@ func GetContainersOverrideXattr(path string) (Stat, error) {
if err != nil {
return stat, fmt.Errorf("Failed to parse UID: %w", err)
}
stat.IDs.UID = int(value)
value, err = strconv.ParseUint(attrs[0], 10, 32)
value, err = strconv.ParseUint(attrs[1], 10, 32)
if err != nil {
return stat, fmt.Errorf("Failed to parse GID: %w", err)
}
stat.IDs.GID = int(value)
value, err = strconv.ParseUint(attrs[2], 8, 32)
if err != nil {
return stat, fmt.Errorf("Failed to parse mode: %w", err)
}
stat.Mode = os.FileMode(value) & os.ModePerm
if value&0o1000 != 0 {
stat.Mode |= os.ModeSticky
}
if value&0o2000 != 0 {
stat.Mode |= os.ModeSetgid
}
if value&0o4000 != 0 {
stat.Mode |= os.ModeSetuid
}
stat.Mode = os.FileMode(value)
if len(attrs) > 3 {
typ := attrs[3]
if strings.HasPrefix(typ, "file") {
} else if strings.HasPrefix(typ, "dir") {
stat.Mode |= os.ModeDir
} else if strings.HasPrefix(typ, "symlink") {
stat.Mode |= os.ModeSymlink
} else if strings.HasPrefix(typ, "pipe") {
stat.Mode |= os.ModeNamedPipe
} else if strings.HasPrefix(typ, "socket") {
stat.Mode |= os.ModeSocket
} else if strings.HasPrefix(typ, "block") {
stat.Mode |= os.ModeDevice
stat.Major, stat.Minor, err = parseDevice(typ)
if err != nil {
return stat, err
}
} else if strings.HasPrefix(typ, "char") {
stat.Mode |= os.ModeDevice | os.ModeCharDevice
stat.Major, stat.Minor, err = parseDevice(typ)
if err != nil {
return stat, err
}
} else {
return stat, fmt.Errorf("Invalid file type %s", typ)
}
}
return stat, nil
}
func parseDevice(typ string) (int, int, error) {
parts := strings.Split(typ, "-")
// If there are more than 3 parts, just ignore them to be forward compatible
if len(parts) < 3 {
return 0, 0, fmt.Errorf("Invalid device type %s", typ)
}
if parts[0] != "block" && parts[0] != "char" {
return 0, 0, fmt.Errorf("Invalid device type %s", typ)
}
major, err := strconv.Atoi(parts[1])
if err != nil {
return 0, 0, fmt.Errorf("Failed to parse major number: %w", err)
}
minor, err := strconv.Atoi(parts[2])
if err != nil {
return 0, 0, fmt.Errorf("Failed to parse minor number: %w", err)
}
return major, minor, nil
}
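Decoding the type suffix goes the other way; a self-contained sketch mirroring parseDevice (parseDeviceSketch is illustrative only):

package main

import (
    "fmt"
    "strconv"
    "strings"
)

// parseDeviceSketch mirrors parseDevice above: "char-1-3" -> (1, 3).
// Extra "-"-separated parts beyond the third are ignored, for forward compatibility.
func parseDeviceSketch(typ string) (int, int, error) {
    parts := strings.Split(typ, "-")
    if len(parts) < 3 || (parts[0] != "block" && parts[0] != "char") {
        return 0, 0, fmt.Errorf("invalid device type %s", typ)
    }
    major, err := strconv.Atoi(parts[1])
    if err != nil {
        return 0, 0, err
    }
    minor, err := strconv.Atoi(parts[2])
    if err != nil {
        return 0, 0, err
    }
    return major, minor, nil
}

func main() {
    fmt.Println(parseDeviceSketch("char-1-3"))  // 1 3 <nil>
    fmt.Println(parseDeviceSketch("block-8-0")) // 8 0 <nil>
    fmt.Println(parseDeviceSketch("file"))      // 0 0 invalid device type file
}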
// SetContainersOverrideXattr will encode and set ContainersOverrideXattr.
func SetContainersOverrideXattr(path string, stat Stat) error {
value := FormatContainersOverrideXattr(stat.IDs.UID, stat.IDs.GID, int(stat.Mode))
value := FormatContainersOverrideXattrDevice(stat.IDs.UID, stat.IDs.GID, stat.Mode, stat.Major, stat.Minor)
return system.Lsetxattr(path, ContainersOverrideXattr, []byte(value), 0)
}
func SafeChown(name string, uid, gid int) error {
if runtime.GOOS == "darwin" {
var mode os.FileMode = 0o0700
stat := Stat{
Mode: os.FileMode(0o0700),
}
xstat, err := system.Lgetxattr(name, ContainersOverrideXattr)
if err == nil {
attrs := strings.Split(string(xstat), ":")
if len(attrs) == 3 {
val, err := strconv.ParseUint(attrs[2], 8, 32)
if err == nil {
mode = os.FileMode(val)
if err == nil && xstat != nil {
stat, err = parseOverrideXattr(xstat)
if err != nil {
return err
}
} else {
st, err := os.Stat(name) // Ideally we would share this with system.Stat below, but then we would need to convert Mode.
if err != nil {
return err
}
stat.Mode = st.Mode()
}
value := Stat{IDPair{uid, gid}, mode}
if err = SetContainersOverrideXattr(name, value); err != nil {
stat.IDs = IDPair{UID: uid, GID: gid}
if err = SetContainersOverrideXattr(name, stat); err != nil {
return err
}
uid = os.Getuid()
@ -453,19 +551,24 @@ func SafeChown(name string, uid, gid int) error {
func SafeLchown(name string, uid, gid int) error {
if runtime.GOOS == "darwin" {
var mode os.FileMode = 0o0700
stat := Stat{
Mode: os.FileMode(0o0700),
}
xstat, err := system.Lgetxattr(name, ContainersOverrideXattr)
if err == nil {
attrs := strings.Split(string(xstat), ":")
if len(attrs) == 3 {
val, err := strconv.ParseUint(attrs[2], 8, 32)
if err == nil {
mode = os.FileMode(val)
if err == nil && xstat != nil {
stat, err = parseOverrideXattr(xstat)
if err != nil {
return err
}
} else {
st, err := os.Lstat(name) // Ideally we would share this with system.Stat below, but then we would need to convert Mode.
if err != nil {
return err
}
stat.Mode = st.Mode()
}
value := Stat{IDPair{uid, gid}, mode}
if err = SetContainersOverrideXattr(name, value); err != nil {
stat.IDs = IDPair{UID: uid, GID: gid}
if err = SetContainersOverrideXattr(name, stat); err != nil {
return err
}
uid = os.Getuid()

View File

@ -1,4 +1,4 @@
//go:build linux && cgo
//go:build linux
package loopback

View File

@ -1,4 +1,4 @@
//go:build linux && cgo
//go:build linux
package loopback

View File

@ -1,21 +1,7 @@
//go:build linux && cgo
//go:build linux
package loopback
/*
#include <linux/loop.h> // FIXME: present only for defines, maybe we can remove it?
#ifndef LOOP_CTL_GET_FREE
#define LOOP_CTL_GET_FREE 0x4C82
#endif
#ifndef LO_FLAGS_PARTSCAN
#define LO_FLAGS_PARTSCAN 8
#endif
*/
import "C"
type loopInfo64 struct {
loDevice uint64 /* ioctl r/o */
loInode uint64 /* ioctl r/o */
@ -34,19 +20,19 @@ type loopInfo64 struct {
// IOCTL consts
const (
LoopSetFd = C.LOOP_SET_FD
LoopCtlGetFree = C.LOOP_CTL_GET_FREE
LoopGetStatus64 = C.LOOP_GET_STATUS64
LoopSetStatus64 = C.LOOP_SET_STATUS64
LoopClrFd = C.LOOP_CLR_FD
LoopSetCapacity = C.LOOP_SET_CAPACITY
LoopSetFd = 0x4C00
LoopCtlGetFree = 0x4C82
LoopGetStatus64 = 0x4C05
LoopSetStatus64 = 0x4C04
LoopClrFd = 0x4C01
LoopSetCapacity = 0x4C07
)
// LOOP consts.
const (
LoFlagsAutoClear = C.LO_FLAGS_AUTOCLEAR
LoFlagsReadOnly = C.LO_FLAGS_READ_ONLY
LoFlagsPartScan = C.LO_FLAGS_PARTSCAN
LoKeySize = C.LO_KEY_SIZE
LoNameSize = C.LO_NAME_SIZE
LoFlagsAutoClear = 0x4
LoFlagsReadOnly = 1
LoFlagsPartScan = 8
LoKeySize = 32
LoNameSize = 64
)

View File

@ -1,4 +1,4 @@
//go:build linux && cgo
//go:build linux
package loopback

View File

@ -0,0 +1,93 @@
//go:build freebsd
package system
import (
"os"
"unsafe"
"golang.org/x/sys/unix"
)
const (
EXTATTR_NAMESPACE_EMPTY = unix.EXTATTR_NAMESPACE_EMPTY
EXTATTR_NAMESPACE_USER = unix.EXTATTR_NAMESPACE_USER
EXTATTR_NAMESPACE_SYSTEM = unix.EXTATTR_NAMESPACE_SYSTEM
)
// ExtattrGetLink retrieves the value of the extended attribute identified by attrname
// in the given namespace and associated with the given path in the file system.
// If the path is a symbolic link, the extended attribute is retrieved from the link itself.
// Returns a []byte slice if the extattr is set and nil otherwise.
func ExtattrGetLink(path string, attrnamespace int, attrname string) ([]byte, error) {
size, errno := unix.ExtattrGetLink(path, attrnamespace, attrname,
uintptr(unsafe.Pointer(nil)), 0)
if errno != nil {
if errno == unix.ENOATTR {
return nil, nil
}
return nil, &os.PathError{Op: "extattr_get_link", Path: path, Err: errno}
}
if size == 0 {
return []byte{}, nil
}
dest := make([]byte, size)
size, errno = unix.ExtattrGetLink(path, attrnamespace, attrname,
uintptr(unsafe.Pointer(&dest[0])), size)
if errno != nil {
return nil, &os.PathError{Op: "extattr_get_link", Path: path, Err: errno}
}
return dest[:size], nil
}
// ExtattrSetLink sets the value of extended attribute identified by attrname
// in the given namespace and associated with the given path in the file system.
// If the path is a symbolic link, the extended attribute is set on the link itself.
func ExtattrSetLink(path string, attrnamespace int, attrname string, data []byte) error {
// Pass a nil pointer when data is empty: taking &data[0] of a zero-length
// slice would panic at run time.
var ptr unsafe.Pointer
if len(data) > 0 {
ptr = unsafe.Pointer(&data[0])
}
if _, errno := unix.ExtattrSetLink(path, attrnamespace, attrname,
uintptr(ptr), len(data)); errno != nil {
return &os.PathError{Op: "extattr_set_link", Path: path, Err: errno}
}
return nil
}
// ExtattrListLink lists extended attributes associated with the given path
// in the specified namespace. If the path is a symbolic link, the attributes
// are listed from the link itself.
func ExtattrListLink(path string, attrnamespace int) ([]string, error) {
size, errno := unix.ExtattrListLink(path, attrnamespace,
uintptr(unsafe.Pointer(nil)), 0)
if errno != nil {
return nil, &os.PathError{Op: "extattr_list_link", Path: path, Err: errno}
}
if size == 0 {
return []string{}, nil
}
dest := make([]byte, size)
size, errno = unix.ExtattrListLink(path, attrnamespace,
uintptr(unsafe.Pointer(&dest[0])), size)
if errno != nil {
return nil, &os.PathError{Op: "extattr_list_link", Path: path, Err: errno}
}
var attrs []string
for i := 0; i < size; {
// Each attribute is preceded by a single byte length
length := int(dest[i])
i++
if i+length > size {
break
}
attrs = append(attrs, string(dest[i:i+length]))
i += length
}
return attrs, nil
}

View File

@ -0,0 +1,24 @@
//go:build !freebsd
package system
const (
EXTATTR_NAMESPACE_EMPTY = 0
EXTATTR_NAMESPACE_USER = 0
EXTATTR_NAMESPACE_SYSTEM = 0
)
// ExtattrGetLink is not supported on platforms other than FreeBSD.
func ExtattrGetLink(path string, attrnamespace int, attrname string) ([]byte, error) {
return nil, ErrNotSupportedPlatform
}
// ExtattrSetLink is not supported on platforms other than FreeBSD.
func ExtattrSetLink(path string, attrnamespace int, attrname string, data []byte) error {
return ErrNotSupportedPlatform
}
// ExtattrListLink is not supported on platforms other than FreeBSD.
func ExtattrListLink(path string, attrnamespace int) ([]string, error) {
return nil, ErrNotSupportedPlatform
}

View File

@ -0,0 +1,13 @@
package system
import "syscall"
// fromStatT converts a syscall.Stat_t type to a system.Stat_t type
func fromStatT(s *syscall.Stat_t) (*StatT, error) {
return &StatT{size: s.Size,
mode: uint32(s.Mode),
uid: s.Uid,
gid: s.Gid,
rdev: uint64(s.Rdev),
mtim: s.Mtimespec}, nil
}

View File

@ -12,7 +12,7 @@ const (
E2BIG unix.Errno = unix.E2BIG
// Operation not supported
EOPNOTSUPP unix.Errno = unix.EOPNOTSUPP
ENOTSUP unix.Errno = unix.ENOTSUP
)
// Lgetxattr retrieves the value of the extended attribute identified by attr

View File

@ -0,0 +1,85 @@
package system
import (
"strings"
"golang.org/x/sys/unix"
)
const (
// Value is larger than the maximum size allowed
E2BIG unix.Errno = unix.E2BIG
// Operation not supported
ENOTSUP unix.Errno = unix.ENOTSUP
// Value is too small or too large for maximum size allowed
EOVERFLOW unix.Errno = unix.EOVERFLOW
)
var (
namespaceMap = map[string]int{
"user": EXTATTR_NAMESPACE_USER,
"system": EXTATTR_NAMESPACE_SYSTEM,
}
)
func xattrToExtattr(xattr string) (namespace int, extattr string, err error) {
namespaceName, extattr, found := strings.Cut(xattr, ".")
if !found {
return -1, "", ENOTSUP
}
namespace, ok := namespaceMap[namespaceName]
if !ok {
return -1, "", ENOTSUP
}
return namespace, extattr, nil
}
// Lgetxattr retrieves the value of the extended attribute identified by attr
// and associated with the given path in the file system.
// Returns a []byte slice if the xattr is set and nil otherwise.
func Lgetxattr(path string, attr string) ([]byte, error) {
namespace, extattr, err := xattrToExtattr(attr)
if err != nil {
return nil, err
}
return ExtattrGetLink(path, namespace, extattr)
}
// Lsetxattr sets the value of the extended attribute identified by attr
// and associated with the given path in the file system.
func Lsetxattr(path string, attr string, value []byte, flags int) error {
if flags != 0 {
// FIXME: Flags are not supported on FreeBSD, but we can implement
// them mimicking the behavior of the Linux implementation.
// See lsetxattr(2) on Linux for more information.
return ENOTSUP
}
namespace, extattr, err := xattrToExtattr(attr)
if err != nil {
return err
}
return ExtattrSetLink(path, namespace, extattr, value)
}
// Llistxattr lists extended attributes associated with the given path
// in the file system.
func Llistxattr(path string) ([]string, error) {
attrs := []string{}
for namespaceName, namespace := range namespaceMap {
namespaceAttrs, err := ExtattrListLink(path, namespace)
if err != nil {
return nil, err
}
for _, attr := range namespaceAttrs {
attrs = append(attrs, namespaceName+"."+attr)
}
}
return attrs, nil
}
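A minimal usage sketch of this shim, assuming a FreeBSD host and a writable file at /tmp/demo: Linux-style attribute names such as "user.demo" are cut at the first dot and routed to the matching extattr namespace.

//go:build freebsd

package main

import (
	"fmt"

	"github.com/containers/storage/pkg/system"
)

func main() {
	if err := system.Lsetxattr("/tmp/demo", "user.demo", []byte("value"), 0); err != nil {
		panic(err)
	}
	v, err := system.Lgetxattr("/tmp/demo", "user.demo") // nil if the attribute is unset
	if err != nil {
		panic(err)
	}
	fmt.Printf("user.demo = %q\n", v)

	attrs, err := system.Llistxattr("/tmp/demo") // e.g. [user.demo]
	if err != nil {
		panic(err)
	}
	fmt.Println(attrs)
}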

View File

@ -12,7 +12,7 @@ const (
E2BIG unix.Errno = unix.E2BIG
// Operation not supported
EOPNOTSUPP unix.Errno = unix.EOPNOTSUPP
ENOTSUP unix.Errno = unix.ENOTSUP
// Value is too small or too large for maximum size allowed
EOVERFLOW unix.Errno = unix.EOVERFLOW

View File

@ -1,4 +1,4 @@
//go:build !linux && !darwin
//go:build !linux && !darwin && !freebsd
package system
@ -9,7 +9,7 @@ const (
E2BIG syscall.Errno = syscall.Errno(0)
// Operation not supported
EOPNOTSUPP syscall.Errno = syscall.Errno(0)
ENOTSUP syscall.Errno = syscall.Errno(0)
// Value is too small or too large for maximum size allowed
EOVERFLOW syscall.Errno = syscall.Errno(0)

View File

@ -80,6 +80,25 @@ additionalimagestores = [
# This is a "string bool": "false" | "true" (cannot be native TOML boolean)
# convert_images = "false"
# This should ALMOST NEVER be set.
# It allows partial pulls of images without guaranteeing that "partial
# pulls" and non-partial pulls both result in consistent image contents.
# This allows pulling estargz images and early versions of zstd:chunked images;
# otherwise, these layers always use the traditional non-partial pull path.
#
# This option should be enabled EXTREMELY rarely, only if ALL images that could
# EVER be conceivably pulled on this system are GUARANTEED (e.g. using a signature policy)
# to come from a build system trusted to never attack image integrity.
#
# If this consistency enforcement were disabled, malicious images could be built
# in a way designed to evade other audit mechanisms, so presence of most other audit
# mechanisms is not a replacement for the above-mentioned need for all images to come
# from a trusted build system.
#
# As a side effect, enabling this option will also make image IDs unpredictable
# (usually not equal to the traditional value matching the config digest).
# insecure_allow_unpredictable_image_contents = "false"
# Root-auto-userns-user is a user name which can be used to look up one or more UID/GID
# ranges in the /etc/subuid and /etc/subgid file. These ranges will be partitioned
# to containers configured to create automatically a user namespace. Containers

View File

@ -20,6 +20,7 @@ import (
_ "github.com/containers/storage/drivers/register"
drivers "github.com/containers/storage/drivers"
"github.com/containers/storage/internal/dedup"
"github.com/containers/storage/pkg/archive"
"github.com/containers/storage/pkg/directory"
"github.com/containers/storage/pkg/idtools"
@ -166,6 +167,26 @@ type flaggableStore interface {
type StoreOptions = types.StoreOptions
type DedupHashMethod = dedup.DedupHashMethod
const (
DedupHashInvalid = dedup.DedupHashInvalid
DedupHashCRC = dedup.DedupHashCRC
DedupHashFileSize = dedup.DedupHashFileSize
DedupHashSHA256 = dedup.DedupHashSHA256
)
type (
DedupOptions = dedup.DedupOptions
DedupResult = dedup.DedupResult
)
// DedupArgs is used to pass arguments to the Dedup command.
type DedupArgs struct {
// Options that are passed directly to the internal/dedup.DedupDirs function.
Options DedupOptions
}
// Store wraps up the various types of file-based stores that we use into a
// singleton object that initializes and manages them all together.
type Store interface {
@ -589,6 +610,9 @@ type Store interface {
// MultiList returns consistent values as of a single point in time.
// WARNING: The values may already be out of date by the time they are returned to the caller.
MultiList(MultiListOptions) (MultiListResult, error)
// Dedup deduplicates layers in the store.
Dedup(DedupArgs) (drivers.DedupResult, error)
}
// AdditionalLayer represents a layer that is contained in the additional layer store
@ -3843,3 +3867,43 @@ func (s *store) MultiList(options MultiListOptions) (MultiListResult, error) {
}
return out, nil
}
// Dedup deduplicates layers in the store.
func (s *store) Dedup(req DedupArgs) (drivers.DedupResult, error) {
imgs, err := s.Images()
if err != nil {
return drivers.DedupResult{}, err
}
var topLayers []string
for _, i := range imgs {
topLayers = append(topLayers, i.TopLayer)
topLayers = append(topLayers, i.MappedTopLayers...)
}
return writeToLayerStore(s, func(rlstore rwLayerStore) (drivers.DedupResult, error) {
layers := make(map[string]struct{})
for _, i := range topLayers {
cur := i
for cur != "" {
if _, visited := layers[cur]; visited {
break
}
l, err := rlstore.Get(cur)
if err != nil {
if err == ErrLayerUnknown {
break
}
return drivers.DedupResult{}, err
}
layers[cur] = struct{}{}
cur = l.Parent
}
}
r := drivers.DedupArgs{
Options: req.Options,
}
for l := range layers {
r.Layers = append(r.Layers, l)
}
return rlstore.dedup(r)
})
}
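A hedged sketch of driving the new API; the DedupOptions and DedupResult field names used here (HashMethod, Deduped) are assumptions about the dedup packages, not shown in this diff:

func dedupStore(store storage.Store) error {
	res, err := store.Dedup(storage.DedupArgs{
		// HashMethod is assumed to select how candidate files are compared.
		Options: storage.DedupOptions{HashMethod: storage.DedupHashSHA256},
	})
	if err != nil {
		return err
	}
	// Deduped is assumed to report the number of bytes reclaimed.
	fmt.Printf("deduplicated %d bytes\n", res.Deduped)
	return nil
}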

View File

@ -154,40 +154,65 @@ var supportedAlgorithms = map[string]bool{
EdDSA: true,
}
// ProviderConfig allows creating providers when discovery isn't supported. It's
// generally easier to use NewProvider directly.
// ProviderConfig allows direct creation of a [Provider] from metadata
// configuration. This is intended for interop with providers that don't support
// discovery, or host the JSON discovery document at an off-spec path.
//
// The ProviderConfig struct specifies JSON struct tags to support document
// parsing.
//
// // Directly fetch the metadata document.
// resp, err := http.Get("https://login.example.com/custom-metadata-path")
// if err != nil {
// // ...
// }
// defer resp.Body.Close()
//
// // Parse config from JSON metadata.
// config := &oidc.ProviderConfig{}
// if err := json.NewDecoder(resp.Body).Decode(config); err != nil {
// // ...
// }
// p := config.NewProvider(context.Background())
//
// For providers that implement discovery, use [NewProvider] instead.
//
// See: https://openid.net/specs/openid-connect-discovery-1_0.html
type ProviderConfig struct {
// IssuerURL is the identity of the provider, and the string it uses to sign
// ID tokens with. For example "https://accounts.google.com". This value MUST
// match ID tokens exactly.
IssuerURL string
IssuerURL string `json:"issuer"`
// AuthURL is the endpoint used by the provider to support the OAuth 2.0
// authorization endpoint.
AuthURL string
AuthURL string `json:"authorization_endpoint"`
// TokenURL is the endpoint used by the provider to support the OAuth 2.0
// token endpoint.
TokenURL string
TokenURL string `json:"token_endpoint"`
// DeviceAuthURL is the endpoint used by the provider to support the OAuth 2.0
// device authorization endpoint.
DeviceAuthURL string
DeviceAuthURL string `json:"device_authorization_endpoint"`
// UserInfoURL is the endpoint used by the provider to support the OpenID
// Connect UserInfo flow.
//
// https://openid.net/specs/openid-connect-core-1_0.html#UserInfo
UserInfoURL string
UserInfoURL string `json:"userinfo_endpoint"`
// JWKSURL is the endpoint used by the provider to advertise public keys to
// verify issued ID tokens. This endpoint is polled as new keys are made
// available.
JWKSURL string
JWKSURL string `json:"jwks_uri"`
// Algorithms, if provided, indicate a list of JWT algorithms allowed to sign
// ID tokens. If not provided, this defaults to the algorithms advertised by
// the JWK endpoint, then the set of algorithms supported by this package.
Algorithms []string
Algorithms []string `json:"id_token_signing_alg_values_supported"`
}
// NewProvider initializes a provider from a set of endpoints, rather than
// through discovery.
//
// The provided context is only used for [http.Client] configuration through
// [ClientContext], not cancelation.
func (p *ProviderConfig) NewProvider(ctx context.Context) *Provider {
return &Provider{
issuer: p.IssuerURL,
@ -202,9 +227,14 @@ func (p *ProviderConfig) NewProvider(ctx context.Context) *Provider {
}
// NewProvider uses the OpenID Connect discovery mechanism to construct a Provider.
//
// The issuer is the URL identifier for the service. For example: "https://accounts.google.com"
// or "https://login.salesforce.com".
//
// OpenID Connect providers that don't implement discovery or host the discovery
// document at a non-spec compliant path (such as requiring a URL parameter),
// should use [ProviderConfig] instead.
//
// See: https://openid.net/specs/openid-connect-discovery-1_0.html
func NewProvider(ctx context.Context, issuer string) (*Provider, error) {
wellKnown := strings.TrimSuffix(issuer, "/") + "/.well-known/openid-configuration"
req, err := http.NewRequest("GET", wellKnown, nil)

View File

@ -1195,6 +1195,7 @@ definitions:
- "default"
- "process"
- "hyperv"
- ""
MaskedPaths:
type: "array"
description: |
@ -4180,6 +4181,7 @@ definitions:
- "default"
- "process"
- "hyperv"
- ""
Init:
description: |
Run an init inside the container that forwards signals and reaps
@ -5750,6 +5752,7 @@ definitions:
- "default"
- "hyperv"
- "process"
- ""
InitBinary:
description: |
Name and, optional, path of the `docker-init` binary.
@ -11632,6 +11635,7 @@ paths:
example:
ListenAddr: "0.0.0.0:2377"
AdvertiseAddr: "192.168.1.1:2377"
DataPathAddr: "192.168.1.1"
RemoteAddrs:
- "node1:2377"
JoinToken: "SWMTKN-1-3pu6hszjas19xyp7ghgosyx9k8atbfcr8p2is99znpy26u2lkl-7p73s1dx5in4tatdymyhg9hu2"

View File

@ -10,7 +10,7 @@ import (
"github.com/docker/docker/api/types/network"
"github.com/docker/docker/api/types/strslice"
"github.com/docker/go-connections/nat"
units "github.com/docker/go-units"
"github.com/docker/go-units"
)
// CgroupnsMode represents the cgroup namespace mode of the container

View File

@ -18,6 +18,8 @@ const blockThreshold = 1e6
var (
// ErrClosed is returned when Write is called on a closed BytesPipe.
//
// Deprecated: this type is only used internally, and will be removed in the next release.
ErrClosed = errors.New("write to closed BytesPipe")
bufPools = make(map[int]*sync.Pool)
@ -28,6 +30,8 @@ var (
// All written data may be read at most once. Also, BytesPipe allocates
// and releases new byte slices to adjust to current needs, so the buffer
// won't be overgrown after peak loads.
//
// Deprecated: this type is only used internally, and will be removed in the next release.
type BytesPipe struct {
mu sync.Mutex
wait *sync.Cond
@ -40,6 +44,8 @@ type BytesPipe struct {
// NewBytesPipe creates new BytesPipe, initialized by specified slice.
// If buf is nil, then it will be initialized with a slice whose cap is 64.
// buf will be adjusted so that len(buf) == 0 while its capacity is preserved.
//
// Deprecated: this function is only used internally, and will be removed in the next release.
func NewBytesPipe() *BytesPipe {
bp := &BytesPipe{}
bp.buf = append(bp.buf, getBuffer(minCap))

View File

@ -80,13 +80,19 @@ func (wf *WriteFlusher) Close() error {
return nil
}
// nopFlusher represents a type which flush operation is nop.
type nopFlusher struct{}
// Flush is a nop operation.
func (f *nopFlusher) Flush() {}
// NewWriteFlusher returns a new WriteFlusher.
func NewWriteFlusher(w io.Writer) *WriteFlusher {
var fl flusher
if f, ok := w.(flusher); ok {
fl = f
} else {
fl = &NopFlusher{}
fl = &nopFlusher{}
}
return &WriteFlusher{w: w, flusher: fl, closed: make(chan struct{}), flushed: make(chan struct{})}
}

View File

@ -6,6 +6,8 @@ import (
)
// NopWriter represents a type which write operation is nop.
//
// Deprecated: use [io.Discard] instead. This type will be removed in the next release.
type NopWriter struct{}
func (*NopWriter) Write(buf []byte) (int, error) {
@ -19,15 +21,16 @@ type nopWriteCloser struct {
func (w *nopWriteCloser) Close() error { return nil }
// NopWriteCloser returns a nopWriteCloser.
//
// Deprecated: This function is no longer used and will be removed in the next release.
func NopWriteCloser(w io.Writer) io.WriteCloser {
return &nopWriteCloser{w}
}
// NopFlusher represents a type which flush operation is nop.
type NopFlusher struct{}
// Flush is a nop operation.
func (f *NopFlusher) Flush() {}
//
// Deprecated: NopFlusher is only used internally and will be removed in the next release.
type NopFlusher = nopFlusher
type writeCloserWrapper struct {
io.Writer
@ -55,12 +58,16 @@ func NewWriteCloserWrapper(r io.Writer, closer func() error) io.WriteCloser {
// of bytes written to the writer during a "session".
// This can be convenient when write return is masked
// (e.g., json.Encoder.Encode())
//
// Deprecated: this type is no longer used and will be removed in the next release.
type WriteCounter struct {
Count int64
Writer io.Writer
}
// NewWriteCounter returns a new WriteCounter.
//
// Deprecated: this function is no longer used and will be removed in the next release.
func NewWriteCounter(w io.Writer) *WriteCounter {
return &WriteCounter{
Writer: w,

View File

@ -7,7 +7,7 @@ import (
"strings"
"time"
units "github.com/docker/go-units"
"github.com/docker/go-units"
"github.com/moby/term"
"github.com/morikuni/aec"
)

View File

@ -109,6 +109,12 @@ to github):
git push origin your-branch --force
Alternatively, a core member can squash your commits within Github.
## DCO Signoff
Make sure to sign the [Developer Certificate of
Origin](https://git-scm.com/docs/git-commit#Documentation/git-commit.txt---signoff).
## Code of Conduct
Rekor adheres to and enforces the [Contributor Covenant](http://contributor-covenant.org/version/1/4/) Code of Conduct.

View File

@ -16,6 +16,7 @@ package client
import (
"net/http"
"time"
"github.com/hashicorp/go-retryablehttp"
)
@ -26,8 +27,12 @@ type Option func(*options)
type options struct {
UserAgent string
RetryCount uint
RetryWaitMin time.Duration
RetryWaitMax time.Duration
InsecureTLS bool
Logger interface{}
NoDisableKeepalives bool
Headers map[string][]string
}
const (
@ -62,6 +67,20 @@ func WithRetryCount(retryCount uint) Option {
}
}
// WithRetryWaitMin sets the minimum length of time to wait between retries.
func WithRetryWaitMin(t time.Duration) Option {
return func(o *options) {
o.RetryWaitMin = t
}
}
// WithRetryWaitMax sets the maximum length of time to wait between retries.
func WithRetryWaitMax(t time.Duration) Option {
return func(o *options) {
o.RetryWaitMax = t
}
}
// WithLogger sets the logger; it must implement either retryablehttp.Logger or retryablehttp.LeveledLogger; if not, this will not take effect.
func WithLogger(logger interface{}) Option {
return func(o *options) {
@ -72,20 +91,41 @@ func WithLogger(logger interface{}) Option {
}
}
// WithInsecureTLS disables TLS verification.
func WithInsecureTLS(enabled bool) Option {
return func(o *options) {
o.InsecureTLS = enabled
}
}
// WithNoDisableKeepalives unsets the default DisableKeepalives setting.
func WithNoDisableKeepalives(noDisableKeepalives bool) Option {
return func(o *options) {
o.NoDisableKeepalives = noDisableKeepalives
}
}
// WithHeaders sets default headers for every client request.
func WithHeaders(h map[string][]string) Option {
return func(o *options) {
o.Headers = h
}
}
type roundTripper struct {
http.RoundTripper
UserAgent string
Headers map[string][]string
}
// RoundTrip implements `http.RoundTripper`
func (rt *roundTripper) RoundTrip(req *http.Request) (*http.Response, error) {
req.Header.Set("User-Agent", rt.UserAgent)
for k, v := range rt.Headers {
for _, h := range v {
req.Header.Add(k, h)
}
}
return rt.RoundTripper.RoundTrip(req)
}
@ -93,12 +133,13 @@ func createRoundTripper(inner http.RoundTripper, o *options) http.RoundTripper {
if inner == nil {
inner = http.DefaultTransport
}
if o.UserAgent == "" {
if o.UserAgent == "" && o.Headers == nil {
// There's nothing to do...
return inner
}
return &roundTripper{
RoundTripper: inner,
UserAgent: o.UserAgent,
Headers: o.Headers,
}
}

View File

@ -22,6 +22,7 @@ import (
"github.com/go-openapi/runtime"
httptransport "github.com/go-openapi/runtime/client"
"github.com/go-openapi/strfmt"
"github.com/hashicorp/go-cleanhttp"
retryablehttp "github.com/hashicorp/go-retryablehttp"
"github.com/sigstore/rekor/pkg/generated/client"
@ -37,6 +38,9 @@ func GetRekorClient(rekorServerURL string, opts ...Option) (*client.Rekor, error
retryableClient := retryablehttp.NewClient()
defaultTransport := cleanhttp.DefaultTransport()
if o.NoDisableKeepalives {
defaultTransport.DisableKeepAlives = false
}
if o.InsecureTLS {
/* #nosec G402 */
defaultTransport.TLSClientConfig = &tls.Config{InsecureSkipVerify: true}
@ -45,6 +49,8 @@ func GetRekorClient(rekorServerURL string, opts ...Option) (*client.Rekor, error
Transport: defaultTransport,
}
retryableClient.RetryMax = int(o.RetryCount)
retryableClient.RetryWaitMin = o.RetryWaitMin
retryableClient.RetryWaitMax = o.RetryWaitMax
retryableClient.Logger = o.Logger
httpClient := retryableClient.StandardClient()
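A hedged sketch of how the new options compose with GetRekorClient; the server URL and header are example values only.

package main

import (
	"time"

	"github.com/sigstore/rekor/pkg/client"
)

func main() {
	rekor, err := client.GetRekorClient("https://rekor.sigstore.dev",
		client.WithRetryCount(3),
		client.WithRetryWaitMin(500*time.Millisecond),
		client.WithRetryWaitMax(5*time.Second),
		client.WithHeaders(map[string][]string{"X-Demo": {"value"}}),
	)
	if err != nil {
		panic(err)
	}
	_ = rekor // e.g. rekor.Entries, rekor.Tlog, ...
}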

View File

@ -22,6 +22,7 @@ package entries
// Editing this file might prove futile when you re-run the swagger generate command
import (
"encoding/json"
"fmt"
"io"
@ -126,11 +127,13 @@ func (o *CreateLogEntryCreated) Code() int {
}
func (o *CreateLogEntryCreated) Error() string {
return fmt.Sprintf("[POST /api/v1/log/entries][%d] createLogEntryCreated %+v", 201, o.Payload)
payload, _ := json.Marshal(o.Payload)
return fmt.Sprintf("[POST /api/v1/log/entries][%d] createLogEntryCreated %s", 201, payload)
}
func (o *CreateLogEntryCreated) String() string {
return fmt.Sprintf("[POST /api/v1/log/entries][%d] createLogEntryCreated %+v", 201, o.Payload)
payload, _ := json.Marshal(o.Payload)
return fmt.Sprintf("[POST /api/v1/log/entries][%d] createLogEntryCreated %s", 201, payload)
}
func (o *CreateLogEntryCreated) GetPayload() models.LogEntry {
@ -210,11 +213,13 @@ func (o *CreateLogEntryBadRequest) Code() int {
}
func (o *CreateLogEntryBadRequest) Error() string {
return fmt.Sprintf("[POST /api/v1/log/entries][%d] createLogEntryBadRequest %+v", 400, o.Payload)
payload, _ := json.Marshal(o.Payload)
return fmt.Sprintf("[POST /api/v1/log/entries][%d] createLogEntryBadRequest %s", 400, payload)
}
func (o *CreateLogEntryBadRequest) String() string {
return fmt.Sprintf("[POST /api/v1/log/entries][%d] createLogEntryBadRequest %+v", 400, o.Payload)
payload, _ := json.Marshal(o.Payload)
return fmt.Sprintf("[POST /api/v1/log/entries][%d] createLogEntryBadRequest %s", 400, payload)
}
func (o *CreateLogEntryBadRequest) GetPayload() *models.Error {
@ -280,11 +285,13 @@ func (o *CreateLogEntryConflict) Code() int {
}
func (o *CreateLogEntryConflict) Error() string {
return fmt.Sprintf("[POST /api/v1/log/entries][%d] createLogEntryConflict %+v", 409, o.Payload)
payload, _ := json.Marshal(o.Payload)
return fmt.Sprintf("[POST /api/v1/log/entries][%d] createLogEntryConflict %s", 409, payload)
}
func (o *CreateLogEntryConflict) String() string {
return fmt.Sprintf("[POST /api/v1/log/entries][%d] createLogEntryConflict %+v", 409, o.Payload)
payload, _ := json.Marshal(o.Payload)
return fmt.Sprintf("[POST /api/v1/log/entries][%d] createLogEntryConflict %s", 409, payload)
}
func (o *CreateLogEntryConflict) GetPayload() *models.Error {
@ -363,11 +370,13 @@ func (o *CreateLogEntryDefault) Code() int {
}
func (o *CreateLogEntryDefault) Error() string {
return fmt.Sprintf("[POST /api/v1/log/entries][%d] createLogEntry default %+v", o._statusCode, o.Payload)
payload, _ := json.Marshal(o.Payload)
return fmt.Sprintf("[POST /api/v1/log/entries][%d] createLogEntry default %s", o._statusCode, payload)
}
func (o *CreateLogEntryDefault) String() string {
return fmt.Sprintf("[POST /api/v1/log/entries][%d] createLogEntry default %+v", o._statusCode, o.Payload)
payload, _ := json.Marshal(o.Payload)
return fmt.Sprintf("[POST /api/v1/log/entries][%d] createLogEntry default %s", o._statusCode, payload)
}
func (o *CreateLogEntryDefault) GetPayload() *models.Error {

View File

@ -23,6 +23,7 @@ package entries
import (
"github.com/go-openapi/runtime"
httptransport "github.com/go-openapi/runtime/client"
"github.com/go-openapi/strfmt"
)
@ -31,6 +32,31 @@ func New(transport runtime.ClientTransport, formats strfmt.Registry) ClientServi
return &Client{transport: transport, formats: formats}
}
// NewClientWithBasicAuth creates a new entries API client with basic auth credentials.
// It takes the following parameters:
// - host: http host (github.com).
// - basePath: any base path for the API client ("/v1", "/v3").
// - scheme: http scheme ("http", "https").
// - user: user for basic authentication header.
// - password: password for basic authentication header.
func NewClientWithBasicAuth(host, basePath, scheme, user, password string) ClientService {
transport := httptransport.New(host, basePath, []string{scheme})
transport.DefaultAuthentication = httptransport.BasicAuth(user, password)
return &Client{transport: transport, formats: strfmt.Default}
}
// NewClientWithBearerToken creates a new entries API client with a bearer token for authentication.
// It takes the following parameters:
// - host: http host (github.com).
// - basePath: any base path for the API client ("/v1", "/v3").
// - scheme: http scheme ("http", "https").
// - bearerToken: bearer token for Bearer authentication header.
func NewClientWithBearerToken(host, basePath, scheme, bearerToken string) ClientService {
transport := httptransport.New(host, basePath, []string{scheme})
transport.DefaultAuthentication = httptransport.BearerToken(bearerToken)
return &Client{transport: transport, formats: strfmt.Default}
}
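A hedged sketch of the new constructors; host, base path, and token are example values, and the generated NewGetLogEntryByIndexParams helper is assumed to exist in this package, as is usual for go-swagger clients.

svc := entries.NewClientWithBearerToken("rekor.example.com", "/", "https", token)
params := entries.NewGetLogEntryByIndexParams().WithLogIndex(42)
resp, err := svc.GetLogEntryByIndex(params)
if err != nil {
	// handle the error
}
fmt.Println(resp.GetPayload())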
/*
Client for entries API
*/
@ -39,7 +65,7 @@ type Client struct {
formats strfmt.Registry
}
// ClientOption is the option for Client methods
// ClientOption may be used to customize the behavior of Client methods.
type ClientOption func(*runtime.ClientOperation)
// ClientService is the interface for Client methods

View File

@ -22,6 +22,7 @@ package entries
// Editing this file might prove futile when you re-run the swagger generate command
import (
"encoding/json"
"fmt"
"io"
@ -108,11 +109,13 @@ func (o *GetLogEntryByIndexOK) Code() int {
}
func (o *GetLogEntryByIndexOK) Error() string {
return fmt.Sprintf("[GET /api/v1/log/entries][%d] getLogEntryByIndexOK %+v", 200, o.Payload)
payload, _ := json.Marshal(o.Payload)
return fmt.Sprintf("[GET /api/v1/log/entries][%d] getLogEntryByIndexOK %s", 200, payload)
}
func (o *GetLogEntryByIndexOK) String() string {
return fmt.Sprintf("[GET /api/v1/log/entries][%d] getLogEntryByIndexOK %+v", 200, o.Payload)
payload, _ := json.Marshal(o.Payload)
return fmt.Sprintf("[GET /api/v1/log/entries][%d] getLogEntryByIndexOK %s", 200, payload)
}
func (o *GetLogEntryByIndexOK) GetPayload() models.LogEntry {
@ -173,11 +176,11 @@ func (o *GetLogEntryByIndexNotFound) Code() int {
}
func (o *GetLogEntryByIndexNotFound) Error() string {
return fmt.Sprintf("[GET /api/v1/log/entries][%d] getLogEntryByIndexNotFound ", 404)
return fmt.Sprintf("[GET /api/v1/log/entries][%d] getLogEntryByIndexNotFound", 404)
}
func (o *GetLogEntryByIndexNotFound) String() string {
return fmt.Sprintf("[GET /api/v1/log/entries][%d] getLogEntryByIndexNotFound ", 404)
return fmt.Sprintf("[GET /api/v1/log/entries][%d] getLogEntryByIndexNotFound", 404)
}
func (o *GetLogEntryByIndexNotFound) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {
@ -234,11 +237,13 @@ func (o *GetLogEntryByIndexDefault) Code() int {
}
func (o *GetLogEntryByIndexDefault) Error() string {
return fmt.Sprintf("[GET /api/v1/log/entries][%d] getLogEntryByIndex default %+v", o._statusCode, o.Payload)
payload, _ := json.Marshal(o.Payload)
return fmt.Sprintf("[GET /api/v1/log/entries][%d] getLogEntryByIndex default %s", o._statusCode, payload)
}
func (o *GetLogEntryByIndexDefault) String() string {
return fmt.Sprintf("[GET /api/v1/log/entries][%d] getLogEntryByIndex default %+v", o._statusCode, o.Payload)
payload, _ := json.Marshal(o.Payload)
return fmt.Sprintf("[GET /api/v1/log/entries][%d] getLogEntryByIndex default %s", o._statusCode, payload)
}
func (o *GetLogEntryByIndexDefault) GetPayload() *models.Error {

View File

@ -22,6 +22,7 @@ package entries
// Editing this file might prove futile when you re-run the swagger generate command
import (
"encoding/json"
"fmt"
"io"
@ -108,11 +109,13 @@ func (o *GetLogEntryByUUIDOK) Code() int {
}
func (o *GetLogEntryByUUIDOK) Error() string {
return fmt.Sprintf("[GET /api/v1/log/entries/{entryUUID}][%d] getLogEntryByUuidOK %+v", 200, o.Payload)
payload, _ := json.Marshal(o.Payload)
return fmt.Sprintf("[GET /api/v1/log/entries/{entryUUID}][%d] getLogEntryByUuidOK %s", 200, payload)
}
func (o *GetLogEntryByUUIDOK) String() string {
return fmt.Sprintf("[GET /api/v1/log/entries/{entryUUID}][%d] getLogEntryByUuidOK %+v", 200, o.Payload)
payload, _ := json.Marshal(o.Payload)
return fmt.Sprintf("[GET /api/v1/log/entries/{entryUUID}][%d] getLogEntryByUuidOK %s", 200, payload)
}
func (o *GetLogEntryByUUIDOK) GetPayload() models.LogEntry {
@ -173,11 +176,11 @@ func (o *GetLogEntryByUUIDNotFound) Code() int {
}
func (o *GetLogEntryByUUIDNotFound) Error() string {
return fmt.Sprintf("[GET /api/v1/log/entries/{entryUUID}][%d] getLogEntryByUuidNotFound ", 404)
return fmt.Sprintf("[GET /api/v1/log/entries/{entryUUID}][%d] getLogEntryByUuidNotFound", 404)
}
func (o *GetLogEntryByUUIDNotFound) String() string {
return fmt.Sprintf("[GET /api/v1/log/entries/{entryUUID}][%d] getLogEntryByUuidNotFound ", 404)
return fmt.Sprintf("[GET /api/v1/log/entries/{entryUUID}][%d] getLogEntryByUuidNotFound", 404)
}
func (o *GetLogEntryByUUIDNotFound) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {
@ -234,11 +237,13 @@ func (o *GetLogEntryByUUIDDefault) Code() int {
}
func (o *GetLogEntryByUUIDDefault) Error() string {
return fmt.Sprintf("[GET /api/v1/log/entries/{entryUUID}][%d] getLogEntryByUUID default %+v", o._statusCode, o.Payload)
payload, _ := json.Marshal(o.Payload)
return fmt.Sprintf("[GET /api/v1/log/entries/{entryUUID}][%d] getLogEntryByUUID default %s", o._statusCode, payload)
}
func (o *GetLogEntryByUUIDDefault) String() string {
return fmt.Sprintf("[GET /api/v1/log/entries/{entryUUID}][%d] getLogEntryByUUID default %+v", o._statusCode, o.Payload)
payload, _ := json.Marshal(o.Payload)
return fmt.Sprintf("[GET /api/v1/log/entries/{entryUUID}][%d] getLogEntryByUUID default %s", o._statusCode, payload)
}
func (o *GetLogEntryByUUIDDefault) GetPayload() *models.Error {

View File

@ -22,6 +22,7 @@ package entries
// Editing this file might prove futile when you re-run the swagger generate command
import (
"encoding/json"
"fmt"
"io"
@ -114,11 +115,13 @@ func (o *SearchLogQueryOK) Code() int {
}
func (o *SearchLogQueryOK) Error() string {
return fmt.Sprintf("[POST /api/v1/log/entries/retrieve][%d] searchLogQueryOK %+v", 200, o.Payload)
payload, _ := json.Marshal(o.Payload)
return fmt.Sprintf("[POST /api/v1/log/entries/retrieve][%d] searchLogQueryOK %s", 200, payload)
}
func (o *SearchLogQueryOK) String() string {
return fmt.Sprintf("[POST /api/v1/log/entries/retrieve][%d] searchLogQueryOK %+v", 200, o.Payload)
payload, _ := json.Marshal(o.Payload)
return fmt.Sprintf("[POST /api/v1/log/entries/retrieve][%d] searchLogQueryOK %s", 200, payload)
}
func (o *SearchLogQueryOK) GetPayload() []models.LogEntry {
@ -180,11 +183,13 @@ func (o *SearchLogQueryBadRequest) Code() int {
}
func (o *SearchLogQueryBadRequest) Error() string {
return fmt.Sprintf("[POST /api/v1/log/entries/retrieve][%d] searchLogQueryBadRequest %+v", 400, o.Payload)
payload, _ := json.Marshal(o.Payload)
return fmt.Sprintf("[POST /api/v1/log/entries/retrieve][%d] searchLogQueryBadRequest %s", 400, payload)
}
func (o *SearchLogQueryBadRequest) String() string {
return fmt.Sprintf("[POST /api/v1/log/entries/retrieve][%d] searchLogQueryBadRequest %+v", 400, o.Payload)
payload, _ := json.Marshal(o.Payload)
return fmt.Sprintf("[POST /api/v1/log/entries/retrieve][%d] searchLogQueryBadRequest %s", 400, payload)
}
func (o *SearchLogQueryBadRequest) GetPayload() *models.Error {
@ -248,11 +253,13 @@ func (o *SearchLogQueryUnprocessableEntity) Code() int {
}
func (o *SearchLogQueryUnprocessableEntity) Error() string {
return fmt.Sprintf("[POST /api/v1/log/entries/retrieve][%d] searchLogQueryUnprocessableEntity %+v", 422, o.Payload)
payload, _ := json.Marshal(o.Payload)
return fmt.Sprintf("[POST /api/v1/log/entries/retrieve][%d] searchLogQueryUnprocessableEntity %s", 422, payload)
}
func (o *SearchLogQueryUnprocessableEntity) String() string {
return fmt.Sprintf("[POST /api/v1/log/entries/retrieve][%d] searchLogQueryUnprocessableEntity %+v", 422, o.Payload)
payload, _ := json.Marshal(o.Payload)
return fmt.Sprintf("[POST /api/v1/log/entries/retrieve][%d] searchLogQueryUnprocessableEntity %s", 422, payload)
}
func (o *SearchLogQueryUnprocessableEntity) GetPayload() *models.Error {
@ -320,11 +327,13 @@ func (o *SearchLogQueryDefault) Code() int {
}
func (o *SearchLogQueryDefault) Error() string {
return fmt.Sprintf("[POST /api/v1/log/entries/retrieve][%d] searchLogQuery default %+v", o._statusCode, o.Payload)
payload, _ := json.Marshal(o.Payload)
return fmt.Sprintf("[POST /api/v1/log/entries/retrieve][%d] searchLogQuery default %s", o._statusCode, payload)
}
func (o *SearchLogQueryDefault) String() string {
return fmt.Sprintf("[POST /api/v1/log/entries/retrieve][%d] searchLogQuery default %+v", o._statusCode, o.Payload)
payload, _ := json.Marshal(o.Payload)
return fmt.Sprintf("[POST /api/v1/log/entries/retrieve][%d] searchLogQuery default %s", o._statusCode, payload)
}
func (o *SearchLogQueryDefault) GetPayload() *models.Error {

View File

@ -23,6 +23,7 @@ package index
import (
"github.com/go-openapi/runtime"
httptransport "github.com/go-openapi/runtime/client"
"github.com/go-openapi/strfmt"
)
@ -31,6 +32,31 @@ func New(transport runtime.ClientTransport, formats strfmt.Registry) ClientServi
return &Client{transport: transport, formats: formats}
}
// NewClientWithBasicAuth creates a new index API client with basic auth credentials.
// It takes the following parameters:
// - host: http host (github.com).
// - basePath: any base path for the API client ("/v1", "/v3").
// - scheme: http scheme ("http", "https").
// - user: user for basic authentication header.
// - password: password for basic authentication header.
func NewClientWithBasicAuth(host, basePath, scheme, user, password string) ClientService {
transport := httptransport.New(host, basePath, []string{scheme})
transport.DefaultAuthentication = httptransport.BasicAuth(user, password)
return &Client{transport: transport, formats: strfmt.Default}
}
// NewClientWithBearerToken creates a new index API client with a bearer token for authentication.
// It takes the following parameters:
// - host: http host (github.com).
// - basePath: any base path for the API client ("/v1", "/v3").
// - scheme: http scheme ("http", "https").
// - bearerToken: bearer token for Bearer authentication header.
func NewClientWithBearerToken(host, basePath, scheme, bearerToken string) ClientService {
transport := httptransport.New(host, basePath, []string{scheme})
transport.DefaultAuthentication = httptransport.BearerToken(bearerToken)
return &Client{transport: transport, formats: strfmt.Default}
}
/*
Client for index API
*/
@ -39,7 +65,7 @@ type Client struct {
formats strfmt.Registry
}
// ClientOption is the option for Client methods
// ClientOption may be used to customize the behavior of Client methods.
type ClientOption func(*runtime.ClientOperation)
// ClientService is the interface for Client methods

View File

@ -22,6 +22,7 @@ package index
// Editing this file might prove futile when you re-run the swagger generate command
import (
"encoding/json"
"fmt"
"io"
@ -108,11 +109,13 @@ func (o *SearchIndexOK) Code() int {
}
func (o *SearchIndexOK) Error() string {
return fmt.Sprintf("[POST /api/v1/index/retrieve][%d] searchIndexOK %+v", 200, o.Payload)
payload, _ := json.Marshal(o.Payload)
return fmt.Sprintf("[POST /api/v1/index/retrieve][%d] searchIndexOK %s", 200, payload)
}
func (o *SearchIndexOK) String() string {
return fmt.Sprintf("[POST /api/v1/index/retrieve][%d] searchIndexOK %+v", 200, o.Payload)
payload, _ := json.Marshal(o.Payload)
return fmt.Sprintf("[POST /api/v1/index/retrieve][%d] searchIndexOK %s", 200, payload)
}
func (o *SearchIndexOK) GetPayload() []string {
@ -174,11 +177,13 @@ func (o *SearchIndexBadRequest) Code() int {
}
func (o *SearchIndexBadRequest) Error() string {
return fmt.Sprintf("[POST /api/v1/index/retrieve][%d] searchIndexBadRequest %+v", 400, o.Payload)
payload, _ := json.Marshal(o.Payload)
return fmt.Sprintf("[POST /api/v1/index/retrieve][%d] searchIndexBadRequest %s", 400, payload)
}
func (o *SearchIndexBadRequest) String() string {
return fmt.Sprintf("[POST /api/v1/index/retrieve][%d] searchIndexBadRequest %+v", 400, o.Payload)
payload, _ := json.Marshal(o.Payload)
return fmt.Sprintf("[POST /api/v1/index/retrieve][%d] searchIndexBadRequest %s", 400, payload)
}
func (o *SearchIndexBadRequest) GetPayload() *models.Error {
@ -246,11 +251,13 @@ func (o *SearchIndexDefault) Code() int {
}
func (o *SearchIndexDefault) Error() string {
return fmt.Sprintf("[POST /api/v1/index/retrieve][%d] searchIndex default %+v", o._statusCode, o.Payload)
payload, _ := json.Marshal(o.Payload)
return fmt.Sprintf("[POST /api/v1/index/retrieve][%d] searchIndex default %s", o._statusCode, payload)
}
func (o *SearchIndexDefault) String() string {
return fmt.Sprintf("[POST /api/v1/index/retrieve][%d] searchIndex default %+v", o._statusCode, o.Payload)
payload, _ := json.Marshal(o.Payload)
return fmt.Sprintf("[POST /api/v1/index/retrieve][%d] searchIndex default %s", o._statusCode, payload)
}
func (o *SearchIndexDefault) GetPayload() *models.Error {

View File

@ -22,6 +22,7 @@ package pubkey
// Editing this file might prove futile when you re-run the swagger generate command
import (
"encoding/json"
"fmt"
"io"
@ -102,11 +103,13 @@ func (o *GetPublicKeyOK) Code() int {
}
func (o *GetPublicKeyOK) Error() string {
return fmt.Sprintf("[GET /api/v1/log/publicKey][%d] getPublicKeyOK %+v", 200, o.Payload)
payload, _ := json.Marshal(o.Payload)
return fmt.Sprintf("[GET /api/v1/log/publicKey][%d] getPublicKeyOK %s", 200, payload)
}
func (o *GetPublicKeyOK) String() string {
return fmt.Sprintf("[GET /api/v1/log/publicKey][%d] getPublicKeyOK %+v", 200, o.Payload)
payload, _ := json.Marshal(o.Payload)
return fmt.Sprintf("[GET /api/v1/log/publicKey][%d] getPublicKeyOK %s", 200, payload)
}
func (o *GetPublicKeyOK) GetPayload() string {
@ -172,11 +175,13 @@ func (o *GetPublicKeyDefault) Code() int {
}
func (o *GetPublicKeyDefault) Error() string {
return fmt.Sprintf("[GET /api/v1/log/publicKey][%d] getPublicKey default %+v", o._statusCode, o.Payload)
payload, _ := json.Marshal(o.Payload)
return fmt.Sprintf("[GET /api/v1/log/publicKey][%d] getPublicKey default %s", o._statusCode, payload)
}
func (o *GetPublicKeyDefault) String() string {
return fmt.Sprintf("[GET /api/v1/log/publicKey][%d] getPublicKey default %+v", o._statusCode, o.Payload)
payload, _ := json.Marshal(o.Payload)
return fmt.Sprintf("[GET /api/v1/log/publicKey][%d] getPublicKey default %s", o._statusCode, payload)
}
func (o *GetPublicKeyDefault) GetPayload() *models.Error {

View File

@ -23,6 +23,7 @@ package pubkey
import (
"github.com/go-openapi/runtime"
httptransport "github.com/go-openapi/runtime/client"
"github.com/go-openapi/strfmt"
)
@ -31,6 +32,31 @@ func New(transport runtime.ClientTransport, formats strfmt.Registry) ClientServi
return &Client{transport: transport, formats: formats}
}
// NewClientWithBasicAuth creates a new pubkey API client with basic auth credentials.
// It takes the following parameters:
// - host: http host (github.com).
// - basePath: any base path for the API client ("/v1", "/v3").
// - scheme: http scheme ("http", "https").
// - user: user for basic authentication header.
// - password: password for basic authentication header.
func NewClientWithBasicAuth(host, basePath, scheme, user, password string) ClientService {
transport := httptransport.New(host, basePath, []string{scheme})
transport.DefaultAuthentication = httptransport.BasicAuth(user, password)
return &Client{transport: transport, formats: strfmt.Default}
}
// NewClientWithBearerToken creates a new pubkey API client with a bearer token for authentication.
// It takes the following parameters:
// - host: http host (github.com).
// - basePath: any base path for the API client ("/v1", "/v3").
// - scheme: http scheme ("http", "https").
// - bearerToken: bearer token for Bearer authentication header.
func NewClientWithBearerToken(host, basePath, scheme, bearerToken string) ClientService {
transport := httptransport.New(host, basePath, []string{scheme})
transport.DefaultAuthentication = httptransport.BearerToken(bearerToken)
return &Client{transport: transport, formats: strfmt.Default}
}
/*
Client for pubkey API
*/
@ -39,9 +65,33 @@ type Client struct {
formats strfmt.Registry
}
// ClientOption is the option for Client methods
// ClientOption may be used to customize the behavior of Client methods.
type ClientOption func(*runtime.ClientOperation)
// This client is generated with a few options you might find useful for your swagger spec.
//
// Feel free to add your own set of options.
// WithAccept allows the client to force the Accept header
// to negotiate a specific Producer from the server.
//
// You may use this option to set arbitrary extensions to your MIME media type.
func WithAccept(mime string) ClientOption {
return func(r *runtime.ClientOperation) {
r.ProducesMediaTypes = []string{mime}
}
}
// WithAcceptApplicationJSON sets the Accept header to "application/json".
func WithAcceptApplicationJSON(r *runtime.ClientOperation) {
r.ProducesMediaTypes = []string{"application/json"}
}
// WithAcceptApplicationxPemFile sets the Accept header to "application/x-pem-file".
func WithAcceptApplicationxPemFile(r *runtime.ClientOperation) {
r.ProducesMediaTypes = []string{"application/x-pem-file"}
}
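A hedged sketch of forcing a PEM response via these options; it assumes rekor is a *client.Rekor from GetRekorClient and that the generated NewGetPublicKeyParams constructor is available.

resp, err := rekor.Pubkey.GetPublicKey(pubkey.NewGetPublicKeyParams(),
	pubkey.WithAcceptApplicationxPemFile)
if err != nil {
	return err
}
fmt.Println(resp.GetPayload()) // PEM-encoded log public key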
// ClientService is the interface for Client methods
type ClientService interface {
GetPublicKey(params *GetPublicKeyParams, opts ...ClientOption) (*GetPublicKeyOK, error)

View File

@ -22,6 +22,7 @@ package tlog
// Editing this file might prove futile when you re-run the swagger generate command
import (
"encoding/json"
"fmt"
"io"
@ -102,11 +103,13 @@ func (o *GetLogInfoOK) Code() int {
}
func (o *GetLogInfoOK) Error() string {
return fmt.Sprintf("[GET /api/v1/log][%d] getLogInfoOK %+v", 200, o.Payload)
payload, _ := json.Marshal(o.Payload)
return fmt.Sprintf("[GET /api/v1/log][%d] getLogInfoOK %s", 200, payload)
}
func (o *GetLogInfoOK) String() string {
return fmt.Sprintf("[GET /api/v1/log][%d] getLogInfoOK %+v", 200, o.Payload)
payload, _ := json.Marshal(o.Payload)
return fmt.Sprintf("[GET /api/v1/log][%d] getLogInfoOK %s", 200, payload)
}
func (o *GetLogInfoOK) GetPayload() *models.LogInfo {
@ -174,11 +177,13 @@ func (o *GetLogInfoDefault) Code() int {
}
func (o *GetLogInfoDefault) Error() string {
return fmt.Sprintf("[GET /api/v1/log][%d] getLogInfo default %+v", o._statusCode, o.Payload)
payload, _ := json.Marshal(o.Payload)
return fmt.Sprintf("[GET /api/v1/log][%d] getLogInfo default %s", o._statusCode, payload)
}
func (o *GetLogInfoDefault) String() string {
return fmt.Sprintf("[GET /api/v1/log][%d] getLogInfo default %+v", o._statusCode, o.Payload)
payload, _ := json.Marshal(o.Payload)
return fmt.Sprintf("[GET /api/v1/log][%d] getLogInfo default %s", o._statusCode, payload)
}
func (o *GetLogInfoDefault) GetPayload() *models.Error {

View File

@ -22,6 +22,7 @@ package tlog
// Editing this file might prove futile when you re-run the swagger generate command
import (
"encoding/json"
"fmt"
"io"
@ -108,11 +109,13 @@ func (o *GetLogProofOK) Code() int {
}
func (o *GetLogProofOK) Error() string {
return fmt.Sprintf("[GET /api/v1/log/proof][%d] getLogProofOK %+v", 200, o.Payload)
payload, _ := json.Marshal(o.Payload)
return fmt.Sprintf("[GET /api/v1/log/proof][%d] getLogProofOK %s", 200, payload)
}
func (o *GetLogProofOK) String() string {
return fmt.Sprintf("[GET /api/v1/log/proof][%d] getLogProofOK %+v", 200, o.Payload)
payload, _ := json.Marshal(o.Payload)
return fmt.Sprintf("[GET /api/v1/log/proof][%d] getLogProofOK %s", 200, payload)
}
func (o *GetLogProofOK) GetPayload() *models.ConsistencyProof {
@ -176,11 +179,13 @@ func (o *GetLogProofBadRequest) Code() int {
}
func (o *GetLogProofBadRequest) Error() string {
return fmt.Sprintf("[GET /api/v1/log/proof][%d] getLogProofBadRequest %+v", 400, o.Payload)
payload, _ := json.Marshal(o.Payload)
return fmt.Sprintf("[GET /api/v1/log/proof][%d] getLogProofBadRequest %s", 400, payload)
}
func (o *GetLogProofBadRequest) String() string {
return fmt.Sprintf("[GET /api/v1/log/proof][%d] getLogProofBadRequest %+v", 400, o.Payload)
payload, _ := json.Marshal(o.Payload)
return fmt.Sprintf("[GET /api/v1/log/proof][%d] getLogProofBadRequest %s", 400, payload)
}
func (o *GetLogProofBadRequest) GetPayload() *models.Error {
@ -248,11 +253,13 @@ func (o *GetLogProofDefault) Code() int {
}
func (o *GetLogProofDefault) Error() string {
return fmt.Sprintf("[GET /api/v1/log/proof][%d] getLogProof default %+v", o._statusCode, o.Payload)
payload, _ := json.Marshal(o.Payload)
return fmt.Sprintf("[GET /api/v1/log/proof][%d] getLogProof default %s", o._statusCode, payload)
}
func (o *GetLogProofDefault) String() string {
return fmt.Sprintf("[GET /api/v1/log/proof][%d] getLogProof default %+v", o._statusCode, o.Payload)
payload, _ := json.Marshal(o.Payload)
return fmt.Sprintf("[GET /api/v1/log/proof][%d] getLogProof default %s", o._statusCode, payload)
}
func (o *GetLogProofDefault) GetPayload() *models.Error {

View File

@ -23,6 +23,7 @@ package tlog
import (
"github.com/go-openapi/runtime"
httptransport "github.com/go-openapi/runtime/client"
"github.com/go-openapi/strfmt"
)
@ -31,6 +32,31 @@ func New(transport runtime.ClientTransport, formats strfmt.Registry) ClientServi
return &Client{transport: transport, formats: formats}
}
// NewClientWithBasicAuth creates a new tlog API client with basic auth credentials.
// It takes the following parameters:
// - host: http host (github.com).
// - basePath: any base path for the API client ("/v1", "/v3").
// - scheme: http scheme ("http", "https").
// - user: user for basic authentication header.
// - password: password for basic authentication header.
func NewClientWithBasicAuth(host, basePath, scheme, user, password string) ClientService {
transport := httptransport.New(host, basePath, []string{scheme})
transport.DefaultAuthentication = httptransport.BasicAuth(user, password)
return &Client{transport: transport, formats: strfmt.Default}
}
// NewClientWithBearerToken creates a new tlog API client with a bearer token for authentication.
// It takes the following parameters:
// - host: http host (github.com).
// - basePath: any base path for the API client ("/v1", "/v3").
// - scheme: http scheme ("http", "https").
// - bearerToken: bearer token for Bearer authentication header.
func NewClientWithBearerToken(host, basePath, scheme, bearerToken string) ClientService {
transport := httptransport.New(host, basePath, []string{scheme})
transport.DefaultAuthentication = httptransport.BearerToken(bearerToken)
return &Client{transport: transport, formats: strfmt.Default}
}
/*
Client for tlog API
*/
@ -39,7 +65,7 @@ type Client struct {
formats strfmt.Registry
}
// ClientOption is the option for Client methods
// ClientOption may be used to customize the behavior of Client methods.
type ClientOption func(*runtime.ClientOperation)
// ClientService is the interface for Client methods

View File

@ -294,7 +294,7 @@ type AlpineV001SchemaPackageHash struct {
// The hashing function used to compute the hash value
// Required: true
// Enum: [sha256]
// Enum: ["sha256"]
Algorithm *string `json:"algorithm"`
// The hash value for the package

View File

@ -307,7 +307,7 @@ type CoseV001SchemaDataEnvelopeHash struct {
// The hashing function used to compute the hash value
// Required: true
// Enum: [sha256]
// Enum: ["sha256"]
Algorithm *string `json:"algorithm"`
// The hash value for the envelope
@ -417,7 +417,7 @@ type CoseV001SchemaDataPayloadHash struct {
// The hashing function used to compute the hash value
// Required: true
// Enum: [sha256]
// Enum: ["sha256"]
Algorithm *string `json:"algorithm"`
// The hash value for the content

View File

@ -312,7 +312,7 @@ type DSSEV001SchemaEnvelopeHash struct {
// The hashing function used to compute the hash value
// Required: true
// Enum: [sha256]
// Enum: ["sha256"]
Algorithm *string `json:"algorithm"`
// The value of the computed digest over the entire envelope
@ -422,7 +422,7 @@ type DSSEV001SchemaPayloadHash struct {
// The hashing function used to compute the hash value
// Required: true
// Enum: [sha256]
// Enum: ["sha256"]
Algorithm *string `json:"algorithm"`
// The value of the computed digest over the payload within the envelope

View File

@ -21,9 +21,9 @@ package models
// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command
// HashedrekordSchema Rekor Schema
// HashedrekordSchema Hashedrekord Schema
//
// # Schema for Rekord objects
// # Schema for Hashedrekord objects
//
// swagger:model hashedrekordSchema
type HashedrekordSchema interface{}

View File

@ -277,7 +277,7 @@ type HashedrekordV001SchemaDataHash struct {
// The hashing function used to compute the hash value
// Required: true
// Enum: [sha256 sha384 sha512]
// Enum: ["sha256","sha384","sha512"]
Algorithm *string `json:"algorithm"`
// The hash value for the content, as represented by a lower case hexadecimal string

View File

@ -326,7 +326,7 @@ type HelmV001SchemaChartHash struct {
// The hashing function used to compute the hash value
// Required: true
// Enum: [sha256]
// Enum: ["sha256"]
Algorithm *string `json:"algorithm"`
// The hash value for the chart

View File

@ -300,7 +300,7 @@ type IntotoV001SchemaContentHash struct {
// The hashing function used to compute the hash value
// Required: true
// Enum: [sha256]
// Enum: ["sha256"]
Algorithm *string `json:"algorithm"`
// The hash value for the archive
@ -410,7 +410,7 @@ type IntotoV001SchemaContentPayloadHash struct {
// The hashing function used to compute the hash value
// Required: true
// Enum: [sha256]
// Enum: ["sha256"]
Algorithm *string `json:"algorithm"`
// The hash value for the envelope's payload

View File

@ -543,7 +543,7 @@ type IntotoV002SchemaContentHash struct {
// The hashing function used to compute the hash value
// Required: true
// Enum: [sha256]
// Enum: ["sha256"]
Algorithm *string `json:"algorithm"`
// The hash value for the archive
@ -653,7 +653,7 @@ type IntotoV002SchemaContentPayloadHash struct {
// The hashing function used to compute the hash value
// Required: true
// Enum: [sha256]
// Enum: ["sha256"]
Algorithm *string `json:"algorithm"`
// The hash value of the payload

View File

@ -283,7 +283,7 @@ type JarV001SchemaArchiveHash struct {
// The hashing function used to compute the hash value
// Required: true
// Enum: [sha256]
// Enum: ["sha256"]
Algorithm *string `json:"algorithm"`
// The hash value for the archive

View File

@ -281,7 +281,7 @@ type RekordV001SchemaDataHash struct {
// The hashing function used to compute the hash value
// Required: true
// Enum: [sha256]
// Enum: ["sha256"]
Algorithm *string `json:"algorithm"`
// The hash value for the content
@ -396,7 +396,7 @@ type RekordV001SchemaSignature struct {
// Specifies the format of the signature
// Required: true
// Enum: [pgp minisign x509 ssh]
// Enum: ["pgp","minisign","x509","ssh"]
Format *string `json:"format"`
// public key

View File

@ -294,7 +294,7 @@ type RpmV001SchemaPackageHash struct {
// The hashing function used to compute the hash value
// Required: true
// Enum: [sha256]
// Enum: ["sha256"]
Algorithm *string `json:"algorithm"`
// The hash value for the package

View File

@ -45,7 +45,7 @@ type SearchIndex struct {
Hash string `json:"hash,omitempty"`
// operator
// Enum: [and or]
// Enum: ["and","or"]
Operator string `json:"operator,omitempty"`
// public key
@ -227,7 +227,7 @@ type SearchIndexPublicKey struct {
// format
// Required: true
// Enum: [pgp x509 minisign ssh tuf]
// Enum: ["pgp","x509","minisign","ssh","tuf"]
Format *string `json:"format"`
// url

View File

@ -18,6 +18,7 @@ package util
import (
"bufio"
"bytes"
"crypto"
"crypto/ecdsa"
"crypto/ed25519"
"crypto/rsa"
@ -53,16 +54,14 @@ func (s *SignedNote) Sign(identity string, signer signature.Signer, opts signatu
if err != nil {
return nil, fmt.Errorf("retrieving public key: %w", err)
}
pubKeyBytes, err := x509.MarshalPKIXPublicKey(pk)
pkHash, err := getPublicKeyHash(pk)
if err != nil {
return nil, fmt.Errorf("marshalling public key: %w", err)
return nil, err
}
pkSha := sha256.Sum256(pubKeyBytes)
signature := note.Signature{
Name: identity,
Hash: binary.BigEndian.Uint32(pkSha[:]),
Hash: pkHash,
Base64: base64.StdEncoding.EncodeToString(sig),
}
@ -80,15 +79,25 @@ func (s SignedNote) Verify(verifier signature.Verifier) bool {
msg := []byte(s.Note)
digest := sha256.Sum256(msg)
pk, err := verifier.PublicKey()
if err != nil {
return false
}
verifierPkHash, err := getPublicKeyHash(pk)
if err != nil {
return false
}
for _, s := range s.Signatures {
sigBytes, err := base64.StdEncoding.DecodeString(s.Base64)
if err != nil {
return false
}
pk, err := verifier.PublicKey()
if err != nil {
if s.Hash != verifierPkHash {
return false
}
opts := []signature.VerifyOption{}
switch pk.(type) {
case *rsa.PublicKey, *ecdsa.PublicKey:
@ -190,3 +199,13 @@ func SignedNoteValidator(strToValidate string) bool {
s := SignedNote{}
return s.UnmarshalText([]byte(strToValidate)) == nil
}
func getPublicKeyHash(publicKey crypto.PublicKey) (uint32, error) {
pubKeyBytes, err := x509.MarshalPKIXPublicKey(publicKey)
if err != nil {
return 0, fmt.Errorf("marshalling public key: %w", err)
}
pkSha := sha256.Sum256(pubKeyBytes)
hash := binary.BigEndian.Uint32(pkSha[:])
return hash, nil
}
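For context on the refactor above: the note key hint computed by getPublicKeyHash is the first four bytes, big-endian, of the SHA-256 digest of the DER-encoded PKIX public key. A minimal standalone sketch of the same derivation, using a throwaway ed25519 key rather than rekor's signature.Signer types:

package main

import (
	"crypto/ed25519"
	"crypto/rand"
	"crypto/sha256"
	"crypto/x509"
	"encoding/binary"
	"fmt"
)

func main() {
	// Throwaway key; in rekor the key comes from signature.Signer/Verifier.
	pub, _, err := ed25519.GenerateKey(rand.Reader)
	if err != nil {
		panic(err)
	}
	// Same steps as getPublicKeyHash: marshal to PKIX DER, hash with
	// SHA-256, and take the first 4 bytes as a big-endian uint32.
	der, err := x509.MarshalPKIXPublicKey(pub)
	if err != nil {
		panic(err)
	}
	sum := sha256.Sum256(der)
	fmt.Printf("key hint: %08x\n", binary.BigEndian.Uint32(sum[:]))
}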


@ -18,13 +18,6 @@ const (
WriteErrorKey = attribute.Key("http.write_error") // if an error occurred while writing a reply, the string of the error (io.EOF is not recorded)
)
// Server HTTP metrics.
const (
serverRequestSize = "http.server.request.size" // Incoming request bytes total
serverResponseSize = "http.server.response.size" // Incoming response bytes total
serverDuration = "http.server.duration" // Incoming end to end duration, milliseconds
)
// Client HTTP metrics.
const (
clientRequestSize = "http.client.request.size" // Outgoing request bytes total


@ -8,6 +8,8 @@ import (
"net/http"
"net/http/httptrace"
"go.opentelemetry.io/otel/attribute"
"go.opentelemetry.io/otel"
"go.opentelemetry.io/otel/metric"
"go.opentelemetry.io/otel/propagation"
@ -35,6 +37,7 @@ type config struct {
TracerProvider trace.TracerProvider
MeterProvider metric.MeterProvider
MetricAttributesFn func(*http.Request) []attribute.KeyValue
}
// Option interface used for setting optional config properties.
@ -194,3 +197,11 @@ func WithServerName(server string) Option {
c.ServerName = server
})
}
// WithMetricAttributesFn returns an Option to set a function that maps an HTTP request to a slice of attribute.KeyValue.
// These attributes will be included in metrics for every request.
func WithMetricAttributesFn(metricAttributesFn func(r *http.Request) []attribute.KeyValue) Option {
return optionFunc(func(c *config) {
c.MetricAttributesFn = metricAttributesFn
})
}
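For reference, a minimal usage sketch of the new option; whether these attributes reach server or client metrics depends on the rest of the vendored code, and the "X-Tenant" header and "tenant" attribute key are illustrative, not part of the API:

package main

import (
	"net/http"

	"go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp"
	"go.opentelemetry.io/otel/attribute"
)

func main() {
	mux := http.NewServeMux()
	mux.HandleFunc("/", func(w http.ResponseWriter, _ *http.Request) {
		_, _ = w.Write([]byte("ok"))
	})
	// Attach a per-request attribute to the metrics recorded for each request.
	handler := otelhttp.NewHandler(mux, "server",
		otelhttp.WithMetricAttributesFn(func(r *http.Request) []attribute.KeyValue {
			return []attribute.KeyValue{attribute.String("tenant", r.Header.Get("X-Tenant"))}
		}))
	_ = http.ListenAndServe(":8080", handler)
}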


@ -9,11 +9,9 @@ import (
"github.com/felixge/httpsnoop"
"go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/request"
"go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv"
"go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconvutil"
"go.opentelemetry.io/otel"
"go.opentelemetry.io/otel/attribute"
"go.opentelemetry.io/otel/metric"
"go.opentelemetry.io/otel/propagation"
"go.opentelemetry.io/otel/trace"
)
@ -24,7 +22,6 @@ type middleware struct {
server string
tracer trace.Tracer
meter metric.Meter
propagators propagation.TextMapPropagator
spanStartOptions []trace.SpanStartOption
readEvent bool
@ -34,10 +31,7 @@ type middleware struct {
publicEndpoint bool
publicEndpointFn func(*http.Request) bool
traceSemconv semconv.HTTPServer
requestBytesCounter metric.Int64Counter
responseBytesCounter metric.Int64Counter
serverLatencyMeasure metric.Float64Histogram
semconv semconv.HTTPServer
}
func defaultHandlerFormatter(operation string, _ *http.Request) string {
@ -56,8 +50,6 @@ func NewHandler(handler http.Handler, operation string, opts ...Option) http.Han
func NewMiddleware(operation string, opts ...Option) func(http.Handler) http.Handler {
h := middleware{
operation: operation,
traceSemconv: semconv.NewHTTPServer(),
}
defaultOpts := []Option{
@ -67,7 +59,6 @@ func NewMiddleware(operation string, opts ...Option) func(http.Handler) http.Han
c := newConfig(append(defaultOpts, opts...)...)
h.configure(c)
h.createMeasures()
return func(next http.Handler) http.Handler {
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
@ -78,7 +69,6 @@ func NewMiddleware(operation string, opts ...Option) func(http.Handler) http.Han
func (h *middleware) configure(c *config) {
h.tracer = c.Tracer
h.meter = c.Meter
h.propagators = c.Propagators
h.spanStartOptions = c.SpanStartOptions
h.readEvent = c.ReadEvent
@ -88,6 +78,7 @@ func (h *middleware) configure(c *config) {
h.publicEndpoint = c.PublicEndpoint
h.publicEndpointFn = c.PublicEndpointFn
h.server = c.ServerName
h.semconv = semconv.NewHTTPServer(c.Meter)
}
func handleErr(err error) {
@ -96,30 +87,6 @@ func handleErr(err error) {
}
}
func (h *middleware) createMeasures() {
var err error
h.requestBytesCounter, err = h.meter.Int64Counter(
serverRequestSize,
metric.WithUnit("By"),
metric.WithDescription("Measures the size of HTTP request messages."),
)
handleErr(err)
h.responseBytesCounter, err = h.meter.Int64Counter(
serverResponseSize,
metric.WithUnit("By"),
metric.WithDescription("Measures the size of HTTP response messages."),
)
handleErr(err)
h.serverLatencyMeasure, err = h.meter.Float64Histogram(
serverDuration,
metric.WithUnit("ms"),
metric.WithDescription("Measures the duration of inbound HTTP requests."),
)
handleErr(err)
}
// serveHTTP sets up tracing and calls the given next http.Handler with the span
// context injected into the request context.
func (h *middleware) serveHTTP(w http.ResponseWriter, r *http.Request, next http.Handler) {
@ -134,7 +101,7 @@ func (h *middleware) serveHTTP(w http.ResponseWriter, r *http.Request, next http
ctx := h.propagators.Extract(r.Context(), propagation.HeaderCarrier(r.Header))
opts := []trace.SpanStartOption{
trace.WithAttributes(h.traceSemconv.RequestTraceAttrs(h.server, r)...),
trace.WithAttributes(h.semconv.RequestTraceAttrs(h.server, r)...),
}
opts = append(opts, h.spanStartOptions...)
@ -166,14 +133,12 @@ func (h *middleware) serveHTTP(w http.ResponseWriter, r *http.Request, next http
}
}
var bw bodyWrapper
// if request body is nil or NoBody, we don't want to mutate the body as it
// will affect the identity of it in an unforeseeable way because we assert
// ReadCloser fulfills a certain interface and it is indeed nil or NoBody.
bw := request.NewBodyWrapper(r.Body, readRecordFunc)
if r.Body != nil && r.Body != http.NoBody {
bw.ReadCloser = r.Body
bw.record = readRecordFunc
r.Body = &bw
r.Body = bw
}
writeRecordFunc := func(int64) {}
@ -183,13 +148,7 @@ func (h *middleware) serveHTTP(w http.ResponseWriter, r *http.Request, next http
}
}
rww := &respWriterWrapper{
ResponseWriter: w,
record: writeRecordFunc,
ctx: ctx,
props: h.propagators,
statusCode: http.StatusOK, // default status code in case the Handler doesn't write anything
}
rww := request.NewRespWriterWrapper(w, writeRecordFunc)
// Wrap w to use our ResponseWriter methods while also exposing
// other interfaces that w may implement (http.CloseNotifier,
@ -217,35 +176,35 @@ func (h *middleware) serveHTTP(w http.ResponseWriter, r *http.Request, next http
next.ServeHTTP(w, r.WithContext(ctx))
span.SetStatus(semconv.ServerStatus(rww.statusCode))
span.SetAttributes(h.traceSemconv.ResponseTraceAttrs(semconv.ResponseTelemetry{
StatusCode: rww.statusCode,
ReadBytes: bw.read.Load(),
ReadError: bw.err,
WriteBytes: rww.written,
WriteError: rww.err,
statusCode := rww.StatusCode()
bytesWritten := rww.BytesWritten()
span.SetStatus(h.semconv.Status(statusCode))
span.SetAttributes(h.semconv.ResponseTraceAttrs(semconv.ResponseTelemetry{
StatusCode: statusCode,
ReadBytes: bw.BytesRead(),
ReadError: bw.Error(),
WriteBytes: bytesWritten,
WriteError: rww.Error(),
})...)
// Add metrics
attributes := append(labeler.Get(), semconvutil.HTTPServerRequestMetrics(h.server, r)...)
if rww.statusCode > 0 {
attributes = append(attributes, semconv.HTTPStatusCode(rww.statusCode))
}
o := metric.WithAttributeSet(attribute.NewSet(attributes...))
addOpts := []metric.AddOption{o} // Allocate vararg slice once.
h.requestBytesCounter.Add(ctx, bw.read.Load(), addOpts...)
h.responseBytesCounter.Add(ctx, rww.written, addOpts...)
// Use floating point division here for higher precision (instead of Millisecond method).
elapsedTime := float64(time.Since(requestStartTime)) / float64(time.Millisecond)
h.serverLatencyMeasure.Record(ctx, elapsedTime, o)
h.semconv.RecordMetrics(ctx, semconv.MetricData{
ServerName: h.server,
Req: r,
StatusCode: statusCode,
AdditionalAttributes: labeler.Get(),
RequestSize: bw.BytesRead(),
ResponseSize: bytesWritten,
ElapsedTime: elapsedTime,
})
}
// WithRouteTag annotates spans and metrics with the provided route name
// with HTTP route attribute.
func WithRouteTag(route string, h http.Handler) http.Handler {
attr := semconv.NewHTTPServer().Route(route)
attr := semconv.NewHTTPServer(nil).Route(route)
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
span := trace.SpanFromContext(r.Context())
span.SetAttributes(attr)


@ -0,0 +1,75 @@
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
package request // import "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/request"
import (
"io"
"sync"
)
var _ io.ReadCloser = &BodyWrapper{}
// BodyWrapper wraps a http.Request.Body (an io.ReadCloser) to track the number
// of bytes read and the last error.
type BodyWrapper struct {
io.ReadCloser
OnRead func(n int64) // must not be nil
mu sync.Mutex
read int64
err error
}
// NewBodyWrapper creates a new BodyWrapper.
//
// The onRead parameter is a callback invoked on every read, with the number
// of bytes that were read.
func NewBodyWrapper(body io.ReadCloser, onRead func(int64)) *BodyWrapper {
return &BodyWrapper{
ReadCloser: body,
OnRead: onRead,
}
}
// Read reads the data from the io.ReadCloser, and stores the number of bytes
// read and the error.
func (w *BodyWrapper) Read(b []byte) (int, error) {
n, err := w.ReadCloser.Read(b)
n1 := int64(n)
w.updateReadData(n1, err)
w.OnRead(n1)
return n, err
}
func (w *BodyWrapper) updateReadData(n int64, err error) {
w.mu.Lock()
defer w.mu.Unlock()
w.read += n
if err != nil {
w.err = err
}
}
// Close closes the io.ReadCloser.
func (w *BodyWrapper) Close() error {
return w.ReadCloser.Close()
}
// BytesRead returns the number of bytes read up to this point.
func (w *BodyWrapper) BytesRead() int64 {
w.mu.Lock()
defer w.mu.Unlock()
return w.read
}
// Error returns the last error.
func (w *BodyWrapper) Error() error {
w.mu.Lock()
defer w.mu.Unlock()
return w.err
}
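Since BodyWrapper lives under internal/, it cannot be imported by user code; the read-tracking pattern it implements can still be sketched standalone. countingReader below is a hypothetical stand-in that omits BodyWrapper's mutex and OnRead callback:

package main

import (
	"fmt"
	"io"
	"strings"
)

// countingReader mirrors BodyWrapper's bookkeeping: it wraps a reader,
// accumulates the bytes read, and remembers the last error returned.
type countingReader struct {
	r    io.Reader
	read int64
	err  error
}

func (c *countingReader) Read(p []byte) (int, error) {
	n, err := c.r.Read(p)
	c.read += int64(n)
	if err != nil {
		c.err = err
	}
	return n, err
}

func main() {
	cr := &countingReader{r: strings.NewReader("hello, body")}
	_, _ = io.Copy(io.Discard, cr)
	fmt.Println("bytes read:", cr.read, "last err:", cr.err) // 11, EOF
}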


@ -0,0 +1,112 @@
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
package request // import "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/request"
import (
"net/http"
"sync"
)
var _ http.ResponseWriter = &RespWriterWrapper{}
// RespWriterWrapper wraps a http.ResponseWriter in order to track the number of
// bytes written, the last error, and to catch the first written statusCode.
// TODO: The wrapped http.ResponseWriter doesn't implement any of the optional
// types (http.Hijacker, http.Pusher, http.CloseNotifier, etc)
// that may be useful when using it in real life situations.
type RespWriterWrapper struct {
http.ResponseWriter
OnWrite func(n int64) // must not be nil
mu sync.RWMutex
written int64
statusCode int
err error
wroteHeader bool
}
// NewRespWriterWrapper creates a new RespWriterWrapper.
//
// The onWrite parameter is a callback invoked on every write, with the number
// of bytes that were written.
func NewRespWriterWrapper(w http.ResponseWriter, onWrite func(int64)) *RespWriterWrapper {
return &RespWriterWrapper{
ResponseWriter: w,
OnWrite: onWrite,
statusCode: http.StatusOK, // default status code in case the Handler doesn't write anything
}
}
// Write writes the byte slice into the [ResponseWriter], and tracks the
// number of bytes written and last error.
func (w *RespWriterWrapper) Write(p []byte) (int, error) {
w.mu.Lock()
defer w.mu.Unlock()
w.writeHeader(http.StatusOK)
n, err := w.ResponseWriter.Write(p)
n1 := int64(n)
w.OnWrite(n1)
w.written += n1
w.err = err
return n, err
}
// WriteHeader persists initial statusCode for span attribution.
// All calls to WriteHeader will be propagated to the underlying ResponseWriter
// and will persist the statusCode from the first call.
// Blocking consecutive calls would alter the expected behavior and would suppress
// the net/http warning logs that alert developers to incorrect handler implementations.
func (w *RespWriterWrapper) WriteHeader(statusCode int) {
w.mu.Lock()
defer w.mu.Unlock()
w.writeHeader(statusCode)
}
// writeHeader persists the status code for span attribution, and propagates
// the call to the underlying ResponseWriter.
// It does not acquire a lock, and therefore assumes that it is being handled by a
// parent method.
func (w *RespWriterWrapper) writeHeader(statusCode int) {
if !w.wroteHeader {
w.wroteHeader = true
w.statusCode = statusCode
}
w.ResponseWriter.WriteHeader(statusCode)
}
// Flush implements [http.Flusher].
func (w *RespWriterWrapper) Flush() {
w.WriteHeader(http.StatusOK)
if f, ok := w.ResponseWriter.(http.Flusher); ok {
f.Flush()
}
}
// BytesWritten returns the number of bytes written.
func (w *RespWriterWrapper) BytesWritten() int64 {
w.mu.RLock()
defer w.mu.RUnlock()
return w.written
}
// StatusCode returns the HTTP status code that was sent.
func (w *RespWriterWrapper) StatusCode() int {
w.mu.RLock()
defer w.mu.RUnlock()
return w.statusCode
}
// Error returns the last error.
func (w *RespWriterWrapper) Error() error {
w.mu.RLock()
defer w.mu.RUnlock()
return w.err
}


@ -4,6 +4,7 @@
package semconv // import "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv"
import (
"context"
"fmt"
"net/http"
"os"
@ -11,6 +12,7 @@ import (
"go.opentelemetry.io/otel/attribute"
"go.opentelemetry.io/otel/codes"
"go.opentelemetry.io/otel/metric"
)
type ResponseTelemetry struct {
@ -23,6 +25,11 @@ type ResponseTelemetry struct {
type HTTPServer struct {
duplicate bool
// Old metrics
requestBytesCounter metric.Int64Counter
responseBytesCounter metric.Int64Counter
serverLatencyMeasure metric.Float64Histogram
}
// RequestTraceAttrs returns trace attributes for an HTTP request received by a
@ -63,15 +70,10 @@ func (s HTTPServer) Route(route string) attribute.KeyValue {
return oldHTTPServer{}.Route(route)
}
func NewHTTPServer() HTTPServer {
env := strings.ToLower(os.Getenv("OTEL_HTTP_CLIENT_COMPATIBILITY_MODE"))
return HTTPServer{duplicate: env == "http/dup"}
}
// ServerStatus returns a span status code and message for an HTTP status code
// Status returns a span status code and message for an HTTP status code
// value returned by a server. Status codes in the 400-499 range are not
// returned as errors.
func ServerStatus(code int) (codes.Code, string) {
func (s HTTPServer) Status(code int) (codes.Code, string) {
if code < 100 || code >= 600 {
return codes.Error, fmt.Sprintf("Invalid HTTP status code %d", code)
}
@ -80,3 +82,84 @@ func ServerStatus(code int) (codes.Code, string) {
}
return codes.Unset, ""
}
type MetricData struct {
ServerName string
Req *http.Request
StatusCode int
AdditionalAttributes []attribute.KeyValue
RequestSize int64
ResponseSize int64
ElapsedTime float64
}
func (s HTTPServer) RecordMetrics(ctx context.Context, md MetricData) {
if s.requestBytesCounter == nil || s.responseBytesCounter == nil || s.serverLatencyMeasure == nil {
// This will happen if an HTTPServer{} is used instead of NewHTTPServer.
return
}
attributes := oldHTTPServer{}.MetricAttributes(md.ServerName, md.Req, md.StatusCode, md.AdditionalAttributes)
o := metric.WithAttributeSet(attribute.NewSet(attributes...))
addOpts := []metric.AddOption{o} // Allocate vararg slice once.
s.requestBytesCounter.Add(ctx, md.RequestSize, addOpts...)
s.responseBytesCounter.Add(ctx, md.ResponseSize, addOpts...)
s.serverLatencyMeasure.Record(ctx, md.ElapsedTime, o)
// TODO: Duplicate Metrics
}
func NewHTTPServer(meter metric.Meter) HTTPServer {
env := strings.ToLower(os.Getenv("OTEL_SEMCONV_STABILITY_OPT_IN"))
duplicate := env == "http/dup"
server := HTTPServer{
duplicate: duplicate,
}
server.requestBytesCounter, server.responseBytesCounter, server.serverLatencyMeasure = oldHTTPServer{}.createMeasures(meter)
return server
}
type HTTPClient struct {
duplicate bool
}
func NewHTTPClient() HTTPClient {
env := strings.ToLower(os.Getenv("OTEL_SEMCONV_STABILITY_OPT_IN"))
return HTTPClient{duplicate: env == "http/dup"}
}
// RequestTraceAttrs returns attributes for an HTTP request made by a client.
func (c HTTPClient) RequestTraceAttrs(req *http.Request) []attribute.KeyValue {
if c.duplicate {
return append(oldHTTPClient{}.RequestTraceAttrs(req), newHTTPClient{}.RequestTraceAttrs(req)...)
}
return oldHTTPClient{}.RequestTraceAttrs(req)
}
// ResponseTraceAttrs returns trace attributes for an HTTP response received by a client.
func (c HTTPClient) ResponseTraceAttrs(resp *http.Response) []attribute.KeyValue {
if c.duplicate {
return append(oldHTTPClient{}.ResponseTraceAttrs(resp), newHTTPClient{}.ResponseTraceAttrs(resp)...)
}
return oldHTTPClient{}.ResponseTraceAttrs(resp)
}
func (c HTTPClient) Status(code int) (codes.Code, string) {
if code < 100 || code >= 600 {
return codes.Error, fmt.Sprintf("Invalid HTTP status code %d", code)
}
if code >= 400 {
return codes.Error, ""
}
return codes.Unset, ""
}
func (c HTTPClient) ErrorType(err error) attribute.KeyValue {
if c.duplicate {
return newHTTPClient{}.ErrorType(err)
}
return attribute.KeyValue{}
}


@ -4,11 +4,14 @@
package semconv // import "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv"
import (
"fmt"
"net/http"
"reflect"
"strconv"
"strings"
"go.opentelemetry.io/otel/attribute"
semconvNew "go.opentelemetry.io/otel/semconv/v1.24.0"
semconvNew "go.opentelemetry.io/otel/semconv/v1.26.0"
)
type newHTTPServer struct{}
@ -195,3 +198,151 @@ func (n newHTTPServer) ResponseTraceAttrs(resp ResponseTelemetry) []attribute.Ke
func (n newHTTPServer) Route(route string) attribute.KeyValue {
return semconvNew.HTTPRoute(route)
}
type newHTTPClient struct{}
// RequestTraceAttrs returns trace attributes for an HTTP request made by a client.
func (n newHTTPClient) RequestTraceAttrs(req *http.Request) []attribute.KeyValue {
/*
below attributes are returned:
- http.request.method
- http.request.method.original
- url.full
- server.address
- server.port
- network.protocol.name
- network.protocol.version
*/
numOfAttributes := 3 // method, URL, and server address are always set; the rest are conditional.
var urlHost string
if req.URL != nil {
urlHost = req.URL.Host
}
var requestHost string
var requestPort int
for _, hostport := range []string{urlHost, req.Header.Get("Host")} {
requestHost, requestPort = splitHostPort(hostport)
if requestHost != "" || requestPort > 0 {
break
}
}
eligiblePort := requiredHTTPPort(req.URL != nil && req.URL.Scheme == "https", requestPort)
if eligiblePort > 0 {
numOfAttributes++
}
useragent := req.UserAgent()
if useragent != "" {
numOfAttributes++
}
protoName, protoVersion := netProtocol(req.Proto)
if protoName != "" && protoName != "http" {
numOfAttributes++
}
if protoVersion != "" {
numOfAttributes++
}
method, originalMethod := n.method(req.Method)
if originalMethod != (attribute.KeyValue{}) {
numOfAttributes++
}
attrs := make([]attribute.KeyValue, 0, numOfAttributes)
attrs = append(attrs, method)
if originalMethod != (attribute.KeyValue{}) {
attrs = append(attrs, originalMethod)
}
var u string
if req.URL != nil {
// Remove any username/password info that may be in the URL.
userinfo := req.URL.User
req.URL.User = nil
u = req.URL.String()
// Restore any username/password info that was removed.
req.URL.User = userinfo
}
attrs = append(attrs, semconvNew.URLFull(u))
attrs = append(attrs, semconvNew.ServerAddress(requestHost))
if eligiblePort > 0 {
attrs = append(attrs, semconvNew.ServerPort(eligiblePort))
}
if protoName != "" && protoName != "http" {
attrs = append(attrs, semconvNew.NetworkProtocolName(protoName))
}
if protoVersion != "" {
attrs = append(attrs, semconvNew.NetworkProtocolVersion(protoVersion))
}
return attrs
}
// ResponseTraceAttrs returns trace attributes for an HTTP response received by a client.
func (n newHTTPClient) ResponseTraceAttrs(resp *http.Response) []attribute.KeyValue {
/*
below attributes are returned:
- http.response.status_code
- error.type
*/
var count int
if resp.StatusCode > 0 {
count++
}
if isErrorStatusCode(resp.StatusCode) {
count++
}
attrs := make([]attribute.KeyValue, 0, count)
if resp.StatusCode > 0 {
attrs = append(attrs, semconvNew.HTTPResponseStatusCode(resp.StatusCode))
}
if isErrorStatusCode(resp.StatusCode) {
errorType := strconv.Itoa(resp.StatusCode)
attrs = append(attrs, semconvNew.ErrorTypeKey.String(errorType))
}
return attrs
}
func (n newHTTPClient) ErrorType(err error) attribute.KeyValue {
t := reflect.TypeOf(err)
var value string
if t.PkgPath() == "" && t.Name() == "" {
// Likely a builtin type.
value = t.String()
} else {
value = fmt.Sprintf("%s.%s", t.PkgPath(), t.Name())
}
if value == "" {
return semconvNew.ErrorTypeOther
}
return semconvNew.ErrorTypeKey.String(value)
}
func (n newHTTPClient) method(method string) (attribute.KeyValue, attribute.KeyValue) {
if method == "" {
return semconvNew.HTTPRequestMethodGet, attribute.KeyValue{}
}
if attr, ok := methodLookup[method]; ok {
return attr, attribute.KeyValue{}
}
orig := semconvNew.HTTPRequestMethodOriginal(method)
if attr, ok := methodLookup[strings.ToUpper(method)]; ok {
return attr, orig
}
return semconvNew.HTTPRequestMethodGet, orig
}
func isErrorStatusCode(code int) bool {
return code >= 400 || code < 100
}


@ -9,8 +9,9 @@ import (
"strconv"
"strings"
"go.opentelemetry.io/otel"
"go.opentelemetry.io/otel/attribute"
semconvNew "go.opentelemetry.io/otel/semconv/v1.24.0"
semconvNew "go.opentelemetry.io/otel/semconv/v1.26.0"
)
// splitHostPort splits a network address hostport of the form "host",
@ -49,7 +50,7 @@ func splitHostPort(hostport string) (host string, port int) {
if err != nil {
return
}
return host, int(p)
return host, int(p) // nolint: gosec // Bit size checked to be 16 above.
}
func requiredHTTPPort(https bool, port int) int { // nolint:revive
@ -89,3 +90,9 @@ var methodLookup = map[string]attribute.KeyValue{
http.MethodPut: semconvNew.HTTPRequestMethodPut,
http.MethodTrace: semconvNew.HTTPRequestMethodTrace,
}
func handleErr(err error) {
if err != nil {
otel.Handle(err)
}
}


@ -7,9 +7,13 @@ import (
"errors"
"io"
"net/http"
"slices"
"strings"
"go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconvutil"
"go.opentelemetry.io/otel/attribute"
"go.opentelemetry.io/otel/metric"
"go.opentelemetry.io/otel/metric/noop"
semconv "go.opentelemetry.io/otel/semconv/v1.20.0"
)
@ -72,3 +76,117 @@ func (o oldHTTPServer) Route(route string) attribute.KeyValue {
func HTTPStatusCode(status int) attribute.KeyValue {
return semconv.HTTPStatusCode(status)
}
// Server HTTP metrics.
const (
serverRequestSize = "http.server.request.size" // Incoming request bytes total
serverResponseSize = "http.server.response.size" // Incoming response bytes total
serverDuration = "http.server.duration" // Incoming end to end duration, milliseconds
)
func (h oldHTTPServer) createMeasures(meter metric.Meter) (metric.Int64Counter, metric.Int64Counter, metric.Float64Histogram) {
if meter == nil {
return noop.Int64Counter{}, noop.Int64Counter{}, noop.Float64Histogram{}
}
var err error
requestBytesCounter, err := meter.Int64Counter(
serverRequestSize,
metric.WithUnit("By"),
metric.WithDescription("Measures the size of HTTP request messages."),
)
handleErr(err)
responseBytesCounter, err := meter.Int64Counter(
serverResponseSize,
metric.WithUnit("By"),
metric.WithDescription("Measures the size of HTTP response messages."),
)
handleErr(err)
serverLatencyMeasure, err := meter.Float64Histogram(
serverDuration,
metric.WithUnit("ms"),
metric.WithDescription("Measures the duration of inbound HTTP requests."),
)
handleErr(err)
return requestBytesCounter, responseBytesCounter, serverLatencyMeasure
}
func (o oldHTTPServer) MetricAttributes(server string, req *http.Request, statusCode int, additionalAttributes []attribute.KeyValue) []attribute.KeyValue {
n := len(additionalAttributes) + 3
var host string
var p int
if server == "" {
host, p = splitHostPort(req.Host)
} else {
// Prioritize the primary server name.
host, p = splitHostPort(server)
if p < 0 {
_, p = splitHostPort(req.Host)
}
}
hostPort := requiredHTTPPort(req.TLS != nil, p)
if hostPort > 0 {
n++
}
protoName, protoVersion := netProtocol(req.Proto)
if protoName != "" {
n++
}
if protoVersion != "" {
n++
}
if statusCode > 0 {
n++
}
attributes := slices.Grow(additionalAttributes, n)
attributes = append(attributes,
o.methodMetric(req.Method),
o.scheme(req.TLS != nil),
semconv.NetHostName(host))
if hostPort > 0 {
attributes = append(attributes, semconv.NetHostPort(hostPort))
}
if protoName != "" {
attributes = append(attributes, semconv.NetProtocolName(protoName))
}
if protoVersion != "" {
attributes = append(attributes, semconv.NetProtocolVersion(protoVersion))
}
if statusCode > 0 {
attributes = append(attributes, semconv.HTTPStatusCode(statusCode))
}
return attributes
}
func (o oldHTTPServer) methodMetric(method string) attribute.KeyValue {
method = strings.ToUpper(method)
switch method {
case http.MethodConnect, http.MethodDelete, http.MethodGet, http.MethodHead, http.MethodOptions, http.MethodPatch, http.MethodPost, http.MethodPut, http.MethodTrace:
default:
method = "_OTHER"
}
return semconv.HTTPMethod(method)
}
func (o oldHTTPServer) scheme(https bool) attribute.KeyValue { // nolint:revive
if https {
return semconv.HTTPSchemeHTTPS
}
return semconv.HTTPSchemeHTTP
}
type oldHTTPClient struct{}
func (o oldHTTPClient) RequestTraceAttrs(req *http.Request) []attribute.KeyValue {
return semconvutil.HTTPClientRequest(req)
}
func (o oldHTTPClient) ResponseTraceAttrs(resp *http.Response) []attribute.KeyValue {
return semconvutil.HTTPClientResponse(resp)
}
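The methodMetric normalization above bounds metric cardinality: any method outside the nine standard HTTP methods is recorded as "_OTHER". A standalone sketch of the same rule (normalizeMethod is an illustrative helper, not part of the package):

package main

import (
	"fmt"
	"net/http"
	"strings"
)

// normalizeMethod mirrors oldHTTPServer.methodMetric: uppercase the method
// and collapse anything non-standard to "_OTHER" to keep cardinality bounded.
func normalizeMethod(method string) string {
	method = strings.ToUpper(method)
	switch method {
	case http.MethodConnect, http.MethodDelete, http.MethodGet, http.MethodHead,
		http.MethodOptions, http.MethodPatch, http.MethodPost, http.MethodPut, http.MethodTrace:
		return method
	default:
		return "_OTHER"
	}
}

func main() {
	fmt.Println(normalizeMethod("get"))   // GET
	fmt.Println(normalizeMethod("PURGE")) // _OTHER
}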


@ -195,7 +195,7 @@ func splitHostPort(hostport string) (host string, port int) {
if err != nil {
return
}
return host, int(p)
return host, int(p) // nolint: gosec // Bitsize checked to be 16 above.
}
func netProtocol(proto string) (name string, version string) {
