Merge pull request #24326 from Luap99/vendor

vendor: update c/{buildah,common,image,storage}

commit e4d6fa7740

--- a/go.mod
+++ b/go.mod
@@ -12,15 +12,15 @@ require (
 	github.com/checkpoint-restore/checkpointctl v1.2.1
 	github.com/checkpoint-restore/go-criu/v7 v7.2.0
 	github.com/containernetworking/plugins v1.5.1
-	github.com/containers/buildah v1.37.1-0.20241002152719-c68e17b4ffed
+	github.com/containers/buildah v1.37.1-0.20241018144937-2551c8f3d110
-	github.com/containers/common v0.60.1-0.20241011155906-25644f144d66
+	github.com/containers/common v0.60.1-0.20241018183244-7e6f2b4d6de7
 	github.com/containers/conmon v2.0.20+incompatible
 	github.com/containers/gvisor-tap-vsock v0.7.5
-	github.com/containers/image/v5 v5.32.3-0.20240923171149-9e1153a28c46
+	github.com/containers/image/v5 v5.32.3-0.20241016192323-a66152c1cdf6
 	github.com/containers/libhvee v0.7.1
 	github.com/containers/ocicrypt v1.2.0
 	github.com/containers/psgo v1.9.0
-	github.com/containers/storage v1.55.1-0.20241008185503-a397602515fd
+	github.com/containers/storage v1.55.1-0.20241017155235-4db236377c55
 	github.com/containers/winquit v1.1.0
 	github.com/coreos/go-systemd/v22 v22.5.1-0.20231103132048-7d375ecc2b09
 	github.com/coreos/stream-metadata-go v0.4.4
@@ -104,7 +104,7 @@ require (
 	github.com/containerd/typeurl/v2 v2.2.0 // indirect
 	github.com/containernetworking/cni v1.2.3 // indirect
 	github.com/containers/libtrust v0.0.0-20230121012942-c1716e8a8d01 // indirect
-	github.com/containers/luksy v0.0.0-20240812184316-2e7307c02f06 // indirect
+	github.com/containers/luksy v0.0.0-20241007190014-e2530d691420 // indirect
 	github.com/coreos/go-oidc/v3 v3.11.0 // indirect
 	github.com/coreos/go-systemd v0.0.0-20190719114852-fd7a80b32e1f // indirect
 	github.com/cyberphone/json-canonicalization v0.0.0-20231217050601-ba74d44ecf5f // indirect
@@ -153,7 +153,7 @@ require (
 	github.com/inconshreveable/mousetrap v1.1.0 // indirect
 	github.com/jinzhu/copier v0.4.0 // indirect
 	github.com/josharian/intern v1.0.0 // indirect
-	github.com/klauspost/compress v1.17.10 // indirect
+	github.com/klauspost/compress v1.17.11 // indirect
 	github.com/klauspost/cpuid/v2 v2.2.8 // indirect
 	github.com/kr/fs v0.1.0 // indirect
 	github.com/leodido/go-urn v1.2.4 // indirect

--- a/go.sum
+++ b/go.sum
@@ -77,28 +77,28 @@ github.com/containernetworking/cni v1.2.3 h1:hhOcjNVUQTnzdRJ6alC5XF+wd9mfGIUaj8F
 github.com/containernetworking/cni v1.2.3/go.mod h1:DuLgF+aPd3DzcTQTtp/Nvl1Kim23oFKdm2okJzBQA5M=
 github.com/containernetworking/plugins v1.5.1 h1:T5ji+LPYjjgW0QM+KyrigZbLsZ8jaX+E5J/EcKOE4gQ=
 github.com/containernetworking/plugins v1.5.1/go.mod h1:MIQfgMayGuHYs0XdNudf31cLLAC+i242hNm6KuDGqCM=
-github.com/containers/buildah v1.37.1-0.20241002152719-c68e17b4ffed h1:qiE4J6RukT5+a2wV+Xeimu0c4Xx6DZrdt8JiP3c9CY8=
+github.com/containers/buildah v1.37.1-0.20241018144937-2551c8f3d110 h1:YcrjUM1CwDTEnaPHgpVmjid/R3zAVXRRHcgknQsajlI=
-github.com/containers/buildah v1.37.1-0.20241002152719-c68e17b4ffed/go.mod h1:ytj7qYHUdP/p+2lAXVaFSHDyYFJZ3y1ikpFERypXbCI=
+github.com/containers/buildah v1.37.1-0.20241018144937-2551c8f3d110/go.mod h1:SVyERMThmMXGTdle/9MdRuX2Ae7eVY9qDVartYXIx7E=
-github.com/containers/common v0.60.1-0.20241011155906-25644f144d66 h1:3Op65/b+uB4ech61GRBHNggW5aGDoChPUDG2++tkHB8=
+github.com/containers/common v0.60.1-0.20241018183244-7e6f2b4d6de7 h1:EFzq3sjwy0vBr9RoZPzTbtoGZR4hoZsmgxuIs5Uc5FA=
-github.com/containers/common v0.60.1-0.20241011155906-25644f144d66/go.mod h1:GRT29AbW4CdqEWP/jSxHyUvV5fprOzsCdhsFhqJiU4s=
+github.com/containers/common v0.60.1-0.20241018183244-7e6f2b4d6de7/go.mod h1:T8vpUWd7AspK7CMELf/c+NCZB6bKrRkLriRCspdFGyQ=
 github.com/containers/conmon v2.0.20+incompatible h1:YbCVSFSCqFjjVwHTPINGdMX1F6JXHGTUje2ZYobNrkg=
 github.com/containers/conmon v2.0.20+incompatible/go.mod h1:hgwZ2mtuDrppv78a/cOBNiCm6O0UMWGx1mu7P00nu5I=
 github.com/containers/gvisor-tap-vsock v0.7.5 h1:bTy4u3DOmmUPwurL6me2rsgfypAFDhyeJleUcQmBR/E=
 github.com/containers/gvisor-tap-vsock v0.7.5/go.mod h1:GW9jOqAEEGdaS20XwTYdm6KCYDHIulOE/yEEOabkoE4=
-github.com/containers/image/v5 v5.32.3-0.20240923171149-9e1153a28c46 h1:eIwxm8+oAoTk+PDuOTbZRFG1DBF5tAlFO+niIamyzaM=
+github.com/containers/image/v5 v5.32.3-0.20241016192323-a66152c1cdf6 h1:kDsMVMhEFmWFLN6QEn0ul0MbpXCxLiIL5pqxADOqB8g=
-github.com/containers/image/v5 v5.32.3-0.20240923171149-9e1153a28c46/go.mod h1:GgaW+YZJaJmcGtyPZNtsggfM4BBYIMfu/fFK62ZKU0o=
+github.com/containers/image/v5 v5.32.3-0.20241016192323-a66152c1cdf6/go.mod h1:Ulwf/jQO4757C/uOJyNiZ10dRiXRwVnyhF9wYFno3GQ=
 github.com/containers/libhvee v0.7.1 h1:dWGF5GLq9DZvXo3P8aDp3cNieL5eCaSell4UmeA/jY4=
 github.com/containers/libhvee v0.7.1/go.mod h1:fRKB3AyIqHMvq6xaeYhTpckM2cdoq0oecolyoiuLP7M=
 github.com/containers/libtrust v0.0.0-20230121012942-c1716e8a8d01 h1:Qzk5C6cYglewc+UyGf6lc8Mj2UaPTHy/iF2De0/77CA=
 github.com/containers/libtrust v0.0.0-20230121012942-c1716e8a8d01/go.mod h1:9rfv8iPl1ZP7aqh9YA68wnZv2NUDbXdcdPHVz0pFbPY=
-github.com/containers/luksy v0.0.0-20240812184316-2e7307c02f06 h1:XDof6h9ujycjzF89x7hCBRpCSvFs9plbPHa7c2EtZrk=
+github.com/containers/luksy v0.0.0-20241007190014-e2530d691420 h1:57rxgU2wdI3lZMDZtao09WjCWmxBKOxI/Sj37IpCV50=
-github.com/containers/luksy v0.0.0-20240812184316-2e7307c02f06/go.mod h1:jaWkqhbHvO+7rFQ86KcXlNmkM9UZsyWm8alsmaYcMzw=
+github.com/containers/luksy v0.0.0-20241007190014-e2530d691420/go.mod h1:MYzFCudLgMcXgFl7XuFjUowNDTBqL09BfEgMf7QHtO4=
 github.com/containers/ocicrypt v1.2.0 h1:X14EgRK3xNFvJEfI5O4Qn4T3E25ANudSOZz/sirVuPM=
 github.com/containers/ocicrypt v1.2.0/go.mod h1:ZNviigQajtdlxIZGibvblVuIFBKIuUI2M0QM12SD31U=
 github.com/containers/psgo v1.9.0 h1:eJ74jzSaCHnWt26OlKZROSyUyRcGDf+gYBdXnxrMW4g=
 github.com/containers/psgo v1.9.0/go.mod h1:0YoluUm43Mz2UnBIh1P+6V6NWcbpTL5uRtXyOcH0B5A=
-github.com/containers/storage v1.55.1-0.20241008185503-a397602515fd h1:Yh3v4wrVxMpccXjA451OsF4CdKuQEEGCNDHtK84y+10=
+github.com/containers/storage v1.55.1-0.20241017155235-4db236377c55 h1:lqNa07muv5Gkfyw8aivrLwy8cCZgewHN2OxB0cuNNyY=
-github.com/containers/storage v1.55.1-0.20241008185503-a397602515fd/go.mod h1:H3XVD+Fwqe26DEP+Ev3s9VmdtXlAd9rV/WFC+dgALSI=
+github.com/containers/storage v1.55.1-0.20241017155235-4db236377c55/go.mod h1:iq56tOFXnj8kA8DAytN28fhUm77eKLWrashQrImaqBs=
 github.com/containers/winquit v1.1.0 h1:jArun04BNDQvt2W0Y78kh9TazN2EIEMG5Im6/JY7+pE=
 github.com/containers/winquit v1.1.0/go.mod h1:PsPeZlnbkmGGIToMPHF1zhWjBUkd8aHjMOr/vFcPxw8=
 github.com/coreos/go-oidc/v3 v3.11.0 h1:Ia3MxdwpSw702YW0xgfmP1GVCMA9aEFWu12XUZ3/OtI=
@@ -308,8 +308,8 @@ github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnr
 github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo=
 github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8=
 github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
-github.com/klauspost/compress v1.17.10 h1:oXAz+Vh0PMUvJczoi+flxpnBEPxoER1IaAnU/NMPtT0=
+github.com/klauspost/compress v1.17.11 h1:In6xLpyWOi1+C7tXUUWv2ot1QvBjxevKAaI6IXrJmUc=
-github.com/klauspost/compress v1.17.10/go.mod h1:pMDklpSncoRMuLFrf1W9Ss9KT+0rH90U12bZKk7uwG0=
+github.com/klauspost/compress v1.17.11/go.mod h1:pMDklpSncoRMuLFrf1W9Ss9KT+0rH90U12bZKk7uwG0=
 github.com/klauspost/cpuid/v2 v2.0.9/go.mod h1:FInQzS24/EEf25PyTYn52gqo7WaD8xa0213Md/qVLRg=
 github.com/klauspost/cpuid/v2 v2.2.8 h1:+StwCXwm9PdpiEkPyzBXIy+M9KUb4ODm0Zarf1kS5BM=
 github.com/klauspost/cpuid/v2 v2.2.8/go.mod h1:Lcz8mBdAVJIBVzewtcLocK12l3Y+JytZYpaMropDUws=

@@ -32,7 +32,7 @@ env:
     DEBIAN_NAME: "debian-13"
 
     # Image identifiers
-    IMAGE_SUFFIX: "c20240826t190000z-f40f39d13"
+    IMAGE_SUFFIX: "c20241010t105554z-f40f39d13"
     FEDORA_CACHE_IMAGE_NAME: "fedora-${IMAGE_SUFFIX}"
     PRIOR_FEDORA_CACHE_IMAGE_NAME: "prior-fedora-${IMAGE_SUFFIX}"
     DEBIAN_CACHE_IMAGE_NAME: "debian-${IMAGE_SUFFIX}"

@@ -103,6 +103,7 @@ jobs:
   # Fedora Koji build
   - job: koji_build
     trigger: commit
+    packages: [buildah-fedora]
     sidetag_group: podman-releases
     # Dependents are not rpm dependencies, but the package whose bodhi update
     # should include this package.

@@ -1,28 +1,15 @@
 approvers:
   - TomSweeneyRedHat
-  - ashley-cui
-  - cevich
-  - flouthoc
-  - giuseppe
   - lsm5
   - nalind
   - rhatdan
   - umohnani8
-  - vrothberg
 reviewers:
-  - QiWang19
-  - TomSweeneyRedHat
   - ashley-cui
   - baude
-  - cevich
   - edsantiago
+  - flouthoc
   - giuseppe
-  - haircommander
+  - Honny1
-  - jwhonce
-  - lsm5
   - mheon
-  - mrunalp
-  - nalind
-  - rhatdan
-  - umohnani8
   - vrothberg

@@ -373,10 +373,7 @@ func (s *StageExecutor) Copy(excludes []string, copies ...imagebuilder.Copy) err
 			return errors.New("COPY --parents is not supported")
 		}
 		if len(cp.Excludes) > 0 {
-			if cp.Download {
-				return errors.New("ADD --excludes is not supported")
-			}
-			return errors.New("COPY --excludes is not supported")
+			excludes = append(slices.Clone(excludes), cp.Excludes...)
 		}
 	}
 	s.builder.ContentDigester.Restart()
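
The replacement above extends the caller's exclude list by appending to a `slices.Clone` of it rather than to `excludes` itself. A minimal sketch of the aliasing hazard that pattern avoids (the `base` slice and values are illustrative, not from the PR):

```go
package main

import (
	"fmt"
	"slices"
)

func main() {
	base := make([]string, 2, 4) // len 2, spare capacity 2
	base[0], base[1] = ".git", "node_modules"

	// Both appends fit in base's spare capacity, so they share one backing
	// array: the second append overwrites the element the first one added.
	a := append(base, "*.log")
	b := append(base, "*.tmp")
	fmt.Println(a[2], b[2]) // "*.tmp" "*.tmp" -- a was clobbered

	// Cloning first gives each result its own backing array.
	c := append(slices.Clone(base), "*.log")
	d := append(slices.Clone(base), "*.tmp")
	fmt.Println(c[2], d[2]) // "*.log" "*.tmp"
}
```
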
@@ -1325,12 +1322,12 @@ func (s *StageExecutor) Execute(ctx context.Context, base string) (imgID string,
 	// Also check the chmod and the chown flags for validity.
 	for _, flag := range step.Flags {
 		command := strings.ToUpper(step.Command)
-		// chmod, chown and from flags should have an '=' sign, '--chmod=', '--chown=' or '--from='
+		// chmod, chown and from flags should have an '=' sign, '--chmod=', '--chown=' or '--from=' or '--exclude='
-		if command == "COPY" && (flag == "--chmod" || flag == "--chown" || flag == "--from") {
+		if command == "COPY" && (flag == "--chmod" || flag == "--chown" || flag == "--from" || flag == "--exclude") {
-			return "", nil, false, fmt.Errorf("COPY only supports the --chmod=<permissions> --chown=<uid:gid> and the --from=<image|stage> flags")
+			return "", nil, false, fmt.Errorf("COPY only supports the --chmod=<permissions> --chown=<uid:gid> --from=<image|stage> and the --exclude=<pattern> flags")
 		}
-		if command == "ADD" && (flag == "--chmod" || flag == "--chown" || flag == "--checksum") {
+		if command == "ADD" && (flag == "--chmod" || flag == "--chown" || flag == "--checksum" || flag == "--exclude") {
-			return "", nil, false, fmt.Errorf("ADD only supports the --chmod=<permissions>, --chown=<uid:gid>, and --checksum=<checksum> flags")
+			return "", nil, false, fmt.Errorf("ADD only supports the --chmod=<permissions>, --chown=<uid:gid>, and --checksum=<checksum> --exclude=<pattern> flags")
 		}
 		if strings.Contains(flag, "--from") && command == "COPY" {
 			arr := strings.Split(flag, "=")

@@ -22,6 +22,7 @@ import (
 	"github.com/containers/storage/pkg/idtools"
 	"github.com/containers/storage/pkg/lockfile"
 	"github.com/containers/storage/pkg/unshare"
+	digest "github.com/opencontainers/go-digest"
 	specs "github.com/opencontainers/runtime-spec/specs-go"
 	selinux "github.com/opencontainers/selinux/go-selinux"
 )
@@ -374,7 +375,11 @@ func GetCacheMount(args []string, _ storage.Store, _ string, additionalMountPoin
 			return newMount, nil, fmt.Errorf("no stage found with name %s", fromStage)
 		}
 		// path should be /contextDir/specified path
-		newMount.Source = filepath.Join(mountPoint, filepath.Clean(string(filepath.Separator)+newMount.Source))
+		evaluated, err := copier.Eval(mountPoint, string(filepath.Separator)+newMount.Source, copier.EvalOptions{})
+		if err != nil {
+			return newMount, nil, err
+		}
+		newMount.Source = evaluated
 	} else {
 		// we need to create the cache directory on the host if no image is being used
 
@@ -391,11 +396,15 @@ func GetCacheMount(args []string, _ storage.Store, _ string, additionalMountPoin
 		}
 
 		if id != "" {
-			newMount.Source = filepath.Join(cacheParent, filepath.Clean(id))
-			buildahLockFilesDir = filepath.Join(BuildahCacheLockfileDir, filepath.Clean(id))
+			// Don't let the user control where we place the directory.
+			dirID := digest.FromString(id).Encoded()[:16]
+			newMount.Source = filepath.Join(cacheParent, dirID)
+			buildahLockFilesDir = filepath.Join(BuildahCacheLockfileDir, dirID)
 		} else {
-			newMount.Source = filepath.Join(cacheParent, filepath.Clean(newMount.Destination))
-			buildahLockFilesDir = filepath.Join(BuildahCacheLockfileDir, filepath.Clean(newMount.Destination))
+			// Don't let the user control where we place the directory.
+			dirID := digest.FromString(newMount.Destination).Encoded()[:16]
+			newMount.Source = filepath.Join(cacheParent, dirID)
+			buildahLockFilesDir = filepath.Join(BuildahCacheLockfileDir, dirID)
 		}
 		idPair := idtools.IDPair{
 			UID: uid,
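
With this change the cache id (or destination path) only ever feeds a digest, so a hostile value can no longer steer the mount source or the lock-file path outside their parent directories. A self-contained sketch of the pattern (`safeCacheDir` is an illustrative helper, not a function from the PR):

```go
package main

import (
	"fmt"
	"path/filepath"

	digest "github.com/opencontainers/go-digest"
)

// safeCacheDir derives the on-disk directory name from a hash of the
// user-supplied id, so values like "../../etc" cannot escape cacheParent.
func safeCacheDir(cacheParent, id string) string {
	dirID := digest.FromString(id).Encoded()[:16]
	return filepath.Join(cacheParent, dirID)
}

func main() {
	fmt.Println(safeCacheDir("/var/cache/buildah", "gomodcache"))
	fmt.Println(safeCacheDir("/var/cache/buildah", "../../etc")) // still under /var/cache/buildah
}
```
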

@@ -57,6 +57,8 @@ const (
 	BuildahCacheDir = "buildah-cache"
 )
 
+var errInvalidSecretSyntax = errors.New("incorrect secret flag format: should be --secret id=foo,src=bar[,env=ENV][,type=file|env]")
+
 // RepoNamesToNamedReferences parse the raw string to Named reference
 func RepoNamesToNamedReferences(destList []string) ([]reference.Named, error) {
 	var result []reference.Named
@@ -1240,7 +1242,6 @@ func GetTempDir() string {
 
 // Secrets parses the --secret flag
 func Secrets(secrets []string) (map[string]define.Secret, error) {
-	invalidSyntax := fmt.Errorf("incorrect secret flag format: should be --secret id=foo,src=bar[,env=ENV,type=file|env]")
 	parsed := make(map[string]define.Secret)
 	for _, secret := range secrets {
 		tokens := strings.Split(secret, ",")
@@ -1260,10 +1261,12 @@ func Secrets(secrets []string) (map[string]define.Secret, error) {
 				return nil, errors.New("invalid secret type, must be file or env")
 			}
 			typ = kv[1]
+		default:
+			return nil, errInvalidSecretSyntax
 		}
 	}
 	if id == "" {
-		return nil, invalidSyntax
+		return nil, errInvalidSecretSyntax
 	}
 	if src == "" {
 		src = id
@@ -1288,6 +1291,7 @@ func Secrets(secrets []string) (map[string]define.Secret, error) {
 		src = fullPath
 	}
 	newSecret := define.Secret{
+		ID:         id,
 		Source:     src,
 		SourceType: typ,
 	}
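
Net effect of the three hunks above: unknown keys in a `--secret` flag are now rejected by the new `default:` arm instead of being silently ignored, and the error is a shared sentinel rather than a per-call value. A trimmed-down sketch of the stricter loop (illustrative, not the exact buildah code):

```go
package main

import (
	"errors"
	"fmt"
	"strings"
)

var errInvalidSecretSyntax = errors.New("incorrect secret flag format: should be --secret id=foo,src=bar[,env=ENV][,type=file|env]")

// parseSecret demonstrates the stricter tokenizer: any key other than
// id/src/env/type falls through to the default arm and errors out.
func parseSecret(flag string) (id, src, typ string, err error) {
	for _, token := range strings.Split(flag, ",") {
		kv := strings.SplitN(token, "=", 2)
		if len(kv) < 2 {
			return "", "", "", errInvalidSecretSyntax
		}
		switch kv[0] {
		case "id":
			id = kv[1]
		case "src":
			src = kv[1]
		case "env":
			src = kv[1]
			typ = "env"
		case "type":
			typ = kv[1]
		default:
			return "", "", "", errInvalidSecretSyntax
		}
	}
	return id, src, typ, nil
}

func main() {
	_, _, _, err := parseSecret("id=mytoken,src=token.txt,bogus=1")
	fmt.Println(err) // rejected instead of silently ignoring "bogus"
}
```
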

@@ -26,6 +26,7 @@ import (
 	"github.com/containers/buildah/copier"
 	"github.com/containers/buildah/define"
 	"github.com/containers/buildah/internal"
+	"github.com/containers/buildah/internal/tmpdir"
 	internalUtil "github.com/containers/buildah/internal/util"
 	"github.com/containers/buildah/internal/volumes"
 	"github.com/containers/buildah/pkg/overlay"
@@ -1735,7 +1736,7 @@ func (b *Builder) getSecretMount(tokens []string, secrets map[string]define.Secr
 	if id == "" {
 		return nil, "", errInvalidSyntax
 	}
-	// Default location for secretis is /run/secrets/id
+	// Default location for secrets is /run/secrets/id
 	if target == "" {
 		target = "/run/secrets/" + id
 	}
@@ -1743,7 +1744,7 @@ func (b *Builder) getSecretMount(tokens []string, secrets map[string]define.Secr
 	secr, ok := secrets[id]
 	if !ok {
 		if required {
-			return nil, "", fmt.Errorf("secret required but no secret with id %s found", id)
+			return nil, "", fmt.Errorf("secret required but no secret with id %q found", id)
 		}
 		return nil, "", nil
 	}
@@ -1754,7 +1755,7 @@ func (b *Builder) getSecretMount(tokens []string, secrets map[string]define.Secr
 	switch secr.SourceType {
 	case "env":
 		data = []byte(os.Getenv(secr.Source))
-		tmpFile, err := os.CreateTemp(define.TempDir, "buildah*")
+		tmpFile, err := os.CreateTemp(tmpdir.GetTempDir(), "buildah*")
 		if err != nil {
 			return nil, "", err
 		}
@@ -1774,7 +1775,7 @@ func (b *Builder) getSecretMount(tokens []string, secrets map[string]define.Secr
 		if err != nil {
 			return nil, "", err
 		}
-		ctrFileOnHost = filepath.Join(containerWorkingDir, "secrets", id)
+		ctrFileOnHost = filepath.Join(containerWorkingDir, "secrets", digest.FromString(id).Encoded()[:16])
 	default:
 		return nil, "", errors.New("invalid source secret type")
 	}
@@ -1818,7 +1819,7 @@ func (b *Builder) getSSHMount(tokens []string, count int, sshsources map[string]
 	var id, target string
 	var required bool
 	var uid, gid uint32
-	var mode uint32 = 400
+	var mode uint32 = 0o600
 	for _, val := range tokens {
 		kv := strings.SplitN(val, "=", 2)
 		if len(kv) < 2 {
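
Worth noting: the old default was the decimal literal `400`, which as a permission mask is `0o620` (`rw--w----`), not the `0o400` it reads as. The fix switches to an explicit octal literal:

```go
package main

import "fmt"

func main() {
	fmt.Printf("%o\n", 400)   // 620 -- what the old decimal literal really meant
	fmt.Printf("%o\n", 0o600) // 600 -- the corrected default for SSH agent mounts
}
```
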
@@ -1863,7 +1864,7 @@ func (b *Builder) getSSHMount(tokens []string, count int, sshsources map[string]
 	if id == "" {
 		id = "default"
 	}
-	// Default location for secretis is /run/buildkit/ssh_agent.{i}
+	// Default location for secrets is /run/buildkit/ssh_agent.{i}
 	if target == "" {
 		target = fmt.Sprintf("/run/buildkit/ssh_agent.%d", count)
 	}

@@ -886,7 +886,15 @@ default_sysctls = [
 
 # Virtualization provider used to run Podman machine.
 # If it is empty or commented out, the default provider will be used.
-#
+# Linux:
+#   qemu - Open source machine emulator and virtualizer. (Default)
+# Windows: there are currently two options:
+#   wsl - Windows Subsystem for Linux (Default)
+#   hyperv - Windows Server Virtualization
+# Mac: there are currently two options:
+#   applehv - Default Apple Hypervisor (Default)
+#   libkrun - Launch virtual machines using the libkrun platform, optimized
+#             for sharing GPU with the machine.
 #provider = ""
 
 # Rosetta supports running x86_64 Linux binaries on a Podman machine on Apple silicon.

@@ -819,11 +819,16 @@ func (ic *imageCopier) copyLayer(ctx context.Context, srcInfo types.BlobInfo, to
 				logrus.Debugf("Retrieved partial blob %v", srcInfo.Digest)
 				return true, updatedBlobInfoFromUpload(srcInfo, uploadedBlob), nil
 			}
+			// On a "partial content not available" error, ignore it and retrieve the whole layer.
+			var perr private.ErrFallbackToOrdinaryLayerDownload
+			if errors.As(err, &perr) {
 				logrus.Debugf("Failed to retrieve partial blob: %v", err)
 				return false, types.BlobInfo{}, nil
+			}
+			return false, types.BlobInfo{}, err
 		}()
 		if err != nil {
-			return types.BlobInfo{}, "", err
+			return types.BlobInfo{}, "", fmt.Errorf("reading blob %s: %w", srcInfo.Digest, err)
 		}
 		if reused {
 			return blobInfo, cachedDiffID, nil

@@ -42,7 +42,6 @@ const (
 	dockerRegistry = "registry-1.docker.io"
 
 	resolvedPingV2URL = "%s://%s/v2/"
-	resolvedPingV1URL = "%s://%s/v1/_ping"
 	tagsPath          = "/v2/%s/tags/list"
 	manifestPath      = "/v2/%s/manifests/%s"
 	blobsPath         = "/v2/%s/blobs/%s"
@@ -936,34 +935,6 @@ func (c *dockerClient) detectPropertiesHelper(ctx context.Context) error {
 	}
 	if err != nil {
 		err = fmt.Errorf("pinging container registry %s: %w", c.registry, err)
-		if c.sys != nil && c.sys.DockerDisableV1Ping {
-			return err
-		}
-		// best effort to understand if we're talking to a V1 registry
-		pingV1 := func(scheme string) bool {
-			pingURL, err := url.Parse(fmt.Sprintf(resolvedPingV1URL, scheme, c.registry))
-			if err != nil {
-				return false
-			}
-			resp, err := c.makeRequestToResolvedURL(ctx, http.MethodGet, pingURL, nil, nil, -1, noAuth, nil)
-			if err != nil {
-				logrus.Debugf("Ping %s err %s (%#v)", pingURL.Redacted(), err.Error(), err)
-				return false
-			}
-			defer resp.Body.Close()
-			logrus.Debugf("Ping %s status %d", pingURL.Redacted(), resp.StatusCode)
-			if resp.StatusCode != http.StatusOK && resp.StatusCode != http.StatusUnauthorized {
-				return false
-			}
-			return true
-		}
-		isV1 := pingV1("https")
-		if !isV1 && c.tlsClientConfig.InsecureSkipVerify {
-			isV1 = pingV1("http")
-		}
-		if isV1 {
-			err = ErrV1NotSupported
-		}
 	}
 	return err
 }

@@ -12,6 +12,7 @@ import (
 var (
 	// ErrV1NotSupported is returned when we're trying to talk to a
 	// docker V1 registry.
+	// Deprecated: The V1 container registry detection is no longer performed, so this error is never returned.
 	ErrV1NotSupported = errors.New("can't talk to a V1 container registry")
 	// ErrTooManyRequests is returned when the status code returned is 429
 	ErrTooManyRequests = errors.New("too many requests to registry")

@@ -36,8 +36,9 @@ func (stub NoPutBlobPartialInitialize) SupportsPutBlobPartial() bool {
 // PutBlobPartial attempts to create a blob using the data that is already present
 // at the destination. chunkAccessor is accessed in a non-sequential way to retrieve the missing chunks.
 // It is available only if SupportsPutBlobPartial().
-// Even if SupportsPutBlobPartial() returns true, the call can fail, in which case the caller
-// should fall back to PutBlobWithOptions.
+// Even if SupportsPutBlobPartial() returns true, the call can fail.
+// If the call fails with ErrFallbackToOrdinaryLayerDownload, the caller can fall back to PutBlobWithOptions.
+// The fallback _must not_ be done otherwise.
 func (stub NoPutBlobPartialInitialize) PutBlobPartial(ctx context.Context, chunkAccessor private.BlobChunkAccessor, srcInfo types.BlobInfo, options private.PutBlobPartialOptions) (private.UploadedBlob, error) {
 	return private.UploadedBlob{}, fmt.Errorf("internal error: PutBlobPartial is not supported by the %q transport", stub.transportName)
 }

@@ -53,8 +53,9 @@ type ImageDestinationInternalOnly interface {
 	// PutBlobPartial attempts to create a blob using the data that is already present
 	// at the destination. chunkAccessor is accessed in a non-sequential way to retrieve the missing chunks.
 	// It is available only if SupportsPutBlobPartial().
-	// Even if SupportsPutBlobPartial() returns true, the call can fail, in which case the caller
-	// should fall back to PutBlobWithOptions.
+	// Even if SupportsPutBlobPartial() returns true, the call can fail.
+	// If the call fails with ErrFallbackToOrdinaryLayerDownload, the caller can fall back to PutBlobWithOptions.
+	// The fallback _must not_ be done otherwise.
 	PutBlobPartial(ctx context.Context, chunkAccessor BlobChunkAccessor, srcInfo types.BlobInfo, options PutBlobPartialOptions) (UploadedBlob, error)
 
 	// TryReusingBlobWithOptions checks whether the transport already contains, or can efficiently reuse, a blob, and if so, applies it to the current destination
@@ -183,3 +184,22 @@ type UnparsedImage interface {
 	// UntrustedSignatures is like ImageSource.GetSignaturesWithFormat, but the result is cached; it is OK to call this however often you need.
 	UntrustedSignatures(ctx context.Context) ([]signature.Signature, error)
 }
+
+// ErrFallbackToOrdinaryLayerDownload is a custom error type returned by PutBlobPartial.
+// It suggests to the caller that a fallback mechanism can be used instead of a hard failure;
+// otherwise the caller of PutBlobPartial _must not_ fall back to PutBlob.
+type ErrFallbackToOrdinaryLayerDownload struct {
+	err error
+}
+
+func (c ErrFallbackToOrdinaryLayerDownload) Error() string {
+	return c.err.Error()
+}
+
+func (c ErrFallbackToOrdinaryLayerDownload) Unwrap() error {
+	return c.err
+}
+
+func NewErrFallbackToOrdinaryLayerDownload(err error) error {
+	return ErrFallbackToOrdinaryLayerDownload{err: err}
+}
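
Callers are expected to match this type explicitly before falling back, exactly as the `copyLayer` hunk earlier in this diff does. A condensed, self-contained sketch of the caller side (the stand-in types mirror the real `private` package; `putBlobPartial` here is hypothetical):

```go
package main

import (
	"errors"
	"fmt"
)

// Stand-in for private.ErrFallbackToOrdinaryLayerDownload, to keep the sketch self-contained.
type ErrFallbackToOrdinaryLayerDownload struct{ err error }

func (e ErrFallbackToOrdinaryLayerDownload) Error() string { return e.err.Error() }
func (e ErrFallbackToOrdinaryLayerDownload) Unwrap() error { return e.err }

func putBlobPartial() error {
	return ErrFallbackToOrdinaryLayerDownload{err: errors.New("partial content not available")}
}

func main() {
	err := putBlobPartial()
	var fallback ErrFallbackToOrdinaryLayerDownload
	if errors.As(err, &fallback) {
		fmt.Println("falling back to full download:", err)
		return // proceed with PutBlobWithOptions here
	}
	// Any other error is a hard failure: blindly falling back could mask real problems.
	fmt.Println("hard failure:", err)
}
```
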

@@ -117,8 +117,9 @@ func (d *ociArchiveImageDestination) PutBlobWithOptions(ctx context.Context, str
 // PutBlobPartial attempts to create a blob using the data that is already present
 // at the destination. chunkAccessor is accessed in a non-sequential way to retrieve the missing chunks.
 // It is available only if SupportsPutBlobPartial().
-// Even if SupportsPutBlobPartial() returns true, the call can fail, in which case the caller
-// should fall back to PutBlobWithOptions.
+// Even if SupportsPutBlobPartial() returns true, the call can fail.
+// If the call fails with ErrFallbackToOrdinaryLayerDownload, the caller can fall back to PutBlobWithOptions.
+// The fallback _must not_ be done otherwise.
 func (d *ociArchiveImageDestination) PutBlobPartial(ctx context.Context, chunkAccessor private.BlobChunkAccessor, srcInfo types.BlobInfo, options private.PutBlobPartialOptions) (private.UploadedBlob, error) {
 	return d.unpackedDest.PutBlobPartial(ctx, chunkAccessor, srcInfo, options)
 }

@@ -125,8 +125,9 @@ func (d *openshiftImageDestination) PutBlobWithOptions(ctx context.Context, stre
 // PutBlobPartial attempts to create a blob using the data that is already present
 // at the destination. chunkAccessor is accessed in a non-sequential way to retrieve the missing chunks.
 // It is available only if SupportsPutBlobPartial().
-// Even if SupportsPutBlobPartial() returns true, the call can fail, in which case the caller
-// should fall back to PutBlobWithOptions.
+// Even if SupportsPutBlobPartial() returns true, the call can fail.
+// If the call fails with ErrFallbackToOrdinaryLayerDownload, the caller can fall back to PutBlobWithOptions.
+// The fallback _must not_ be done otherwise.
 func (d *openshiftImageDestination) PutBlobPartial(ctx context.Context, chunkAccessor private.BlobChunkAccessor, srcInfo types.BlobInfo, options private.PutBlobPartialOptions) (private.UploadedBlob, error) {
 	return d.docker.PutBlobPartial(ctx, chunkAccessor, srcInfo, options)
 }

@@ -238,8 +238,9 @@ func (d *blobCacheDestination) SupportsPutBlobPartial() bool {
 // PutBlobPartial attempts to create a blob using the data that is already present
 // at the destination. chunkAccessor is accessed in a non-sequential way to retrieve the missing chunks.
 // It is available only if SupportsPutBlobPartial().
-// Even if SupportsPutBlobPartial() returns true, the call can fail, in which case the caller
-// should fall back to PutBlobWithOptions.
+// Even if SupportsPutBlobPartial() returns true, the call can fail.
+// If the call fails with ErrFallbackToOrdinaryLayerDownload, the caller can fall back to PutBlobWithOptions.
+// The fallback _must not_ be done otherwise.
 func (d *blobCacheDestination) PutBlobPartial(ctx context.Context, chunkAccessor private.BlobChunkAccessor, srcInfo types.BlobInfo, options private.PutBlobPartialOptions) (private.UploadedBlob, error) {
 	return d.destination.PutBlobPartial(ctx, chunkAccessor, srcInfo, options)
 }

@@ -51,28 +51,39 @@ func (err InvalidPolicyFormatError) Error() string {
 // NOTE: When this function returns an error, report it to the user and abort.
 // DO NOT hard-code fallback policies in your application.
 func DefaultPolicy(sys *types.SystemContext) (*Policy, error) {
-	return NewPolicyFromFile(defaultPolicyPath(sys))
+	policyPath, err := defaultPolicyPath(sys)
+	if err != nil {
+		return nil, err
+	}
+	return NewPolicyFromFile(policyPath)
 }
 
-// defaultPolicyPath returns a path to the default policy of the system.
-func defaultPolicyPath(sys *types.SystemContext) string {
-	return defaultPolicyPathWithHomeDir(sys, homedir.Get())
+// defaultPolicyPath returns a path to the relevant policy of the system, or an error if the policy is missing.
+func defaultPolicyPath(sys *types.SystemContext) (string, error) {
+	policyFilePath, err := defaultPolicyPathWithHomeDir(sys, homedir.Get(), systemDefaultPolicyPath)
+	if err != nil {
+		return "", err
+	}
+	return policyFilePath, nil
 }
 
 // defaultPolicyPathWithHomeDir is an internal implementation detail of defaultPolicyPath,
-// it exists only to allow testing it with an artificial home directory.
-func defaultPolicyPathWithHomeDir(sys *types.SystemContext, homeDir string) string {
+// it exists only to allow testing it with artificial paths.
+func defaultPolicyPathWithHomeDir(sys *types.SystemContext, homeDir string, systemPolicyPath string) (string, error) {
 	if sys != nil && sys.SignaturePolicyPath != "" {
-		return sys.SignaturePolicyPath
+		return sys.SignaturePolicyPath, nil
 	}
 	userPolicyFilePath := filepath.Join(homeDir, userPolicyFile)
 	if err := fileutils.Exists(userPolicyFilePath); err == nil {
-		return userPolicyFilePath
+		return userPolicyFilePath, nil
 	}
 	if sys != nil && sys.RootForImplicitAbsolutePaths != "" {
-		return filepath.Join(sys.RootForImplicitAbsolutePaths, systemDefaultPolicyPath)
+		return filepath.Join(sys.RootForImplicitAbsolutePaths, systemPolicyPath), nil
 	}
-	return systemDefaultPolicyPath
+	if err := fileutils.Exists(systemPolicyPath); err == nil {
+		return systemPolicyPath, nil
+	}
+	return "", fmt.Errorf("no policy.json file found at any of the following: %q, %q", userPolicyFilePath, systemPolicyPath)
 }
 
 // NewPolicyFromFile returns a policy configured in the specified file.
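
For API consumers, a missing policy.json now surfaces as a descriptive error from `signature.DefaultPolicy` itself instead of a later open() failure inside `NewPolicyFromFile`. A sketch of caller-side handling (the error text comes from the hunk above):

```go
package main

import (
	"fmt"
	"os"

	"github.com/containers/image/v5/signature"
	"github.com/containers/image/v5/types"
)

func main() {
	sys := &types.SystemContext{} // or set SignaturePolicyPath to override the lookup
	policy, err := signature.DefaultPolicy(sys)
	if err != nil {
		// Now reports "no policy.json file found at any of the following: ..."
		// when neither the per-user nor the system policy file exists.
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
	_ = policy // hand to signature.NewPolicyContext, etc.
}
```
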

@@ -311,15 +311,23 @@ func (f *zstdFetcher) GetBlobAt(chunks []chunked.ImageSourceChunk) (chan io.Read
 // PutBlobPartial attempts to create a blob using the data that is already present
 // at the destination. chunkAccessor is accessed in a non-sequential way to retrieve the missing chunks.
 // It is available only if SupportsPutBlobPartial().
-// Even if SupportsPutBlobPartial() returns true, the call can fail, in which case the caller
-// should fall back to PutBlobWithOptions.
-func (s *storageImageDestination) PutBlobPartial(ctx context.Context, chunkAccessor private.BlobChunkAccessor, srcInfo types.BlobInfo, options private.PutBlobPartialOptions) (private.UploadedBlob, error) {
+// Even if SupportsPutBlobPartial() returns true, the call can fail.
+// If the call fails with ErrFallbackToOrdinaryLayerDownload, the caller can fall back to PutBlobWithOptions.
+// The fallback _must not_ be done otherwise.
+func (s *storageImageDestination) PutBlobPartial(ctx context.Context, chunkAccessor private.BlobChunkAccessor, srcInfo types.BlobInfo, options private.PutBlobPartialOptions) (_ private.UploadedBlob, retErr error) {
 	fetcher := zstdFetcher{
 		chunkAccessor: chunkAccessor,
 		ctx:           ctx,
 		blobInfo:      srcInfo,
 	}
+
+	defer func() {
+		var perr chunked.ErrFallbackToOrdinaryLayerDownload
+		if errors.As(retErr, &perr) {
+			retErr = private.NewErrFallbackToOrdinaryLayerDownload(retErr)
+		}
+	}()
+
 	differ, err := chunked.GetDiffer(ctx, s.imageRef.transport.store, srcInfo.Digest, srcInfo.Size, srcInfo.Annotations, &fetcher)
 	if err != nil {
 		return private.UploadedBlob{}, err
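
The named return value plus `defer` here translates c/storage's `chunked.ErrFallbackToOrdinaryLayerDownload` into the transport-level type on every return path, not just the ones that remember to wrap. A generic sketch of that pattern with stand-in error types:

```go
package main

import (
	"errors"
	"fmt"
)

type storageErr struct{ msg string } // stand-in for chunked.ErrFallbackToOrdinaryLayerDownload

func (e storageErr) Error() string { return e.msg }

type transportErr struct{ err error } // stand-in for private.ErrFallbackToOrdinaryLayerDownload

func (e transportErr) Error() string { return e.err.Error() }
func (e transportErr) Unwrap() error { return e.err }

// doWork rewrites one error type into another on every return path by
// combining a named return value with a deferred errors.As check.
func doWork() (retErr error) {
	defer func() {
		var serr storageErr
		if errors.As(retErr, &serr) {
			retErr = transportErr{err: retErr}
		}
	}()
	return storageErr{msg: "layer not convertible to partial pull"}
}

func main() {
	err := doWork()
	var terr transportErr
	fmt.Println(errors.As(err, &terr)) // true: callers see the transport-level type
}
```
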

@@ -643,6 +643,7 @@ type SystemContext struct {
 	// if true, a V1 ping attempt isn't done to give users a better error. Default is false.
 	// Note that this field is used mainly to integrate containers/image into projectatomic/docker
 	// in order to not break any existing docker's integration tests.
+	// Deprecated: The V1 container registry detection is no longer performed, so setting this flag has no effect.
 	DockerDisableV1Ping bool
 	// If true, dockerImageDestination.SupportedManifestMIMETypes will omit the Schema1 media types from the supported list
 	DockerDisableDestSchema1MIMETypes bool

@@ -23,7 +23,7 @@ env:
     # GCE project where images live
     IMAGE_PROJECT: "libpod-218412"
     # VM Image built in containers/automation_images
-    IMAGE_SUFFIX: "c20240821t171500z-f40f39d13"
+    IMAGE_SUFFIX: "c20241010t105554z-f40f39d13"
     FEDORA_CACHE_IMAGE_NAME: "fedora-${IMAGE_SUFFIX}"
     DEBIAN_CACHE_IMAGE_NAME: "debian-${IMAGE_SUFFIX}"
 
@@ -180,6 +180,13 @@ gofix_task:
     build_script: go fix ./...
     test_script: git diff --exit-code
 
+codespell_task:
+    alias: codespell
+    container:
+        image: python
+    build_script: pip install codespell
+    test_script: codespell
+
 
 # Status aggregator for all tests. This task simply ensures a defined
 # set of tasks all passed, and allows confirming that based on the status
@@ -197,6 +204,7 @@ success_task:
         - vendor
         - cross
         - gofix
+        - codespell
     container:
         image: golang:1.21
     clone_script: 'mkdir -p "$CIRRUS_WORKING_DIR"' # Source code not needed

@@ -0,0 +1,3 @@
+[codespell]
+skip = ./.git,./vendor,./tests/tools/vendor,AUTHORS
+ignore-words-list = afile,flate,prevend,Plack,worl

@@ -46,7 +46,7 @@ containers-storage: ## build using gc on the host
 	$(GO) build -compiler gc $(BUILDFLAGS) ./cmd/containers-storage
 
 codespell:
-	codespell -S Makefile,build,buildah,buildah.spec,imgtype,copy,AUTHORS,bin,vendor,.git,go.sum,CHANGELOG.md,changelog.txt,seccomp.json,.cirrus.yml,"*.xz,*.gz,*.tar,*.tgz,*ico,*.png,*.1,*.5,*.orig,*.rej" -L plack,worl,flate,uint,iff,od,ERRO -w
+	codespell
 
 binary local-binary: containers-storage
 

@@ -189,14 +189,14 @@ type Driver interface {
 type DriverWithDifferOutput struct {
 	Differ             Differ
 	Target             string
-	Size               int64
+	Size               int64 // Size of the uncompressed layer, -1 if unknown. Must be known if UncompressedDigest is set.
 	UIDs               []uint32
 	GIDs               []uint32
 	UncompressedDigest digest.Digest
 	CompressedDigest   digest.Digest
 	Metadata           string
 	BigData            map[string][]byte
-	TarSplit           []byte
+	TarSplit           []byte // nil if not available
 	TOCDigest          digest.Digest
 	// RootDirMode is the mode of the root directory of the layer, if specified.
 	RootDirMode *os.FileMode

@@ -18,6 +18,16 @@ package quota
 #include <linux/quota.h>
 #include <linux/dqblk_xfs.h>
 
+#ifndef FS_XFLAG_PROJINHERIT
+struct fsxattr {
+	__u32		fsx_xflags;
+	__u32		fsx_extsize;
+	__u32		fsx_nextents;
+	__u32		fsx_projid;
+	unsigned char	fsx_pad[12];
+};
+#define FS_XFLAG_PROJINHERIT	0x00000200
+#endif
 #ifndef FS_IOC_FSGETXATTR
 #define FS_IOC_FSGETXATTR _IOR ('X', 31, struct fsxattr)
 #endif
@@ -162,6 +172,11 @@ func NewControl(basePath string) (*Control, error) {
 		return nil, err
 	}
 
+	// Clear inherit flag from top-level directory if necessary.
+	if err := stripProjectInherit(basePath); err != nil {
+		return nil, err
+	}
+
 	//
 	// get first project id to be used for next container
 	//
|
||||||
}
|
}
|
||||||
defer closeDir(dir)
|
defer closeDir(dir)
|
||||||
|
|
||||||
|
logrus.Debugf("Setting quota project ID %d on %s", projectID, targetPath)
|
||||||
|
|
||||||
var fsx C.struct_fsxattr
|
var fsx C.struct_fsxattr
|
||||||
_, _, errno := unix.Syscall(unix.SYS_IOCTL, getDirFd(dir), C.FS_IOC_FSGETXATTR,
|
_, _, errno := unix.Syscall(unix.SYS_IOCTL, getDirFd(dir), C.FS_IOC_FSGETXATTR,
|
||||||
uintptr(unsafe.Pointer(&fsx)))
|
uintptr(unsafe.Pointer(&fsx)))
|
||||||
|
@@ -346,6 +363,7 @@ func setProjectID(targetPath string, projectID uint32) error {
 		return fmt.Errorf("failed to get projid for %s: %w", targetPath, errno)
 	}
 	fsx.fsx_projid = C.__u32(projectID)
+	fsx.fsx_xflags |= C.FS_XFLAG_PROJINHERIT
 	_, _, errno = unix.Syscall(unix.SYS_IOCTL, getDirFd(dir), C.FS_IOC_FSSETXATTR,
 		uintptr(unsafe.Pointer(&fsx)))
 	if errno != 0 {
@@ -355,6 +373,36 @@ func setProjectID(targetPath string, projectID uint32) error {
 	return nil
 }
 
+// stripProjectInherit strips the project inherit flag from a directory.
+// Used on the top-level directory to ensure project IDs are only inherited for
+// files in directories we set quotas on - not the directories we want to set
+// the quotas on, as that would make everything use the same project ID.
+func stripProjectInherit(targetPath string) error {
+	dir, err := openDir(targetPath)
+	if err != nil {
+		return err
+	}
+	defer closeDir(dir)
+
+	var fsx C.struct_fsxattr
+	_, _, errno := unix.Syscall(unix.SYS_IOCTL, getDirFd(dir), C.FS_IOC_FSGETXATTR,
+		uintptr(unsafe.Pointer(&fsx)))
+	if errno != 0 {
+		return fmt.Errorf("failed to get xfs attrs for %s: %w", targetPath, errno)
+	}
+	if fsx.fsx_xflags&C.FS_XFLAG_PROJINHERIT != 0 {
+		// Flag is set, need to clear it.
+		logrus.Debugf("Clearing PROJINHERIT flag from directory %s", targetPath)
+		fsx.fsx_xflags = fsx.fsx_xflags &^ C.FS_XFLAG_PROJINHERIT
+		_, _, errno = unix.Syscall(unix.SYS_IOCTL, getDirFd(dir), C.FS_IOC_FSSETXATTR,
+			uintptr(unsafe.Pointer(&fsx)))
+		if errno != 0 {
+			return fmt.Errorf("failed to clear PROJINHERIT for %s: %w", targetPath, errno)
+		}
+	}
+	return nil
+}
+
 // findNextProjectID - find the next project id to be used for containers
 // by scanning driver home directory to find used project ids
 func (q *Control) findNextProjectID() error {
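
The clear step uses Go's AndNot operator, which resets exactly the named bit and leaves the rest of `fsx_xflags` alone. A one-line sanity check (the flag value matches the `FS_XFLAG_PROJINHERIT` definition above; the other bits are arbitrary):

```go
package main

import "fmt"

func main() {
	const projInherit = 0x00000200 // FS_XFLAG_PROJINHERIT
	flags := uint32(0x00000a01)    // arbitrary xflags with PROJINHERIT set

	flags = flags &^ projInherit // AndNot: clear exactly that bit
	fmt.Printf("%#x %v\n", flags, flags&projInherit == 0) // 0x801 true
}
```
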

@@ -136,8 +136,11 @@ type Layer struct {
 	TOCDigest digest.Digest `json:"toc-digest,omitempty"`
 
 	// UncompressedSize is the length of the blob that was last passed to
-	// ApplyDiff() or create(), after we decompressed it. If
-	// UncompressedDigest is not set, this should be treated as if it were
+	// ApplyDiff() or create(), after we decompressed it.
+	//
+	// - If UncompressedDigest is set, this must be set to a valid value.
+	// - Otherwise, if TOCDigest is set, this is either valid or -1.
+	// - If neither of this digests is set, this should be treated as if it were
 	// an uninitialized value.
 	UncompressedSize int64 `json:"diff-size,omitempty"`
 
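
The rewritten comment pins down three cases. A small helper spelling them out as an explicit check (hypothetical, not part of the PR; digest fields simplified to strings):

```go
package main

import "fmt"

// layerSizeInfo mirrors the fields the comment above talks about.
type layerSizeInfo struct {
	UncompressedDigest string
	TOCDigest          string
	UncompressedSize   int64
}

// sizeIsConsistent expresses the documented invariants: a set
// UncompressedDigest demands a real size; TOCDigest alone allows -1
// ("unknown"); with neither digest the size carries no meaning.
func sizeIsConsistent(l layerSizeInfo) bool {
	switch {
	case l.UncompressedDigest != "":
		return l.UncompressedSize >= 0
	case l.TOCDigest != "":
		return l.UncompressedSize >= 0 || l.UncompressedSize == -1
	default:
		return true // treated as uninitialized either way
	}
}

func main() {
	fmt.Println(sizeIsConsistent(layerSizeInfo{TOCDigest: "sha256:abc", UncompressedSize: -1}))          // true
	fmt.Println(sizeIsConsistent(layerSizeInfo{UncompressedDigest: "sha256:abc", UncompressedSize: -1})) // false
}
```
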
@@ -1214,8 +1217,8 @@ func (r *layerStore) Size(name string) (int64, error) {
 	// We use the presence of a non-empty digest as an indicator that the size value was intentionally set, and that
 	// a zero value is not just present because it was never set to anything else (which can happen if the layer was
 	// created by a version of this library that didn't keep track of digest and size information).
-	if layer.TOCDigest != "" || layer.UncompressedDigest != "" {
-		return layer.UncompressedSize, nil
+	if layer.UncompressedDigest != "" || layer.TOCDigest != "" {
+		return layer.UncompressedSize, nil // This may return -1 if only TOCDigest is set
 	}
 	return -1, nil
 }
@@ -2510,7 +2513,7 @@ func (r *layerStore) applyDiffFromStagingDirectory(id string, diffOutput *driver
 		return err
 	}
 
-	if len(diffOutput.TarSplit) != 0 {
+	if diffOutput.TarSplit != nil {
 		tsdata := bytes.Buffer{}
 		compressor, err := pgzip.NewWriterLevel(&tsdata, pgzip.BestSpeed)
 		if err != nil {
@@ -182,6 +182,9 @@ func makeBinaryDigest(stringDigest string) ([]byte, error) {
 	return buf, nil
 }

+// loadLayerCache attempts to load the cache file for the specified layer.
+// If the cache file is not present or it is using a different cache file version, then
+// the function returns (nil, nil).
 func (c *layersCache) loadLayerCache(layerID string) (_ *layer, errRet error) {
 	buffer, mmapBuffer, err := c.loadLayerBigData(layerID, cacheKey)
 	if err != nil && !errors.Is(err, os.ErrNotExist) {
@@ -202,6 +205,9 @@ func (c *layersCache) loadLayerCache(layerID string) (_ *layer, errRet error) {
 	if err != nil {
 		return nil, err
 	}
+	if cacheFile == nil {
+		return nil, nil
+	}
 	return c.createLayer(layerID, cacheFile, mmapBuffer)
 }
@@ -268,7 +274,7 @@ func (c *layersCache) load() error {
 	var newLayers []*layer
 	for _, r := range allLayers {
 		// The layer is present in the store and it is already loaded. Attempt to
-		// re-use it if mmap'ed.
+		// reuse it if mmap'ed.
 		if l, found := loadedLayers[r.ID]; found {
 			// If the layer is not marked for re-load, move it to newLayers.
 			if !l.reloadWithMmap {
@@ -618,6 +624,8 @@ func writeCache(manifest []byte, format graphdriver.DifferOutputFormat, id strin
 	}, nil
 }

+// readCacheFileFromMemory reads a cache file from a buffer.
+// It can return (nil, nil) if the cache file uses a different file version than the one currently supported.
 func readCacheFileFromMemory(bigDataBuffer []byte) (*cacheFile, error) {
 	bigData := bytes.NewReader(bigDataBuffer)
@@ -139,7 +139,7 @@ func readEstargzChunkedManifest(blobStream ImageSourceSeekable, blobSize int64,
 }

 // readZstdChunkedManifest reads the zstd:chunked manifest from the seekable stream blobStream.
-// Returns (manifest blob, parsed manifest, tar-split blob, manifest offset).
+// Returns (manifest blob, parsed manifest, tar-split blob or nil, manifest offset).
 func readZstdChunkedManifest(blobStream ImageSourceSeekable, tocDigest digest.Digest, annotations map[string]string) ([]byte, *internal.TOC, []byte, int64, error) {
 	offsetMetadata := annotations[internal.ManifestInfoKey]
 	if offsetMetadata == "" {
@@ -214,7 +214,7 @@ func readZstdChunkedManifest(blobStream ImageSourceSeekable, tocDigest digest.Di
 		return nil, nil, nil, 0, fmt.Errorf("unmarshaling TOC: %w", err)
 	}

-	decodedTarSplit := []byte{}
+	var decodedTarSplit []byte = nil
 	if toc.TarSplitDigest != "" {
 		if tarSplitChunk.Offset <= 0 {
 			return nil, nil, nil, 0, fmt.Errorf("TOC requires a tar-split, but the %s annotation does not describe a position", internal.TarSplitInfoKey)
@@ -288,6 +288,36 @@ func ensureTOCMatchesTarSplit(toc *internal.TOC, tarSplit []byte) error {
 	return nil
 }

+// tarSizeFromTarSplit computes the total tarball size, using only the tarSplit metadata
+func tarSizeFromTarSplit(tarSplit []byte) (int64, error) {
+	var res int64 = 0
+
+	unpacker := storage.NewJSONUnpacker(bytes.NewReader(tarSplit))
+	for {
+		entry, err := unpacker.Next()
+		if err != nil {
+			if err == io.EOF {
+				break
+			}
+			return -1, fmt.Errorf("reading tar-split entries: %w", err)
+		}
+		switch entry.Type {
+		case storage.SegmentType:
+			res += int64(len(entry.Payload))
+		case storage.FileType:
+			// entry.Size is the “logical size”, which might not be the physical size for sparse entries;
+			// but the way tar-split/tar/asm.WriteOutputTarStream combines FileType entries and returned files contents,
+			// sparse files are not supported.
+			// Also https://github.com/opencontainers/image-spec/blob/main/layer.md says
+			// > Sparse files SHOULD NOT be used because they lack consistent support across tar implementations.
+			res += entry.Size
+		default:
+			return -1, fmt.Errorf("unexpected tar-split entry type %q", entry.Type)
+		}
+	}
+	return res, nil
+}
+
 // ensureTimePointersMatch ensures that a and b are equal
 func ensureTimePointersMatch(a, b *time.Time) error {
 	// We didn’t always use “timeIfNotZero” when creating the TOC, so treat time.IsZero the same as nil.
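Segment entries carry the raw tar headers and padding, so their payload length is counted literally, while file entries contribute their recorded size; summing the two streams reproduces the byte length of the original (non-sparse) tar. Below is a self-contained sketch of the same computation, assuming the vbatts/tar-split library that produces this metadata; tarSizeOf is an invented name.

package main

import (
	"archive/tar"
	"bytes"
	"fmt"
	"io"

	"github.com/vbatts/tar-split/tar/asm"
	"github.com/vbatts/tar-split/tar/storage"
)

// tarSizeOf packs a tar blob through tar-split, then recomputes the
// original tar length from the metadata alone, mirroring the logic of
// the tarSizeFromTarSplit helper added above.
func tarSizeOf(tarBlob []byte) (int64, error) {
	tsData := bytes.Buffer{}
	packer := storage.NewJSONPacker(&tsData)
	// File contents are discarded; only the metadata stream is kept.
	r, err := asm.NewInputTarStream(bytes.NewReader(tarBlob), packer, storage.NewDiscardFilePutter())
	if err != nil {
		return -1, err
	}
	if _, err := io.Copy(io.Discard, r); err != nil {
		return -1, err
	}

	var res int64
	unpacker := storage.NewJSONUnpacker(bytes.NewReader(tsData.Bytes()))
	for {
		entry, err := unpacker.Next()
		if err == io.EOF {
			break
		}
		if err != nil {
			return -1, err
		}
		switch entry.Type {
		case storage.SegmentType:
			res += int64(len(entry.Payload)) // raw headers and padding
		case storage.FileType:
			res += entry.Size // logical file size; sparse files unsupported
		}
	}
	return res, nil
}

func main() {
	// Build a tiny tar archive, then verify the size round-trips.
	var tarBuf bytes.Buffer
	tw := tar.NewWriter(&tarBuf)
	_ = tw.WriteHeader(&tar.Header{Name: "hello.txt", Mode: 0o644, Size: 5})
	_, _ = tw.Write([]byte("hello"))
	_ = tw.Close()

	size, err := tarSizeOf(tarBuf.Bytes())
	fmt.Println(size, len(tarBuf.Bytes()), err) // the two sizes should match
}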
@@ -90,6 +90,7 @@ type chunkedDiffer struct {
 	blobDigest digest.Digest

 	blobSize int64
+	uncompressedTarSize int64 // -1 if unknown

 	pullOptions map[string]string
@@ -216,6 +217,7 @@ func makeConvertFromRawDiffer(store storage.Store, blobDigest digest.Digest, blo
 		fsVerityDigests:      make(map[string]string),
 		blobDigest:           blobDigest,
 		blobSize:             blobSize,
+		uncompressedTarSize:  -1, // Will be computed later
 		convertToZstdChunked: true,
 		copyBuffer:           makeCopyBuffer(),
 		layersCache:          layersCache,
@@ -229,6 +231,14 @@ func makeZstdChunkedDiffer(store storage.Store, blobSize int64, tocDigest digest
 	if err != nil {
 		return nil, fmt.Errorf("read zstd:chunked manifest: %w", err)
 	}
+	var uncompressedTarSize int64 = -1
+	if tarSplit != nil {
+		uncompressedTarSize, err = tarSizeFromTarSplit(tarSplit)
+		if err != nil {
+			return nil, fmt.Errorf("computing size from tar-split: %w", err)
+		}
+	}
+
 	layersCache, err := getLayersCache(store)
 	if err != nil {
 		return nil, err
@@ -237,6 +247,7 @@ func makeZstdChunkedDiffer(store storage.Store, blobSize int64, tocDigest digest
 	return &chunkedDiffer{
 		fsVerityDigests:     make(map[string]string),
 		blobSize:            blobSize,
+		uncompressedTarSize: uncompressedTarSize,
 		tocDigest:           tocDigest,
 		copyBuffer:          makeCopyBuffer(),
 		fileType:            fileTypeZstdChunked,
@@ -263,6 +274,7 @@ func makeEstargzChunkedDiffer(store storage.Store, blobSize int64, tocDigest dig
 	return &chunkedDiffer{
 		fsVerityDigests:     make(map[string]string),
 		blobSize:            blobSize,
+		uncompressedTarSize: -1, // We would have to read and decompress the whole layer
 		tocDigest:           tocDigest,
 		copyBuffer:          makeCopyBuffer(),
 		fileType:            fileTypeEstargz,
@@ -1153,7 +1165,6 @@ func (c *chunkedDiffer) ApplyDiff(dest string, options *archive.TarOptions, diff
 	var compressedDigest digest.Digest
 	var uncompressedDigest digest.Digest
-	var convertedBlobSize int64

 	if c.convertToZstdChunked {
 		fd, err := unix.Open(dest, unix.O_TMPFILE|unix.O_RDWR|unix.O_CLOEXEC, 0o600)
@@ -1185,7 +1196,7 @@ func (c *chunkedDiffer) ApplyDiff(dest string, options *archive.TarOptions, diff
 		if err != nil {
 			return graphdriver.DriverWithDifferOutput{}, err
 		}
-		convertedBlobSize = tarSize
+		c.uncompressedTarSize = tarSize
 		// fileSource is a O_TMPFILE file descriptor, so we
 		// need to keep it open until the entire file is processed.
 		defer fileSource.Close()
@@ -1255,6 +1266,7 @@ func (c *chunkedDiffer) ApplyDiff(dest string, options *archive.TarOptions, diff
 		TOCDigest:          c.tocDigest,
 		UncompressedDigest: uncompressedDigest,
 		CompressedDigest:   compressedDigest,
+		Size:               c.uncompressedTarSize,
 	}

 	// When the hard links deduplication is used, file attributes are ignored because setting them
@@ -1268,19 +1280,12 @@ func (c *chunkedDiffer) ApplyDiff(dest string, options *archive.TarOptions, diff
 	var missingParts []missingPart

-	mergedEntries, totalSizeFromTOC, err := c.mergeTocEntries(c.fileType, toc.Entries)
+	mergedEntries, err := c.mergeTocEntries(c.fileType, toc.Entries)
 	if err != nil {
 		return output, err
 	}

 	output.UIDs, output.GIDs = collectIDs(mergedEntries)
-	if convertedBlobSize > 0 {
-		// if the image was converted, store the original tar size, so that
-		// it can be recreated correctly.
-		output.Size = convertedBlobSize
-	} else {
-		output.Size = totalSizeFromTOC
-	}

 	if err := maybeDoIDRemap(mergedEntries, options); err != nil {
 		return output, err
@@ -1597,9 +1602,7 @@ func mustSkipFile(fileType compressedFileType, e internal.FileMetadata) bool {
 	return false
 }

-func (c *chunkedDiffer) mergeTocEntries(fileType compressedFileType, entries []internal.FileMetadata) ([]fileMetadata, int64, error) {
-	var totalFilesSize int64
-
+func (c *chunkedDiffer) mergeTocEntries(fileType compressedFileType, entries []internal.FileMetadata) ([]fileMetadata, error) {
 	countNextChunks := func(start int) int {
 		count := 0
 		for _, e := range entries[start:] {
@@ -1629,10 +1632,8 @@ func (c *chunkedDiffer) mergeTocEntries(fileType compressedFileType, entries []i
 			continue
 		}

-		totalFilesSize += e.Size
-
 		if e.Type == TypeChunk {
-			return nil, -1, fmt.Errorf("chunk type without a regular file")
+			return nil, fmt.Errorf("chunk type without a regular file")
 		}

 		if e.Type == TypeReg {
@@ -1668,7 +1669,7 @@ func (c *chunkedDiffer) mergeTocEntries(fileType compressedFileType, entries []i
 			lastChunkOffset = mergedEntries[i].chunks[j].Offset
 		}
 	}
-	return mergedEntries, totalFilesSize, nil
+	return mergedEntries, nil
 }

 // validateChunkChecksum checks if the file at $root/$path[offset:chunk.ChunkSize] has the
@@ -367,7 +367,7 @@ func checkChownErr(err error, name string, uid, gid int) error {
 	return err
 }

-// Stat contains file states that can be overriden with ContainersOverrideXattr.
+// Stat contains file states that can be overridden with ContainersOverrideXattr.
 type Stat struct {
 	IDs  IDPair
 	Mode os.FileMode
@@ -2201,7 +2201,7 @@ func (s *store) ImageSize(id string) (int64, error) {
 	}
 	// The UncompressedSize is only valid if there's a digest to go with it.
 	n := layer.UncompressedSize
-	if layer.UncompressedDigest == "" {
+	if layer.UncompressedDigest == "" || n == -1 {
 		// Compute the size.
 		n, err = layerStore.DiffSize("", layer.ID)
 		if err != nil {
@@ -1,18 +1,21 @@
+//go:build linux
+
 package storage

 import (
 	"fmt"
 	"os"
 	"os/user"
-	"path/filepath"
 	"strconv"

 	drivers "github.com/containers/storage/drivers"
 	"github.com/containers/storage/pkg/idtools"
 	"github.com/containers/storage/pkg/unshare"
 	"github.com/containers/storage/types"
+	securejoin "github.com/cyphar/filepath-securejoin"
 	libcontainerUser "github.com/moby/sys/user"
 	"github.com/sirupsen/logrus"
+	"golang.org/x/sys/unix"
 )

 // getAdditionalSubIDs looks up the additional IDs configured for
@@ -85,16 +88,23 @@ const nobodyUser = 65534
 // parseMountedFiles returns the maximum UID and GID found in the /etc/passwd and
 // /etc/group files.
 func parseMountedFiles(containerMount, passwdFile, groupFile string) uint32 {
+	var (
+		passwd *os.File
+		group  *os.File
+		size   int
+		err    error
+	)
 	if passwdFile == "" {
-		passwdFile = filepath.Join(containerMount, "etc/passwd")
-	}
-	if groupFile == "" {
-		groupFile = filepath.Join(containerMount, "etc/group")
+		passwd, err = secureOpen(containerMount, "/etc/passwd")
+	} else {
+		// User-specified override from a volume. Will not be in
+		// container root.
+		passwd, err = os.Open(passwdFile)
 	}
-
-	size := 0
-
-	users, err := libcontainerUser.ParsePasswdFile(passwdFile)
+	if err == nil {
+		defer passwd.Close()
+
+		users, err := libcontainerUser.ParsePasswd(passwd)
 	if err == nil {
 		for _, u := range users {
 			// Skip the "nobody" user otherwise we end up with 65536
@@ -110,8 +120,19 @@ func parseMountedFiles(containerMount, passwdFile, groupFile string) uint32 {
 			}
 		}
 	}
+	}

-	groups, err := libcontainerUser.ParseGroupFile(groupFile)
+	if groupFile == "" {
+		group, err = secureOpen(containerMount, "/etc/group")
+	} else {
+		// User-specified override from a volume. Will not be in
+		// container root.
+		group, err = os.Open(groupFile)
+	}
+	if err == nil {
+		defer group.Close()
+
+		groups, err := libcontainerUser.ParseGroup(group)
 	if err == nil {
 		for _, g := range groups {
 			if g.Name == "nobody" || g.Name == "nogroup" {
@@ -122,6 +143,7 @@ func parseMountedFiles(containerMount, passwdFile, groupFile string) uint32 {
 			}
 		}
 	}
+	}

 	return uint32(size)
 }
@@ -309,3 +331,14 @@ func getAutoUserNSIDMappings(
 	gidMap := append(availableGIDs.zip(requestedContainerGIDs), additionalGIDMappings...)
 	return uidMap, gidMap, nil
 }
+
+// Securely open (read-only) a file in a container mount.
+func secureOpen(containerMount, file string) (*os.File, error) {
+	tmpFile, err := securejoin.OpenInRoot(containerMount, file)
+	if err != nil {
+		return nil, err
+	}
+	defer tmpFile.Close()
+
+	return securejoin.Reopen(tmpFile, unix.O_RDONLY)
+}
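securejoin.OpenInRoot resolves the path as though containerMount were the filesystem root, so a symlinked /etc/passwd inside the container cannot escape onto the host, and Reopen then converts the returned handle into an ordinary read-only descriptor. A hedged usage sketch of that pattern follows; the root path is invented for illustration.

package main

import (
	"fmt"
	"os"

	securejoin "github.com/cyphar/filepath-securejoin"
	"golang.org/x/sys/unix"
)

func main() {
	// Hypothetical container root; /etc/passwd inside it may be a
	// symlink, but OpenInRoot will not let the resolution escape it.
	root := "/var/lib/containers/storage/overlay/example/merged"

	handle, err := securejoin.OpenInRoot(root, "/etc/passwd")
	if err != nil {
		fmt.Fprintln(os.Stderr, err)
		return
	}
	defer handle.Close()

	// Reopen turns the handle into a regular read-only descriptor.
	f, err := securejoin.Reopen(handle, unix.O_RDONLY)
	if err != nil {
		fmt.Fprintln(os.Stderr, err)
		return
	}
	defer f.Close()
	fmt.Println("opened", f.Name(), "safely under", root)
}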
@@ -0,0 +1,14 @@
+//go:build !linux
+
+package storage
+
+import (
+	"errors"
+
+	"github.com/containers/storage/pkg/idtools"
+	"github.com/containers/storage/types"
+)
+
+func (s *store) getAutoUserNS(_ *types.AutoUserNsOptions, _ *Image, _ rwLayerStore, _ []roLayerStore) ([]idtools.IDMap, []idtools.IDMap, error) {
+	return nil, nil, errors.New("user namespaces are not supported on this platform")
+}
@@ -1,5 +1,5 @@
-# This is an example goreleaser.yaml file with some sane defaults.
-# Make sure to check the documentation at http://goreleaser.com
+version: 2
+
 before:
   hooks:
     - ./gen.sh
@@ -99,7 +99,7 @@ archives:
 checksum:
   name_template: 'checksums.txt'
 snapshot:
-  name_template: "{{ .Tag }}-next"
+  version_template: "{{ .Tag }}-next"
 changelog:
   sort: asc
   filters:
@@ -16,6 +16,13 @@ This package provides various compression algorithms.

 # changelog

+* Sep 23rd, 2024 - [1.17.10](https://github.com/klauspost/compress/releases/tag/v1.17.10)
+  * gzhttp: Add TransportAlwaysDecompress option. https://github.com/klauspost/compress/pull/978
+  * gzhttp: Add supported decompress request body by @mirecl in https://github.com/klauspost/compress/pull/1002
+  * s2: Add EncodeBuffer buffer recycling callback https://github.com/klauspost/compress/pull/982
+  * zstd: Improve memory usage on small streaming encodes https://github.com/klauspost/compress/pull/1007
+  * flate: read data written with partial flush by @vajexal in https://github.com/klauspost/compress/pull/996
+
 * Jun 12th, 2024 - [1.17.9](https://github.com/klauspost/compress/releases/tag/v1.17.9)
   * s2: Reduce ReadFrom temporary allocations https://github.com/klauspost/compress/pull/949
   * flate, zstd: Shave some bytes off amd64 matchLen by @greatroar in https://github.com/klauspost/compress/pull/963
@@ -6,6 +6,7 @@ package zstd

 import (
 	"crypto/rand"
+	"errors"
 	"fmt"
 	"io"
 	"math"
@@ -149,6 +150,9 @@ func (e *Encoder) ResetContentSize(w io.Writer, size int64) {
 // and write CRC if requested.
 func (e *Encoder) Write(p []byte) (n int, err error) {
 	s := &e.state
+	if s.eofWritten {
+		return 0, ErrEncoderClosed
+	}
 	for len(p) > 0 {
 		if len(p)+len(s.filling) < e.o.blockSize {
 			if e.o.crc {
@@ -288,6 +292,9 @@ func (e *Encoder) nextBlock(final bool) error {
 	s.filling, s.current, s.previous = s.previous[:0], s.filling, s.current
 	s.nInput += int64(len(s.current))
 	s.wg.Add(1)
+	if final {
+		s.eofWritten = true
+	}
 	go func(src []byte) {
 		if debugEncoder {
 			println("Adding block,", len(src), "bytes, final:", final)
@@ -303,9 +310,6 @@ func (e *Encoder) nextBlock(final bool) error {
 		blk := enc.Block()
 		enc.Encode(blk, src)
 		blk.last = final
-		if final {
-			s.eofWritten = true
-		}
 		// Wait for pending writes.
 		s.wWg.Wait()
 		if s.writeErr != nil {
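Moving the eofWritten store out of the worker goroutine (previous hunk) and in front of the go statement means the flag is only ever written synchronously on the caller's goroutine, so the new use-after-Close check in Write cannot race the encode worker. A minimal sketch of the pattern, with invented names:

package main

import (
	"fmt"
	"sync"
)

type encoder struct {
	wg         sync.WaitGroup
	eofWritten bool // only written from the caller's goroutine
}

func (e *encoder) nextBlock(final bool, work func()) {
	// Set the flag before spawning the worker: later calls on the same
	// goroutine observe it immediately, with no cross-goroutine write.
	if final {
		e.eofWritten = true
	}
	e.wg.Add(1)
	go func() {
		defer e.wg.Done()
		work()
	}()
}

func (e *encoder) write() error {
	if e.eofWritten {
		return fmt.Errorf("encoder used after Close")
	}
	return nil
}

func main() {
	e := &encoder{}
	e.nextBlock(true, func() {})
	e.wg.Wait()
	fmt.Println(e.write()) // encoder used after Close
}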
@@ -401,12 +405,20 @@ func (e *Encoder) Flush() error {
 	if len(s.filling) > 0 {
 		err := e.nextBlock(false)
 		if err != nil {
+			// Ignore Flush after Close.
+			if errors.Is(s.err, ErrEncoderClosed) {
+				return nil
+			}
 			return err
 		}
 	}
 	s.wg.Wait()
 	s.wWg.Wait()
 	if s.err != nil {
+		// Ignore Flush after Close.
+		if errors.Is(s.err, ErrEncoderClosed) {
+			return nil
+		}
 		return s.err
 	}
 	return s.writeErr
@@ -422,6 +434,9 @@ func (e *Encoder) Close() error {
 	}
 	err := e.nextBlock(true)
 	if err != nil {
+		if errors.Is(s.err, ErrEncoderClosed) {
+			return nil
+		}
 		return err
 	}
 	if s.frameContentSize > 0 {
@@ -459,6 +474,11 @@ func (e *Encoder) Close() error {
 		}
 		_, s.err = s.w.Write(frame)
 	}
+	if s.err == nil {
+		s.err = ErrEncoderClosed
+		return nil
+	}
+
 	return s.err
 }
@@ -88,6 +88,10 @@ var (
 	// Close has been called.
 	ErrDecoderClosed = errors.New("decoder used after Close")

+	// ErrEncoderClosed will be returned if the Encoder was used after
+	// Close has been called.
+	ErrEncoderClosed = errors.New("encoder used after Close")
+
 	// ErrDecoderNilInput is returned when a nil Reader was provided
 	// and an operation other than Reset/DecodeAll/Close was attempted.
 	ErrDecoderNilInput = errors.New("nil input provided as reader")
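With the sentinel exported, callers can tell use-after-Close apart from genuine encode failures. A small usage sketch against this version of the package, reflecting the behavior introduced above: the first Close succeeds, later writes fail with ErrEncoderClosed, and a subsequent Flush is ignored.

package main

import (
	"bytes"
	"errors"
	"fmt"

	"github.com/klauspost/compress/zstd"
)

func main() {
	var buf bytes.Buffer
	enc, err := zstd.NewWriter(&buf)
	if err != nil {
		panic(err)
	}
	if _, err := enc.Write([]byte("hello")); err != nil {
		panic(err)
	}
	if err := enc.Close(); err != nil { // first Close flushes and returns nil
		panic(err)
	}

	_, err = enc.Write([]byte("more")) // rejected after Close
	fmt.Println(errors.Is(err, zstd.ErrEncoderClosed)) // true
	fmt.Println(enc.Flush())                           // nil: Flush after Close is ignored
}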
@@ -138,7 +138,7 @@ github.com/containernetworking/cni/pkg/version
 # github.com/containernetworking/plugins v1.5.1
 ## explicit; go 1.20
 github.com/containernetworking/plugins/pkg/ns
-# github.com/containers/buildah v1.37.1-0.20241002152719-c68e17b4ffed
+# github.com/containers/buildah v1.37.1-0.20241018144937-2551c8f3d110
 ## explicit; go 1.22.0
 github.com/containers/buildah
 github.com/containers/buildah/bind
@@ -169,7 +169,7 @@ github.com/containers/buildah/pkg/sshagent
 github.com/containers/buildah/pkg/util
 github.com/containers/buildah/pkg/volumes
 github.com/containers/buildah/util
-# github.com/containers/common v0.60.1-0.20241011155906-25644f144d66
+# github.com/containers/common v0.60.1-0.20241018183244-7e6f2b4d6de7
 ## explicit; go 1.22.0
 github.com/containers/common/internal
 github.com/containers/common/internal/attributedstring
@@ -242,7 +242,7 @@ github.com/containers/conmon/runner/config
 # github.com/containers/gvisor-tap-vsock v0.7.5
 ## explicit; go 1.21
 github.com/containers/gvisor-tap-vsock/pkg/types
-# github.com/containers/image/v5 v5.32.3-0.20240923171149-9e1153a28c46
+# github.com/containers/image/v5 v5.32.3-0.20241016192323-a66152c1cdf6
 ## explicit; go 1.22.6
 github.com/containers/image/v5/copy
 github.com/containers/image/v5/directory
@@ -323,7 +323,7 @@ github.com/containers/libhvee/pkg/wmiext
 # github.com/containers/libtrust v0.0.0-20230121012942-c1716e8a8d01
 ## explicit
 github.com/containers/libtrust
-# github.com/containers/luksy v0.0.0-20240812184316-2e7307c02f06
+# github.com/containers/luksy v0.0.0-20241007190014-e2530d691420
 ## explicit; go 1.20
 github.com/containers/luksy
 # github.com/containers/ocicrypt v1.2.0
@@ -353,7 +353,7 @@ github.com/containers/psgo/internal/dev
 github.com/containers/psgo/internal/host
 github.com/containers/psgo/internal/proc
 github.com/containers/psgo/internal/process
-# github.com/containers/storage v1.55.1-0.20241008185503-a397602515fd
+# github.com/containers/storage v1.55.1-0.20241017155235-4db236377c55
 ## explicit; go 1.22.0
 github.com/containers/storage
 github.com/containers/storage/drivers
@@ -722,7 +722,7 @@ github.com/josharian/intern
 # github.com/json-iterator/go v1.1.12
 ## explicit; go 1.12
 github.com/json-iterator/go
-# github.com/klauspost/compress v1.17.10
+# github.com/klauspost/compress v1.17.11
 ## explicit; go 1.21
 github.com/klauspost/compress
 github.com/klauspost/compress/flate