mirror of https://github.com/containers/podman.git
Merge pull request #22683 from grisu48/podmansh_sh
Add configuration for podmansh
commit e53b96cb25
@@ -43,12 +43,13 @@ func main() {
 	if filepath.Base(os.Args[0]) == registry.PodmanSh ||
 		(len(os.Args[0]) > 0 && filepath.Base(os.Args[0][1:]) == registry.PodmanSh) {
 		shell := strings.TrimPrefix(os.Args[0], "-")
+		cfg := registry.PodmanConfig()
 
-		args := []string{shell, "exec", "-i", "--wait", strconv.FormatUint(uint64(registry.PodmanConfig().ContainersConfDefaultsRO.Engine.PodmanshTimeout), 10)}
+		args := []string{shell, "exec", "-i", "--wait", strconv.FormatUint(uint64(cfg.ContainersConfDefaultsRO.PodmanshTimeout()), 10)}
 		if term.IsTerminal(0) || term.IsTerminal(1) || term.IsTerminal(2) {
 			args = append(args, "-t")
 		}
-		args = append(args, registry.PodmanSh, "/bin/sh")
+		args = append(args, cfg.ContainersConfDefaultsRO.Podmansh.Container, cfg.ContainersConfDefaultsRO.Podmansh.Shell)
 		if len(os.Args) > 1 {
 			args = append(args, os.Args[1:]...)
 		}
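For context, the argv[0] check above follows the login-shell convention: when a user's /etc/passwd shell is podmansh, login invokes it with a leading dash (e.g. "-podmansh"). A minimal standalone sketch of that detection (the literal "podmansh" stands in for registry.PodmanSh here, which is an assumption, not a quote from the PR):

    package main

    import (
        "fmt"
        "path/filepath"
        "strings"
    )

    // isPodmansh mirrors the check in the hunk above: accept both the plain
    // base name and the base name with the login-shell "-" prefix stripped.
    func isPodmansh(argv0 string) bool {
        const name = "podmansh" // assumed value of registry.PodmanSh
        return filepath.Base(argv0) == name ||
            (len(argv0) > 0 && filepath.Base(argv0[1:]) == name)
    }

    func main() {
        for _, a := range []string{"podmansh", "-podmansh", "/usr/bin/podmansh", "bash"} {
            fmt.Println(a, "->", isPodmansh(a), "shell:", strings.TrimPrefix(a, "-"))
        }
    }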
go.mod
@@ -14,14 +14,14 @@ require (
 	github.com/checkpoint-restore/go-criu/v7 v7.1.0
 	github.com/containernetworking/plugins v1.5.0
 	github.com/containers/buildah v1.35.1-0.20240510150258-77f239ae12e5
-	github.com/containers/common v0.58.1-0.20240517090124-fa276b325847
+	github.com/containers/common v0.58.1-0.20240523020001-79d954c77663
 	github.com/containers/conmon v2.0.20+incompatible
 	github.com/containers/gvisor-tap-vsock v0.7.4-0.20240515153903-01a1a0cd3f70
-	github.com/containers/image/v5 v5.30.2-0.20240509191815-9318d0eaaf78
+	github.com/containers/image/v5 v5.31.0
 	github.com/containers/libhvee v0.7.1
 	github.com/containers/ocicrypt v1.1.10
 	github.com/containers/psgo v1.9.0
-	github.com/containers/storage v1.53.1-0.20240507041447-6cee10795c2d
+	github.com/containers/storage v1.54.0
 	github.com/containers/winquit v1.1.0
 	github.com/coreos/go-systemd/v22 v22.5.1-0.20231103132048-7d375ecc2b09
 	github.com/coreos/stream-metadata-go v0.4.4
go.sum
@@ -79,14 +79,14 @@ github.com/containernetworking/plugins v1.5.0 h1:P09DMlfvvsLSskDoftnuwXY7lwa7IAh
 github.com/containernetworking/plugins v1.5.0/go.mod h1:bcXMvG9gWGc6jVXeodmMzuXmXqpqMguZm6Zu/oIr7AA=
 github.com/containers/buildah v1.35.1-0.20240510150258-77f239ae12e5 h1:xtKtw/g2iDkirqSw6Dvvc2ZMPxBYhyN9xPdH81a7hO4=
 github.com/containers/buildah v1.35.1-0.20240510150258-77f239ae12e5/go.mod h1:ezOOMchy0Dcu/jKNNsTJbtxvOrhdogVkbG+UxkG77EY=
-github.com/containers/common v0.58.1-0.20240517090124-fa276b325847 h1:34cLMWNLLytr35gxiklxsKfjrbYIW/GArhTF7hakx2Q=
-github.com/containers/common v0.58.1-0.20240517090124-fa276b325847/go.mod h1:9BdyHXC2fM6q+gqTVmnaf1tdGLnne0votxdPOTN3aY4=
+github.com/containers/common v0.58.1-0.20240523020001-79d954c77663 h1:uuVZV1SZO4Mdtiyngf91HytchzlXPW90F8weyXk71hY=
+github.com/containers/common v0.58.1-0.20240523020001-79d954c77663/go.mod h1:53VicJCZ2AD0O+Br7VVoyrS7viXF4YmwlTIocWUT8XE=
 github.com/containers/conmon v2.0.20+incompatible h1:YbCVSFSCqFjjVwHTPINGdMX1F6JXHGTUje2ZYobNrkg=
 github.com/containers/conmon v2.0.20+incompatible/go.mod h1:hgwZ2mtuDrppv78a/cOBNiCm6O0UMWGx1mu7P00nu5I=
 github.com/containers/gvisor-tap-vsock v0.7.4-0.20240515153903-01a1a0cd3f70 h1:aACcXSIgcuPq5QdNZZ8B53BCdhqYvw33/8QmZWJATvg=
 github.com/containers/gvisor-tap-vsock v0.7.4-0.20240515153903-01a1a0cd3f70/go.mod h1:v2JP4sZFltFJ8smHLVm12Ng3jHetrNh565ZwWpB5pzs=
-github.com/containers/image/v5 v5.30.2-0.20240509191815-9318d0eaaf78 h1:1fktdUOKdvMbDbAullFBjslw1VewscLwTjsH2S+6ieM=
-github.com/containers/image/v5 v5.30.2-0.20240509191815-9318d0eaaf78/go.mod h1:nw5UU0qHFIsg+3cj1u1kP/CmwEioiJrVSDgg1QLhirw=
+github.com/containers/image/v5 v5.31.0 h1:eDFVlz5XaYICxe9dXpf23htEKvyosgkl62mJlIATXE4=
+github.com/containers/image/v5 v5.31.0/go.mod h1:5QfOqSackPkSbF7Qxc1DnVNnPJKQ+KWLkfEfDpK590Q=
 github.com/containers/libhvee v0.7.1 h1:dWGF5GLq9DZvXo3P8aDp3cNieL5eCaSell4UmeA/jY4=
 github.com/containers/libhvee v0.7.1/go.mod h1:fRKB3AyIqHMvq6xaeYhTpckM2cdoq0oecolyoiuLP7M=
 github.com/containers/libtrust v0.0.0-20230121012942-c1716e8a8d01 h1:Qzk5C6cYglewc+UyGf6lc8Mj2UaPTHy/iF2De0/77CA=
@@ -97,8 +97,8 @@ github.com/containers/ocicrypt v1.1.10 h1:r7UR6o8+lyhkEywetubUUgcKFjOWOaWz8cEBrC
 github.com/containers/ocicrypt v1.1.10/go.mod h1:YfzSSr06PTHQwSTUKqDSjish9BeW1E4HUmreluQcMd8=
 github.com/containers/psgo v1.9.0 h1:eJ74jzSaCHnWt26OlKZROSyUyRcGDf+gYBdXnxrMW4g=
 github.com/containers/psgo v1.9.0/go.mod h1:0YoluUm43Mz2UnBIh1P+6V6NWcbpTL5uRtXyOcH0B5A=
-github.com/containers/storage v1.53.1-0.20240507041447-6cee10795c2d h1:AYhkfrN62V4+14wj2kC+HSHXWr0gKsUdzE1sWcSN6g4=
-github.com/containers/storage v1.53.1-0.20240507041447-6cee10795c2d/go.mod h1:PlMOoinRrBSnhYODLxt4EXl0nmJt+X0kjG0Xdt9fMTw=
+github.com/containers/storage v1.54.0 h1:xwYAlf6n9OnIlURQLLg3FYHbO74fQ/2W2N6EtQEUM4I=
+github.com/containers/storage v1.54.0/go.mod h1:PlMOoinRrBSnhYODLxt4EXl0nmJt+X0kjG0Xdt9fMTw=
 github.com/containers/winquit v1.1.0 h1:jArun04BNDQvt2W0Y78kh9TazN2EIEMG5Im6/JY7+pE=
 github.com/containers/winquit v1.1.0/go.mod h1:PsPeZlnbkmGGIToMPHF1zhWjBUkd8aHjMOr/vFcPxw8=
 github.com/coreos/go-oidc/v3 v3.10.0 h1:tDnXHnLyiTVyT/2zLDGj09pFPkhND8Gl8lnTRhoEaJU=
@@ -132,8 +132,8 @@ github.com/disiqueira/gotree/v3 v3.0.2 h1:ik5iuLQQoufZBNPY518dXhiO5056hyNBIK9lWh
 github.com/disiqueira/gotree/v3 v3.0.2/go.mod h1:ZuyjE4+mUQZlbpkI24AmruZKhg3VHEgPLDY8Qk+uUu8=
 github.com/distribution/reference v0.6.0 h1:0IXCQ5g4/QMHHkarYzh5l+u8T3t73zM5QvfrDyIgxBk=
 github.com/distribution/reference v0.6.0/go.mod h1:BbU0aIcezP1/5jX/8MP0YiH4SdvB5Y4f/wlDRiLyi3E=
-github.com/docker/cli v26.1.2+incompatible h1:/MWZpUMMlr1hCGyquL8QNbL1hbivQ1kLuT3Z9s1Tlpg=
-github.com/docker/cli v26.1.2+incompatible/go.mod h1:JLrzqnKDaYBop7H2jaqPtU4hHvMKP+vjCwu2uszcLI8=
+github.com/docker/cli v26.1.3+incompatible h1:bUpXT/N0kDE3VUHI2r5VMsYQgi38kYuoC0oL9yt3lqc=
+github.com/docker/cli v26.1.3+incompatible/go.mod h1:JLrzqnKDaYBop7H2jaqPtU4hHvMKP+vjCwu2uszcLI8=
 github.com/docker/distribution v2.8.3+incompatible h1:AtKxIZ36LoNK51+Z6RpzLpddBirtxJnzDrHLEKxTAYk=
 github.com/docker/distribution v2.8.3+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w=
 github.com/docker/docker v26.1.3+incompatible h1:lLCzRbrVZrljpVNobJu1J2FHk8V0s4BawoZippkc+xo=
@@ -650,7 +650,7 @@ var _ = Describe("Podman create", func() {
 	It("podman create --platform", func() {
 		session := podmanTest.Podman([]string{"create", "--platform=linux/bogus", ALPINE})
 		session.WaitWithDefaultTimeout()
-		Expect(session).Should(ExitWithError(125, "no image found in manifest list for architecture bogus"))
+		Expect(session).Should(ExitWithError(125, `no image found in manifest list for architecture "bogus"`))
 
 		session = podmanTest.Podman([]string{"create", "--platform=linux/arm64", "--os", "windows", ALPINE})
 		session.WaitWithDefaultTimeout()
@@ -542,7 +542,7 @@ var _ = Describe("Podman pull", func() {
 	It("podman pull --platform", func() {
 		session := podmanTest.Podman([]string{"pull", "-q", "--platform=linux/bogus", ALPINE})
 		session.WaitWithDefaultTimeout()
-		Expect(session).Should(ExitWithError(125, "no image found in manifest list for architecture bogus"))
+		Expect(session).Should(ExitWithError(125, `no image found in manifest list for architecture "bogus"`))
 
 		session = podmanTest.Podman([]string{"pull", "-q", "--platform=linux/arm64", "--os", "windows", ALPINE})
 		session.WaitWithDefaultTimeout()
@@ -565,7 +565,7 @@ var _ = Describe("Podman pull", func() {
 	It("podman pull --arch", func() {
 		session := podmanTest.Podman([]string{"pull", "-q", "--arch=bogus", ALPINE})
 		session.WaitWithDefaultTimeout()
-		Expect(session).Should(ExitWithError(125, "no image found in manifest list for architecture bogus"))
+		Expect(session).Should(ExitWithError(125, `no image found in manifest list for architecture "bogus"`))
 
 		session = podmanTest.Podman([]string{"pull", "-q", "--arch=arm64", "--os", "windows", ALPINE})
 		session.WaitWithDefaultTimeout()
@@ -21,6 +21,10 @@ profile {{.Name}} flags=(attach_disconnected,mediate_deleted) {
   # Allow signals from privileged profiles and from within the same profile
   signal (receive) peer=unconfined,
   signal (send,receive) peer={{.Name}},
+  # Allow certain signals from OCI runtimes (podman, runc and crun)
+  signal (receive) peer={/usr/bin/,/usr/sbin/,}runc,
+  signal (receive) peer={/usr/bin/,/usr/sbin/,}crun*,
+  signal (receive) set=(int, quit, kill, term) peer={/usr/bin/,/usr/sbin/,}podman,
 {{end}}
 
   deny @{PROC}/* w, # deny write for all files directly in /proc (not in a subdir)
@@ -57,6 +57,8 @@ type Config struct {
 	ConfigMaps ConfigMapConfig `toml:"configmaps"`
 	// Farms defines configurations for the buildfarm farms
 	Farms FarmConfig `toml:"farms"`
+	// Podmansh defined configurations for the podman shell
+	Podmansh PodmanshConfig `toml:"podmansh"`
 
 	loadedModules []string // only used at runtime to store which modules were loaded
 }
@@ -543,6 +545,7 @@ type EngineConfig struct {
 	// PodmanshTimeout is the number of seconds to wait for podmansh logins.
 	// In other words, the timeout for the `podmansh` container to be in running
 	// state.
+	// Deprecated: Use podmansh.Timeout instead. podmansh.Timeout has precedence.
 	PodmanshTimeout uint `toml:"podmansh_timeout,omitempty,omitzero"`
 }
 
@@ -695,6 +698,19 @@ type Destination struct {
 	IsMachine bool `json:",omitempty" toml:"is_machine,omitempty"`
 }
 
+// PodmanshConfig represents configuration for the podman shell
+type PodmanshConfig struct {
+	// Shell to start in container, default: "/bin/sh"
+	Shell string `toml:"shell,omitempty"`
+	// Name of the container the podmansh user should join
+	Container string `toml:"container,omitempty"`
+
+	// Timeout is the number of seconds to wait for podmansh logins.
+	// In other words, the timeout for the `podmansh` container to be in running
+	// state.
+	Timeout uint `toml:"timeout,omitempty,omitzero"`
+}
+
 // Consumes container image's os and arch and returns if any dedicated runtime was
 // configured otherwise returns default runtime.
 func (c *EngineConfig) ImagePlatformToRuntime(os string, arch string) string {
@@ -713,9 +729,19 @@ func (c *Config) CheckCgroupsAndAdjustConfig() {
 		return
 	}
 
-	session := os.Getenv("DBUS_SESSION_BUS_ADDRESS")
-	hasSession := session != ""
-	if hasSession {
+	hasSession := false
+
+	session, found := os.LookupEnv("DBUS_SESSION_BUS_ADDRESS")
+	if !found {
+		sessionAddr := filepath.Join(os.Getenv("XDG_RUNTIME_DIR"), "bus")
+		if err := fileutils.Exists(sessionAddr); err == nil {
+			sessionAddr, err = filepath.EvalSymlinks(sessionAddr)
+			if err == nil {
+				os.Setenv("DBUS_SESSION_BUS_ADDRESS", "unix:path="+sessionAddr)
+				hasSession = true
+			}
+		}
+	} else {
 		for _, part := range strings.Split(session, ",") {
 			if strings.HasPrefix(part, "unix:path=") {
 				err := fileutils.Exists(strings.TrimPrefix(part, "unix:path="))
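The hunk above adds a fallback when DBUS_SESSION_BUS_ADDRESS is unset: probe the conventional session-bus socket at $XDG_RUNTIME_DIR/bus. A self-contained sketch of that logic, with plain os.Stat standing in for the fileutils.Exists helper used in the real code:

    package main

    import (
        "fmt"
        "os"
        "path/filepath"
    )

    // sessionBusAddress prefers an explicit DBUS_SESSION_BUS_ADDRESS and
    // otherwise probes $XDG_RUNTIME_DIR/bus, resolving symlinks as above.
    func sessionBusAddress() (string, bool) {
        if addr, found := os.LookupEnv("DBUS_SESSION_BUS_ADDRESS"); found {
            return addr, true
        }
        sock := filepath.Join(os.Getenv("XDG_RUNTIME_DIR"), "bus")
        if _, err := os.Stat(sock); err != nil {
            return "", false
        }
        if resolved, err := filepath.EvalSymlinks(sock); err == nil {
            return "unix:path=" + resolved, true
        }
        return "", false
    }

    func main() {
        addr, ok := sessionBusAddress()
        fmt.Println(addr, ok)
    }

Note the switch from os.Getenv to os.LookupEnv: the latter distinguishes "unset" from "set to empty", which is what makes the fallback decision reliable.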
@@ -1197,3 +1223,13 @@ func (c *Config) FindInitBinary() (string, error) {
 	}
 	return c.FindHelperBinary(defaultInitName, true)
 }
+
+// PodmanshTimeout returns the timeout in seconds for podmansh to connect to the container.
+// Returns podmansh.Timeout if set, otherwise engine.PodmanshTimeout for backwards compatibility.
+func (c *Config) PodmanshTimeout() uint {
+	// podmansh.Timeout has precedence, if set
+	if c.Podmansh.Timeout > 0 {
+		return c.Podmansh.Timeout
+	}
+	return c.Engine.PodmanshTimeout
+}
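The precedence rule is small enough to illustrate in isolation; a sketch with simplified names rather than the actual containers/common types:

    package main

    import "fmt"

    type config struct {
        podmanshTimeout uint // new [podmansh] timeout; 0 means "not set"
        engineTimeout   uint // deprecated [engine] podmansh_timeout
    }

    // effectiveTimeout mirrors Config.PodmanshTimeout: the new field wins
    // whenever it is non-zero, otherwise the deprecated engine field applies.
    func (c config) effectiveTimeout() uint {
        if c.podmanshTimeout > 0 {
            return c.podmanshTimeout
        }
        return c.engineTimeout
    }

    func main() {
        fmt.Println(config{engineTimeout: 30}.effectiveTimeout())                       // 30: fallback
        fmt.Println(config{podmanshTimeout: 300, engineTimeout: 30}.effectiveTimeout()) // 300: new field wins
    }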
@@ -759,9 +759,6 @@ default_sysctls = [
 # A value of 0 is treated as no timeout.
 #volume_plugin_timeout = 5
 
-# Default timeout in seconds for podmansh logins.
-#podmansh_timeout = 30
-
 # Paths to look for a valid OCI runtime (crun, runc, kata, runsc, krun, etc)
 [engine.runtimes]
 #crun = [
@@ -889,3 +886,14 @@ default_sysctls = [
 #
 # map of existing farms
 #[farms.list]
+
+[podmansh]
+# Shell to spawn in container. Default: /bin/sh.
+#shell = "/bin/sh"
+#
+# Name of the container the podmansh user should join.
+#container = "podmansh"
+#
+# Default timeout in seconds for podmansh logins.
+# Favored over the deprecated "podmansh_timeout" field.
+#timeout = 30
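As a rough sketch of how such a table could be consumed, here is a tiny Go program decoding the new section (github.com/BurntSushi/toml is used purely for illustration; the real loading path lives in containers/common and is not shown in this PR):

    package main

    import (
        "fmt"

        "github.com/BurntSushi/toml"
    )

    type podmanshConfig struct {
        Shell     string `toml:"shell"`
        Container string `toml:"container"`
        Timeout   uint   `toml:"timeout"`
    }

    type config struct {
        Podmansh podmanshConfig `toml:"podmansh"`
    }

    func main() {
        data := `
    [podmansh]
    shell = "/bin/bash"
    container = "devshell"
    timeout = 300
    `
        var cfg config
        if _, err := toml.Decode(data, &cfg); err != nil {
            panic(err)
        }
        fmt.Printf("%+v\n", cfg.Podmansh) // {Shell:/bin/bash Container:devshell Timeout:300}
    }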
@@ -265,10 +265,11 @@ func defaultConfig() (*Config, error) {
 			CNIPluginDirs:      attributedstring.NewSlice(DefaultCNIPluginDirs),
 			NetavarkPluginDirs: attributedstring.NewSlice(DefaultNetavarkPluginDirs),
 		},
-		Engine:  *defaultEngineConfig,
-		Secrets: defaultSecretConfig(),
-		Machine: defaultMachineConfig(),
-		Farms:   defaultFarmConfig(),
+		Engine:   *defaultEngineConfig,
+		Secrets:  defaultSecretConfig(),
+		Machine:  defaultMachineConfig(),
+		Farms:    defaultFarmConfig(),
+		Podmansh: defaultPodmanshConfig(),
 	}, nil
 }
 
@@ -307,6 +308,18 @@ func defaultFarmConfig() FarmConfig {
 	}
 }
 
+// defaultPodmanshConfig returns the default podmansh configuration.
+func defaultPodmanshConfig() PodmanshConfig {
+	return PodmanshConfig{
+		Shell:     "/bin/sh",
+		Container: "podmansh",
+
+		// A value of 0 means "not set", needed to distinguish if engine.podmansh_timeout or podmansh.timeout should be used
+		// This is needed to keep backwards compatibility to engine.PodmanshTimeout.
+		Timeout: uint(0),
+	}
+}
+
 // defaultEngineConfig returns a default engine configuration. Note that the
 // config is different for root and rootless. It also parses the storage.conf.
 func defaultEngineConfig() (*EngineConfig, error) {
@@ -360,7 +373,7 @@ func defaultEngineConfig() (*EngineConfig, error) {
 	c.CgroupManager = defaultCgroupManager()
 	c.ServiceTimeout = uint(5)
 	c.StopTimeout = uint(10)
-	c.PodmanshTimeout = uint(30)
+	c.PodmanshTimeout = uint(30) // deprecated: use podmansh.timeout instead, kept for backwards-compatibility
 	c.ExitCommandDelay = uint(5 * 60)
 	c.Remote = isRemote()
 	c.Retry = 3
@@ -48,6 +48,7 @@ func (m *Manager) Monitor(ctx context.Context, sync chan<- error) {
 	for {
 		select {
 		case event := <-watcher.Events:
+			m.lock.Lock()
 			m.hooks = make(map[string]*current.Hook)
 			for _, dir := range m.directories {
 				err = ReadDir(dir, m.extensionStages, m.hooks)
@@ -55,6 +56,7 @@ func (m *Manager) Monitor(ctx context.Context, sync chan<- error) {
 					logrus.Errorf("Failed loading hooks for %s: %v", event.Name, err)
 				}
 			}
+			m.lock.Unlock()
 		case <-ctx.Done():
 			err = ctx.Err()
 			logrus.Debugf("hook monitoring canceled: %v", err)
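The two hunks above wrap the hooks-map rebuild in the manager's mutex so concurrent readers never observe a half-populated map. A self-contained sketch of the same guard pattern (toy types, not the hooks package API):

    package main

    import (
        "fmt"
        "sync"
    )

    type manager struct {
        lock  sync.Mutex
        hooks map[string]string
    }

    // reload rebuilds the map under the lock, as the Monitor fix does,
    // so a concurrent get never sees the map mid-rebuild.
    func (m *manager) reload(entries map[string]string) {
        m.lock.Lock()
        defer m.lock.Unlock()
        m.hooks = make(map[string]string)
        for k, v := range entries {
            m.hooks[k] = v
        }
    }

    func (m *manager) get(name string) (string, bool) {
        m.lock.Lock()
        defer m.lock.Unlock()
        v, ok := m.hooks[name]
        return v, ok
    }

    func main() {
        m := &manager{hooks: map[string]string{}}
        m.reload(map[string]string{"oci-hook": "/usr/libexec/hook"})
        fmt.Println(m.get("oci-hook"))
    }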
@@ -1,4 +1,4 @@
 package version
 
 // Version is the version of the build.
-const Version = "0.59.0-dev"
+const Version = "0.60.0-dev"
@@ -47,13 +47,17 @@ func (ic *imageCopier) blobPipelineDecryptionStep(stream *sourceStream, srcInfo
 	desc := imgspecv1.Descriptor{
 		Annotations: stream.info.Annotations,
 	}
-	reader, decryptedDigest, err := ocicrypt.DecryptLayer(ic.c.options.OciDecryptConfig, stream.reader, desc, false)
+	// DecryptLayer supposedly returns a digest of the decrypted stream.
+	// In pratice, that value is never set in the current implementation.
+	// And we shouldn’t use it anyway, because it is not trusted: encryption can be made to a public key,
+	// i.e. it doesn’t authenticate the origin of the metadata in any way.
+	reader, _, err := ocicrypt.DecryptLayer(ic.c.options.OciDecryptConfig, stream.reader, desc, false)
 	if err != nil {
 		return nil, fmt.Errorf("decrypting layer %s: %w", srcInfo.Digest, err)
 	}
 
 	stream.reader = reader
-	stream.info.Digest = decryptedDigest
+	stream.info.Digest = ""
 	stream.info.Size = -1
 	maps.DeleteFunc(stream.info.Annotations, func(k string, _ string) bool {
 		return strings.HasPrefix(k, "org.opencontainers.image.enc")
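Clearing stream.info.Digest forces the copy pipeline to recompute a digest from the bytes it actually reads, rather than trusting metadata. Computing a digest while streaming looks roughly like this sketch (using github.com/opencontainers/go-digest; the payload here is a stand-in, not real layer data):

    package main

    import (
        "fmt"
        "io"
        "strings"

        "github.com/opencontainers/go-digest"
    )

    func main() {
        payload := strings.NewReader("decrypted layer bytes")
        digester := digest.Canonical.Digester()
        // TeeReader feeds every byte we consume into the digester as well.
        n, err := io.Copy(io.Discard, io.TeeReader(payload, digester.Hash()))
        if err != nil {
            panic(err)
        }
        fmt.Println(n, digester.Digest()) // a trusted digest of what was actually read
    }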
@@ -74,7 +74,7 @@ func determineManifestConversion(in determineManifestConversionInputs) (manifest
 	srcType := in.srcMIMEType
 	normalizedSrcType := manifest.NormalizedMIMEType(srcType)
 	if srcType != normalizedSrcType {
-		logrus.Debugf("Source manifest MIME type %s, treating it as %s", srcType, normalizedSrcType)
+		logrus.Debugf("Source manifest MIME type %q, treating it as %q", srcType, normalizedSrcType)
 		srcType = normalizedSrcType
 	}
 
@@ -237,7 +237,7 @@ func (c *copier) determineListConversion(currentListMIMEType string, destSupport
 		}
 	}
 
-	logrus.Debugf("Manifest list has MIME type %s, ordered candidate list [%s]", currentListMIMEType, strings.Join(destSupportedMIMETypes, ", "))
+	logrus.Debugf("Manifest list has MIME type %q, ordered candidate list [%s]", currentListMIMEType, strings.Join(destSupportedMIMETypes, ", "))
 	if len(prioritizedTypes.list) == 0 {
 		return "", nil, fmt.Errorf("destination does not support any supported manifest list types (%v)", manifest.SupportedListMIMETypes)
 	}
@@ -4,6 +4,7 @@ import (
 	"context"
 	"fmt"
 	"io"
+	"math"
 	"time"
 
 	"github.com/containers/image/v5/internal/private"
@@ -151,12 +152,18 @@ type blobChunkAccessorProxy struct {
 // The specified chunks must be not overlapping and sorted by their offset.
 // The readers must be fully consumed, in the order they are returned, before blocking
 // to read the next chunk.
+// If the Length for the last chunk is set to math.MaxUint64, then it
+// fully fetches the remaining data from the offset to the end of the blob.
 func (s *blobChunkAccessorProxy) GetBlobAt(ctx context.Context, info types.BlobInfo, chunks []private.ImageSourceChunk) (chan io.ReadCloser, chan error, error) {
 	start := time.Now()
 	rc, errs, err := s.wrapped.GetBlobAt(ctx, info, chunks)
 	if err == nil {
 		total := int64(0)
 		for _, c := range chunks {
+			// do not update the progress bar if there is a chunk with unknown length.
+			if c.Length == math.MaxUint64 {
+				return rc, errs, err
+			}
 			total += int64(c.Length)
 		}
 		s.bar.EwmaIncrInt64(total, time.Since(start))
@@ -78,7 +78,7 @@ func (r *Reader) List() ([][]types.ImageReference, error) {
 			}
 			nt, ok := parsedTag.(reference.NamedTagged)
 			if !ok {
-				return nil, fmt.Errorf("Invalid tag %s (%s): does not contain a tag", tag, parsedTag.String())
+				return nil, fmt.Errorf("Invalid tag %q (%s): does not contain a tag", tag, parsedTag.String())
 			}
 			ref, err := newReference(r.path, nt, -1, r.archive, nil)
 			if err != nil {
@@ -116,7 +116,7 @@ func imageLoad(ctx context.Context, c *client.Client, reader *io.PipeReader) err
 			return fmt.Errorf("parsing docker load progress: %w", err)
 		}
 		if msg.Error != nil {
-			return fmt.Errorf("docker engine reported: %s", msg.Error.Message)
+			return fmt.Errorf("docker engine reported: %q", msg.Error.Message)
 		}
 	}
 	return nil // No error reported = success
@@ -1097,6 +1097,11 @@ func isManifestUnknownError(err error) bool {
 	if errors.As(err, &e) && e.ErrorCode() == errcode.ErrorCodeUnknown && e.Message == "Not Found" {
 		return true
 	}
+	// Harbor v2.10.2
+	if errors.As(err, &e) && e.ErrorCode() == errcode.ErrorCodeUnknown && strings.Contains(strings.ToLower(e.Message), "not found") {
+		return true
+	}
+
 	// opencontainers/distribution-spec does not require the errcode.Error payloads to be used,
 	// but specifies that the HTTP status must be 404.
 	var unexpected *unexpectedHTTPResponseError
@@ -5,6 +5,7 @@ import (
 	"errors"
 	"fmt"
 	"io"
+	"math"
 	"mime"
 	"mime/multipart"
 	"net/http"
@@ -260,9 +261,15 @@ func splitHTTP200ResponseToPartial(streams chan io.ReadCloser, errs chan error,
 			}
 			currentOffset += toSkip
 		}
+		var reader io.Reader
+		if c.Length == math.MaxUint64 {
+			reader = body
+		} else {
+			reader = io.LimitReader(body, int64(c.Length))
+		}
 		s := signalCloseReader{
 			closed:        make(chan struct{}),
-			stream:        io.NopCloser(io.LimitReader(body, int64(c.Length))),
+			stream:        io.NopCloser(reader),
 			consumeStream: true,
 		}
 		streams <- s
@@ -343,12 +350,24 @@ func parseMediaType(contentType string) (string, map[string]string, error) {
 // The specified chunks must be not overlapping and sorted by their offset.
 // The readers must be fully consumed, in the order they are returned, before blocking
 // to read the next chunk.
+// If the Length for the last chunk is set to math.MaxUint64, then it
+// fully fetches the remaining data from the offset to the end of the blob.
 func (s *dockerImageSource) GetBlobAt(ctx context.Context, info types.BlobInfo, chunks []private.ImageSourceChunk) (chan io.ReadCloser, chan error, error) {
 	headers := make(map[string][]string)
 
 	rangeVals := make([]string, 0, len(chunks))
+	lastFound := false
 	for _, c := range chunks {
-		rangeVals = append(rangeVals, fmt.Sprintf("%d-%d", c.Offset, c.Offset+c.Length-1))
+		if lastFound {
+			return nil, nil, fmt.Errorf("internal error: another chunk requested after an util-EOF chunk")
+		}
+		// If the Length is set to -1, then request anything after the specified offset.
+		if c.Length == math.MaxUint64 {
+			lastFound = true
+			rangeVals = append(rangeVals, fmt.Sprintf("%d-", c.Offset))
+		} else {
+			rangeVals = append(rangeVals, fmt.Sprintf("%d-%d", c.Offset, c.Offset+c.Length-1))
+		}
 	}
 
 	headers["Range"] = []string{fmt.Sprintf("bytes=%s", strings.Join(rangeVals, ","))}
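A tiny runnable sketch of the multi-range header construction above, including the open-ended final range that the math.MaxUint64 sentinel produces:

    package main

    import (
        "fmt"
        "math"
        "strings"
    )

    type chunk struct{ offset, length uint64 }

    func rangeHeader(chunks []chunk) string {
        vals := make([]string, 0, len(chunks))
        for _, c := range chunks {
            if c.length == math.MaxUint64 {
                // open-ended range: everything from offset to EOF
                vals = append(vals, fmt.Sprintf("%d-", c.offset))
            } else {
                vals = append(vals, fmt.Sprintf("%d-%d", c.offset, c.offset+c.length-1))
            }
        }
        return "bytes=" + strings.Join(vals, ",")
    }

    func main() {
        fmt.Println(rangeHeader([]chunk{{0, 100}, {100, math.MaxUint64}}))
        // Output: bytes=0-99,100-
    }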
@@ -231,7 +231,7 @@ func (r *Reader) openTarComponent(componentPath string) (io.ReadCloser, error) {
 	}
 
 	if !header.FileInfo().Mode().IsRegular() {
-		return nil, fmt.Errorf("Error reading tar archive component %s: not a regular file", header.Name)
+		return nil, fmt.Errorf("Error reading tar archive component %q: not a regular file", header.Name)
 	}
 	succeeded = true
 	return &tarReadCloser{Reader: tarReader, backingFile: f}, nil
@@ -262,7 +262,7 @@ func findTarComponent(inputFile io.Reader, componentPath string) (*tar.Reader, *
 func (r *Reader) readTarComponent(path string, limit int) ([]byte, error) {
 	file, err := r.openTarComponent(path)
 	if err != nil {
-		return nil, fmt.Errorf("loading tar component %s: %w", path, err)
+		return nil, fmt.Errorf("loading tar component %q: %w", path, err)
 	}
 	defer file.Close()
 	bytes, err := iolimits.ReadAtMost(file, limit)
@@ -95,10 +95,10 @@ func (s *Source) ensureCachedDataIsPresentPrivate() error {
 	}
 	var parsedConfig manifest.Schema2Image // There's a lot of info there, but we only really care about layer DiffIDs.
 	if err := json.Unmarshal(configBytes, &parsedConfig); err != nil {
-		return fmt.Errorf("decoding tar config %s: %w", tarManifest.Config, err)
+		return fmt.Errorf("decoding tar config %q: %w", tarManifest.Config, err)
 	}
 	if parsedConfig.RootFS == nil {
-		return fmt.Errorf("Invalid image config (rootFS is not set): %s", tarManifest.Config)
+		return fmt.Errorf("Invalid image config (rootFS is not set): %q", tarManifest.Config)
 	}
 
 	knownLayers, err := s.prepareLayerData(tarManifest, &parsedConfig)
@@ -144,7 +144,7 @@ func (s *Source) prepareLayerData(tarManifest *ManifestItem, parsedConfig *manif
 		}
 		layerPath := path.Clean(tarManifest.Layers[i])
 		if _, ok := unknownLayerSizes[layerPath]; ok {
-			return nil, fmt.Errorf("Layer tarfile %s used for two different DiffID values", layerPath)
+			return nil, fmt.Errorf("Layer tarfile %q used for two different DiffID values", layerPath)
 		}
 		li := &layerInfo{ // A new element in each iteration
 			path: layerPath,
@@ -179,7 +179,7 @@ func (s *Source) prepareLayerData(tarManifest *ManifestItem, parsedConfig *manif
 			// the slower method of checking if it's compressed.
 			uncompressedStream, isCompressed, err := compression.AutoDecompress(t)
 			if err != nil {
-				return nil, fmt.Errorf("auto-decompressing %s to determine its size: %w", layerPath, err)
+				return nil, fmt.Errorf("auto-decompressing %q to determine its size: %w", layerPath, err)
 			}
 			defer uncompressedStream.Close()
 
@@ -187,7 +187,7 @@ func (s *Source) prepareLayerData(tarManifest *ManifestItem, parsedConfig *manif
 			if isCompressed {
 				uncompressedSize, err = io.Copy(io.Discard, uncompressedStream)
 				if err != nil {
-					return nil, fmt.Errorf("reading %s to find its size: %w", layerPath, err)
+					return nil, fmt.Errorf("reading %q to find its size: %w", layerPath, err)
 				}
 			}
 			li.size = uncompressedSize
@@ -164,7 +164,7 @@ func (w *Writer) writeLegacyMetadataLocked(layerDescriptors []manifest.Schema2De
 			return fmt.Errorf("marshaling layer config: %w", err)
 		}
 		delete(layerConfig, "layer_id")
-		layerID := digest.Canonical.FromBytes(b).Hex()
+		layerID := digest.Canonical.FromBytes(b).Encoded()
 		layerConfig["id"] = layerID
 
 		configBytes, err := json.Marshal(layerConfig)
@@ -309,10 +309,10 @@ func (w *Writer) Close() error {
 // NOTE: This is an internal implementation detail, not a format property, and can change
 // any time.
 func (w *Writer) configPath(configDigest digest.Digest) (string, error) {
-	if err := configDigest.Validate(); err != nil { // digest.Digest.Hex() panics on failure, and could possibly result in unexpected paths, so validate explicitly.
+	if err := configDigest.Validate(); err != nil { // digest.Digest.Encoded() panics on failure, and could possibly result in unexpected paths, so validate explicitly.
 		return "", err
 	}
-	return configDigest.Hex() + ".json", nil
+	return configDigest.Encoded() + ".json", nil
 }
 
 // physicalLayerPath returns a path we choose for storing a layer with the specified digest
@@ -320,15 +320,15 @@ func (w *Writer) configPath(configDigest digest.Digest) (string, error) {
 // NOTE: This is an internal implementation detail, not a format property, and can change
 // any time.
 func (w *Writer) physicalLayerPath(layerDigest digest.Digest) (string, error) {
-	if err := layerDigest.Validate(); err != nil { // digest.Digest.Hex() panics on failure, and could possibly result in unexpected paths, so validate explicitly.
+	if err := layerDigest.Validate(); err != nil { // digest.Digest.Encoded() panics on failure, and could possibly result in unexpected paths, so validate explicitly.
 		return "", err
 	}
-	// Note that this can't be e.g. filepath.Join(l.Digest.Hex(), legacyLayerFileName); due to the way
+	// Note that this can't be e.g. filepath.Join(l.Digest.Encoded(), legacyLayerFileName); due to the way
 	// writeLegacyMetadata constructs layer IDs differently from inputinfo.Digest values (as described
 	// inside it), most of the layers would end up in subdirectories alone without any metadata; (docker load)
 	// tries to load every subdirectory as an image and fails if the config is missing. So, keep the layers
 	// in the root of the tarball.
-	return layerDigest.Hex() + ".tar", nil
+	return layerDigest.Encoded() + ".tar", nil
 }
 
 type tarFI struct {
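These renames track go-digest, where Hex() is deprecated in favor of the equivalent Encoded(). The validate-before-use pattern the comments describe looks like this in isolation (a sketch, not code from the PR):

    package main

    import (
        "fmt"

        "github.com/opencontainers/go-digest"
    )

    // layerFileName derives a file name from an untrusted digest string,
    // validating first because Encoded() panics on malformed digests.
    func layerFileName(raw string) (string, error) {
        d, err := digest.Parse(raw) // Parse validates the digest
        if err != nil {
            return "", err
        }
        return d.Encoded() + ".tar", nil
    }

    func main() {
        fmt.Println(layerFileName("sha256:e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855"))
        fmt.Println(layerFileName("not-a-digest")) // returns an error instead of panicking
    }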
@@ -140,7 +140,7 @@ func loadAndMergeConfig(dirPath string) (*registryConfiguration, error) {
 
 		if config.DefaultDocker != nil {
 			if mergedConfig.DefaultDocker != nil {
-				return nil, fmt.Errorf(`Error parsing signature storage configuration: "default-docker" defined both in "%s" and "%s"`,
+				return nil, fmt.Errorf(`Error parsing signature storage configuration: "default-docker" defined both in %q and %q`,
 					dockerDefaultMergedFrom, configPath)
 			}
 			mergedConfig.DefaultDocker = config.DefaultDocker
@@ -149,7 +149,7 @@ func loadAndMergeConfig(dirPath string) (*registryConfiguration, error) {
 
 		for nsName, nsConfig := range config.Docker { // includes config.Docker == nil
 			if _, ok := mergedConfig.Docker[nsName]; ok {
-				return nil, fmt.Errorf(`Error parsing signature storage configuration: "docker" namespace "%s" defined both in "%s" and "%s"`,
+				return nil, fmt.Errorf(`Error parsing signature storage configuration: "docker" namespace %q defined both in %q and %q`,
 					nsName, nsMergedFrom[nsName], configPath)
 			}
 			mergedConfig.Docker[nsName] = nsConfig
@@ -288,10 +288,10 @@ func (ns registryNamespace) signatureTopLevel(write bool) string {
 // base is not nil from the caller
 // NOTE: Keep this in sync with docs/signature-protocols.md!
 func lookasideStorageURL(base lookasideStorageBase, manifestDigest digest.Digest, index int) (*url.URL, error) {
-	if err := manifestDigest.Validate(); err != nil { // digest.Digest.Hex() panics on failure, and could possibly result in a path with ../, so validate explicitly.
+	if err := manifestDigest.Validate(); err != nil { // digest.Digest.Encoded() panics on failure, and could possibly result in a path with ../, so validate explicitly.
 		return nil, err
 	}
 	sigURL := *base
-	sigURL.Path = fmt.Sprintf("%s@%s=%s/signature-%d", sigURL.Path, manifestDigest.Algorithm(), manifestDigest.Hex(), index+1)
+	sigURL.Path = fmt.Sprintf("%s@%s=%s/signature-%d", sigURL.Path, manifestDigest.Algorithm(), manifestDigest.Encoded(), index+1)
 	return &sigURL, nil
 }
@@ -366,7 +366,7 @@ func v1IDFromBlobDigestAndComponents(blobDigest digest.Digest, others ...string)
 	if err := blobDigest.Validate(); err != nil {
 		return "", err
 	}
-	parts := append([]string{blobDigest.Hex()}, others...)
+	parts := append([]string{blobDigest.Encoded()}, others...)
 	v1IDHash := sha256.Sum256([]byte(strings.Join(parts, " ")))
 	return hex.EncodeToString(v1IDHash[:]), nil
 }
@@ -76,7 +76,7 @@ func manifestInstanceFromBlob(ctx context.Context, sys *types.SystemContext, src
 	case imgspecv1.MediaTypeImageIndex:
 		return manifestOCI1FromImageIndex(ctx, sys, src, manblob)
 	default: // Note that this may not be reachable, manifest.NormalizedMIMEType has a default for unknown values.
-		return nil, fmt.Errorf("Unimplemented manifest MIME type %s", mt)
+		return nil, fmt.Errorf("Unimplemented manifest MIME type %q", mt)
 	}
 }
vendor/github.com/containers/image/v5/internal/imagesource/stubs/get_blob_at.go (generated, vendored)
@@ -39,6 +39,8 @@ func (stub NoGetBlobAtInitialize) SupportsGetBlobAt() bool {
 // The specified chunks must be not overlapping and sorted by their offset.
 // The readers must be fully consumed, in the order they are returned, before blocking
 // to read the next chunk.
+// If the Length for the last chunk is set to math.MaxUint64, then it
+// fully fetches the remaining data from the offset to the end of the blob.
 func (stub NoGetBlobAtInitialize) GetBlobAt(ctx context.Context, info types.BlobInfo, chunks []private.ImageSourceChunk) (chan io.ReadCloser, chan error, error) {
 	return nil, nil, fmt.Errorf("internal error: GetBlobAt is not supported by the %q transport", stub.transportName)
 }
@@ -164,7 +164,7 @@ func (list *Schema2ListPublic) ChooseInstance(ctx *types.SystemContext) (digest.
 			}
 		}
 	}
-	return "", fmt.Errorf("no image found in manifest list for architecture %s, variant %q, OS %s", wantedPlatforms[0].Architecture, wantedPlatforms[0].Variant, wantedPlatforms[0].OS)
+	return "", fmt.Errorf("no image found in manifest list for architecture %q, variant %q, OS %q", wantedPlatforms[0].Architecture, wantedPlatforms[0].Variant, wantedPlatforms[0].OS)
 }
 
 // Serialize returns the list in a blob format.
@@ -129,5 +129,5 @@ func ListFromBlob(manifest []byte, manifestMIMEType string) (List, error) {
 	case DockerV2Schema1MediaType, DockerV2Schema1SignedMediaType, imgspecv1.MediaTypeImageManifest, DockerV2Schema2MediaType:
 		return nil, fmt.Errorf("Treating single images as manifest lists is not implemented")
 	}
-	return nil, fmt.Errorf("Unimplemented manifest list MIME type %s (normalized as %s)", manifestMIMEType, normalized)
+	return nil, fmt.Errorf("Unimplemented manifest list MIME type %q (normalized as %q)", manifestMIMEType, normalized)
 }
@@ -260,7 +260,7 @@ func (index *OCI1IndexPublic) chooseInstance(ctx *types.SystemContext, preferGzi
 	if bestMatch != nil {
 		return bestMatch.digest, nil
 	}
-	return "", fmt.Errorf("no image found in image index for architecture %s, variant %q, OS %s", wantedPlatforms[0].Architecture, wantedPlatforms[0].Variant, wantedPlatforms[0].OS)
+	return "", fmt.Errorf("no image found in image index for architecture %q, variant %q, OS %q", wantedPlatforms[0].Architecture, wantedPlatforms[0].Variant, wantedPlatforms[0].OS)
 }
 
 func (index *OCI1Index) ChooseInstanceByCompression(ctx *types.SystemContext, preferGzip types.OptionalBool) (digest.Digest, error) {
vendor/github.com/containers/image/v5/internal/pkg/platform/platform_matcher.go (generated, vendored)
@@ -64,8 +64,8 @@ func getCPUInfo(pattern string) (info string, err error) {
 	return "", fmt.Errorf("getCPUInfo for pattern: %s not found", pattern)
 }
 
-func getCPUVariantWindows(arch string) string {
-	// Windows only supports v7 for ARM32 and v8 for ARM64 and so we can use
+func getCPUVariantDarwinWindows(arch string) string {
+	// Darwin and Windows only support v7 for ARM32 and v8 for ARM64 and so we can use
 	// runtime.GOARCH to determine the variants
 	var variant string
 	switch arch {
@@ -133,8 +133,8 @@ func getCPUVariantArm() string {
 }
 
 func getCPUVariant(os string, arch string) string {
-	if os == "windows" {
-		return getCPUVariantWindows(arch)
+	if os == "darwin" || os == "windows" {
+		return getCPUVariantDarwinWindows(arch)
 	}
 	if arch == "arm" || arch == "arm64" {
 		return getCPUVariantArm()
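A compact sketch of the fixed-variant mapping that darwin and windows share, per the comment in the hunk above (only v7/v8 are possible there, so no /proc/cpuinfo probing is needed):

    package main

    import "fmt"

    // fixedCPUVariant returns the only ARM variant darwin and windows can run:
    // v7 for 32-bit arm, v8 for arm64, and "" for everything else.
    func fixedCPUVariant(arch string) string {
        switch arch {
        case "arm":
            return "v7"
        case "arm64":
            return "v8"
        default:
            return ""
        }
    }

    func main() {
        for _, arch := range []string{"arm", "arm64", "amd64"} {
            fmt.Printf("%s -> %q\n", arch, fixedCPUVariant(arch))
        }
    }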
@@ -143,7 +143,11 @@ type ReusedBlob struct {
 // ImageSourceChunk is a portion of a blob.
 // This API is experimental and can be changed without bumping the major version number.
 type ImageSourceChunk struct {
+	// Offset specifies the starting position of the chunk within the source blob.
 	Offset uint64
+
+	// Length specifies the size of the chunk. If it is set to math.MaxUint64,
+	// then it refers to all the data from Offset to the end of the blob.
 	Length uint64
 }
 
@@ -154,6 +158,8 @@ type BlobChunkAccessor interface {
 	// The specified chunks must be not overlapping and sorted by their offset.
 	// The readers must be fully consumed, in the order they are returned, before blocking
 	// to read the next chunk.
+	// If the Length for the last chunk is set to math.MaxUint64, then it
+	// fully fetches the remaining data from the offset to the end of the blob.
 	GetBlobAt(ctx context.Context, info types.BlobInfo, chunks []ImageSourceChunk) (chan io.ReadCloser, chan error, error)
 }
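The two-channel shape of GetBlobAt (a channel of readers plus a channel of errors) is easiest to see in a toy consumer. A sketch assuming a fake source that yields one reader per requested chunk; none of these names come from the library:

    package main

    import (
        "fmt"
        "io"
        "strings"
    )

    // fakeGetBlobAt yields one ReadCloser per chunk, then closes both channels,
    // mimicking the contract documented above.
    func fakeGetBlobAt(parts []string) (chan io.ReadCloser, chan error) {
        streams := make(chan io.ReadCloser)
        errs := make(chan error)
        go func() {
            defer close(streams)
            defer close(errs)
            for _, p := range parts {
                streams <- io.NopCloser(strings.NewReader(p))
            }
        }()
        return streams, errs
    }

    func main() {
        streams, errs := fakeGetBlobAt([]string{"chunk-0 ", "chunk-1"})
        // Per the doc comment: consume each reader fully, in order, before the next.
        for rc := range streams {
            b, _ := io.ReadAll(rc)
            rc.Close()
            fmt.Print(string(b))
        }
        fmt.Println()
        if err := <-errs; err != nil { // closed channel yields nil on success
            fmt.Println("error:", err)
        }
    }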
@@ -67,15 +67,15 @@ func compressionVariantMIMEType(variantTable []compressionMIMETypeSet, mimeType
 				return "", ManifestLayerCompressionIncompatibilityError{fmt.Sprintf("uncompressed variant is not supported for type %q", mimeType)}
 			}
 			if name != mtsUncompressed {
-				return "", ManifestLayerCompressionIncompatibilityError{fmt.Sprintf("unknown compressed with algorithm %s variant for type %s", name, mimeType)}
+				return "", ManifestLayerCompressionIncompatibilityError{fmt.Sprintf("unknown compressed with algorithm %s variant for type %q", name, mimeType)}
 			}
 			// We can't very well say “the idea of no compression is unknown”
 			return "", ManifestLayerCompressionIncompatibilityError{fmt.Sprintf("uncompressed variant is not supported for type %q", mimeType)}
 		}
 		if algorithm != nil {
-			return "", fmt.Errorf("unsupported MIME type for compression: %s", mimeType)
+			return "", fmt.Errorf("unsupported MIME type for compression: %q", mimeType)
 		}
-		return "", fmt.Errorf("unsupported MIME type for decompression: %s", mimeType)
+		return "", fmt.Errorf("unsupported MIME type for decompression: %q", mimeType)
 	}
 
 // updatedMIMEType returns the result of applying edits in updated (MediaType, CompressionOperation) to
@@ -221,7 +221,7 @@ func (m *Schema1) fixManifestLayers() error {
 			m.History = slices.Delete(m.History, i, i+1)
 			m.ExtractedV1Compatibility = slices.Delete(m.ExtractedV1Compatibility, i, i+1)
 		} else if m.ExtractedV1Compatibility[i].Parent != m.ExtractedV1Compatibility[i+1].ID {
-			return fmt.Errorf("Invalid parent ID. Expected %v, got %v", m.ExtractedV1Compatibility[i+1].ID, m.ExtractedV1Compatibility[i].Parent)
+			return fmt.Errorf("Invalid parent ID. Expected %v, got %q", m.ExtractedV1Compatibility[i+1].ID, m.ExtractedV1Compatibility[i].Parent)
 		}
 	}
 	return nil
@@ -342,5 +342,5 @@ func (m *Schema1) ImageID(diffIDs []digest.Digest) (string, error) {
 	if err != nil {
 		return "", err
 	}
-	return digest.FromBytes(image).Hex(), nil
+	return digest.FromBytes(image).Encoded(), nil
 }
@@ -295,7 +295,7 @@ func (m *Schema2) ImageID([]digest.Digest) (string, error) {
 	if err := m.ConfigDescriptor.Digest.Validate(); err != nil {
 		return "", err
 	}
-	return m.ConfigDescriptor.Digest.Hex(), nil
+	return m.ConfigDescriptor.Digest.Encoded(), nil
 }
 
 // CanChangeLayerCompression returns true if we can compress/decompress layers with mimeType in the current image
@@ -166,5 +166,5 @@ func FromBlob(manblob []byte, mt string) (Manifest, error) {
 		return nil, fmt.Errorf("Treating manifest lists as individual manifests is not implemented")
 	}
 	// Note that this may not be reachable, NormalizedMIMEType has a default for unknown values.
-	return nil, fmt.Errorf("Unimplemented manifest MIME type %s (normalized as %s)", mt, nmt)
+	return nil, fmt.Errorf("Unimplemented manifest MIME type %q (normalized as %q)", mt, nmt)
 }
@@ -167,7 +167,7 @@ func (m *OCI1) UpdateLayerInfos(layerInfos []types.BlobInfo) error {
 // an error if the mediatype does not support encryption
 func getEncryptedMediaType(mediatype string) (string, error) {
 	if slices.Contains(strings.Split(mediatype, "+")[1:], "encrypted") {
-		return "", fmt.Errorf("unsupported mediaType: %v already encrypted", mediatype)
+		return "", fmt.Errorf("unsupported mediaType: %q already encrypted", mediatype)
 	}
 	unsuffixedMediatype := strings.Split(mediatype, "+")[0]
 	switch unsuffixedMediatype {
@@ -176,7 +176,7 @@ func getEncryptedMediaType(mediatype string) (string, error) {
 		return mediatype + "+encrypted", nil
 	}
 
-	return "", fmt.Errorf("unsupported mediaType to encrypt: %v", mediatype)
+	return "", fmt.Errorf("unsupported mediaType to encrypt: %q", mediatype)
 }
 
 // getDecryptedMediaType will return the mediatype to its encrypted counterpart and return
@@ -184,7 +184,7 @@ func getEncryptedMediaType(mediatype string) (string, error) {
 func getDecryptedMediaType(mediatype string) (string, error) {
 	res, ok := strings.CutSuffix(mediatype, "+encrypted")
 	if !ok {
-		return "", fmt.Errorf("unsupported mediaType to decrypt: %v", mediatype)
+		return "", fmt.Errorf("unsupported mediaType to decrypt: %q", mediatype)
 	}
 
 	return res, nil
@@ -260,7 +260,7 @@ func (m *OCI1) ImageID(diffIDs []digest.Digest) (string, error) {
 	if err := m.Config.Digest.Validate(); err != nil {
 		return "", err
 	}
-	return m.Config.Digest.Hex(), nil
+	return m.Config.Digest.Encoded(), nil
 }
 
 // CanChangeLayerCompression returns true if we can compress/decompress layers with mimeType in the current image
@@ -149,6 +149,8 @@ func (s *ociArchiveImageSource) SupportsGetBlobAt() bool {
 // The specified chunks must be not overlapping and sorted by their offset.
 // The readers must be fully consumed, in the order they are returned, before blocking
 // to read the next chunk.
+// If the Length for the last chunk is set to math.MaxUint64, then it
+// fully fetches the remaining data from the offset to the end of the blob.
 func (s *ociArchiveImageSource) GetBlobAt(ctx context.Context, info types.BlobInfo, chunks []private.ImageSourceChunk) (chan io.ReadCloser, chan error, error) {
 	return s.unpackedSrc.GetBlobAt(ctx, info, chunks)
 }
@@ -182,19 +182,19 @@ func (s *ociImageSource) getExternalBlob(ctx context.Context, urls []string) (io
 		hasSupportedURL = true
 		req, err := http.NewRequestWithContext(ctx, http.MethodGet, u, nil)
 		if err != nil {
-			errWrap = fmt.Errorf("fetching %s failed %s: %w", u, err.Error(), errWrap)
+			errWrap = fmt.Errorf("fetching %q failed %s: %w", u, err.Error(), errWrap)
 			continue
 		}
 
 		resp, err := s.client.Do(req)
 		if err != nil {
-			errWrap = fmt.Errorf("fetching %s failed %s: %w", u, err.Error(), errWrap)
+			errWrap = fmt.Errorf("fetching %q failed %s: %w", u, err.Error(), errWrap)
 			continue
 		}
 
 		if resp.StatusCode != http.StatusOK {
 			resp.Body.Close()
-			errWrap = fmt.Errorf("fetching %s failed, response code not 200: %w", u, errWrap)
+			errWrap = fmt.Errorf("fetching %q failed, response code not 200: %w", u, errWrap)
 			continue
 		}
 
@@ -256,5 +256,5 @@ func (ref ociReference) blobPath(digest digest.Digest, sharedBlobDir string) (st
 	} else {
 		blobDir = filepath.Join(ref.dir, imgspecv1.ImageBlobsDir)
 	}
-	return filepath.Join(blobDir, digest.Algorithm().String(), digest.Hex()), nil
+	return filepath.Join(blobDir, digest.Algorithm().String(), digest.Encoded()), nil
 }
@@ -553,7 +553,7 @@ func (rules *clientConfigLoadingRules) Load() (*clientcmdConfig, error) {
 			continue
 		}
 		if err != nil {
-			errlist = append(errlist, fmt.Errorf("loading config file \"%s\": %w", filename, err))
+			errlist = append(errlist, fmt.Errorf("loading config file %q: %w", filename, err))
 			continue
 		}
 
@@ -152,7 +152,7 @@ func (c *openshiftClient) getImage(ctx context.Context, imageStreamImageName str
 func (c *openshiftClient) convertDockerImageReference(ref string) (string, error) {
 	_, repo, gotRepo := strings.Cut(ref, "/")
 	if !gotRepo {
-		return "", fmt.Errorf("Invalid format of docker reference %s: missing '/'", ref)
+		return "", fmt.Errorf("Invalid format of docker reference %q: missing '/'", ref)
 	}
 	return reference.Domain(c.ref.dockerReference) + "/" + repo, nil
 }
@@ -164,7 +164,7 @@ func (d *ostreeImageDestination) PutBlobWithOptions(ctx context.Context, stream
 		return private.UploadedBlob{}, err
 	}
 
-	hash := blobDigest.Hex()
+	hash := blobDigest.Encoded()
 	d.blobs[hash] = &blobToImport{Size: size, Digest: blobDigest, BlobPath: blobPath}
 	return private.UploadedBlob{Digest: blobDigest, Size: size}, nil
 }
@@ -282,8 +282,8 @@ func generateTarSplitMetadata(output *bytes.Buffer, file string) (digest.Digest,
 func (d *ostreeImageDestination) importBlob(selinuxHnd *C.struct_selabel_handle, repo *otbuiltin.Repo, blob *blobToImport) error {
 	// TODO: This can take quite some time, and should ideally be cancellable using a context.Context.
 
-	ostreeBranch := fmt.Sprintf("ociimage/%s", blob.Digest.Hex())
-	destinationPath := filepath.Join(d.tmpDirPath, blob.Digest.Hex(), "root")
+	ostreeBranch := fmt.Sprintf("ociimage/%s", blob.Digest.Encoded())
+	destinationPath := filepath.Join(d.tmpDirPath, blob.Digest.Encoded(), "root")
 	if err := ensureDirectoryExists(destinationPath); err != nil {
 		return err
 	}
@@ -323,7 +323,7 @@ func (d *ostreeImageDestination) importBlob(selinuxHnd *C.struct_selabel_handle,
 }
 
 func (d *ostreeImageDestination) importConfig(repo *otbuiltin.Repo, blob *blobToImport) error {
-	ostreeBranch := fmt.Sprintf("ociimage/%s", blob.Digest.Hex())
+	ostreeBranch := fmt.Sprintf("ociimage/%s", blob.Digest.Encoded())
 	destinationPath := filepath.Dir(blob.BlobPath)
 
 	return d.ostreeCommit(repo, ostreeBranch, destinationPath, []string{fmt.Sprintf("docker.size=%d", blob.Size)})
@@ -348,10 +348,10 @@ func (d *ostreeImageDestination) TryReusingBlobWithOptions(ctx context.Context,
 		d.repo = repo
 	}
 
-	if err := info.Digest.Validate(); err != nil { // digest.Digest.Hex() panics on failure, so validate explicitly.
+	if err := info.Digest.Validate(); err != nil { // digest.Digest.Encoded() panics on failure, so validate explicitly.
 		return false, private.ReusedBlob{}, err
 	}
-	branch := fmt.Sprintf("ociimage/%s", info.Digest.Hex())
+	branch := fmt.Sprintf("ociimage/%s", info.Digest.Encoded())
 
 	found, data, err := readMetadata(d.repo, branch, "docker.uncompressed_digest")
 	if err != nil || !found {
@@ -479,7 +479,7 @@ func (d *ostreeImageDestination) Commit(context.Context, types.UnparsedImage) er
 			if err := layer.Digest.Validate(); err != nil { // digest.Digest.Encoded() panics on failure, so validate explicitly.
 				return err
 			}
-			hash := layer.Digest.Hex()
+			hash := layer.Digest.Encoded()
 			if err = checkLayer(hash); err != nil {
 				return err
 			}
@@ -488,7 +488,7 @@ func (d *ostreeImageDestination) Commit(context.Context, types.UnparsedImage) er
 			if err := layer.BlobSum.Validate(); err != nil { // digest.Digest.Encoded() panics on failure, so validate explicitly.
 				return err
 			}
-			hash := layer.BlobSum.Hex()
+			hash := layer.BlobSum.Encoded()
 			if err = checkLayer(hash); err != nil {
 				return err
 			}
@@ -289,7 +289,7 @@ func (s *ostreeImageSource) GetBlob(ctx context.Context, info types.BlobInfo, ca
 	if err := info.Digest.Validate(); err != nil { // digest.Digest.Encoded() panics on failure, so validate explicitly.
 		return nil, -1, err
 	}
-	blob := info.Digest.Hex()
+	blob := info.Digest.Encoded()
 
 	// Ensure s.compressed is initialized. It is build by LayerInfosForCopy.
 	if s.compressed == nil {
@@ -301,7 +301,7 @@ func (s *ostreeImageSource) GetBlob(ctx context.Context, info types.BlobInfo, ca
 	}
 	compressedBlob, isCompressed := s.compressed[info.Digest]
 	if isCompressed {
-		blob = compressedBlob.Hex()
+		blob = compressedBlob.Encoded()
 	}
 	branch := fmt.Sprintf("ociimage/%s", blob)
 
@@ -424,7 +424,7 @@ func (s *ostreeImageSource) LayerInfosForCopy(ctx context.Context, instanceDiges
 	layerBlobs := man.LayerInfos()
 
 	for _, layerBlob := range layerBlobs {
-		branch := fmt.Sprintf("ociimage/%s", layerBlob.Digest.Hex())
+		branch := fmt.Sprintf("ociimage/%s", layerBlob.Digest.Encoded())
 		found, uncompressedDigestStr, err := readMetadata(s.repo, branch, "docker.uncompressed_digest")
 		if err != nil || !found {
 			return nil, err
@@ -439,7 +439,10 @@ func (s *ostreeImageSource) LayerInfosForCopy(ctx context.Context, instanceDiges
 		if err != nil {
 			return nil, err
 		}
-		uncompressedDigest := digest.Digest(uncompressedDigestStr)
+		uncompressedDigest, err := digest.Parse(uncompressedDigestStr)
+		if err != nil {
+			return nil, err
+		}
 		blobInfo := types.BlobInfo{
 			Digest: uncompressedDigest,
 			Size:   uncompressedSize,
@@ -4,6 +4,7 @@ import (
 	"context"
 	"fmt"
 	"io"
+	"math"
 	"os"
 	"sync"
 
@@ -116,6 +117,63 @@ func (s *blobCacheSource) GetSignaturesWithFormat(ctx context.Context, instanceD
 	return s.source.GetSignaturesWithFormat(ctx, instanceDigest)
 }
 
+// layerInfoForCopy returns a possibly-updated version of info for LayerInfosForCopy
+func (s *blobCacheSource) layerInfoForCopy(info types.BlobInfo) (types.BlobInfo, error) {
+	var replaceDigestBytes []byte
+	blobFile, err := s.reference.blobPath(info.Digest, false)
+	if err != nil {
+		return types.BlobInfo{}, err
+	}
+	switch s.reference.compress {
+	case types.Compress:
+		replaceDigestBytes, err = os.ReadFile(blobFile + compressedNote)
+	case types.Decompress:
+		replaceDigestBytes, err = os.ReadFile(blobFile + decompressedNote)
+	}
+	if err != nil {
+		return info, nil
+	}
+	replaceDigest, err := digest.Parse(string(replaceDigestBytes))
+	if err != nil {
+		return info, nil
+	}
+	alternate, err := s.reference.blobPath(replaceDigest, false)
+	if err != nil {
+		return types.BlobInfo{}, err
+	}
+	fileInfo, err := os.Stat(alternate)
+	if err != nil {
+		return info, nil
+	}
+
+	switch info.MediaType {
+	case v1.MediaTypeImageLayer, v1.MediaTypeImageLayerGzip:
+		switch s.reference.compress {
+		case types.Compress:
+			info.MediaType = v1.MediaTypeImageLayerGzip
+			info.CompressionAlgorithm = &compression.Gzip
+		case types.Decompress: // FIXME: This should remove zstd:chunked annotations (but those annotations being left with incorrect values should not break pulls)
+			info.MediaType = v1.MediaTypeImageLayer
+			info.CompressionAlgorithm = nil
+		}
+	case manifest.DockerV2SchemaLayerMediaTypeUncompressed, manifest.DockerV2Schema2LayerMediaType:
+		switch s.reference.compress {
+		case types.Compress:
+			info.MediaType = manifest.DockerV2Schema2LayerMediaType
+			info.CompressionAlgorithm = &compression.Gzip
+		case types.Decompress:
+			// nope, not going to suggest anything, it's not allowed by the spec
+			return info, nil
+		}
+	}
+	logrus.Debugf("suggesting cached blob with digest %q, type %q, and compression %v in place of blob with digest %q", replaceDigest.String(), info.MediaType, s.reference.compress, info.Digest.String())
+	info.CompressionOperation = s.reference.compress
+	info.Digest = replaceDigest
+	info.Size = fileInfo.Size()
+	logrus.Debugf("info = %#v", info)
+	return info, nil
+}
+
 func (s *blobCacheSource) LayerInfosForCopy(ctx context.Context, instanceDigest *digest.Digest) ([]types.BlobInfo, error) {
 	signatures, err := s.source.GetSignaturesWithFormat(ctx, instanceDigest)
 	if err != nil {
@@ -138,55 +196,10 @@ func (s *blobCacheSource) LayerInfosForCopy(ctx context.Context, instanceDigest
 	if canReplaceBlobs && s.reference.compress != types.PreserveOriginal {
 		replacedInfos := make([]types.BlobInfo, 0, len(infos))
 		for _, info := range infos {
-			var replaceDigest []byte
-			blobFile, err := s.reference.blobPath(info.Digest, false)
+			info, err = s.layerInfoForCopy(info)
 			if err != nil {
 				return nil, err
 			}
-			var alternate string
-			switch s.reference.compress {
-			case types.Compress:
-				alternate = blobFile + compressedNote
-				replaceDigest, err = os.ReadFile(alternate)
-			case types.Decompress:
-				alternate = blobFile + decompressedNote
-				replaceDigest, err = os.ReadFile(alternate)
-			}
-			if err == nil && digest.Digest(replaceDigest).Validate() == nil {
-				alternate, err = s.reference.blobPath(digest.Digest(replaceDigest), false)
-				if err != nil {
-					return nil, err
-				}
-				fileInfo, err := os.Stat(alternate)
-				if err == nil {
-					switch info.MediaType {
-					case v1.MediaTypeImageLayer, v1.MediaTypeImageLayerGzip:
-						switch s.reference.compress {
-						case types.Compress:
-							info.MediaType = v1.MediaTypeImageLayerGzip
-							info.CompressionAlgorithm = &compression.Gzip
-						case types.Decompress: // FIXME: This should remove zstd:chunked annotations (but those annotations being left with incorrect values should not break pulls)
-							info.MediaType = v1.MediaTypeImageLayer
-							info.CompressionAlgorithm = nil
-						}
-					case manifest.DockerV2SchemaLayerMediaTypeUncompressed, manifest.DockerV2Schema2LayerMediaType:
-						switch s.reference.compress {
-						case types.Compress:
-							info.MediaType = manifest.DockerV2Schema2LayerMediaType
-							info.CompressionAlgorithm = &compression.Gzip
-						case types.Decompress:
-							// nope, not going to suggest anything, it's not allowed by the spec
-							replacedInfos = append(replacedInfos, info)
-							continue
-						}
-					}
-					logrus.Debugf("suggesting cached blob with digest %q, type %q, and compression %v in place of blob with digest %q", string(replaceDigest), info.MediaType, s.reference.compress, info.Digest.String())
-					info.CompressionOperation = s.reference.compress
-					info.Digest = digest.Digest(replaceDigest)
-					info.Size = fileInfo.Size()
-					logrus.Debugf("info = %#v", info)
-				}
-			}
 			replacedInfos = append(replacedInfos, info)
 		}
 		infos = replacedInfos
@@ -214,9 +227,15 @@ func streamChunksFromFile(streams chan io.ReadCloser, errs chan error, file io.R
 			errs <- err
 			break
 		}
+		var stream io.Reader
+		if c.Length != math.MaxUint64 {
+			stream = io.LimitReader(file, int64(c.Length))
+		} else {
+			stream = file
+		}
 		s := signalCloseReader{
 			closed: make(chan struct{}),
-			stream: io.LimitReader(file, int64(c.Length)),
+			stream: stream,
 		}
 		streams <- s
 
@ -244,6 +263,8 @@ func (s signalCloseReader) Close() error {
|
|||
// The specified chunks must be not overlapping and sorted by their offset.
|
||||
// The readers must be fully consumed, in the order they are returned, before blocking
|
||||
// to read the next chunk.
|
||||
// If the Length for the last chunk is set to math.MaxUint64, then it
|
||||
// fully fetches the remaining data from the offset to the end of the blob.
|
||||
func (s *blobCacheSource) GetBlobAt(ctx context.Context, info types.BlobInfo, chunks []private.ImageSourceChunk) (chan io.ReadCloser, chan error, error) {
|
||||
blobPath, _, _, err := s.reference.findBlob(info)
|
||||
if err != nil {
|
||||
|
|
|
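The doc comment above pins down the consumer side of the contract: each returned reader must be fully drained, in order, before the next chunk is produced. A minimal sketch of a compliant consumer for the (streams, errs) channel pair (the channel types match the signature above; everything else is an assumption):

    package chunksdemo

    import "io"

    // readAllChunks drains every chunk stream in order, then checks the
    // error channel once the streams channel is closed.
    func readAllChunks(streams chan io.ReadCloser, errs chan error) ([][]byte, error) {
        var out [][]byte
        for stream := range streams {
            b, err := io.ReadAll(stream) // fully consume before the next chunk
            stream.Close()
            if err != nil {
                return nil, err
            }
            out = append(out, b)
        }
        if err, ok := <-errs; ok && err != nil {
            return nil, err
        }
        return out, nil
    }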
@@ -111,7 +111,7 @@ func newImageSource(ctx context.Context, sys *types.SystemContext, ref sifRefere
History: []imgspecv1.History{
    {
        Created:   &created,
-       CreatedBy: fmt.Sprintf("/bin/sh -c #(nop) ADD file:%s in %c", layerDigest.Hex(), os.PathSeparator),
+       CreatedBy: fmt.Sprintf("/bin/sh -c #(nop) ADD file:%s in %c", layerDigest.Encoded(), os.PathSeparator),
        Comment:   "imported from SIF, uuid: " + sifImg.ID(),
    },
    {
@@ -76,10 +76,10 @@ func VerifyImageManifestSignatureUsingKeyIdentityList(unverifiedSignature, unver
validateSignedDockerReference: func(signedDockerReference string) error {
    signedRef, err := reference.ParseNormalizedNamed(signedDockerReference)
    if err != nil {
-       return internal.NewInvalidSignatureError(fmt.Sprintf("Invalid docker reference %s in signature", signedDockerReference))
+       return internal.NewInvalidSignatureError(fmt.Sprintf("Invalid docker reference %q in signature", signedDockerReference))
    }
    if signedRef.String() != expectedRef.String() {
-       return internal.NewInvalidSignatureError(fmt.Sprintf("Docker reference %s does not match %s",
+       return internal.NewInvalidSignatureError(fmt.Sprintf("Docker reference %q does not match %q",
            signedDockerReference, expectedDockerReference))
    }
    return nil
@@ -178,7 +178,7 @@ func (f *fulcioTrustRoot) verifyFulcioCertificateAtTime(relevantTime time.Time,

// == Validate the OIDC subject
if !slices.Contains(untrustedCertificate.EmailAddresses, f.subjectEmail) {
-   return nil, internal.NewInvalidSignatureError(fmt.Sprintf("Required email %s not found (got %#v)",
+   return nil, internal.NewInvalidSignatureError(fmt.Sprintf("Required email %q not found (got %q)",
        f.subjectEmail,
        untrustedCertificate.EmailAddresses))
}
@@ -31,7 +31,7 @@ func ParanoidUnmarshalJSONObject(data []byte, fieldResolver func(string) any) er
    return JSONFormatError(err.Error())
}
if t != json.Delim('{') {
-   return JSONFormatError(fmt.Sprintf("JSON object expected, got \"%s\"", t))
+   return JSONFormatError(fmt.Sprintf("JSON object expected, got %#v", t))
}
for {
    t, err := dec.Token()
@@ -45,16 +45,16 @@ func ParanoidUnmarshalJSONObject(data []byte, fieldResolver func(string) any) er
key, ok := t.(string)
if !ok {
    // Coverage: This should never happen, dec.Token() rejects non-string-literals in this state.
-   return JSONFormatError(fmt.Sprintf("Key string literal expected, got \"%s\"", t))
+   return JSONFormatError(fmt.Sprintf("Key string literal expected, got %#v", t))
}
if seenKeys.Contains(key) {
-   return JSONFormatError(fmt.Sprintf("Duplicate key \"%s\"", key))
+   return JSONFormatError(fmt.Sprintf("Duplicate key %q", key))
}
seenKeys.Add(key)

valuePtr := fieldResolver(key)
if valuePtr == nil {
-   return JSONFormatError(fmt.Sprintf("Unknown key \"%s\"", key))
+   return JSONFormatError(fmt.Sprintf("Unknown key %q", key))
}
// This works like json.Unmarshal, in particular it allows us to implement UnmarshalJSON to implement strict parsing of the field value.
if err := dec.Decode(valuePtr); err != nil {
@@ -83,7 +83,7 @@ func ParanoidUnmarshalJSONObjectExactFields(data []byte, exactFields map[string]
}
for key := range exactFields {
    if !seenKeys.Contains(key) {
-       return JSONFormatError(fmt.Sprintf(`Key "%s" missing in a JSON object`, key))
+       return JSONFormatError(fmt.Sprintf(`Key %q missing in a JSON object`, key))
    }
}
return nil
@@ -150,7 +150,11 @@ func (s *UntrustedSigstorePayload) strictUnmarshalJSON(data []byte) error {
}); err != nil {
    return err
}
-s.untrustedDockerManifestDigest = digest.Digest(digestString)
+digestValue, err := digest.Parse(digestString)
+if err != nil {
+   return NewInvalidSignatureError(fmt.Sprintf(`invalid docker-manifest-digest value %q: %v`, digestString, err))
+}
+s.untrustedDockerManifestDigest = digestValue

return ParanoidUnmarshalJSONObjectExactFields(identity, map[string]any{
    "docker-reference": &s.untrustedDockerReference,
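The hunk above swaps a plain type conversion for digest.Parse. The difference matters for untrusted input: a conversion never fails, so a malformed value flows onward; Parse rejects it at the boundary. A small sketch of both behaviors using go-digest (the only library assumed here):

    package digestdemo

    import (
        digest "github.com/opencontainers/go-digest"
    )

    // fromUntrusted shows the safe path: validate "algorithm:hex" form up front.
    func fromUntrusted(s string) (digest.Digest, error) {
        // digest.Digest(s) would be a silent cast; Validate or Parse is required
        // before the value is used as a real digest.
        return digest.Parse(s)
    }

    // castThenCheck is the equivalent two-step form used elsewhere in the diff.
    func castThenCheck(s string) (digest.Digest, error) {
        d := digest.Digest(s)
        if err := d.Validate(); err != nil {
            return "", err
        }
        return d, nil
    }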
@@ -247,7 +247,7 @@ func newPolicyRequirementFromJSON(data []byte) (PolicyRequirement, error) {
case prTypeSigstoreSigned:
    res = &prSigstoreSigned{}
default:
-   return nil, InvalidPolicyFormatError(fmt.Sprintf("Unknown policy requirement type \"%s\"", typeField.Type))
+   return nil, InvalidPolicyFormatError(fmt.Sprintf("Unknown policy requirement type %q", typeField.Type))
}
if err := json.Unmarshal(data, &res); err != nil {
    return nil, err
@@ -279,7 +279,7 @@ func (pr *prInsecureAcceptAnything) UnmarshalJSON(data []byte) error {
}

if tmp.Type != prTypeInsecureAcceptAnything {
-   return InvalidPolicyFormatError(fmt.Sprintf("Unexpected policy requirement type \"%s\"", tmp.Type))
+   return InvalidPolicyFormatError(fmt.Sprintf("Unexpected policy requirement type %q", tmp.Type))
}
*pr = *newPRInsecureAcceptAnything()
return nil
@@ -309,7 +309,7 @@ func (pr *prReject) UnmarshalJSON(data []byte) error {
}

if tmp.Type != prTypeReject {
-   return InvalidPolicyFormatError(fmt.Sprintf("Unexpected policy requirement type \"%s\"", tmp.Type))
+   return InvalidPolicyFormatError(fmt.Sprintf("Unexpected policy requirement type %q", tmp.Type))
}
*pr = *newPRReject()
return nil
|
@ -318,7 +318,7 @@ func (pr *prReject) UnmarshalJSON(data []byte) error {
|
|||
// newPRSignedBy returns a new prSignedBy if parameters are valid.
|
||||
func newPRSignedBy(keyType sbKeyType, keyPath string, keyPaths []string, keyData []byte, signedIdentity PolicyReferenceMatch) (*prSignedBy, error) {
|
||||
if !keyType.IsValid() {
|
||||
return nil, InvalidPolicyFormatError(fmt.Sprintf("invalid keyType \"%s\"", keyType))
|
||||
return nil, InvalidPolicyFormatError(fmt.Sprintf("invalid keyType %q", keyType))
|
||||
}
|
||||
keySources := 0
|
||||
if keyPath != "" {
|
||||
|
@ -410,7 +410,7 @@ func (pr *prSignedBy) UnmarshalJSON(data []byte) error {
|
|||
}
|
||||
|
||||
if tmp.Type != prTypeSignedBy {
|
||||
return InvalidPolicyFormatError(fmt.Sprintf("Unexpected policy requirement type \"%s\"", tmp.Type))
|
||||
return InvalidPolicyFormatError(fmt.Sprintf("Unexpected policy requirement type %q", tmp.Type))
|
||||
}
|
||||
if signedIdentity == nil {
|
||||
tmp.SignedIdentity = NewPRMMatchRepoDigestOrExact()
|
||||
|
@ -466,7 +466,7 @@ func (kt *sbKeyType) UnmarshalJSON(data []byte) error {
|
|||
return err
|
||||
}
|
||||
if !sbKeyType(s).IsValid() {
|
||||
return InvalidPolicyFormatError(fmt.Sprintf("Unrecognized keyType value \"%s\"", s))
|
||||
return InvalidPolicyFormatError(fmt.Sprintf("Unrecognized keyType value %q", s))
|
||||
}
|
||||
*kt = sbKeyType(s)
|
||||
return nil
|
||||
|
@ -504,7 +504,7 @@ func (pr *prSignedBaseLayer) UnmarshalJSON(data []byte) error {
|
|||
}
|
||||
|
||||
if tmp.Type != prTypeSignedBaseLayer {
|
||||
return InvalidPolicyFormatError(fmt.Sprintf("Unexpected policy requirement type \"%s\"", tmp.Type))
|
||||
return InvalidPolicyFormatError(fmt.Sprintf("Unexpected policy requirement type %q", tmp.Type))
|
||||
}
|
||||
bli, err := newPolicyReferenceMatchFromJSON(baseLayerIdentity)
|
||||
if err != nil {
|
||||
|
@ -540,7 +540,7 @@ func newPolicyReferenceMatchFromJSON(data []byte) (PolicyReferenceMatch, error)
|
|||
case prmTypeRemapIdentity:
|
||||
res = &prmRemapIdentity{}
|
||||
default:
|
||||
return nil, InvalidPolicyFormatError(fmt.Sprintf("Unknown policy reference match type \"%s\"", typeField.Type))
|
||||
return nil, InvalidPolicyFormatError(fmt.Sprintf("Unknown policy reference match type %q", typeField.Type))
|
||||
}
|
||||
if err := json.Unmarshal(data, &res); err != nil {
|
||||
return nil, err
|
||||
|
@ -572,7 +572,7 @@ func (prm *prmMatchExact) UnmarshalJSON(data []byte) error {
|
|||
}
|
||||
|
||||
if tmp.Type != prmTypeMatchExact {
|
||||
return InvalidPolicyFormatError(fmt.Sprintf("Unexpected policy requirement type \"%s\"", tmp.Type))
|
||||
return InvalidPolicyFormatError(fmt.Sprintf("Unexpected policy requirement type %q", tmp.Type))
|
||||
}
|
||||
*prm = *newPRMMatchExact()
|
||||
return nil
|
||||
|
@ -602,7 +602,7 @@ func (prm *prmMatchRepoDigestOrExact) UnmarshalJSON(data []byte) error {
|
|||
}
|
||||
|
||||
if tmp.Type != prmTypeMatchRepoDigestOrExact {
|
||||
return InvalidPolicyFormatError(fmt.Sprintf("Unexpected policy requirement type \"%s\"", tmp.Type))
|
||||
return InvalidPolicyFormatError(fmt.Sprintf("Unexpected policy requirement type %q", tmp.Type))
|
||||
}
|
||||
*prm = *newPRMMatchRepoDigestOrExact()
|
||||
return nil
|
||||
|
@ -632,7 +632,7 @@ func (prm *prmMatchRepository) UnmarshalJSON(data []byte) error {
|
|||
}
|
||||
|
||||
if tmp.Type != prmTypeMatchRepository {
|
||||
return InvalidPolicyFormatError(fmt.Sprintf("Unexpected policy requirement type \"%s\"", tmp.Type))
|
||||
return InvalidPolicyFormatError(fmt.Sprintf("Unexpected policy requirement type %q", tmp.Type))
|
||||
}
|
||||
*prm = *newPRMMatchRepository()
|
||||
return nil
|
||||
|
@ -642,10 +642,10 @@ func (prm *prmMatchRepository) UnmarshalJSON(data []byte) error {
|
|||
func newPRMExactReference(dockerReference string) (*prmExactReference, error) {
|
||||
ref, err := reference.ParseNormalizedNamed(dockerReference)
|
||||
if err != nil {
|
||||
return nil, InvalidPolicyFormatError(fmt.Sprintf("Invalid format of dockerReference %s: %s", dockerReference, err.Error()))
|
||||
return nil, InvalidPolicyFormatError(fmt.Sprintf("Invalid format of dockerReference %q: %s", dockerReference, err.Error()))
|
||||
}
|
||||
if reference.IsNameOnly(ref) {
|
||||
return nil, InvalidPolicyFormatError(fmt.Sprintf("dockerReference %s contains neither a tag nor digest", dockerReference))
|
||||
return nil, InvalidPolicyFormatError(fmt.Sprintf("dockerReference %q contains neither a tag nor digest", dockerReference))
|
||||
}
|
||||
return &prmExactReference{
|
||||
prmCommon: prmCommon{Type: prmTypeExactReference},
|
||||
|
@ -673,7 +673,7 @@ func (prm *prmExactReference) UnmarshalJSON(data []byte) error {
|
|||
}
|
||||
|
||||
if tmp.Type != prmTypeExactReference {
|
||||
return InvalidPolicyFormatError(fmt.Sprintf("Unexpected policy requirement type \"%s\"", tmp.Type))
|
||||
return InvalidPolicyFormatError(fmt.Sprintf("Unexpected policy requirement type %q", tmp.Type))
|
||||
}
|
||||
|
||||
res, err := newPRMExactReference(tmp.DockerReference)
|
||||
|
@ -687,7 +687,7 @@ func (prm *prmExactReference) UnmarshalJSON(data []byte) error {
|
|||
// newPRMExactRepository is NewPRMExactRepository, except it returns the private type.
|
||||
func newPRMExactRepository(dockerRepository string) (*prmExactRepository, error) {
|
||||
if _, err := reference.ParseNormalizedNamed(dockerRepository); err != nil {
|
||||
return nil, InvalidPolicyFormatError(fmt.Sprintf("Invalid format of dockerRepository %s: %s", dockerRepository, err.Error()))
|
||||
return nil, InvalidPolicyFormatError(fmt.Sprintf("Invalid format of dockerRepository %q: %s", dockerRepository, err.Error()))
|
||||
}
|
||||
return &prmExactRepository{
|
||||
prmCommon: prmCommon{Type: prmTypeExactRepository},
|
||||
|
@ -715,7 +715,7 @@ func (prm *prmExactRepository) UnmarshalJSON(data []byte) error {
|
|||
}
|
||||
|
||||
if tmp.Type != prmTypeExactRepository {
|
||||
return InvalidPolicyFormatError(fmt.Sprintf("Unexpected policy requirement type \"%s\"", tmp.Type))
|
||||
return InvalidPolicyFormatError(fmt.Sprintf("Unexpected policy requirement type %q", tmp.Type))
|
||||
}
|
||||
|
||||
res, err := newPRMExactRepository(tmp.DockerRepository)
|
||||
|
@ -788,7 +788,7 @@ func (prm *prmRemapIdentity) UnmarshalJSON(data []byte) error {
|
|||
}
|
||||
|
||||
if tmp.Type != prmTypeRemapIdentity {
|
||||
return InvalidPolicyFormatError(fmt.Sprintf("Unexpected policy requirement type \"%s\"", tmp.Type))
|
||||
return InvalidPolicyFormatError(fmt.Sprintf("Unexpected policy requirement type %q", tmp.Type))
|
||||
}
|
||||
|
||||
res, err := newPRMRemapIdentity(tmp.Prefix, tmp.SignedPrefix)
|
||||
|
|
|
@ -176,7 +176,7 @@ func (pr *prSigstoreSigned) UnmarshalJSON(data []byte) error {
|
|||
}
|
||||
|
||||
if tmp.Type != prTypeSigstoreSigned {
|
||||
return InvalidPolicyFormatError(fmt.Sprintf("Unexpected policy requirement type \"%s\"", tmp.Type))
|
||||
return InvalidPolicyFormatError(fmt.Sprintf("Unexpected policy requirement type %q", tmp.Type))
|
||||
}
|
||||
if signedIdentity == nil {
|
||||
tmp.SignedIdentity = NewPRMMatchRepoDigestOrExact()
|
||||
|
|
|
@ -97,7 +97,7 @@ const (
|
|||
// changeState changes pc.state, or fails if the state is unexpected
|
||||
func (pc *PolicyContext) changeState(expected, new policyContextState) error {
|
||||
if pc.state != expected {
|
||||
return fmt.Errorf(`Invalid PolicyContext state, expected "%s", found "%s"`, expected, pc.state)
|
||||
return fmt.Errorf(`Invalid PolicyContext state, expected %q, found %q`, expected, pc.state)
|
||||
}
|
||||
pc.state = new
|
||||
return nil
|
||||
|
@ -140,21 +140,21 @@ func (pc *PolicyContext) requirementsForImageRef(ref types.ImageReference) Polic
|
|||
// Look for a full match.
|
||||
identity := ref.PolicyConfigurationIdentity()
|
||||
if req, ok := transportScopes[identity]; ok {
|
||||
logrus.Debugf(` Using transport "%s" policy section %s`, transportName, identity)
|
||||
logrus.Debugf(` Using transport %q policy section %q`, transportName, identity)
|
||||
return req
|
||||
}
|
||||
|
||||
// Look for a match of the possible parent namespaces.
|
||||
for _, name := range ref.PolicyConfigurationNamespaces() {
|
||||
if req, ok := transportScopes[name]; ok {
|
||||
logrus.Debugf(` Using transport "%s" specific policy section %s`, transportName, name)
|
||||
logrus.Debugf(` Using transport %q specific policy section %q`, transportName, name)
|
||||
return req
|
||||
}
|
||||
}
|
||||
|
||||
// Look for a default match for the transport.
|
||||
if req, ok := transportScopes[""]; ok {
|
||||
logrus.Debugf(` Using transport "%s" policy section ""`, transportName)
|
||||
logrus.Debugf(` Using transport %q policy section ""`, transportName)
|
||||
return req
|
||||
}
|
||||
}
|
||||
|
|
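The hunk above only touches log formatting, but its comments spell out the scope-resolution order: exact identity first, then each parent namespace, then the transport-wide "" default. A standalone sketch of that three-step lookup (map and inputs are assumptions; only the order mirrors the code):

    package policydemo

    // lookupScope resolves a policy scope the way the comments above describe.
    func lookupScope(scopes map[string]string, identity string, namespaces []string) (string, bool) {
        if v, ok := scopes[identity]; ok { // full match
            return v, true
        }
        for _, ns := range namespaces { // possible parent namespaces, most specific first
            if v, ok := scopes[ns]; ok {
                return v, true
            }
        }
        if v, ok := scopes[""]; ok { // per-transport default
            return v, true
        }
        return "", false
    }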
|
@ -20,10 +20,10 @@ func (pr *prSignedBy) isSignatureAuthorAccepted(ctx context.Context, image priva
|
|||
case SBKeyTypeGPGKeys:
|
||||
case SBKeyTypeSignedByGPGKeys, SBKeyTypeX509Certificates, SBKeyTypeSignedByX509CAs:
|
||||
// FIXME? Reject this at policy parsing time already?
|
||||
return sarRejected, nil, fmt.Errorf(`Unimplemented "keyType" value "%s"`, string(pr.KeyType))
|
||||
return sarRejected, nil, fmt.Errorf(`Unimplemented "keyType" value %q`, string(pr.KeyType))
|
||||
default:
|
||||
// This should never happen, newPRSignedBy ensures KeyType.IsValid()
|
||||
return sarRejected, nil, fmt.Errorf(`Unknown "keyType" value "%s"`, string(pr.KeyType))
|
||||
return sarRejected, nil, fmt.Errorf(`Unknown "keyType" value %q`, string(pr.KeyType))
|
||||
}
|
||||
|
||||
// FIXME: move this to per-context initialization
|
||||
|
@ -77,7 +77,7 @@ func (pr *prSignedBy) isSignatureAuthorAccepted(ctx context.Context, image priva
|
|||
},
|
||||
validateSignedDockerReference: func(ref string) error {
|
||||
if !pr.SignedIdentity.matchesDockerReference(image, ref) {
|
||||
return PolicyRequirementError(fmt.Sprintf("Signature for identity %s is not accepted", ref))
|
||||
return PolicyRequirementError(fmt.Sprintf("Signature for identity %q is not accepted", ref))
|
||||
}
|
||||
return nil
|
||||
},
|
||||
|
@ -123,7 +123,7 @@ func (pr *prSignedBy) isRunningImageAllowed(ctx context.Context, image private.U
|
|||
// Huh?! This should not happen at all; treat it as any other invalid value.
|
||||
fallthrough
|
||||
default:
|
||||
reason = fmt.Errorf(`Internal error: Unexpected signature verification result "%s"`, string(res))
|
||||
reason = fmt.Errorf(`Internal error: Unexpected signature verification result %q`, string(res))
|
||||
}
|
||||
rejections = append(rejections, reason)
|
||||
}
|
||||
|
|
|
@ -194,7 +194,7 @@ func (pr *prSigstoreSigned) isSignatureAccepted(ctx context.Context, image priva
|
|||
signature, err := internal.VerifySigstorePayload(publicKey, untrustedPayload, untrustedBase64Signature, internal.SigstorePayloadAcceptanceRules{
|
||||
ValidateSignedDockerReference: func(ref string) error {
|
||||
if !pr.SignedIdentity.matchesDockerReference(image, ref) {
|
||||
return PolicyRequirementError(fmt.Sprintf("Signature for identity %s is not accepted", ref))
|
||||
return PolicyRequirementError(fmt.Sprintf("Signature for identity %q is not accepted", ref))
|
||||
}
|
||||
return nil
|
||||
},
|
||||
|
@ -253,7 +253,7 @@ func (pr *prSigstoreSigned) isRunningImageAllowed(ctx context.Context, image pri
|
|||
// Huh?! This should not happen at all; treat it as any other invalid value.
|
||||
fallthrough
|
||||
default:
|
||||
reason = fmt.Errorf(`Internal error: Unexpected signature verification result "%s"`, string(res))
|
||||
reason = fmt.Errorf(`Internal error: Unexpected signature verification result %q`, string(res))
|
||||
}
|
||||
rejections = append(rejections, reason)
|
||||
}
|
||||
|
|
|
@ -136,7 +136,7 @@ func (prm *prmRemapIdentity) remapReferencePrefix(ref reference.Named) (referenc
|
|||
newNamedRef := strings.Replace(refString, prm.Prefix, prm.SignedPrefix, 1)
|
||||
newParsedRef, err := reference.ParseNamed(newNamedRef)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf(`error rewriting reference from "%s" to "%s": %v`, refString, newNamedRef, err)
|
||||
return nil, fmt.Errorf(`error rewriting reference from %q to %q: %v`, refString, newNamedRef, err)
|
||||
}
|
||||
return newParsedRef, nil
|
||||
}
|
||||
|
|
|
@ -173,7 +173,11 @@ func (s *untrustedSignature) strictUnmarshalJSON(data []byte) error {
|
|||
}); err != nil {
|
||||
return err
|
||||
}
|
||||
s.untrustedDockerManifestDigest = digest.Digest(digestString)
|
||||
digestValue, err := digest.Parse(digestString)
|
||||
if err != nil {
|
||||
return internal.NewInvalidSignatureError(fmt.Sprintf(`invalid docker-manifest-digest value %q: %v`, digestString, err))
|
||||
}
|
||||
s.untrustedDockerManifestDigest = digestValue
|
||||
|
||||
return internal.ParanoidUnmarshalJSONObjectExactFields(identity, map[string]any{
|
||||
"docker-reference": &s.untrustedDockerReference,
|
||||
|
|
|
@ -59,7 +59,7 @@ type storageImageDestination struct {
|
|||
nextTempFileID atomic.Int32 // A counter that we use for computing filenames to assign to blobs
|
||||
manifest []byte // Manifest contents, temporary
|
||||
manifestDigest digest.Digest // Valid if len(manifest) != 0
|
||||
untrustedDiffIDValues []digest.Digest // From config’s RootFS.DiffIDs, valid if not nil
|
||||
untrustedDiffIDValues []digest.Digest // From config’s RootFS.DiffIDs (not even validated to be valid digest.Digest!); or nil if not read yet
|
||||
signatures []byte // Signature contents, temporary
|
||||
signatureses map[digest.Digest][]byte // Instance signature contents, temporary
|
||||
metadata storageImageMetadata // Metadata contents being built
|
||||
|
@ -94,11 +94,11 @@ type storageImageDestinationLockProtected struct {
|
|||
blobDiffIDs map[digest.Digest]digest.Digest // Mapping from layer blobsums to their corresponding DiffIDs
|
||||
indexToTOCDigest map[int]digest.Digest // Mapping from layer index to a TOC Digest, IFF the layer was created/found/reused by TOC digest
|
||||
|
||||
// Layer data: Before commitLayer is called, either at least one of (diffOutputs, blobAdditionalLayer, filenames)
|
||||
// Layer data: Before commitLayer is called, either at least one of (diffOutputs, indexToAdditionalLayer, filenames)
|
||||
// should be available; or indexToTOCDigest/blobDiffIDs should be enough to locate an existing c/storage layer.
|
||||
// They are looked up in the order they are mentioned above.
|
||||
diffOutputs map[int]*graphdriver.DriverWithDifferOutput // Mapping from layer index to a partially-pulled layer intermediate data
|
||||
blobAdditionalLayer map[digest.Digest]storage.AdditionalLayer // Mapping from layer blobsums to their corresponding additional layer
|
||||
diffOutputs map[int]*graphdriver.DriverWithDifferOutput // Mapping from layer index to a partially-pulled layer intermediate data
|
||||
indexToAdditionalLayer map[int]storage.AdditionalLayer // Mapping from layer index to their corresponding additional layer
|
||||
// Mapping from layer blobsums to names of files we used to hold them. If set, fileSizes and blobDiffIDs must also be set.
|
||||
filenames map[digest.Digest]string
|
||||
// Mapping from layer blobsums to their sizes. If set, filenames and blobDiffIDs must also be set.
|
||||
|
@ -145,13 +145,13 @@ func newImageDestination(sys *types.SystemContext, imageRef storageReference) (*
|
|||
},
|
||||
indexToStorageID: make(map[int]string),
|
||||
lockProtected: storageImageDestinationLockProtected{
|
||||
indexToAddedLayerInfo: make(map[int]addedLayerInfo),
|
||||
blobDiffIDs: make(map[digest.Digest]digest.Digest),
|
||||
indexToTOCDigest: make(map[int]digest.Digest),
|
||||
diffOutputs: make(map[int]*graphdriver.DriverWithDifferOutput),
|
||||
blobAdditionalLayer: make(map[digest.Digest]storage.AdditionalLayer),
|
||||
filenames: make(map[digest.Digest]string),
|
||||
fileSizes: make(map[digest.Digest]int64),
|
||||
indexToAddedLayerInfo: make(map[int]addedLayerInfo),
|
||||
blobDiffIDs: make(map[digest.Digest]digest.Digest),
|
||||
indexToTOCDigest: make(map[int]digest.Digest),
|
||||
diffOutputs: make(map[int]*graphdriver.DriverWithDifferOutput),
|
||||
indexToAdditionalLayer: make(map[int]storage.AdditionalLayer),
|
||||
filenames: make(map[digest.Digest]string),
|
||||
fileSizes: make(map[digest.Digest]int64),
|
||||
},
|
||||
}
|
||||
dest.Compat = impl.AddCompat(dest)
|
||||
|
@ -167,13 +167,11 @@ func (s *storageImageDestination) Reference() types.ImageReference {
|
|||
// Close cleans up the temporary directory and additional layer store handlers.
|
||||
func (s *storageImageDestination) Close() error {
|
||||
// This is outside of the scope of HasThreadSafePutBlob, so we don’t need to hold s.lock.
|
||||
for _, al := range s.lockProtected.blobAdditionalLayer {
|
||||
for _, al := range s.lockProtected.indexToAdditionalLayer {
|
||||
al.Release()
|
||||
}
|
||||
for _, v := range s.lockProtected.diffOutputs {
|
||||
if v.Target != "" {
|
||||
_ = s.imageRef.transport.store.CleanupStagedLayer(v)
|
||||
}
|
||||
_ = s.imageRef.transport.store.CleanupStagedLayer(v)
|
||||
}
|
||||
return os.RemoveAll(s.directory)
|
||||
}
|
||||
|
@ -310,6 +308,12 @@ func (s *storageImageDestination) PutBlobPartial(ctx context.Context, chunkAcces
|
|||
if err != nil {
|
||||
return private.UploadedBlob{}, err
|
||||
}
|
||||
succeeded := false
|
||||
defer func() {
|
||||
if !succeeded {
|
||||
_ = s.imageRef.transport.store.CleanupStagedLayer(out)
|
||||
}
|
||||
}()
|
||||
|
||||
if out.TOCDigest == "" && out.UncompressedDigest == "" {
|
||||
return private.UploadedBlob{}, errors.New("internal error: ApplyDiffWithDiffer succeeded with neither TOCDigest nor UncompressedDigest set")
|
||||
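The two hunks here add a classic Go rollback idiom: a boolean flipped only on the success path, with a deferred cleanup that fires on every early return in between. A self-contained sketch of the idiom (the three function parameters are hypothetical stand-ins for staging, committing, and cleaning up a layer):

    package cleanupdemo

    // applyStaged stages work, and guarantees cleanup unless the whole
    // operation reaches the success marker, mirroring the pattern above.
    func applyStaged(stage, commit func() error, cleanup func()) error {
        if err := stage(); err != nil {
            return err
        }
        succeeded := false
        defer func() {
            if !succeeded { // roll back staged state on any failure path
                cleanup()
            }
        }()
        if err := commit(); err != nil {
            return err // deferred cleanup runs here
        }
        succeeded = true // from now on, the staged state is owned elsewhere
        return nil
    }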
|
@ -332,6 +336,7 @@ func (s *storageImageDestination) PutBlobPartial(ctx context.Context, chunkAcces
|
|||
s.lockProtected.diffOutputs[options.LayerIndex] = out
|
||||
s.lock.Unlock()
|
||||
|
||||
succeeded = true
|
||||
return private.UploadedBlob{
|
||||
Digest: blobDigest,
|
||||
Size: srcInfo.Size,
|
||||
|
@ -377,14 +382,24 @@ func (s *storageImageDestination) tryReusingBlobAsPending(blobDigest digest.Dige
|
|||
s.lock.Lock()
|
||||
defer s.lock.Unlock()
|
||||
|
||||
if options.SrcRef != nil {
|
||||
if options.SrcRef != nil && options.TOCDigest != "" && options.LayerIndex != nil {
|
||||
// Check if we have the layer in the underlying additional layer store.
|
||||
aLayer, err := s.imageRef.transport.store.LookupAdditionalLayer(blobDigest, options.SrcRef.String())
|
||||
aLayer, err := s.imageRef.transport.store.LookupAdditionalLayer(options.TOCDigest, options.SrcRef.String())
|
||||
if err != nil && !errors.Is(err, storage.ErrLayerUnknown) {
|
||||
return false, private.ReusedBlob{}, fmt.Errorf(`looking for compressed layers with digest %q and labels: %w`, blobDigest, err)
|
||||
} else if err == nil {
|
||||
s.lockProtected.blobDiffIDs[blobDigest] = aLayer.UncompressedDigest()
|
||||
s.lockProtected.blobAdditionalLayer[blobDigest] = aLayer
|
||||
alsTOCDigest := aLayer.TOCDigest()
|
||||
if alsTOCDigest != options.TOCDigest {
|
||||
// FIXME: If alsTOCDigest is "", the Additional Layer Store FUSE server is probably just too old, and we could
|
||||
// probably go on reading the layer from other sources.
|
||||
//
|
||||
// Currently it should not be possible for alsTOCDigest to be set and not the expected value, but there’s
|
||||
// not that much benefit to checking for equality — we trust the FUSE server to validate the digest either way.
|
||||
return false, private.ReusedBlob{}, fmt.Errorf("additional layer for TOCDigest %q reports unexpected TOCDigest %q",
|
||||
options.TOCDigest, alsTOCDigest)
|
||||
}
|
||||
s.lockProtected.indexToTOCDigest[*options.LayerIndex] = options.TOCDigest
|
||||
s.lockProtected.indexToAdditionalLayer[*options.LayerIndex] = aLayer
|
||||
return true, private.ReusedBlob{
|
||||
Digest: blobDigest,
|
||||
Size: aLayer.CompressedSize(),
|
||||
|
@ -564,7 +579,7 @@ func (s *storageImageDestination) computeID(m manifest.Manifest) string {
|
|||
}
|
||||
// ordinaryImageID is a digest of a config, which is a JSON value.
|
||||
// To avoid the risk of collisions, start the input with @ so that the input is not a valid JSON.
|
||||
tocImageID := digest.FromString("@With TOC:" + tocIDInput).Hex()
|
||||
tocImageID := digest.FromString("@With TOC:" + tocIDInput).Encoded()
|
||||
logrus.Debugf("Ordinary storage image ID %s; a layer was looked up by TOC, so using image ID %s", ordinaryImageID, tocImageID)
|
||||
return tocImageID
|
||||
}
|
||||
|
@ -651,11 +666,11 @@ func (s *storageImageDestination) singleLayerIDComponent(layerIndex int, blobDig
|
|||
defer s.lock.Unlock()
|
||||
|
||||
if d, found := s.lockProtected.indexToTOCDigest[layerIndex]; found {
|
||||
return "@TOC=" + d.Hex(), false // "@" is not a valid start of a digest.Digest, so this is unambiguous.
|
||||
return "@TOC=" + d.Encoded(), false // "@" is not a valid start of a digest.Digest, so this is unambiguous.
|
||||
}
|
||||
|
||||
if d, found := s.lockProtected.blobDiffIDs[blobDigest]; found {
|
||||
return d.Hex(), true // This looks like chain IDs, and it uses the traditional value.
|
||||
return d.Encoded(), true // This looks like chain IDs, and it uses the traditional value.
|
||||
}
|
||||
return "", false
|
||||
}
|
||||
|
@ -731,7 +746,7 @@ func (s *storageImageDestination) commitLayer(index int, info addedLayerInfo, si
|
|||
|
||||
id := layerIDComponent
|
||||
if !layerIDComponentStandalone || parentLayer != "" {
|
||||
id = digest.Canonical.FromString(parentLayer + "+" + layerIDComponent).Hex()
|
||||
id = digest.Canonical.FromString(parentLayer + "+" + layerIDComponent).Encoded()
|
||||
}
|
||||
if layer, err2 := s.imageRef.transport.store.Layer(id); layer != nil && err2 == nil {
|
||||
// There's already a layer that should have the right contents, just reuse it.
|
||||
|
@ -767,7 +782,13 @@ func (s *storageImageDestination) createNewLayer(index int, layerDigest digest.D
|
|||
logrus.Debugf("Skipping commit for layer %q, manifest not yet available", newLayerID)
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
untrustedUncompressedDigest = d
|
||||
// While the contents of the digest are untrusted, make sure at least the _format_ is valid,
|
||||
// because we are going to write it to durable storage in expectedLayerDiffIDFlag .
|
||||
if err := untrustedUncompressedDigest.Validate(); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
|
||||
flags := make(map[string]interface{})
|
||||
|
@@ -793,7 +814,7 @@ func (s *storageImageDestination) createNewLayer(index int, layerDigest digest.D
}

s.lock.Lock()
-al, ok := s.lockProtected.blobAdditionalLayer[layerDigest]
+al, ok := s.lockProtected.indexToAdditionalLayer[index]
s.lock.Unlock()
if ok {
    layer, err := al.PutAs(newLayerID, parentLayer, nil)
@@ -107,12 +107,11 @@ func (s *storageImageSource) Close() error {
// GetBlob returns a stream for the specified blob, and the blob’s size (or -1 if unknown).
// The Digest field in BlobInfo is guaranteed to be provided, Size may be -1 and MediaType may be optionally provided.
// May update BlobInfoCache, preferably after it knows for certain that a blob truly exists at a specific location.
-func (s *storageImageSource) GetBlob(ctx context.Context, info types.BlobInfo, cache types.BlobInfoCache) (rc io.ReadCloser, n int64, err error) {
+func (s *storageImageSource) GetBlob(ctx context.Context, info types.BlobInfo, cache types.BlobInfoCache) (io.ReadCloser, int64, error) {
    // We need a valid digest value.
    digest := info.Digest

-   err = digest.Validate()
-   if err != nil {
+   if err := digest.Validate(); err != nil {
        return nil, 0, err
    }

@@ -154,7 +153,7 @@ func (s *storageImageSource) GetBlob(ctx context.Context, info types.BlobInfo, c
// NOTE: the blob is first written to a temporary file and subsequently
// closed. The intention is to keep the time we own the storage lock
// as short as possible to allow other processes to access the storage.
-rc, n, _, err = s.getBlobAndLayerID(digest, layers)
+rc, n, _, err := s.getBlobAndLayerID(digest, layers)
if err != nil {
    return nil, 0, err
}
@@ -177,7 +176,7 @@ func (s *storageImageSource) GetBlob(ctx context.Context, info types.BlobInfo, c
// On Unix and modern Windows (2022 at least) we can eagerly unlink the file to ensure it's automatically
// cleaned up on process termination (or if the caller forgets to invoke Close())
// On older versions of Windows we will have to fallback to relying on the caller to invoke Close()
-if err := os.Remove(tmpFile.Name()); err != nil {
+if err := os.Remove(tmpFile.Name()); err == nil {
    tmpFileRemovePending = false
}

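The bug fixed here is subtle: the cleanup flag was being cleared when Remove failed rather than when it succeeded. The underlying eager-unlink pattern the comment describes is worth seeing on its own. A minimal sketch using only the standard library (the directory argument and name prefix are arbitrary):

    package tmpdemo

    import "os"

    // openEphemeral creates a temp file and immediately removes its name.
    // On Unix (and recent Windows) the open descriptor stays fully usable,
    // so the data vanishes automatically on Close or process exit.
    func openEphemeral(dir string) (f *os.File, unlinked bool, err error) {
        f, err = os.CreateTemp(dir, "blob-")
        if err != nil {
            return nil, false, err
        }
        if rmErr := os.Remove(f.Name()); rmErr == nil {
            unlinked = true // no caller-driven cleanup needed anymore
        }
        // If unlinked is false (older Windows), the caller must remove
        // the file by name after Close, as the code above falls back to.
        return f, unlinked, nil
    }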
@@ -308,9 +307,6 @@ func (s *storageImageSource) LayerInfosForCopy(ctx context.Context, instanceDige
if err != nil {
    return nil, fmt.Errorf("reading layer %q in image %q: %w", layerID, s.image.ID, err)
}
-if layer.UncompressedSize < 0 {
-   return nil, fmt.Errorf("uncompressed size for layer %q is unknown", layerID)
-}

blobDigest := layer.UncompressedDigest
if blobDigest == "" {
@@ -332,12 +328,16 @@ func (s *storageImageSource) LayerInfosForCopy(ctx context.Context, instanceDige
        return nil, fmt.Errorf("parsing expected diffID %q for layer %q: %w", expectedDigest, layerID, err)
    }
}
+size := layer.UncompressedSize
+if size < 0 {
+   size = -1
+}
s.getBlobMutex.Lock()
s.getBlobMutexProtected.digestToLayerID[blobDigest] = layer.ID
s.getBlobMutex.Unlock()
blobInfo := types.BlobInfo{
    Digest:    blobDigest,
-   Size:      layer.UncompressedSize,
+   Size:      size,
    MediaType: uncompressedLayerType,
}
physicalBlobInfos = append([]types.BlobInfo{blobInfo}, physicalBlobInfos...)
@@ -453,10 +453,16 @@ func (s *storageImageSource) getSize() (int64, error) {
if err != nil {
    return -1, err
}
-if (layer.TOCDigest == "" && layer.UncompressedDigest == "") || layer.UncompressedSize < 0 {
+if (layer.TOCDigest == "" && layer.UncompressedDigest == "") || (layer.TOCDigest == "" && layer.UncompressedSize < 0) {
    return -1, fmt.Errorf("size for layer %q is unknown, failing getSize()", layerID)
}
-sum += layer.UncompressedSize
+// FIXME: We allow layer.UncompressedSize < 0 above, because currently images in an Additional Layer Store don’t provide that value.
+// Right now, various callers in Podman (and, also, newImage in this package) don’t expect the size computation to fail.
+// Should we update the callers, or do we need to continue returning inaccurate information here? Or should we pay the cost
+// to compute the size from the diff?
+if layer.UncompressedSize >= 0 {
+   sum += layer.UncompressedSize
+}
if layer.Parent == "" {
    break
}
|
@ -117,7 +117,7 @@ func (r *tarballReference) NewImageSource(ctx context.Context, sys *types.System
|
|||
|
||||
history = append(history, imgspecv1.History{
|
||||
Created: &blobTime,
|
||||
CreatedBy: fmt.Sprintf("/bin/sh -c #(nop) ADD file:%s in %c", diffID.Hex(), os.PathSeparator),
|
||||
CreatedBy: fmt.Sprintf("/bin/sh -c #(nop) ADD file:%s in %c", diffID.Encoded(), os.PathSeparator),
|
||||
Comment: comment,
|
||||
})
|
||||
// Use the mtime of the most recently modified file as the image's creation time.
|
||||
vendor/github.com/containers/image/v5/transports/alltransports/alltransports.go (generated, vendored)
@@ -28,11 +28,11 @@ func ParseImageName(imgName string) (types.ImageReference, error) {
// Keep this in sync with TransportFromImageName!
transportName, withinTransport, valid := strings.Cut(imgName, ":")
if !valid {
-   return nil, fmt.Errorf(`Invalid image name "%s", expected colon-separated transport:reference`, imgName)
+   return nil, fmt.Errorf(`Invalid image name %q, expected colon-separated transport:reference`, imgName)
}
transport := transports.Get(transportName)
if transport == nil {
-   return nil, fmt.Errorf(`Invalid image name "%s", unknown transport "%s"`, imgName, transportName)
+   return nil, fmt.Errorf(`Invalid image name %q, unknown transport %q`, imgName, transportName)
}
return transport.ParseReference(withinTransport)
}
@@ -6,12 +6,12 @@ const (
// VersionMajor is for an API incompatible changes
VersionMajor = 5
// VersionMinor is for functionality in a backwards-compatible manner
-VersionMinor = 30
+VersionMinor = 31
// VersionPatch is for backwards-compatible bug fixes
-VersionPatch = 2
+VersionPatch = 0

// VersionDev indicates development branch. Releases will be empty string.
-VersionDev = "-dev"
+VersionDev = ""
)

// Version is the specification version that the package types support.
@@ -23,7 +23,7 @@ env:
# GCE project where images live
IMAGE_PROJECT: "libpod-218412"
# VM Image built in containers/automation_images
-IMAGE_SUFFIX: "c20240411t124913z-f39f38d13"
+IMAGE_SUFFIX: "c20240513t140131z-f40f39d13"
FEDORA_CACHE_IMAGE_NAME: "fedora-${IMAGE_SUFFIX}"
DEBIAN_CACHE_IMAGE_NAME: "debian-${IMAGE_SUFFIX}"
@@ -1 +1 @@
-1.53.1-dev
+1.54.0
@@ -208,8 +208,6 @@ type LayerStore interface {
ParentOwners(id string) (uids, gids []int, err error)
ApplyDiff(to string, diff io.Reader) (int64, error)
ApplyDiffWithDiffer(to string, options *drivers.ApplyDiffOpts, differ drivers.Differ) (*drivers.DriverWithDifferOutput, error)
CleanupStagingDirectory(stagingDirectory string) error
ApplyDiffFromStagingDirectory(id, stagingDirectory string, diffOutput *drivers.DriverWithDifferOutput, options *drivers.ApplyDiffOpts) error
DifferTarget(id string) (string, error)
LoadLocked() error
PutAdditionalLayer(id string, parentLayer *Layer, names []string, aLayer drivers.AdditionalLayer) (layer *Layer, err error)
@@ -298,8 +298,8 @@ type AdditionalLayerStoreDriver interface {
Driver

// LookupAdditionalLayer looks up additional layer store by the specified
-// digest and ref and returns an object representing that layer.
-LookupAdditionalLayer(d digest.Digest, ref string) (AdditionalLayer, error)
+// TOC digest and ref and returns an object representing that layer.
+LookupAdditionalLayer(tocDigest digest.Digest, ref string) (AdditionalLayer, error)

// LookupAdditionalLayer looks up additional layer store by the specified
// ID and returns an object representing that layer.
@@ -28,6 +28,7 @@ import (
"github.com/containers/storage/pkg/fsutils"
"github.com/containers/storage/pkg/idmap"
"github.com/containers/storage/pkg/idtools"
+"github.com/containers/storage/pkg/lockfile"
"github.com/containers/storage/pkg/mount"
"github.com/containers/storage/pkg/parsers"
"github.com/containers/storage/pkg/system"
@@ -83,6 +84,8 @@ const (
lowerFile = "lower"
maxDepth  = 500

+stagingLockFile = "staging.lock"
+
tocArtifact             = "toc"
fsVerityDigestsArtifact = "fs-verity-digests"

@@ -127,6 +130,8 @@ type Driver struct {
usingMetacopy  bool
usingComposefs bool

+stagingDirsLocks map[string]*lockfile.LockFile
+
supportsIDMappedMounts *bool
}

@@ -460,6 +465,7 @@ func Init(home string, options graphdriver.Options) (graphdriver.Driver, error)
supportsVolatile: supportsVolatile,
usingComposefs:   opts.useComposefs,
options:          *opts,
+stagingDirsLocks: make(map[string]*lockfile.LockFile),
}

d.naiveDiff = graphdriver.NewNaiveDiffDriver(d, graphdriver.NewNaiveLayerIDMapUpdater(d))
@@ -876,20 +882,54 @@ func (d *Driver) Metadata(id string) (map[string]string, error) {
    return metadata, nil
}

-// Cleanup any state created by overlay which should be cleaned when daemon
-// is being shutdown. For now, we just have to unmount the bind mounted
-// we had created.
+// Cleanup any state created by overlay which should be cleaned when
+// the storage is being shutdown. The only state created by the driver
+// is the bind mount on the home directory.
func (d *Driver) Cleanup() error {
-   _ = os.RemoveAll(filepath.Join(d.home, stagingDir))
+   anyPresent := d.pruneStagingDirectories()
+   if anyPresent {
+       return nil
+   }
    return mount.Unmount(d.home)
}

+// pruneStagingDirectories cleans up any staging directory that was leaked.
+// It returns whether any staging directory is still present.
+func (d *Driver) pruneStagingDirectories() bool {
+   for _, lock := range d.stagingDirsLocks {
+       lock.Unlock()
+   }
+   d.stagingDirsLocks = make(map[string]*lockfile.LockFile)
+
+   anyPresent := false
+
+   homeStagingDir := filepath.Join(d.home, stagingDir)
+   dirs, err := os.ReadDir(homeStagingDir)
+   if err == nil {
+       for _, dir := range dirs {
+           stagingDirToRemove := filepath.Join(homeStagingDir, dir.Name())
+           lock, err := lockfile.GetLockFile(filepath.Join(stagingDirToRemove, stagingLockFile))
+           if err != nil {
+               anyPresent = true
+               continue
+           }
+           if err := lock.TryLock(); err != nil {
+               anyPresent = true
+               continue
+           }
+           _ = os.RemoveAll(stagingDirToRemove)
+           lock.Unlock()
+       }
+   }
+   return anyPresent
+}
+
// LookupAdditionalLayer looks up additional layer store by the specified
-// digest and ref and returns an object representing that layer.
+// TOC digest and ref and returns an object representing that layer.
// This API is experimental and can be changed without bumping the major version number.
// TODO: to remove the comment once it's no longer experimental.
-func (d *Driver) LookupAdditionalLayer(dgst digest.Digest, ref string) (graphdriver.AdditionalLayer, error) {
-   l, err := d.getAdditionalLayerPath(dgst, ref)
+func (d *Driver) LookupAdditionalLayer(tocDigest digest.Digest, ref string) (graphdriver.AdditionalLayer, error) {
+   l, err := d.getAdditionalLayerPath(tocDigest, ref)
    if err != nil {
        return nil, err
    }
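pruneStagingDirectories above replaces a blanket RemoveAll with lock-guarded pruning: a staging directory is deleted only when its staging.lock can be taken with TryLock, so directories actively owned by another process survive. A condensed sketch of just that decision, using the same lockfile calls the hunk introduces (directory layout and lock-file name mirror the code; the function itself is a stand-in):

    package prunedemo

    import (
        "os"
        "path/filepath"

        "github.com/containers/storage/pkg/lockfile"
    )

    // pruneStaging removes leaked staging dirs under home; it reports whether
    // any directory had to be left in place (missing or busy lock).
    func pruneStaging(home string) bool {
        anyPresent := false
        dirs, err := os.ReadDir(home)
        if err != nil {
            return false
        }
        for _, dir := range dirs {
            candidate := filepath.Join(home, dir.Name())
            lock, err := lockfile.GetLockFile(filepath.Join(candidate, "staging.lock"))
            if err != nil {
                anyPresent = true
                continue
            }
            if err := lock.TryLock(); err != nil { // busy: another process owns it
                anyPresent = true
                continue
            }
            _ = os.RemoveAll(candidate)
            lock.Unlock()
        }
        return anyPresent
    }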
@@ -2029,7 +2069,14 @@ func (d *Driver) DiffGetter(id string) (graphdriver.FileGetCloser, error) {

// CleanupStagingDirectory cleanups the staging directory.
func (d *Driver) CleanupStagingDirectory(stagingDirectory string) error {
-   return os.RemoveAll(stagingDirectory)
+   parentStagingDir := filepath.Dir(stagingDirectory)
+
+   if lock, ok := d.stagingDirsLocks[parentStagingDir]; ok {
+       delete(d.stagingDirsLocks, parentStagingDir)
+       lock.Unlock()
+   }
+
+   return os.RemoveAll(parentStagingDir)
}

func supportsDataOnlyLayersCached(home, runhome string) (bool, error) {
@@ -2050,8 +2097,8 @@ func supportsDataOnlyLayersCached(home, runhome string) (bool, error) {
    return supportsDataOnly, err
}

-// ApplyDiff applies the changes in the new layer using the specified function
-func (d *Driver) ApplyDiffWithDiffer(id, parent string, options *graphdriver.ApplyDiffWithDifferOpts, differ graphdriver.Differ) (output graphdriver.DriverWithDifferOutput, err error) {
+// ApplyDiffWithDiffer applies the changes in the new layer using the specified function
+func (d *Driver) ApplyDiffWithDiffer(id, parent string, options *graphdriver.ApplyDiffWithDifferOpts, differ graphdriver.Differ) (output graphdriver.DriverWithDifferOutput, errRet error) {
    var idMappings *idtools.IDMappings
    if options != nil {
        idMappings = options.Mappings
@@ -2068,7 +2115,7 @@ func (d *Driver) ApplyDiffWithDiffer(id, parent string, options *graphdriver.App
if err != nil && !os.IsExist(err) {
    return graphdriver.DriverWithDifferOutput{}, err
}
-applyDir, err = os.MkdirTemp(stagingDir, "")
+layerDir, err := os.MkdirTemp(stagingDir, "")
if err != nil {
    return graphdriver.DriverWithDifferOutput{}, err
}
@@ -2076,9 +2123,23 @@ func (d *Driver) ApplyDiffWithDiffer(id, parent string, options *graphdriver.App
if d.options.forceMask != nil {
    perms = *d.options.forceMask
}
-if err := os.Chmod(applyDir, perms); err != nil {
+applyDir = filepath.Join(layerDir, "dir")
+if err := os.Mkdir(applyDir, perms); err != nil {
    return graphdriver.DriverWithDifferOutput{}, err
}

+lock, err := lockfile.GetLockFile(filepath.Join(layerDir, stagingLockFile))
+if err != nil {
+   return graphdriver.DriverWithDifferOutput{}, err
+}
+defer func() {
+   if errRet != nil {
+       delete(d.stagingDirsLocks, layerDir)
+       lock.Unlock()
+   }
+}()
+d.stagingDirsLocks[layerDir] = lock
+lock.Lock()
} else {
    var err error
    applyDir, err = d.getDiffPath(id)
@@ -2112,9 +2173,19 @@ func (d *Driver) ApplyDiffWithDiffer(id, parent string, options *graphdriver.App
// ApplyDiffFromStagingDirectory applies the changes using the specified staging directory.
func (d *Driver) ApplyDiffFromStagingDirectory(id, parent string, diffOutput *graphdriver.DriverWithDifferOutput, options *graphdriver.ApplyDiffWithDifferOpts) error {
    stagingDirectory := diffOutput.Target
-   if filepath.Dir(stagingDirectory) != d.getStagingDir(id) {
+   parentStagingDir := filepath.Dir(stagingDirectory)
+
+   defer func() {
+       if lock, ok := d.stagingDirsLocks[parentStagingDir]; ok {
+           delete(d.stagingDirsLocks, parentStagingDir)
+           lock.Unlock()
+       }
+   }()
+
+   if filepath.Dir(parentStagingDir) != d.getStagingDir(id) {
        return fmt.Errorf("%q is not a staging directory", stagingDirectory)
    }

    diffPath, err := d.getDiffPath(id)
    if err != nil {
        return err
@@ -2405,14 +2476,14 @@ func nameWithSuffix(name string, number int) string {
    return fmt.Sprintf("%s%d", name, number)
}

-func (d *Driver) getAdditionalLayerPath(dgst digest.Digest, ref string) (string, error) {
+func (d *Driver) getAdditionalLayerPath(tocDigest digest.Digest, ref string) (string, error) {
    refElem := base64.StdEncoding.EncodeToString([]byte(ref))
    for _, ls := range d.options.layerStores {
        ref := ""
        if ls.withReference {
            ref = refElem
        }
-       target := path.Join(ls.path, ref, dgst.String())
+       target := path.Join(ls.path, ref, tocDigest.String())
        // Check if all necessary files exist
        for _, p := range []string{
            filepath.Join(target, "diff"),
@@ -2427,7 +2498,7 @@ func (d *Driver) getAdditionalLayerPath(dgst digest.Digest, ref string) (string,
        return target, nil
    }

-   return "", fmt.Errorf("additional layer (%q, %q) not found: %w", dgst, ref, graphdriver.ErrLayerUnknown)
+   return "", fmt.Errorf("additional layer (%q, %q) not found: %w", tocDigest, ref, graphdriver.ErrLayerUnknown)
}

func (d *Driver) releaseAdditionalLayerByID(id string) {
@@ -823,81 +823,90 @@ func unmarshalToc(manifest []byte) (*internal.TOC, error) {
iter := jsoniter.ParseBytes(jsoniter.ConfigFastest, manifest)

for field := iter.ReadObject(); field != ""; field = iter.ReadObject() {
-   if strings.ToLower(field) == "version" {
+   switch strings.ToLower(field) {
+   case "version":
        toc.Version = iter.ReadInt()
-       continue
-   }
-   if strings.ToLower(field) != "entries" {
-       iter.Skip()
-       continue
-   }
-   for iter.ReadArray() {
-       var m internal.FileMetadata
-       for field := iter.ReadObject(); field != ""; field = iter.ReadObject() {
-           switch strings.ToLower(field) {
-           case "type":
-               m.Type = iter.ReadString()
-           case "name":
-               m.Name = iter.ReadString()
-           case "linkname":
-               m.Linkname = iter.ReadString()
-           case "mode":
-               m.Mode = iter.ReadInt64()
-           case "size":
-               m.Size = iter.ReadInt64()
-           case "uid":
-               m.UID = iter.ReadInt()
-           case "gid":
-               m.GID = iter.ReadInt()
-           case "modtime":
-               time, err := time.Parse(time.RFC3339, iter.ReadString())
-               if err != nil {
-                   return nil, err

+   case "entries":
+       for iter.ReadArray() {
+           var m internal.FileMetadata
+           for field := iter.ReadObject(); field != ""; field = iter.ReadObject() {
+               switch strings.ToLower(field) {
+               case "type":
+                   m.Type = iter.ReadString()
+               case "name":
+                   m.Name = iter.ReadString()
+               case "linkname":
+                   m.Linkname = iter.ReadString()
+               case "mode":
+                   m.Mode = iter.ReadInt64()
+               case "size":
+                   m.Size = iter.ReadInt64()
+               case "uid":
+                   m.UID = iter.ReadInt()
+               case "gid":
+                   m.GID = iter.ReadInt()
+               case "modtime":
+                   time, err := time.Parse(time.RFC3339, iter.ReadString())
+                   if err != nil {
+                       return nil, err
+                   }
+                   m.ModTime = &time
+               case "accesstime":
+                   time, err := time.Parse(time.RFC3339, iter.ReadString())
+                   if err != nil {
+                       return nil, err
+                   }
+                   m.AccessTime = &time
+               case "changetime":
+                   time, err := time.Parse(time.RFC3339, iter.ReadString())
+                   if err != nil {
+                       return nil, err
+                   }
+                   m.ChangeTime = &time
+               case "devmajor":
+                   m.Devmajor = iter.ReadInt64()
+               case "devminor":
+                   m.Devminor = iter.ReadInt64()
+               case "digest":
+                   m.Digest = iter.ReadString()
+               case "offset":
+                   m.Offset = iter.ReadInt64()
+               case "endoffset":
+                   m.EndOffset = iter.ReadInt64()
+               case "chunksize":
+                   m.ChunkSize = iter.ReadInt64()
+               case "chunkoffset":
+                   m.ChunkOffset = iter.ReadInt64()
+               case "chunkdigest":
+                   m.ChunkDigest = iter.ReadString()
+               case "chunktype":
+                   m.ChunkType = iter.ReadString()
+               case "xattrs":
+                   m.Xattrs = make(map[string]string)
+                   for key := iter.ReadObject(); key != ""; key = iter.ReadObject() {
+                       m.Xattrs[key] = iter.ReadString()
+                   }
+               default:
+                   iter.Skip()
                }
-               m.ModTime = &time
-           case "accesstime":
-               time, err := time.Parse(time.RFC3339, iter.ReadString())
-               if err != nil {
-                   return nil, err
-               }
-               m.AccessTime = &time
-           case "changetime":
-               time, err := time.Parse(time.RFC3339, iter.ReadString())
-               if err != nil {
-                   return nil, err
-               }
-               m.ChangeTime = &time
-           case "devmajor":
-               m.Devmajor = iter.ReadInt64()
-           case "devminor":
-               m.Devminor = iter.ReadInt64()
-           case "digest":
-               m.Digest = iter.ReadString()
-           case "offset":
-               m.Offset = iter.ReadInt64()
-           case "endoffset":
-               m.EndOffset = iter.ReadInt64()
-           case "chunksize":
-               m.ChunkSize = iter.ReadInt64()
-           case "chunkoffset":
-               m.ChunkOffset = iter.ReadInt64()
-           case "chunkdigest":
-               m.ChunkDigest = iter.ReadString()
-           case "chunktype":
-               m.ChunkType = iter.ReadString()
-           case "xattrs":
-               m.Xattrs = make(map[string]string)
-               for key := iter.ReadObject(); key != ""; key = iter.ReadObject() {
-                   m.Xattrs[key] = iter.ReadString()
-               }
-           default:
-               iter.Skip()
-           }
+           if m.Type == TypeReg && m.Size == 0 && m.Digest == "" {
+               m.Digest = digestSha256Empty
+           }
+           toc.Entries = append(toc.Entries, m)
+       }
-       if m.Type == TypeReg && m.Size == 0 && m.Digest == "" {
-           m.Digest = digestSha256Empty
-       }
+   case "tarsplitdigest": // strings.ToLower("tarSplitDigest")
+       s := iter.ReadString()
+       d, err := digest.Parse(s)
+       if err != nil {
+           return nil, fmt.Errorf("Invalid tarSplitDigest %q: %w", s, err)
+       }
-       toc.Entries = append(toc.Entries, m)
+       toc.TarSplitDigest = d
+
+   default:
+       iter.Skip()
    }
}
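The restructured loop above is an instance of jsoniter's streaming pattern: walk object fields with ReadObject, dispatch on the lowercased key, and Skip anything unknown so unrecognized fields stay forward-compatible. A compact, self-contained sketch of the same pattern on a toy type (only the jsoniter calls mirror the code; the type is invented for illustration):

    package tocdemo

    import (
        "strings"

        jsoniter "github.com/json-iterator/go"
    )

    type mini struct {
        Version int
        Names   []string
    }

    // parseMini streams through a JSON object without building an
    // intermediate map, exactly as unmarshalToc does above.
    func parseMini(data []byte) (mini, error) {
        var m mini
        iter := jsoniter.ParseBytes(jsoniter.ConfigFastest, data)
        for field := iter.ReadObject(); field != ""; field = iter.ReadObject() {
            switch strings.ToLower(field) {
            case "version":
                m.Version = iter.ReadInt()
            case "names":
                for iter.ReadArray() {
                    m.Names = append(m.Names, iter.ReadString())
                }
            default:
                iter.Skip() // tolerate unknown fields
            }
        }
        return m, iter.Error
    }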
@@ -133,37 +133,36 @@ func readEstargzChunkedManifest(blobStream ImageSourceSeekable, blobSize int64,
}

// readZstdChunkedManifest reads the zstd:chunked manifest from the seekable stream blobStream.
-func readZstdChunkedManifest(blobStream ImageSourceSeekable, tocDigest digest.Digest, annotations map[string]string) ([]byte, []byte, int64, error) {
+// Returns (manifest blob, parsed manifest, tar-split blob, manifest offset).
+func readZstdChunkedManifest(blobStream ImageSourceSeekable, tocDigest digest.Digest, annotations map[string]string) ([]byte, *internal.TOC, []byte, int64, error) {
    offsetMetadata := annotations[internal.ManifestInfoKey]
    if offsetMetadata == "" {
-       return nil, nil, 0, fmt.Errorf("%q annotation missing", internal.ManifestInfoKey)
+       return nil, nil, nil, 0, fmt.Errorf("%q annotation missing", internal.ManifestInfoKey)
    }
    var manifestChunk ImageSourceChunk
    var manifestLengthUncompressed, manifestType uint64
    if _, err := fmt.Sscanf(offsetMetadata, "%d:%d:%d:%d", &manifestChunk.Offset, &manifestChunk.Length, &manifestLengthUncompressed, &manifestType); err != nil {
-       return nil, nil, 0, err
+       return nil, nil, nil, 0, err
    }
    // The tarSplit… values are valid if tarSplitChunk.Offset > 0
    var tarSplitChunk ImageSourceChunk
    var tarSplitLengthUncompressed uint64
-   var tarSplitChecksum string
    if tarSplitInfoKeyAnnotation, found := annotations[internal.TarSplitInfoKey]; found {
        if _, err := fmt.Sscanf(tarSplitInfoKeyAnnotation, "%d:%d:%d", &tarSplitChunk.Offset, &tarSplitChunk.Length, &tarSplitLengthUncompressed); err != nil {
-           return nil, nil, 0, err
+           return nil, nil, nil, 0, err
        }
-       tarSplitChecksum = annotations[internal.TarSplitChecksumKey]
    }

    if manifestType != internal.ManifestTypeCRFS {
-       return nil, nil, 0, errors.New("invalid manifest type")
+       return nil, nil, nil, 0, errors.New("invalid manifest type")
    }

    // set a reasonable limit
    if manifestChunk.Length > (1<<20)*50 {
-       return nil, nil, 0, errors.New("manifest too big")
+       return nil, nil, nil, 0, errors.New("manifest too big")
    }
    if manifestLengthUncompressed > (1<<20)*50 {
-       return nil, nil, 0, errors.New("manifest too big")
+       return nil, nil, nil, 0, errors.New("manifest too big")
    }

    chunks := []ImageSourceChunk{manifestChunk}

@@ -172,7 +171,7 @@ func readZstdChunkedManifest(blobStream ImageSourceSeekable, tocDigest digest.Di
    }
    parts, errs, err := blobStream.GetBlobAt(chunks)
    if err != nil {
-       return nil, nil, 0, err
+       return nil, nil, nil, 0, err
    }

    readBlob := func(len uint64) ([]byte, error) {

@@ -197,32 +196,37 @@ func readZstdChunkedManifest(blobStream ImageSourceSeekable, tocDigest digest.Di

    manifest, err := readBlob(manifestChunk.Length)
    if err != nil {
-       return nil, nil, 0, err
+       return nil, nil, nil, 0, err
    }

    decodedBlob, err := decodeAndValidateBlob(manifest, manifestLengthUncompressed, tocDigest.String())
    if err != nil {
-       return nil, nil, 0, err
+       return nil, nil, nil, 0, fmt.Errorf("validating and decompressing TOC: %w", err)
    }
+   toc, err := unmarshalToc(decodedBlob)
+   if err != nil {
+       return nil, nil, nil, 0, fmt.Errorf("unmarshaling TOC: %w", err)
+   }

    decodedTarSplit := []byte{}
    if tarSplitChunk.Offset > 0 {
        tarSplit, err := readBlob(tarSplitChunk.Length)
        if err != nil {
-           return nil, nil, 0, err
+           return nil, nil, nil, 0, err
        }

-       decodedTarSplit, err = decodeAndValidateBlob(tarSplit, tarSplitLengthUncompressed, tarSplitChecksum)
+       decodedTarSplit, err = decodeAndValidateBlob(tarSplit, tarSplitLengthUncompressed, toc.TarSplitDigest.String())
        if err != nil {
-           return nil, nil, 0, err
+           return nil, nil, nil, 0, fmt.Errorf("validating and decompressing tar-split: %w", err)
        }
    }
-   return decodedBlob, decodedTarSplit, int64(manifestChunk.Offset), err
+   return decodedBlob, toc, decodedTarSplit, int64(manifestChunk.Offset), err
}

func decodeAndValidateBlob(blob []byte, lengthUncompressed uint64, expectedCompressedChecksum string) ([]byte, error) {
    d, err := digest.Parse(expectedCompressedChecksum)
    if err != nil {
-       return nil, err
+       return nil, fmt.Errorf("invalid digest %q: %w", expectedCompressedChecksum, err)
    }

    blobDigester := d.Algorithm().Digester()
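decodeAndValidateBlob checks a compressed blob against an expected checksum before decompressing it. The verification half of that, isolated into a sketch using go-digest's Digester (the same Algorithm().Digester() call the code above makes; the function wrapper is illustrative):

    package verifydemo

    import (
        "fmt"

        digest "github.com/opencontainers/go-digest"
    )

    // verifyBlob recomputes the blob's digest with the algorithm named in
    // expected and compares, as decodeAndValidateBlob does before decoding.
    func verifyBlob(blob []byte, expected string) error {
        d, err := digest.Parse(expected)
        if err != nil {
            return fmt.Errorf("invalid digest %q: %w", expected, err)
        }
        digester := d.Algorithm().Digester()
        if _, err := digester.Hash().Write(blob); err != nil {
            return err
        }
        if got := digester.Digest(); got != d {
            return fmt.Errorf("invalid blob checksum %s, expected %s", got, d)
        }
        return nil
    }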
@@ -18,8 +18,9 @@ import (
 )
 
 type TOC struct {
-	Version int            `json:"version"`
-	Entries []FileMetadata `json:"entries"`
+	Version        int            `json:"version"`
+	Entries        []FileMetadata `json:"entries"`
+	TarSplitDigest digest.Digest  `json:"tarSplitDigest,omitempty"`
 }
 
 type FileMetadata struct {
@@ -84,9 +85,10 @@ func GetType(t byte) (string, error) {
 const (
 	ManifestChecksumKey = "io.github.containers.zstd-chunked.manifest-checksum"
 	ManifestInfoKey     = "io.github.containers.zstd-chunked.manifest-position"
-	TarSplitChecksumKey = "io.github.containers.zstd-chunked.tarsplit-checksum"
 	TarSplitInfoKey     = "io.github.containers.zstd-chunked.tarsplit-position"
 
+	TarSplitChecksumKey = "io.github.containers.zstd-chunked.tarsplit-checksum" // Deprecated: Use the TOC.TarSplitDigest field instead, this annotation is no longer read nor written.
+
 	// ManifestTypeCRFS is a manifest file compatible with the CRFS TOC file.
 	ManifestTypeCRFS = 1
 
@@ -133,8 +135,9 @@ func WriteZstdChunkedManifest(dest io.Writer, outMetadata map[string]string, off
 	manifestOffset := offset + zstdSkippableFrameHeader
 
 	toc := TOC{
-		Version: 1,
-		Entries: metadata,
+		Version:        1,
+		Entries:        metadata,
+		TarSplitDigest: tarSplitData.Digest,
 	}
 
 	json := jsoniter.ConfigCompatibleWithStandardLibrary
@@ -170,7 +173,6 @@ func WriteZstdChunkedManifest(dest io.Writer, outMetadata map[string]string, off
 		return err
 	}
 
-	outMetadata[TarSplitChecksumKey] = tarSplitData.Digest.String()
 	tarSplitOffset := manifestOffset + uint64(len(compressedManifest)) + zstdSkippableFrameHeader
 	outMetadata[TarSplitInfoKey] = fmt.Sprintf("%d:%d:%d", tarSplitOffset, len(tarSplitData.Data), tarSplitData.UncompressedSize)
 	if err := appendZstdSkippableFrame(dest, tarSplitData.Data); err != nil {
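Moving TarSplitDigest into the TOC means the tar-split checksum now travels inside the manifest JSON, which is itself covered by the TOC digest, rather than in the unprotected tarsplit-checksum annotation. A rough sketch of the resulting wire format, using a trimmed stand-in for the internal TOC type (only the go-digest and encoding/json APIs are assumed):

package main

import (
	"encoding/json"
	"fmt"

	digest "github.com/opencontainers/go-digest"
)

// tocStandIn is a trimmed-down stand-in for the internal TOC type shown in
// the diff; only the fields relevant to the tar-split change are kept.
type tocStandIn struct {
	Version        int           `json:"version"`
	TarSplitDigest digest.Digest `json:"tarSplitDigest,omitempty"`
}

func main() {
	toc := tocStandIn{
		Version:        1,
		TarSplitDigest: digest.FromString("tar-split data"),
	}
	out, _ := json.Marshal(toc)
	// Prints {"version":1,"tarSplitDigest":"sha256:..."}
	fmt.Println(string(out))
}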
@@ -79,6 +79,7 @@ type compressedFileType int
 type chunkedDiffer struct {
 	stream      ImageSourceSeekable
 	manifest    []byte
+	toc         *internal.TOC // The parsed contents of manifest, or nil if not yet available
 	tarSplit    []byte
 	layersCache *layersCache
 	tocOffset   int64
@@ -314,7 +315,7 @@ func makeConvertFromRawDiffer(ctx context.Context, store storage.Store, blobDige
 }
 
 func makeZstdChunkedDiffer(ctx context.Context, store storage.Store, blobSize int64, tocDigest digest.Digest, annotations map[string]string, iss ImageSourceSeekable, storeOpts *types.StoreOptions) (*chunkedDiffer, error) {
-	manifest, tarSplit, tocOffset, err := readZstdChunkedManifest(iss, tocDigest, annotations)
+	manifest, toc, tarSplit, tocOffset, err := readZstdChunkedManifest(iss, tocDigest, annotations)
 	if err != nil {
 		return nil, fmt.Errorf("read zstd:chunked manifest: %w", err)
 	}
@@ -331,6 +332,7 @@ func makeZstdChunkedDiffer(ctx context.Context, store storage.Store, blobSize in
 		fileType:    fileTypeZstdChunked,
 		layersCache: layersCache,
 		manifest:    manifest,
+		toc:         toc,
 		storeOpts:   storeOpts,
 		stream:      iss,
 		tarSplit:    tarSplit,
@@ -1701,7 +1703,7 @@ func (c *chunkedDiffer) ApplyDiff(dest string, options *archive.TarOptions, diff
 		if tocDigest == nil {
 			return graphdriver.DriverWithDifferOutput{}, fmt.Errorf("internal error: just-created zstd:chunked missing TOC digest")
 		}
-		manifest, tarSplit, tocOffset, err := readZstdChunkedManifest(fileSource, *tocDigest, annotations)
+		manifest, toc, tarSplit, tocOffset, err := readZstdChunkedManifest(fileSource, *tocDigest, annotations)
 		if err != nil {
 			return graphdriver.DriverWithDifferOutput{}, fmt.Errorf("read zstd:chunked manifest: %w", err)
 		}
@@ -1712,6 +1714,7 @@ func (c *chunkedDiffer) ApplyDiff(dest string, options *archive.TarOptions, diff
 		// fill the chunkedDiffer with the data we just read.
 		c.fileType = fileTypeZstdChunked
 		c.manifest = manifest
+		c.toc = toc
 		c.tarSplit = tarSplit
 		c.tocOffset = tocOffset
 
@@ -1732,9 +1735,13 @@ func (c *chunkedDiffer) ApplyDiff(dest string, options *archive.TarOptions, diff
 	}
 
 	// Generate the manifest
-	toc, err := unmarshalToc(c.manifest)
-	if err != nil {
-		return graphdriver.DriverWithDifferOutput{}, err
+	toc := c.toc
+	if toc == nil {
+		toc_, err := unmarshalToc(c.manifest)
+		if err != nil {
+			return graphdriver.DriverWithDifferOutput{}, err
+		}
+		toc = toc_
 	}
 
 	output := graphdriver.DriverWithDifferOutput{
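The differ now carries the TOC parsed during readZstdChunkedManifest, and ApplyDiff falls back to unmarshaling c.manifest only when that cache is nil. The pattern is plain memoization; a self-contained sketch under illustrative types:

package main

import (
	"encoding/json"
	"fmt"
)

type toc struct {
	Version int `json:"version"`
}

type differ struct {
	manifest []byte // raw TOC JSON
	toc      *toc   // parsed TOC, or nil if not yet available
}

// getTOC returns the cached parse when available and otherwise unmarshals
// the raw manifest, caching the result for later callers.
func (d *differ) getTOC() (*toc, error) {
	if d.toc != nil {
		return d.toc, nil
	}
	var t toc
	if err := json.Unmarshal(d.manifest, &t); err != nil {
		return nil, err
	}
	d.toc = &t
	return d.toc, nil
}

func main() {
	d := &differ{manifest: []byte(`{"version":1}`)}
	t, err := d.getTOC()
	fmt.Println(t.Version, err) // 1 <nil>
}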
@@ -115,6 +115,9 @@ type OptionsConfig struct {
 	// Btrfs container options to be handed to btrfs drivers
 	Btrfs struct{ BtrfsOptionsConfig } `toml:"btrfs,omitempty"`
 
+	// Thinpool container options to be handed to thinpool drivers (NOP)
+	Thinpool struct{} `toml:"thinpool,omitempty"`
+
 	// Overlay container options to be handed to overlay drivers
 	Overlay struct{ OverlayOptionsConfig } `toml:"overlay,omitempty"`
 
@@ -228,7 +228,7 @@ func getOverflowUID() int {
 	return overflowUID
 }
 
-// getOverflowUID returns the GID mapped to the overflow user
+// getOverflowGID returns the GID mapped to the overflow user
 func getOverflowGID() int {
 	overflowGIDOnce.Do(func() {
 		// 65534 is the value on older kernels where /proc/sys/kernel/overflowgid is not present
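For context on the fixed comment: the overflow IDs come from procfs, with 65534 as the fallback the comment describes. A standalone sketch of that lookup (the path and fallback value are from the diff; the function name and error handling are illustrative):

package main

import (
	"fmt"
	"os"
	"strconv"
	"strings"
)

// overflowGID reads /proc/sys/kernel/overflowgid, falling back to 65534,
// the value on older kernels where the file is not present.
func overflowGID() int {
	data, err := os.ReadFile("/proc/sys/kernel/overflowgid")
	if err != nil {
		return 65534
	}
	gid, err := strconv.Atoi(strings.TrimSpace(string(data)))
	if err != nil {
		return 65534
	}
	return gid
}

func main() {
	fmt.Println(overflowGID())
}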
@@ -133,11 +133,25 @@ func (l *LockFile) Lock() {
 	}
 }
 
-// LockRead locks the lockfile as a reader.
+// RLock locks the lockfile as a reader.
 func (l *LockFile) RLock() {
 	l.lock(readLock)
 }
 
+// TryLock attempts to lock the lockfile as a writer. Panic if the lock is a read-only one.
+func (l *LockFile) TryLock() error {
+	if l.ro {
+		panic("can't take write lock on read-only lock file")
+	} else {
+		return l.tryLock(writeLock)
+	}
+}
+
+// TryRLock attempts to lock the lockfile as a reader.
+func (l *LockFile) TryRLock() error {
+	return l.tryLock(readLock)
+}
+
 // Unlock unlocks the lockfile.
 func (l *LockFile) Unlock() {
 	l.stateMutex.Lock()
@@ -401,9 +415,47 @@ func (l *LockFile) lock(lType lockType) {
 		// Optimization: only use the (expensive) syscall when
 		// the counter is 0. In this case, we're either the first
 		// reader lock or a writer lock.
-		lockHandle(l.fd, lType)
+		lockHandle(l.fd, lType, false)
 	}
 	l.lockType = lType
 	l.locked = true
 	l.counter++
 }
+
+// lock locks the lockfile via syscall based on the specified type and
+// command.
+func (l *LockFile) tryLock(lType lockType) error {
+	var success bool
+	if lType == readLock {
+		success = l.rwMutex.TryRLock()
+	} else {
+		success = l.rwMutex.TryLock()
+	}
+	if !success {
+		return fmt.Errorf("resource temporarily unavailable")
+	}
+	l.stateMutex.Lock()
+	defer l.stateMutex.Unlock()
+	if l.counter == 0 {
+		// If we're the first reference on the lock, we need to open the file again.
+		fd, err := openLock(l.file, l.ro)
+		if err != nil {
+			l.rwMutex.Unlock()
+			return err
+		}
+		l.fd = fd
+
+		// Optimization: only use the (expensive) syscall when
+		// the counter is 0. In this case, we're either the first
+		// reader lock or a writer lock.
+		if err = lockHandle(l.fd, lType, true); err != nil {
+			closeHandle(fd)
+			l.rwMutex.Unlock()
+			return err
+		}
+	}
+	l.lockType = lType
+	l.locked = true
+	l.counter++
+	return nil
+}
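TryLock and TryRLock give callers a non-blocking alternative to Lock and RLock, returning an error instead of waiting. A usage sketch, assuming the package's existing GetLockFile constructor; only the method signatures shown in the diff are relied on:

package main

import (
	"fmt"

	"github.com/containers/storage/pkg/lockfile"
)

func main() {
	// GetLockFile creates or opens the lock file; assumed to be the
	// package's existing constructor.
	lf, err := lockfile.GetLockFile("/tmp/example.lock")
	if err != nil {
		panic(err)
	}
	// TryLock returns an error instead of blocking when the write lock is
	// already held elsewhere.
	if err := lf.TryLock(); err != nil {
		fmt.Println("lock is busy:", err)
		return
	}
	defer lf.Unlock()
	fmt.Println("acquired write lock without blocking")
}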
@@ -74,7 +74,7 @@ func openHandle(path string, mode int) (fileHandle, error) {
 	return fileHandle(fd), err
 }
 
-func lockHandle(fd fileHandle, lType lockType) {
+func lockHandle(fd fileHandle, lType lockType, nonblocking bool) error {
 	fType := unix.F_RDLCK
 	if lType != readLock {
 		fType = unix.F_WRLCK
@@ -85,7 +85,15 @@ func lockHandle(fd fileHandle, lType lockType) {
 		Start:  0,
 		Len:    0,
 	}
-	for unix.FcntlFlock(uintptr(fd), unix.F_SETLKW, &lk) != nil {
+	cmd := unix.F_SETLKW
+	if nonblocking {
+		cmd = unix.F_SETLK
+	}
+	for {
+		err := unix.FcntlFlock(uintptr(fd), cmd, &lk)
+		if err == nil || nonblocking {
+			return err
+		}
 		time.Sleep(10 * time.Millisecond)
 	}
 }
@@ -93,3 +101,7 @@ func lockHandle(fd fileHandle, lType lockType) {
 func unlockAndCloseHandle(fd fileHandle) {
 	unix.Close(int(fd))
 }
+
+func closeHandle(fd fileHandle) {
+	unix.Close(int(fd))
+}
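On Unix the nonblocking flag maps directly onto the fcntl command: F_SETLKW waits for a conflicting lock to clear, while F_SETLK fails immediately. A standalone sketch of the same selection (the helper name and file path are illustrative):

package main

import (
	"fmt"
	"os"

	"golang.org/x/sys/unix"
)

// lockWholeFile places a write lock on fd, either waiting (F_SETLKW) or
// failing immediately (F_SETLK) depending on nonblocking, mirroring the
// cmd selection in the diff above.
func lockWholeFile(fd uintptr, nonblocking bool) error {
	lk := unix.Flock_t{
		Type:   unix.F_WRLCK,
		Whence: int16(unix.SEEK_SET),
		Start:  0,
		Len:    0, // 0 means "to the end of the file"
	}
	cmd := unix.F_SETLKW
	if nonblocking {
		cmd = unix.F_SETLK
	}
	return unix.FcntlFlock(fd, cmd, &lk)
}

func main() {
	f, err := os.Create("/tmp/example.lock")
	if err != nil {
		panic(err)
	}
	defer f.Close()
	// <nil> unless another process already holds a conflicting lock.
	fmt.Println(lockWholeFile(f.Fd(), true))
}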
@@ -81,19 +81,30 @@ func openHandle(path string, mode int) (fileHandle, error) {
 	return fileHandle(fd), err
 }
 
-func lockHandle(fd fileHandle, lType lockType) {
+func lockHandle(fd fileHandle, lType lockType, nonblocking bool) error {
 	flags := 0
 	if lType != readLock {
 		flags = windows.LOCKFILE_EXCLUSIVE_LOCK
 	}
+	if nonblocking {
+		flags |= windows.LOCKFILE_FAIL_IMMEDIATELY
+	}
 	ol := new(windows.Overlapped)
 	if err := windows.LockFileEx(windows.Handle(fd), uint32(flags), reserved, allBytes, allBytes, ol); err != nil {
+		if nonblocking {
+			return err
+		}
 		panic(err)
 	}
+	return nil
 }
 
 func unlockAndCloseHandle(fd fileHandle) {
 	ol := new(windows.Overlapped)
 	windows.UnlockFileEx(windows.Handle(fd), reserved, allBytes, allBytes, ol)
 	closeHandle(fd)
 }
+
+func closeHandle(fd fileHandle) {
+	windows.Close(windows.Handle(fd))
+}
@@ -15,6 +15,7 @@
 #include <termios.h>
+#include <errno.h>
 #include <unistd.h>
 #include <libgen.h>
 #include <sys/vfs.h>
 #include <sys/mount.h>
 #include <linux/limits.h>
@@ -330,17 +330,9 @@ type Store interface {
 	// successfully applied with ApplyDiffFromStagingDirectory.
 	ApplyDiffWithDiffer(to string, options *drivers.ApplyDiffWithDifferOpts, differ drivers.Differ) (*drivers.DriverWithDifferOutput, error)
 
-	// ApplyDiffFromStagingDirectory uses stagingDirectory to create the diff.
-	// Deprecated: it will be removed soon. Use ApplyStagedLayer instead.
-	ApplyDiffFromStagingDirectory(to, stagingDirectory string, diffOutput *drivers.DriverWithDifferOutput, options *drivers.ApplyDiffWithDifferOpts) error
-
-	// CleanupStagingDirectory cleanups the staging directory. It can be used to cleanup the staging directory on errors
-	// Deprecated: it will be removed soon. Use CleanupStagedLayer instead.
-	CleanupStagingDirectory(stagingDirectory string) error
-
-	// ApplyStagedLayer combines the functions of CreateLayer and ApplyDiffFromStagingDirectory,
-	// marking the layer for automatic removal if applying the diff fails
-	// for any reason.
+	// ApplyStagedLayer combines the functions of creating a layer and using the staging
+	// directory to populate it.
+	// It marks the layer for automatic removal if applying the diff fails for any reason.
 	ApplyStagedLayer(args ApplyStagedLayerOptions) (*Layer, error)
 
 	// CleanupStagedLayer cleanups the staging directory. It can be used to cleanup the staging directory on errors
@@ -549,14 +541,14 @@ type Store interface {
 	GetDigestLock(digest.Digest) (Locker, error)
 
 	// LayerFromAdditionalLayerStore searches the additional layer store and returns an object
-	// which can create a layer with the specified digest associated with the specified image
+	// which can create a layer with the specified TOC digest associated with the specified image
 	// reference. Note that this hasn't been stored to this store yet: the actual creation of
 	// a usable layer is done by calling the returned object's PutAs() method. After creating
 	// a layer, the caller must then call the object's Release() method to free any temporary
 	// resources which were allocated for the object by this method or the object's PutAs()
 	// method.
 	// This API is experimental and can be changed without bumping the major version number.
-	LookupAdditionalLayer(d digest.Digest, imageref string) (AdditionalLayer, error)
+	LookupAdditionalLayer(tocDigest digest.Digest, imageref string) (AdditionalLayer, error)
 
 	// Tries to clean up remainders of previous containers or layers that are not
 	// references in the json files. These can happen in the case of unclean
@@ -578,8 +570,8 @@ type AdditionalLayer interface {
 	// layer store.
 	PutAs(id, parent string, names []string) (*Layer, error)
 
-	// UncompressedDigest returns the uncompressed digest of this layer
-	UncompressedDigest() digest.Digest
+	// TOCDigest returns the digest of TOC of this layer. Returns "" if unknown.
+	TOCDigest() digest.Digest
 
 	// CompressedSize returns the compressed size of this layer
 	CompressedSize() int64
@@ -1445,20 +1437,8 @@ func (s *store) canUseShifting(uidmap, gidmap []idtools.IDMap) bool {
 	return true
 }
 
-func (s *store) putLayer(id, parent string, names []string, mountLabel string, writeable bool, lOptions *LayerOptions, diff io.Reader, slo *stagedLayerOptions) (*Layer, int64, error) {
-	rlstore, rlstores, err := s.bothLayerStoreKinds()
-	if err != nil {
-		return nil, -1, err
-	}
-	if err := rlstore.startWriting(); err != nil {
-		return nil, -1, err
-	}
-	defer rlstore.stopWriting()
-	if err := s.containerStore.startWriting(); err != nil {
-		return nil, -1, err
-	}
-	defer s.containerStore.stopWriting()
-
+// putLayer requires the rlstore, rlstores, as well as s.containerStore (even if not an argument to this function) to be locked for write.
+func (s *store) putLayer(rlstore rwLayerStore, rlstores []roLayerStore, id, parent string, names []string, mountLabel string, writeable bool, lOptions *LayerOptions, diff io.Reader, slo *stagedLayerOptions) (*Layer, int64, error) {
 	var parentLayer *Layer
 	var options LayerOptions
 	if lOptions != nil {
@@ -1537,7 +1517,19 @@ func (s *store) putLayer(id, parent string, names []string, mountLabel string, w
 }
 
 func (s *store) PutLayer(id, parent string, names []string, mountLabel string, writeable bool, lOptions *LayerOptions, diff io.Reader) (*Layer, int64, error) {
-	return s.putLayer(id, parent, names, mountLabel, writeable, lOptions, diff, nil)
+	rlstore, rlstores, err := s.bothLayerStoreKinds()
+	if err != nil {
+		return nil, -1, err
+	}
+	if err := rlstore.startWriting(); err != nil {
+		return nil, -1, err
+	}
+	defer rlstore.stopWriting()
+	if err := s.containerStore.startWriting(); err != nil {
+		return nil, -1, err
+	}
+	defer s.containerStore.stopWriting()
+	return s.putLayer(rlstore, rlstores, id, parent, names, mountLabel, writeable, lOptions, diff, nil)
 }
 
 func (s *store) CreateLayer(id, parent string, names []string, mountLabel string, writeable bool, options *LayerOptions) (*Layer, error) {
@@ -3002,36 +2994,39 @@ func (s *store) Diff(from, to string, options *DiffOptions) (io.ReadCloser, erro
 		return nil, ErrLayerUnknown
 	}
 
-func (s *store) ApplyDiffFromStagingDirectory(to, stagingDirectory string, diffOutput *drivers.DriverWithDifferOutput, options *drivers.ApplyDiffWithDifferOpts) error {
-	if stagingDirectory != diffOutput.Target {
-		return fmt.Errorf("invalid value for staging directory, it must be the same as the differ target directory")
-	}
-	_, err := writeToLayerStore(s, func(rlstore rwLayerStore) (struct{}, error) {
-		if !rlstore.Exists(to) {
-			return struct{}{}, ErrLayerUnknown
-		}
-		return struct{}{}, rlstore.applyDiffFromStagingDirectory(to, diffOutput, options)
-	})
-	return err
-}
-
 func (s *store) ApplyStagedLayer(args ApplyStagedLayerOptions) (*Layer, error) {
+	rlstore, rlstores, err := s.bothLayerStoreKinds()
+	if err != nil {
+		return nil, err
+	}
+	if err := rlstore.startWriting(); err != nil {
+		return nil, err
+	}
+	defer rlstore.stopWriting()
+
+	layer, err := rlstore.Get(args.ID)
+	if err != nil && !errors.Is(err, ErrLayerUnknown) {
+		return layer, err
+	}
+	if err == nil {
+		return layer, rlstore.applyDiffFromStagingDirectory(args.ID, args.DiffOutput, args.DiffOptions)
+	}
+
+	// if the layer doesn't exist yet, try to create it.
+
+	if err := s.containerStore.startWriting(); err != nil {
+		return nil, err
+	}
+	defer s.containerStore.stopWriting()
+
 	slo := stagedLayerOptions{
 		DiffOutput:  args.DiffOutput,
 		DiffOptions: args.DiffOptions,
 	}
 
-	layer, _, err := s.putLayer(args.ID, args.ParentLayer, args.Names, args.MountLabel, args.Writeable, args.LayerOptions, nil, &slo)
+	layer, _, err = s.putLayer(rlstore, rlstores, args.ID, args.ParentLayer, args.Names, args.MountLabel, args.Writeable, args.LayerOptions, nil, &slo)
 	return layer, err
 }
 
-func (s *store) CleanupStagingDirectory(stagingDirectory string) error {
-	_, err := writeToLayerStore(s, func(rlstore rwLayerStore) (struct{}, error) {
-		return struct{}{}, rlstore.CleanupStagingDirectory(stagingDirectory)
-	})
-	return err
-}
-
 func (s *store) CleanupStagedLayer(diffOutput *drivers.DriverWithDifferOutput) error {
 	_, err := writeToLayerStore(s, func(rlstore rwLayerStore) (struct{}, error) {
 		return struct{}{}, rlstore.CleanupStagingDirectory(diffOutput.Target)
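Callers of the removed ApplyDiffFromStagingDirectory and CleanupStagingDirectory migrate to ApplyStagedLayer and CleanupStagedLayer, which fold layer creation and diff application into one locked operation. A hedged migration sketch; the option fields are taken from the diff above, the surrounding wiring is illustrative:

package example

import (
	"github.com/containers/storage"
	"github.com/containers/storage/drivers"
)

// applyStaged shows the replacement flow: ApplyStagedLayer either applies the
// staged diff to an existing layer or creates the layer first, and
// CleanupStagedLayer takes over the error-path cleanup that
// CleanupStagingDirectory used to do.
func applyStaged(store storage.Store, id, parent string, out *drivers.DriverWithDifferOutput, opts *drivers.ApplyDiffWithDifferOpts) (*storage.Layer, error) {
	layer, err := store.ApplyStagedLayer(storage.ApplyStagedLayerOptions{
		ID:          id,
		ParentLayer: parent,
		DiffOutput:  out,
		DiffOptions: opts,
	})
	if err != nil {
		// The layer is marked for automatic removal on failure; the staging
		// directory itself still needs explicit cleanup.
		_ = store.CleanupStagedLayer(out)
		return nil, err
	}
	return layer, nil
}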
@@ -3213,7 +3208,7 @@ func (s *store) Layer(id string) (*Layer, error) {
 		return nil, ErrLayerUnknown
 	}
 
-func (s *store) LookupAdditionalLayer(d digest.Digest, imageref string) (AdditionalLayer, error) {
+func (s *store) LookupAdditionalLayer(tocDigest digest.Digest, imageref string) (AdditionalLayer, error) {
 	var adriver drivers.AdditionalLayerStoreDriver
 	if err := func() error { // A scope for defer
 		if err := s.startUsingGraphDriver(); err != nil {
@@ -3230,7 +3225,7 @@ func (s *store) LookupAdditionalLayer(d digest.Digest, imageref string) (Additio
 		return nil, err
 	}
 
-	al, err := adriver.LookupAdditionalLayer(d, imageref)
+	al, err := adriver.LookupAdditionalLayer(tocDigest, imageref)
 	if err != nil {
 		if errors.Is(err, drivers.ErrLayerUnknown) {
 			return nil, ErrLayerUnknown
@@ -3255,8 +3250,8 @@ type additionalLayer struct {
 	s     *store
 }
 
-func (al *additionalLayer) UncompressedDigest() digest.Digest {
-	return al.layer.UncompressedDigest
+func (al *additionalLayer) TOCDigest() digest.Digest {
+	return al.layer.TOCDigest
 }
 
 func (al *additionalLayer) CompressedSize() int64 {
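With the additional layer store now keyed by TOC digests, a lookup-and-register flow looks roughly like the sketch below; the PutAs and LookupAdditionalLayer signatures are from the diff, and Release() is taken from the interface documentation quoted above:

package example

import (
	"github.com/containers/storage"
	digest "github.com/opencontainers/go-digest"
)

// fromAdditionalStore pulls a layer out of an additional layer store by the
// digest of its TOC and registers it under id. PutAs creates the usable
// layer; Release frees the temporary resources, as the interface docs require.
func fromAdditionalStore(store storage.Store, tocDigest digest.Digest, imageRef, id, parent string) (*storage.Layer, error) {
	al, err := store.LookupAdditionalLayer(tocDigest, imageRef)
	if err != nil {
		return nil, err // storage.ErrLayerUnknown if no store has this TOC digest
	}
	defer al.Release()
	return al.PutAs(id, parent, nil)
}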
@@ -171,7 +171,7 @@ github.com/containers/buildah/pkg/sshagent
 github.com/containers/buildah/pkg/util
 github.com/containers/buildah/pkg/volumes
 github.com/containers/buildah/util
-# github.com/containers/common v0.58.1-0.20240517090124-fa276b325847
+# github.com/containers/common v0.58.1-0.20240523020001-79d954c77663
 ## explicit; go 1.21
 github.com/containers/common/internal
 github.com/containers/common/internal/attributedstring
@@ -244,7 +244,7 @@ github.com/containers/conmon/runner/config
 # github.com/containers/gvisor-tap-vsock v0.7.4-0.20240515153903-01a1a0cd3f70
 ## explicit; go 1.20
 github.com/containers/gvisor-tap-vsock/pkg/types
-# github.com/containers/image/v5 v5.30.2-0.20240509191815-9318d0eaaf78
+# github.com/containers/image/v5 v5.31.0
 ## explicit; go 1.21
 github.com/containers/image/v5/copy
 github.com/containers/image/v5/directory
@@ -355,7 +355,7 @@ github.com/containers/psgo/internal/dev
 github.com/containers/psgo/internal/host
 github.com/containers/psgo/internal/proc
 github.com/containers/psgo/internal/process
-# github.com/containers/storage v1.53.1-0.20240507041447-6cee10795c2d
+# github.com/containers/storage v1.54.0
 ## explicit; go 1.21
 github.com/containers/storage
 github.com/containers/storage/drivers