From e4c810a8f1223149c2a74566c2d5c8b47cf4e872 Mon Sep 17 00:00:00 2001 From: Paul Holzinger Date: Thu, 19 Jun 2025 12:26:36 +0200 Subject: [PATCH] fix panic on state refresh In order to use parallel.Enqueue() it is required to call parallel.SetMaxThreads() first. However in our main call we have been doing this after we set up the initial runtime so just move this up. And while at it move up the cpu and memory profile setup as well so we can capture the earlier parts as well. This was most likely introduced by commit 46d874aa52 ("Refactor graph traversal & use for pod stop") which started using parallel.Enqueue() in removePod() which then can get called from refresh() when a container has autoremoval configured. I tried many hard resets in VMs to reproduce but was unable to do so. I always got "retrieving temporary directory for container xxx: no such container" errors instead and it failed to autoremove but no panics. Besides that many times c/storage was corrupted which made the image I used unusable and it had to be deleted which is concerning in itself. Fixes #26469 Signed-off-by: Paul Holzinger --- cmd/podman/root.go | 57 +++++++++++++++++++++++----------------------- 1 file changed, 28 insertions(+), 29 deletions(-) diff --git a/cmd/podman/root.go b/cmd/podman/root.go index cc35c6c09d..26133e025b 100644 --- a/cmd/podman/root.go +++ b/cmd/podman/root.go @@ -293,6 +293,34 @@ func persistentPreRunE(cmd *cobra.Command, args []string) error { } } } + + if cmd.Flag("cpu-profile").Changed { + f, err := os.Create(podmanConfig.CPUProfile) + if err != nil { + return err + } + if err := pprof.StartCPUProfile(f); err != nil { + return err + } + } + if cmd.Flag("memory-profile").Changed { + // Same value as the default in github.com/pkg/profile. 
+ runtime.MemProfileRate = 4096 + if rate := os.Getenv("MemProfileRate"); rate != "" { + r, err := strconv.Atoi(rate) + if err != nil { + return err + } + runtime.MemProfileRate = r + } + } + + if podmanConfig.MaxWorks <= 0 { + return fmt.Errorf("maximum workers must be set to a positive number (got %d)", podmanConfig.MaxWorks) + } + if err := parallel.SetMaxThreads(uint(podmanConfig.MaxWorks)); err != nil { + return err + } } if err := readRemoteCliFlags(cmd, podmanConfig); err != nil { @@ -346,35 +374,6 @@ func persistentPreRunE(cmd *cobra.Command, args []string) error { } } - if !registry.IsRemote() { - if cmd.Flag("cpu-profile").Changed { - f, err := os.Create(podmanConfig.CPUProfile) - if err != nil { - return err - } - if err := pprof.StartCPUProfile(f); err != nil { - return err - } - } - if cmd.Flag("memory-profile").Changed { - // Same value as the default in github.com/pkg/profile. - runtime.MemProfileRate = 4096 - if rate := os.Getenv("MemProfileRate"); rate != "" { - r, err := strconv.Atoi(rate) - if err != nil { - return err - } - runtime.MemProfileRate = r - } - } - - if podmanConfig.MaxWorks <= 0 { - return fmt.Errorf("maximum workers must be set to a positive number (got %d)", podmanConfig.MaxWorks) - } - if err := parallel.SetMaxThreads(uint(podmanConfig.MaxWorks)); err != nil { - return err - } - } // Setup Rootless environment, IFF: // 1) in ABI mode // 2) running as non-root