Enable cleanup processes for detached exec

The cleanup command creation logic is made public as part of this
and wired so that we can call it both from SpecGen (to make
container exit commands) and from the ABI detached exec handler.
Exit commands are presently only used for detached exec, but could
theoretically be turned on for all exec sessions if we wanted
(I'm declining to do this because of the potential overhead).

I also forgot to copy the exit command from the exec config into
the ExecOptions struct used by the OCI runtime, so it was not
being added.

There are also two significant bugfixes for exec in here. One is
for updating the status of running exec sessions - this was
always failing as I had coded it to remove the exit file *before*
reading it, instead of after (oops). The second was that removing
a running exec session would always fail because I inverted the
check to see if it was running.
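
The first of those two fixes doesn't show up in the hunks below, so
here is a minimal sketch of the corrected ordering (the helper name,
exit file format, and error handling are illustrative, not the actual
libpod code): the exit file has to be read first, and only removed
once the exit code is in hand. The second fix is visible in the
ExecRemove hunk below.

package main

import (
	"fmt"
	"io/ioutil"
	"os"
	"strconv"
	"strings"
)

// readExecExitCode is an illustrative stand-in for the fixed logic: read the
// exit file before removing it. The old order (remove, then read) meant the
// read always failed and the session status could never be updated.
func readExecExitCode(exitFilePath string) (int, error) {
	data, err := ioutil.ReadFile(exitFilePath) // read first
	if err != nil {
		return -1, err
	}
	code, err := strconv.Atoi(strings.TrimSpace(string(data)))
	if err != nil {
		return -1, err
	}
	// Only remove the file once the exit code has been parsed.
	if err := os.Remove(exitFilePath); err != nil {
		return -1, err
	}
	return code, nil
}

func main() {
	// Throwaway file standing in for an exec session exit file containing "0".
	tmp, err := ioutil.TempFile("", "exec-exit-")
	if err != nil {
		panic(err)
	}
	if _, err := tmp.WriteString("0"); err != nil {
		panic(err)
	}
	tmp.Close()
	fmt.Println(readExecExitCode(tmp.Name())) // 0 <nil>
}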

Signed-off-by: Matthew Heon <matthew.heon@pm.me>
Matthew Heon 2020-05-20 15:29:24 -04:00
parent 5ec56dc790
commit 6b9e9610d8
5 changed files with 58 additions and 18 deletions

View File

@@ -8,6 +8,7 @@ import (
"github.com/containers/libpod/cmd/podman/utils"
"github.com/containers/libpod/pkg/domain/entities"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
"github.com/spf13/cobra"
)
@@ -68,6 +69,12 @@ func cleanup(cmd *cobra.Command, args []string) error {
responses, err := registry.ContainerEngine().ContainerCleanup(registry.GetContext(), args, cleanupOptions)
if err != nil {
// `podman container cleanup` is almost always run in the
// background. Our only way of relaying information to the user
// is via syslog.
// As such, we need to logrus.Errorf our errors to ensure they
// are properly printed if --syslog is set.
logrus.Errorf("Error running container cleanup: %v", err)
return err
}
for _, r := range responses {
@@ -76,12 +83,15 @@ func cleanup(cmd *cobra.Command, args []string) error {
continue
}
if r.RmErr != nil {
logrus.Errorf("Error removing container: %v", r.RmErr)
errs = append(errs, r.RmErr)
}
if r.RmiErr != nil {
logrus.Errorf("Error removing image: %v", r.RmiErr)
errs = append(errs, r.RmiErr)
}
if r.CleanErr != nil {
logrus.Errorf("Error cleaning up container: %v", r.CleanErr)
errs = append(errs, r.CleanErr)
}
}

View File

@@ -65,6 +65,9 @@ type ExecConfig struct {
// ExitCommand is the exec session's exit command.
// This command will be executed when the exec session exits.
// If unset, no command will be executed.
// Two arguments will be appended to the exit command by Libpod:
// The ID of the exec session, and the ID of the container the exec
// session is a part of (in that order).
ExitCommand []string `json:"exitCommand,omitempty"`
}
@@ -195,6 +198,10 @@ func (c *Container) ExecCreate(config *ExecConfig) (string, error) {
return "", errors.Wrapf(err, "error copying exec configuration into exec session")
}
if len(session.Config.ExitCommand) > 0 {
session.Config.ExitCommand = append(session.Config.ExitCommand, []string{session.ID(), c.ID()}...)
}
if c.state.ExecSessions == nil {
c.state.ExecSessions = make(map[string]*ExecSession)
}
@@ -606,11 +613,11 @@ func (c *Container) ExecRemove(sessionID string, force bool) error {
// Update status of exec session if running, so we can check if it
// stopped in the meantime.
if session.State == define.ExecStateRunning {
stopped, err := c.ociRuntime.ExecUpdateStatus(c, session.ID())
running, err := c.ociRuntime.ExecUpdateStatus(c, session.ID())
if err != nil {
return err
}
if stopped {
if !running {
session.State = define.ExecStateStopped
// TODO: should we retrieve exit code here?
// TODO: Might be worth saving state here.
@@ -865,13 +872,6 @@ func (c *Container) getActiveExecSessions() ([]string, error) {
continue
}
if !alive {
if err := c.cleanupExecBundle(id); err != nil {
if lastErr != nil {
logrus.Errorf("Error checking container %s exec sessions: %v", c.ID(), lastErr)
}
lastErr = err
}
_, isLegacy := c.state.LegacyExecSessions[id]
if isLegacy {
delete(c.state.LegacyExecSessions, id)
@@ -891,6 +891,12 @@ func (c *Container) getActiveExecSessions() ([]string, error) {
needSave = true
}
if err := c.cleanupExecBundle(id); err != nil {
if lastErr != nil {
logrus.Errorf("Error checking container %s exec sessions: %v", c.ID(), lastErr)
}
lastErr = err
}
} else {
activeSessions = append(activeSessions, id)
}
@@ -911,6 +917,8 @@ func (c *Container) getActiveExecSessions() ([]string, error) {
func (c *Container) removeAllExecSessions() error {
knownSessions := c.getKnownExecSessions()
logrus.Debugf("Removing all exec sessions for container %s", c.ID())
var lastErr error
for _, id := range knownSessions {
if err := c.ociRuntime.ExecStopContainer(c, id, c.StopTimeout()); err != nil {
@@ -975,6 +983,7 @@ func prepareForExec(c *Container, session *ExecSession) (*ExecOptions, error) {
opts.User = user
opts.PreserveFDs = session.Config.PreserveFDs
opts.DetachKeys = session.Config.DetachKeys
opts.ExitCommand = session.Config.ExitCommand
return opts, nil
}

View File

@@ -390,6 +390,8 @@ func (r *Runtime) removeContainer(ctx context.Context, c *Container, force bool,
}
}
logrus.Debugf("Removing container %s", c.ID())
// We need to lock the pod before we lock the container.
// To avoid races around removing a container and the pod it is in.
// Don't need to do this in pod removal case - we're evicting the entire
// pod.

View File

@@ -607,6 +607,20 @@ func (ic *ContainerEngine) ContainerExecDetached(ctx context.Context, nameOrId s
execConfig := makeExecConfig(options)
// Make an exit command
storageConfig := ic.Libpod.StorageConfig()
runtimeConfig, err := ic.Libpod.GetConfig()
if err != nil {
return "", errors.Wrapf(err, "error retrieving Libpod configuration to build exec exit command")
}
podmanPath, err := os.Executable()
if err != nil {
return "", errors.Wrapf(err, "error retrieving executable to build exec exit command")
}
// TODO: Add some ability to toggle syslog
exitCommandArgs := generate.CreateExitCommandArgs(storageConfig, runtimeConfig, podmanPath, false, true, true)
execConfig.ExitCommand = exitCommandArgs
// Create and start the exec session
id, err := ctr.ExecCreate(execConfig)
if err != nil {
@@ -615,7 +629,7 @@ func (ic *ContainerEngine) ContainerExecDetached(ctx context.Context, nameOrId s
// TODO: we should try and retrieve exit code if this fails.
if err := ctr.ExecStart(id); err != nil {
return "", err
return "", err
}
return id, nil
}

View File

@@ -111,7 +111,8 @@ func MakeContainer(ctx context.Context, rt *libpod.Runtime, s *specgen.SpecGener
if err != nil {
return nil, err
}
options = append(options, createExitCommandOption(s, rt.StorageConfig(), rtc, podmanPath))
// TODO: Enable syslog support - we'll need to put this in SpecGen.
options = append(options, libpod.WithExitCommand(CreateExitCommandArgs(rt.StorageConfig(), rtc, podmanPath, false, s.Remove, false)))
runtimeSpec, err := SpecGenToOCI(ctx, s, rt, rtc, newImage, finalMounts)
if err != nil {
@@ -228,7 +229,7 @@ func createContainerOptions(ctx context.Context, rt *libpod.Runtime, s *specgen.
return options, nil
}
func createExitCommandOption(s *specgen.SpecGenerator, storageConfig storage.StoreOptions, config *config.Config, podmanPath string) libpod.CtrCreateOption {
func CreateExitCommandArgs(storageConfig storage.StoreOptions, config *config.Config, podmanPath string, syslog, rm bool, exec bool) []string {
// We need a cleanup process for containers in the current model.
// But we can't assume that the caller is Podman - it could be another
// user of the API.
@@ -255,14 +256,18 @@ func createExitCommandOption(s *specgen.SpecGenerator, storageConfig storage.Sto
command = append(command, []string{"--events-backend", config.Engine.EventsLogger}...)
}
// TODO Mheon wants to leave this for now
//if s.sys {
// command = append(command, "--syslog", "true")
//}
if syslog {
command = append(command, "--syslog", "true")
}
command = append(command, []string{"container", "cleanup"}...)
if s.Remove {
if rm {
command = append(command, "--rm")
}
return libpod.WithExitCommand(command)
if exec {
command = append(command, "--exec")
}
return command
}