mirror of https://github.com/containers/podman.git
play kube: service container
Add the notion of a "service container" to play kube. A service container
is started before the pods in play kube and is (reverse) linked to them.
The service container is stopped/removed *after* all pods it is associated
with are stopped/removed.

In other words, a service container tracks the entire life cycle of a
service started via `podman play kube`. This is required to enable
`play kube` in a systemd unit file.

The service container is only used when the `--service-container` flag is
set on the CLI. This flag has been marked as hidden as it is not meant to
be used outside the context of `play kube`. It is further not supported
on the remote client.

The wiring with systemd will be done in a later commit.

Signed-off-by: Valentin Rothberg <vrothberg@redhat.com>
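For context, a minimal usage sketch (the YAML path is illustrative, not part of the commit); since the flag is hidden, it does not show up in `--help`:

    # Start the pods of a Kubernetes YAML together with a service container.
    podman play kube --service-container=true ./pod.yaml

    # The service container is started before the pods and is stopped and
    # removed only after the last associated pod is stopped and removed.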
parent ecf0177a01
commit 840c120c21
@@ -139,6 +139,15 @@ func init() {
 	flags.StringVar(&kubeOptions.ContextDir, contextDirFlagName, "", "Path to top level of context directory")
 	_ = kubeCmd.RegisterFlagCompletionFunc(contextDirFlagName, completion.AutocompleteDefault)
 
+	// NOTE: The service-container flag is marked as hidden as it
+	// is purely designed for running play-kube in systemd units.
+	// It is not something users should need to know or care about.
+	//
+	// Having a flag rather than an env variable is cleaner.
+	serviceFlagName := "service-container"
+	flags.BoolVar(&kubeOptions.ServiceContainer, serviceFlagName, false, "Starts a service container before all pods")
+	_ = flags.MarkHidden("service-container")
+
 	flags.StringVar(&kubeOptions.SignaturePolicy, "signature-policy", "", "`Pathname` of signature policy file (not usually used)")
 	_ = flags.MarkHidden("signature-policy")
@@ -211,6 +211,14 @@ type ContainerState struct {
 	// network and an interface names
 	NetInterfaceDescriptions ContainerNetworkDescriptions `json:"networkDescriptions,omitempty"`
 
+	// Service indicates that the container is the service container of a
+	// service. A service consists of one or more pods. The service
+	// container is started before all pods and is stopped when the last
+	// pod stops. The service container allows for tracking and managing
+	// the entire life cycle of a service which may be started via
+	// `podman-play-kube`.
+	Service Service
+
 	// containerPlatformState holds platform-specific container state.
 	containerPlatformState
@@ -382,6 +382,9 @@ type ContainerMiscConfig struct {
 	// IsInfra is a bool indicating whether this container is an infra container used for
 	// sharing kernel namespaces in a pod
 	IsInfra bool `json:"pause"`
+	// IsService is a bool indicating whether this container is a service container used for
+	// tracking the life cycle of a K8s service.
+	IsService bool `json:"isService"`
 	// SdNotifyMode tells libpod what to do with a NOTIFY_SOCKET if passed
 	SdNotifyMode string `json:"sdnotifyMode,omitempty"`
 	// Systemd tells libpod to setup the container in systemd mode, a value of nil denotes false
@@ -171,6 +171,7 @@ func (c *Container) getContainerInspectData(size bool, driverData *define.Driver
 		Mounts:       inspectMounts,
 		Dependencies: c.Dependencies(),
 		IsInfra:      c.IsInfra(),
+		IsService:    c.isService(),
 	}
 
 	if c.state.ConfigPath != "" {
@@ -1,6 +1,8 @@
 package libpod
 
 import (
+	"fmt"
+
 	"github.com/containers/podman/v4/libpod/define"
 	spec "github.com/opencontainers/runtime-spec/specs-go"
 	"github.com/pkg/errors"
@@ -27,6 +29,12 @@ func (c *Container) validate() error {
 		return errors.Wrapf(define.ErrInvalidArg, "must set root filesystem source to either image or rootfs")
 	}
 
+	// A container cannot be marked as an infra and service container at
+	// the same time.
+	if c.IsInfra() && c.isService() {
+		return fmt.Errorf("cannot be infra and service container at the same time: %w", define.ErrInvalidArg)
+	}
+
 	// Cannot make a network namespace if we are joining another container's
 	// network namespace
 	if c.config.CreateNetNS && c.config.NetNsCtr != "" {
@@ -683,6 +683,7 @@ type InspectContainerData struct {
 	NetworkSettings *InspectNetworkSettings     `json:"NetworkSettings"`
 	Namespace       string                      `json:"Namespace"`
 	IsInfra         bool                        `json:"IsInfra"`
+	IsService       bool                        `json:"IsService"`
 	Config          *InspectContainerConfig     `json:"Config"`
 	HostConfig      *InspectContainerHostConfig `json:"HostConfig"`
 }
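As an aside, the new inspect field can be read with a Go format template; a small sketch, reusing the service container name from the system test later in this diff:

    # Prints "true" for a service container, "false" for all others.
    podman container inspect 352a88685060-service --format '{{.IsService}}'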
@@ -1,6 +1,7 @@
 package libpod
 
 import (
+	"fmt"
 	"net"
 	"os"
 	"path/filepath"
@@ -1477,7 +1478,7 @@ func WithCreateCommand(cmd []string) CtrCreateOption {
 	}
 }
 
-// withIsInfra allows us to dfferentiate between infra containers and regular containers
+// withIsInfra allows us to differentiate between infra containers and other containers
 // within the container config
 func withIsInfra() CtrCreateOption {
 	return func(ctr *Container) error {
@@ -1491,6 +1492,20 @@ func withIsInfra() CtrCreateOption {
 	}
 }
 
+// WithIsService allows us to differentiate between service containers and other containers
+// within the container config
+func WithIsService() CtrCreateOption {
+	return func(ctr *Container) error {
+		if ctr.valid {
+			return define.ErrCtrFinalized
+		}
+
+		ctr.config.IsService = true
+
+		return nil
+	}
+}
+
 // WithCreateWorkingDir tells Podman to create the container's working directory
 // if it does not exist.
 func WithCreateWorkingDir() CtrCreateOption {
@@ -2081,6 +2096,27 @@ func WithInfraContainer() PodCreateOption {
 	}
 }
 
+// WithServiceContainer associates the specified service container ID with the pod.
+func WithServiceContainer(id string) PodCreateOption {
+	return func(pod *Pod) error {
+		if pod.valid {
+			return define.ErrPodFinalized
+		}
+
+		ctr, err := pod.runtime.LookupContainer(id)
+		if err != nil {
+			return fmt.Errorf("looking up service container: %w", err)
+		}
+
+		if err := ctr.addServicePodLocked(pod.ID()); err != nil {
+			return fmt.Errorf("associating service container %s with pod %s: %w", id, pod.ID(), err)
+		}
+
+		pod.config.ServiceContainerID = id
+		return nil
+	}
+}
+
 // WithVolatile sets the volatile flag for the container storage.
 // The option can potentially cause data loss when used on a container that must survive a machine reboot.
 func WithVolatile() CtrCreateOption {
@@ -64,6 +64,13 @@ type PodConfig struct {
 
 	HasInfra bool `json:"hasInfra,omitempty"`
 
+	// ServiceContainerID is the ID of the main container of a service. A
+	// service consists of one or more pods. The service container is
+	// started before all pods and is stopped when the last pod stops.
+	// The service container allows for tracking and managing the entire
+	// life cycle of a service which may be started via `podman-play-kube`.
+	ServiceContainerID string `json:"serviceContainerID,omitempty"`
+
 	// Time pod was created
 	CreatedTime time.Time `json:"created"`
@@ -75,6 +75,10 @@ func (p *Pod) Start(ctx context.Context) (map[string]error, error) {
 		return nil, define.ErrPodRemoved
 	}
 
+	if err := p.maybeStartServiceContainer(ctx); err != nil {
+		return nil, err
+	}
+
 	// Before "regular" containers start in the pod, all init containers
 	// must have run and exited successfully.
 	if err := p.startInitContainers(ctx); err != nil {
@@ -197,6 +201,11 @@ func (p *Pod) stopWithTimeout(ctx context.Context, cleanup bool, timeout int) (m
 	if len(ctrErrors) > 0 {
 		return ctrErrors, errors.Wrapf(define.ErrPodPartialFail, "error stopping some containers")
 	}
 
+	if err := p.maybeStopServiceContainer(); err != nil {
+		return nil, err
+	}
+
 	return nil, nil
 }
 
@@ -297,6 +306,10 @@ func (p *Pod) Cleanup(ctx context.Context) (map[string]error, error) {
 		return ctrErrors, errors.Wrapf(define.ErrPodPartialFail, "error cleaning up some containers")
 	}
 
+	if err := p.maybeStopServiceContainer(); err != nil {
+		return nil, err
+	}
+
 	return nil, nil
 }
 
@@ -443,6 +456,10 @@ func (p *Pod) Restart(ctx context.Context) (map[string]error, error) {
 		return nil, define.ErrPodRemoved
 	}
 
+	if err := p.maybeStartServiceContainer(ctx); err != nil {
+		return nil, err
+	}
+
 	allCtrs, err := p.runtime.state.PodContainers(p)
 	if err != nil {
 		return nil, err
@@ -530,6 +547,11 @@ func (p *Pod) Kill(ctx context.Context, signal uint) (map[string]error, error) {
 	if len(ctrErrors) > 0 {
 		return ctrErrors, errors.Wrapf(define.ErrPodPartialFail, "error killing some containers")
 	}
 
+	if err := p.maybeStopServiceContainer(); err != nil {
+		return nil, err
+	}
+
 	return nil, nil
 }
@@ -40,7 +40,7 @@ func (r *Runtime) RemoveContainersForImageCallback(ctx context.Context) libimage
 		if ctr.config.IsInfra {
 			pod, err := r.state.Pod(ctr.config.Pod)
 			if err != nil {
-				return errors.Wrapf(err, "container %s is in pod %s, but pod cannot be retrieved", ctr.ID(), pod.ID())
+				return errors.Wrapf(err, "container %s is in pod %s, but pod cannot be retrieved", ctr.ID(), ctr.config.Pod)
 			}
 			if err := r.removePod(ctx, pod, true, true, timeout); err != nil {
 				return errors.Wrapf(err, "removing image %s: container %s using image could not be removed", imageID, ctr.ID())
@@ -380,6 +380,10 @@ func (r *Runtime) removePod(ctx context.Context, p *Pod, removeCtrs, force bool,
 		}
 	}
 
+	if err := p.maybeRemoveServiceContainer(); err != nil {
+		return err
+	}
+
 	// Remove pod from state
 	if err := r.state.RemovePod(p); err != nil {
 		if removalErr != nil {
@@ -0,0 +1,213 @@
+package libpod
+
+import (
+	"context"
+	"fmt"
+
+	"github.com/containers/podman/v4/libpod/define"
+	"github.com/pkg/errors"
+	"github.com/sirupsen/logrus"
+)
+
+// A service consists of one or more pods. The service container is started
+// before all pods and is stopped when the last pod stops. The service
+// container allows for tracking and managing the entire life cycle of a
+// service which may be started via `podman-play-kube`.
+type Service struct {
+	// Pods running as part of the service.
+	Pods []string `json:"servicePods"`
+}
+
+// Indicates whether the pod is associated with a service container.
+// The pod is expected to be updated and locked.
+func (p *Pod) hasServiceContainer() bool {
+	return p.config.ServiceContainerID != ""
+}
+
+// Returns the pod's service container.
+// The pod is expected to be updated and locked.
+func (p *Pod) serviceContainer() (*Container, error) {
+	id := p.config.ServiceContainerID
+	if id == "" {
+		return nil, errors.Wrap(define.ErrNoSuchCtr, "pod has no service container")
+	}
+	return p.runtime.state.Container(id)
+}
+
+// ServiceContainer returns the service container.
+func (p *Pod) ServiceContainer() (*Container, error) {
+	p.lock.Lock()
+	defer p.lock.Unlock()
+	if err := p.updatePod(); err != nil {
+		return nil, err
+	}
+	return p.serviceContainer()
+}
+
+func (c *Container) addServicePodLocked(id string) error {
+	c.lock.Lock()
+	defer c.lock.Unlock()
+	if err := c.syncContainer(); err != nil {
+		return err
+	}
+	c.state.Service.Pods = append(c.state.Service.Pods, id)
+	return c.save()
+}
+
+func (c *Container) isService() bool {
+	return c.config.IsService
+}
+
+// canStopServiceContainerLocked returns true if all pods of the service are stopped.
+// Note that the method acquires the container lock.
+func (c *Container) canStopServiceContainerLocked() (bool, error) {
+	c.lock.Lock()
+	defer c.lock.Unlock()
+	if err := c.syncContainer(); err != nil {
+		return false, err
+	}
+
+	if !c.isService() {
+		return false, fmt.Errorf("internal error: checking service: container %s is not a service container", c.ID())
+	}
+
+	for _, id := range c.state.Service.Pods {
+		pod, err := c.runtime.LookupPod(id)
+		if err != nil {
+			if errors.Is(err, define.ErrNoSuchPod) {
+				continue
+			}
+			return false, err
+		}
+
+		status, err := pod.GetPodStatus()
+		if err != nil {
+			return false, err
+		}
+
+		// We can only stop the service if all pods are done.
+		switch status {
+		case define.PodStateStopped, define.PodStateExited, define.PodStateErrored:
+			continue
+		default:
+			return false, nil
+		}
+	}
+
+	return true, nil
+}
+
+// Checks whether the service container can be stopped and does so.
+func (p *Pod) maybeStopServiceContainer() error {
+	if !p.hasServiceContainer() {
+		return nil
+	}
+
+	serviceCtr, err := p.serviceContainer()
+	if err != nil {
+		return fmt.Errorf("getting pod's service container: %w", err)
+	}
+	// Checking whether the service can be stopped must be done in
+	// the runtime's work queue to resolve ABBA deadlocks in the
+	// pod->container->servicePods hierarchy.
+	p.runtime.queueWork(func() {
+		logrus.Debugf("Pod %s has a service %s: checking if it can be stopped", p.ID(), serviceCtr.ID())
+		canStop, err := serviceCtr.canStopServiceContainerLocked()
+		if err != nil {
+			logrus.Errorf("Checking whether service of container %s can be stopped: %v", serviceCtr.ID(), err)
+			return
+		}
+		if !canStop {
+			return
+		}
+		logrus.Debugf("Stopping service container %s", serviceCtr.ID())
+		if err := serviceCtr.Stop(); err != nil {
+			logrus.Errorf("Stopping service container %s: %v", serviceCtr.ID(), err)
+		}
+	})
+	return nil
+}
+
+// Starts the pod's service container if it's not already running.
+func (p *Pod) maybeStartServiceContainer(ctx context.Context) error {
+	if !p.hasServiceContainer() {
+		return nil
+	}
+
+	serviceCtr, err := p.serviceContainer()
+	if err != nil {
+		return fmt.Errorf("getting pod's service container: %w", err)
+	}
+
+	serviceCtr.lock.Lock()
+	defer serviceCtr.lock.Unlock()
+
+	if err := serviceCtr.syncContainer(); err != nil {
+		return err
+	}
+
+	if serviceCtr.state.State == define.ContainerStateRunning {
+		return nil
+	}
+
+	// Restart will reinit among other things.
+	return serviceCtr.restartWithTimeout(ctx, 0)
+}
+
+// canRemoveServiceContainerLocked returns true if all pods of the service are removed.
+// Note that the method acquires the container lock.
+func (c *Container) canRemoveServiceContainerLocked() (bool, error) {
+	c.lock.Lock()
+	defer c.lock.Unlock()
+	if err := c.syncContainer(); err != nil {
+		return false, err
+	}
+
+	if !c.isService() {
+		return false, fmt.Errorf("internal error: checking service: container %s is not a service container", c.ID())
+	}
+
+	for _, id := range c.state.Service.Pods {
+		if _, err := c.runtime.LookupPod(id); err != nil {
+			if errors.Is(err, define.ErrNoSuchPod) {
+				continue
+			}
+			return false, err
+		}
+		return false, nil
+	}
+
+	return true, nil
+}
+
+// Checks whether the service container can be removed and does so.
+func (p *Pod) maybeRemoveServiceContainer() error {
+	if !p.hasServiceContainer() {
+		return nil
+	}
+
+	serviceCtr, err := p.serviceContainer()
+	if err != nil {
+		return fmt.Errorf("getting pod's service container: %w", err)
+	}
+	// Checking whether the service can be removed must be done in
+	// the runtime's work queue to resolve ABBA deadlocks in the
+	// pod->container->servicePods hierarchy.
+	p.runtime.queueWork(func() {
+		logrus.Debugf("Pod %s has a service %s: checking if it can be removed", p.ID(), serviceCtr.ID())
+		canRemove, err := serviceCtr.canRemoveServiceContainerLocked()
+		if err != nil {
+			logrus.Errorf("Checking whether service of container %s can be removed: %v", serviceCtr.ID(), err)
+			return
+		}
+		if !canRemove {
+			return
+		}
+		timeout := uint(0)
+		logrus.Debugf("Removing service container %s", serviceCtr.ID())
+		if err := p.runtime.RemoveContainer(context.Background(), serviceCtr, true, false, &timeout); err != nil {
+			logrus.Errorf("Removing service container %s: %v", serviceCtr.ID(), err)
+		}
+	})
+	return nil
+}
@@ -54,6 +54,8 @@ type PlayKubeOptions struct {
 	LogOptions []string
 	// Start - don't start the pod if false
 	Start types.OptionalBool
+	// ServiceContainer - creates a service container that is started before and is stopped after all pods.
+	ServiceContainer bool
 	// Userns - define the user namespace to use.
 	Userns string
 }
@@ -28,12 +28,54 @@ import (
 	"github.com/containers/podman/v4/pkg/specgenutil"
 	"github.com/containers/podman/v4/pkg/util"
 	"github.com/ghodss/yaml"
+	"github.com/opencontainers/go-digest"
 	"github.com/pkg/errors"
 	"github.com/sirupsen/logrus"
 	yamlv2 "gopkg.in/yaml.v2"
 )
 
-func (ic *ContainerEngine) PlayKube(ctx context.Context, body io.Reader, options entities.PlayKubeOptions) (*entities.PlayKubeReport, error) {
+// createServiceContainer creates a container that can later on
+// be associated with the pods of a K8s yaml. It will be started along with
+// the first pod.
+func (ic *ContainerEngine) createServiceContainer(ctx context.Context, name string) (*libpod.Container, error) {
+	// Similar to infra containers, a service container uses the pause image.
+	image, err := generate.PullOrBuildInfraImage(ic.Libpod, "")
+	if err != nil {
+		return nil, fmt.Errorf("image for service container: %w", err)
+	}
+
+	ctrOpts := entities.ContainerCreateOptions{
+		// Inherited from infra containers
+		ImageVolume:      "bind",
+		IsInfra:          false,
+		MemorySwappiness: -1,
+		// No need to spin up slirp etc.
+		Net: &entities.NetOptions{Network: specgen.Namespace{NSMode: specgen.NoNetwork}},
+	}
+
+	// Create and fill out the runtime spec.
+	s := specgen.NewSpecGenerator(image, false)
+	if err := specgenutil.FillOutSpecGen(s, &ctrOpts, []string{}); err != nil {
+		return nil, fmt.Errorf("completing spec for service container: %w", err)
+	}
+	s.Name = name
+
+	runtimeSpec, spec, opts, err := generate.MakeContainer(ctx, ic.Libpod, s, false, nil)
+	if err != nil {
+		return nil, fmt.Errorf("creating runtime spec for service container: %w", err)
+	}
+	opts = append(opts, libpod.WithIsService())
+
+	// Create a new libpod container based on the spec.
+	ctr, err := ic.Libpod.NewContainer(ctx, runtimeSpec, spec, false, opts...)
+	if err != nil {
+		return nil, fmt.Errorf("creating service container: %w", err)
+	}
+
+	return ctr, nil
+}
+
+func (ic *ContainerEngine) PlayKube(ctx context.Context, body io.Reader, options entities.PlayKubeOptions) (_ *entities.PlayKubeReport, finalErr error) {
 	report := &entities.PlayKubeReport{}
 	validKinds := 0
 
@@ -67,6 +109,30 @@ func (ic *ContainerEngine) PlayKube(ctx context.Context, body io.Reader, options
 		return nil, errors.Wrap(err, "unable to read kube YAML")
 	}
 
+	// TODO: create constants for the various "kinds" of yaml files.
+	var serviceContainer *libpod.Container
+	if options.ServiceContainer && (kind == "Pod" || kind == "Deployment") {
+		// The name of the service container is the first 12
+		// characters of the yaml file's hash followed by the
+		// '-service' suffix to guarantee a predictable and
+		// discoverable name.
+		hash := digest.FromBytes(content).Encoded()
+		ctr, err := ic.createServiceContainer(ctx, hash[0:12]+"-service")
+		if err != nil {
+			return nil, err
+		}
+		serviceContainer = ctr
+		// Make sure to remove the container in case something goes wrong below.
+		defer func() {
+			if finalErr == nil {
+				return
+			}
+			if err := ic.Libpod.RemoveContainer(ctx, ctr, true, false, nil); err != nil {
+				logrus.Errorf("Cleaning up service container after failure: %v", err)
+			}
+		}()
+	}
+
 	switch kind {
 	case "Pod":
 		var podYAML v1.Pod
@@ -90,7 +156,7 @@ func (ic *ContainerEngine) PlayKube(ctx context.Context, body io.Reader, options
 			podYAML.Annotations[name] = val
 		}
 
-		r, err := ic.playKubePod(ctx, podTemplateSpec.ObjectMeta.Name, &podTemplateSpec, options, &ipIndex, podYAML.Annotations, configMaps)
+		r, err := ic.playKubePod(ctx, podTemplateSpec.ObjectMeta.Name, &podTemplateSpec, options, &ipIndex, podYAML.Annotations, configMaps, serviceContainer)
 		if err != nil {
 			return nil, err
 		}
@@ -104,7 +170,7 @@ func (ic *ContainerEngine) PlayKube(ctx context.Context, body io.Reader, options
 			return nil, errors.Wrap(err, "unable to read YAML as Kube Deployment")
 		}
 
-		r, err := ic.playKubeDeployment(ctx, &deploymentYAML, options, &ipIndex, configMaps)
+		r, err := ic.playKubeDeployment(ctx, &deploymentYAML, options, &ipIndex, configMaps, serviceContainer)
 		if err != nil {
 			return nil, err
 		}
@@ -148,7 +214,7 @@ func (ic *ContainerEngine) PlayKube(ctx context.Context, body io.Reader, options
 	return report, nil
 }
 
-func (ic *ContainerEngine) playKubeDeployment(ctx context.Context, deploymentYAML *v1apps.Deployment, options entities.PlayKubeOptions, ipIndex *int, configMaps []v1.ConfigMap) (*entities.PlayKubeReport, error) {
+func (ic *ContainerEngine) playKubeDeployment(ctx context.Context, deploymentYAML *v1apps.Deployment, options entities.PlayKubeOptions, ipIndex *int, configMaps []v1.ConfigMap, serviceContainer *libpod.Container) (*entities.PlayKubeReport, error) {
 	var (
 		deploymentName string
 		podSpec        v1.PodTemplateSpec
@@ -170,7 +236,7 @@ func (ic *ContainerEngine) playKubeDeployment(ctx context.Context, deploymentYAM
 	// create "replicas" number of pods
 	for i = 0; i < numReplicas; i++ {
 		podName := fmt.Sprintf("%s-pod-%d", deploymentName, i)
-		podReport, err := ic.playKubePod(ctx, podName, &podSpec, options, ipIndex, deploymentYAML.Annotations, configMaps)
+		podReport, err := ic.playKubePod(ctx, podName, &podSpec, options, ipIndex, deploymentYAML.Annotations, configMaps, serviceContainer)
 		if err != nil {
 			return nil, errors.Wrapf(err, "error encountered while bringing up pod %s", podName)
 		}
@@ -179,7 +245,7 @@ func (ic *ContainerEngine) playKubeDeployment(ctx context.Context, deploymentYAM
 	return &report, nil
 }
 
-func (ic *ContainerEngine) playKubePod(ctx context.Context, podName string, podYAML *v1.PodTemplateSpec, options entities.PlayKubeOptions, ipIndex *int, annotations map[string]string, configMaps []v1.ConfigMap) (*entities.PlayKubeReport, error) {
+func (ic *ContainerEngine) playKubePod(ctx context.Context, podName string, podYAML *v1.PodTemplateSpec, options entities.PlayKubeOptions, ipIndex *int, annotations map[string]string, configMaps []v1.ConfigMap, serviceContainer *libpod.Container) (*entities.PlayKubeReport, error) {
 	var (
 		writer      io.Writer
 		playKubePod entities.PlayKubePod
@@ -374,6 +440,10 @@ func (ic *ContainerEngine) playKubePod(ctx context.Context, podName string, podY
 		}
 	}
 
+	if serviceContainer != nil {
+		podSpec.PodSpecGen.ServiceContainerID = serviceContainer.ID()
+	}
+
 	// Create the Pod
 	pod, err := generate.MakePod(&podSpec, ic.Libpod)
 	if err != nil {
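Since go-digest's default algorithm is SHA-256, the service container's name can be predicted from the YAML file itself; a sketch of the naming scheme (the file name is illustrative):

    # First 12 hex characters of the file's sha256 sum, plus the "-service" suffix.
    sha256sum test.yaml | cut -c1-12    # e.g. 352a88685060
    # -> service container name: 352a88685060-service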
@@ -0,0 +1,89 @@
+package generate
+
+import (
+	"context"
+	"fmt"
+	"io/ioutil"
+	"os"
+
+	buildahDefine "github.com/containers/buildah/define"
+	"github.com/containers/common/pkg/config"
+	"github.com/containers/podman/v4/libpod"
+	"github.com/containers/podman/v4/libpod/define"
+)
+
+// PullOrBuildInfraImage pulls down the specified image or the one set in
+// containers.conf. If none is set, it builds a local pause image.
+func PullOrBuildInfraImage(rt *libpod.Runtime, imageName string) (string, error) {
+	rtConfig, err := rt.GetConfigNoCopy()
+	if err != nil {
+		return "", err
+	}
+
+	if imageName == "" {
+		imageName = rtConfig.Engine.InfraImage
+	}
+
+	if imageName != "" {
+		_, err := rt.LibimageRuntime().Pull(context.Background(), imageName, config.PullPolicyMissing, nil)
+		if err != nil {
+			return "", err
+		}
+		return imageName, nil
+	}
+
+	name, err := buildPauseImage(rt, rtConfig)
+	if err != nil {
+		return "", fmt.Errorf("building local pause image: %w", err)
+	}
+	return name, nil
+}
+
+func buildPauseImage(rt *libpod.Runtime, rtConfig *config.Config) (string, error) {
+	version, err := define.GetVersion()
+	if err != nil {
+		return "", err
+	}
+	imageName := fmt.Sprintf("localhost/podman-pause:%s-%d", version.Version, version.Built)
+
+	// First check if the image has already been built.
+	if _, _, err := rt.LibimageRuntime().LookupImage(imageName, nil); err == nil {
+		return imageName, nil
+	}
+
+	// Also look into the path as some distributions install catatonit in
+	// /usr/bin.
+	catatonitPath, err := rtConfig.FindHelperBinary("catatonit", true)
+	if err != nil {
+		return "", fmt.Errorf("finding pause binary: %w", err)
+	}
+
+	buildContent := fmt.Sprintf(`FROM scratch
+COPY %s /catatonit
+ENTRYPOINT ["/catatonit", "-P"]`, catatonitPath)
+
+	tmpF, err := ioutil.TempFile("", "pause.containerfile")
+	if err != nil {
+		return "", err
+	}
+	if _, err := tmpF.WriteString(buildContent); err != nil {
+		return "", err
+	}
+	if err := tmpF.Close(); err != nil {
+		return "", err
+	}
+	defer os.Remove(tmpF.Name())
+
+	buildOptions := buildahDefine.BuildOptions{
+		CommonBuildOpts: &buildahDefine.CommonBuildOptions{},
+		Output:          imageName,
+		Quiet:           true,
+		IgnoreFile:      "/dev/null", // makes sure to not read a local .ignorefile (see #13529)
+		IIDFile:         "/dev/null", // prevents Buildah from writing the ID on stdout
+	}
+	if _, _, err := rt.Build(context.Background(), buildOptions, tmpF.Name()); err != nil {
+		return "", err
+	}
+
+	return imageName, nil
+}
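The locally built pause image is tagged with the Podman version and build timestamp, so it can be located after the fact; an illustrative check (tag values vary per build):

    # e.g. localhost/podman-pause:4.1.0-dev-1648120000
    podman images localhost/podman-pause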
@@ -2,13 +2,8 @@ package generate
 
 import (
 	"context"
-	"fmt"
-	"io/ioutil"
 	"net"
-	"os"
 
-	buildahDefine "github.com/containers/buildah/define"
-	"github.com/containers/common/pkg/config"
 	"github.com/containers/podman/v4/libpod"
 	"github.com/containers/podman/v4/libpod/define"
 	"github.com/containers/podman/v4/pkg/domain/entities"
@@ -17,98 +12,18 @@ import (
 	"github.com/sirupsen/logrus"
 )
 
-func buildPauseImage(rt *libpod.Runtime, rtConfig *config.Config) (string, error) {
-	version, err := define.GetVersion()
-	if err != nil {
-		return "", err
-	}
-	imageName := fmt.Sprintf("localhost/podman-pause:%s-%d", version.Version, version.Built)
-
-	// First check if the image has already been built.
-	if _, _, err := rt.LibimageRuntime().LookupImage(imageName, nil); err == nil {
-		return imageName, nil
-	}
-
-	// Also look into the path as some distributions install catatonit in
-	// /usr/bin.
-	catatonitPath, err := rtConfig.FindHelperBinary("catatonit", true)
-	if err != nil {
-		return "", fmt.Errorf("finding pause binary: %w", err)
-	}
-
-	buildContent := fmt.Sprintf(`FROM scratch
-COPY %s /catatonit
-ENTRYPOINT ["/catatonit", "-P"]`, catatonitPath)
-
-	tmpF, err := ioutil.TempFile("", "pause.containerfile")
-	if err != nil {
-		return "", err
-	}
-	if _, err := tmpF.WriteString(buildContent); err != nil {
-		return "", err
-	}
-	if err := tmpF.Close(); err != nil {
-		return "", err
-	}
-	defer os.Remove(tmpF.Name())
-
-	buildOptions := buildahDefine.BuildOptions{
-		CommonBuildOpts: &buildahDefine.CommonBuildOptions{},
-		Output:          imageName,
-		Quiet:           true,
-		IgnoreFile:      "/dev/null", // makes sure to not read a local .ignorefile (see #13529)
-		IIDFile:         "/dev/null", // prevents Buildah from writing the ID on stdout
-	}
-	if _, _, err := rt.Build(context.Background(), buildOptions, tmpF.Name()); err != nil {
-		return "", err
-	}
-
-	return imageName, nil
-}
-
-func pullOrBuildInfraImage(p *entities.PodSpec, rt *libpod.Runtime) error {
-	if p.PodSpecGen.NoInfra {
-		return nil
-	}
-
-	rtConfig, err := rt.GetConfigNoCopy()
-	if err != nil {
-		return err
-	}
-
-	// NOTE: we need pull down the infra image if it was explicitly set by
-	// the user (or containers.conf) to the non-default one.
-	imageName := p.PodSpecGen.InfraImage
-	if imageName == "" {
-		imageName = rtConfig.Engine.InfraImage
-	}
-
-	if imageName != "" {
-		_, err := rt.LibimageRuntime().Pull(context.Background(), imageName, config.PullPolicyMissing, nil)
-		if err != nil {
-			return err
-		}
-	} else {
-		name, err := buildPauseImage(rt, rtConfig)
-		if err != nil {
-			return fmt.Errorf("building local pause image: %w", err)
-		}
-		imageName = name
-	}
-
-	p.PodSpecGen.InfraImage = imageName
-	p.PodSpecGen.InfraContainerSpec.RawImageName = imageName
-
-	return nil
-}
-
 func MakePod(p *entities.PodSpec, rt *libpod.Runtime) (*libpod.Pod, error) {
 	if err := p.PodSpecGen.Validate(); err != nil {
 		return nil, err
 	}
 
-	if err := pullOrBuildInfraImage(p, rt); err != nil {
-		return nil, err
+	if !p.PodSpecGen.NoInfra {
+		imageName, err := PullOrBuildInfraImage(rt, p.PodSpecGen.InfraImage)
+		if err != nil {
+			return nil, err
+		}
+		p.PodSpecGen.InfraImage = imageName
+		p.PodSpecGen.InfraContainerSpec.RawImageName = imageName
 	}
 
 	if !p.PodSpecGen.NoInfra && p.PodSpecGen.InfraContainerSpec != nil {
@@ -180,6 +95,11 @@ func createPodOptions(p *specgen.PodSpecGenerator) ([]libpod.PodCreateOption, er
 			options = append(options, libpod.WithPodUser())
 		}
 	}
 
+	if len(p.ServiceContainerID) > 0 {
+		options = append(options, libpod.WithServiceContainer(p.ServiceContainerID))
+	}
+
 	if len(p.CgroupParent) > 0 {
 		options = append(options, libpod.WithPodCgroupParent(p.CgroupParent))
 	}
@@ -204,6 +204,9 @@ type PodSpecGenerator struct {
 	PodStorageConfig
 	PodSecurityConfig
 	InfraContainerSpec *SpecGenerator `json:"-"`
+
+	// The ID of the pod's service container.
+	ServiceContainerID string `json:"serviceContainerID,omitempty"`
 }
 
 type PodResourceConfig struct {
@@ -408,19 +408,6 @@ EOF
     run_podman pod rm test
 }
 
-# Wait for the pod (1st arg) to transition into the state (2nd arg)
-function _ensure_pod_state() {
-    for i in {0..5}; do
-        run_podman pod inspect $1 --format "{{.State}}"
-        if [[ $output == "$2" ]]; then
-            break
-        fi
-        sleep 0.5
-    done
-
-    is "$output" "$2" "unexpected pod state"
-}
-
 @test "pod exit policies" {
     # Test setting exit policies
     run_podman pod create
@@ -100,6 +100,61 @@ RELABEL="system_u:object_r:container_file_t:s0"
     run_podman pod rm -t 0 -f test_pod
 }
 
+@test "podman play --service-container" {
+    skip_if_remote "service containers only work locally"
+
+    TESTDIR=$PODMAN_TMPDIR/testdir
+    mkdir -p $TESTDIR
+
+    yaml="
+apiVersion: v1
+kind: Pod
+metadata:
+  labels:
+    app: test
+  name: test_pod
+spec:
+  containers:
+  - command:
+    - top
+    image: $IMAGE
+    name: test
+    resources: {}
+"
+
+    echo "$yaml" > $PODMAN_TMPDIR/test.yaml
+    run_podman play kube --service-container=true $PODMAN_TMPDIR/test.yaml
+
+    # Make sure that the service container exists and runs.
+    run_podman container inspect "352a88685060-service" --format "{{.State.Running}}"
+    is "$output" "true"
+
+    # Stop the *main* container and make sure that
+    # 1) The pod transitions to Exited
+    # 2) The service container is stopped
+    # 3) The service container is marked as a service container
+    run_podman stop test_pod-test
+    _ensure_pod_state test_pod Exited
+    run_podman container inspect "352a88685060-service" --format "{{.State.Running}}"
+    is "$output" "false"
+    run_podman container inspect "352a88685060-service" --format "{{.IsService}}"
+    is "$output" "true"
+
+    # Restart the pod, make sure the service is running again
+    run_podman pod restart test_pod
+    run_podman container inspect "352a88685060-service" --format "{{.State.Running}}"
+    is "$output" "true"
+
+    # Kill the pod and make sure the service is not running
+    run_podman pod kill test_pod
+    run_podman container inspect "352a88685060-service" --format "{{.State.Running}}"
+    is "$output" "false"
+
+    # Remove the pod and make sure the service is removed along with it
+    run_podman pod rm test_pod
+    run_podman 1 container exists "352a88685060-service"
+}
+
 @test "podman play --network" {
     TESTDIR=$PODMAN_TMPDIR/testdir
     mkdir -p $TESTDIR
@@ -392,6 +392,19 @@ function pause_image() {
     echo "localhost/podman-pause:$output"
 }
 
+# Wait for the pod (1st arg) to transition into the state (2nd arg)
+function _ensure_pod_state() {
+    for i in {0..5}; do
+        run_podman pod inspect $1 --format "{{.State}}"
+        if [[ $output == "$2" ]]; then
+            break
+        fi
+        sleep 0.5
+    done
+
+    is "$output" "$2" "unexpected pod state"
+}
+
 ###########################
 #  _add_label_if_missing  # make sure skip messages include rootless/remote
 ###########################