Use the pod name in the output of pod start and stop

Signed-off-by: jkwiatko <jkwiatkoski@protonmail.com>
jkwiatko 2024-05-24 12:39:32 -04:00
parent e53b96cb25
commit b45364254f
9 changed files with 242 additions and 237 deletions
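In short: `podman pod start` and `podman pod stop` used to echo the pod's 64-character ID; with this change the start/stop reports carry a new RawInput field holding the pod's name, and the CLI prints that instead. The server-side paths (local engine, REST handlers, remote tunnel) all fill RawInput from the pod's name, so local and remote clients print the same thing. A minimal sketch of the pattern, condensed from the hunks below and not the literal podman code:

// Condensed illustration only; the report type and the print loop are
// simplified from the podman sources changed in the diff below.
package main

import "fmt"

// PodStartReport (and PodStopReport) gain a RawInput field alongside Id.
type PodStartReport struct {
	Errs     []error
	Id       string
	RawInput string // the name the pod was addressed by
}

func printReports(reports []*PodStartReport) {
	for _, r := range reports {
		if len(r.Errs) == 0 {
			fmt.Println(r.RawInput) // previously: fmt.Println(r.Id)
		}
	}
}

func main() {
	// Illustrative values; the Id here is a made-up, truncated pod ID.
	printReports([]*PodStartReport{{Id: "3c8b1e0f...", RawInput: "foobar99"}})
}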

View File

@@ -39,9 +39,7 @@ var (
 	}
 )
 
-var (
-	startOptions = podStartOptionsWrapper{}
-)
+var startOptions = podStartOptionsWrapper{}
 
 func init() {
 	registry.Commands = append(registry.Commands, registry.CliCommand{
@@ -60,9 +58,7 @@ func init() {
 }
 
 func start(cmd *cobra.Command, args []string) error {
-	var (
-		errs utils.OutputErrors
-	)
+	var errs utils.OutputErrors
 
 	ids, err := specgenutil.ReadPodIDFiles(startOptions.PodIDFiles)
 	if err != nil {
@@ -77,7 +73,7 @@ func start(cmd *cobra.Command, args []string) error {
 	// in the cli, first we print out all the successful attempts
 	for _, r := range responses {
 		if len(r.Errs) == 0 {
-			fmt.Println(r.Id)
+			fmt.Println(r.RawInput)
 		} else {
 			errs = append(errs, r.Errs...)
 		}

View File

@@ -71,9 +71,7 @@ func init() {
 }
 
 func stop(cmd *cobra.Command, args []string) error {
-	var (
-		errs utils.OutputErrors
-	)
+	var errs utils.OutputErrors
 	if cmd.Flag("time").Changed {
 		stopOptions.Timeout = stopOptions.timeoutCLI
 	}
@@ -91,7 +89,7 @@ func stop(cmd *cobra.Command, args []string) error {
 	// in the cli, first we print out all the successful attempts
 	for _, r := range responses {
 		if len(r.Errs) == 0 {
-			fmt.Println(r.Id)
+			fmt.Println(r.RawInput)
 		} else {
 			errs = append(errs, r.Errs...)
 		}

View File

@@ -177,7 +177,10 @@ func PodStop(w http.ResponseWriter, r *http.Request) {
 		}
 	}
 
-	report := entities.PodStopReport{Id: pod.ID()}
+	report := entities.PodStopReport{
+		Id:       pod.ID(),
+		RawInput: pod.Name(),
+	}
 	for id, err := range responses {
 		report.Errs = append(report.Errs, fmt.Errorf("stopping container %s: %w", id, err))
 	}
@@ -213,7 +216,15 @@ func PodStart(w http.ResponseWriter, r *http.Request) {
 		return
 	}
 
-	report := entities.PodStartReport{Id: pod.ID()}
+	cfg, err := pod.Config()
+	if err != nil {
+		utils.Error(w, http.StatusConflict, err)
+		return
+	}
+	report := entities.PodStartReport{
+		Id:       pod.ID(),
+		RawInput: cfg.Name,
+	}
 	for id, err := range responses {
 		report.Errs = append(report.Errs, fmt.Errorf("%v: %w", "starting container "+id, err))
 	}
@@ -559,14 +570,13 @@ func PodStats(w http.ResponseWriter, r *http.Request) {
 		return
 	}
 
-	var flush = func() {}
+	flush := func() {}
 	if flusher, ok := w.(http.Flusher); ok {
 		flush = flusher.Flush
 	}
 	// Collect the stats and send them over the wire.
 	containerEngine := abi.ContainerEngine{Libpod: runtime}
 	reports, err := containerEngine.PodStats(r.Context(), query.NamesOrIDs, options)
-
 	// Error checks as documented in swagger.
 	if err != nil {
 		if errors.Is(err, define.ErrNoSuchPod) {

View File

@@ -14,9 +14,7 @@ import (
 )
 
 func CreatePodFromSpec(ctx context.Context, spec *entitiesTypes.PodSpec) (*entitiesTypes.PodCreateReport, error) {
-	var (
-		pcr entitiesTypes.PodCreateReport
-	)
+	var pcr entitiesTypes.PodCreateReport
 	if spec == nil {
 		spec = new(entitiesTypes.PodSpec)
 	}
@@ -55,9 +53,7 @@ func Exists(ctx context.Context, nameOrID string, options *ExistsOptions) (bool,
 
 // Inspect returns low-level information about the given pod.
 func Inspect(ctx context.Context, nameOrID string, options *InspectOptions) (*entitiesTypes.PodInspectReport, error) {
-	var (
-		report entitiesTypes.PodInspectReport
-	)
+	var report entitiesTypes.PodInspectReport
 	if options == nil {
 		options = new(InspectOptions)
 	}
@@ -78,9 +74,7 @@ func Inspect(ctx context.Context, nameOrID string, options *InspectOptions) (*en
 // Kill sends a SIGTERM to all the containers in a pod. The optional signal parameter
 // can be used to override SIGTERM.
 func Kill(ctx context.Context, nameOrID string, options *KillOptions) (*entitiesTypes.PodKillReport, error) {
-	var (
-		report entitiesTypes.PodKillReport
-	)
+	var report entitiesTypes.PodKillReport
 	if options == nil {
 		options = new(KillOptions)
 	}
@@ -145,9 +139,7 @@ func Prune(ctx context.Context, options *PruneOptions) ([]*entitiesTypes.PodPrun
 // List returns all pods in local storage. The optional filters parameter can
 // be used to refine which pods should be listed.
 func List(ctx context.Context, options *ListOptions) ([]*entitiesTypes.ListPodsReport, error) {
-	var (
-		podsReports []*entitiesTypes.ListPodsReport
-	)
+	var podsReports []*entitiesTypes.ListPodsReport
 	if options == nil {
 		options = new(ListOptions)
 	}
@@ -231,6 +223,7 @@ func Start(ctx context.Context, nameOrID string, options *StartOptions) (*entiti
 
 	if response.StatusCode == http.StatusNotModified {
 		report.Id = nameOrID
+		report.RawInput = nameOrID
 		return &report, nil
 	}
 

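For remote clients, the last hunk above also covers the 304 Not Modified path (pod already running), where the binding returns early and, presumably because no response body is decoded there, fills both Id and RawInput with whatever the caller passed in. A rough usage sketch, not taken from the diff: the v5 module path, the socket URI, and the pod name "mypod" are assumptions for illustration.

// Rough usage sketch: start a pod through the bindings and read the new
// RawInput field. Module path, socket URI, and pod name are assumed values.
package main

import (
	"context"
	"fmt"
	"log"

	"github.com/containers/podman/v5/pkg/bindings"
	"github.com/containers/podman/v5/pkg/bindings/pods"
)

func main() {
	// Connect to a running podman system service.
	connCtx, err := bindings.NewConnection(context.Background(), "unix:///run/podman/podman.sock")
	if err != nil {
		log.Fatal(err)
	}

	// Start the pod by name; a nil options pointer uses the defaults,
	// matching how the tunnel engine calls pods.Start in the diff.
	report, err := pods.Start(connCtx, "mypod", nil)
	if err != nil {
		log.Fatal(err)
	}

	fmt.Println("Id:      ", report.Id)       // full pod ID
	fmt.Println("RawInput:", report.RawInput) // pod name, what the CLI now prints
}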
View File

@@ -22,8 +22,9 @@ type PodUnpauseReport struct {
 }
 
 type PodStopReport struct {
-	Errs []error
-	Id   string //nolint:revive,stylecheck
+	Errs     []error
+	Id       string //nolint:revive,stylecheck
+	RawInput string
 }
 
 type PodRestartReport struct {
@@ -32,8 +33,9 @@ type PodRestartReport struct {
 }
 
 type PodStartReport struct {
-	Errs []error
-	Id   string //nolint:revive,stylecheck
+	Errs     []error
+	Id       string //nolint:revive,stylecheck
+	RawInput string
 }
 
 type PodRmReport struct {

View File

@@ -194,7 +194,10 @@ func (ic *ContainerEngine) PodStop(ctx context.Context, namesOrIds []string, opt
 		return nil, err
 	}
 	for _, p := range pods {
-		report := entities.PodStopReport{Id: p.ID()}
+		report := entities.PodStopReport{
+			Id:       p.ID(),
+			RawInput: p.Name(),
+		}
 		errs, err := p.StopWithTimeout(ctx, true, options.Timeout)
 		if err != nil && !errors.Is(err, define.ErrPodPartialFail) {
 			report.Errs = []error{err}
@@ -247,7 +250,10 @@ func (ic *ContainerEngine) PodStart(ctx context.Context, namesOrIds []string, op
 	}
 
 	for _, p := range pods {
-		report := entities.PodStartReport{Id: p.ID()}
+		report := entities.PodStartReport{
+			Id:       p.ID(),
+			RawInput: p.Name(),
+		}
 		errs, err := p.Start(ctx)
 		if err != nil && !errors.Is(err, define.ErrPodPartialFail) {
 			report.Errs = []error{err}

View File

@@ -115,8 +115,9 @@ func (ic *ContainerEngine) PodStop(ctx context.Context, namesOrIds []string, opt
 		response, err := pods.Stop(ic.ClientCtx, p.Id, options)
 		if err != nil {
 			report := entities.PodStopReport{
-				Errs: []error{err},
-				Id:   p.Id,
+				Errs:     []error{err},
+				Id:       p.Id,
+				RawInput: p.Name,
 			}
 			reports = append(reports, &report)
 			continue
@@ -157,8 +158,9 @@ func (ic *ContainerEngine) PodStart(ctx context.Context, namesOrIds []string, op
 		response, err := pods.Start(ic.ClientCtx, p.Id, nil)
 		if err != nil {
 			report := entities.PodStartReport{
-				Errs: []error{err},
-				Id:   p.Id,
+				Errs:     []error{err},
+				Id:       p.Id,
+				RawInput: p.Name,
 			}
 			reports = append(reports, &report)
 			continue

View File

@@ -14,7 +14,6 @@ import (
 )
 
 var _ = Describe("Podman pod start", func() {
-
 	It("podman pod start bogus pod", func() {
 		session := podmanTest.Podman([]string{"pod", "start", "123"})
 		session.WaitWithDefaultTimeout()
@@ -31,16 +30,18 @@ var _ = Describe("Podman pod start", func() {
 	})
 
 	It("podman pod start single pod by name", func() {
-		_, ec, _ := podmanTest.CreatePod(map[string][]string{"--name": {"foobar99"}})
+		name := "foobar99"
+		_, ec, _ := podmanTest.CreatePod(map[string][]string{"--name": {name}})
 		Expect(ec).To(Equal(0))
 
-		session := podmanTest.Podman([]string{"create", "--pod", "foobar99", ALPINE, "ls"})
+		session := podmanTest.Podman([]string{"create", "--pod", name, ALPINE, "ls"})
 		session.WaitWithDefaultTimeout()
 		Expect(session).Should(ExitCleanly())
 
-		session = podmanTest.Podman([]string{"pod", "start", "foobar99"})
+		session = podmanTest.Podman([]string{"pod", "start", name})
 		session.WaitWithDefaultTimeout()
 		Expect(session).Should(ExitCleanly())
+		Expect(session.OutputToString()).Should(ContainSubstring(name))
 	})
 
 	It("podman pod start multiple pods", func() {
@@ -62,6 +63,8 @@ var _ = Describe("Podman pod start", func() {
 		session.WaitWithDefaultTimeout()
 		Expect(session).Should(ExitCleanly())
 		Expect(podmanTest.NumberOfContainersRunning()).To(Equal(2))
+		Expect(session.OutputToString()).Should(ContainSubstring("foobar99"))
+		Expect(session.OutputToString()).Should(ContainSubstring("foobar100"))
 	})
 
 	It("multiple pods in conflict", func() {
@@ -231,5 +234,4 @@ var _ = Describe("Podman pod start", func() {
 		cmdline := readFirstLine(fmt.Sprintf("/proc/%s/cmdline", infraConmonPID))
 		Expect(cmdline).To(ContainSubstring("/conmon"))
 	})
-
 })

View File

@@ -10,88 +10,88 @@ load helpers
 # options both in the container podman and out here: that's the only
 # way to share image and container storage.
 if [ -z "${PODMAN_UPGRADE_WORKDIR}" ]; then
     # Much as I'd love a descriptive name like "podman-upgrade-tests.XXXXX",
     # keep it short ("pu") because of the 100-character path length limit
     # for UNIX sockets (needed by conmon)
     export PODMAN_UPGRADE_WORKDIR=$(mktemp -d --tmpdir=${BATS_TMPDIR:-${TMPDIR:-/tmp}} pu.XXXXXX)
     touch $PODMAN_UPGRADE_WORKDIR/status
 fi
 # Generate a set of random strings used for content verification
 if [ -z "${RANDOM_STRING_1}" ]; then
     export RANDOM_STRING_1=$(random_string 15)
     export LABEL_CREATED=$(random_string 16)
     export LABEL_FAILED=$(random_string 17)
     export LABEL_RUNNING=$(random_string 18)
     export HOST_PORT=$(random_free_port)
     export MYTESTNETWORK=mytestnetwork$(random_string 8)
 fi
 # Version string of the podman we're actually testing, e.g. '3.0.0-dev-d1a26013'
-PODMAN_VERSION=$($PODMAN version |awk '/^Version:/ { V=$2 } /^Git Commit:/ { G=$3 } END { print V "-" substr(G,0,8) }')
+PODMAN_VERSION=$($PODMAN version | awk '/^Version:/ { V=$2 } /^Git Commit:/ { G=$3 } END { print V "-" substr(G,0,8) }')
 setup() {
     skip_if_rootless
     # The podman-in-podman image (old podman)
     if [[ -z "$PODMAN_UPGRADE_FROM" ]]; then
         echo "# \$PODMAN_UPGRADE_FROM is undefined (should be e.g. v4.1.0)" >&3
         false
     fi
-    if [ "$(< $PODMAN_UPGRADE_WORKDIR/status)" = "failed" ]; then
+    if [ "$(<$PODMAN_UPGRADE_WORKDIR/status)" = "failed" ]; then
         skip "*** setup failed - no point in running tests"
     fi
     # cgroup-manager=systemd does not work inside a container
     # skip_mount_home=true is required so we can share the storage mounts between host and container,
     # the default c/storage behavior is to make the mount propagation private.
     export _PODMAN_TEST_OPTS="--storage-opt=skip_mount_home=true --cgroup-manager=cgroupfs --root=$PODMAN_UPGRADE_WORKDIR/root --runroot=$PODMAN_UPGRADE_WORKDIR/runroot --tmpdir=$PODMAN_UPGRADE_WORKDIR/tmp"
 }
 ###############################################################################
 # BEGIN setup
 @test "initial setup: start $PODMAN_UPGRADE_FROM containers" {
-    echo failed >| $PODMAN_UPGRADE_WORKDIR/status
+    echo failed >|$PODMAN_UPGRADE_WORKDIR/status
     OLD_PODMAN=quay.io/podman/stable:$PODMAN_UPGRADE_FROM
     $PODMAN pull $OLD_PODMAN
     # Can't mix-and-match iptables.
    # This can only fail when we bring in new CI VMs. If/when it does fail,
     # we'll need to figure out how to solve it. Until then, punt.
     iptables_old_version=$($PODMAN run --rm $OLD_PODMAN iptables -V)
     run -0 expr "$iptables_old_version" : ".*(\(.*\))"
     iptables_old_which="$output"
     iptables_new_version=$(iptables -V)
     run -0 expr "$iptables_new_version" : ".*(\(.*\))"
     iptables_new_which="$output"
     if [[ "$iptables_new_which" != "$iptables_old_which" ]]; then
         die "Cannot mix iptables; $PODMAN_UPGRADE_FROM container uses $iptables_old_which, host uses $iptables_new_which"
     fi
     # Shortcut name, because we're referencing it a lot
     pmroot=$PODMAN_UPGRADE_WORKDIR
     # WWW content to share
     mkdir -p $pmroot/var/www
     echo $RANDOM_STRING_1 >$pmroot/var/www/index.txt
     # podman tmpdir
     mkdir -p $pmroot/tmp
     #
     # Script to run >>OLD<< podman commands.
     #
     # These commands will be run inside a podman container. The "podman"
     # command in this script will be the desired old-podman version.
     #
     pmscript=$pmroot/setup
-    cat >| $pmscript <<EOF
+    cat >|$pmscript <<EOF
 #!/bin/bash
 #
@@ -148,51 +148,51 @@ while :;do
     sleep 0.5
 done
 EOF
     chmod 555 $pmscript
     # Clean up vestiges of previous run
     $PODMAN rm -f podman_parent
     # Not entirely a NOP! This is just so we get the /run/... mount points created on a CI VM
     $PODMAN run --rm $OLD_PODMAN true
     # Containers-common around release 1-55 no-longer supplies this file
     sconf=/etc/containers/storage.conf
     v_sconf=
     if [[ -e "$sconf" ]]; then
         v_sconf="-v $sconf:$sconf"
     fi
     #
     # Use new-podman to run the above script under old-podman.
     #
     # DO NOT USE run_podman HERE! That would use $_PODMAN_TEST_OPTS
     # and would write into our shared test dir, which would then
     # pollute it for use by old-podman. We must keep that pristine
     # so old-podman is the first to write to it.
     #
     # mount /etc/containers/storage.conf to use the same storage settings as on the host
     # mount /dev/shm because the container locks are stored there
     # mount /run/containers for the dnsname plugin
     #
     $PODMAN run -d --name podman_parent \
         --privileged \
         --net=host \
         --cgroupns=host \
         --pid=host \
         $v_sconf \
         -v /dev/fuse:/dev/fuse \
         -v /run/crun:/run/crun \
         -v /run/netns:/run/netns:rshared \
         -v /run/containers:/run/containers \
         -v /dev/shm:/dev/shm \
         -v /etc/containers/networks:/etc/containers/networks \
         -v $pmroot:$pmroot:rshared \
         $OLD_PODMAN $pmroot/setup
     _PODMAN_TEST_OPTS= wait_for_ready podman_parent
-    echo OK >| $PODMAN_UPGRADE_WORKDIR/status
+    echo OK >|$PODMAN_UPGRADE_WORKDIR/status
 }
 # END setup
@@ -201,187 +201,183 @@ EOF
 # This is a NOP; used only so the version string will show up in logs
 @test "upgrade: $PODMAN_UPGRADE_FROM -> $PODMAN_VERSION" {
     :
 }
 @test "info - network" {
     run_podman info --format '{{.Host.NetworkBackend}}'
     assert "$output" = "netavark" "As of Feb 2024, CNI will never be default"
 }
 # Whichever DB was picked by old_podman, make sure we honor it
 @test "info - database" {
     run_podman info --format '{{.Host.DatabaseBackend}}'
     if version_is_older_than 4.8; then
         assert "$output" = "boltdb" "DatabaseBackend for podman < 4.8"
     else
         assert "$output" = "sqlite" "DatabaseBackend for podman >= 4.8"
     fi
 }
 @test "images" {
     run_podman images -a --format '{{.Names}}'
     assert "${lines[0]}" =~ "\[localhost/podman-pause:${PODMAN_UPGRADE_FROM##v}-.*\]" "podman images, line 0"
     assert "${lines[1]}" = "[$IMAGE]" "podman images, line 1"
 }
 @test "ps : one container running" {
     run_podman ps --format '{{.Image}}--{{.Names}}'
     is "$output" "$IMAGE--myrunningcontainer" "ps: one container running"
 }
 @test "ps -a : shows all containers" {
     run_podman ps -a \
         --format '{{.Names}}--{{.Status}}--{{.Ports}}--{{.Labels.mylabel}}' \
         --sort=created
     assert "${lines[0]}" == "mycreatedcontainer--Created----$LABEL_CREATED" "line 0, created"
     assert "${lines[1]}" =~ "mydonecontainer--Exited \(0\).*----<no value>" "line 1, done"
     assert "${lines[2]}" =~ "myfailedcontainer--Exited \(17\) .*----$LABEL_FAILED" "line 2, fail"
     # Port order is not guaranteed
     assert "${lines[3]}" =~ "myrunningcontainer--Up .*--$LABEL_RUNNING" "line 3, running"
     assert "${lines[3]}" =~ ".*--.*0\.0\.0\.0:$HOST_PORT->80\/tcp.*--.*" "line 3, first port forward"
     assert "${lines[3]}" =~ ".*--.*127\.0\.0\.1\:9090-9092->8080-8082\/tcp.*--.*" "line 3, second port forward"
     assert "${lines[4]}" =~ ".*-infra--Created----<no value>" "line 4, infra container"
     # For debugging: dump containers and IDs
     if [[ -n "$PODMAN_UPGRADE_TEST_DEBUG" ]]; then
         run_podman ps -a
         for l in "${lines[@]}"; do
             echo "# $l" >&3
         done
     fi
 }
 @test "inspect - all container status" {
     tests="
 running | running | 0
 created | created | 0
 done | exited | 0
 failed | exited | 17
 "
     while read cname state exitstatus; do
         run_podman inspect --format '{{.State.Status}}--{{.State.ExitCode}}' my${cname}container
         is "$output" "$state--$exitstatus" "status of my${cname}container"
     done < <(parse_table "$tests")
 }
 @test "network - curl" {
     run -0 curl --max-time 3 -s 127.0.0.1:$HOST_PORT/index.txt
     is "$output" "$RANDOM_STRING_1" "curl on running container"
 }
 # IMPORTANT: connect should happen before restart, we want to check
 # if we can connect on an existing running container
 @test "network - connect" {
     run_podman network connect $MYTESTNETWORK myrunningcontainer
     run_podman network disconnect podman myrunningcontainer
     run -0 curl --max-time 3 -s 127.0.0.1:$HOST_PORT/index.txt
     is "$output" "$RANDOM_STRING_1" "curl on container with second network connected"
 }
 @test "network - restart" {
     # restart the container and check if we can still use the port
     run_podman stop -t0 myrunningcontainer
     run_podman start myrunningcontainer
     run -0 curl --max-time 3 -s 127.0.0.1:$HOST_PORT/index.txt
     is "$output" "$RANDOM_STRING_1" "curl on restarted container"
 }
 @test "logs" {
     run_podman logs mydonecontainer
     is "$output" "++$RANDOM_STRING_1++" "podman logs on stopped container"
 }
 @test "exec" {
     run_podman exec myrunningcontainer cat /var/www/index.txt
     is "$output" "$RANDOM_STRING_1" "exec into myrunningcontainer"
 }
 @test "load" {
     # FIXME, is this really necessary?
     skip "TBI. Not sure if there's any point to this."
 }
 @test "mount" {
     skip "TBI"
 }
 @test "pods" {
     run_podman pod inspect mypod
     is "$output" ".*mypod.*"
     run_podman pod start mypod
-    is "$output" "[0-9a-f]\\{64\\}" "podman pod start"
+    is "$output" "mypod" "podman pod start"
     # run a container in an existing pod
     # FIXME: 2024-02-07 fails: pod X cgroup is not set: internal libpod error
     #run_podman run --pod=mypod --ipc=host --rm $IMAGE echo it works
     #is "$output" ".*it works.*" "podman run --pod"
     run_podman pod ps
     is "$output" ".*mypod.*" "podman pod ps shows name"
     is "$output" ".*Running.*" "podman pod ps shows running state"
     run_podman pod stop mypod
-    is "$output" "[0-9a-f]\\{64\\}" "podman pod stop"
+    is "$output" "mypod" "podman pod stop"
     run_podman pod rm mypod
     is "$output" "[0-9a-f]\\{64\\}" "podman pod rm"
 }
 # FIXME: commit? kill? network? pause? restart? top? volumes? What else?
 @test "start" {
     run_podman start -a mydonecontainer
     is "$output" "++$RANDOM_STRING_1++" "start on already-run container"
 }
 @test "rm a stopped container" {
     run_podman rm myfailedcontainer
     is "$output" "myfailedcontainer" "podman rm myfailedcontainer"
     run_podman rm mydonecontainer
     is "$output" "mydonecontainer" "podman rm mydonecontainer"
 }
 @test "stop and rm" {
     run_podman stop -t0 myrunningcontainer
     run_podman rm myrunningcontainer
 }
 @test "clean up parent" {
     if [[ -n "$PODMAN_UPGRADE_TEST_DEBUG" ]]; then
         skip "workdir is $PODMAN_UPGRADE_WORKDIR"
     fi
     # We're done with shared environment. By clearing this, we can now
     # use run_podman for actions on the podman_parent container
     unset _PODMAN_TEST_OPTS
     # (Useful for debugging the 'rm -f' step below, which, when it fails, only
     # gives a container ID. This 'ps' confirms that the CID is podman_parent)
     run_podman ps -a
     # Stop the container gracefully
     run_podman exec podman_parent touch /stop
     run_podman wait podman_parent
     run_podman 0+we logs podman_parent
     run_podman 0+we rm -f podman_parent
     # Maybe some day I'll understand why podman leaves stray overlay mounts
     while read overlaydir; do
         umount $overlaydir || true
     done < <(mount | grep $PODMAN_UPGRADE_WORKDIR | awk '{print $3}' | sort -r)
     rm -rf $PODMAN_UPGRADE_WORKDIR
 }
 # FIXME: now clean up