Quadlet: kube: add ExitCodePropagation field

Add a new `ExitCodePropagation` field to allow configuring the newly
added functionality for controlling how the main PID of a kube service
exits.

Jira: issues.redhat.com/browse/RUN-1776
Signed-off-by: Valentin Rothberg <vrothberg@redhat.com>
Valentin Rothberg 2023-05-23 16:56:38 +02:00
parent 08b0d93ea3
commit 6487d9c11a
5 changed files with 101 additions and 20 deletions


@@ -481,15 +481,15 @@ There is only one required key, `Yaml`, which defines the path to the Kubernetes
Valid options for `[Kube]` are listed below:
| **[Kube] options** | **podman kube play equivalent** |
| ----------------- | ------------------ |
| ConfigMap=/tmp/config.map | --config-map /tmp/config.map |
| LogDriver=journald | --log-driver journald |
| Network=host | --net host |
| PodmanArgs=--annotation=key=value | --annotation=key=value |
| PublishPort=59-60 | --publish=59-60 |
| UserNS=keep-id:uid=200,gid=210 | --userns keep-id:uid=200,gid=210 |
| Yaml=/tmp/kube.yaml | podman kube play /tmp/kube.yaml |
| **[Kube] options** | **podman kube play equivalent** |
| --------------------------------- | ------------------------------------------- |
| ConfigMap=/tmp/config.map | --config-map /tmp/config.map |
| LogDriver=journald | --log-driver journald |
| Network=host | --net host |
| PodmanArgs=--annotation=key=value | --annotation=key=value |
| PublishPort=59-60 | --publish=59-60 |
| UserNS=keep-id:uid=200,gid=210 | --userns keep-id:uid=200,gid=210 |
| Yaml=/tmp/kube.yaml | podman kube play /tmp/kube.yaml |
Supported keys in the `[Kube]` section are:
@@ -501,6 +501,15 @@ it may be absolute or relative to the location of the unit file.
This key may be used multiple times
### `ExitCodePropagation=`
Control how the main PID of the systemd service should exit. The following values are supported:
- `all`: exit non-zero if all containers have failed (i.e., exited non-zero)
- `any`: exit non-zero if any container has failed
- `none`: exit zero and ignore failed containers
The current default value is `none`.
### `LogDriver=`
Set the log-driver Podman uses when running the container.
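
A concrete way to exercise the new `ExitCodePropagation=` key documented above is to drop a `.kube` unit into a Quadlet search path and start it. The sketch below is illustrative only: the unit name `demo.kube`, the rootful location `/etc/containers/systemd/`, and the YAML path are assumptions, not part of this change.

```bash
# Minimal .kube unit: fail the systemd service if any container in the pod fails.
cat > /etc/containers/systemd/demo.kube <<'EOF'
[Kube]
Yaml=/opt/k8s/deployment.yml
ExitCodePropagation=any
EOF

# Quadlet runs as a systemd generator, so a daemon-reload regenerates the unit.
systemctl daemon-reload
systemctl start demo.service
```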


@@ -62,6 +62,7 @@ const (
KeyImage = "Image"
KeyIP = "IP"
KeyIP6 = "IP6"
KeyExitCodePropagation = "ExitCodePropagation"
KeyLabel = "Label"
KeyLogDriver = "LogDriver"
KeyMount = "Mount"
@@ -192,17 +193,18 @@ var (
// Supported keys in "Kube" group
supportedKubeKeys = map[string]bool{
KeyConfigMap: true,
KeyLogDriver: true,
KeyNetwork: true,
KeyPodmanArgs: true,
KeyPublishPort: true,
KeyRemapGID: true,
KeyRemapUID: true,
KeyRemapUIDSize: true,
KeyRemapUsers: true,
KeyUserNS: true,
KeyYaml: true,
KeyConfigMap: true,
KeyExitCodePropagation: true,
KeyLogDriver: true,
KeyNetwork: true,
KeyPodmanArgs: true,
KeyPublishPort: true,
KeyRemapGID: true,
KeyRemapUID: true,
KeyRemapUIDSize: true,
KeyRemapUsers: true,
KeyUserNS: true,
KeyYaml: true,
}
)
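
Worth noting: `supportedKubeKeys` is the whitelist that `[Kube]` keys are validated against, so adding `KeyExitCodePropagation` here is what makes the new key legal; unknown or misspelled keys are rejected. A hedged way to observe the rejection, assuming a rootful unit path and the generator's documented dry-run mode (the typo'd unit name is purely illustrative):

```bash
# Deliberately misspell the new key, then run the Quadlet generator by hand;
# the unit should be reported as invalid rather than silently generated.
printf '[Kube]\nYaml=/opt/k8s/deployment.yml\nExitCodePropagatoin=all\n' \
    > /etc/containers/systemd/typo.kube
/usr/lib/systemd/system-generators/podman-system-generator --dryrun
```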
@@ -895,6 +897,10 @@ func ConvertKube(kube *parser.UnitFile, isUser bool) (*parser.UnitFile, error) {
        "--service-container=true",
    )

    if ecp, ok := kube.Lookup(KubeGroup, KeyExitCodePropagation); ok && len(ecp) > 0 {
        execStart.addf("--service-exit-code-propagation=%s", ecp)
    }

    handleLogDriver(kube, KubeGroup, execStart)

    if err := handleUserRemap(kube, KubeGroup, execStart, isUser, false); err != nil {
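
With the `Lookup` branch above in place, any `.kube` unit that sets the key gets the corresponding flag appended to the generated `ExecStart`. A quick, hedged way to confirm it on a running system (the unit name `demo.service` carries over from the earlier sketch and is an assumption):

```bash
# The generated service should now carry the propagation flag next to
# --service-container=true on its ExecStart= line.
systemctl cat demo.service | grep -- '--service-exit-code-propagation'
```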


@@ -0,0 +1,5 @@
[Kube]
Yaml=/opt/k8s/deployment.yml
## assert-podman-args "--service-exit-code-propagation=all"
ExitCodePropagation=all


@@ -580,6 +580,7 @@ var _ = Describe("quadlet system generator", func() {
Entry("Kube - Publish IPv6 ports", "ports_ipv6.kube"),
Entry("Kube - Logdriver", "logdriver.kube"),
Entry("Kube - PodmanArgs", "podmanargs.kube"),
Entry("Kube - Exit Code Propagation", "exit_code_propagation.kube"),
Entry("Network - Basic", "basic.network"),
Entry("Network - Label", "label.network"),


@@ -637,4 +637,64 @@ EOF
    service_cleanup $QUADLET_SERVICE_NAME failed
}

@test "quadlet - exit-code propagation" {
    local quadlet_file=$PODMAN_TMPDIR/basic_$(random_string).kube
    local yaml_file=$PODMAN_TMPDIR/$(random_string).yaml

    exit_tests="
all  | true  | 0   | inactive
all  | false | 137 | failed
none | false | 0   | inactive
"
    while read exit_code_prop cmd exit_code service_state; do
        cat > $yaml_file <<EOF
apiVersion: v1
kind: Pod
metadata:
  labels:
    app: test
  name: test_pod
spec:
  restartPolicy: Never
  containers:
  - name: ctr
    image: $IMAGE
    command:
    - $cmd
EOF

        cat > $quadlet_file <<EOF
[Kube]
Yaml=$yaml_file
ExitCodePropagation=$exit_code_prop
# Never restart the service as we only want to test exit codes.
[Service]
Restart=never
EOF

        run_quadlet "$quadlet_file"

        run systemctl status $QUADLET_SERVICE_NAME
        service_setup $QUADLET_SERVICE_NAME

        # Ensure we have output. Output is synced via sd-notify (socat in Exec)
        run journalctl "--since=$STARTED_TIME" --unit="$QUADLET_SERVICE_NAME"
        is "$output" '.*Started.*\.service.*'

        yaml_sha=$(sha256sum $yaml_file)
        service_container="${yaml_sha:0:12}-service"

        run_podman container inspect --format '{{.KubeExitCodePropagation}}' $service_container
        is "$output" "$exit_code_prop" "service container has the expected policy set in its annotations"

        run_podman wait $service_container
        is "$output" "$exit_code" "service container reflects expected exit code $exit_code"

        # FIXME: we need an additional cleanup when the main PID exits non-zero.
        # For .kube files we probably need a similar trick as in .container files
        # and use an ExecStopPost which _is_ being executed in case of failure.
        service_cleanup $QUADLET_SERVICE_NAME $service_state

        run_podman '?' kube down $yaml_file
    done < <(parse_table "$exit_tests")

    run_podman rmi $(pause_image)
}
# vim: filetype=sh
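
To iterate on the new system test locally, it can be run in isolation; a sketch assuming a podman source checkout with bats-core installed (the path and filter string simply match the new @test added above):

```bash
# Run only the exit-code propagation test from the quadlet system tests.
bats --filter "exit-code propagation" test/system/252-quadlet.bats
```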