Cleanup kube play workloads if error happens
If an error happens while playing a kube yaml, clean up any pods, containers, and volumes that might have been created before the error was hit. This improves the user experience: when the user re-runs the same yaml with their fixes, podman does not complain about existing workloads left over from the previously failed run. Suppress the cleanup output when the cleanup happens after an error, as the user doesn't need to see or know about it.

Signed-off-by: Urvashi Mohnani <umohnani@redhat.com>
parent 6e2e9ab227 · commit 2f29639bd3
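In miniature, the change is: run kube play, and if it fails, quietly tear down whatever was created and return the original error, wrapping any teardown failure. A minimal sketch of that pattern, assuming hypothetical stand-ins playWorkloads and teardownWorkloads for podman's kubeplay and teardown:

package main

import "fmt"

// playWorkloads simulates a kube play run that fails partway through.
func playWorkloads() error { return fmt.Errorf("image pull failed") }

// teardownWorkloads simulates the quiet cleanup of whatever was created.
func teardownWorkloads() error { return nil }

func play() error {
	if err := playWorkloads(); err != nil {
		// Quietly remove anything created before the failure so a fixed
		// YAML can be re-played without "already exists" complaints.
		if tErr := teardownWorkloads(); tErr != nil {
			return fmt.Errorf("error tearing down workloads %q after kube play error %q", tErr, err)
		}
		return err
	}
	return nil
}

func main() {
	fmt.Println(play()) // prints the original play error; the cleanup itself stays silent
}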
@@ -52,5 +52,5 @@ func down(cmd *cobra.Command, args []string) error {
 	if err != nil {
 		return err
 	}
-	return teardown(reader, entities.PlayKubeDownOptions{Force: downOptions.Force})
+	return teardown(reader, entities.PlayKubeDownOptions{Force: downOptions.Force}, false)
 }
@@ -253,11 +253,11 @@ func play(cmd *cobra.Command, args []string) error {
 	}
 
 	if playOptions.Down {
-		return teardown(reader, entities.PlayKubeDownOptions{Force: playOptions.Force})
+		return teardown(reader, entities.PlayKubeDownOptions{Force: playOptions.Force}, false)
 	}
 
 	if playOptions.Replace {
-		if err := teardown(reader, entities.PlayKubeDownOptions{Force: playOptions.Force}); err != nil && !errorhandling.Contains(err, define.ErrNoSuchPod) {
+		if err := teardown(reader, entities.PlayKubeDownOptions{Force: playOptions.Force}, false); err != nil && !errorhandling.Contains(err, define.ErrNoSuchPod) {
 			return err
 		}
 		if _, err := reader.Seek(0, 0); err != nil {
@@ -265,7 +265,19 @@ func play(cmd *cobra.Command, args []string) error {
 		}
 	}
 
-	return kubeplay(reader)
+	if err := kubeplay(reader); err != nil {
+		// teardown any containers, pods, and volumes that might have been created before we hit the error
+		teardownReader, trErr := readerFromArg(args[0])
+		if trErr != nil {
+			return trErr
+		}
+		if tErr := teardown(teardownReader, entities.PlayKubeDownOptions{Force: true}, true); tErr != nil && !errorhandling.Contains(tErr, define.ErrNoSuchPod) {
+			return fmt.Errorf("error tearing down workloads %q after kube play error %q", tErr, err)
+		}
+		return err
+	}
+
+	return nil
 }
 
 func playKube(cmd *cobra.Command, args []string) error {
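One detail worth noting in the error path above: it builds a second reader via readerFromArg(args[0]) rather than reusing reader, presumably because kubeplay has already consumed it (that reasoning is an assumption, not stated in the commit). A small standalone sketch of the underlying reader behavior:

package main

import (
	"bytes"
	"fmt"
	"io"
)

func main() {
	r := bytes.NewReader([]byte("apiVersion: v1\nkind: Pod\n"))

	first, _ := io.ReadAll(r) // first consumer drains the reader
	fmt.Println(len(first))   // 25

	second, _ := io.ReadAll(r) // without a rewind, nothing is left for a second pass
	fmt.Println(len(second))   // 0

	r.Seek(0, io.SeekStart) // rewinding (or rebuilding the reader) restores the content
	third, _ := io.ReadAll(r)
	fmt.Println(len(third)) // 25
}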
@@ -307,7 +319,7 @@ func readerFromArg(fileName string) (*bytes.Reader, error) {
 	return bytes.NewReader(data), nil
 }
 
-func teardown(body io.Reader, options entities.PlayKubeDownOptions) error {
+func teardown(body io.Reader, options entities.PlayKubeDownOptions, quiet bool) error {
 	var (
 		podStopErrors utils.OutputErrors
 		podRmErrors   utils.OutputErrors
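Adding a third parameter means every existing teardown call site has to change. A hypothetical alternative (not what this commit does) would be to carry the flag in the options struct instead; sketched below with a stand-in type, since the real entities.PlayKubeDownOptions has no Quiet field here:

package main

import "fmt"

// downOptions is a stand-in for entities.PlayKubeDownOptions with a
// hypothetical Quiet field added.
type downOptions struct {
	Force bool
	Quiet bool
}

func teardown(opts downOptions) {
	if !opts.Quiet {
		fmt.Println("Pods stopped:")
	}
}

func main() {
	teardown(downOptions{Force: true, Quiet: true}) // error-path style: output suppressed
}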
@@ -319,9 +331,11 @@ func teardown(body io.Reader, options entities.PlayKubeDownOptions) error {
 	}
 
 	// Output stopped pods
-	fmt.Println("Pods stopped:")
+	if !quiet {
+		fmt.Println("Pods stopped:")
+	}
 	for _, stopped := range reports.StopReport {
-		if len(stopped.Errs) == 0 {
+		if len(stopped.Errs) == 0 && !quiet {
 			fmt.Println(stopped.Id)
 		} else {
 			podStopErrors = append(podStopErrors, stopped.Errs...)
@@ -334,9 +348,11 @@ func teardown(body io.Reader, options entities.PlayKubeDownOptions) error {
 	}
 
 	// Output rm'd pods
-	fmt.Println("Pods removed:")
+	if !quiet {
+		fmt.Println("Pods removed:")
+	}
 	for _, removed := range reports.RmReport {
-		if removed.Err == nil {
+		if removed.Err == nil && !quiet {
 			fmt.Println(removed.Id)
 		} else {
 			podRmErrors = append(podRmErrors, removed.Err)
@@ -349,9 +365,11 @@ func teardown(body io.Reader, options entities.PlayKubeDownOptions) error {
 	}
 
 	// Output rm'd volumes
-	fmt.Println("Volumes removed:")
+	if !quiet {
+		fmt.Println("Volumes removed:")
+	}
 	for _, removed := range reports.VolumeRmReport {
-		if removed.Err == nil {
+		if removed.Err == nil && !quiet {
 			fmt.Println(removed.Id)
 		} else {
 			volRmErrors = append(volRmErrors, removed.Err)
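The same `if !quiet { fmt.Println(...) }` gating now appears three times in teardown. A tiny helper could own that check; the following is a hypothetical refactor sketch, not part of this change:

package main

import "fmt"

// printUnlessQuiet centralizes the quiet check that teardown repeats for the
// "Pods stopped:", "Pods removed:", and "Volumes removed:" sections.
func printUnlessQuiet(quiet bool, line string) {
	if !quiet {
		fmt.Println(line)
	}
}

func main() {
	printUnlessQuiet(false, "Pods stopped:") // normal teardown: printed
	printUnlessQuiet(true, "Pods stopped:")  // error-path teardown: suppressed
}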
@@ -4406,4 +4406,40 @@ cgroups="disabled"`), 0644)
 		kube.WaitWithDefaultTimeout()
 		Expect(kube).Should(Exit(0))
 	})
+
+	It("podman kube play invalid yaml should clean up pod that was created before failure", func() {
+		podTemplate := `---
+apiVersion: v1
+kind: Pod
+metadata:
+  creationTimestamp: "2022-08-02T04:05:53Z"
+  labels:
+    app: vol-test-3-pod
+  name: vol-test-3
+spec:
+  containers:
+  - command:
+    - sleep
+    - "1000"
+    image: non-existing-image
+    name: vol-test-3
+    securityContext:
+      capabilities:
+        drop:
+        - CAP_MKNOD
+        - CAP_NET_RAW
+        - CAP_AUDIT_WRITE
+`
+
+		// the image is incorrect, so kube play will fail, but it should clean up the pod that was created before the failure happened
+		kube := podmanTest.Podman([]string{"kube", "play", podTemplate})
+		kube.WaitWithDefaultTimeout()
+		Expect(kube).To(ExitWithError())
+
+		ps := podmanTest.Podman([]string{"pod", "ps", "-q"})
+		ps.WaitWithDefaultTimeout()
+		Expect(ps).Should(Exit(0))
+		Expect(ps.OutputToStringArray()).To(HaveLen(0))
+	})
 })
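To exercise just this spec locally, a focused ginkgo run along these lines should work (the exact invocation and path are assumptions about the podman e2e test layout, not taken from this commit):

ginkgo --focus "clean up pod that was created before failure" ./test/e2e/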