Merge pull request #9266 from vrothberg/fix-6510
make `podman rmi` more robust
This commit is contained in: commit 2bf13219f5
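The change below teaches the image-removal path to tolerate images that a parallel `podman rmi` has already deleted. The following is a minimal, self-contained sketch of that idea only, written against the standard library's errors package; `errImageUnknown`, `removeImage`, and `deleteImage` here are illustrative stand-ins, not the real libpod API.

package main

import (
	"errors"
	"fmt"
)

// Stand-in for storage.ErrImageUnknown; not the real containers/storage sentinel.
var errImageUnknown = errors.New("image not known")

// removeImage simulates a backend call that can lose a race with a parallel removal.
func removeImage(id string, stillPresent bool) error {
	if !stillPresent {
		return fmt.Errorf("removing image %s: %w", id, errImageUnknown)
	}
	return nil
}

// deleteImage mirrors the pattern of the hunk below: a "not known" error from
// the backend is reported as a successful deletion instead of failing `rmi`.
func deleteImage(id string, stillPresent bool) error {
	err := removeImage(id, stillPresent)
	switch {
	case err == nil:
		fmt.Println("deleted:", id)
		return nil
	case errors.Is(err, errImageUnknown):
		// Someone else removed the image first (the #6510 race).
		fmt.Println("already removed, reported as deleted:", id)
		return nil
	default:
		return err
	}
}

func main() {
	_ = deleteImage("sha256:1234", true)  // normal removal
	_ = deleteImage("sha256:1234", false) // image vanished concurrently
}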
@@ -580,12 +580,21 @@ func (ir *ImageEngine) Remove(ctx context.Context, images []string, opts entitie
 	// without having to pass all local data around.
 	deleteImage := func(img *image.Image) error {
 		results, err := ir.Libpod.RemoveImage(ctx, img, opts.Force)
-		if err != nil {
+		switch errors.Cause(err) {
+		case nil:
+			// Removal worked, so let's report it.
+			report.Deleted = append(report.Deleted, results.Deleted)
+			report.Untagged = append(report.Untagged, results.Untagged...)
+			return nil
+		case storage.ErrImageUnknown:
+			// The image must have been removed already (see #6510).
+			report.Deleted = append(report.Deleted, img.ID())
+			report.Untagged = append(report.Untagged, img.ID())
+			return nil
+		default:
+			// Fatal error.
 			return err
 		}
-		report.Deleted = append(report.Deleted, results.Deleted)
-		report.Untagged = append(report.Untagged, results.Untagged...)
-		return nil
 	}
 
 	// Delete all images from the local storage.
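Note that the new switch compares errors.Cause(err) rather than err itself: with github.com/pkg/errors, Cause unwraps wrapped errors so the comparison against the sentinel still matches. A small sketch of that behavior, assuming the lower layers wrap the sentinel with pkg/errors (the storage sentinel is replaced by a local stand-in):

package main

import (
	"fmt"

	"github.com/pkg/errors"
)

// Stand-in for storage.ErrImageUnknown.
var errImageUnknown = errors.New("image not known")

func main() {
	// Lower layers typically return the sentinel wrapped with extra context.
	wrapped := errors.Wrap(errImageUnknown, "unable to remove image")

	// A direct comparison fails because of the wrapping ...
	fmt.Println(wrapped == errImageUnknown) // false

	// ... but errors.Cause walks back to the sentinel, which is what the
	// case label in the hunk above relies on.
	switch errors.Cause(wrapped) {
	case errImageUnknown:
		fmt.Println("matched the storage sentinel")
	default:
		fmt.Println("treated as a fatal error")
	}
}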
@@ -1,7 +1,9 @@
 package integration
 
 import (
+	"fmt"
 	"os"
+	"sync"
 
 	. "github.com/containers/podman/v2/test/utils"
 	. "github.com/onsi/ginkgo"
@@ -275,4 +277,32 @@ RUN find $LOCAL
 		match, _ := session.ErrorGrepString("image name or ID must be specified")
 		Expect(match).To(BeTrue())
 	})
+
+	It("podman image rm - concurrent with shared layers", func() {
+		// #6510 has shown a fairly simple reproducer to force storage
+		// errors during parallel image removal. Since it's subject to
+		// a race, we may not hit the condition 100 percent of the time,
+		// but local reproducers hit it all the time.
+
+		var wg sync.WaitGroup
+
+		buildAndRemove := func(i int) {
+			defer GinkgoRecover()
+			defer wg.Done()
+			imageName := fmt.Sprintf("rmtest:%d", i)
+			containerfile := `FROM quay.io/libpod/cirros:latest
+RUN ` + fmt.Sprintf("touch %s", imageName)
+
+			podmanTest.BuildImage(containerfile, imageName, "false")
+			session := podmanTest.Podman([]string{"rmi", "-f", imageName})
+			session.WaitWithDefaultTimeout()
+			Expect(session).Should(Exit(0))
+		}
+
+		wg.Add(10)
+		for i := 0; i < 10; i++ {
+			go buildAndRemove(i)
+		}
+		wg.Wait()
+	})
 })
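The test above drives ten concurrent build-and-remove cycles through the Ginkgo harness. For experimenting with the race outside the test suite, here is a rough standalone sketch; it is a simplified variant that tags the shared cirros image instead of building on top of it, and it assumes a `podman` binary on PATH with `quay.io/libpod/cirros:latest` already pulled.

package main

import (
	"fmt"
	"os/exec"
	"sync"
)

func main() {
	const workers = 10

	var wg sync.WaitGroup
	wg.Add(workers)
	for i := 0; i < workers; i++ {
		go func(i int) {
			defer wg.Done()
			tag := fmt.Sprintf("rmtest:%d", i)

			// Give every worker its own tag on the shared base image ...
			if out, err := exec.Command("podman", "tag", "quay.io/libpod/cirros:latest", tag).CombinedOutput(); err != nil {
				fmt.Printf("tag %s failed: %v\n%s", tag, err, out)
				return
			}
			// ... then force-remove in parallel; before this PR such racing
			// removals could fail with "image not known" (#6510).
			if out, err := exec.Command("podman", "rmi", "-f", tag).CombinedOutput(); err != nil {
				fmt.Printf("rmi %s failed: %v\n%s", tag, err, out)
			}
		}(i)
	}
	wg.Wait()
}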