mirror of https://github.com/containers/podman.git
Do not lock all containers during pod start
This solves a nasty locking issue with getting the path of namespaces for dependencies.

Signed-off-by: Matthew Heon <matthew.heon@gmail.com>
Closes: #600
Approved by: rhatdan
parent 77a1665c05
commit 542f8fe98d
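For context on the "nasty locking issue": the old Pod.Start (removed in the second hunk below) took and deferred every container's lock up front, so any code path that then needed a dependency's lock again, such as resolving a dependency's namespace path, could deadlock on a non-reentrant mutex. A minimal standalone Go sketch of that failure mode, with invented names and not podman code; TryLock is used only so the program reports the deadlock instead of hanging:

package main

import (
	"fmt"
	"sync"
)

type ctr struct {
	name string
	lock sync.Mutex
}

// namespacePath stands in for any helper that must take the
// dependency's own lock to read its state.
func namespacePath(dep *ctr) string {
	if !dep.lock.TryLock() { // a plain Lock() here would block forever
		return fmt.Sprintf("<deadlock: %s is already locked>", dep.name)
	}
	defer dep.lock.Unlock()
	return "/proc/self/ns/net" // placeholder value
}

func main() {
	infra := &ctr{name: "infra"}
	app := &ctr{name: "app"}
	_ = app

	// Old pattern: lock *every* container for the whole pod start.
	for _, c := range []*ctr{infra, app} {
		c.lock.Lock()
		defer c.lock.Unlock()
	}

	// Starting "app" needs its dependency's namespace path, but that
	// lock is already held by the loop above.
	fmt.Println(namespacePath(infra))
}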
@@ -2,6 +2,7 @@ package libpod
 
 import (
 	"path/filepath"
+	"strings"
 
 	"github.com/containers/storage"
 	"github.com/docker/docker/pkg/stringid"
@@ -93,16 +94,6 @@ func (p *Pod) Start() (map[string]error, error) {
 		return nil, err
 	}
 
-	// We need to lock all the containers
-	for _, ctr := range allCtrs {
-		ctr.lock.Lock()
-		defer ctr.lock.Unlock()
-
-		if err := ctr.syncContainer(); err != nil {
-			return nil, err
-		}
-	}
-
	// Build a dependency graph of containers in the pod
	graph, err := buildContainerGraph(allCtrs)
	if err != nil {
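The hunk header above shows that Start returns a per-container error map alongside a pod-level error. A hedged caller sketch (the interface, package, and function names here are invented for illustration; the concrete type would be libpod's Pod):

package podutil // hypothetical helper package

import "fmt"

// podStarter mirrors the Start signature visible in the hunk header.
type podStarter interface {
	Start() (map[string]error, error)
}

// startAndReport distinguishes a pod-level failure from individual
// container start failures collected in the returned map.
func startAndReport(p podStarter) error {
	ctrErrors, err := p.Start()
	if err != nil {
		return err // e.g. building the dependency graph failed
	}
	for id, cerr := range ctrErrors {
		fmt.Printf("container %s failed to start: %v\n", id, cerr)
	}
	return nil
}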
@@ -165,10 +156,32 @@ func startNode(node *containerNode, setError bool, ctrErrors map[string]error, ctrsVisited map[string]bool) {
 	// Going to start the container, mark us as visited
 	ctrsVisited[node.id] = true
 
-	// TODO: Maybe have a checkDependenciesRunningLocked here?
-	// Graph traversal should ensure our deps have been started, but some
-	// might have stopped since?
-	// Potentially will hurt our perf, though
+	// Lock before we start
+	node.container.lock.Lock()
+
+	// Check if dependencies are running
+	// Graph traversal means we should have started them
+	// But they could have died before we got here
+	depsStopped, err := node.container.checkDependenciesRunning()
+	if err != nil {
+		node.container.lock.Unlock()
+
+		ctrErrors[node.id] = err
+		for _, successor := range node.dependedOn {
+			startNode(successor, true, ctrErrors, ctrsVisited)
+		}
+		return
+	} else if len(depsStopped) > 0 {
+		node.container.lock.Unlock()
+
+		// Our dependencies are not running
+		depsList := strings.Join(depsStopped, ",")
+		ctrErrors[node.id] = errors.Wrapf(ErrCtrStateInvalid, "the following dependencies of container %s are not running: %s", node.id, depsList)
+		for _, successor := range node.dependedOn {
+			startNode(successor, true, ctrErrors, ctrsVisited)
+		}
+		return
+	}
 
 	// Start the container (only if it is not running)
 	ctrErrored := false
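The hunk above and the final hunk below together establish the replacement discipline: lock only the node being started, re-check its dependencies under that lock, and release the lock before recursing into successors, so no lock is ever held across a recursive call. A condensed standalone sketch of that shape (all names invented; the real code also propagates the failure flag to successors via setError, and a fuller version would lock each dependency before reading its state):

package main

import (
	"fmt"
	"sync"
)

type node struct {
	id         string
	lock       sync.Mutex
	running    bool
	deps       []*node // must be running before this node starts
	successors []*node // nodes that depend on this one
}

func startNode(n *node, errs map[string]error) {
	n.lock.Lock()

	// Graph order should have started our deps, but they may have
	// died since; re-check before starting.
	for _, d := range n.deps {
		if !d.running {
			n.lock.Unlock()
			errs[n.id] = fmt.Errorf("dependency %s of %s is not running", d.id, n.id)
			return
		}
	}

	n.running = true // stand-in for the actual container start
	n.lock.Unlock()  // drop our lock before touching successors

	for _, s := range n.successors {
		startNode(s, errs)
	}
}

func main() {
	infra := &node{id: "infra"}
	app := &node{id: "app", deps: []*node{infra}}
	infra.successors = []*node{app}

	errs := map[string]error{}
	startNode(infra, errs)
	fmt.Println(app.running, errs) // true map[]
}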
@@ -179,6 +192,8 @@ func startNode(node *containerNode, setError bool, ctrErrors map[string]error, ctrsVisited map[string]bool) {
 		}
 	}
 
+	node.container.lock.Unlock()
+
 	// Recurse to anyone who depends on us and start them
 	for _, successor := range node.dependedOn {
 		startNode(successor, ctrErrored, ctrErrors, ctrsVisited)