Convert containers to SHM locking

Signed-off-by: Matthew Heon <matthew.heon@gmail.com>
Matthew Heon 2018-08-23 15:13:41 -04:00 committed by Matthew Heon
parent a364b656ea
commit 3de560053f
7 changed files with 59 additions and 19 deletions

View File

@@ -288,10 +288,9 @@ func (s *BoltState) getContainerFromDB(id []byte, ctr *Container, ctrsBkt *bolt.
}
// Get the lock
lockPath := filepath.Join(s.runtime.lockDir, string(id))
lock, err := storage.GetLockfile(lockPath)
lock, err := s.runtime.lockManager.RetrieveLock(ctr.config.LockID)
if err != nil {
return errors.Wrapf(err, "error retrieving lockfile for container %s", string(id))
return errors.Wrapf(err, "error retrieving lock for container %s", string(id))
}
ctr.lock = lock

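The hunk above retrieves a lock by numeric ID instead of opening a lockfile by path. The lock package itself is not part of this diff; reconstructed from the call sites in this commit (AllocateLock, RetrieveLock, ID, Lock, Unlock, Free), its interfaces plausibly look like the following sketch, which is inferred rather than authoritative:

// Inferred sketch of the lock package interfaces, reconstructed from
// the call sites in this commit. Not the authoritative definitions.
package lock

// Manager allocates SHM-backed locks and hands them out by numeric ID.
type Manager interface {
	// AllocateLock reserves a currently unused lock.
	AllocateLock() (Locker, error)
	// RetrieveLock returns the already-allocated lock whose ID was
	// stored in the container's config (Config.LockID).
	RetrieveLock(id uint32) (Locker, error)
}

// Locker is a single allocated lock.
type Locker interface {
	// ID identifies the lock; it is persisted as Config.LockID.
	ID() uint32
	// Lock and Unlock provide mutual exclusion on the container.
	Lock()
	Unlock()
	// Free returns the lock to the manager for reuse.
	Free() error
}
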
View File

@@ -9,6 +9,7 @@ import (
"github.com/containernetworking/cni/pkg/types"
cnitypes "github.com/containernetworking/cni/pkg/types/current"
"github.com/containers/libpod/libpod/lock"
"github.com/containers/libpod/pkg/namespaces"
"github.com/containers/storage"
"github.com/cri-o/ocicni/pkg/ocicni"
@@ -122,7 +123,7 @@ type Container struct {
batched bool
valid bool
lock storage.Locker
lock lock.Locker
runtime *Runtime
rootlessSlirpSyncR *os.File
@@ -211,6 +212,8 @@ type Config struct {
Pod string `json:"pod,omitempty"`
// Namespace the container is in
Namespace string `json:"namespace,omitempty"`
// ID of this container's lock
LockID uint32 `json:"lockID"`
// TODO consider breaking these subsections up into smaller structs

View File

@@ -1275,6 +1275,8 @@ func easyjson1dbef17bDecodeGithubComContainersLibpodLibpod2(in *jlexer.Lexer, ou
out.Pod = string(in.String())
case "namespace":
out.Namespace = string(in.String())
case "lockID":
out.LockID = uint32(in.Uint32())
case "idMappingsOptions":
easyjson1dbef17bDecodeGithubComContainersLibpodVendorGithubComContainersStorage(in, &out.IDMappings)
case "rootfsImageID":
@@ -1778,6 +1780,16 @@ func easyjson1dbef17bEncodeGithubComContainersLibpodLibpod2(out *jwriter.Writer,
}
out.String(string(in.Namespace))
}
{
const prefix string = ",\"lockID\":"
if first {
first = false
out.RawString(prefix[1:])
} else {
out.RawString(prefix)
}
out.Uint32(uint32(in.LockID))
}
if true {
const prefix string = ",\"idMappingsOptions\":"
if first {

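The generated code above round-trips the new field exactly as the struct tag from the Config change dictates. A minimal runnable illustration of the same behavior with the standard library's encoding/json, reduced to the two relevant fields:

// Round-trip of the new lockID field using encoding/json; this mirrors
// what the generated easyjson code does for the same struct tags.
package main

import (
	"encoding/json"
	"fmt"
)

type Config struct {
	Namespace string `json:"namespace,omitempty"`
	LockID    uint32 `json:"lockID"`
}

func main() {
	in := Config{Namespace: "default", LockID: 42}
	data, err := json.Marshal(in)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(data)) // {"namespace":"default","lockID":42}

	var out Config
	if err := json.Unmarshal(data, &out); err != nil {
		panic(err)
	}
	fmt.Println(out.LockID) // 42
}

Note that lockID carries no omitempty and the generated encoder writes it unconditionally, so a lock ID of 0 still round-trips rather than being dropped.
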
View File

@@ -401,7 +401,10 @@ func resetState(state *containerState) error {
return nil
}
// Refresh refreshes the container's state after a restart
// Refresh refreshes the container's state after a restart.
// Refresh cannot perform any operations that would lock another container.
// We cannot guarantee that any other container has a valid lock while
// refresh is running.
func (c *Container) refresh() error {
// Don't need a full sync, but we do need to update from the database to
// pick up potentially-missing container state
@@ -447,6 +450,14 @@ func (c *Container) refresh() error {
c.state.DestinationRunDir = filepath.Join(c.state.UserNSRoot, "rundir")
}
// We need to pick up a new lock
lock, err := c.runtime.lockManager.AllocateLock()
if err != nil {
return errors.Wrapf(err, "error acquiring lock for container %s", c.ID())
}
c.lock = lock
c.config.LockID = c.lock.ID()
if err := c.save(); err != nil {
return errors.Wrapf(err, "error refreshing state for container %s", c.ID())
}

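The ordering in the hunk above matters: the freshly allocated lock ID is written back via save() before refresh returns, so a later RetrieveLock(c.config.LockID) sees the new ID rather than the stale pre-restart one. Condensed into a sketch (Manager is the interface sketched earlier, Config the struct from container.go, and save a stand-in for the database write):

// Condensed model of the refresh() change: after a restart the old SHM
// lock is gone, so the container takes a fresh lock and persists the
// new ID before anything else can load it.
func refreshLock(mgr Manager, cfg *Config, save func() error) error {
	lock, err := mgr.AllocateLock()
	if err != nil {
		return err
	}
	cfg.LockID = lock.ID()
	// Persist before returning: a later RetrieveLock(cfg.LockID) must
	// observe the new ID. (The live Container also keeps the Locker
	// itself in c.lock.)
	return save()
}
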
View File

@@ -830,19 +830,22 @@ func (r *Runtime) refresh(alivePath string) error {
if err != nil {
return errors.Wrapf(err, "error retrieving all pods from state")
}
// No locks are taken during pod and container refresh.
// Furthermore, the pod and container refresh() functions are not
// allowed to take locks themselves.
// We cannot assume that any pod or container has a valid lock until
// after this function has returned.
// The runtime alive lock should suffice to provide mutual exclusion
// until this has run.
for _, ctr := range ctrs {
ctr.lock.Lock()
if err := ctr.refresh(); err != nil {
logrus.Errorf("Error refreshing container %s: %v", ctr.ID(), err)
}
ctr.lock.Unlock()
}
for _, pod := range pods {
pod.lock.Lock()
if err := pod.refresh(); err != nil {
logrus.Errorf("Error refreshing pod %s: %v", pod.ID(), err)
}
pod.lock.Unlock()
}
// Create a file indicating the runtime is alive and ready

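Two runtime-wide mechanisms stand in for per-object locks here: the runtime alive lock held across refresh, and the alive file (alivePath) whose absence signals a restart. A minimal sketch of the sentinel-file half of that pattern; the path and function names are assumptions for illustration, not taken from this diff:

// Sketch of the alive-file pattern: a missing sentinel means the
// system restarted and state must be refreshed before anything takes
// container locks. Path and names are illustrative assumptions.
package main

import (
	"fmt"
	"os"
)

// needsRefresh reports whether the sentinel file is absent.
func needsRefresh(alivePath string) (bool, error) {
	_, err := os.Stat(alivePath)
	if err == nil {
		return false, nil // sentinel present: no restart since last run
	}
	if os.IsNotExist(err) {
		return true, nil // sentinel missing: refresh required
	}
	return false, err
}

// markAlive creates the sentinel once refresh has completed.
func markAlive(alivePath string) error {
	f, err := os.Create(alivePath)
	if err != nil {
		return err
	}
	return f.Close()
}

func main() {
	const alivePath = "/tmp/libpod-alive" // assumed location
	refresh, err := needsRefresh(alivePath)
	if err != nil {
		panic(err)
	}
	if refresh {
		// ... refresh containers and pods here, then:
		if err := markAlive(alivePath); err != nil {
			panic(err)
		}
	}
	fmt.Println("refresh needed:", refresh)
}
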
View File

@@ -9,7 +9,6 @@ import (
"time"
"github.com/containers/libpod/pkg/rootless"
"github.com/containers/storage"
"github.com/containers/storage/pkg/stringid"
spec "github.com/opencontainers/runtime-spec/specs-go"
"github.com/pkg/errors"
@@ -61,15 +60,6 @@ func (r *Runtime) newContainer(ctx context.Context, rSpec *spec.Spec, options ..
ctr.state.BindMounts = make(map[string]string)
// Path our lock file will reside at
lockPath := filepath.Join(r.lockDir, ctr.config.ID)
// Grab a lockfile at the given path
lock, err := storage.GetLockfile(lockPath)
if err != nil {
return nil, errors.Wrapf(err, "error creating lockfile for new container")
}
ctr.lock = lock
ctr.config.StopTimeout = CtrRemoveTimeout
// Set namespace based on current runtime namespace
@@ -85,6 +75,14 @@ func (r *Runtime) newContainer(ctx context.Context, rSpec *spec.Spec, options ..
}
}
// Allocate a lock for the container
lock, err := r.lockManager.AllocateLock()
if err != nil {
return nil, errors.Wrapf(err, "error allocating lock for new container")
}
ctr.lock = lock
ctr.config.LockID = ctr.lock.ID()
ctr.valid = true
ctr.state.State = ContainerStateConfigured
@@ -379,6 +377,15 @@ func (r *Runtime) removeContainer(ctx context.Context, c *Container, force bool)
}
}
// Deallocate the container's lock
if err := c.lock.Free(); err != nil {
if cleanupErr == nil {
cleanupErr = err
} else {
logrus.Errorf("free container lock: %v", err)
}
}
return cleanupErr
}

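Together with the BoltDB change at the top of this commit, this file completes the lock's lifecycle: allocate at creation and record the ID, re-attach by ID on load, free on removal. A runnable demonstration of that lifecycle against a toy in-memory manager, standing in for the real SHM-backed one:

// Demonstration of the allocate / retrieve / free lifecycle this
// commit wires into container creation, load, and removal. The
// in-memory manager is illustrative only.
package main

import "fmt"

type memLock struct {
	id  uint32
	mgr *memManager
}

func (l *memLock) ID() uint32  { return l.id }
func (l *memLock) Free() error { delete(l.mgr.locks, l.id); return nil }

type memManager struct {
	next  uint32
	locks map[uint32]*memLock
}

func newMemManager() *memManager {
	return &memManager{locks: make(map[uint32]*memLock)}
}

func (m *memManager) AllocateLock() (*memLock, error) {
	l := &memLock{id: m.next, mgr: m}
	m.locks[l.id] = l
	m.next++
	return l, nil
}

func (m *memManager) RetrieveLock(id uint32) (*memLock, error) {
	l, ok := m.locks[id]
	if !ok {
		return nil, fmt.Errorf("no lock with ID %d", id)
	}
	return l, nil
}

func main() {
	mgr := newMemManager()

	// Container creation: allocate and record the ID in the config.
	lock, _ := mgr.AllocateLock()
	lockID := lock.ID() // persisted as Config.LockID

	// Container load from the database: re-attach by ID.
	lock, err := mgr.RetrieveLock(lockID)
	if err != nil {
		panic(err)
	}

	// Container removal: return the lock for reuse.
	if err := lock.Free(); err != nil {
		panic(err)
	}
	fmt.Println("lifecycle complete for lock", lockID)
}
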
View File

@@ -239,6 +239,11 @@ func (r *Runtime) removePod(ctx context.Context, p *Pod, removeCtrs, force bool)
return err
}
}
// Free the container's lock
if err := ctr.lock.Free(); err != nil {
return err
}
}
// Remove containers from the state
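Note the two error-handling styles in this commit: removeContainer folds a Free() failure into cleanupErr so the rest of cleanup still runs, while this pod path returns on the first failure. The accumulate-and-log pattern as a standalone sketch (Locker matches the interface sketched earlier; the logrus dependency is assumed):

// freeAll frees every lock, keeping the first error for the caller and
// logging the rest, mirroring the removeContainer cleanup pattern.
package cleanup

import "github.com/sirupsen/logrus"

// Locker is the subset of the lock interface needed here.
type Locker interface {
	Free() error
}

func freeAll(locks []Locker) error {
	var cleanupErr error
	for _, l := range locks {
		if err := l.Free(); err != nil {
			if cleanupErr == nil {
				cleanupErr = err
			} else {
				logrus.Errorf("Error freeing lock: %v", err)
			}
		}
	}
	return cleanupErr
}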