Move layer mount refcounts to mountedLayer

Instead of implementing refcounts at each graphdriver, implement this in
the layer package which is what the engine actually interacts with now.
This means interacting directly with the graphdriver is no longer
explicitly safe with regard to Get/Put calls being refcounted.

In addition, with containerd, layers may still be mounted after
a daemon restart since we will no longer explicitly kill containers when
we shut down or start up the engine.
Because of this, ref counts would need to be repopulated.

Signed-off-by: Brian Goff <cpuguy83@gmail.com>
This commit is contained in:
Brian Goff 2016-03-09 16:23:04 -05:00 committed by Anusha Ragunathan
parent 57ca2a2101
commit 65d79e3e5e
14 changed files with 359 additions and 366 deletions

View File

@ -222,6 +222,7 @@ func (daemon *Daemon) exportContainerRw(container *container.Container) (archive
archive, err := container.RWLayer.TarStream() archive, err := container.RWLayer.TarStream()
if err != nil { if err != nil {
daemon.Unmount(container) // logging is already handled in the `Unmount` function
return nil, err return nil, err
} }
return ioutils.NewReadCloserWrapper(archive, func() error { return ioutils.NewReadCloserWrapper(archive, func() error {

View File

@ -29,6 +29,7 @@ import (
"os" "os"
"os/exec" "os/exec"
"path" "path"
"path/filepath"
"strings" "strings"
"sync" "sync"
"syscall" "syscall"
@ -64,21 +65,13 @@ func init() {
graphdriver.Register("aufs", Init) graphdriver.Register("aufs", Init)
} }
type data struct {
referenceCount int
path string
}
// Driver contains information about the filesystem mounted. // Driver contains information about the filesystem mounted.
// root of the filesystem
// sync.Mutex to protect against concurrent modifications
// active maps mount id to the count
type Driver struct { type Driver struct {
root string root string
uidMaps []idtools.IDMap uidMaps []idtools.IDMap
gidMaps []idtools.IDMap gidMaps []idtools.IDMap
sync.Mutex // Protects concurrent modification to active pathCacheLock sync.Mutex
active map[string]*data pathCache map[string]string
} }
// Init returns a new AUFS driver. // Init returns a new AUFS driver.
@ -112,9 +105,9 @@ func Init(root string, options []string, uidMaps, gidMaps []idtools.IDMap) (grap
a := &Driver{ a := &Driver{
root: root, root: root,
active: make(map[string]*data),
uidMaps: uidMaps, uidMaps: uidMaps,
gidMaps: gidMaps, gidMaps: gidMaps,
pathCache: make(map[string]string),
} }
rootUID, rootGID, err := idtools.GetRootUIDGID(uidMaps, gidMaps) rootUID, rootGID, err := idtools.GetRootUIDGID(uidMaps, gidMaps)
@ -228,9 +221,7 @@ func (a *Driver) Create(id, parent, mountLabel string) error {
} }
} }
} }
a.Lock()
a.active[id] = &data{}
a.Unlock()
return nil return nil
} }
@ -259,108 +250,91 @@ func (a *Driver) createDirsFor(id string) error {
// Remove will unmount and remove the given id. // Remove will unmount and remove the given id.
func (a *Driver) Remove(id string) error { func (a *Driver) Remove(id string) error {
// Protect the a.active from concurrent access a.pathCacheLock.Lock()
a.Lock() mountpoint, exists := a.pathCache[id]
defer a.Unlock() a.pathCacheLock.Unlock()
if !exists {
m := a.active[id] mountpoint = a.getMountpoint(id)
if m != nil {
if m.referenceCount > 0 {
return nil
} }
// Make sure the dir is umounted first if err := a.unmount(mountpoint); err != nil {
if err := a.unmount(m); err != nil { // no need to return here, we can still try to remove since the `Rename` will fail below if still mounted
return err logrus.Debugf("aufs: error while unmounting %s: %v", mountpoint, err)
}
}
tmpDirs := []string{
"mnt",
"diff",
} }
// Atomically remove each directory in turn by first moving it out of the // Atomically remove each directory in turn by first moving it out of the
// way (so that docker doesn't find it anymore) before doing removal of // way (so that docker doesn't find it anymore) before doing removal of
// the whole tree. // the whole tree.
for _, p := range tmpDirs { tmpMntPath := path.Join(a.mntPath(), fmt.Sprintf("%s-removing", id))
realPath := path.Join(a.rootPath(), p, id) if err := os.Rename(mountpoint, tmpMntPath); err != nil && !os.IsNotExist(err) {
tmpPath := path.Join(a.rootPath(), p, fmt.Sprintf("%s-removing", id))
if err := os.Rename(realPath, tmpPath); err != nil && !os.IsNotExist(err) {
return err return err
} }
defer os.RemoveAll(tmpPath) defer os.RemoveAll(tmpMntPath)
tmpDiffpath := path.Join(a.diffPath(), fmt.Sprintf("%s-removing", id))
if err := os.Rename(a.getDiffPath(id), tmpDiffpath); err != nil && !os.IsNotExist(err) {
return err
} }
defer os.RemoveAll(tmpDiffpath)
// Remove the layers file for the id // Remove the layers file for the id
if err := os.Remove(path.Join(a.rootPath(), "layers", id)); err != nil && !os.IsNotExist(err) { if err := os.Remove(path.Join(a.rootPath(), "layers", id)); err != nil && !os.IsNotExist(err) {
return err return err
} }
if m != nil {
delete(a.active, id) a.pathCacheLock.Lock()
} delete(a.pathCache, id)
a.pathCacheLock.Unlock()
return nil return nil
} }
// Get returns the rootfs path for the id. // Get returns the rootfs path for the id.
// This will mount the dir at it's given path // This will mount the dir at it's given path
func (a *Driver) Get(id, mountLabel string) (string, error) { func (a *Driver) Get(id, mountLabel string) (string, error) {
// Protect the a.active from concurrent access
a.Lock()
defer a.Unlock()
m := a.active[id]
if m == nil {
m = &data{}
a.active[id] = m
}
parents, err := a.getParentLayerPaths(id) parents, err := a.getParentLayerPaths(id)
if err != nil && !os.IsNotExist(err) { if err != nil && !os.IsNotExist(err) {
return "", err return "", err
} }
a.pathCacheLock.Lock()
m, exists := a.pathCache[id]
a.pathCacheLock.Unlock()
if !exists {
m = a.getDiffPath(id)
if len(parents) > 0 {
m = a.getMountpoint(id)
}
}
// If a dir does not have a parent ( no layers )do not try to mount // If a dir does not have a parent ( no layers )do not try to mount
// just return the diff path to the data // just return the diff path to the data
m.path = path.Join(a.rootPath(), "diff", id)
if len(parents) > 0 { if len(parents) > 0 {
m.path = path.Join(a.rootPath(), "mnt", id)
if m.referenceCount == 0 {
if err := a.mount(id, m, mountLabel, parents); err != nil { if err := a.mount(id, m, mountLabel, parents); err != nil {
return "", err return "", err
} }
} }
}
m.referenceCount++ a.pathCacheLock.Lock()
return m.path, nil a.pathCache[id] = m
a.pathCacheLock.Unlock()
return m, nil
} }
// Put unmounts and updates list of active mounts. // Put unmounts and updates list of active mounts.
func (a *Driver) Put(id string) error { func (a *Driver) Put(id string) error {
// Protect the a.active from concurrent access a.pathCacheLock.Lock()
a.Lock() m, exists := a.pathCache[id]
defer a.Unlock() if !exists {
m = a.getMountpoint(id)
a.pathCache[id] = m
}
a.pathCacheLock.Unlock()
m := a.active[id] err := a.unmount(m)
if m == nil {
// but it might be still here
if a.Exists(id) {
path := path.Join(a.rootPath(), "mnt", id)
err := Unmount(path)
if err != nil { if err != nil {
logrus.Debugf("Failed to unmount %s aufs: %v", id, err) logrus.Debugf("Failed to unmount %s aufs: %v", id, err)
} }
} return err
return nil
}
if count := m.referenceCount; count > 1 {
m.referenceCount = count - 1
} else {
ids, _ := getParentIds(a.rootPath(), id)
// We only mounted if there are any parents
if ids != nil && len(ids) > 0 {
a.unmount(m)
}
delete(a.active, id)
}
return nil
} }
// Diff produces an archive of the changes between the specified // Diff produces an archive of the changes between the specified
@ -443,16 +417,13 @@ func (a *Driver) getParentLayerPaths(id string) ([]string, error) {
return layers, nil return layers, nil
} }
func (a *Driver) mount(id string, m *data, mountLabel string, layers []string) error { func (a *Driver) mount(id string, target string, mountLabel string, layers []string) error {
// If the id is mounted or we get an error return // If the id is mounted or we get an error return
if mounted, err := a.mounted(m); err != nil || mounted { if mounted, err := a.mounted(target); err != nil || mounted {
return err return err
} }
var ( rw := a.getDiffPath(id)
target = m.path
rw = path.Join(a.rootPath(), "diff", id)
)
if err := a.aufsMount(layers, rw, target, mountLabel); err != nil { if err := a.aufsMount(layers, rw, target, mountLabel); err != nil {
return fmt.Errorf("error creating aufs mount to %s: %v", target, err) return fmt.Errorf("error creating aufs mount to %s: %v", target, err)
@ -460,26 +431,39 @@ func (a *Driver) mount(id string, m *data, mountLabel string, layers []string) e
return nil return nil
} }
func (a *Driver) unmount(m *data) error { func (a *Driver) unmount(mountPath string) error {
if mounted, err := a.mounted(m); err != nil || !mounted { if mounted, err := a.mounted(mountPath); err != nil || !mounted {
return err return err
} }
return Unmount(m.path) if err := Unmount(mountPath); err != nil {
return err
}
return nil
} }
func (a *Driver) mounted(m *data) (bool, error) { func (a *Driver) mounted(mountpoint string) (bool, error) {
var buf syscall.Statfs_t return graphdriver.Mounted(graphdriver.FsMagicAufs, mountpoint)
if err := syscall.Statfs(m.path, &buf); err != nil {
return false, nil
}
return graphdriver.FsMagic(buf.Type) == graphdriver.FsMagicAufs, nil
} }
// Cleanup aufs and unmount all mountpoints // Cleanup aufs and unmount all mountpoints
func (a *Driver) Cleanup() error { func (a *Driver) Cleanup() error {
for id, m := range a.active { var dirs []string
if err := filepath.Walk(a.mntPath(), func(path string, info os.FileInfo, err error) error {
if err != nil {
return err
}
if !info.IsDir() {
return nil
}
dirs = append(dirs, path)
return nil
}); err != nil {
return err
}
for _, m := range dirs {
if err := a.unmount(m); err != nil { if err := a.unmount(m); err != nil {
logrus.Errorf("Unmounting %s: %s", stringid.TruncateID(id), err) logrus.Debugf("aufs error unmounting %s: %s", stringid.TruncateID(m), err)
} }
} }
return mountpk.Unmount(a.root) return mountpk.Unmount(a.root)

View File

@ -200,7 +200,7 @@ func TestMountedFalseResponse(t *testing.T) {
t.Fatal(err) t.Fatal(err)
} }
response, err := d.mounted(d.active["1"]) response, err := d.mounted(d.getDiffPath("1"))
if err != nil { if err != nil {
t.Fatal(err) t.Fatal(err)
} }
@ -227,7 +227,7 @@ func TestMountedTrueReponse(t *testing.T) {
t.Fatal(err) t.Fatal(err)
} }
response, err := d.mounted(d.active["2"]) response, err := d.mounted(d.pathCache["2"])
if err != nil { if err != nil {
t.Fatal(err) t.Fatal(err)
} }
@ -293,7 +293,7 @@ func TestRemoveMountedDir(t *testing.T) {
t.Fatal("mntPath should not be empty string") t.Fatal("mntPath should not be empty string")
} }
mounted, err := d.mounted(d.active["2"]) mounted, err := d.mounted(d.pathCache["2"])
if err != nil { if err != nil {
t.Fatal(err) t.Fatal(err)
} }

View File

@ -46,3 +46,19 @@ func getParentIds(root, id string) ([]string, error) {
} }
return out, s.Err() return out, s.Err()
} }
func (a *Driver) getMountpoint(id string) string {
return path.Join(a.mntPath(), id)
}
func (a *Driver) mntPath() string {
return path.Join(a.rootPath(), "mnt")
}
func (a *Driver) getDiffPath(id string) string {
return path.Join(a.diffPath(), id)
}
func (a *Driver) diffPath() string {
return path.Join(a.rootPath(), "diff")
}

View File

@ -69,9 +69,6 @@ type devInfo struct {
Deleted bool `json:"deleted"` Deleted bool `json:"deleted"`
devices *DeviceSet devices *DeviceSet
mountCount int
mountPath string
// The global DeviceSet lock guarantees that we serialize all // The global DeviceSet lock guarantees that we serialize all
// the calls to libdevmapper (which is not threadsafe), but we // the calls to libdevmapper (which is not threadsafe), but we
// sometimes release that lock while sleeping. In that case // sometimes release that lock while sleeping. In that case
@ -1991,13 +1988,6 @@ func (devices *DeviceSet) DeleteDevice(hash string, syncDelete bool) error {
devices.Lock() devices.Lock()
defer devices.Unlock() defer devices.Unlock()
// If mountcount is not zero, that means devices is still in use
// or has not been Put() properly. Fail device deletion.
if info.mountCount != 0 {
return fmt.Errorf("devmapper: Can't delete device %v as it is still mounted. mntCount=%v", info.Hash, info.mountCount)
}
return devices.deleteDevice(info, syncDelete) return devices.deleteDevice(info, syncDelete)
} }
@ -2116,13 +2106,11 @@ func (devices *DeviceSet) cancelDeferredRemoval(info *devInfo) error {
} }
// Shutdown shuts down the device by unmounting the root. // Shutdown shuts down the device by unmounting the root.
func (devices *DeviceSet) Shutdown() error { func (devices *DeviceSet) Shutdown(home string) error {
logrus.Debugf("devmapper: [deviceset %s] Shutdown()", devices.devicePrefix) logrus.Debugf("devmapper: [deviceset %s] Shutdown()", devices.devicePrefix)
logrus.Debugf("devmapper: Shutting down DeviceSet: %s", devices.root) logrus.Debugf("devmapper: Shutting down DeviceSet: %s", devices.root)
defer logrus.Debugf("devmapper: [deviceset %s] Shutdown() END", devices.devicePrefix) defer logrus.Debugf("devmapper: [deviceset %s] Shutdown() END", devices.devicePrefix)
var devs []*devInfo
// Stop deletion worker. This should start delivering new events to // Stop deletion worker. This should start delivering new events to
// ticker channel. That means no new instance of cleanupDeletedDevice() // ticker channel. That means no new instance of cleanupDeletedDevice()
// will run after this call. If one instance is already running at // will run after this call. If one instance is already running at
@ -2139,29 +2127,45 @@ func (devices *DeviceSet) Shutdown() error {
// metadata. Hence save this early before trying to deactivate devices. // metadata. Hence save this early before trying to deactivate devices.
devices.saveDeviceSetMetaData() devices.saveDeviceSetMetaData()
for _, info := range devices.Devices { // ignore the error since it's just a best effort to not try to unmount something that's mounted
devs = append(devs, info) mounts, _ := mount.GetMounts()
mounted := make(map[string]bool, len(mounts))
for _, mnt := range mounts {
mounted[mnt.Mountpoint] = true
} }
devices.Unlock()
for _, info := range devs { if err := filepath.Walk(path.Join(home, "mnt"), func(p string, info os.FileInfo, err error) error {
info.lock.Lock() if err != nil {
if info.mountCount > 0 { return err
}
if !info.IsDir() {
return nil
}
if mounted[p] {
// We use MNT_DETACH here in case it is still busy in some running // We use MNT_DETACH here in case it is still busy in some running
// container. This means it'll go away from the global scope directly, // container. This means it'll go away from the global scope directly,
// and the device will be released when that container dies. // and the device will be released when that container dies.
if err := syscall.Unmount(info.mountPath, syscall.MNT_DETACH); err != nil { if err := syscall.Unmount(p, syscall.MNT_DETACH); err != nil {
logrus.Debugf("devmapper: Shutdown unmounting %s, error: %s", info.mountPath, err) logrus.Debugf("devmapper: Shutdown unmounting %s, error: %s", p, err)
}
} }
devices.Lock() if devInfo, err := devices.lookupDevice(path.Base(p)); err != nil {
if err := devices.deactivateDevice(info); err != nil { logrus.Debugf("devmapper: Shutdown lookup device %s, error: %s", path.Base(p), err)
logrus.Debugf("devmapper: Shutdown deactivate %s , error: %s", info.Hash, err) } else {
if err := devices.deactivateDevice(devInfo); err != nil {
logrus.Debugf("devmapper: Shutdown deactivate %s , error: %s", devInfo.Hash, err)
} }
}
return nil
}); err != nil && !os.IsNotExist(err) {
devices.Unlock() devices.Unlock()
return err
} }
info.lock.Unlock()
} devices.Unlock()
info, _ := devices.lookupDeviceWithLock("") info, _ := devices.lookupDeviceWithLock("")
if info != nil { if info != nil {
@ -2202,15 +2206,6 @@ func (devices *DeviceSet) MountDevice(hash, path, mountLabel string) error {
devices.Lock() devices.Lock()
defer devices.Unlock() defer devices.Unlock()
if info.mountCount > 0 {
if path != info.mountPath {
return fmt.Errorf("devmapper: Trying to mount devmapper device in multiple places (%s, %s)", info.mountPath, path)
}
info.mountCount++
return nil
}
if err := devices.activateDeviceIfNeeded(info, false); err != nil { if err := devices.activateDeviceIfNeeded(info, false); err != nil {
return fmt.Errorf("devmapper: Error activating devmapper device for '%s': %s", hash, err) return fmt.Errorf("devmapper: Error activating devmapper device for '%s': %s", hash, err)
} }
@ -2234,9 +2229,6 @@ func (devices *DeviceSet) MountDevice(hash, path, mountLabel string) error {
return fmt.Errorf("devmapper: Error mounting '%s' on '%s': %s", info.DevName(), path, err) return fmt.Errorf("devmapper: Error mounting '%s' on '%s': %s", info.DevName(), path, err)
} }
info.mountCount = 1
info.mountPath = path
return nil return nil
} }
@ -2256,20 +2248,6 @@ func (devices *DeviceSet) UnmountDevice(hash, mountPath string) error {
devices.Lock() devices.Lock()
defer devices.Unlock() defer devices.Unlock()
// If there are running containers when daemon crashes, during daemon
// restarting, it will kill running containers and will finally call
// Put() without calling Get(). So info.MountCount may become negative.
// if info.mountCount goes negative, we do the unmount and assign
// it to 0.
info.mountCount--
if info.mountCount > 0 {
return nil
} else if info.mountCount < 0 {
logrus.Warnf("devmapper: Mount count of device went negative. Put() called without matching Get(). Resetting count to 0")
info.mountCount = 0
}
logrus.Debugf("devmapper: Unmount(%s)", mountPath) logrus.Debugf("devmapper: Unmount(%s)", mountPath)
if err := syscall.Unmount(mountPath, syscall.MNT_DETACH); err != nil { if err := syscall.Unmount(mountPath, syscall.MNT_DETACH); err != nil {
return err return err
@ -2280,8 +2258,6 @@ func (devices *DeviceSet) UnmountDevice(hash, mountPath string) error {
return err return err
} }
info.mountPath = ""
return nil return nil
} }

View File

@ -108,7 +108,7 @@ func (d *Driver) GetMetadata(id string) (map[string]string, error) {
// Cleanup unmounts a device. // Cleanup unmounts a device.
func (d *Driver) Cleanup() error { func (d *Driver) Cleanup() error {
err := d.DeviceSet.Shutdown() err := d.DeviceSet.Shutdown(d.home)
if err2 := mount.Unmount(d.home); err == nil { if err2 := mount.Unmount(d.home); err == nil {
err = err2 err = err2

View File

@ -1,8 +1,19 @@
package graphdriver package graphdriver
import "syscall"
var ( var (
// Slice of drivers that should be used in an order // Slice of drivers that should be used in an order
priority = []string{ priority = []string{
"zfs", "zfs",
} }
) )
// Mounted checks if the given path is mounted as the fs type
func Mounted(fsType FsMagic, mountPath string) (bool, error) {
var buf syscall.Statfs_t
if err := syscall.Statfs(mountPath, &buf); err != nil {
return false, err
}
return FsMagic(buf.Type) == fsType, nil
}

View File

@ -42,6 +42,8 @@ const (
FsMagicXfs = FsMagic(0x58465342) FsMagicXfs = FsMagic(0x58465342)
// FsMagicZfs filesystem id for Zfs // FsMagicZfs filesystem id for Zfs
FsMagicZfs = FsMagic(0x2fc12fc1) FsMagicZfs = FsMagic(0x2fc12fc1)
// FsMagicOverlay filesystem id for overlay
FsMagicOverlay = FsMagic(0x794C7630)
) )
var ( var (
@ -86,3 +88,12 @@ func GetFSMagic(rootpath string) (FsMagic, error) {
} }
return FsMagic(buf.Type), nil return FsMagic(buf.Type), nil
} }
// Mounted checks if the given path is mounted as the fs type
func Mounted(fsType FsMagic, mountPath string) (bool, error) {
var buf syscall.Statfs_t
if err := syscall.Statfs(mountPath, &buf); err != nil {
return false, err
}
return FsMagic(buf.Type) == fsType, nil
}

View File

@ -88,19 +88,11 @@ func (d *naiveDiffDriverWithApply) ApplyDiff(id, parent string, diff archive.Rea
// of that. This means all child images share file (but not directory) // of that. This means all child images share file (but not directory)
// data with the parent. // data with the parent.
// ActiveMount contains information about the count, path and whether is mounted or not.
// This information is part of the Driver, that contains list of active mounts that are part of this overlay.
type ActiveMount struct {
count int
path string
mounted bool
}
// Driver contains information about the home directory and the list of active mounts that are created using this driver. // Driver contains information about the home directory and the list of active mounts that are created using this driver.
type Driver struct { type Driver struct {
home string home string
sync.Mutex // Protects concurrent modification to active pathCacheLock sync.Mutex
active map[string]*ActiveMount pathCache map[string]string
uidMaps []idtools.IDMap uidMaps []idtools.IDMap
gidMaps []idtools.IDMap gidMaps []idtools.IDMap
} }
@ -152,7 +144,7 @@ func Init(home string, options []string, uidMaps, gidMaps []idtools.IDMap) (grap
d := &Driver{ d := &Driver{
home: home, home: home,
active: make(map[string]*ActiveMount), pathCache: make(map[string]string),
uidMaps: uidMaps, uidMaps: uidMaps,
gidMaps: gidMaps, gidMaps: gidMaps,
} }
@ -325,23 +317,14 @@ func (d *Driver) Remove(id string) error {
if err := os.RemoveAll(d.dir(id)); err != nil && !os.IsNotExist(err) { if err := os.RemoveAll(d.dir(id)); err != nil && !os.IsNotExist(err) {
return err return err
} }
d.pathCacheLock.Lock()
delete(d.pathCache, id)
d.pathCacheLock.Unlock()
return nil return nil
} }
// Get creates and mounts the required file system for the given id and returns the mount path. // Get creates and mounts the required file system for the given id and returns the mount path.
func (d *Driver) Get(id string, mountLabel string) (string, error) { func (d *Driver) Get(id string, mountLabel string) (string, error) {
// Protect the d.active from concurrent access
d.Lock()
defer d.Unlock()
mount := d.active[id]
if mount != nil {
mount.count++
return mount.path, nil
}
mount = &ActiveMount{count: 1}
dir := d.dir(id) dir := d.dir(id)
if _, err := os.Stat(dir); err != nil { if _, err := os.Stat(dir); err != nil {
return "", err return "", err
@ -350,9 +333,10 @@ func (d *Driver) Get(id string, mountLabel string) (string, error) {
// If id has a root, just return it // If id has a root, just return it
rootDir := path.Join(dir, "root") rootDir := path.Join(dir, "root")
if _, err := os.Stat(rootDir); err == nil { if _, err := os.Stat(rootDir); err == nil {
mount.path = rootDir d.pathCacheLock.Lock()
d.active[id] = mount d.pathCache[id] = rootDir
return mount.path, nil d.pathCacheLock.Unlock()
return rootDir, nil
} }
lowerID, err := ioutil.ReadFile(path.Join(dir, "lower-id")) lowerID, err := ioutil.ReadFile(path.Join(dir, "lower-id"))
@ -388,42 +372,38 @@ func (d *Driver) Get(id string, mountLabel string) (string, error) {
if err := os.Chown(path.Join(workDir, "work"), rootUID, rootGID); err != nil { if err := os.Chown(path.Join(workDir, "work"), rootUID, rootGID); err != nil {
return "", err return "", err
} }
mount.path = mergedDir
mount.mounted = true
d.active[id] = mount
return mount.path, nil d.pathCacheLock.Lock()
d.pathCache[id] = mergedDir
d.pathCacheLock.Unlock()
return mergedDir, nil
}
func (d *Driver) mounted(dir string) (bool, error) {
return graphdriver.Mounted(graphdriver.FsMagicOverlay, dir)
} }
// Put unmounts the mount path created for the give id. // Put unmounts the mount path created for the give id.
func (d *Driver) Put(id string) error { func (d *Driver) Put(id string) error {
// Protect the d.active from concurrent access d.pathCacheLock.Lock()
d.Lock() mountpoint, exists := d.pathCache[id]
defer d.Unlock() d.pathCacheLock.Unlock()
mount := d.active[id] if !exists {
if mount == nil {
logrus.Debugf("Put on a non-mounted device %s", id) logrus.Debugf("Put on a non-mounted device %s", id)
// but it might be still here // but it might be still here
if d.Exists(id) { if d.Exists(id) {
mergedDir := path.Join(d.dir(id), "merged") mountpoint = path.Join(d.dir(id), "merged")
err := syscall.Unmount(mergedDir, 0)
if err != nil {
logrus.Debugf("Failed to unmount %s overlay: %v", id, err)
}
}
return nil
} }
mount.count-- d.pathCacheLock.Lock()
if mount.count > 0 { d.pathCache[id] = mountpoint
return nil d.pathCacheLock.Unlock()
} }
defer delete(d.active, id) if mounted, err := d.mounted(mountpoint); mounted || err != nil {
if mount.mounted { if err = syscall.Unmount(mountpoint, 0); err != nil {
err := syscall.Unmount(mount.path, 0)
if err != nil {
logrus.Debugf("Failed to unmount %s overlay: %v", id, err) logrus.Debugf("Failed to unmount %s overlay: %v", id, err)
} }
return err return err

View File

@ -13,7 +13,6 @@ import (
"path" "path"
"path/filepath" "path/filepath"
"strings" "strings"
"sync"
"syscall" "syscall"
"time" "time"
@ -47,10 +46,6 @@ const (
type Driver struct { type Driver struct {
// info stores the shim driver information // info stores the shim driver information
info hcsshim.DriverInfo info hcsshim.DriverInfo
// Mutex protects concurrent modification to active
sync.Mutex
// active stores references to the activated layers
active map[string]int
} }
var _ graphdriver.DiffGetterDriver = &Driver{} var _ graphdriver.DiffGetterDriver = &Driver{}
@ -63,7 +58,6 @@ func InitFilter(home string, options []string, uidMaps, gidMaps []idtools.IDMap)
HomeDir: home, HomeDir: home,
Flavour: filterDriver, Flavour: filterDriver,
}, },
active: make(map[string]int),
} }
return d, nil return d, nil
} }
@ -76,7 +70,6 @@ func InitDiff(home string, options []string, uidMaps, gidMaps []idtools.IDMap) (
HomeDir: home, HomeDir: home,
Flavour: diffDriver, Flavour: diffDriver,
}, },
active: make(map[string]int),
} }
return d, nil return d, nil
} }
@ -189,9 +182,6 @@ func (d *Driver) Get(id, mountLabel string) (string, error) {
logrus.Debugf("WindowsGraphDriver Get() id %s mountLabel %s", id, mountLabel) logrus.Debugf("WindowsGraphDriver Get() id %s mountLabel %s", id, mountLabel)
var dir string var dir string
d.Lock()
defer d.Unlock()
rID, err := d.resolveID(id) rID, err := d.resolveID(id)
if err != nil { if err != nil {
return "", err return "", err
@ -203,7 +193,6 @@ func (d *Driver) Get(id, mountLabel string) (string, error) {
return "", err return "", err
} }
if d.active[rID] == 0 {
if err := hcsshim.ActivateLayer(d.info, rID); err != nil { if err := hcsshim.ActivateLayer(d.info, rID); err != nil {
return "", err return "", err
} }
@ -213,7 +202,6 @@ func (d *Driver) Get(id, mountLabel string) (string, error) {
} }
return "", err return "", err
} }
}
mountPath, err := hcsshim.GetLayerMountPath(d.info, rID) mountPath, err := hcsshim.GetLayerMountPath(d.info, rID)
if err != nil { if err != nil {
@ -223,8 +211,6 @@ func (d *Driver) Get(id, mountLabel string) (string, error) {
return "", err return "", err
} }
d.active[rID]++
// If the layer has a mount path, use that. Otherwise, use the // If the layer has a mount path, use that. Otherwise, use the
// folder path. // folder path.
if mountPath != "" { if mountPath != "" {
@ -245,22 +231,10 @@ func (d *Driver) Put(id string) error {
return err return err
} }
d.Lock()
defer d.Unlock()
if d.active[rID] > 1 {
d.active[rID]--
} else if d.active[rID] == 1 {
if err := hcsshim.UnprepareLayer(d.info, rID); err != nil { if err := hcsshim.UnprepareLayer(d.info, rID); err != nil {
return err return err
} }
if err := hcsshim.DeactivateLayer(d.info, rID); err != nil { return hcsshim.DeactivateLayer(d.info, rID)
return err
}
delete(d.active, rID)
}
return nil
} }
// Cleanup ensures the information the driver stores is properly removed. // Cleanup ensures the information the driver stores is properly removed.
@ -270,62 +244,40 @@ func (d *Driver) Cleanup() error {
// Diff produces an archive of the changes between the specified // Diff produces an archive of the changes between the specified
// layer and its parent layer which may be "". // layer and its parent layer which may be "".
// The layer should be mounted when calling this function
func (d *Driver) Diff(id, parent string) (_ archive.Archive, err error) { func (d *Driver) Diff(id, parent string) (_ archive.Archive, err error) {
rID, err := d.resolveID(id) rID, err := d.resolveID(id)
if err != nil { if err != nil {
return return
} }
// Getting the layer paths must be done outside of the lock.
layerChain, err := d.getLayerChain(rID) layerChain, err := d.getLayerChain(rID)
if err != nil { if err != nil {
return return
} }
var undo func() // this is assuming that the layer is unmounted
if err := hcsshim.UnprepareLayer(d.info, rID); err != nil {
d.Lock() return nil, err
// To support export, a layer must be activated but not prepared.
if d.info.Flavour == filterDriver {
if d.active[rID] == 0 {
if err = hcsshim.ActivateLayer(d.info, rID); err != nil {
d.Unlock()
return
} }
undo = func() { defer func() {
if err := hcsshim.DeactivateLayer(d.info, rID); err != nil { if err := hcsshim.PrepareLayer(d.info, rID, layerChain); err != nil {
logrus.Warnf("Failed to Deactivate %s: %s", rID, err) logrus.Warnf("Failed to Deactivate %s: %s", rID, err)
} }
} }()
} else {
if err = hcsshim.UnprepareLayer(d.info, rID); err != nil {
d.Unlock()
return
}
undo = func() {
if err := hcsshim.PrepareLayer(d.info, rID, layerChain); err != nil {
logrus.Warnf("Failed to re-PrepareLayer %s: %s", rID, err)
}
}
}
}
d.Unlock()
arch, err := d.exportLayer(rID, layerChain) arch, err := d.exportLayer(rID, layerChain)
if err != nil { if err != nil {
undo()
return return
} }
return ioutils.NewReadCloserWrapper(arch, func() error { return ioutils.NewReadCloserWrapper(arch, func() error {
defer undo()
return arch.Close() return arch.Close()
}), nil }), nil
} }
// Changes produces a list of changes between the specified layer // Changes produces a list of changes between the specified layer
// and its parent layer. If parent is "", then all changes will be ADD changes. // and its parent layer. If parent is "", then all changes will be ADD changes.
// The layer should be mounted when calling this function
func (d *Driver) Changes(id, parent string) ([]archive.Change, error) { func (d *Driver) Changes(id, parent string) ([]archive.Change, error) {
rID, err := d.resolveID(id) rID, err := d.resolveID(id)
if err != nil { if err != nil {
@ -336,31 +288,15 @@ func (d *Driver) Changes(id, parent string) ([]archive.Change, error) {
return nil, err return nil, err
} }
d.Lock() // this is assuming that the layer is unmounted
if d.info.Flavour == filterDriver { if err := hcsshim.UnprepareLayer(d.info, rID); err != nil {
if d.active[rID] == 0 {
if err = hcsshim.ActivateLayer(d.info, rID); err != nil {
d.Unlock()
return nil, err
}
defer func() {
if err := hcsshim.DeactivateLayer(d.info, rID); err != nil {
logrus.Warnf("Failed to Deactivate %s: %s", rID, err)
}
}()
} else {
if err = hcsshim.UnprepareLayer(d.info, rID); err != nil {
d.Unlock()
return nil, err return nil, err
} }
defer func() { defer func() {
if err := hcsshim.PrepareLayer(d.info, rID, parentChain); err != nil { if err := hcsshim.PrepareLayer(d.info, rID, parentChain); err != nil {
logrus.Warnf("Failed to re-PrepareLayer %s: %s", rID, err) logrus.Warnf("Failed to Deactivate %s: %s", rID, err)
} }
}() }()
}
}
d.Unlock()
r, err := hcsshim.NewLayerReader(d.info, id, parentChain) r, err := hcsshim.NewLayerReader(d.info, id, parentChain)
if err != nil { if err != nil {
@ -391,6 +327,7 @@ func (d *Driver) Changes(id, parent string) ([]archive.Change, error) {
// ApplyDiff extracts the changeset from the given diff into the // ApplyDiff extracts the changeset from the given diff into the
// layer with the specified id and parent, returning the size of the // layer with the specified id and parent, returning the size of the
// new layer in bytes. // new layer in bytes.
// The layer should not be mounted when calling this function
func (d *Driver) ApplyDiff(id, parent string, diff archive.Reader) (size int64, err error) { func (d *Driver) ApplyDiff(id, parent string, diff archive.Reader) (size int64, err error) {
rPId, err := d.resolveID(parent) rPId, err := d.resolveID(parent)
if err != nil { if err != nil {

View File

@ -22,12 +22,6 @@ import (
"github.com/opencontainers/runc/libcontainer/label" "github.com/opencontainers/runc/libcontainer/label"
) )
type activeMount struct {
count int
path string
mounted bool
}
type zfsOptions struct { type zfsOptions struct {
fsName string fsName string
mountPath string mountPath string
@ -109,7 +103,6 @@ func Init(base string, opt []string, uidMaps, gidMaps []idtools.IDMap) (graphdri
dataset: rootDataset, dataset: rootDataset,
options: options, options: options,
filesystemsCache: filesystemsCache, filesystemsCache: filesystemsCache,
active: make(map[string]*activeMount),
uidMaps: uidMaps, uidMaps: uidMaps,
gidMaps: gidMaps, gidMaps: gidMaps,
} }
@ -166,7 +159,6 @@ type Driver struct {
options zfsOptions options zfsOptions
sync.Mutex // protects filesystem cache against concurrent access sync.Mutex // protects filesystem cache against concurrent access
filesystemsCache map[string]bool filesystemsCache map[string]bool
active map[string]*activeMount
uidMaps []idtools.IDMap uidMaps []idtools.IDMap
gidMaps []idtools.IDMap gidMaps []idtools.IDMap
} }
@ -302,17 +294,6 @@ func (d *Driver) Remove(id string) error {
// Get returns the mountpoint for the given id after creating the target directories if necessary. // Get returns the mountpoint for the given id after creating the target directories if necessary.
func (d *Driver) Get(id, mountLabel string) (string, error) { func (d *Driver) Get(id, mountLabel string) (string, error) {
d.Lock()
defer d.Unlock()
mnt := d.active[id]
if mnt != nil {
mnt.count++
return mnt.path, nil
}
mnt = &activeMount{count: 1}
mountpoint := d.mountPath(id) mountpoint := d.mountPath(id)
filesystem := d.zfsPath(id) filesystem := d.zfsPath(id)
options := label.FormatMountLabel("", mountLabel) options := label.FormatMountLabel("", mountLabel)
@ -335,48 +316,29 @@ func (d *Driver) Get(id, mountLabel string) (string, error) {
if err := os.Chown(mountpoint, rootUID, rootGID); err != nil { if err := os.Chown(mountpoint, rootUID, rootGID); err != nil {
return "", fmt.Errorf("error modifying zfs mountpoint (%s) directory ownership: %v", mountpoint, err) return "", fmt.Errorf("error modifying zfs mountpoint (%s) directory ownership: %v", mountpoint, err)
} }
mnt.path = mountpoint
mnt.mounted = true
d.active[id] = mnt
return mountpoint, nil return mountpoint, nil
} }
// Put removes the existing mountpoint for the given id if it exists. // Put removes the existing mountpoint for the given id if it exists.
func (d *Driver) Put(id string) error { func (d *Driver) Put(id string) error {
d.Lock() mountpoint := d.mountPath(id)
defer d.Unlock() mounted, err := graphdriver.Mounted(graphdriver.FsMagicZfs, mountpoint)
if err != nil || !mounted {
mnt := d.active[id] return err
if mnt == nil {
logrus.Debugf("[zfs] Put on a non-mounted device %s", id)
// but it might be still here
if d.Exists(id) {
err := mount.Unmount(d.mountPath(id))
if err != nil {
logrus.Debugf("[zfs] Failed to unmount %s zfs fs: %v", id, err)
}
}
return nil
} }
mnt.count-- logrus.Debugf(`[zfs] unmount("%s")`, mountpoint)
if mnt.count > 0 {
return nil
}
defer delete(d.active, id) if err := mount.Unmount(mountpoint); err != nil {
if mnt.mounted { return fmt.Errorf("error unmounting to %s: %v", mountpoint, err)
logrus.Debugf(`[zfs] unmount("%s")`, mnt.path)
if err := mount.Unmount(mnt.path); err != nil {
return fmt.Errorf("error unmounting to %s: %v", mnt.path, err)
}
} }
return nil return nil
} }
// Exists checks to see if the cache entry exists for the given id. // Exists checks to see if the cache entry exists for the given id.
func (d *Driver) Exists(id string) bool { func (d *Driver) Exists(id string) bool {
d.Lock()
defer d.Unlock()
return d.filesystemsCache[d.zfsPath(id)] == true return d.filesystemsCache[d.zfsPath(id)] == true
} }

View File

@ -0,0 +1,95 @@
package main
import (
"fmt"
"io/ioutil"
"os"
"runtime"
"strings"
"sync"
"github.com/docker/docker/pkg/integration/checker"
"github.com/go-check/check"
)
// BenchmarkConcurrentContainerActions stresses the daemon (and, indirectly,
// the layer-store mount refcounting) by running full container lifecycles
// concurrently with repeated `docker ps` calls. One goroutine pair is spawned
// per available CPU (GOMAXPROCS); each pair performs c.N iterations. Errors
// are collected on a channel and asserted after all goroutines finish.
func (s *DockerSuite) BenchmarkConcurrentContainerActions(c *check.C) {
	maxConcurrency := runtime.GOMAXPROCS(0)
	numIterations := c.N

	// Buffered so workers never block on error reporting; drained and
	// asserted only after every goroutine has completed.
	chErr := make(chan error, numIterations*2*maxConcurrency)

	// runLifecycle drives one container through run/cp/kill/start/kill/rm.
	// It is a separate function so the tmpdir cleanup defer fires at the end
	// of each iteration instead of accumulating until the benchmark returns.
	// Command output is reported via "%s" — never as a format string, since
	// container output may contain '%' verbs.
	runLifecycle := func() {
		args := append([]string{"run", "-d", defaultSleepImage}, defaultSleepCommand...)
		out, _, err := dockerCmdWithError(args...)
		if err != nil {
			chErr <- fmt.Errorf("%s", out)
			return
		}
		id := strings.TrimSpace(out)

		tmpDir, err := ioutil.TempDir("", "docker-concurrent-test-"+id)
		if err != nil {
			chErr <- err
			return
		}
		defer os.RemoveAll(tmpDir)

		if out, _, err = dockerCmdWithError("cp", id+":/tmp", tmpDir); err != nil {
			chErr <- fmt.Errorf("%s", out)
			return
		}
		if out, _, err = dockerCmdWithError("kill", id); err != nil {
			chErr <- fmt.Errorf("%s", out)
		}
		if out, _, err = dockerCmdWithError("start", id); err != nil {
			chErr <- fmt.Errorf("%s", out)
		}
		if out, _, err = dockerCmdWithError("kill", id); err != nil {
			chErr <- fmt.Errorf("%s", out)
		}
		// don't do an rm -f here since it can potentially ignore errors from
		// the graphdriver
		if out, _, err = dockerCmdWithError("rm", id); err != nil {
			chErr <- fmt.Errorf("%s", out)
		}
	}

	outerGroup := &sync.WaitGroup{}
	outerGroup.Add(maxConcurrency)
	for i := 0; i < maxConcurrency; i++ {
		go func() {
			defer outerGroup.Done()

			innerGroup := &sync.WaitGroup{}
			innerGroup.Add(2)

			// Lifecycle worker: exercises container create/mount/unmount paths.
			go func() {
				defer innerGroup.Done()
				for i := 0; i < numIterations; i++ {
					runLifecycle()
				}
			}()

			// Listing worker: races `docker ps` against the lifecycle worker.
			go func() {
				defer innerGroup.Done()
				for i := 0; i < numIterations; i++ {
					if out, _, err := dockerCmdWithError("ps"); err != nil {
						chErr <- fmt.Errorf("%s", out)
					}
				}
			}()

			innerGroup.Wait()
		}()
	}

	outerGroup.Wait()
	close(chErr)

	for err := range chErr {
		c.Assert(err, checker.IsNil)
	}
}

View File

@ -49,6 +49,10 @@ var (
// to be created which would result in a layer depth // to be created which would result in a layer depth
// greater than the 125 max. // greater than the 125 max.
ErrMaxDepthExceeded = errors.New("max depth exceeded") ErrMaxDepthExceeded = errors.New("max depth exceeded")
// ErrNotSupported is used when the action is not supported
// on the current platform
ErrNotSupported = errors.New("not support on this platform")
) )
// ChainID is the content-addressable ID of a layer. // ChainID is the content-addressable ID of a layer.

View File

@ -12,6 +12,7 @@ type mountedLayer struct {
mountID string mountID string
initID string initID string
parent *roLayer parent *roLayer
path string
layerStore *layerStore layerStore *layerStore
references map[RWLayer]*referencedRWLayer references map[RWLayer]*referencedRWLayer
@ -131,10 +132,21 @@ func (rl *referencedRWLayer) Mount(mountLabel string) (string, error) {
return "", ErrLayerNotRetained return "", ErrLayerNotRetained
} }
if rl.activityCount > 0 {
rl.activityCount++ rl.activityCount++
return rl.mountedLayer.Mount(mountLabel) return rl.path, nil
}
m, err := rl.mountedLayer.Mount(mountLabel)
if err == nil {
rl.activityCount++
rl.path = m
}
return m, err
} }
// Unmount decrements the activity count and unmounts the underlying layer
// Callers should only call `Unmount` once per call to `Mount`, even on error.
func (rl *referencedRWLayer) Unmount() error { func (rl *referencedRWLayer) Unmount() error {
rl.activityL.Lock() rl.activityL.Lock()
defer rl.activityL.Unlock() defer rl.activityL.Unlock()
@ -145,7 +157,11 @@ func (rl *referencedRWLayer) Unmount() error {
if rl.activityCount == -1 { if rl.activityCount == -1 {
return ErrLayerNotRetained return ErrLayerNotRetained
} }
rl.activityCount-- rl.activityCount--
if rl.activityCount > 0 {
return nil
}
return rl.mountedLayer.Unmount() return rl.mountedLayer.Unmount()
} }