Merge pull request #15310 from MHBauer/demon-lint-squash

golint fixes for daemon/ package
Merged by David Calavera on 2015-08-28 17:34:36 +02:00 in commit 433956cc47.
49 changed files with 637 additions and 468 deletions.
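The renames in this diff all follow golint's standard checks: initialisms such as API, DNS and ID are written in full caps, exported identifiers carry a doc comment that begins with their own name, and helpers with no callers outside the package are unexported (running `golint ./daemon/...` reports each of these). The stand-alone sketch below is not Docker code; the names are borrowed from the hunks that follow purely to illustrate the before/after shape golint expects.

```go
// Package lintdemo is an illustrative sketch of the golint conventions this
// PR applies to daemon/; it is not part of the Docker source tree.
package lintdemo

import "errors"

// ErrRootFSReadOnly is returned when a container rootfs is marked read-only.
// golint: a comment on an exported identifier must begin with that identifier.
var ErrRootFSReadOnly = errors.New("container rootfs is marked read-only")

// Config shows the initialism rule that turns Dns/DnsSearch into DNS/DNSSearch.
type Config struct {
	DNS       []string
	DNSSearch []string
}

// NetworkAPIRouter is exported and documented; golint would flag NetworkApiRouter.
func NetworkAPIRouter() string { return "/v{version}/networks" }

// logEvent stays lowercase: unexported identifiers need no doc comment and do
// not widen the package's public API.
func logEvent(action string) {}
```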


@@ -3,7 +3,7 @@
 package server
 func (s *Server) registerSubRouter() {
-httpHandler := s.daemon.NetworkApiRouter()
+httpHandler := s.daemon.NetworkAPIRouter()
 subrouter := s.router.PathPrefix("/v{version:[0-9.]+}/networks").Subrouter()
 subrouter.Methods("GET", "POST", "PUT", "DELETE").HandlerFunc(httpHandler)


@@ -29,7 +29,7 @@ func (daemon *Daemon) ContainerCopy(name string, res string) (io.ReadCloser, err
 res = res[1:]
 }
-return container.Copy(res)
+return container.copy(res)
 }
 // ContainerStatPath stats the filesystem resource at the specified path in the
@@ -142,7 +142,7 @@ func (container *Container) StatPath(path string) (stat *types.ContainerPathStat
 defer container.Unmount()
 err = container.mountVolumes()
-defer container.UnmountVolumes(true)
+defer container.unmountVolumes(true)
 if err != nil {
 return nil, err
 }
@@ -177,7 +177,7 @@ func (container *Container) ArchivePath(path string) (content io.ReadCloser, sta
 defer func() {
 if err != nil {
 // unmount any volumes
-container.UnmountVolumes(true)
+container.unmountVolumes(true)
 // unmount the container's rootfs
 container.Unmount()
 }
@@ -212,13 +212,13 @@ func (container *Container) ArchivePath(path string) (content io.ReadCloser, sta
 content = ioutils.NewReadCloserWrapper(data, func() error {
 err := data.Close()
-container.UnmountVolumes(true)
+container.unmountVolumes(true)
 container.Unmount()
 container.Unlock()
 return err
 })
-container.LogEvent("archive-path")
+container.logEvent("archive-path")
 return content, stat, nil
 }
@@ -239,7 +239,7 @@ func (container *Container) ExtractToDir(path string, noOverwriteDirNonDir bool,
 defer container.Unmount()
 err = container.mountVolumes()
-defer container.UnmountVolumes(true)
+defer container.unmountVolumes(true)
 if err != nil {
 return err
 }
@@ -288,7 +288,7 @@ func (container *Container) ExtractToDir(path string, noOverwriteDirNonDir bool,
 }
 if !toVolume && container.hostConfig.ReadonlyRootfs {
-return ErrContainerRootfsReadonly
+return ErrRootFSReadOnly
 }
 options := &archive.TarOptions{
@@ -302,7 +302,7 @@ func (container *Container) ExtractToDir(path string, noOverwriteDirNonDir bool,
 return err
 }
-container.LogEvent("extract-to-dir")
+container.logEvent("extract-to-dir")
 return nil
 }


@@ -6,6 +6,7 @@ import (
 "github.com/docker/docker/pkg/stdcopy"
 )
+// ContainerAttachWithLogsConfig holds the streams to use when connecting to a container to view logs.
 type ContainerAttachWithLogsConfig struct {
 InStream io.ReadCloser
 OutStream io.Writer
@@ -13,6 +14,7 @@ type ContainerAttachWithLogsConfig struct {
 Logs, Stream bool
 }
+// ContainerAttachWithLogs attaches to logs according to the config passed in. See ContainerAttachWithLogsConfig.
 func (daemon *Daemon) ContainerAttachWithLogs(container *Container, c *ContainerAttachWithLogsConfig) error {
 var errStream io.Writer
@@ -36,15 +38,18 @@ func (daemon *Daemon) ContainerAttachWithLogs(container *Container, c *Container
 stderr = errStream
 }
-return container.AttachWithLogs(stdin, stdout, stderr, c.Logs, c.Stream)
+return container.attachWithLogs(stdin, stdout, stderr, c.Logs, c.Stream)
 }
+// ContainerWsAttachWithLogsConfig attach with websockets, since all
+// stream data is delegated to the websocket to handle, there
 type ContainerWsAttachWithLogsConfig struct {
 InStream io.ReadCloser
 OutStream, ErrStream io.Writer
 Logs, Stream bool
 }
+// ContainerWsAttachWithLogs websocket connection
 func (daemon *Daemon) ContainerWsAttachWithLogs(container *Container, c *ContainerWsAttachWithLogsConfig) error {
-return container.AttachWithLogs(c.InStream, c.OutStream, c.ErrStream, c.Logs, c.Stream)
+return container.attachWithLogs(c.InStream, c.OutStream, c.ErrStream, c.Logs, c.Stream)
 }


@@ -9,5 +9,5 @@ func (daemon *Daemon) ContainerChanges(name string) ([]archive.Change, error) {
 return nil, err
 }
-return container.Changes()
+return container.changes()
 }


@@ -5,6 +5,8 @@ import (
 "github.com/docker/docker/runconfig"
 )
+// ContainerCommitConfig contains build configs for commit operation,
+// and is used when making a commit with the current state of the container.
 type ContainerCommitConfig struct {
 Pause bool
 Repo string
@@ -15,14 +17,14 @@ type ContainerCommitConfig struct {
 }
 // Commit creates a new filesystem image from the current state of a container.
-// The image can optionally be tagged into a repository
+// The image can optionally be tagged into a repository.
 func (daemon *Daemon) Commit(container *Container, c *ContainerCommitConfig) (*image.Image, error) {
-if c.Pause && !container.IsPaused() {
-container.Pause()
-defer container.Unpause()
+if c.Pause && !container.isPaused() {
+container.pause()
+defer container.unpause()
 }
-rwTar, err := container.ExportRw()
+rwTar, err := container.exportRw()
 if err != nil {
 return nil, err
 }
@@ -55,6 +57,6 @@ func (daemon *Daemon) Commit(container *Container, c *ContainerCommitConfig) (*i
 return img, err
 }
 }
-container.LogEvent("commit")
+container.logEvent("commit")
 return img, nil
 }


@@ -18,8 +18,8 @@ type CommonConfig struct {
 Bridge bridgeConfig // Bridge holds bridge network specific configuration.
 Context map[string][]string
 DisableBridge bool
-Dns []string
-DnsSearch []string
+DNS []string
+DNSSearch []string
 ExecDriver string
 ExecOptions []string
 ExecRoot string
@@ -50,8 +50,8 @@ func (config *Config) InstallCommonFlags(cmd *flag.FlagSet, usageFn func(string)
 cmd.StringVar(&config.ExecDriver, []string{"e", "-exec-driver"}, defaultExec, usageFn("Exec driver to use"))
 cmd.IntVar(&config.Mtu, []string{"#mtu", "-mtu"}, 0, usageFn("Set the containers network MTU"))
 // FIXME: why the inconsistency between "hosts" and "sockets"?
-cmd.Var(opts.NewListOptsRef(&config.Dns, opts.ValidateIPAddress), []string{"#dns", "-dns"}, usageFn("DNS server to use"))
-cmd.Var(opts.NewListOptsRef(&config.DnsSearch, opts.ValidateDNSSearch), []string{"-dns-search"}, usageFn("DNS search domains to use"))
+cmd.Var(opts.NewListOptsRef(&config.DNS, opts.ValidateIPAddress), []string{"#dns", "-dns"}, usageFn("DNS server to use"))
+cmd.Var(opts.NewListOptsRef(&config.DNSSearch, opts.ValidateDNSSearch), []string{"-dns-search"}, usageFn("DNS search domains to use"))
 cmd.Var(opts.NewListOptsRef(&config.Labels, opts.ValidateLabel), []string{"-label"}, usageFn("Set key=value labels to the daemon"))
 cmd.StringVar(&config.LogConfig.Type, []string{"-log-driver"}, "json-file", usageFn("Default driver for container logs"))
 cmd.Var(opts.NewMapOpts(config.LogConfig.Config, nil), []string{"-log-opt"}, usageFn("Set log driver options"))


@@ -34,13 +34,12 @@ import (
 )
 var (
-ErrNotATTY = errors.New("The PTY is not a file")
-ErrNoTTY = errors.New("No PTY found")
-ErrContainerStart = errors.New("The container failed to start. Unknown error")
-ErrContainerStartTimeout = errors.New("The container failed to start due to timed out.")
-ErrContainerRootfsReadonly = errors.New("container rootfs is marked read-only")
+// ErrRootFSReadOnly is returned when a container
+// rootfs is marked readonly.
+ErrRootFSReadOnly = errors.New("container rootfs is marked read-only")
 )
+// ErrContainerNotRunning holds the id of the container that is not running.
 type ErrContainerNotRunning struct {
 id string
 }
@@ -49,22 +48,21 @@ func (e ErrContainerNotRunning) Error() string {
 return fmt.Sprintf("Container %s is not running", e.id)
 }
-type StreamConfig struct {
+type streamConfig struct {
 stdout *broadcastwriter.BroadcastWriter
 stderr *broadcastwriter.BroadcastWriter
 stdin io.ReadCloser
 stdinPipe io.WriteCloser
 }
-// CommonContainer holds the settings for a container which are applicable
-// across all platforms supported by the daemon.
+// CommonContainer holds the fields for a container which are
+// applicable across all platforms supported by the daemon.
 type CommonContainer struct {
-StreamConfig
+streamConfig
+// embed for Container to support states directly.
 *State `json:"State"` // Needed for remote api version <= 1.11
 root string // Path to the "home" of the container, including metadata.
 basefs string // Path to the graphdriver mountpoint
 ID string
 Created time.Time
 Path string
@@ -76,7 +74,9 @@ type CommonContainer struct {
 Name string
 Driver string
 ExecDriver string
-MountLabel, ProcessLabel string
+// MountLabel contains the options for the 'mount' command
+MountLabel string
+ProcessLabel string
 RestartCount int
 HasBeenStartedBefore bool
 HasBeenManuallyStopped bool // used for unless-stopped restart policy
@@ -90,7 +90,7 @@ type CommonContainer struct {
 logCopier *logger.Copier
 }
-func (container *Container) FromDisk() error {
+func (container *Container) fromDisk() error {
 pth, err := container.jsonPath()
 if err != nil {
 return err
@@ -131,10 +131,10 @@ func (container *Container) toDisk() error {
 return err
 }
-return container.WriteHostConfig()
+return container.writeHostConfig()
 }
-func (container *Container) ToDisk() error {
+func (container *Container) toDiskLocking() error {
 container.Lock()
 err := container.toDisk()
 container.Unlock()
@@ -165,7 +165,7 @@ func (container *Container) readHostConfig() error {
 return json.NewDecoder(f).Decode(&container.hostConfig)
 }
-func (container *Container) WriteHostConfig() error {
+func (container *Container) writeHostConfig() error {
 data, err := json.Marshal(container.hostConfig)
 if err != nil {
 return err
@@ -179,7 +179,7 @@ func (container *Container) WriteHostConfig() error {
 return ioutil.WriteFile(pth, data, 0666)
 }
-func (container *Container) LogEvent(action string) {
+func (container *Container) logEvent(action string) {
 d := container.daemon
 d.EventsService.Log(
 action,
@@ -188,7 +188,7 @@ func (container *Container) LogEvent(action string) {
 )
 }
-// Evaluates `path` in the scope of the container's basefs, with proper path
+// GetResourcePath evaluates `path` in the scope of the container's basefs, with proper path
 // sanitisation. Symlinks are all scoped to the basefs of the container, as
 // though the container's basefs was `/`.
 //
@@ -221,18 +221,18 @@ func (container *Container) GetResourcePath(path string) (string, error) {
 // if no component of the returned path changes (such as a component
 // symlinking to a different path) between using this method and using the
 // path. See symlink.FollowSymlinkInScope for more details.
-func (container *Container) GetRootResourcePath(path string) (string, error) {
+func (container *Container) getRootResourcePath(path string) (string, error) {
 // IMPORTANT - These are paths on the OS where the daemon is running, hence
 // any filepath operations must be done in an OS agnostic way.
 cleanPath := filepath.Join(string(os.PathSeparator), path)
 return symlink.FollowSymlinkInScope(filepath.Join(container.root, cleanPath), container.root)
 }
-func (container *Container) ExportRw() (archive.Archive, error) {
+func (container *Container) exportContainerRw() (archive.Archive, error) {
 if container.daemon == nil {
 return nil, fmt.Errorf("Can't load storage driver for unregistered container %s", container.ID)
 }
-archive, err := container.daemon.Diff(container)
+archive, err := container.daemon.diff(container)
 if err != nil {
 return nil, err
 }
@@ -243,6 +243,10 @@ func (container *Container) ExportRw() (archive.Archive, error) {
 nil
 }
+// Start prepares the container to run by setting up everything the
+// container needs, such as storage and networking, as well as links
+// between containers. The container is left waiting for a signal to
+// begin running.
 func (container *Container) Start() (err error) {
 container.Lock()
 defer container.Unlock()
@@ -266,7 +270,7 @@ func (container *Container) Start() (err error) {
 }
 container.toDisk()
 container.cleanup()
-container.LogEvent("die")
+container.logEvent("die")
 }
 }()
@@ -302,7 +306,7 @@ func (container *Container) Start() (err error) {
 return container.waitForStart()
 }
-func (container *Container) Run() error {
+func (container *Container) run() error {
 if err := container.Start(); err != nil {
 return err
 }
@@ -311,7 +315,7 @@ func (container *Container) Run() error {
 return nil
 }
-func (container *Container) Output() (output []byte, err error) {
+func (container *Container) output() (output []byte, err error) {
 pipe := container.StdoutPipe()
 defer pipe.Close()
 if err := container.Start(); err != nil {
@@ -322,7 +326,7 @@ func (container *Container) Output() (output []byte, err error) {
 return output, err
 }
-// StreamConfig.StdinPipe returns a WriteCloser which can be used to feed data
+// streamConfig.StdinPipe returns a WriteCloser which can be used to feed data
 // to the standard input of the container's active process.
 // Container.StdoutPipe and Container.StderrPipe each return a ReadCloser
 // which can be used to retrieve the standard output (and error) generated
@@ -330,17 +334,17 @@ func (container *Container) Output() (output []byte, err error) {
 // copied and delivered to all StdoutPipe and StderrPipe consumers, using
 // a kind of "broadcaster".
-func (streamConfig *StreamConfig) StdinPipe() io.WriteCloser {
+func (streamConfig *streamConfig) StdinPipe() io.WriteCloser {
 return streamConfig.stdinPipe
 }
-func (streamConfig *StreamConfig) StdoutPipe() io.ReadCloser {
+func (streamConfig *streamConfig) StdoutPipe() io.ReadCloser {
 reader, writer := io.Pipe()
 streamConfig.stdout.AddWriter(writer)
 return ioutils.NewBufReader(reader)
 }
-func (streamConfig *StreamConfig) StderrPipe() io.ReadCloser {
+func (streamConfig *streamConfig) StderrPipe() io.ReadCloser {
 reader, writer := io.Pipe()
 streamConfig.stderr.AddWriter(writer)
 return ioutils.NewBufReader(reader)
@@ -353,7 +357,7 @@ func (container *Container) isNetworkAllocated() bool {
 // cleanup releases any network resources allocated to the container along with any rules
 // around how containers are linked together. It also unmounts the container's root filesystem.
 func (container *Container) cleanup() {
-container.ReleaseNetwork()
+container.releaseNetwork()
 if err := container.Unmount(); err != nil {
 logrus.Errorf("%v: Failed to umount filesystem: %v", container.ID, err)
@@ -363,10 +367,15 @@ func (container *Container) cleanup() {
 container.daemon.unregisterExecCommand(eConfig)
 }
-container.UnmountVolumes(false)
+container.unmountVolumes(false)
 }
-func (container *Container) KillSig(sig int) error {
+// killSig sends the container the given signal. This wrapper for the
+// host specific kill command prepares the container before attempting
+// to send the signal. An error is returned if the container is paused
+// or not running, or if there is a problem returned from the
+// underlying kill command.
+func (container *Container) killSig(sig int) error {
 logrus.Debugf("Sending %d to %s", sig, container.ID)
 container.Lock()
 defer container.Unlock()
@@ -391,24 +400,24 @@ func (container *Container) KillSig(sig int) error {
 return nil
 }
-if err := container.daemon.Kill(container, sig); err != nil {
+if err := container.daemon.kill(container, sig); err != nil {
 return err
 }
-container.LogEvent("kill")
+container.logEvent("kill")
 return nil
 }
-// Wrapper aroung KillSig() suppressing "no such process" error.
+// Wrapper aroung killSig() suppressing "no such process" error.
 func (container *Container) killPossiblyDeadProcess(sig int) error {
-err := container.KillSig(sig)
+err := container.killSig(sig)
 if err == syscall.ESRCH {
-logrus.Debugf("Cannot kill process (pid=%d) with signal %d: no such process.", container.GetPid(), sig)
+logrus.Debugf("Cannot kill process (pid=%d) with signal %d: no such process.", container.getPID(), sig)
 return nil
 }
 return err
 }
-func (container *Container) Pause() error {
+func (container *Container) pause() error {
 container.Lock()
 defer container.Unlock()
@@ -426,11 +435,11 @@ func (container *Container) Pause() error {
 return err
 }
 container.Paused = true
-container.LogEvent("pause")
+container.logEvent("pause")
 return nil
 }
-func (container *Container) Unpause() error {
+func (container *Container) unpause() error {
 container.Lock()
 defer container.Unlock()
@@ -448,17 +457,18 @@ func (container *Container) Unpause() error {
 return err
 }
 container.Paused = false
-container.LogEvent("unpause")
+container.logEvent("unpause")
 return nil
 }
+// Kill forcefully terminates a container.
 func (container *Container) Kill() error {
 if !container.IsRunning() {
 return ErrContainerNotRunning{container.ID}
 }
 // 1. Send SIGKILL
-if err := container.killPossiblyDeadProcess(9); err != nil {
+if err := container.killPossiblyDeadProcess(int(syscall.SIGKILL)); err != nil {
 // While normally we might "return err" here we're not going to
 // because if we can't stop the container by this point then
 // its probably because its already stopped. Meaning, between
@@ -487,15 +497,20 @@ func (container *Container) Kill() error {
 return nil
 }
+// Stop halts a container by sending SIGTERM, waiting for the given
+// duration in seconds, and then calling SIGKILL and waiting for the
+// process to exit. If a negative duration is given, Stop will wait
+// for SIGTERM forever. If the container is not running Stop returns
+// immediately.
 func (container *Container) Stop(seconds int) error {
 if !container.IsRunning() {
 return nil
 }
 // 1. Send a SIGTERM
-if err := container.killPossiblyDeadProcess(15); err != nil {
+if err := container.killPossiblyDeadProcess(int(syscall.SIGTERM)); err != nil {
 logrus.Infof("Failed to send SIGTERM to the process, force killing")
-if err := container.killPossiblyDeadProcess(9); err != nil {
+if err := container.killPossiblyDeadProcess(int(syscall.SIGKILL)); err != nil {
 return err
 }
 }
@@ -510,10 +525,14 @@ func (container *Container) Stop(seconds int) error {
 }
 }
-container.LogEvent("stop")
+container.logEvent("stop")
 return nil
 }
+// Restart attempts to gracefully stop and then start the
+// container. When stopping, wait for the given duration in seconds to
+// gracefully stop, before forcefully terminating the container. If
+// given a negative duration, wait forever for a graceful stop.
 func (container *Container) Restart(seconds int) error {
 // Avoid unnecessarily unmounting and then directly mounting
 // the container when the container stops and then starts
@@ -530,10 +549,12 @@ func (container *Container) Restart(seconds int) error {
 return err
 }
-container.LogEvent("restart")
+container.logEvent("restart")
 return nil
 }
+// Resize changes the TTY of the process running inside the container
+// to the given height and width. The container must be running.
 func (container *Container) Resize(h, w int) error {
 if !container.IsRunning() {
 return ErrContainerNotRunning{container.ID}
@@ -541,11 +562,11 @@ func (container *Container) Resize(h, w int) error {
 if err := container.command.ProcessConfig.Terminal.Resize(h, w); err != nil {
 return err
 }
-container.LogEvent("resize")
+container.logEvent("resize")
 return nil
 }
-func (container *Container) Export() (archive.Archive, error) {
+func (container *Container) export() (archive.Archive, error) {
 if err := container.Mount(); err != nil {
 return nil, err
 }
@@ -560,46 +581,45 @@ func (container *Container) Export() (archive.Archive, error) {
 container.Unmount()
 return err
 })
-container.LogEvent("export")
+container.logEvent("export")
 return arch, err
 }
+// Mount sets container.basefs
 func (container *Container) Mount() error {
 return container.daemon.Mount(container)
 }
 func (container *Container) changes() ([]archive.Change, error) {
-return container.daemon.Changes(container)
-}
-func (container *Container) Changes() ([]archive.Change, error) {
 container.Lock()
 defer container.Unlock()
-return container.changes()
+return container.daemon.changes(container)
 }
-func (container *Container) GetImage() (*image.Image, error) {
+func (container *Container) getImage() (*image.Image, error) {
 if container.daemon == nil {
 return nil, fmt.Errorf("Can't get image of unregistered container")
 }
 return container.daemon.graph.Get(container.ImageID)
 }
+// Unmount asks the daemon to release the layered filesystems that are
+// mounted by the container.
 func (container *Container) Unmount() error {
-return container.daemon.Unmount(container)
+return container.daemon.unmount(container)
 }
 func (container *Container) hostConfigPath() (string, error) {
-return container.GetRootResourcePath("hostconfig.json")
+return container.getRootResourcePath("hostconfig.json")
 }
 func (container *Container) jsonPath() (string, error) {
-return container.GetRootResourcePath("config.json")
+return container.getRootResourcePath("config.json")
 }
 // This method must be exported to be used from the lxc template
 // This directory is only usable when the container is running
-func (container *Container) RootfsPath() string {
+func (container *Container) rootfsPath() string {
 return container.basefs
 }
@@ -610,7 +630,7 @@ func validateID(id string) error {
 return nil
 }
-func (container *Container) Copy(resource string) (rc io.ReadCloser, err error) {
+func (container *Container) copy(resource string) (rc io.ReadCloser, err error) {
 container.Lock()
 defer func() {
@@ -629,7 +649,7 @@ func (container *Container) Copy(resource string) (rc io.ReadCloser, err error)
 defer func() {
 if err != nil {
 // unmount any volumes
-container.UnmountVolumes(true)
+container.unmountVolumes(true)
 // unmount the container's rootfs
 container.Unmount()
 }
@@ -666,17 +686,17 @@ func (container *Container) Copy(resource string) (rc io.ReadCloser, err error)
 reader := ioutils.NewReadCloserWrapper(archive, func() error {
 err := archive.Close()
-container.UnmountVolumes(true)
+container.unmountVolumes(true)
 container.Unmount()
 container.Unlock()
 return err
 })
-container.LogEvent("copy")
+container.logEvent("copy")
 return reader, nil
 }
 // Returns true if the container exposes a certain port
-func (container *Container) Exposes(p nat.Port) bool {
+func (container *Container) exposes(p nat.Port) bool {
 _, exists := container.Config.ExposedPorts[p]
 return exists
 }
@@ -718,7 +738,7 @@ func (container *Container) getLogger() (logger.Logger, error) {
 // Set logging file for "json-logger"
 if cfg.Type == jsonfilelog.Name {
-ctx.LogPath, err = container.GetRootResourcePath(fmt.Sprintf("%s-json.log", container.ID))
+ctx.LogPath, err = container.getRootResourcePath(fmt.Sprintf("%s-json.log", container.ID))
 if err != nil {
 return nil, err
 }
@@ -764,7 +784,7 @@ func (container *Container) waitForStart() error {
 return nil
 }
-func (container *Container) GetProcessLabel() string {
+func (container *Container) getProcessLabel() string {
 // even if we have a process label return "" if we are running
 // in privileged mode
 if container.hostConfig.Privileged {
@@ -773,31 +793,22 @@ func (container *Container) GetProcessLabel() string {
 return container.ProcessLabel
 }
-func (container *Container) GetMountLabel() string {
+func (container *Container) getMountLabel() string {
 if container.hostConfig.Privileged {
 return ""
 }
 return container.MountLabel
 }
-func (container *Container) Stats() (*execdriver.ResourceStats, error) {
-return container.daemon.Stats(container)
+func (container *Container) stats() (*execdriver.ResourceStats, error) {
+return container.daemon.stats(container)
 }
-func (c *Container) LogDriverType() string {
-c.Lock()
-defer c.Unlock()
-if c.hostConfig.LogConfig.Type == "" {
-return c.daemon.defaultLogConfig.Type
-}
-return c.hostConfig.LogConfig.Type
-}
-func (container *Container) GetExecIDs() []string {
+func (container *Container) getExecIDs() []string {
 return container.execCommands.List()
 }
-func (container *Container) Exec(execConfig *execConfig) error {
+func (container *Container) exec(ExecConfig *ExecConfig) error {
 container.Lock()
 defer container.Unlock()
@@ -810,16 +821,16 @@ func (container *Container) Exec(execConfig *execConfig) error {
 c.Close()
 }
 }
-close(execConfig.waitStart)
+close(ExecConfig.waitStart)
 }
 // We use a callback here instead of a goroutine and an chan for
 // synchronization purposes
-cErr := promise.Go(func() error { return container.monitorExec(execConfig, callback) })
+cErr := promise.Go(func() error { return container.monitorExec(ExecConfig, callback) })
 // Exec should not return until the process is actually running
 select {
-case <-execConfig.waitStart:
+case <-ExecConfig.waitStart:
 case err := <-cErr:
 return err
 }
@@ -827,46 +838,48 @@ func (container *Container) Exec(execConfig *execConfig) error {
 return nil
 }
-func (container *Container) monitorExec(execConfig *execConfig, callback execdriver.StartCallback) error {
+func (container *Container) monitorExec(ExecConfig *ExecConfig, callback execdriver.StartCallback) error {
 var (
 err error
 exitCode int
 )
-pipes := execdriver.NewPipes(execConfig.StreamConfig.stdin, execConfig.StreamConfig.stdout, execConfig.StreamConfig.stderr, execConfig.OpenStdin)
-exitCode, err = container.daemon.Exec(container, execConfig, pipes, callback)
+pipes := execdriver.NewPipes(ExecConfig.streamConfig.stdin, ExecConfig.streamConfig.stdout, ExecConfig.streamConfig.stderr, ExecConfig.OpenStdin)
+exitCode, err = container.daemon.Exec(container, ExecConfig, pipes, callback)
 if err != nil {
 logrus.Errorf("Error running command in existing container %s: %s", container.ID, err)
 }
 logrus.Debugf("Exec task in container %s exited with code %d", container.ID, exitCode)
-if execConfig.OpenStdin {
-if err := execConfig.StreamConfig.stdin.Close(); err != nil {
+if ExecConfig.OpenStdin {
+if err := ExecConfig.streamConfig.stdin.Close(); err != nil {
 logrus.Errorf("Error closing stdin while running in %s: %s", container.ID, err)
 }
 }
-if err := execConfig.StreamConfig.stdout.Clean(); err != nil {
+if err := ExecConfig.streamConfig.stdout.Clean(); err != nil {
 logrus.Errorf("Error closing stdout while running in %s: %s", container.ID, err)
 }
-if err := execConfig.StreamConfig.stderr.Clean(); err != nil {
+if err := ExecConfig.streamConfig.stderr.Clean(); err != nil {
 logrus.Errorf("Error closing stderr while running in %s: %s", container.ID, err)
 }
-if execConfig.ProcessConfig.Terminal != nil {
-if err := execConfig.ProcessConfig.Terminal.Close(); err != nil {
+if ExecConfig.ProcessConfig.Terminal != nil {
+if err := ExecConfig.ProcessConfig.Terminal.Close(); err != nil {
 logrus.Errorf("Error closing terminal while running in container %s: %s", container.ID, err)
 }
 }
 // remove the exec command from the container's store only and not the
 // daemon's store so that the exec command can be inspected.
-container.execCommands.Delete(execConfig.ID)
+container.execCommands.Delete(ExecConfig.ID)
 return err
 }
-func (c *Container) Attach(stdin io.ReadCloser, stdout io.Writer, stderr io.Writer) chan error {
-return attach(&c.StreamConfig, c.Config.OpenStdin, c.Config.StdinOnce, c.Config.Tty, stdin, stdout, stderr)
+// Attach connects to the container's TTY, delegating to standard
+// streams or websockets depending on the configuration.
+func (container *Container) Attach(stdin io.ReadCloser, stdout io.Writer, stderr io.Writer) chan error {
+return attach(&container.streamConfig, container.Config.OpenStdin, container.Config.StdinOnce, container.Config.Tty, stdin, stdout, stderr)
 }
-func (c *Container) AttachWithLogs(stdin io.ReadCloser, stdout, stderr io.Writer, logs, stream bool) error {
+func (container *Container) attachWithLogs(stdin io.ReadCloser, stdout, stderr io.Writer, logs, stream bool) error {
 if logs {
-logDriver, err := c.getLogger()
+logDriver, err := container.getLogger()
 if err != nil {
 return err
 }
@@ -896,7 +909,7 @@ func (c *Container) AttachWithLogs(stdin io.ReadCloser, stdout, stderr io.Writer
 }
 }
-c.LogEvent("attach")
+container.logEvent("attach")
 //stream
 if stream {
@@ -910,17 +923,17 @@ func (c *Container) AttachWithLogs(stdin io.ReadCloser, stdout, stderr io.Writer
 }()
 stdinPipe = r
 }
-<-c.Attach(stdinPipe, stdout, stderr)
+<-container.Attach(stdinPipe, stdout, stderr)
 // If we are in stdinonce mode, wait for the process to end
 // otherwise, simply return
-if c.Config.StdinOnce && !c.Config.Tty {
-c.WaitStop(-1 * time.Second)
+if container.Config.StdinOnce && !container.Config.Tty {
+container.WaitStop(-1 * time.Second)
 }
 }
 return nil
 }
-func attach(streamConfig *StreamConfig, openStdin, stdinOnce, tty bool, stdin io.ReadCloser, stdout io.Writer, stderr io.Writer) chan error {
+func attach(streamConfig *streamConfig, openStdin, stdinOnce, tty bool, stdin io.ReadCloser, stdout io.Writer, stderr io.Writer) chan error {
 var (
 cStdout, cStderr io.ReadCloser
 cStdin io.WriteCloser
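The Kill and Stop hunks above replace the numeric literals 9 and 15 with int(syscall.SIGKILL) and int(syscall.SIGTERM). A minimal, stand-alone sketch (not Docker code; killPossiblyDeadProcess here is a stub standing in for the daemon helper) of why that conversion works: syscall.Signal is an integer type, so the constants carry the same values while staying self-documenting at the call site.

```go
package main

import (
	"fmt"
	"syscall"
)

// killPossiblyDeadProcess stands in for the daemon helper of the same name;
// it only prints the signal number here.
func killPossiblyDeadProcess(sig int) {
	fmt.Printf("would send signal %d\n", sig)
}

func main() {
	// Equivalent to the old magic numbers 15 and 9, but readable at the call site.
	killPossiblyDeadProcess(int(syscall.SIGTERM))
	killPossiblyDeadProcess(int(syscall.SIGKILL))
}
```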


@@ -18,7 +18,9 @@ import (
 "github.com/docker/docker/daemon/execdriver"
 "github.com/docker/docker/daemon/links"
 "github.com/docker/docker/daemon/network"
+"github.com/docker/docker/pkg/archive"
 "github.com/docker/docker/pkg/directory"
+"github.com/docker/docker/pkg/ioutils"
 "github.com/docker/docker/pkg/nat"
 "github.com/docker/docker/pkg/stringid"
 "github.com/docker/docker/pkg/system"
@@ -35,8 +37,13 @@ import (
 "github.com/opencontainers/runc/libcontainer/label"
 )
+// DefaultPathEnv is unix style list of directories to search for
+// executables. Each directory is separated from the next by a colon
+// ':' character .
 const DefaultPathEnv = "/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin"
+// Container holds the fields specific to unixen implementations. See
+// CommonContainer for standard fields common to all containers.
 type Container struct {
 CommonContainer
@@ -47,7 +54,7 @@ type Container struct {
 HostsPath string
 MountPoints map[string]*mountPoint
 ResolvConfPath string
-UpdateDns bool
 Volumes map[string]string // Deprecated since 1.7, kept for backwards compatibility
 VolumesRW map[string]bool // Deprecated since 1.7, kept for backwards compatibility
 }
@@ -55,7 +62,7 @@ type Container struct {
 func killProcessDirectly(container *Container) error {
 if _, err := container.WaitStop(10 * time.Second); err != nil {
 // Ensure that we don't kill ourselves
-if pid := container.GetPid(); pid != 0 {
+if pid := container.getPID(); pid != 0 {
 logrus.Infof("Container %s failed to exit within 10 seconds of kill - trying direct SIGKILL", stringid.TruncateID(container.ID))
 if err := syscall.Kill(pid, 9); err != nil {
 if err != syscall.ESRCH {
@@ -73,7 +80,7 @@ func (container *Container) setupLinkedContainers() ([]string, error) {
 env []string
 daemon = container.daemon
 )
-children, err := daemon.Children(container.Name)
+children, err := daemon.children(container.Name)
 if err != nil {
 return nil, err
 }
@@ -231,7 +238,7 @@ func populateCommand(c *Container, env []string) error {
 for _, ul := range ulimits {
 ulIdx[ul.Name] = ul
 }
-for name, ul := range c.daemon.config.Ulimits {
+for name, ul := range c.daemon.configStore.Ulimits {
 if _, exists := ulIdx[name]; !exists {
 ulimits = append(ulimits, ul)
 }
@@ -277,7 +284,7 @@ func populateCommand(c *Container, env []string) error {
 c.command = &execdriver.Command{
 ID: c.ID,
-Rootfs: c.RootfsPath(),
+Rootfs: c.rootfsPath(),
 ReadonlyRootfs: c.hostConfig.ReadonlyRootfs,
 InitPath: "/.dockerinit",
 WorkingDir: c.Config.WorkingDir,
@@ -292,8 +299,8 @@ func populateCommand(c *Container, env []string) error {
 CapDrop: c.hostConfig.CapDrop.Slice(),
 GroupAdd: c.hostConfig.GroupAdd,
 ProcessConfig: processConfig,
-ProcessLabel: c.GetProcessLabel(),
-MountLabel: c.GetMountLabel(),
+ProcessLabel: c.getProcessLabel(),
+MountLabel: c.getMountLabel(),
 LxcConfig: lxcConfig,
 AppArmorProfile: c.AppArmorProfile,
 CgroupParent: c.hostConfig.CgroupParent,
@@ -321,8 +328,8 @@ func mergeDevices(defaultDevices, userDevices []*configs.Device) []*configs.Devi
 return append(devs, userDevices...)
 }
-// GetSize, return real size, virtual size
-func (container *Container) GetSize() (int64, int64) {
+// GetSize returns the real size & virtual size of the container.
+func (container *Container) getSize() (int64, int64) {
 var (
 sizeRw, sizeRootfs int64
 err error
@@ -373,7 +380,7 @@ func (container *Container) trySetNetworkMount(destination string, path string)
 }
 func (container *Container) buildHostnameFile() error {
-hostnamePath, err := container.GetRootResourcePath("hostname")
+hostnamePath, err := container.getRootResourcePath("hostname")
 if err != nil {
 return err
 }
@@ -400,13 +407,13 @@ func (container *Container) buildJoinOptions() ([]libnetwork.EndpointOption, err
 joinOptions = append(joinOptions, libnetwork.JoinOptionUseDefaultSandbox())
 }
-container.HostsPath, err = container.GetRootResourcePath("hosts")
+container.HostsPath, err = container.getRootResourcePath("hosts")
 if err != nil {
 return nil, err
 }
 joinOptions = append(joinOptions, libnetwork.JoinOptionHostsPath(container.HostsPath))
-container.ResolvConfPath, err = container.GetRootResourcePath("resolv.conf")
+container.ResolvConfPath, err = container.getRootResourcePath("resolv.conf")
 if err != nil {
 return nil, err
 }
@@ -414,8 +421,8 @@ func (container *Container) buildJoinOptions() ([]libnetwork.EndpointOption, err
 if len(container.hostConfig.DNS) > 0 {
 dns = container.hostConfig.DNS
-} else if len(container.daemon.config.Dns) > 0 {
-dns = container.daemon.config.Dns
+} else if len(container.daemon.configStore.DNS) > 0 {
+dns = container.daemon.configStore.DNS
 }
 for _, d := range dns {
@@ -424,8 +431,8 @@ func (container *Container) buildJoinOptions() ([]libnetwork.EndpointOption, err
 if len(container.hostConfig.DNSSearch) > 0 {
 dnsSearch = container.hostConfig.DNSSearch
-} else if len(container.daemon.config.DnsSearch) > 0 {
-dnsSearch = container.daemon.config.DnsSearch
+} else if len(container.daemon.configStore.DNSSearch) > 0 {
+dnsSearch = container.daemon.configStore.DNSSearch
 }
 for _, ds := range dnsSearch {
@@ -445,7 +452,7 @@ func (container *Container) buildJoinOptions() ([]libnetwork.EndpointOption, err
 var childEndpoints, parentEndpoints []string
-children, err := container.daemon.Children(container.Name)
+children, err := container.daemon.children(container.Name)
 if err != nil {
 return nil, err
 }
@@ -470,7 +477,7 @@ func (container *Container) buildJoinOptions() ([]libnetwork.EndpointOption, err
 joinOptions = append(joinOptions, libnetwork.JoinOptionExtraHost(parts[0], parts[1]))
 }
-refs := container.daemon.ContainerGraph().RefPaths(container.ID)
+refs := container.daemon.containerGraph().RefPaths(container.ID)
 for _, ref := range refs {
 if ref.ParentID == "0" {
 continue
@@ -481,7 +488,7 @@ func (container *Container) buildJoinOptions() ([]libnetwork.EndpointOption, err
 logrus.Error(err)
 }
-if c != nil && !container.daemon.config.DisableBridge && container.hostConfig.NetworkMode.IsPrivate() {
+if c != nil && !container.daemon.configStore.DisableBridge && container.hostConfig.NetworkMode.IsPrivate() {
 logrus.Debugf("Update /etc/hosts of %s for alias %s with ip %s", c.ID, ref.Name, container.NetworkSettings.IPAddress)
 joinOptions = append(joinOptions, libnetwork.JoinOptionParentUpdate(c.NetworkSettings.EndpointID, ref.Name, container.NetworkSettings.IPAddress))
 if c.NetworkSettings.EndpointID != "" {
@@ -642,7 +649,7 @@ func (container *Container) updateNetworkSettings(n libnetwork.Network, ep libne
 }
 if container.hostConfig.NetworkMode == runconfig.NetworkMode("bridge") {
-networkSettings.Bridge = container.daemon.config.Bridge.Iface
+networkSettings.Bridge = container.daemon.configStore.Bridge.Iface
 }
 container.NetworkSettings = networkSettings
@@ -651,7 +658,7 @@ func (container *Container) updateNetworkSettings(n libnetwork.Network, ep libne
 // UpdateNetwork is used to update the container's network (e.g. when linked containers
 // get removed/unlinked).
-func (container *Container) UpdateNetwork() error {
+func (container *Container) updateNetwork() error {
 n, err := container.daemon.netController.NetworkByID(container.NetworkSettings.NetworkID)
 if err != nil {
 return fmt.Errorf("error locating network id %s: %v", container.NetworkSettings.NetworkID, err)
@@ -803,7 +810,7 @@ func (container *Container) secondaryNetworkRequired(primaryNetworkType string)
 return false
 }
-if container.daemon.config.DisableBridge {
+if container.daemon.configStore.DisableBridge {
 return false
 }
@@ -816,7 +823,7 @@ func (container *Container) secondaryNetworkRequired(primaryNetworkType string)
 return false
 }
-func (container *Container) AllocateNetwork() error {
+func (container *Container) allocateNetwork() error {
 mode := container.hostConfig.NetworkMode
 controller := container.daemon.netController
 if container.Config.NetworkDisabled || mode.IsContainer() {
@@ -837,7 +844,7 @@ func (container *Container) AllocateNetwork() error {
 return fmt.Errorf("conflicting options: publishing a service and network mode")
 }
-if runconfig.NetworkMode(networkDriver).IsBridge() && container.daemon.config.DisableBridge {
+if runconfig.NetworkMode(networkDriver).IsBridge() && container.daemon.configStore.DisableBridge {
 container.Config.NetworkDisabled = true
 return nil
 }
@@ -861,7 +868,7 @@ func (container *Container) AllocateNetwork() error {
 return err
 }
-return container.WriteHostConfig()
+return container.writeHostConfig()
 }
 func (container *Container) configureNetwork(networkName, service, networkDriver string, canCreateNetwork bool) error {
@@ -945,13 +952,28 @@ func (container *Container) initializeNetworking() error {
 }
-if err := container.AllocateNetwork(); err != nil {
+if err := container.allocateNetwork(); err != nil {
 return err
 }
 return container.buildHostnameFile()
 }
+func (container *Container) exportRw() (archive.Archive, error) {
+if container.daemon == nil {
+return nil, fmt.Errorf("Can't load storage driver for unregistered container %s", container.ID)
+}
+archive, err := container.daemon.diff(container)
+if err != nil {
+return nil, err
+}
+return ioutils.NewReadCloserWrapper(archive, func() error {
+err := archive.Close()
+return err
+}),
+nil
+}
 func (container *Container) getIpcContainer() (*Container, error) {
 containerID := container.hostConfig.IpcMode.Container()
 c, err := container.daemon.Get(containerID)
@@ -1013,7 +1035,7 @@ func (container *Container) getNetworkedContainer() (*Container, error) {
 }
 }
-func (container *Container) ReleaseNetwork() {
+func (container *Container) releaseNetwork() {
 if container.hostConfig.NetworkMode.IsContainer() || container.Config.NetworkDisabled {
 return
 }
@@ -1060,7 +1082,7 @@ func (container *Container) ReleaseNetwork() {
 }
 }
-func (container *Container) UnmountVolumes(forceSyscall bool) error {
+func (container *Container) unmountVolumes(forceSyscall bool) error {
 var volumeMounts []mountPoint
 for _, mntPoint := range container.MountPoints {
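The exportRw helper added above hands its diff back through ioutils.NewReadCloserWrapper so that cleanup runs when the caller closes the archive. Below is a stdlib-only sketch of that wrapper pattern, assuming the Docker helper behaves as its name suggests; it is not the ioutils implementation.

```go
package main

import (
	"fmt"
	"io"
	"strings"
)

// readCloserWrapper pairs a Reader with a cleanup callback that runs on Close.
type readCloserWrapper struct {
	io.Reader
	closer func() error
}

func (w *readCloserWrapper) Close() error { return w.closer() }

func newReadCloserWrapper(r io.Reader, closer func() error) io.ReadCloser {
	return &readCloserWrapper{Reader: r, closer: closer}
}

func main() {
	rc := newReadCloserWrapper(strings.NewReader("rw layer tarball"), func() error {
		fmt.Println("cleanup: close the diff, unmount volumes, release locks")
		return nil
	})
	if _, err := io.ReadAll(rc); err != nil {
		fmt.Println("read error:", err)
	}
	rc.Close()
}
```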


@@ -7,12 +7,15 @@ import (
 "strings"
 "github.com/docker/docker/daemon/execdriver"
+"github.com/docker/docker/pkg/archive"
 )
-// This is deliberately empty on Windows as the default path will be set by
+// DefaultPathEnv is deliberately empty on Windows as the default path will be set by
 // the container. Docker has no context of what the default path should be.
 const DefaultPathEnv = ""
+// Container holds fields specific to the Windows implementation. See
+// CommonContainer for standard fields common to all containers.
 type Container struct {
 CommonContainer
@@ -23,14 +26,6 @@ func killProcessDirectly(container *Container) error {
 return nil
 }
-func (container *Container) setupContainerDns() error {
-return nil
-}
-func (container *Container) updateParentsHosts() error {
-return nil
-}
 func (container *Container) setupLinkedContainers() ([]string, error) {
 return nil, nil
 }
@@ -60,7 +55,7 @@ func populateCommand(c *Container, env []string) error {
 if !c.Config.NetworkDisabled {
 en.Interface = &execdriver.NetworkInterface{
 MacAddress: c.Config.MacAddress,
-Bridge: c.daemon.config.Bridge.VirtualSwitchName,
+Bridge: c.daemon.configStore.Bridge.VirtualSwitchName,
 PortBindings: c.hostConfig.PortBindings,
 // TODO Windows. Include IPAddress. There already is a
@@ -118,7 +113,7 @@ func populateCommand(c *Container, env []string) error {
 // TODO Windows: Factor out remainder of unused fields.
 c.command = &execdriver.Command{
 ID: c.ID,
-Rootfs: c.RootfsPath(),
+Rootfs: c.rootfsPath(),
 ReadonlyRootfs: c.hostConfig.ReadonlyRootfs,
 InitPath: "/.dockerinit",
 WorkingDir: c.Config.WorkingDir,
@@ -128,8 +123,8 @@ func populateCommand(c *Container, env []string) error {
 CapAdd: c.hostConfig.CapAdd.Slice(),
 CapDrop: c.hostConfig.CapDrop.Slice(),
 ProcessConfig: processConfig,
-ProcessLabel: c.GetProcessLabel(),
-MountLabel: c.GetMountLabel(),
+ProcessLabel: c.getProcessLabel(),
+MountLabel: c.getMountLabel(),
 FirstStart: !c.HasBeenStartedBefore,
 LayerFolder: layerFolder,
 LayerPaths: layerPaths,
@@ -138,28 +133,33 @@ func populateCommand(c *Container, env []string) error {
 return nil
 }
-// GetSize, return real size, virtual size
-func (container *Container) GetSize() (int64, int64) {
+// GetSize returns real size & virtual size
+func (container *Container) getSize() (int64, int64) {
 // TODO Windows
 return 0, 0
 }
-func (container *Container) AllocateNetwork() error {
+// allocateNetwork is a no-op on Windows.
+func (container *Container) allocateNetwork() error {
 return nil
 }
-func (container *Container) UpdateNetwork() error {
+func (container *Container) exportRw() (archive.Archive, error) {
+if container.IsRunning() {
+return nil, fmt.Errorf("Cannot export a running container.")
+}
+// TODO Windows. Implementation (different to Linux)
+return nil, nil
+}
+func (container *Container) updateNetwork() error {
 return nil
 }
-func (container *Container) ReleaseNetwork() {
+func (container *Container) releaseNetwork() {
 }
-func (container *Container) RestoreNetwork() error {
-return nil
-}
-func (container *Container) UnmountVolumes(forceSyscall bool) error {
+func (container *Container) unmountVolumes(forceSyscall bool) error {
 return nil
 }


@ -13,6 +13,7 @@ import (
"github.com/opencontainers/runc/libcontainer/label" "github.com/opencontainers/runc/libcontainer/label"
) )
// ContainerCreate takes configs and creates a container.
func (daemon *Daemon) ContainerCreate(name string, config *runconfig.Config, hostConfig *runconfig.HostConfig, adjustCPUShares bool) (*Container, []string, error) { func (daemon *Daemon) ContainerCreate(name string, config *runconfig.Config, hostConfig *runconfig.HostConfig, adjustCPUShares bool) (*Container, []string, error) {
if config == nil { if config == nil {
return nil, nil, fmt.Errorf("Config cannot be empty in order to create a container") return nil, nil, fmt.Errorf("Config cannot be empty in order to create a container")
@ -70,7 +71,7 @@ func (daemon *Daemon) Create(config *runconfig.Config, hostConfig *runconfig.Hos
hostConfig = &runconfig.HostConfig{} hostConfig = &runconfig.HostConfig{}
} }
if hostConfig.SecurityOpt == nil { if hostConfig.SecurityOpt == nil {
hostConfig.SecurityOpt, err = daemon.GenerateSecurityOpt(hostConfig.IpcMode, hostConfig.PidMode) hostConfig.SecurityOpt, err = daemon.generateSecurityOpt(hostConfig.IpcMode, hostConfig.PidMode)
if err != nil { if err != nil {
return nil, nil, err return nil, nil, err
} }
@ -104,15 +105,15 @@ func (daemon *Daemon) Create(config *runconfig.Config, hostConfig *runconfig.Hos
return nil, nil, err return nil, nil, err
} }
if err := container.ToDisk(); err != nil { if err := container.toDiskLocking(); err != nil {
logrus.Errorf("Error saving new container to disk: %v", err) logrus.Errorf("Error saving new container to disk: %v", err)
return nil, nil, err return nil, nil, err
} }
container.LogEvent("create") container.logEvent("create")
return container, warnings, nil return container, warnings, nil
} }
func (daemon *Daemon) GenerateSecurityOpt(ipcMode runconfig.IpcMode, pidMode runconfig.PidMode) ([]string, error) { func (daemon *Daemon) generateSecurityOpt(ipcMode runconfig.IpcMode, pidMode runconfig.PidMode) ([]string, error) {
if ipcMode.IsHost() || pidMode.IsHost() { if ipcMode.IsHost() || pidMode.IsHost() {
return label.DisableSecOpt(), nil return label.DisableSecOpt(), nil
} }
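The GenerateSecurityOpt to generateSecurityOpt rename follows the golint guideline behind most of this commit: helpers used only inside the package stay unexported, and anything exported must carry a doc comment starting with its name. A minimal sketch with made-up names:

package daemon

// GenerateToken is exported, so golint expects a doc comment that starts with
// the identifier's name (hypothetical example, not from this diff).
func GenerateToken() string { return generateToken() }

// generateToken is unexported: it is only reachable from inside the package,
// so it does not enlarge the public API surface that golint audits.
func generateToken() string { return "token" }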

View File

@ -1,3 +1,8 @@
// Package daemon exposes the functions that operate on the host server
// on which the Docker daemon is running.
//
// In implementing the various functions of the daemon, there is often
// a method-specific struct for configuring the runtime behavior.
package daemon package daemon
import ( import (
@ -19,6 +24,7 @@ import (
"github.com/docker/docker/daemon/execdriver" "github.com/docker/docker/daemon/execdriver"
"github.com/docker/docker/daemon/execdriver/execdrivers" "github.com/docker/docker/daemon/execdriver/execdrivers"
"github.com/docker/docker/daemon/graphdriver" "github.com/docker/docker/daemon/graphdriver"
// register vfs
_ "github.com/docker/docker/daemon/graphdriver/vfs" _ "github.com/docker/docker/daemon/graphdriver/vfs"
"github.com/docker/docker/daemon/logger" "github.com/docker/docker/daemon/logger"
"github.com/docker/docker/daemon/network" "github.com/docker/docker/daemon/network"
@ -47,7 +53,7 @@ var (
validContainerNameChars = `[a-zA-Z0-9][a-zA-Z0-9_.-]` validContainerNameChars = `[a-zA-Z0-9][a-zA-Z0-9_.-]`
validContainerNamePattern = regexp.MustCompile(`^/?` + validContainerNameChars + `+$`) validContainerNamePattern = regexp.MustCompile(`^/?` + validContainerNameChars + `+$`)
ErrSystemNotSupported = errors.New("The Docker daemon is not supported on this platform.") errSystemNotSupported = errors.New("The Docker daemon is not supported on this platform.")
) )
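The same reasoning applies to package-level errors: ErrSystemNotSupported is only consumed inside the package, so it becomes errSystemNotSupported. For comparison, a small sketch of the two conventions (the names here are illustrative only):

package daemon

import "errors"

// ErrNotSupported is exported, so golint wants the Err prefix and a doc comment.
var ErrNotSupported = errors.New("daemon: not supported on this platform")

// unexported errors use a lowercase err prefix and need no doc comment.
var errInternal = errors.New("daemon: internal error")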
type contStore struct { type contStore struct {
@ -81,10 +87,11 @@ func (c *contStore) List() []*Container {
containers.Add(cont) containers.Add(cont)
} }
c.Unlock() c.Unlock()
containers.Sort() containers.sort()
return *containers return *containers
} }
// Daemon holds information about the Docker daemon.
type Daemon struct { type Daemon struct {
ID string ID string
repository string repository string
@ -94,8 +101,8 @@ type Daemon struct {
graph *graph.Graph graph *graph.Graph
repositories *graph.TagStore repositories *graph.TagStore
idIndex *truncindex.TruncIndex idIndex *truncindex.TruncIndex
config *Config configStore *Config
containerGraph *graphdb.Database containerGraphDB *graphdb.Database
driver graphdriver.Driver driver graphdriver.Driver
execDriver execdriver.Driver execDriver execdriver.Driver
statsCollector *statsCollector statsCollector *statsCollector
@ -127,11 +134,11 @@ func (daemon *Daemon) Get(prefixOrName string) (*Container, error) {
return containerByName, nil return containerByName, nil
} }
containerId, indexError := daemon.idIndex.Get(prefixOrName) containerID, indexError := daemon.idIndex.Get(prefixOrName)
if indexError != nil { if indexError != nil {
return nil, indexError return nil, indexError
} }
return daemon.containers.Get(containerId), nil return daemon.containers.Get(containerID), nil
} }
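containerId becoming containerID is golint's initialism rule at work: abbreviations such as ID, API, and URL keep a uniform case. A trivial sketch:

package daemon

// golint prefers containerID over containerId and apiURL over apiUrl.
type lookupKey struct {
	containerID string
	apiURL      string
}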
// Exists returns a true if a container of the specified ID or name exists, // Exists returns a true if a container of the specified ID or name exists,
@ -150,7 +157,7 @@ func (daemon *Daemon) containerRoot(id string) string {
func (daemon *Daemon) load(id string) (*Container, error) { func (daemon *Daemon) load(id string) (*Container, error) {
container := daemon.newBaseContainer(id) container := daemon.newBaseContainer(id)
if err := container.FromDisk(); err != nil { if err := container.fromDisk(); err != nil {
return nil, err return nil, err
} }
@ -200,8 +207,7 @@ func (daemon *Daemon) register(container *Container, updateSuffixarray bool) err
if container.IsRunning() { if container.IsRunning() {
logrus.Debugf("killing old running container %s", container.ID) logrus.Debugf("killing old running container %s", container.ID)
// Set exit code to 128 + SIGKILL (9) to properly represent unsuccessful exit // Set exit code to 128 + SIGKILL (9) to properly represent unsuccessful exit
container.SetStopped(&execdriver.ExitStatus{ExitCode: 137}) container.setStoppedLocking(&execdriver.ExitStatus{ExitCode: 137})
// use the current driver and ensure that the container is dead x.x // use the current driver and ensure that the container is dead x.x
cmd := &execdriver.Command{ cmd := &execdriver.Command{
ID: container.ID, ID: container.ID,
@ -211,7 +217,7 @@ func (daemon *Daemon) register(container *Container, updateSuffixarray bool) err
if err := container.Unmount(); err != nil { if err := container.Unmount(); err != nil {
logrus.Debugf("unmount error %s", err) logrus.Debugf("unmount error %s", err)
} }
if err := container.ToDisk(); err != nil { if err := container.toDiskLocking(); err != nil {
logrus.Errorf("Error saving stopped state to disk: %v", err) logrus.Errorf("Error saving stopped state to disk: %v", err)
} }
} }
@ -235,7 +241,7 @@ func (daemon *Daemon) ensureName(container *Container) error {
} }
container.Name = name container.Name = name
if err := container.ToDisk(); err != nil { if err := container.toDiskLocking(); err != nil {
logrus.Errorf("Error saving container name to disk: %v", err) logrus.Errorf("Error saving container name to disk: %v", err)
} }
} }
@ -283,7 +289,7 @@ func (daemon *Daemon) restore() error {
} }
} }
if entities := daemon.containerGraph.List("/", -1); entities != nil { if entities := daemon.containerGraphDB.List("/", -1); entities != nil {
for _, p := range entities.Paths() { for _, p := range entities.Paths() {
if !debug && logrus.GetLevel() == logrus.InfoLevel { if !debug && logrus.GetLevel() == logrus.InfoLevel {
fmt.Print(".") fmt.Print(".")
@ -318,7 +324,7 @@ func (daemon *Daemon) restore() error {
// check the restart policy on the containers and restart any container with // check the restart policy on the containers and restart any container with
// the restart policy of "always" // the restart policy of "always"
if daemon.config.AutoRestart && container.shouldRestart() { if daemon.configStore.AutoRestart && container.shouldRestart() {
logrus.Debugf("Starting container %s", container.ID) logrus.Debugf("Starting container %s", container.ID)
if err := container.Start(); err != nil { if err := container.Start(); err != nil {
@ -351,7 +357,7 @@ func (daemon *Daemon) mergeAndVerifyConfig(config *runconfig.Config, img *image.
return nil return nil
} }
func (daemon *Daemon) generateIdAndName(name string) (string, string, error) { func (daemon *Daemon) generateIDAndName(name string) (string, string, error) {
var ( var (
err error err error
id = stringid.GenerateNonCryptoID() id = stringid.GenerateNonCryptoID()
@ -380,7 +386,7 @@ func (daemon *Daemon) reserveName(id, name string) (string, error) {
name = "/" + name name = "/" + name
} }
if _, err := daemon.containerGraph.Set(name, id); err != nil { if _, err := daemon.containerGraphDB.Set(name, id); err != nil {
if !graphdb.IsNonUniqueNameError(err) { if !graphdb.IsNonUniqueNameError(err) {
return "", err return "", err
} }
@ -392,7 +398,7 @@ func (daemon *Daemon) reserveName(id, name string) (string, error) {
} }
// Remove name and continue starting the container // Remove name and continue starting the container
if err := daemon.containerGraph.Delete(name); err != nil { if err := daemon.containerGraphDB.Delete(name); err != nil {
return "", err return "", err
} }
} else { } else {
@ -413,7 +419,7 @@ func (daemon *Daemon) generateNewName(id string) (string, error) {
name = "/" + name name = "/" + name
} }
if _, err := daemon.containerGraph.Set(name, id); err != nil { if _, err := daemon.containerGraphDB.Set(name, id); err != nil {
if !graphdb.IsNonUniqueNameError(err) { if !graphdb.IsNonUniqueNameError(err) {
return "", err return "", err
} }
@ -423,7 +429,7 @@ func (daemon *Daemon) generateNewName(id string) (string, error) {
} }
name = "/" + stringid.TruncateID(id) name = "/" + stringid.TruncateID(id)
if _, err := daemon.containerGraph.Set(name, id); err != nil { if _, err := daemon.containerGraphDB.Set(name, id); err != nil {
return "", err return "", err
} }
return name, nil return name, nil
@ -460,7 +466,7 @@ func (daemon *Daemon) newContainer(name string, config *runconfig.Config, imgID
id string id string
err error err error
) )
id, name, err = daemon.generateIdAndName(name) id, name, err = daemon.generateIDAndName(name)
if err != nil { if err != nil {
return nil, err return nil, err
} }
@ -483,6 +489,9 @@ func (daemon *Daemon) newContainer(name string, config *runconfig.Config, imgID
return &base, err return &base, err
} }
// GetFullContainerName returns the canonical, "/"-prefixed form of a container
// name, which is how container names are recorded in the name graph on disk.
func GetFullContainerName(name string) (string, error) { func GetFullContainerName(name string) (string, error) {
if name == "" { if name == "" {
return "", fmt.Errorf("Container name cannot be empty") return "", fmt.Errorf("Container name cannot be empty")
@ -493,12 +502,13 @@ func GetFullContainerName(name string) (string, error) {
return name, nil return name, nil
} }
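Going by how names are handled elsewhere in this diff (empty names are rejected and stored names carry a leading "/"), a simplified, self-contained stand-in for this helper might look as follows; it is a sketch of the observable behavior, not the actual implementation:

package daemon

import (
	"fmt"
	"strings"
)

// fullContainerName is a simplified stand-in for GetFullContainerName: reject
// empty names and make sure the result carries a single leading slash.
func fullContainerName(name string) (string, error) {
	if name == "" {
		return "", fmt.Errorf("Container name cannot be empty")
	}
	if !strings.HasPrefix(name, "/") {
		name = "/" + name
	}
	return name, nil
}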
// GetByName returns a container given a name.
func (daemon *Daemon) GetByName(name string) (*Container, error) { func (daemon *Daemon) GetByName(name string) (*Container, error) {
fullName, err := GetFullContainerName(name) fullName, err := GetFullContainerName(name)
if err != nil { if err != nil {
return nil, err return nil, err
} }
entity := daemon.containerGraph.Get(fullName) entity := daemon.containerGraphDB.Get(fullName)
if entity == nil { if entity == nil {
return nil, fmt.Errorf("Could not find entity for %s", name) return nil, fmt.Errorf("Could not find entity for %s", name)
} }
@ -509,14 +519,17 @@ func (daemon *Daemon) GetByName(name string) (*Container, error) {
return e, nil return e, nil
} }
func (daemon *Daemon) Children(name string) (map[string]*Container, error) { // children returns all child containers of the container with the
// given name. The containers are returned as a map from the container
// name to a pointer to Container.
func (daemon *Daemon) children(name string) (map[string]*Container, error) {
name, err := GetFullContainerName(name) name, err := GetFullContainerName(name)
if err != nil { if err != nil {
return nil, err return nil, err
} }
children := make(map[string]*Container) children := make(map[string]*Container)
err = daemon.containerGraph.Walk(name, func(p string, e *graphdb.Entity) error { err = daemon.containerGraphDB.Walk(name, func(p string, e *graphdb.Entity) error {
c, err := daemon.Get(e.ID()) c, err := daemon.Get(e.ID())
if err != nil { if err != nil {
return err return err
@ -531,24 +544,28 @@ func (daemon *Daemon) Children(name string) (map[string]*Container, error) {
return children, nil return children, nil
} }
func (daemon *Daemon) Parents(name string) ([]string, error) { // parents returns the names of the parent containers of the container
// with the given name.
func (daemon *Daemon) parents(name string) ([]string, error) {
name, err := GetFullContainerName(name) name, err := GetFullContainerName(name)
if err != nil { if err != nil {
return nil, err return nil, err
} }
return daemon.containerGraph.Parents(name) return daemon.containerGraphDB.Parents(name)
} }
func (daemon *Daemon) RegisterLink(parent, child *Container, alias string) error { func (daemon *Daemon) registerLink(parent, child *Container, alias string) error {
fullName := filepath.Join(parent.Name, alias) fullName := filepath.Join(parent.Name, alias)
if !daemon.containerGraph.Exists(fullName) { if !daemon.containerGraphDB.Exists(fullName) {
_, err := daemon.containerGraph.Set(fullName, child.ID) _, err := daemon.containerGraphDB.Set(fullName, child.ID)
return err return err
} }
return nil return nil
} }
// NewDaemon sets up everything for the daemon to be able to service
// requests from the webserver.
func NewDaemon(config *Config, registryService *registry.Service) (daemon *Daemon, err error) { func NewDaemon(config *Config, registryService *registry.Service) (daemon *Daemon, err error) {
setDefaultMtu(config) setDefaultMtu(config)
@ -562,7 +579,7 @@ func NewDaemon(config *Config, registryService *registry.Service) (daemon *Daemo
// Verify the platform is supported as a daemon // Verify the platform is supported as a daemon
if !platformSupported { if !platformSupported {
return nil, ErrSystemNotSupported return nil, errSystemNotSupported
} }
// Validate platform-specific requirements // Validate platform-specific requirements
@ -705,7 +722,7 @@ func NewDaemon(config *Config, registryService *registry.Service) (daemon *Daemo
return nil, err return nil, err
} }
d.containerGraph = graph d.containerGraphDB = graph
var sysInitPath string var sysInitPath string
if config.ExecDriver == "lxc" { if config.ExecDriver == "lxc" {
@ -735,7 +752,7 @@ func NewDaemon(config *Config, registryService *registry.Service) (daemon *Daemo
d.graph = g d.graph = g
d.repositories = repositories d.repositories = repositories
d.idIndex = truncindex.NewTruncIndex([]string{}) d.idIndex = truncindex.NewTruncIndex([]string{})
d.config = config d.configStore = config
d.sysInitPath = sysInitPath d.sysInitPath = sysInitPath
d.execDriver = ed d.execDriver = ed
d.statsCollector = newStatsCollector(1 * time.Second) d.statsCollector = newStatsCollector(1 * time.Second)
@ -753,6 +770,7 @@ func NewDaemon(config *Config, registryService *registry.Service) (daemon *Daemo
return d, nil return d, nil
} }
// Shutdown stops the daemon.
func (daemon *Daemon) Shutdown() error { func (daemon *Daemon) Shutdown() error {
daemon.shutdown = true daemon.shutdown = true
if daemon.containers != nil { if daemon.containers != nil {
@ -767,7 +785,7 @@ func (daemon *Daemon) Shutdown() error {
go func() { go func() {
defer group.Done() defer group.Done()
// TODO(windows): Handle docker restart with paused containers // TODO(windows): Handle docker restart with paused containers
if c.IsPaused() { if c.isPaused() {
// To terminate a process in freezer cgroup, we should send // To terminate a process in freezer cgroup, we should send
// SIGTERM to this process then unfreeze it, and the process will // SIGTERM to this process then unfreeze it, and the process will
// force to terminate immediately. // force to terminate immediately.
@ -777,11 +795,11 @@ func (daemon *Daemon) Shutdown() error {
logrus.Warnf("System does not support SIGTERM") logrus.Warnf("System does not support SIGTERM")
return return
} }
if err := daemon.Kill(c, int(sig)); err != nil { if err := daemon.kill(c, int(sig)); err != nil {
logrus.Debugf("sending SIGTERM to container %s with error: %v", c.ID, err) logrus.Debugf("sending SIGTERM to container %s with error: %v", c.ID, err)
return return
} }
if err := c.Unpause(); err != nil { if err := c.unpause(); err != nil {
logrus.Debugf("Failed to unpause container %s with error: %v", c.ID, err) logrus.Debugf("Failed to unpause container %s with error: %v", c.ID, err)
return return
} }
@ -792,7 +810,7 @@ func (daemon *Daemon) Shutdown() error {
logrus.Warnf("System does not support SIGKILL") logrus.Warnf("System does not support SIGKILL")
return return
} }
daemon.Kill(c, int(sig)) daemon.kill(c, int(sig))
} }
} else { } else {
// If container failed to exit in 10 seconds of SIGTERM, then using the force // If container failed to exit in 10 seconds of SIGTERM, then using the force
@ -813,8 +831,8 @@ func (daemon *Daemon) Shutdown() error {
} }
} }
if daemon.containerGraph != nil { if daemon.containerGraphDB != nil {
if err := daemon.containerGraph.Close(); err != nil { if err := daemon.containerGraphDB.Close(); err != nil {
logrus.Errorf("Error during container graph.Close(): %v", err) logrus.Errorf("Error during container graph.Close(): %v", err)
} }
} }
@ -828,8 +846,10 @@ func (daemon *Daemon) Shutdown() error {
return nil return nil
} }
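Shutdown stops every container in its own goroutine and waits on a sync.WaitGroup before tearing down the rest of the daemon. The fan-out and wait skeleton in isolation (the container names and the stop logic are placeholders):

package main

import (
	"fmt"
	"sync"
)

func main() {
	containers := []string{"web", "db", "cache"}

	var group sync.WaitGroup
	for _, c := range containers {
		group.Add(1)
		go func(id string) {
			defer group.Done()
			// stand-in for: send SIGTERM, wait, escalate to SIGKILL on timeout
			fmt.Println("stopping", id)
		}(c)
	}
	group.Wait()
}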
// Mount sets container.basefs to the path of the container's mounted
// root filesystem, as returned by the graph driver.
func (daemon *Daemon) Mount(container *Container) error { func (daemon *Daemon) Mount(container *Container) error {
dir, err := daemon.driver.Get(container.ID, container.GetMountLabel()) dir, err := daemon.driver.Get(container.ID, container.getMountLabel())
if err != nil { if err != nil {
return fmt.Errorf("Error getting container %s from driver %s: %s", container.ID, daemon.driver, err) return fmt.Errorf("Error getting container %s from driver %s: %s", container.ID, daemon.driver, err)
} }
@ -848,24 +868,24 @@ func (daemon *Daemon) Mount(container *Container) error {
return nil return nil
} }
func (daemon *Daemon) Unmount(container *Container) error { func (daemon *Daemon) unmount(container *Container) error {
daemon.driver.Put(container.ID) daemon.driver.Put(container.ID)
return nil return nil
} }
func (daemon *Daemon) Run(c *Container, pipes *execdriver.Pipes, startCallback execdriver.StartCallback) (execdriver.ExitStatus, error) { func (daemon *Daemon) run(c *Container, pipes *execdriver.Pipes, startCallback execdriver.StartCallback) (execdriver.ExitStatus, error) {
return daemon.execDriver.Run(c.command, pipes, startCallback) return daemon.execDriver.Run(c.command, pipes, startCallback)
} }
func (daemon *Daemon) Kill(c *Container, sig int) error { func (daemon *Daemon) kill(c *Container, sig int) error {
return daemon.execDriver.Kill(c.command, sig) return daemon.execDriver.Kill(c.command, sig)
} }
func (daemon *Daemon) Stats(c *Container) (*execdriver.ResourceStats, error) { func (daemon *Daemon) stats(c *Container) (*execdriver.ResourceStats, error) {
return daemon.execDriver.Stats(c.ID) return daemon.execDriver.Stats(c.ID)
} }
func (daemon *Daemon) SubscribeToContainerStats(name string) (chan interface{}, error) { func (daemon *Daemon) subscribeToContainerStats(name string) (chan interface{}, error) {
c, err := daemon.Get(name) c, err := daemon.Get(name)
if err != nil { if err != nil {
return nil, err return nil, err
@ -874,7 +894,7 @@ func (daemon *Daemon) SubscribeToContainerStats(name string) (chan interface{},
return ch, nil return ch, nil
} }
func (daemon *Daemon) UnsubscribeToContainerStats(name string, ch chan interface{}) error { func (daemon *Daemon) unsubscribeToContainerStats(name string, ch chan interface{}) error {
c, err := daemon.Get(name) c, err := daemon.Get(name)
if err != nil { if err != nil {
return err return err
@ -883,12 +903,12 @@ func (daemon *Daemon) UnsubscribeToContainerStats(name string, ch chan interface
return nil return nil
} }
func (daemon *Daemon) Changes(container *Container) ([]archive.Change, error) { func (daemon *Daemon) changes(container *Container) ([]archive.Change, error) {
initID := fmt.Sprintf("%s-init", container.ID) initID := fmt.Sprintf("%s-init", container.ID)
return daemon.driver.Changes(container.ID, initID) return daemon.driver.Changes(container.ID, initID)
} }
func (daemon *Daemon) Diff(container *Container) (archive.Archive, error) { func (daemon *Daemon) diff(container *Container) (archive.Archive, error) {
initID := fmt.Sprintf("%s-init", container.ID) initID := fmt.Sprintf("%s-init", container.ID)
return daemon.driver.Diff(container.ID, initID) return daemon.driver.Diff(container.ID, initID)
} }
@ -923,6 +943,8 @@ func (daemon *Daemon) createRootfs(container *Container) error {
return nil return nil
} }
// Graph returns daemon.graph and is slated for removal.
//
// FIXME: this is a convenience function for integration tests // FIXME: this is a convenience function for integration tests
// which need direct access to daemon.graph. // which need direct access to daemon.graph.
// Once the tests switch to using engine and jobs, this method // Once the tests switch to using engine and jobs, this method
@ -931,30 +953,39 @@ func (daemon *Daemon) Graph() *graph.Graph {
return daemon.graph return daemon.graph
} }
// Repositories returns all repositories.
func (daemon *Daemon) Repositories() *graph.TagStore { func (daemon *Daemon) Repositories() *graph.TagStore {
return daemon.repositories return daemon.repositories
} }
func (daemon *Daemon) Config() *Config { func (daemon *Daemon) config() *Config {
return daemon.config return daemon.configStore
} }
func (daemon *Daemon) SystemInitPath() string { func (daemon *Daemon) systemInitPath() string {
return daemon.sysInitPath return daemon.sysInitPath
} }
// GraphDriver returns the currently used driver for processing
// container layers.
func (daemon *Daemon) GraphDriver() graphdriver.Driver { func (daemon *Daemon) GraphDriver() graphdriver.Driver {
return daemon.driver return daemon.driver
} }
// ExecutionDriver returns the currently used driver for creating and
// starting execs in a container.
func (daemon *Daemon) ExecutionDriver() execdriver.Driver { func (daemon *Daemon) ExecutionDriver() execdriver.Driver {
return daemon.execDriver return daemon.execDriver
} }
func (daemon *Daemon) ContainerGraph() *graphdb.Database { func (daemon *Daemon) containerGraph() *graphdb.Database {
return daemon.containerGraph return daemon.containerGraphDB
} }
// ImageGetCached returns the earliest created image that is a child
// of the image with imgID and that was created with the same config.
// It returns nil if no such child is found, and an error if the parent
// image cannot be found.
func (daemon *Daemon) ImageGetCached(imgID string, config *runconfig.Config) (*image.Image, error) { func (daemon *Daemon) ImageGetCached(imgID string, config *runconfig.Config) (*image.Image, error) {
// Retrieve all images // Retrieve all images
images := daemon.Graph().Map() images := daemon.Graph().Map()
@ -1010,7 +1041,7 @@ func (daemon *Daemon) setHostConfig(container *Container, hostConfig *runconfig.
container.Lock() container.Lock()
defer container.Unlock() defer container.Unlock()
// Register any links from the host config before starting the container // Register any links from the host config before starting the container
if err := daemon.RegisterLinks(container, hostConfig); err != nil { if err := daemon.registerLinks(container, hostConfig); err != nil {
return err return err
} }

View File

@ -3,5 +3,6 @@
package daemon package daemon
import ( import (
// register the btrfs graphdriver
_ "github.com/docker/docker/daemon/graphdriver/btrfs" _ "github.com/docker/docker/daemon/graphdriver/btrfs"
) )
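The underscore import exists purely for its registration side effect, and the new comment satisfies golint's request that blank imports be explained. The standard library uses the same pattern for image decoders; a runnable example:

package main

import (
	"fmt"
	"image"
	"os"

	// imported for its side effect: init() registers the PNG format with image.Decode
	_ "image/png"
)

func main() {
	f, err := os.Open("picture.png") // any PNG file; the name is a placeholder
	if err != nil {
		fmt.Println(err)
		return
	}
	defer f.Close()

	_, format, err := image.Decode(f)
	if err != nil {
		fmt.Println(err)
		return
	}
	fmt.Println("decoded a", format, "image")
}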

View File

@ -3,5 +3,6 @@
package daemon package daemon
import ( import (
// register the devmapper graphdriver
_ "github.com/docker/docker/daemon/graphdriver/devmapper" _ "github.com/docker/docker/daemon/graphdriver/devmapper"
) )

View File

@ -3,5 +3,6 @@
package daemon package daemon
import ( import (
// register the overlay graphdriver
_ "github.com/docker/docker/daemon/graphdriver/overlay" _ "github.com/docker/docker/daemon/graphdriver/overlay"
) )

View File

@ -88,7 +88,7 @@ func TestGet(t *testing.T) {
daemon := &Daemon{ daemon := &Daemon{
containers: store, containers: store,
idIndex: index, idIndex: index,
containerGraph: graph, containerGraphDB: graph,
} }
if container, _ := daemon.Get("3cdbd1aa394fd68559fd1441d6eff2ab7c1e6363582c82febfaa8045df3bd8de"); container != c2 { if container, _ := daemon.Get("3cdbd1aa394fd68559fd1441d6eff2ab7c1e6363582c82febfaa8045df3bd8de"); container != c2 {
@ -130,15 +130,15 @@ func TestLoadWithVolume(t *testing.T) {
} }
defer os.RemoveAll(tmp) defer os.RemoveAll(tmp)
containerId := "d59df5276e7b219d510fe70565e0404bc06350e0d4b43fe961f22f339980170e" containerID := "d59df5276e7b219d510fe70565e0404bc06350e0d4b43fe961f22f339980170e"
containerPath := filepath.Join(tmp, containerId) containerPath := filepath.Join(tmp, containerID)
if err := os.MkdirAll(containerPath, 0755); err != nil { if err := os.MkdirAll(containerPath, 0755); err != nil {
t.Fatal(err) t.Fatal(err)
} }
hostVolumeId := stringid.GenerateNonCryptoID() hostVolumeID := stringid.GenerateNonCryptoID()
vfsPath := filepath.Join(tmp, "vfs", "dir", hostVolumeId) vfsPath := filepath.Join(tmp, "vfs", "dir", hostVolumeID)
volumePath := filepath.Join(tmp, "volumes", hostVolumeId) volumePath := filepath.Join(tmp, "volumes", hostVolumeID)
if err := os.MkdirAll(vfsPath, 0755); err != nil { if err := os.MkdirAll(vfsPath, 0755); err != nil {
t.Fatal(err) t.Fatal(err)
@ -187,7 +187,7 @@ func TestLoadWithVolume(t *testing.T) {
} }
defer volumedrivers.Unregister(volume.DefaultDriverName) defer volumedrivers.Unregister(volume.DefaultDriverName)
c, err := daemon.load(containerId) c, err := daemon.load(containerID)
if err != nil { if err != nil {
t.Fatal(err) t.Fatal(err)
} }
@ -202,8 +202,8 @@ func TestLoadWithVolume(t *testing.T) {
} }
m := c.MountPoints["/vol1"] m := c.MountPoints["/vol1"]
if m.Name != hostVolumeId { if m.Name != hostVolumeID {
t.Fatalf("Expected mount name to be %s, was %s\n", hostVolumeId, m.Name) t.Fatalf("Expected mount name to be %s, was %s\n", hostVolumeID, m.Name)
} }
if m.Destination != "/vol1" { if m.Destination != "/vol1" {
@ -235,8 +235,8 @@ func TestLoadWithBindMount(t *testing.T) {
} }
defer os.RemoveAll(tmp) defer os.RemoveAll(tmp)
containerId := "d59df5276e7b219d510fe70565e0404bc06350e0d4b43fe961f22f339980170e" containerID := "d59df5276e7b219d510fe70565e0404bc06350e0d4b43fe961f22f339980170e"
containerPath := filepath.Join(tmp, containerId) containerPath := filepath.Join(tmp, containerID)
if err = os.MkdirAll(containerPath, 0755); err != nil { if err = os.MkdirAll(containerPath, 0755); err != nil {
t.Fatal(err) t.Fatal(err)
} }
@ -275,7 +275,7 @@ func TestLoadWithBindMount(t *testing.T) {
} }
defer volumedrivers.Unregister(volume.DefaultDriverName) defer volumedrivers.Unregister(volume.DefaultDriverName)
c, err := daemon.load(containerId) c, err := daemon.load(containerID)
if err != nil { if err != nil {
t.Fatal(err) t.Fatal(err)
} }
@ -314,14 +314,14 @@ func TestLoadWithVolume17RC(t *testing.T) {
} }
defer os.RemoveAll(tmp) defer os.RemoveAll(tmp)
containerId := "d59df5276e7b219d510fe70565e0404bc06350e0d4b43fe961f22f339980170e" containerID := "d59df5276e7b219d510fe70565e0404bc06350e0d4b43fe961f22f339980170e"
containerPath := filepath.Join(tmp, containerId) containerPath := filepath.Join(tmp, containerID)
if err := os.MkdirAll(containerPath, 0755); err != nil { if err := os.MkdirAll(containerPath, 0755); err != nil {
t.Fatal(err) t.Fatal(err)
} }
hostVolumeId := "6a3c03fc4a4e588561a543cc3bdd50089e27bd11bbb0e551e19bf735e2514101" hostVolumeID := "6a3c03fc4a4e588561a543cc3bdd50089e27bd11bbb0e551e19bf735e2514101"
volumePath := filepath.Join(tmp, "volumes", hostVolumeId) volumePath := filepath.Join(tmp, "volumes", hostVolumeID)
if err := os.MkdirAll(volumePath, 0755); err != nil { if err := os.MkdirAll(volumePath, 0755); err != nil {
t.Fatal(err) t.Fatal(err)
@ -366,7 +366,7 @@ func TestLoadWithVolume17RC(t *testing.T) {
} }
defer volumedrivers.Unregister(volume.DefaultDriverName) defer volumedrivers.Unregister(volume.DefaultDriverName)
c, err := daemon.load(containerId) c, err := daemon.load(containerID)
if err != nil { if err != nil {
t.Fatal(err) t.Fatal(err)
} }
@ -381,8 +381,8 @@ func TestLoadWithVolume17RC(t *testing.T) {
} }
m := c.MountPoints["/vol1"] m := c.MountPoints["/vol1"]
if m.Name != hostVolumeId { if m.Name != hostVolumeID {
t.Fatalf("Expected mount name to be %s, was %s\n", hostVolumeId, m.Name) t.Fatalf("Expected mount name to be %s, was %s\n", hostVolumeID, m.Name)
} }
if m.Destination != "/vol1" { if m.Destination != "/vol1" {
@ -414,15 +414,15 @@ func TestRemoveLocalVolumesFollowingSymlinks(t *testing.T) {
} }
defer os.RemoveAll(tmp) defer os.RemoveAll(tmp)
containerId := "d59df5276e7b219d510fe70565e0404bc06350e0d4b43fe961f22f339980170e" containerID := "d59df5276e7b219d510fe70565e0404bc06350e0d4b43fe961f22f339980170e"
containerPath := filepath.Join(tmp, containerId) containerPath := filepath.Join(tmp, containerID)
if err := os.MkdirAll(containerPath, 0755); err != nil { if err := os.MkdirAll(containerPath, 0755); err != nil {
t.Fatal(err) t.Fatal(err)
} }
hostVolumeId := stringid.GenerateNonCryptoID() hostVolumeID := stringid.GenerateNonCryptoID()
vfsPath := filepath.Join(tmp, "vfs", "dir", hostVolumeId) vfsPath := filepath.Join(tmp, "vfs", "dir", hostVolumeID)
volumePath := filepath.Join(tmp, "volumes", hostVolumeId) volumePath := filepath.Join(tmp, "volumes", hostVolumeID)
if err := os.MkdirAll(vfsPath, 0755); err != nil { if err := os.MkdirAll(vfsPath, 0755); err != nil {
t.Fatal(err) t.Fatal(err)
@ -471,7 +471,7 @@ func TestRemoveLocalVolumesFollowingSymlinks(t *testing.T) {
} }
defer volumedrivers.Unregister(volume.DefaultDriverName) defer volumedrivers.Unregister(volume.DefaultDriverName)
c, err := daemon.load(containerId) c, err := daemon.load(containerID)
if err != nil { if err != nil {
t.Fatal(err) t.Fatal(err)
} }
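All of these tests share one fixture pattern: build a temporary container directory, defer its removal, and fail fast with t.Fatal. A condensed stand-alone version (the final step is only sketched in a comment):

package daemon

import (
	"io/ioutil"
	"os"
	"path/filepath"
	"testing"
)

func TestFixtureLayout(t *testing.T) {
	tmp, err := ioutil.TempDir("", "docker-daemon-test")
	if err != nil {
		t.Fatal(err)
	}
	defer os.RemoveAll(tmp)

	containerID := "d59df5276e7b219d510fe70565e0404bc06350e0d4b43fe961f22f339980170e"
	if err := os.MkdirAll(filepath.Join(tmp, containerID), 0755); err != nil {
		t.Fatal(err)
	}
	// ...write a config.json here and exercise daemon.load(containerID)...
}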

View File

@ -63,7 +63,7 @@ func parseSecurityOpt(container *Container, config *runconfig.HostConfig) error
return err return err
} }
func CheckKernelVersion(k, major, minor int) bool { func checkKernelVersion(k, major, minor int) bool {
if v, err := kernel.GetKernelVersion(); err != nil { if v, err := kernel.GetKernelVersion(); err != nil {
logrus.Warnf("%s", err) logrus.Warnf("%s", err)
} else { } else {
@ -82,7 +82,7 @@ func checkKernel() error {
// without actually causing a kernel panic, so we need this workaround until // without actually causing a kernel panic, so we need this workaround until
// the circumstances of pre-3.10 crashes are clearer. // the circumstances of pre-3.10 crashes are clearer.
// For details see https://github.com/docker/docker/issues/407 // For details see https://github.com/docker/docker/issues/407
if !CheckKernelVersion(3, 10, 0) { if !checkKernelVersion(3, 10, 0) {
v, _ := kernel.GetKernelVersion() v, _ := kernel.GetKernelVersion()
if os.Getenv("DOCKER_NOWARN_KERNEL_VERSION") == "" { if os.Getenv("DOCKER_NOWARN_KERNEL_VERSION") == "" {
logrus.Warnf("Your Linux kernel version %s can be unstable running docker. Please upgrade your kernel to 3.10.0.", v.String()) logrus.Warnf("Your Linux kernel version %s can be unstable running docker. Please upgrade your kernel to 3.10.0.", v.String())
@ -161,7 +161,7 @@ func verifyPlatformContainerSettings(daemon *Daemon, hostConfig *runconfig.HostC
logrus.Warnf("Your kernel does not support kernel memory limit capabilities. Limitation discarded.") logrus.Warnf("Your kernel does not support kernel memory limit capabilities. Limitation discarded.")
hostConfig.KernelMemory = 0 hostConfig.KernelMemory = 0
} }
if hostConfig.KernelMemory > 0 && !CheckKernelVersion(4, 0, 0) { if hostConfig.KernelMemory > 0 && !checkKernelVersion(4, 0, 0) {
warnings = append(warnings, "You specified a kernel memory limit on a kernel older than 4.0. Kernel memory limits are experimental on older kernels, it won't work as expected and can cause your system to be unstable.") warnings = append(warnings, "You specified a kernel memory limit on a kernel older than 4.0. Kernel memory limits are experimental on older kernels, it won't work as expected and can cause your system to be unstable.")
logrus.Warnf("You specified a kernel memory limit on a kernel older than 4.0. Kernel memory limits are experimental on older kernels, it won't work as expected and can cause your system to be unstable.") logrus.Warnf("You specified a kernel memory limit on a kernel older than 4.0. Kernel memory limits are experimental on older kernels, it won't work as expected and can cause your system to be unstable.")
} }
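checkKernelVersion boils down to a lexicographic comparison of a (kernel, major, minor) triple against the parsed running version. A self-contained sketch of that comparison, assuming a plain struct instead of the kernel package's own type:

package daemon

// parsedKernel is a stand-in for the kernel package's version type.
type parsedKernel struct {
	Kernel, Major, Minor int
}

// atLeast compares (kernel, major, minor) triples lexicographically, which is
// all a check like checkKernelVersion needs once the running version is parsed.
func (v parsedKernel) atLeast(kernel, major, minor int) bool {
	if v.Kernel != kernel {
		return v.Kernel > kernel
	}
	if v.Major != major {
		return v.Major > major
	}
	return v.Minor >= minor
}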
@ -194,7 +194,6 @@ func verifyPlatformContainerSettings(daemon *Daemon, hostConfig *runconfig.HostC
if hostConfig.BlkioWeight > 0 && (hostConfig.BlkioWeight < 10 || hostConfig.BlkioWeight > 1000) { if hostConfig.BlkioWeight > 0 && (hostConfig.BlkioWeight < 10 || hostConfig.BlkioWeight > 1000) {
return warnings, fmt.Errorf("Range of blkio weight is from 10 to 1000.") return warnings, fmt.Errorf("Range of blkio weight is from 10 to 1000.")
} }
if hostConfig.OomKillDisable && !sysInfo.OomKillDisable { if hostConfig.OomKillDisable && !sysInfo.OomKillDisable {
hostConfig.OomKillDisable = false hostConfig.OomKillDisable = false
return warnings, fmt.Errorf("Your kernel does not support oom kill disable.") return warnings, fmt.Errorf("Your kernel does not support oom kill disable.")
@ -494,11 +493,14 @@ func setupInitLayer(initLayer string) error {
return nil return nil
} }
func (daemon *Daemon) NetworkApiRouter() func(w http.ResponseWriter, req *http.Request) { // NetworkAPIRouter returns the HTTP handler for the experimental networking API,
// calling directly into libnetwork.
func (daemon *Daemon) NetworkAPIRouter() func(w http.ResponseWriter, req *http.Request) {
return nwapi.NewHTTPHandler(daemon.netController) return nwapi.NewHTTPHandler(daemon.netController)
} }
func (daemon *Daemon) RegisterLinks(container *Container, hostConfig *runconfig.HostConfig) error { // registerLinks registers the container's links and persists the updated host config to disk.
func (daemon *Daemon) registerLinks(container *Container, hostConfig *runconfig.HostConfig) error {
if hostConfig == nil || hostConfig.Links == nil { if hostConfig == nil || hostConfig.Links == nil {
return nil return nil
} }
@ -523,7 +525,7 @@ func (daemon *Daemon) RegisterLinks(container *Container, hostConfig *runconfig.
if child.hostConfig.NetworkMode.IsHost() { if child.hostConfig.NetworkMode.IsHost() {
return runconfig.ErrConflictHostNetworkAndLinks return runconfig.ErrConflictHostNetworkAndLinks
} }
if err := daemon.RegisterLink(container, child, alias); err != nil { if err := daemon.registerLink(container, child, alias); err != nil {
return err return err
} }
} }
@ -531,7 +533,7 @@ func (daemon *Daemon) RegisterLinks(container *Container, hostConfig *runconfig.
// After we load all the links into the daemon // After we load all the links into the daemon
// set them to nil on the hostconfig // set them to nil on the hostconfig
hostConfig.Links = nil hostConfig.Links = nil
if err := container.WriteHostConfig(); err != nil { if err := container.writeHostConfig(); err != nil {
return err return err
} }

View File

@ -6,6 +6,7 @@ import (
"syscall" "syscall"
"github.com/docker/docker/daemon/graphdriver" "github.com/docker/docker/daemon/graphdriver"
// register the windows graph driver
_ "github.com/docker/docker/daemon/graphdriver/windows" _ "github.com/docker/docker/daemon/graphdriver/windows"
"github.com/docker/docker/pkg/parsers" "github.com/docker/docker/pkg/parsers"
"github.com/docker/docker/runconfig" "github.com/docker/docker/runconfig"
@ -13,7 +14,7 @@ import (
) )
const ( const (
DefaultVirtualSwitch = "Virtual Switch" defaultVirtualSwitch = "Virtual Switch"
platformSupported = true platformSupported = true
) )
@ -91,12 +92,14 @@ func isBridgeNetworkDisabled(config *Config) bool {
func initNetworkController(config *Config) (libnetwork.NetworkController, error) { func initNetworkController(config *Config) (libnetwork.NetworkController, error) {
// Set the name of the virtual switch if not specified by -b on daemon start // Set the name of the virtual switch if not specified by -b on daemon start
if config.Bridge.VirtualSwitchName == "" { if config.Bridge.VirtualSwitchName == "" {
config.Bridge.VirtualSwitchName = DefaultVirtualSwitch config.Bridge.VirtualSwitchName = defaultVirtualSwitch
} }
return nil, nil return nil, nil
} }
func (daemon *Daemon) RegisterLinks(container *Container, hostConfig *runconfig.HostConfig) error { // registerLinks sets up links between containers and writes the
// configuration out for persistence.
func (daemon *Daemon) registerLinks(container *Container, hostConfig *runconfig.HostConfig) error {
// TODO Windows. Factored out for network modes. There may be more // TODO Windows. Factored out for network modes. There may be more
// refactoring required here. // refactoring required here.
@ -114,7 +117,7 @@ func (daemon *Daemon) RegisterLinks(container *Container, hostConfig *runconfig.
// An error from daemon.Get() means this name could not be found // An error from daemon.Get() means this name could not be found
return fmt.Errorf("Could not get container for %s", name) return fmt.Errorf("Could not get container for %s", name)
} }
if err := daemon.RegisterLink(container, child, alias); err != nil { if err := daemon.registerLink(container, child, alias); err != nil {
return err return err
} }
} }
@ -122,7 +125,7 @@ func (daemon *Daemon) RegisterLinks(container *Container, hostConfig *runconfig.
// After we load all the links into the daemon // After we load all the links into the daemon
// set them to nil on the hostconfig // set them to nil on the hostconfig
hostConfig.Links = nil hostConfig.Links = nil
if err := container.WriteHostConfig(); err != nil { if err := container.writeHostConfig(); err != nil {
return err return err
} }
return nil return nil

View File

@ -3,5 +3,6 @@
package daemon package daemon
import ( import (
// register the zfs driver
_ "github.com/docker/docker/daemon/graphdriver/zfs" _ "github.com/docker/docker/daemon/graphdriver/zfs"
) )

View File

@ -8,10 +8,15 @@ import (
"github.com/Sirupsen/logrus" "github.com/Sirupsen/logrus"
) )
// ContainerRmConfig holds the options for removing a container.
type ContainerRmConfig struct { type ContainerRmConfig struct {
ForceRemove, RemoveVolume, RemoveLink bool ForceRemove, RemoveVolume, RemoveLink bool
} }
// ContainerRm removes the container identified by the given name or ID
// from the filesystem. An error is returned if the container is not found
// or if the remove fails. If the remove succeeds, the container name is
// released and network links are removed.
func (daemon *Daemon) ContainerRm(name string, config *ContainerRmConfig) error { func (daemon *Daemon) ContainerRm(name string, config *ContainerRmConfig) error {
container, err := daemon.Get(name) container, err := daemon.Get(name)
if err != nil { if err != nil {
@ -27,18 +32,18 @@ func (daemon *Daemon) ContainerRm(name string, config *ContainerRmConfig) error
if parent == "/" { if parent == "/" {
return fmt.Errorf("Conflict, cannot remove the default name of the container") return fmt.Errorf("Conflict, cannot remove the default name of the container")
} }
pe := daemon.ContainerGraph().Get(parent) pe := daemon.containerGraph().Get(parent)
if pe == nil { if pe == nil {
return fmt.Errorf("Cannot get parent %s for name %s", parent, name) return fmt.Errorf("Cannot get parent %s for name %s", parent, name)
} }
if err := daemon.ContainerGraph().Delete(name); err != nil { if err := daemon.containerGraph().Delete(name); err != nil {
return err return err
} }
parentContainer, _ := daemon.Get(pe.ID()) parentContainer, _ := daemon.Get(pe.ID())
if parentContainer != nil { if parentContainer != nil {
if err := parentContainer.UpdateNetwork(); err != nil { if err := parentContainer.updateNetwork(); err != nil {
logrus.Debugf("Could not update network to remove link %s: %v", n, err) logrus.Debugf("Could not update network to remove link %s: %v", n, err)
} }
} }
@ -75,23 +80,23 @@ func (daemon *Daemon) rm(container *Container, forceRemove bool) (err error) {
} }
// Container state RemovalInProgress should be used to avoid races. // Container state RemovalInProgress should be used to avoid races.
if err = container.SetRemovalInProgress(); err != nil { if err = container.setRemovalInProgress(); err != nil {
return fmt.Errorf("Failed to set container state to RemovalInProgress: %s", err) return fmt.Errorf("Failed to set container state to RemovalInProgress: %s", err)
} }
defer container.ResetRemovalInProgress() defer container.resetRemovalInProgress()
if err = container.Stop(3); err != nil { if err = container.Stop(3); err != nil {
return err return err
} }
// Mark container dead. We don't want anybody to be restarting it. // Mark container dead. We don't want anybody to be restarting it.
container.SetDead() container.setDead()
// Save container state to disk, so that if an error happens before // Save container state to disk, so that if an error happens before
// the container meta file is removed from disk, a restart of // the container meta file is removed from disk, a restart of
// docker does not bring a dead container back to life. // docker does not bring a dead container back to life.
if err := container.ToDisk(); err != nil { if err := container.toDiskLocking(); err != nil {
logrus.Errorf("Error saving dying container to disk: %v", err) logrus.Errorf("Error saving dying container to disk: %v", err)
} }
@ -102,11 +107,11 @@ func (daemon *Daemon) rm(container *Container, forceRemove bool) (err error) {
daemon.idIndex.Delete(container.ID) daemon.idIndex.Delete(container.ID)
daemon.containers.Delete(container.ID) daemon.containers.Delete(container.ID)
os.RemoveAll(container.root) os.RemoveAll(container.root)
container.LogEvent("destroy") container.logEvent("destroy")
} }
}() }()
if _, err := daemon.containerGraph.Purge(container.ID); err != nil { if _, err := daemon.containerGraphDB.Purge(container.ID); err != nil {
logrus.Debugf("Unable to remove container from link graph: %s", err) logrus.Debugf("Unable to remove container from link graph: %s", err)
} }
@ -131,7 +136,7 @@ func (daemon *Daemon) rm(container *Container, forceRemove bool) (err error) {
daemon.idIndex.Delete(container.ID) daemon.idIndex.Delete(container.ID)
daemon.containers.Delete(container.ID) daemon.containers.Delete(container.ID)
container.LogEvent("destroy") container.logEvent("destroy")
return nil return nil
} }

View File

@ -17,13 +17,16 @@ import (
"github.com/docker/docker/runconfig" "github.com/docker/docker/runconfig"
) )
type execConfig struct { // ExecConfig holds the configurations for execs. The Daemon keeps
// track of both running and finished execs so that they can be
// examined both during and after completion.
type ExecConfig struct {
sync.Mutex sync.Mutex
ID string ID string
Running bool Running bool
ExitCode int ExitCode int
ProcessConfig *execdriver.ProcessConfig ProcessConfig *execdriver.ProcessConfig
StreamConfig streamConfig
OpenStdin bool OpenStdin bool
OpenStderr bool OpenStderr bool
OpenStdout bool OpenStdout bool
@ -35,21 +38,21 @@ type execConfig struct {
} }
type execStore struct { type execStore struct {
s map[string]*execConfig s map[string]*ExecConfig
sync.RWMutex sync.RWMutex
} }
func newExecStore() *execStore { func newExecStore() *execStore {
return &execStore{s: make(map[string]*execConfig, 0)} return &execStore{s: make(map[string]*ExecConfig, 0)}
} }
func (e *execStore) Add(id string, execConfig *execConfig) { func (e *execStore) Add(id string, ExecConfig *ExecConfig) {
e.Lock() e.Lock()
e.s[id] = execConfig e.s[id] = ExecConfig
e.Unlock() e.Unlock()
} }
func (e *execStore) Get(id string) *execConfig { func (e *execStore) Get(id string) *ExecConfig {
e.RLock() e.RLock()
res := e.s[id] res := e.s[id]
e.RUnlock() e.RUnlock()
@ -72,24 +75,24 @@ func (e *execStore) List() []string {
return IDs return IDs
} }
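execStore is a plain map guarded by a sync.RWMutex: Add and Delete take the write lock, Get and List take the read lock. The same shape in miniature, with a placeholder value type:

package daemon

import "sync"

// miniStore mirrors the shape of execStore: a map behind a sync.RWMutex,
// where writers take the full lock and readers take the read lock.
type miniStore struct {
	sync.RWMutex
	s map[string]string
}

func newMiniStore() *miniStore { return &miniStore{s: make(map[string]string)} }

func (e *miniStore) Add(id, v string) {
	e.Lock()
	e.s[id] = v
	e.Unlock()
}

func (e *miniStore) Get(id string) string {
	e.RLock()
	res := e.s[id]
	e.RUnlock()
	return res
}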
func (execConfig *execConfig) Resize(h, w int) error { func (ExecConfig *ExecConfig) resize(h, w int) error {
select { select {
case <-execConfig.waitStart: case <-ExecConfig.waitStart:
case <-time.After(time.Second): case <-time.After(time.Second):
return fmt.Errorf("Exec %s is not running, so it can not be resized.", execConfig.ID) return fmt.Errorf("Exec %s is not running, so it can not be resized.", ExecConfig.ID)
} }
return execConfig.ProcessConfig.Terminal.Resize(h, w) return ExecConfig.ProcessConfig.Terminal.Resize(h, w)
} }
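resize refuses to act until the exec has actually started, selecting over the waitStart channel with a one-second timeout. The same pattern in isolation:

package main

import (
	"errors"
	"fmt"
	"time"
)

// waitStarted blocks until started is closed or the timeout elapses, the same
// shape as the waitStart check in resize.
func waitStarted(started <-chan struct{}, timeout time.Duration) error {
	select {
	case <-started:
		return nil
	case <-time.After(timeout):
		return errors.New("exec is not running, so it can not be resized")
	}
}

func main() {
	started := make(chan struct{})
	close(started) // pretend the exec has already started
	fmt.Println(waitStarted(started, time.Second))
}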
func (d *Daemon) registerExecCommand(execConfig *execConfig) { func (d *Daemon) registerExecCommand(ExecConfig *ExecConfig) {
// Storing execs in container in order to kill them gracefully whenever the container is stopped or removed. // Storing execs in container in order to kill them gracefully whenever the container is stopped or removed.
execConfig.Container.execCommands.Add(execConfig.ID, execConfig) ExecConfig.Container.execCommands.Add(ExecConfig.ID, ExecConfig)
// Storing execs in daemon for easy access via remote API. // Storing execs in daemon for easy access via remote API.
d.execCommands.Add(execConfig.ID, execConfig) d.execCommands.Add(ExecConfig.ID, ExecConfig)
} }
func (d *Daemon) getExecConfig(name string) (*execConfig, error) { func (d *Daemon) getExecConfig(name string) (*ExecConfig, error) {
execConfig := d.execCommands.Get(name) ExecConfig := d.execCommands.Get(name)
// If the exec is found but its container is not in the daemon's list of // If the exec is found but its container is not in the daemon's list of
// containers then it must have been deleted, in which case instead of // containers then it must have been deleted, in which case instead of
@ -97,20 +100,20 @@ func (d *Daemon) getExecConfig(name string) (*execConfig, error) {
// the user sees the same error now that they will after the // the user sees the same error now that they will after the
// 5 minute clean-up loop is run which erases old/dead execs. // 5 minute clean-up loop is run which erases old/dead execs.
if execConfig != nil && d.containers.Get(execConfig.Container.ID) != nil { if ExecConfig != nil && d.containers.Get(ExecConfig.Container.ID) != nil {
if !execConfig.Container.IsRunning() { if !ExecConfig.Container.IsRunning() {
return nil, fmt.Errorf("Container %s is not running", execConfig.Container.ID) return nil, fmt.Errorf("Container %s is not running", ExecConfig.Container.ID)
} }
return execConfig, nil return ExecConfig, nil
} }
return nil, fmt.Errorf("No such exec instance '%s' found in daemon", name) return nil, fmt.Errorf("No such exec instance '%s' found in daemon", name)
} }
func (d *Daemon) unregisterExecCommand(execConfig *execConfig) { func (d *Daemon) unregisterExecCommand(ExecConfig *ExecConfig) {
execConfig.Container.execCommands.Delete(execConfig.ID) ExecConfig.Container.execCommands.Delete(ExecConfig.ID)
d.execCommands.Delete(execConfig.ID) d.execCommands.Delete(ExecConfig.ID)
} }
func (d *Daemon) getActiveContainer(name string) (*Container, error) { func (d *Daemon) getActiveContainer(name string) (*Container, error) {
@ -122,12 +125,13 @@ func (d *Daemon) getActiveContainer(name string) (*Container, error) {
if !container.IsRunning() { if !container.IsRunning() {
return nil, fmt.Errorf("Container %s is not running", name) return nil, fmt.Errorf("Container %s is not running", name)
} }
if container.IsPaused() { if container.isPaused() {
return nil, fmt.Errorf("Container %s is paused, unpause the container before exec", name) return nil, fmt.Errorf("Container %s is paused, unpause the container before exec", name)
} }
return container, nil return container, nil
} }
// ContainerExecCreate sets up an exec in a running container.
func (d *Daemon) ContainerExecCreate(config *runconfig.ExecConfig) (string, error) { func (d *Daemon) ContainerExecCreate(config *runconfig.ExecConfig) (string, error) {
// Not all drivers support Exec (LXC for example) // Not all drivers support Exec (LXC for example)
if err := checkExecSupport(d.execDriver.Name()); err != nil { if err := checkExecSupport(d.execDriver.Name()); err != nil {
@ -155,55 +159,56 @@ func (d *Daemon) ContainerExecCreate(config *runconfig.ExecConfig) (string, erro
Privileged: config.Privileged, Privileged: config.Privileged,
} }
execConfig := &execConfig{ ExecConfig := &ExecConfig{
ID: stringid.GenerateNonCryptoID(), ID: stringid.GenerateNonCryptoID(),
OpenStdin: config.AttachStdin, OpenStdin: config.AttachStdin,
OpenStdout: config.AttachStdout, OpenStdout: config.AttachStdout,
OpenStderr: config.AttachStderr, OpenStderr: config.AttachStderr,
StreamConfig: StreamConfig{}, streamConfig: streamConfig{},
ProcessConfig: processConfig, ProcessConfig: processConfig,
Container: container, Container: container,
Running: false, Running: false,
waitStart: make(chan struct{}), waitStart: make(chan struct{}),
} }
d.registerExecCommand(execConfig) d.registerExecCommand(ExecConfig)
container.LogEvent("exec_create: " + execConfig.ProcessConfig.Entrypoint + " " + strings.Join(execConfig.ProcessConfig.Arguments, " ")) container.logEvent("exec_create: " + ExecConfig.ProcessConfig.Entrypoint + " " + strings.Join(ExecConfig.ProcessConfig.Arguments, " "))
return execConfig.ID, nil
return ExecConfig.ID, nil
} }
// ContainerExecStart starts a previously created exec instance and
// attaches its standard streams.
func (d *Daemon) ContainerExecStart(execName string, stdin io.ReadCloser, stdout io.Writer, stderr io.Writer) error { func (d *Daemon) ContainerExecStart(execName string, stdin io.ReadCloser, stdout io.Writer, stderr io.Writer) error {
var ( var (
cStdin io.ReadCloser cStdin io.ReadCloser
cStdout, cStderr io.Writer cStdout, cStderr io.Writer
) )
execConfig, err := d.getExecConfig(execName) ExecConfig, err := d.getExecConfig(execName)
if err != nil { if err != nil {
return err return err
} }
func() { func() {
execConfig.Lock() ExecConfig.Lock()
defer execConfig.Unlock() defer ExecConfig.Unlock()
if execConfig.Running { if ExecConfig.Running {
err = fmt.Errorf("Error: Exec command %s is already running", execName) err = fmt.Errorf("Error: Exec command %s is already running", execName)
} }
execConfig.Running = true ExecConfig.Running = true
}() }()
if err != nil { if err != nil {
return err return err
} }
logrus.Debugf("starting exec command %s in container %s", execConfig.ID, execConfig.Container.ID) logrus.Debugf("starting exec command %s in container %s", ExecConfig.ID, ExecConfig.Container.ID)
container := execConfig.Container container := ExecConfig.Container
container.LogEvent("exec_start: " + execConfig.ProcessConfig.Entrypoint + " " + strings.Join(execConfig.ProcessConfig.Arguments, " ")) container.logEvent("exec_start: " + ExecConfig.ProcessConfig.Entrypoint + " " + strings.Join(ExecConfig.ProcessConfig.Arguments, " "))
if execConfig.OpenStdin { if ExecConfig.OpenStdin {
r, w := io.Pipe() r, w := io.Pipe()
go func() { go func() {
defer w.Close() defer w.Close()
@ -212,32 +217,32 @@ func (d *Daemon) ContainerExecStart(execName string, stdin io.ReadCloser, stdout
}() }()
cStdin = r cStdin = r
} }
if execConfig.OpenStdout { if ExecConfig.OpenStdout {
cStdout = stdout cStdout = stdout
} }
if execConfig.OpenStderr { if ExecConfig.OpenStderr {
cStderr = stderr cStderr = stderr
} }
execConfig.StreamConfig.stderr = broadcastwriter.New() ExecConfig.streamConfig.stderr = broadcastwriter.New()
execConfig.StreamConfig.stdout = broadcastwriter.New() ExecConfig.streamConfig.stdout = broadcastwriter.New()
// Attach to stdin // Attach to stdin
if execConfig.OpenStdin { if ExecConfig.OpenStdin {
execConfig.StreamConfig.stdin, execConfig.StreamConfig.stdinPipe = io.Pipe() ExecConfig.streamConfig.stdin, ExecConfig.streamConfig.stdinPipe = io.Pipe()
} else { } else {
execConfig.StreamConfig.stdinPipe = ioutils.NopWriteCloser(ioutil.Discard) // Silently drop stdin ExecConfig.streamConfig.stdinPipe = ioutils.NopWriteCloser(ioutil.Discard) // Silently drop stdin
} }
attachErr := attach(&execConfig.StreamConfig, execConfig.OpenStdin, true, execConfig.ProcessConfig.Tty, cStdin, cStdout, cStderr) attachErr := attach(&ExecConfig.streamConfig, ExecConfig.OpenStdin, true, ExecConfig.ProcessConfig.Tty, cStdin, cStdout, cStderr)
execErr := make(chan error) execErr := make(chan error)
// Note, the execConfig data will be removed when the container // Note, the ExecConfig data will be removed when the container
// itself is deleted. This allows us to query it (for things like // itself is deleted. This allows us to query it (for things like
// the exitStatus) even after the cmd is done running. // the exitStatus) even after the cmd is done running.
go func() { go func() {
if err := container.Exec(execConfig); err != nil { if err := container.exec(ExecConfig); err != nil {
execErr <- fmt.Errorf("Cannot run exec command %s in container %s: %s", execName, container.ID, err) execErr <- fmt.Errorf("Cannot run exec command %s in container %s: %s", execName, container.ID, err)
} }
}() }()
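Stdin is plumbed through an io.Pipe: a goroutine copies the caller's reader into the write end and closes it so the exec side sees EOF on its read end. A minimal stand-alone version of that plumbing:

package main

import (
	"fmt"
	"io"
	"io/ioutil"
	"strings"
)

func main() {
	clientStdin := strings.NewReader("hello from the client\n")

	r, w := io.Pipe()
	go func() {
		defer w.Close() // closing the write end gives the reader a clean EOF
		io.Copy(w, clientStdin)
	}()

	// the exec process would consume r as its stdin; here we just drain it
	out, _ := ioutil.ReadAll(r)
	fmt.Print(string(out))
}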
@ -260,16 +265,17 @@ func (d *Daemon) ContainerExecStart(execName string, stdin io.ReadCloser, stdout
} }
} }
func (d *Daemon) Exec(c *Container, execConfig *execConfig, pipes *execdriver.Pipes, startCallback execdriver.StartCallback) (int, error) { // Exec calls the underlying exec driver to run the process described by the exec configuration.
exitStatus, err := d.execDriver.Exec(c.command, execConfig.ProcessConfig, pipes, startCallback) func (d *Daemon) Exec(c *Container, ExecConfig *ExecConfig, pipes *execdriver.Pipes, startCallback execdriver.StartCallback) (int, error) {
exitStatus, err := d.execDriver.Exec(c.command, ExecConfig.ProcessConfig, pipes, startCallback)
// On err, make sure we don't leave ExitCode at zero // On err, make sure we don't leave ExitCode at zero
if err != nil && exitStatus == 0 { if err != nil && exitStatus == 0 {
exitStatus = 128 exitStatus = 128
} }
execConfig.ExitCode = exitStatus ExecConfig.ExitCode = exitStatus
execConfig.Running = false ExecConfig.Running = false
return exitStatus, err return exitStatus, err
} }

View File

@ -5,13 +5,15 @@ import (
"io" "io"
) )
// ContainerExport writes the contents of the container to the given
// writer. An error is returned if the container cannot be found.
func (daemon *Daemon) ContainerExport(name string, out io.Writer) error { func (daemon *Daemon) ContainerExport(name string, out io.Writer) error {
container, err := daemon.Get(name) container, err := daemon.Get(name)
if err != nil { if err != nil {
return err return err
} }
data, err := container.Export() data, err := container.export()
if err != nil { if err != nil {
return fmt.Errorf("%s: %s", name, err) return fmt.Errorf("%s: %s", name, err)
} }

View File

@ -22,10 +22,11 @@ func (history *History) Swap(i, j int) {
containers[i], containers[j] = containers[j], containers[i] containers[i], containers[j] = containers[j], containers[i]
} }
// Add appends the given container to the history.
func (history *History) Add(container *Container) { func (history *History) Add(container *Container) {
*history = append(*history, container) *history = append(*history, container)
} }
func (history *History) Sort() { func (history *History) sort() {
sort.Sort(history) sort.Sort(history)
} }
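History satisfies sort.Interface, so the now-unexported sort method can simply call sort.Sort on the receiver. The same idea in a tiny runnable form (the element type here is a stand-in):

package main

import (
	"fmt"
	"sort"
)

// byCreated sorts creation timestamps newest-first, the same trick History
// uses: implement Len/Less/Swap and hand the value to sort.Sort.
type byCreated []int64

func (h byCreated) Len() int           { return len(h) }
func (h byCreated) Less(i, j int) bool { return h[i] > h[j] }
func (h byCreated) Swap(i, j int)      { h[i], h[j] = h[j], h[i] }

func main() {
	h := byCreated{3, 1, 2}
	sort.Sort(h)
	fmt.Println(h) // [3 2 1]
}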

View File

@ -13,6 +13,7 @@ import (
"github.com/docker/docker/utils" "github.com/docker/docker/utils"
) )
// ImageDelete removes the image from the filesystem.
// FIXME: remove ImageDelete's dependency on Daemon, then move to graph/ // FIXME: remove ImageDelete's dependency on Daemon, then move to graph/
func (daemon *Daemon) ImageDelete(name string, force, noprune bool) ([]types.ImageDelete, error) { func (daemon *Daemon) ImageDelete(name string, force, noprune bool) ([]types.ImageDelete, error) {
list := []types.ImageDelete{} list := []types.ImageDelete{}

View File

@ -17,6 +17,7 @@ import (
"github.com/docker/docker/utils" "github.com/docker/docker/utils"
) )
// SystemInfo returns information about the host server the daemon is running on.
func (daemon *Daemon) SystemInfo() (*types.Info, error) { func (daemon *Daemon) SystemInfo() (*types.Info, error) {
images := daemon.Graph().Map() images := daemon.Graph().Map()
var imgcount int var imgcount int
@ -50,11 +51,14 @@ func (daemon *Daemon) SystemInfo() (*types.Info, error) {
logrus.Errorf("Could not read system memory info: %v", err) logrus.Errorf("Could not read system memory info: %v", err)
} }
// if we still have the original dockerinit binary from before we copied it locally, let's return the path to that, since that's more intuitive (the copied path is trivial to derive by hand given VERSION) // if we still have the original dockerinit binary from before
// we copied it locally, let's return the path to that, since
// that's more intuitive (the copied path is trivial to derive
// by hand given VERSION)
initPath := utils.DockerInitPath("") initPath := utils.DockerInitPath("")
if initPath == "" { if initPath == "" {
// if that fails, we'll just return the path from the daemon // if that fails, we'll just return the path from the daemon
initPath = daemon.SystemInitPath() initPath = daemon.systemInitPath()
} }
sysInfo := sysinfo.New(false) sysInfo := sysinfo.New(false)
@ -83,8 +87,8 @@ func (daemon *Daemon) SystemInfo() (*types.Info, error) {
InitPath: initPath, InitPath: initPath,
NCPU: runtime.NumCPU(), NCPU: runtime.NumCPU(),
MemTotal: meminfo.MemTotal, MemTotal: meminfo.MemTotal,
DockerRootDir: daemon.Config().Root, DockerRootDir: daemon.config().Root,
Labels: daemon.Config().Labels, Labels: daemon.config().Labels,
ExperimentalBuild: utils.ExperimentalBuild(), ExperimentalBuild: utils.ExperimentalBuild(),
} }

View File

@ -7,6 +7,9 @@ import (
"github.com/docker/docker/api/types" "github.com/docker/docker/api/types"
) )
// ContainerInspect returns low-level information about a
// container. Returns an error if the container cannot be found, or if
// there is an error getting the data.
func (daemon *Daemon) ContainerInspect(name string) (*types.ContainerJSON, error) { func (daemon *Daemon) ContainerInspect(name string) (*types.ContainerJSON, error) {
container, err := daemon.Get(name) container, err := daemon.Get(name)
if err != nil { if err != nil {
@ -30,7 +33,7 @@ func (daemon *Daemon) getInspectData(container *Container) (*types.ContainerJSON
// make a copy to play with // make a copy to play with
hostConfig := *container.hostConfig hostConfig := *container.hostConfig
if children, err := daemon.Children(container.Name); err == nil { if children, err := daemon.children(container.Name); err == nil {
for linkAlias, child := range children { for linkAlias, child := range children {
hostConfig.Links = append(hostConfig.Links, fmt.Sprintf("%s:%s", child.Name, linkAlias)) hostConfig.Links = append(hostConfig.Links, fmt.Sprintf("%s:%s", child.Name, linkAlias))
} }
@ -74,7 +77,7 @@ func (daemon *Daemon) getInspectData(container *Container) (*types.ContainerJSON
ExecDriver: container.ExecDriver, ExecDriver: container.ExecDriver,
MountLabel: container.MountLabel, MountLabel: container.MountLabel,
ProcessLabel: container.ProcessLabel, ProcessLabel: container.ProcessLabel,
ExecIDs: container.GetExecIDs(), ExecIDs: container.getExecIDs(),
HostConfig: &hostConfig, HostConfig: &hostConfig,
} }
@ -91,7 +94,9 @@ func (daemon *Daemon) getInspectData(container *Container) (*types.ContainerJSON
return contJSONBase, nil return contJSONBase, nil
} }
func (daemon *Daemon) ContainerExecInspect(id string) (*execConfig, error) { // ContainerExecInspect returns low-level information about the exec
// command. An error is returned if the exec cannot be found.
func (daemon *Daemon) ContainerExecInspect(id string) (*ExecConfig, error) {
eConfig, err := daemon.getExecConfig(id) eConfig, err := daemon.getExecConfig(id)
if err != nil { if err != nil {
return nil, err return nil, err
@ -99,6 +104,8 @@ func (daemon *Daemon) ContainerExecInspect(id string) (*execConfig, error) {
return eConfig, nil return eConfig, nil
} }
// VolumeInspect looks up a volume by name. An error is returned if
// the volume cannot be found.
func (daemon *Daemon) VolumeInspect(name string) (*types.Volume, error) { func (daemon *Daemon) VolumeInspect(name string) (*types.Volume, error) {
v, err := daemon.volumes.Get(name) v, err := daemon.volumes.Get(name)
if err != nil { if err != nil {

View File

@ -14,6 +14,7 @@ func setPlatformSpecificContainerFields(container *Container, contJSONBase *type
return contJSONBase return contJSONBase
} }
// ContainerInspectPre120 is for backwards compatibility with pre v1.20 clients.
func (daemon *Daemon) ContainerInspectPre120(name string) (*types.ContainerJSONPre120, error) { func (daemon *Daemon) ContainerInspectPre120(name string) (*types.ContainerJSONPre120, error) {
container, err := daemon.Get(name) container, err := daemon.Get(name)
if err != nil { if err != nil {

View File

@ -19,7 +19,7 @@ func (daemon *Daemon) ContainerKill(name string, sig uint64) error {
} }
} else { } else {
// Otherwise, just send the requested signal // Otherwise, just send the requested signal
if err := container.KillSig(int(sig)); err != nil { if err := container.killSig(int(sig)); err != nil {
return err return err
} }
} }

View File

@ -17,15 +17,24 @@ func (daemon *Daemon) List() []*Container {
return daemon.containers.List() return daemon.containers.List()
} }
// ContainersConfig is a struct for configuring the command to list
// containers.
type ContainersConfig struct { type ContainersConfig struct {
// if true show all containers, otherwise only running containers.
All bool All bool
// show all containers created after this container id
Since string Since string
// show all containers created before this container id
Before string Before string
// number of containers to return at most
Limit int Limit int
// if true include the sizes of the containers
Size bool Size bool
// return only containers that match filters
Filters string Filters string
} }
// Containers returns a list of all the containers.
func (daemon *Daemon) Containers(config *ContainersConfig) ([]*types.Container, error) { func (daemon *Daemon) Containers(config *ContainersConfig) ([]*types.Container, error) {
var ( var (
foundBefore bool foundBefore bool
@ -62,7 +71,7 @@ func (daemon *Daemon) Containers(config *ContainersConfig) ([]*types.Container,
} }
} }
names := map[string][]string{} names := map[string][]string{}
daemon.ContainerGraph().Walk("/", func(p string, e *graphdb.Entity) error { daemon.containerGraph().Walk("/", func(p string, e *graphdb.Entity) error {
names[e.ID()] = append(names[e.ID()], p) names[e.ID()] = append(names[e.ID()], p)
return nil return nil
}, 1) }, 1)
@ -195,7 +204,7 @@ func (daemon *Daemon) Containers(config *ContainersConfig) ([]*types.Container,
} }
if config.Size { if config.Size {
sizeRw, sizeRootFs := container.GetSize() sizeRw, sizeRootFs := container.getSize()
newC.SizeRw = sizeRw newC.SizeRw = sizeRw
newC.SizeRootFs = sizeRootFs newC.SizeRootFs = sizeRootFs
} }
@ -215,6 +224,8 @@ func (daemon *Daemon) Containers(config *ContainersConfig) ([]*types.Container,
return containers, nil return containers, nil
} }
// Volumes lists known volumes, using the filter to restrict the range
// of volumes returned.
func (daemon *Daemon) Volumes(filter string) ([]*types.Volume, error) { func (daemon *Daemon) Volumes(filter string) ([]*types.Volume, error) {
var volumesOut []*types.Volume var volumesOut []*types.Volume
volFilters, err := filters.FromParam(filter) volFilters, err := filters.FromParam(filter)
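
A rough sketch (not part of this commit) of how the ContainersConfig fields documented above are meant to be filled in by a caller that already holds an initialized *Daemon; the variable d, the helper listAll and the example package are illustrative assumptions:

package example

import (
	"fmt"

	"github.com/docker/docker/daemon"
)

// listAll lists every container known to the daemon, stopped ones
// included, and prints their IDs.
func listAll(d *daemon.Daemon) error {
	cfg := &daemon.ContainersConfig{
		All:  true, // include stopped containers, as `docker ps -a` does
		Size: true, // also report SizeRw / SizeRootFs for each entry
	}
	containers, err := d.Containers(cfg)
	if err != nil {
		return err
	}
	for _, c := range containers {
		fmt.Println(c.ID)
	}
	return nil
}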

View File

@ -1,8 +1,8 @@
package daemon package daemon
import (
// Importing packages here only to make sure their init gets called and // Importing packages here only to make sure their init gets called and
// therefore they register themselves to the logdriver factory. // therefore they register themselves to the logdriver factory.
import (
_ "github.com/docker/docker/daemon/logger/fluentd" _ "github.com/docker/docker/daemon/logger/fluentd"
_ "github.com/docker/docker/daemon/logger/gelf" _ "github.com/docker/docker/daemon/logger/gelf"
_ "github.com/docker/docker/daemon/logger/journald" _ "github.com/docker/docker/daemon/logger/journald"

View File

@ -1,7 +1,7 @@
package daemon package daemon
import (
// Importing packages here only to make sure their init gets called and // Importing packages here only to make sure their init gets called and
// therefore they register themselves to the logdriver factory. // therefore they register themselves to the logdriver factory.
import (
_ "github.com/docker/docker/daemon/logger/jsonfilelog" _ "github.com/docker/docker/daemon/logger/jsonfilelog"
) )

View File

@ -11,15 +11,25 @@ import (
"github.com/docker/docker/pkg/stdcopy" "github.com/docker/docker/pkg/stdcopy"
) )
// ContainerLogsConfig holds configs for logging operations. Exists
// for users of the daemon to pass it a logging configuration.
type ContainerLogsConfig struct { type ContainerLogsConfig struct {
Follow, Timestamps bool // if true stream log output
Follow bool
// if true include timestamps for each line of log output
Timestamps bool
// return that many lines of log output from the end
Tail string Tail string
// filter logs by returning only those entries after this time
Since time.Time Since time.Time
// whether or not to show stdout and stderr as well as log entries.
UseStdout, UseStderr bool UseStdout, UseStderr bool
OutStream io.Writer OutStream io.Writer
Stop <-chan bool Stop <-chan bool
} }
// ContainerLogs hooks up a container's stdout and stderr streams
// configured with the given struct.
func (daemon *Daemon) ContainerLogs(container *Container, config *ContainerLogsConfig) error { func (daemon *Daemon) ContainerLogs(container *Container, config *ContainerLogsConfig) error {
if !(config.UseStdout || config.UseStderr) { if !(config.UseStdout || config.UseStderr) {
return fmt.Errorf("You must choose at least one stream") return fmt.Errorf("You must choose at least one stream")
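
To show how a user of the daemon is expected to fill in ContainerLogsConfig, here is a hedged sketch (not from this commit); d, tailLogs and the example package are assumptions, and the container is resolved through the exported Get:

package example

import (
	"os"

	"github.com/docker/docker/daemon"
)

// tailLogs writes the last 100 log lines of the named container, with
// timestamps, to stdout. Follow is left false, so the daemon is not
// asked to keep streaming.
func tailLogs(d *daemon.Daemon, name string) error {
	container, err := d.Get(name)
	if err != nil {
		return err
	}
	config := &daemon.ContainerLogsConfig{
		UseStdout:  true, // at least one of the two streams must be chosen
		UseStderr:  true,
		Timestamps: true,
		Tail:       "100",
		OutStream:  os.Stdout,
	}
	return d.ContainerLogs(container, config)
}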

View File

@ -138,11 +138,11 @@ func (m *containerMonitor) Start() error {
pipes := execdriver.NewPipes(m.container.stdin, m.container.stdout, m.container.stderr, m.container.Config.OpenStdin) pipes := execdriver.NewPipes(m.container.stdin, m.container.stdout, m.container.stderr, m.container.Config.OpenStdin)
m.container.LogEvent("start") m.container.logEvent("start")
m.lastStartTime = time.Now() m.lastStartTime = time.Now()
if exitStatus, err = m.container.daemon.Run(m.container, pipes, m.callback); err != nil { if exitStatus, err = m.container.daemon.run(m.container, pipes, m.callback); err != nil {
// if we receive an internal error from the initial start of a container then lets // if we receive an internal error from the initial start of a container then lets
// return it instead of entering the restart loop // return it instead of entering the restart loop
if m.container.RestartCount == 0 { if m.container.RestartCount == 0 {
@ -161,11 +161,11 @@ func (m *containerMonitor) Start() error {
m.resetMonitor(err == nil && exitStatus.ExitCode == 0) m.resetMonitor(err == nil && exitStatus.ExitCode == 0)
if m.shouldRestart(exitStatus.ExitCode) { if m.shouldRestart(exitStatus.ExitCode) {
m.container.SetRestarting(&exitStatus) m.container.setRestarting(&exitStatus)
if exitStatus.OOMKilled { if exitStatus.OOMKilled {
m.container.LogEvent("oom") m.container.logEvent("oom")
} }
m.container.LogEvent("die") m.container.logEvent("die")
m.resetContainer(true) m.resetContainer(true)
// sleep with a small time increment between each restart to help avoid issues cased by quickly // sleep with a small time increment between each restart to help avoid issues cased by quickly
@ -180,9 +180,9 @@ func (m *containerMonitor) Start() error {
continue continue
} }
if exitStatus.OOMKilled { if exitStatus.OOMKilled {
m.container.LogEvent("oom") m.container.logEvent("oom")
} }
m.container.LogEvent("die") m.container.logEvent("die")
m.resetContainer(true) m.resetContainer(true)
return err return err
} }
@ -270,7 +270,7 @@ func (m *containerMonitor) callback(processConfig *execdriver.ProcessConfig, pid
close(m.startSignal) close(m.startSignal)
} }
if err := m.container.ToDisk(); err != nil { if err := m.container.toDiskLocking(); err != nil {
logrus.Errorf("Error saving container to disk: %v", err) logrus.Errorf("Error saving container to disk: %v", err)
} }
} }

View File

@ -9,7 +9,7 @@ func (daemon *Daemon) ContainerPause(name string) error {
return err return err
} }
if err := container.Pause(); err != nil { if err := container.pause(); err != nil {
return fmt.Errorf("Cannot pause container %s: %s", name, err) return fmt.Errorf("Cannot pause container %s: %s", name, err)
} }

View File

@ -4,6 +4,9 @@ import (
"fmt" "fmt"
) )
// ContainerRename changes the name of a container, using the oldName
// to find the container. An error is returned if newName is already
// reserved.
func (daemon *Daemon) ContainerRename(oldName, newName string) error { func (daemon *Daemon) ContainerRename(oldName, newName string) error {
if oldName == "" || newName == "" { if oldName == "" || newName == "" {
return fmt.Errorf("usage: docker rename OLD_NAME NEW_NAME") return fmt.Errorf("usage: docker rename OLD_NAME NEW_NAME")
@ -27,10 +30,10 @@ func (daemon *Daemon) ContainerRename(oldName, newName string) error {
undo := func() { undo := func() {
container.Name = oldName container.Name = oldName
daemon.reserveName(container.ID, oldName) daemon.reserveName(container.ID, oldName)
daemon.containerGraph.Delete(newName) daemon.containerGraphDB.Delete(newName)
} }
if err := daemon.containerGraph.Delete(oldName); err != nil { if err := daemon.containerGraphDB.Delete(oldName); err != nil {
undo() undo()
return fmt.Errorf("Failed to delete container %q: %v", oldName, err) return fmt.Errorf("Failed to delete container %q: %v", oldName, err)
} }
@ -40,6 +43,6 @@ func (daemon *Daemon) ContainerRename(oldName, newName string) error {
return err return err
} }
container.LogEvent("rename") container.logEvent("rename")
return nil return nil
} }

View File

@ -1,5 +1,7 @@
package daemon package daemon
// ContainerResize changes the size of the TTY of the process running
// in the container with the given name to the given height and width.
func (daemon *Daemon) ContainerResize(name string, height, width int) error { func (daemon *Daemon) ContainerResize(name string, height, width int) error {
container, err := daemon.Get(name) container, err := daemon.Get(name)
if err != nil { if err != nil {
@ -9,11 +11,14 @@ func (daemon *Daemon) ContainerResize(name string, height, width int) error {
return container.Resize(height, width) return container.Resize(height, width)
} }
// ContainerExecResize changes the size of the TTY of the process
// running in the exec with the given name to the given height and
// width.
func (daemon *Daemon) ContainerExecResize(name string, height, width int) error { func (daemon *Daemon) ContainerExecResize(name string, height, width int) error {
execConfig, err := daemon.getExecConfig(name) ExecConfig, err := daemon.getExecConfig(name)
if err != nil { if err != nil {
return err return err
} }
return execConfig.Resize(height, width) return ExecConfig.resize(height, width)
} }

View File

@ -2,6 +2,12 @@ package daemon
import "fmt" import "fmt"
// ContainerRestart stops and starts a container. It attempts to
// gracefully stop the container within the given timeout, forcefully
// stopping it if the timeout is exceeded. If given a negative
// timeout, ContainerRestart will wait forever until a graceful
// stop. Returns an error if the container cannot be found, or if
// there is an underlying error at any stage of the restart.
func (daemon *Daemon) ContainerRestart(name string, seconds int) error { func (daemon *Daemon) ContainerRestart(name string, seconds int) error {
container, err := daemon.Get(name) container, err := daemon.Get(name)
if err != nil { if err != nil {
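
The new comment pins down the timeout semantics of ContainerRestart. A minimal sketch, assuming an initialized daemon d; the helper name and the example package are illustrative:

package example

import "github.com/docker/docker/daemon"

// restartWithGrace restarts the named container, giving it 10 seconds
// to stop cleanly before it is killed; a negative value would instead
// wait indefinitely for a graceful stop before starting it again.
func restartWithGrace(d *daemon.Daemon, name string) error {
	return d.ContainerRestart(name, 10)
}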

View File

@ -7,13 +7,14 @@ import (
"github.com/docker/docker/runconfig" "github.com/docker/docker/runconfig"
) )
// ContainerStart starts a container.
func (daemon *Daemon) ContainerStart(name string, hostConfig *runconfig.HostConfig) error { func (daemon *Daemon) ContainerStart(name string, hostConfig *runconfig.HostConfig) error {
container, err := daemon.Get(name) container, err := daemon.Get(name)
if err != nil { if err != nil {
return err return err
} }
if container.IsPaused() { if container.isPaused() {
return fmt.Errorf("Cannot start a paused container, try unpause instead.") return fmt.Errorf("Cannot start a paused container, try unpause instead.")
} }

View File

@ -9,8 +9,13 @@ import (
"github.com/docker/docker/pkg/units" "github.com/docker/docker/pkg/units"
) )
// State holds the current container state, and has methods to get and
// set the state. Container has an embed, which allows all of the
// functions defined against State to run against Container.
type State struct { type State struct {
sync.Mutex sync.Mutex
// FIXME: Why do we have both paused and running if a
// container cannot be paused and running at the same time?
Running bool Running bool
Paused bool Paused bool
Restarting bool Restarting bool
@ -25,6 +30,7 @@ type State struct {
waitChan chan struct{} waitChan chan struct{}
} }
// NewState creates a default state object with a fresh channel for state changes.
func NewState() *State { func NewState() *State {
return &State{ return &State{
waitChan: make(chan struct{}), waitChan: make(chan struct{}),
@ -111,10 +117,11 @@ func wait(waitChan <-chan struct{}, timeout time.Duration) error {
} }
} }
// WaitRunning waits until state is running. If state already running it returns // waitRunning waits until state is running. If state is already
// immediately. If you want wait forever you must supply negative timeout. // running it returns immediately. If you want wait forever you must
// Returns pid, that was passed to SetRunning // supply negative timeout. Returns pid, that was passed to
func (s *State) WaitRunning(timeout time.Duration) (int, error) { // setRunningLocking.
func (s *State) waitRunning(timeout time.Duration) (int, error) {
s.Lock() s.Lock()
if s.Running { if s.Running {
pid := s.Pid pid := s.Pid
@ -126,12 +133,12 @@ func (s *State) WaitRunning(timeout time.Duration) (int, error) {
if err := wait(waitChan, timeout); err != nil { if err := wait(waitChan, timeout); err != nil {
return -1, err return -1, err
} }
return s.GetPid(), nil return s.getPID(), nil
} }
// WaitStop waits until state is stopped. If state already stopped it returns // WaitStop waits until state is stopped. If state already stopped it returns
// immediately. If you want wait forever you must supply negative timeout. // immediately. If you want wait forever you must supply negative timeout.
// Returns exit code, that was passed to SetStopped // Returns exit code, that was passed to setStoppedLocking
func (s *State) WaitStop(timeout time.Duration) (int, error) { func (s *State) WaitStop(timeout time.Duration) (int, error) {
s.Lock() s.Lock()
if !s.Running { if !s.Running {
@ -144,9 +151,10 @@ func (s *State) WaitStop(timeout time.Duration) (int, error) {
if err := wait(waitChan, timeout); err != nil { if err := wait(waitChan, timeout); err != nil {
return -1, err return -1, err
} }
return s.GetExitCode(), nil return s.getExitCode(), nil
} }
// IsRunning returns whether the running flag is set. Used by Container to check whether a container is running.
func (s *State) IsRunning() bool { func (s *State) IsRunning() bool {
s.Lock() s.Lock()
res := s.Running res := s.Running
@ -154,21 +162,22 @@ func (s *State) IsRunning() bool {
return res return res
} }
func (s *State) GetPid() int { // getPID returns the process id of a container.
func (s *State) getPID() int {
s.Lock() s.Lock()
res := s.Pid res := s.Pid
s.Unlock() s.Unlock()
return res return res
} }
func (s *State) GetExitCode() int { func (s *State) getExitCode() int {
s.Lock() s.Lock()
res := s.ExitCode res := s.ExitCode
s.Unlock() s.Unlock()
return res return res
} }
func (s *State) SetRunning(pid int) { func (s *State) setRunningLocking(pid int) {
s.Lock() s.Lock()
s.setRunning(pid) s.setRunning(pid)
s.Unlock() s.Unlock()
@ -186,7 +195,7 @@ func (s *State) setRunning(pid int) {
s.waitChan = make(chan struct{}) s.waitChan = make(chan struct{})
} }
func (s *State) SetStopped(exitStatus *execdriver.ExitStatus) { func (s *State) setStoppedLocking(exitStatus *execdriver.ExitStatus) {
s.Lock() s.Lock()
s.setStopped(exitStatus) s.setStopped(exitStatus)
s.Unlock() s.Unlock()
@ -203,9 +212,9 @@ func (s *State) setStopped(exitStatus *execdriver.ExitStatus) {
s.waitChan = make(chan struct{}) s.waitChan = make(chan struct{})
} }
// SetRestarting is when docker handles the auto restart of containers when they are // setRestarting is when docker handles the auto restart of containers when they are
// in the middle of a stop and being restarted again // in the middle of a stop and being restarted again
func (s *State) SetRestarting(exitStatus *execdriver.ExitStatus) { func (s *State) setRestartingLocking(exitStatus *execdriver.ExitStatus) {
s.Lock() s.Lock()
s.setRestarting(exitStatus) s.setRestarting(exitStatus)
s.Unlock() s.Unlock()
@ -231,33 +240,14 @@ func (s *State) setError(err error) {
s.Error = err.Error() s.Error = err.Error()
} }
func (s *State) IsRestarting() bool { func (s *State) isPaused() bool {
s.Lock()
res := s.Restarting
s.Unlock()
return res
}
func (s *State) SetPaused() {
s.Lock()
s.Paused = true
s.Unlock()
}
func (s *State) SetUnpaused() {
s.Lock()
s.Paused = false
s.Unlock()
}
func (s *State) IsPaused() bool {
s.Lock() s.Lock()
res := s.Paused res := s.Paused
s.Unlock() s.Unlock()
return res return res
} }
func (s *State) SetRemovalInProgress() error { func (s *State) setRemovalInProgress() error {
s.Lock() s.Lock()
defer s.Unlock() defer s.Unlock()
if s.removalInProgress { if s.removalInProgress {
@ -267,13 +257,13 @@ func (s *State) SetRemovalInProgress() error {
return nil return nil
} }
func (s *State) ResetRemovalInProgress() { func (s *State) resetRemovalInProgress() {
s.Lock() s.Lock()
s.removalInProgress = false s.removalInProgress = false
s.Unlock() s.Unlock()
} }
func (s *State) SetDead() { func (s *State) setDead() {
s.Lock() s.Lock()
s.Dead = true s.Dead = true
s.Unlock() s.Unlock()
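
Because Container embeds State, the exported waiters documented above can be called directly on a container. A sketch of the negative-timeout contract, assuming a *Container already obtained from the daemon (for example via Get); the helper and package names are illustrative:

package example

import (
	"log"
	"time"

	"github.com/docker/docker/daemon"
)

// waitForExit blocks until the container stops; the negative timeout
// means "wait forever". The value returned is the exit code that was
// recorded when the container stopped.
func waitForExit(c *daemon.Container) int {
	code, err := c.WaitStop(-1 * time.Second)
	if err != nil {
		log.Printf("wait interrupted: %v", err)
	}
	return code
}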

View File

@ -14,11 +14,12 @@ func TestStateRunStop(t *testing.T) {
started := make(chan struct{}) started := make(chan struct{})
var pid int64 var pid int64
go func() { go func() {
runPid, _ := s.WaitRunning(-1 * time.Second) runPid, _ := s.waitRunning(-1 * time.Second)
atomic.StoreInt64(&pid, int64(runPid)) atomic.StoreInt64(&pid, int64(runPid))
close(started) close(started)
}() }()
s.SetRunning(i + 100) s.setRunningLocking(i + 100)
if !s.IsRunning() { if !s.IsRunning() {
t.Fatal("State not running") t.Fatal("State not running")
} }
@ -38,8 +39,8 @@ func TestStateRunStop(t *testing.T) {
if runPid != i+100 { if runPid != i+100 {
t.Fatalf("Pid %v, expected %v", runPid, i+100) t.Fatalf("Pid %v, expected %v", runPid, i+100)
} }
if pid, err := s.WaitRunning(-1 * time.Second); err != nil || pid != i+100 { if pid, err := s.waitRunning(-1 * time.Second); err != nil || pid != i+100 {
t.Fatalf("WaitRunning returned pid: %v, err: %v, expected pid: %v, err: %v", pid, err, i+100, nil) t.Fatalf("waitRunning returned pid: %v, err: %v, expected pid: %v, err: %v", pid, err, i+100, nil)
} }
stopped := make(chan struct{}) stopped := make(chan struct{})
@ -49,7 +50,7 @@ func TestStateRunStop(t *testing.T) {
atomic.StoreInt64(&exit, int64(exitCode)) atomic.StoreInt64(&exit, int64(exitCode))
close(stopped) close(stopped)
}() }()
s.SetStopped(&execdriver.ExitStatus{ExitCode: i}) s.setStoppedLocking(&execdriver.ExitStatus{ExitCode: i})
if s.IsRunning() { if s.IsRunning() {
t.Fatal("State is running") t.Fatal("State is running")
} }
@ -79,7 +80,7 @@ func TestStateTimeoutWait(t *testing.T) {
s := NewState() s := NewState()
started := make(chan struct{}) started := make(chan struct{})
go func() { go func() {
s.WaitRunning(100 * time.Millisecond) s.waitRunning(100 * time.Millisecond)
close(started) close(started)
}() }()
select { select {
@ -88,10 +89,12 @@ func TestStateTimeoutWait(t *testing.T) {
case <-started: case <-started:
t.Log("Start callback fired") t.Log("Start callback fired")
} }
s.SetRunning(42) s.setRunningLocking(42)
stopped := make(chan struct{}) stopped := make(chan struct{})
stopped := make(chan struct{}) stopped := make(chan struct{})
go func() { go func() {
s.WaitRunning(100 * time.Millisecond) s.waitRunning(100 * time.Millisecond)
close(stopped) close(stopped)
}() }()
select { select {

View File

@ -10,14 +10,18 @@ import (
"github.com/opencontainers/runc/libcontainer" "github.com/opencontainers/runc/libcontainer"
) )
// ContainerStatsConfig holds information for configuring the runtime
// behavior of a daemon.ContainerStats() call.
type ContainerStatsConfig struct { type ContainerStatsConfig struct {
Stream bool Stream bool
OutStream io.Writer OutStream io.Writer
Stop <-chan bool Stop <-chan bool
} }
// ContainerStats writes information about the container to the stream
// given in the config object.
func (daemon *Daemon) ContainerStats(name string, config *ContainerStatsConfig) error { func (daemon *Daemon) ContainerStats(name string, config *ContainerStatsConfig) error {
updates, err := daemon.SubscribeToContainerStats(name) updates, err := daemon.subscribeToContainerStats(name)
if err != nil { if err != nil {
return err return err
} }
@ -26,7 +30,7 @@ func (daemon *Daemon) ContainerStats(name string, config *ContainerStatsConfig)
config.OutStream.Write(nil) config.OutStream.Write(nil)
} }
var preCpuStats types.CPUStats var preCPUStats types.CPUStats
getStat := func(v interface{}) *types.Stats { getStat := func(v interface{}) *types.Stats {
update := v.(*execdriver.ResourceStats) update := v.(*execdriver.ResourceStats)
// Retrieve the nw statistics from libnetwork and inject them in the Stats // Retrieve the nw statistics from libnetwork and inject them in the Stats
@ -34,17 +38,17 @@ func (daemon *Daemon) ContainerStats(name string, config *ContainerStatsConfig)
update.Stats.Interfaces = nwStats update.Stats.Interfaces = nwStats
} }
ss := convertStatsToAPITypes(update.Stats) ss := convertStatsToAPITypes(update.Stats)
ss.PreCPUStats = preCpuStats ss.PreCPUStats = preCPUStats
ss.MemoryStats.Limit = uint64(update.MemoryLimit) ss.MemoryStats.Limit = uint64(update.MemoryLimit)
ss.Read = update.Read ss.Read = update.Read
ss.CPUStats.SystemUsage = update.SystemUsage ss.CPUStats.SystemUsage = update.SystemUsage
preCpuStats = ss.CPUStats preCPUStats = ss.CPUStats
return ss return ss
} }
enc := json.NewEncoder(config.OutStream) enc := json.NewEncoder(config.OutStream)
defer daemon.UnsubscribeToContainerStats(name, updates) defer daemon.unsubscribeToContainerStats(name, updates)
noStreamFirstFrame := true noStreamFirstFrame := true
for { for {
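
As the noStreamFirstFrame handling above suggests, a non-streaming call is expected to write a single JSON stats frame to OutStream and return. A hedged sketch of such a one-shot call, assuming an initialized daemon d and the illustrative example package:

package example

import (
	"os"

	"github.com/docker/docker/daemon"
)

// printOneStatsSample asks the daemon for a single stats frame for the
// named container and lets it encode the result to stdout as JSON.
func printOneStatsSample(d *daemon.Daemon, name string) error {
	config := &daemon.ContainerStatsConfig{
		Stream:    false,
		OutStream: os.Stdout,
		Stop:      make(chan bool), // never closed in this sketch
	}
	return d.ContainerStats(name, config)
}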

View File

@ -25,7 +25,7 @@ func newStatsCollector(interval time.Duration) *statsCollector {
s := &statsCollector{ s := &statsCollector{
interval: interval, interval: interval,
publishers: make(map[*Container]*pubsub.Publisher), publishers: make(map[*Container]*pubsub.Publisher),
clockTicks: uint64(system.GetClockTicks()), clockTicksPerSecond: uint64(system.GetClockTicks()),
bufReader: bufio.NewReaderSize(nil, 128), bufReader: bufio.NewReaderSize(nil, 128),
} }
go s.run() go s.run()
@ -36,7 +36,7 @@ func newStatsCollector(interval time.Duration) *statsCollector {
type statsCollector struct { type statsCollector struct {
m sync.Mutex m sync.Mutex
interval time.Duration interval time.Duration
clockTicks uint64 clockTicksPerSecond uint64
publishers map[*Container]*pubsub.Publisher publishers map[*Container]*pubsub.Publisher
bufReader *bufio.Reader bufReader *bufio.Reader
} }
@ -89,7 +89,7 @@ func (s *statsCollector) run() {
var pairs []publishersPair var pairs []publishersPair
for range time.Tick(s.interval) { for range time.Tick(s.interval) {
systemUsage, err := s.getSystemCpuUsage() systemUsage, err := s.getSystemCPUUsage()
if err != nil { if err != nil {
logrus.Errorf("collecting system cpu usage: %v", err) logrus.Errorf("collecting system cpu usage: %v", err)
continue continue
@ -107,7 +107,7 @@ func (s *statsCollector) run() {
s.m.Unlock() s.m.Unlock()
for _, pair := range pairs { for _, pair := range pairs {
stats, err := pair.container.Stats() stats, err := pair.container.stats()
if err != nil { if err != nil {
if err != execdriver.ErrNotRunning { if err != execdriver.ErrNotRunning {
logrus.Errorf("collecting stats for %s: %v", pair.container.ID, err) logrus.Errorf("collecting stats for %s: %v", pair.container.ID, err)
@ -120,11 +120,17 @@ func (s *statsCollector) run() {
} }
} }
const nanoSeconds = 1e9 const nanoSecondsPerSecond = 1e9
// getSystemCpuUSage returns the host system's cpu usage in nanoseconds // getSystemCPUUsage returns the host system's cpu usage in
// for the system to match the cgroup readings are returned in the same format. // nanoseconds. An error is returned if the format of the underlying
func (s *statsCollector) getSystemCpuUsage() (uint64, error) { // file does not match.
//
// Uses /proc/stat defined by POSIX. Looks for the cpu
// statistics line and then sums up the first seven fields
// provided. See `man 5 proc` for details on specific field
// information.
func (s *statsCollector) getSystemCPUUsage() (uint64, error) {
var line string var line string
f, err := os.Open("/proc/stat") f, err := os.Open("/proc/stat")
if err != nil { if err != nil {
@ -147,15 +153,16 @@ func (s *statsCollector) getSystemCpuUsage() (uint64, error) {
if len(parts) < 8 { if len(parts) < 8 {
return 0, fmt.Errorf("invalid number of cpu fields") return 0, fmt.Errorf("invalid number of cpu fields")
} }
var sum uint64 var totalClockTicks uint64
for _, i := range parts[1:8] { for _, i := range parts[1:8] {
v, err := strconv.ParseUint(i, 10, 64) v, err := strconv.ParseUint(i, 10, 64)
if err != nil { if err != nil {
return 0, fmt.Errorf("Unable to convert value %s to int: %s", i, err) return 0, fmt.Errorf("Unable to convert value %s to int: %s", i, err)
} }
sum += v totalClockTicks += v
} }
return (sum * nanoSeconds) / s.clockTicks, nil return (totalClockTicks * nanoSecondsPerSecond) /
s.clockTicksPerSecond, nil
} }
} }
return 0, fmt.Errorf("invalid stat format") return 0, fmt.Errorf("invalid stat format")
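
The renamed getSystemCPUUsage and its new comment spell out the whole computation: find the aggregate "cpu" line in /proc/stat, sum its first seven tick counters, and scale clock ticks to nanoseconds. Below is a standalone sketch of that arithmetic; it assumes the common USER_HZ value of 100 where the real collector queries the kernel through system.GetClockTicks:

package main

import (
	"bufio"
	"fmt"
	"os"
	"strconv"
	"strings"
)

// clockTicksPerSecond is assumed to be 100 (the usual USER_HZ); the
// daemon reads the real value at startup instead of hard-coding it.
const clockTicksPerSecond = 100
const nanoSecondsPerSecond = 1e9

// systemCPUUsage returns the cumulative host CPU time in nanoseconds,
// mirroring the parsing described in the comment above.
func systemCPUUsage() (uint64, error) {
	f, err := os.Open("/proc/stat")
	if err != nil {
		return 0, err
	}
	defer f.Close()

	scanner := bufio.NewScanner(f)
	for scanner.Scan() {
		fields := strings.Fields(scanner.Text())
		// The aggregate line is labelled plain "cpu"; its first seven
		// counters (user, nice, system, idle, iowait, irq, softirq)
		// are reported in clock ticks.
		if len(fields) >= 8 && fields[0] == "cpu" {
			var totalClockTicks uint64
			for _, field := range fields[1:8] {
				v, err := strconv.ParseUint(field, 10, 64)
				if err != nil {
					return 0, err
				}
				totalClockTicks += v
			}
			return totalClockTicks * nanoSecondsPerSecond / clockTicksPerSecond, nil
		}
	}
	return 0, fmt.Errorf("invalid stat format")
}

func main() {
	usage, err := systemCPUUsage()
	if err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
	fmt.Printf("cumulative system cpu usage: %d ns\n", usage)
}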

View File

@ -2,6 +2,12 @@ package daemon
import "fmt" import "fmt"
// ContainerStop looks for the given container and terminates it,
// waiting the given number of seconds before forcefully killing the
// container. If a negative number of seconds is given, ContainerStop
// will wait for a graceful termination. An error is returned if the
// container is not found, is already stopped, or if there is a
// problem stopping the container.
func (daemon *Daemon) ContainerStop(name string, seconds int) error { func (daemon *Daemon) ContainerStop(name string, seconds int) error {
container, err := daemon.Get(name) container, err := daemon.Get(name)
if err != nil { if err != nil {
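
A minimal sketch of the stop semantics documented above, assuming an initialized daemon d; the helper and package names are illustrative only:

package example

import "github.com/docker/docker/daemon"

// stopGracefully gives the named container 30 seconds to exit on its
// own before it is killed; a negative value would instead wait forever
// for a clean shutdown.
func stopGracefully(d *daemon.Daemon, name string) error {
	return d.ContainerStop(name, 30)
}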

View File

@ -11,6 +11,11 @@ import (
"github.com/docker/docker/api/types" "github.com/docker/docker/api/types"
) )
// ContainerTop lists the processes running inside of the given
// container by calling ps with the given args, or with the flags
// "-ef" if no args are given. An error is returned if the container
// is not found, or is not running, or if there are any problems
// running ps, or parsing the output.
func (daemon *Daemon) ContainerTop(name string, psArgs string) (*types.ContainerProcessList, error) { func (daemon *Daemon) ContainerTop(name string, psArgs string) (*types.ContainerProcessList, error) {
if psArgs == "" { if psArgs == "" {
psArgs = "-ef" psArgs = "-ef"
@ -50,6 +55,7 @@ func (daemon *Daemon) ContainerTop(name string, psArgs string) (*types.Container
return nil, fmt.Errorf("Couldn't find PID field in ps output") return nil, fmt.Errorf("Couldn't find PID field in ps output")
} }
// loop through the output and extract the PID from each line
for _, line := range lines[1:] { for _, line := range lines[1:] {
if len(line) == 0 { if len(line) == 0 {
continue continue
@ -70,6 +76,6 @@ func (daemon *Daemon) ContainerTop(name string, psArgs string) (*types.Container
} }
} }
} }
container.LogEvent("top") container.logEvent("top")
return procList, nil return procList, nil
} }
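
To show how the documented ps-argument handling is consumed, here is a hedged sketch that requests "aux" style output and prints the Titles and Processes fields of the returned types.ContainerProcessList; d, topWide and the example package are assumptions:

package example

import (
	"fmt"
	"strings"

	"github.com/docker/docker/daemon"
)

// topWide runs `ps aux` against the named container and prints the
// column titles followed by one tab-separated row per process.
// Passing "" instead would fall back to the "-ef" default.
func topWide(d *daemon.Daemon, name string) error {
	procList, err := d.ContainerTop(name, "aux")
	if err != nil {
		return err
	}
	fmt.Println(strings.Join(procList.Titles, "\t"))
	for _, proc := range procList.Processes {
		fmt.Println(strings.Join(proc, "\t"))
	}
	return nil
}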

View File

@ -6,6 +6,7 @@ import (
"github.com/docker/docker/api/types" "github.com/docker/docker/api/types"
) )
// ContainerTop is not supported on Windows and returns an error.
func (daemon *Daemon) ContainerTop(name string, psArgs string) (*types.ContainerProcessList, error) { func (daemon *Daemon) ContainerTop(name string, psArgs string) (*types.ContainerProcessList, error) {
return nil, fmt.Errorf("Top is not supported on Windows") return nil, fmt.Errorf("Top is not supported on Windows")
} }

View File

@ -9,7 +9,7 @@ func (daemon *Daemon) ContainerUnpause(name string) error {
return err return err
} }
if err := container.Unpause(); err != nil { if err := container.unpause(); err != nil {
return fmt.Errorf("Cannot unpause container %s: %s", name, err) return fmt.Errorf("Cannot unpause container %s: %s", name, err)
} }

View File

@ -5,7 +5,7 @@ import "testing"
func TestParseVolumeFrom(t *testing.T) { func TestParseVolumeFrom(t *testing.T) {
cases := []struct { cases := []struct {
spec string spec string
expId string expID string
expMode string expMode string
fail bool fail bool
}{ }{
@ -25,8 +25,8 @@ func TestParseVolumeFrom(t *testing.T) {
continue continue
} }
if id != c.expId { if id != c.expID {
t.Fatalf("Expected id %s, was %s, for spec %s\n", c.expId, id, c.spec) t.Fatalf("Expected id %s, was %s, for spec %s\n", c.expID, id, c.spec)
} }
if mode != c.expMode { if mode != c.expMode {
t.Fatalf("Expected mode %s, was %s for spec %s\n", c.expMode, mode, c.spec) t.Fatalf("Expected mode %s, was %s for spec %s\n", c.expMode, mode, c.spec)

View File

@ -249,7 +249,7 @@ func (daemon *Daemon) verifyVolumesInfo(container *Container) error {
} }
} }
return container.ToDisk() return container.toDiskLocking()
} }
return nil return nil

View File

@ -2,6 +2,11 @@ package daemon
import "time" import "time"
// ContainerWait stops processing until the given container is
// stopped. If the container is not found, an error is returned. On a
// successful stop, the exit code of the container is returned. On a
// timeout, an error is returned. If you want to wait forever, supply
// a negative duration for the timeout.
func (daemon *Daemon) ContainerWait(name string, timeout time.Duration) (int, error) { func (daemon *Daemon) ContainerWait(name string, timeout time.Duration) (int, error) {
container, err := daemon.Get(name) container, err := daemon.Get(name)
if err != nil { if err != nil {
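
A short sketch of the wait-forever case described above, assuming an initialized daemon d; a negative duration blocks until the container exits, at which point its exit code is returned. The helper and package names are illustrative:

package example

import (
	"time"

	"github.com/docker/docker/daemon"
)

// waitForever blocks until the named container stops and hands back
// its exit code, or an error if the container cannot be found.
func waitForever(d *daemon.Daemon, name string) (int, error) {
	return d.ContainerWait(name, -1*time.Second)
}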

View File

@ -18,6 +18,7 @@ packages=(
builder/parser builder/parser
builder/parser/dumper builder/parser/dumper
cliconfig cliconfig
daemon
daemon/events daemon/events
daemon/execdriver daemon/execdriver
daemon/execdriver/execdrivers daemon/execdriver/execdrivers