From 56fa079906f19434ddc809cd1dba9dad177187fd Mon Sep 17 00:00:00 2001 From: mschurenko Date: Sat, 12 Jul 2014 12:15:42 -0700 Subject: [PATCH 001/592] add --releasever=/ to mkimage-yum.sh I didn't realize the commit required a Docker-DCO so it failed the travis-ci build. So I removed the commit from my forked repo. Now it looks like there is a pull request with no commit. So here it is again: Needed to add '--releasever=/' flag to run yum groupinstall on Centos7 (didn't try on anything else). This snippet from yum man page explains why: ``` Note: You may also want to use the option --releasever=/ when creating the installroot as otherwise the $releasever value is taken from the rpmdb within the installroot (and thus. will be empty, before creation). ``` Docker-DCO-1.1-Signed-off-by: Matt Schurenko (github: mschurenko) --- contrib/mkimage-yum.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/contrib/mkimage-yum.sh b/contrib/mkimage-yum.sh index f21a63a225..80f7b4956f 100755 --- a/contrib/mkimage-yum.sh +++ b/contrib/mkimage-yum.sh @@ -57,7 +57,7 @@ mknod -m 666 "$target"/dev/tty0 c 4 0 mknod -m 666 "$target"/dev/urandom c 1 9 mknod -m 666 "$target"/dev/zero c 1 5 -yum -c "$yum_config" --installroot="$target" --setopt=tsflags=nodocs \ +yum -c "$yum_config" --installroot="$target" --releasever=/ --setopt=tsflags=nodocs \ --setopt=group_package_types=mandatory -y groupinstall Core yum -c "$yum_config" --installroot="$target" -y clean all From 8e7aa44f0e610f471dec9dd232599b4b9cba80b2 Mon Sep 17 00:00:00 2001 From: Vincent Batts Date: Thu, 25 Sep 2014 10:57:37 -0400 Subject: [PATCH 002/592] devmapper: include dm_deps information in debug Signed-off-by: Vincent Batts --- daemon/graphdriver/devmapper/deviceset.go | 5 ++++ daemon/graphdriver/devmapper/devmapper.go | 25 +++++++++++++++++++ .../graphdriver/devmapper/devmapper_test.go | 3 ++- .../devmapper/devmapper_wrapper.go | 20 ++++++++++++--- 4 files changed, 49 insertions(+), 4 deletions(-) diff --git a/daemon/graphdriver/devmapper/deviceset.go b/daemon/graphdriver/devmapper/deviceset.go index ccaea0181e..b3b5c84399 100644 --- a/daemon/graphdriver/devmapper/deviceset.go +++ b/daemon/graphdriver/devmapper/deviceset.go @@ -771,10 +771,15 @@ func (devices *DeviceSet) deactivatePool() error { log.Debugf("[devmapper] deactivatePool()") defer log.Debugf("[devmapper] deactivatePool END") devname := devices.getPoolDevName() + devinfo, err := getInfo(devname) if err != nil { return err } + if d, err := getDeps(devname); err == nil { + // Access to more Debug output + log.Debugf("[devmapper] getDeps() %s: %#v", devname, d) + } if devinfo.Exists != 0 { return removeDevice(devname) } diff --git a/daemon/graphdriver/devmapper/devmapper.go b/daemon/graphdriver/devmapper/devmapper.go index d09e740749..42cba76230 100644 --- a/daemon/graphdriver/devmapper/devmapper.go +++ b/daemon/graphdriver/devmapper/devmapper.go @@ -51,6 +51,7 @@ var ( ErrTaskSetRo = errors.New("dm_task_set_ro failed") ErrTaskAddTarget = errors.New("dm_task_add_target failed") ErrTaskSetSector = errors.New("dm_task_set_sector failed") + ErrTaskGetDeps = errors.New("dm_task_get_deps failed") ErrTaskGetInfo = errors.New("dm_task_get_info failed") ErrTaskGetDriverVersion = errors.New("dm_task_get_driver_version failed") ErrTaskSetCookie = errors.New("dm_task_set_cookie failed") @@ -75,6 +76,11 @@ type ( Task struct { unmanaged *CDmTask } + Deps struct { + Count uint32 + Filler uint32 + Device []uint64 + } Info struct { Exists int Suspended int @@ -171,6 +177,14 @@ func (t 
*Task) AddTarget(start, size uint64, ttype, params string) error { return nil } +func (t *Task) GetDeps() (*Deps, error) { + var deps *Deps + if deps = DmTaskGetDeps(t.unmanaged); deps == nil { + return nil, ErrTaskGetDeps + } + return deps, nil +} + func (t *Task) GetInfo() (*Info, error) { info := &Info{} if res := DmTaskGetInfo(t.unmanaged, info); res != 1 { @@ -392,6 +406,17 @@ func createTask(t TaskType, name string) (*Task, error) { return task, nil } +func getDeps(name string) (*Deps, error) { + task, err := createTask(DeviceDeps, name) + if task == nil { + return nil, err + } + if err := task.Run(); err != nil { + return nil, err + } + return task.GetDeps() +} + func getInfo(name string) (*Info, error) { task, err := createTask(DeviceInfo, name) if task == nil { diff --git a/daemon/graphdriver/devmapper/devmapper_test.go b/daemon/graphdriver/devmapper/devmapper_test.go index 167261999e..b6e26bc1d7 100644 --- a/daemon/graphdriver/devmapper/devmapper_test.go +++ b/daemon/graphdriver/devmapper/devmapper_test.go @@ -3,8 +3,9 @@ package devmapper import ( - "github.com/docker/docker/daemon/graphdriver/graphtest" "testing" + + "github.com/docker/docker/daemon/graphdriver/graphtest" ) func init() { diff --git a/daemon/graphdriver/devmapper/devmapper_wrapper.go b/daemon/graphdriver/devmapper/devmapper_wrapper.go index bd1c6fd5b6..855c95e3ba 100644 --- a/daemon/graphdriver/devmapper/devmapper_wrapper.go +++ b/daemon/graphdriver/devmapper/devmapper_wrapper.go @@ -38,9 +38,7 @@ static void log_with_errno_init() */ import "C" -import ( - "unsafe" -) +import "unsafe" type ( CDmTask C.struct_dm_task @@ -92,6 +90,7 @@ var ( DmTaskAddTarget = dmTaskAddTargetFct DmTaskCreate = dmTaskCreateFct DmTaskDestroy = dmTaskDestroyFct + DmTaskGetDeps = dmTaskGetDepsFct DmTaskGetInfo = dmTaskGetInfoFct DmTaskGetDriverVersion = dmTaskGetDriverVersionFct DmTaskRun = dmTaskRunFct @@ -168,6 +167,21 @@ func dmTaskAddTargetFct(task *CDmTask, return int(C.dm_task_add_target((*C.struct_dm_task)(task), C.uint64_t(start), C.uint64_t(size), Cttype, Cparams)) } +func dmTaskGetDepsFct(task *CDmTask) *Deps { + Cdeps := C.dm_task_get_deps((*C.struct_dm_task)(task)) + if Cdeps == nil { + return nil + } + deps := &Deps{ + Count: uint32(Cdeps.count), + Filler: uint32(Cdeps.filler), + } + for _, device := range Cdeps.device { + deps.Device = append(deps.Device, (uint64)(device)) + } + return deps +} + func dmTaskGetInfoFct(task *CDmTask, info *Info) int { Cinfo := C.struct_dm_info{} defer func() { From 92df943fbf225d78b160babb36e9c6fd38cdc0d0 Mon Sep 17 00:00:00 2001 From: Vincent Batts Date: Thu, 25 Sep 2014 12:55:53 -0400 Subject: [PATCH 003/592] daemon logging: unifying output and timestamps A little refactor of the ./pkg/log so engine can have a logger instance Signed-off-by: Vincent Batts --- docker/client.go | 2 +- docker/daemon.go | 5 +-- docker/docker.go | 4 +- engine/engine.go | 3 +- pkg/iptables/iptables.go | 7 ++-- pkg/log/log.go | 79 ++++++++++++++++++++++++++++------------ pkg/log/log_test.go | 12 +++--- pkg/signal/trap.go | 7 ++-- 8 files changed, 76 insertions(+), 43 deletions(-) diff --git a/docker/client.go b/docker/client.go index 27001cc557..58a67067bf 100644 --- a/docker/client.go +++ b/docker/client.go @@ -3,7 +3,7 @@ package main import ( - "log" + "github.com/docker/docker/pkg/log" ) const CanDaemon = false diff --git a/docker/daemon.go b/docker/daemon.go index eef17efdc4..2f65878472 100644 --- a/docker/daemon.go +++ b/docker/daemon.go @@ -3,8 +3,6 @@ package main import ( - "log" - 
"github.com/docker/docker/builder" "github.com/docker/docker/builtins" "github.com/docker/docker/daemon" @@ -12,6 +10,7 @@ import ( _ "github.com/docker/docker/daemon/execdriver/native" "github.com/docker/docker/dockerversion" "github.com/docker/docker/engine" + "github.com/docker/docker/pkg/log" flag "github.com/docker/docker/pkg/mflag" "github.com/docker/docker/pkg/signal" ) @@ -60,7 +59,7 @@ func mainDaemon() { } }() // TODO actually have a resolved graphdriver to show? - log.Printf("docker daemon: %s %s; execdriver: %s; graphdriver: %s", + log.Infof("docker daemon: %s %s; execdriver: %s; graphdriver: %s", dockerversion.VERSION, dockerversion.GITCOMMIT, daemonCfg.ExecDriver, diff --git a/docker/docker.go b/docker/docker.go index e906b2043e..473e424731 100644 --- a/docker/docker.go +++ b/docker/docker.go @@ -5,13 +5,13 @@ import ( "crypto/x509" "fmt" "io/ioutil" - "log" "os" "strings" "github.com/docker/docker/api" "github.com/docker/docker/api/client" "github.com/docker/docker/dockerversion" + "github.com/docker/docker/pkg/log" flag "github.com/docker/docker/pkg/mflag" "github.com/docker/docker/reexec" "github.com/docker/docker/utils" @@ -103,7 +103,7 @@ func main() { if err := cli.Cmd(flag.Args()...); err != nil { if sterr, ok := err.(*utils.StatusError); ok { if sterr.Status != "" { - log.Println(sterr.Status) + log.Infof("%s", sterr.Status) } os.Exit(sterr.StatusCode) } diff --git a/engine/engine.go b/engine/engine.go index 5c708d405f..769f644a17 100644 --- a/engine/engine.go +++ b/engine/engine.go @@ -11,6 +11,7 @@ import ( "time" "github.com/docker/docker/pkg/ioutils" + "github.com/docker/docker/pkg/timeutils" "github.com/docker/docker/utils" ) @@ -255,6 +256,6 @@ func (eng *Engine) Logf(format string, args ...interface{}) (n int, err error) { if !eng.Logging { return 0, nil } - prefixedFormat := fmt.Sprintf("[%s] %s\n", eng, strings.TrimRight(format, "\n")) + prefixedFormat := fmt.Sprintf("[%s] [%s] %s\n", time.Now().Format(timeutils.RFC3339NanoFixed), eng, strings.TrimRight(format, "\n")) return fmt.Fprintf(eng.Stderr, prefixedFormat, args...) } diff --git a/pkg/iptables/iptables.go b/pkg/iptables/iptables.go index 88d8b5f352..b8d9e56705 100644 --- a/pkg/iptables/iptables.go +++ b/pkg/iptables/iptables.go @@ -4,11 +4,12 @@ import ( "errors" "fmt" "net" - "os" "os/exec" "regexp" "strconv" "strings" + + "github.com/docker/docker/pkg/log" ) type Action string @@ -175,9 +176,7 @@ func Raw(args ...string) ([]byte, error) { args = append([]string{"--wait"}, args...) 
} - if os.Getenv("DEBUG") != "" { - fmt.Fprintf(os.Stderr, fmt.Sprintf("[debug] %s, %v\n", path, args)) - } + log.Debugf("%s, %v", path, args) output, err := exec.Command(path, args...).CombinedOutput() if err != nil { diff --git a/pkg/log/log.go b/pkg/log/log.go index 53be6cf182..b06d958cb1 100644 --- a/pkg/log/log.go +++ b/pkg/log/log.go @@ -6,18 +6,21 @@ import ( "os" "runtime" "strings" + "time" + + "github.com/docker/docker/pkg/timeutils" ) type priority int const ( - errorFormat = "[%s] %s:%d %s\n" - logFormat = "[%s] %s\n" + errorFormat = "[%s] [%s] %s:%d %s\n" + logFormat = "[%s] [%s] %s\n" - fatal priority = iota - error - info - debug + fatalPriority priority = iota + errorPriority + infoPriority + debugPriority ) // A common interface to access the Fatal method of @@ -28,44 +31,72 @@ type Fataler interface { func (p priority) String() string { switch p { - case fatal: + case fatalPriority: return "fatal" - case error: + case errorPriority: return "error" - case info: + case infoPriority: return "info" - case debug: + case debugPriority: return "debug" } return "" } +var DefaultLogger = Logger{Out: os.Stdout, Err: os.Stderr} + // Debug function, if the debug flag is set, then display. Do nothing otherwise // If Docker is in damon mode, also send the debug info on the socket -func Debugf(format string, a ...interface{}) { - if os.Getenv("DEBUG") != "" { - logf(os.Stderr, debug, format, a...) - } +func Debugf(format string, a ...interface{}) (int, error) { + return DefaultLogger.Debugf(format, a...) } -func Infof(format string, a ...interface{}) { - logf(os.Stdout, info, format, a...) +func Infof(format string, a ...interface{}) (int, error) { + return DefaultLogger.Infof(format, a...) } -func Errorf(format string, a ...interface{}) { - logf(os.Stderr, error, format, a...) +func Errorf(format string, a ...interface{}) (int, error) { + return DefaultLogger.Errorf(format, a...) +} + +func Fatal(a ...interface{}) { + DefaultLogger.Fatalf("%s", a...) } func Fatalf(format string, a ...interface{}) { - logf(os.Stderr, fatal, format, a...) + DefaultLogger.Fatalf(format, a...) +} + +type Logger struct { + Err io.Writer + Out io.Writer +} + +func (l Logger) Debugf(format string, a ...interface{}) (int, error) { + if os.Getenv("DEBUG") != "" { + return l.logf(l.Err, debugPriority, format, a...) + } + return 0, nil +} + +func (l Logger) Infof(format string, a ...interface{}) (int, error) { + return l.logf(l.Out, infoPriority, format, a...) +} + +func (l Logger) Errorf(format string, a ...interface{}) (int, error) { + return l.logf(l.Err, errorPriority, format, a...) +} + +func (l Logger) Fatalf(format string, a ...interface{}) { + l.logf(l.Err, fatalPriority, format, a...) 
os.Exit(1) } -func logf(stream io.Writer, level priority, format string, a ...interface{}) { +func (l Logger) logf(stream io.Writer, level priority, format string, a ...interface{}) (int, error) { var prefix string - if level <= error || level == debug { + if level <= errorPriority || level == debugPriority { // Retrieve the stack infos _, file, line, ok := runtime.Caller(2) if !ok { @@ -74,10 +105,10 @@ func logf(stream io.Writer, level priority, format string, a ...interface{}) { } else { file = file[strings.LastIndex(file, "/")+1:] } - prefix = fmt.Sprintf(errorFormat, level.String(), file, line, format) + prefix = fmt.Sprintf(errorFormat, time.Now().Format(timeutils.RFC3339NanoFixed), level.String(), file, line, format) } else { - prefix = fmt.Sprintf(logFormat, level.String(), format) + prefix = fmt.Sprintf(logFormat, time.Now().Format(timeutils.RFC3339NanoFixed), level.String(), format) } - fmt.Fprintf(stream, prefix, a...) + return fmt.Fprintf(stream, prefix, a...) } diff --git a/pkg/log/log_test.go b/pkg/log/log_test.go index 83ba5fd27c..4f5b3f82ed 100644 --- a/pkg/log/log_test.go +++ b/pkg/log/log_test.go @@ -7,6 +7,8 @@ import ( "testing" ) +var reRFC3339NanoFixed = "[0-9]{4}-[0-9]{2}-[0-9]{2}T[0-9]{2}:[0-9]{2}:[0-9]{2}.[0-9]{9}.([0-9]{2}:[0-9]{2})?" + func TestLogFatalf(t *testing.T) { var output *bytes.Buffer @@ -16,15 +18,15 @@ func TestLogFatalf(t *testing.T) { Values []interface{} ExpectedPattern string }{ - {fatal, "%d + %d = %d", []interface{}{1, 1, 2}, "\\[fatal\\] testing.go:\\d+ 1 \\+ 1 = 2"}, - {error, "%d + %d = %d", []interface{}{1, 1, 2}, "\\[error\\] testing.go:\\d+ 1 \\+ 1 = 2"}, - {info, "%d + %d = %d", []interface{}{1, 1, 2}, "\\[info\\] 1 \\+ 1 = 2"}, - {debug, "%d + %d = %d", []interface{}{1, 1, 2}, "\\[debug\\] testing.go:\\d+ 1 \\+ 1 = 2"}, + {fatalPriority, "%d + %d = %d", []interface{}{1, 1, 2}, "\\[" + reRFC3339NanoFixed + "\\] \\[fatal\\] testing.go:\\d+ 1 \\+ 1 = 2"}, + {errorPriority, "%d + %d = %d", []interface{}{1, 1, 2}, "\\[" + reRFC3339NanoFixed + "\\] \\[error\\] testing.go:\\d+ 1 \\+ 1 = 2"}, + {infoPriority, "%d + %d = %d", []interface{}{1, 1, 2}, "\\[" + reRFC3339NanoFixed + "\\] \\[info\\] 1 \\+ 1 = 2"}, + {debugPriority, "%d + %d = %d", []interface{}{1, 1, 2}, "\\[" + reRFC3339NanoFixed + "\\] \\[debug\\] testing.go:\\d+ 1 \\+ 1 = 2"}, } for i, test := range tests { output = &bytes.Buffer{} - logf(output, test.Level, test.Format, test.Values...) + DefaultLogger.logf(output, test.Level, test.Format, test.Values...) expected := regexp.MustCompile(test.ExpectedPattern) if !expected.MatchString(output.String()) { diff --git a/pkg/signal/trap.go b/pkg/signal/trap.go index cbdfd1ff17..42ddb4d277 100644 --- a/pkg/signal/trap.go +++ b/pkg/signal/trap.go @@ -1,11 +1,12 @@ package signal import ( - "log" "os" gosignal "os/signal" "sync/atomic" "syscall" + + "github.com/docker/docker/pkg/log" ) // Trap sets up a simplified signal "trap", appropriate for common @@ -28,7 +29,7 @@ func Trap(cleanup func()) { interruptCount := uint32(0) for sig := range c { go func(sig os.Signal) { - log.Printf("Received signal '%v', starting shutdown of docker...\n", sig) + log.Infof("Received signal '%v', starting shutdown of docker...", sig) switch sig { case os.Interrupt, syscall.SIGTERM: // If the user really wants to interrupt, let him do so. 
@@ -43,7 +44,7 @@ func Trap(cleanup func()) { return } } else { - log.Printf("Force shutdown of docker, interrupting cleanup\n") + log.Infof("Force shutdown of docker, interrupting cleanup") } case syscall.SIGQUIT: } From 6c60e8c7849742c111a2b0a5ad4ff88fc8e960ef Mon Sep 17 00:00:00 2001 From: Jessica Frazelle Date: Tue, 23 Sep 2014 17:24:52 -0700 Subject: [PATCH 004/592] Adding self to various maintainers files. Docker-DCO-1.1-Signed-off-by: Jessica Frazelle (github: jfrazelle) --- api/MAINTAINERS | 1 + contrib/completion/MAINTAINERS | 2 ++ contrib/init/systemd/MAINTAINERS | 1 + contrib/init/upstart/MAINTAINERS | 2 ++ pkg/iptables/MAINTAINERS | 1 + pkg/units/MAINTAINERS | 2 +- 6 files changed, 8 insertions(+), 1 deletion(-) create mode 100644 contrib/completion/MAINTAINERS create mode 100644 contrib/init/upstart/MAINTAINERS diff --git a/api/MAINTAINERS b/api/MAINTAINERS index e0f18f14f1..96abeae570 100644 --- a/api/MAINTAINERS +++ b/api/MAINTAINERS @@ -1 +1,2 @@ Victor Vieux (@vieux) +Jessie Frazelle (@jfrazelle) diff --git a/contrib/completion/MAINTAINERS b/contrib/completion/MAINTAINERS new file mode 100644 index 0000000000..03ee2dde3d --- /dev/null +++ b/contrib/completion/MAINTAINERS @@ -0,0 +1,2 @@ +Tianon Gravi (@tianon) +Jessie Frazelle (@jfrazelle) diff --git a/contrib/init/systemd/MAINTAINERS b/contrib/init/systemd/MAINTAINERS index 760a76d6fe..b9ba55b3fb 100644 --- a/contrib/init/systemd/MAINTAINERS +++ b/contrib/init/systemd/MAINTAINERS @@ -1,2 +1,3 @@ Lokesh Mandvekar (@lsm5) Brandon Philips (@philips) +Jessie Frazelle (@jfrazelle) diff --git a/contrib/init/upstart/MAINTAINERS b/contrib/init/upstart/MAINTAINERS new file mode 100644 index 0000000000..03ee2dde3d --- /dev/null +++ b/contrib/init/upstart/MAINTAINERS @@ -0,0 +1,2 @@ +Tianon Gravi (@tianon) +Jessie Frazelle (@jfrazelle) diff --git a/pkg/iptables/MAINTAINERS b/pkg/iptables/MAINTAINERS index 1e998f8ac1..134b02a071 100644 --- a/pkg/iptables/MAINTAINERS +++ b/pkg/iptables/MAINTAINERS @@ -1 +1,2 @@ Michael Crosby (@crosbymichael) +Jessie Frazelle (@jfrazelle) diff --git a/pkg/units/MAINTAINERS b/pkg/units/MAINTAINERS index 68a97d2fc2..96abeae570 100644 --- a/pkg/units/MAINTAINERS +++ b/pkg/units/MAINTAINERS @@ -1,2 +1,2 @@ -Michael Crosby (@crosbymichael) Victor Vieux (@vieux) +Jessie Frazelle (@jfrazelle) From f5f4d2d9cbeefb1d896d987a87058eeb65b4ebf6 Mon Sep 17 00:00:00 2001 From: Malte Janduda Date: Tue, 30 Sep 2014 17:00:24 +0200 Subject: [PATCH 005/592] Implementing IPv6 functionality for ipallocator Closes #6975 Signed-off-by: Malte Janduda --- daemon/networkdriver/ipallocator/allocator.go | 91 ++++---- .../ipallocator/allocator_test.go | 205 +++++++++++++++++- daemon/networkdriver/network_test.go | 15 -- daemon/networkdriver/utils.go | 31 ++- 4 files changed, 259 insertions(+), 83 deletions(-) diff --git a/daemon/networkdriver/ipallocator/allocator.go b/daemon/networkdriver/ipallocator/allocator.go index a1aaabbdfe..d5c644b23c 100644 --- a/daemon/networkdriver/ipallocator/allocator.go +++ b/daemon/networkdriver/ipallocator/allocator.go @@ -1,31 +1,38 @@ package ipallocator import ( - "encoding/binary" "errors" + "math/big" "net" "sync" "github.com/docker/docker/daemon/networkdriver" + "github.com/docker/docker/pkg/log" ) // allocatedMap is thread-unsafe set of allocated IP type allocatedMap struct { - p map[uint32]struct{} - last uint32 - begin uint32 - end uint32 + p map[string]struct{} + last *big.Int + begin *big.Int + end *big.Int } func newAllocatedMap(network *net.IPNet) *allocatedMap { firstIP, lastIP := 
networkdriver.NetworkRange(network) - begin := ipToInt(firstIP) + 2 - end := ipToInt(lastIP) - 1 + begin := big.NewInt(0).Add(ipToBigInt(firstIP), big.NewInt(1)) + end := big.NewInt(0).Sub(ipToBigInt(lastIP), big.NewInt(1)) + + // if IPv4 network, then allocation range starts at begin + 1 because begin is bridge IP + if len(firstIP) == 4 { + begin = begin.Add(begin, big.NewInt(1)) + } + return &allocatedMap{ - p: make(map[uint32]struct{}), + p: make(map[string]struct{}), begin: begin, end: end, - last: begin - 1, // so first allocated will be begin + last: big.NewInt(0).Sub(begin, big.NewInt(1)), // so first allocated will be begin } } @@ -56,13 +63,16 @@ func RegisterSubnet(network *net.IPNet, subnet *net.IPNet) error { } n := newAllocatedMap(network) beginIP, endIP := networkdriver.NetworkRange(subnet) - begin, end := ipToInt(beginIP)+1, ipToInt(endIP)-1 - if !(begin >= n.begin && end <= n.end && begin < end) { + begin := big.NewInt(0).Add(ipToBigInt(beginIP), big.NewInt(1)) + end := big.NewInt(0).Sub(ipToBigInt(endIP), big.NewInt(1)) + + // Check that subnet is within network + if !(begin.Cmp(n.begin) >= 0 && end.Cmp(n.end) <= 0 && begin.Cmp(end) == -1) { return ErrBadSubnet } - n.begin = begin - n.end = end - n.last = begin - 1 + n.begin.Set(begin) + n.end.Set(end) + n.last.Sub(begin, big.NewInt(1)) allocatedIPs[key] = n return nil } @@ -93,28 +103,25 @@ func ReleaseIP(network *net.IPNet, ip net.IP) error { lock.Lock() defer lock.Unlock() if allocated, exists := allocatedIPs[network.String()]; exists { - pos := ipToInt(ip) - delete(allocated.p, pos) + delete(allocated.p, ip.String()) } return nil } func (allocated *allocatedMap) checkIP(ip net.IP) (net.IP, error) { - pos := ipToInt(ip) - - // Verify that the IP address has not been already allocated. - if _, ok := allocated.p[pos]; ok { + if _, ok := allocated.p[ip.String()]; ok { return nil, ErrIPAlreadyAllocated } + pos := ipToBigInt(ip) // Verify that the IP address is within our network range. - if pos < allocated.begin || pos > allocated.end { + if pos.Cmp(allocated.begin) == -1 || pos.Cmp(allocated.end) == 1 { return nil, ErrIPOutOfRange } // Register the IP. - allocated.p[pos] = struct{}{} - allocated.last = pos + allocated.p[ip.String()] = struct{}{} + allocated.last.Set(pos) return ip, nil } @@ -122,29 +129,35 @@ func (allocated *allocatedMap) checkIP(ip net.IP) (net.IP, error) { // return an available ip if one is currently available. 
If not, // return the next available ip for the nextwork func (allocated *allocatedMap) getNextIP() (net.IP, error) { - for pos := allocated.last + 1; pos != allocated.last; pos++ { - if pos > allocated.end { - pos = allocated.begin + for pos := big.NewInt(0).Add(allocated.last, big.NewInt(1)); pos.Cmp(allocated.last) != 0; pos.Add(pos, big.NewInt(1)) { + if pos.Cmp(allocated.end) == 1 { + pos.Set(allocated.begin) } - if _, ok := allocated.p[pos]; ok { + if _, ok := allocated.p[bigIntToIP(pos).String()]; ok { continue } - allocated.p[pos] = struct{}{} - allocated.last = pos - return intToIP(pos), nil + allocated.p[bigIntToIP(pos).String()] = struct{}{} + allocated.last.Set(pos) + return bigIntToIP(pos), nil } return nil, ErrNoAvailableIPs } -// Converts a 4 bytes IP into a 32 bit integer -func ipToInt(ip net.IP) uint32 { - return binary.BigEndian.Uint32(ip.To4()) +// Converts a 4 bytes IP into a 128 bit integer +func ipToBigInt(ip net.IP) *big.Int { + x := big.NewInt(0) + if ip4 := ip.To4(); ip4 != nil { + return x.SetBytes(ip4) + } + if ip6 := ip.To16(); ip6 != nil { + return x.SetBytes(ip6) + } + + log.Errorf("ipToBigInt: Wrong IP length! %s", ip) + return nil } -// Converts 32 bit integer into a 4 bytes IP address -func intToIP(n uint32) net.IP { - b := make([]byte, 4) - binary.BigEndian.PutUint32(b, n) - ip := net.IP(b) - return ip +// Converts 128 bit integer into a 4 bytes IP address +func bigIntToIP(v *big.Int) net.IP { + return net.IP(v.Bytes()) } diff --git a/daemon/networkdriver/ipallocator/allocator_test.go b/daemon/networkdriver/ipallocator/allocator_test.go index 056c13b647..c4ce40cd0a 100644 --- a/daemon/networkdriver/ipallocator/allocator_test.go +++ b/daemon/networkdriver/ipallocator/allocator_test.go @@ -2,6 +2,7 @@ package ipallocator import ( "fmt" + "math/big" "net" "testing" ) @@ -10,6 +11,46 @@ func reset() { allocatedIPs = networkSet{} } +func TestConversion(t *testing.T) { + ip := net.ParseIP("127.0.0.1") + i := ipToBigInt(ip) + if i.Cmp(big.NewInt(0x7f000001)) != 0 { + t.Fatal("incorrect conversion") + } + conv := bigIntToIP(i) + if !ip.Equal(conv) { + t.Error(conv.String()) + } +} + +func TestConversionIPv6(t *testing.T) { + ip := net.ParseIP("2a00:1450::1") + ip2 := net.ParseIP("2a00:1450::2") + ip3 := net.ParseIP("2a00:1450::1:1") + i := ipToBigInt(ip) + val, success := big.NewInt(0).SetString("2a001450000000000000000000000001", 16) + if !success { + t.Fatal("Hex-String to BigInt conversion failed.") + } + if i.Cmp(val) != 0 { + t.Fatal("incorrent conversion") + } + + conv := bigIntToIP(i) + conv2 := bigIntToIP(big.NewInt(0).Add(i, big.NewInt(1))) + conv3 := bigIntToIP(big.NewInt(0).Add(i, big.NewInt(0x10000))) + + if !ip.Equal(conv) { + t.Error("2a00:1450::1 should be equal to " + conv.String()) + } + if !ip2.Equal(conv2) { + t.Error("2a00:1450::2 should be equal to " + conv2.String()) + } + if !ip3.Equal(conv3) { + t.Error("2a00:1450::1:1 should be equal to " + conv3.String()) + } +} + func TestRequestNewIps(t *testing.T) { defer reset() network := &net.IPNet{ @@ -19,6 +60,7 @@ func TestRequestNewIps(t *testing.T) { var ip net.IP var err error + for i := 2; i < 10; i++ { ip, err = RequestIP(network, nil) if err != nil { @@ -29,7 +71,39 @@ func TestRequestNewIps(t *testing.T) { t.Fatalf("Expected ip %s got %s", expected, ip.String()) } } - value := intToIP(ipToInt(ip) + 1).String() + value := bigIntToIP(big.NewInt(0).Add(ipToBigInt(ip), big.NewInt(1))).String() + if err := ReleaseIP(network, ip); err != nil { + t.Fatal(err) + } + ip, err = RequestIP(network, 
nil) + if err != nil { + t.Fatal(err) + } + if ip.String() != value { + t.Fatalf("Expected to receive the next ip %s got %s", value, ip.String()) + } +} + +func TestRequestNewIpV6(t *testing.T) { + defer reset() + network := &net.IPNet{ + IP: []byte{0x2a, 0x00, 0x14, 0x50, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1}, + Mask: []byte{255, 255, 255, 255, 255, 255, 255, 255, 0, 0, 0, 0, 0, 0, 0, 0}, // /64 netmask + } + + var ip net.IP + var err error + for i := 1; i < 10; i++ { + ip, err = RequestIP(network, nil) + if err != nil { + t.Fatal(err) + } + + if expected := fmt.Sprintf("2a00:1450::%d", i); ip.String() != expected { + t.Fatalf("Expected ip %s got %s", expected, ip.String()) + } + } + value := bigIntToIP(big.NewInt(0).Add(ipToBigInt(ip), big.NewInt(1))).String() if err := ReleaseIP(network, ip); err != nil { t.Fatal(err) } @@ -59,6 +133,23 @@ func TestReleaseIp(t *testing.T) { } } +func TestReleaseIpV6(t *testing.T) { + defer reset() + network := &net.IPNet{ + IP: []byte{0x2a, 0x00, 0x14, 0x50, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1}, + Mask: []byte{255, 255, 255, 255, 255, 255, 255, 255, 0, 0, 0, 0, 0, 0, 0, 0}, // /64 netmask + } + + ip, err := RequestIP(network, nil) + if err != nil { + t.Fatal(err) + } + + if err := ReleaseIP(network, ip); err != nil { + t.Fatal(err) + } +} + func TestGetReleasedIp(t *testing.T) { defer reset() network := &net.IPNet{ @@ -97,6 +188,44 @@ func TestGetReleasedIp(t *testing.T) { } } +func TestGetReleasedIpV6(t *testing.T) { + defer reset() + network := &net.IPNet{ + IP: []byte{0x2a, 0x00, 0x14, 0x50, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1}, + Mask: []byte{255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 0}, + } + + ip, err := RequestIP(network, nil) + if err != nil { + t.Fatal(err) + } + + value := ip.String() + if err := ReleaseIP(network, ip); err != nil { + t.Fatal(err) + } + + for i := 0; i < 253; i++ { + _, err = RequestIP(network, nil) + if err != nil { + t.Fatal(err) + } + err = ReleaseIP(network, ip) + if err != nil { + t.Fatal(err) + } + } + + ip, err = RequestIP(network, nil) + if err != nil { + t.Fatal(err) + } + + if ip.String() != value { + t.Fatalf("Expected to receive same ip %s got %s", value, ip.String()) + } +} + func TestRequestSpecificIp(t *testing.T) { defer reset() network := &net.IPNet{ @@ -122,15 +251,28 @@ func TestRequestSpecificIp(t *testing.T) { } } -func TestConversion(t *testing.T) { - ip := net.ParseIP("127.0.0.1") - i := ipToInt(ip) - if i == 0 { - t.Fatal("converted to zero") +func TestRequestSpecificIpV6(t *testing.T) { + defer reset() + network := &net.IPNet{ + IP: []byte{0x2a, 0x00, 0x14, 0x50, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1}, + Mask: []byte{255, 255, 255, 255, 255, 255, 255, 255, 0, 0, 0, 0, 0, 0, 0, 0}, // /64 netmask } - conv := intToIP(i) - if !ip.Equal(conv) { - t.Error(conv.String()) + + ip := net.ParseIP("2a00:1450::5") + + // Request a "good" IP. + if _, err := RequestIP(network, ip); err != nil { + t.Fatal(err) + } + + // Request the same IP again. + if _, err := RequestIP(network, ip); err != ErrIPAlreadyAllocated { + t.Fatalf("Got the same IP twice: %#v", err) + } + + // Request an out of range IP. 
+ if _, err := RequestIP(network, net.ParseIP("2a00:1500::1")); err != ErrIPOutOfRange { + t.Fatalf("Got an out of range IP: %#v", err) } } @@ -144,6 +286,7 @@ func TestIPAllocator(t *testing.T) { } gwIP, n, _ := net.ParseCIDR("127.0.0.1/29") + network := &net.IPNet{IP: gwIP, Mask: n.Mask} // Pool after initialisation (f = free, u = used) // 2(f) - 3(f) - 4(f) - 5(f) - 6(f) @@ -237,13 +380,13 @@ func TestAllocateFirstIP(t *testing.T) { } firstIP := network.IP.To4().Mask(network.Mask) - first := ipToInt(firstIP) + 1 + first := big.NewInt(0).Add(ipToBigInt(firstIP), big.NewInt(1)) ip, err := RequestIP(network, nil) if err != nil { t.Fatal(err) } - allocated := ipToInt(ip) + allocated := ipToBigInt(ip) if allocated == first { t.Fatalf("allocated ip should not equal first ip: %d == %d", first, allocated) @@ -301,11 +444,24 @@ func TestAllocateDifferentSubnets(t *testing.T) { IP: []byte{127, 0, 0, 1}, Mask: []byte{255, 255, 255, 0}, } + network3 := &net.IPNet{ + IP: []byte{0x2a, 0x00, 0x14, 0x50, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1}, + Mask: []byte{255, 255, 255, 255, 255, 255, 255, 255, 0, 0, 0, 0, 0, 0, 0, 0}, // /64 netmask + } + network4 := &net.IPNet{ + IP: []byte{0x2a, 0x00, 0x16, 0x32, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1}, + Mask: []byte{255, 255, 255, 255, 255, 255, 255, 255, 0, 0, 0, 0, 0, 0, 0, 0}, // /64 netmask + } expectedIPs := []net.IP{ 0: net.IPv4(192, 168, 0, 2), 1: net.IPv4(192, 168, 0, 3), 2: net.IPv4(127, 0, 0, 2), 3: net.IPv4(127, 0, 0, 3), + 4: net.ParseIP("2a00:1450::1"), + 5: net.ParseIP("2a00:1450::2"), + 6: net.ParseIP("2a00:1450::3"), + 7: net.ParseIP("2a00:1632::1"), + 8: net.ParseIP("2a00:1632::2"), } ip11, err := RequestIP(network1, nil) @@ -324,11 +480,37 @@ func TestAllocateDifferentSubnets(t *testing.T) { if err != nil { t.Fatal(err) } + ip31, err := RequestIP(network3, nil) + if err != nil { + t.Fatal(err) + } + ip32, err := RequestIP(network3, nil) + if err != nil { + t.Fatal(err) + } + ip33, err := RequestIP(network3, nil) + if err != nil { + t.Fatal(err) + } + ip41, err := RequestIP(network4, nil) + if err != nil { + t.Fatal(err) + } + ip42, err := RequestIP(network4, nil) + if err != nil { + t.Fatal(err) + } assertIPEquals(t, expectedIPs[0], ip11) assertIPEquals(t, expectedIPs[1], ip12) assertIPEquals(t, expectedIPs[2], ip21) assertIPEquals(t, expectedIPs[3], ip22) + assertIPEquals(t, expectedIPs[4], ip31) + assertIPEquals(t, expectedIPs[5], ip32) + assertIPEquals(t, expectedIPs[6], ip33) + assertIPEquals(t, expectedIPs[7], ip41) + assertIPEquals(t, expectedIPs[8], ip42) } + func TestRegisterBadTwice(t *testing.T) { defer reset() network := &net.IPNet{ @@ -378,6 +560,7 @@ func TestAllocateFromRange(t *testing.T) { IP: []byte{192, 168, 0, 8}, Mask: []byte{255, 255, 255, 248}, } + if err := RegisterSubnet(network, subnet); err != nil { t.Fatal(err) } diff --git a/daemon/networkdriver/network_test.go b/daemon/networkdriver/network_test.go index d655cb30e4..1a6336b5de 100644 --- a/daemon/networkdriver/network_test.go +++ b/daemon/networkdriver/network_test.go @@ -122,9 +122,6 @@ func TestNetworkRange(t *testing.T) { if !last.Equal(net.ParseIP("192.168.0.255")) { t.Error(last.String()) } - if size := NetworkSize(network.Mask); size != 256 { - t.Error(size) - } // Class A test _, network, _ = net.ParseCIDR("10.0.0.1/8") @@ -135,9 +132,6 @@ func TestNetworkRange(t *testing.T) { if !last.Equal(net.ParseIP("10.255.255.255")) { t.Error(last.String()) } - if size := NetworkSize(network.Mask); size != 16777216 { - t.Error(size) - } // Class A, random IP address _, 
network, _ = net.ParseCIDR("10.1.2.3/8") @@ -158,9 +152,6 @@ func TestNetworkRange(t *testing.T) { if !last.Equal(net.ParseIP("10.1.2.3")) { t.Error(last.String()) } - if size := NetworkSize(network.Mask); size != 1 { - t.Error(size) - } // 31bit mask _, network, _ = net.ParseCIDR("10.1.2.3/31") @@ -171,9 +162,6 @@ func TestNetworkRange(t *testing.T) { if !last.Equal(net.ParseIP("10.1.2.3")) { t.Error(last.String()) } - if size := NetworkSize(network.Mask); size != 2 { - t.Error(size) - } // 26bit mask _, network, _ = net.ParseCIDR("10.1.2.3/26") @@ -184,7 +172,4 @@ func TestNetworkRange(t *testing.T) { if !last.Equal(net.ParseIP("10.1.2.63")) { t.Error(last.String()) } - if size := NetworkSize(network.Mask); size != 64 { - t.Error(size) - } } diff --git a/daemon/networkdriver/utils.go b/daemon/networkdriver/utils.go index 410d6010c4..07d95445a0 100644 --- a/daemon/networkdriver/utils.go +++ b/daemon/networkdriver/utils.go @@ -1,7 +1,6 @@ package networkdriver import ( - "encoding/binary" "errors" "fmt" "net" @@ -56,25 +55,21 @@ func NetworkOverlaps(netX *net.IPNet, netY *net.IPNet) bool { // Calculates the first and last IP addresses in an IPNet func NetworkRange(network *net.IPNet) (net.IP, net.IP) { - var ( - netIP = network.IP.To4() - firstIP = netIP.Mask(network.Mask) - lastIP = net.IPv4(0, 0, 0, 0).To4() - ) + var netIP net.IP + if network.IP.To4() != nil { + netIP = network.IP.To4() + } else if network.IP.To16() != nil { + netIP = network.IP.To16() + } else { + return nil, nil + } - for i := 0; i < len(lastIP); i++ { + lastIP := make([]byte, len(netIP), len(netIP)) + + for i := 0; i < len(netIP); i++ { lastIP[i] = netIP[i] | ^network.Mask[i] } - return firstIP, lastIP -} - -// Given a netmask, calculates the number of available hosts -func NetworkSize(mask net.IPMask) int32 { - m := net.IPv4Mask(0, 0, 0, 0) - for i := 0; i < net.IPv4len; i++ { - m[i] = ^mask[i] - } - return int32(binary.BigEndian.Uint32(m)) + 1 + return netIP.Mask(network.Mask), net.IP(lastIP) } // Return the IPv4 address of a network interface @@ -90,7 +85,7 @@ func GetIfaceAddr(name string) (net.Addr, error) { var addrs4 []net.Addr for _, addr := range addrs { ip := (addr.(*net.IPNet)).IP - if ip4 := ip.To4(); len(ip4) == net.IPv4len { + if ip4 := ip.To4(); ip4 != nil { addrs4 = append(addrs4, addr) } } From ed55a2db06c7343f00a71844b166da1bbfbb812d Mon Sep 17 00:00:00 2001 From: Brian Goff Date: Fri, 3 Oct 2014 10:17:42 -0400 Subject: [PATCH 006/592] Add more names Docker-DCO-1.1-Signed-off-by: Brian Goff (github: cpuguy83) --- pkg/namesgenerator/names-generator.go | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/pkg/namesgenerator/names-generator.go b/pkg/namesgenerator/names-generator.go index ebb5850bda..30df30fdc9 100644 --- a/pkg/namesgenerator/names-generator.go +++ b/pkg/namesgenerator/names-generator.go @@ -7,7 +7,7 @@ import ( ) var ( - left = [...]string{"happy", "jolly", "dreamy", "sad", "angry", "pensive", "focused", "sleepy", "grave", "distracted", "determined", "stoic", "stupefied", "sharp", "agitated", "cocky", "tender", "goofy", "furious", "desperate", "hopeful", "compassionate", "silly", "lonely", "condescending", "naughty", "kickass", "drunk", "boring", "nostalgic", "ecstatic", "insane", "cranky", "mad", "jovial", "sick", "hungry", "thirsty", "elegant", "backstabbing", "clever", "trusting", "loving", "suspicious", "berserk", "high", "romantic", "prickly", "evil"} + left = [...]string{"happy", "jolly", "dreamy", "sad", "angry", "pensive", "focused", "sleepy", "grave", 
"distracted", "determined", "stoic", "stupefied", "sharp", "agitated", "cocky", "tender", "goofy", "furious", "desperate", "hopeful", "compassionate", "silly", "lonely", "condescending", "naughty", "kickass", "drunk", "boring", "nostalgic", "ecstatic", "insane", "cranky", "mad", "jovial", "sick", "hungry", "thirsty", "elegant", "backstabbing", "clever", "trusting", "loving", "suspicious", "berserk", "high", "romantic", "prickly", "evil", "admiring", "adoring", "reverent", "serene", "fervent", "modest", "gloomy", "elated"} // Docker 0.7.x generates names from notable scientists and hackers. // // Ada Lovelace invented the first algorithm. http://en.wikipedia.org/wiki/Ada_Lovelace (thanks James Turnbull) @@ -22,6 +22,7 @@ var ( // Charles Babbage invented the concept of a programmable computer. http://en.wikipedia.org/wiki/Charles_Babbage. // Charles Darwin established the principles of natural evolution. http://en.wikipedia.org/wiki/Charles_Darwin. // Dennis Ritchie and Ken Thompson created UNIX and the C programming language. http://en.wikipedia.org/wiki/Dennis_Ritchie http://en.wikipedia.org/wiki/Ken_Thompson + // Dorothy Hodgkin was a British biochemist, credited with the development of protein crystallography. She was awarded the Nobel Prize in Chemistry in 1964. http://en.wikipedia.org/wiki/Dorothy_Hodgkin // Douglas Engelbart gave the mother of all demos: http://en.wikipedia.org/wiki/Douglas_Engelbart // Elizabeth Blackwell - American doctor and first American woman to receive a medical degree - http://en.wikipedia.org/wiki/Elizabeth_Blackwell // Emmett Brown invented time travel. http://en.wikipedia.org/wiki/Emmett_Brown (thanks Brian Goff) @@ -31,6 +32,7 @@ var ( // Françoise Barré-Sinoussi - French virologist and Nobel Prize Laureate in Physiology or Medicine; her work was fundamental in identifying HIV as the cause of AIDS. http://en.wikipedia.org/wiki/Fran%C3%A7oise_Barr%C3%A9-Sinoussi // Galileo was a founding father of modern astronomy, and faced politics and obscurantism to establish scientific truth. http://en.wikipedia.org/wiki/Galileo_Galilei // Gertrude Elion - American biochemist, pharmacologist and the 1988 recipient of the Nobel Prize in Medicine - http://en.wikipedia.org/wiki/Gertrude_Elion + // Gerty Theresa Cori - American biochemist who became the third woman—and first American woman—to win a Nobel Prize in science, and the first woman to be awarded the Nobel Prize in Physiology or Medicine. Cori was born in Prague. http://en.wikipedia.org/wiki/Gerty_Cori // Grace Hopper developed the first compiler for a computer programming language and is credited with popularizing the term "debugging" for fixing computer glitches. http://en.wikipedia.org/wiki/Grace_Hopper // Henry Poincare made fundamental contributions in several fields of mathematics. http://en.wikipedia.org/wiki/Henri_Poincar%C3%A9 // Hypatia - Greek Alexandrine Neoplatonist philosopher in Egypt who was one of the earliest mothers of mathematics - http://en.wikipedia.org/wiki/Hypatia @@ -64,6 +66,7 @@ var ( // Richard Matthew Stallman - the founder of the Free Software movement, the GNU project, the Free Software Foundation, and the League for Programming Freedom. He also invented the concept of copyleft to protect the ideals of this movement, and enshrined this concept in the widely-used GPL (General Public License) for software. http://en.wikiquote.org/wiki/Richard_Stallman // Rob Pike was a key contributor to Unix, Plan 9, the X graphic system, utf-8, and the Go programming language. 
http://en.wikipedia.org/wiki/Rob_Pike // Rosalind Franklin - British biophysicist and X-ray crystallographer whose research was critical to the understanding of DNA - http://en.wikipedia.org/wiki/Rosalind_Franklin + // Rosalyn Sussman Yalow - Rosalyn Sussman Yalow was an American medical physicist, and a co-winner of the 1977 Nobel Prize in Physiology or Medicine for development of the radioimmunoassay technique. http://en.wikipedia.org/wiki/Rosalyn_Sussman_Yalow // Sophie Kowalevski - Russian mathematician responsible for important original contributions to analysis, differential equations and mechanics - http://en.wikipedia.org/wiki/Sofia_Kovalevskaya // Sophie Wilson designed the first Acorn Micro-Computer and the instruction set for ARM processors. http://en.wikipedia.org/wiki/Sophie_Wilson // Stephen Hawking pioneered the field of cosmology by combining general relativity and quantum mechanics. http://en.wikipedia.org/wiki/Stephen_Hawking @@ -73,7 +76,7 @@ var ( // http://en.wikipedia.org/wiki/John_Bardeen // http://en.wikipedia.org/wiki/Walter_Houser_Brattain // http://en.wikipedia.org/wiki/William_Shockley - right = [...]string{"albattani", "almeida", "archimedes", "ardinghelli", "babbage", "bardeen", "bartik", "bell", "blackwell", "bohr", "brattain", "brown", "carson", "colden", "curie", "darwin", "davinci", "einstein", "elion", "engelbart", "euclid", "fermat", "fermi", "feynman", "franklin", "galileo", "goldstine", "goodall", "hawking", "heisenberg", "hoover", "hopper", "hypatia", "jones", "kirch", "kowalevski", "lalande", "leakey", "lovelace", "lumiere", "mayer", "mccarthy", "mcclintock", "mclean", "meitner", "mestorf", "morse", "newton", "nobel", "pare", "pasteur", "perlman", "pike", "poincare", "ptolemy", "ritchie", "rosalind", "sammet", "shockley", "sinoussi", "stallman", "tesla", "thompson", "torvalds", "turing", "wilson", "wozniak", "wright", "yonath"} + right = [...]string{"albattani", "almeida", "archimedes", "ardinghelli", "babbage", "bardeen", "bartik", "bell", "blackwell", "bohr", "brattain", "brown", "carson", "colden", "cori", "curie", "darwin", "davinci", "einstein", "elion", "engelbart", "euclid", "fermat", "fermi", "feynman", "franklin", "galileo", "goldstine", "goodall", "hawking", "heisenberg", "hodgkin", "hoover", "hopper", "hypatia", "jones", "kirch", "kowalevski", "lalande", "leakey", "lovelace", "lumiere", "mayer", "mccarthy", "mcclintock", "mclean", "meitner", "mestorf", "morse", "newton", "nobel", "pare", "pasteur", "perlman", "pike", "poincare", "ptolemy", "ritchie", "rosalind", "sammet", "shockley", "sinoussi", "stallman", "tesla", "thompson", "torvalds", "turing", "wilson", "wozniak", "wright", "yalow", "yonath"} ) func GetRandomName(retry int) string { From e857716d2d881aaf1cf5a775d02ee5b109d9e423 Mon Sep 17 00:00:00 2001 From: Dan Griffin Date: Mon, 6 Oct 2014 09:23:56 +0100 Subject: [PATCH 007/592] Preserve extended attributes and acls on archlinux build Failure to do this means that file capabilites are not preserved in the image. Ping fails to work as a non-root user if cap_net_raw is capability is not set Signed-off-by: Dan Griffin --- contrib/mkimage-arch.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/contrib/mkimage-arch.sh b/contrib/mkimage-arch.sh index e83b2b6731..35cb1617d5 100755 --- a/contrib/mkimage-arch.sh +++ b/contrib/mkimage-arch.sh @@ -60,6 +60,6 @@ mknod -m 600 $DEV/initctl p mknod -m 666 $DEV/ptmx c 5 2 ln -sf /proc/self/fd $DEV/fd -tar --numeric-owner -C $ROOTFS -c . 
| docker import - archlinux +tar --numeric-owner --xattrs --acls -C $ROOTFS -c . | docker import - archlinux docker run -i -t archlinux echo Success. rm -rf $ROOTFS From d29c7e51cf0c260e6b528f78783aa27e5953b4bf Mon Sep 17 00:00:00 2001 From: unclejack Date: Mon, 6 Oct 2014 18:41:53 +0300 Subject: [PATCH 008/592] pkg/version: lint and add comments --- pkg/version/version.go | 42 ++++++++++++++++++++++++------------------ 1 file changed, 24 insertions(+), 18 deletions(-) diff --git a/pkg/version/version.go b/pkg/version/version.go index 6a7d63544b..cc802a654c 100644 --- a/pkg/version/version.go +++ b/pkg/version/version.go @@ -5,53 +5,59 @@ import ( "strings" ) +// Version provides utility methods for comparing versions. type Version string -func (me Version) compareTo(other Version) int { +func (v Version) compareTo(other Version) int { var ( - meTab = strings.Split(string(me), ".") + currTab = strings.Split(string(v), ".") otherTab = strings.Split(string(other), ".") ) - max := len(meTab) + max := len(currTab) if len(otherTab) > max { max = len(otherTab) } for i := 0; i < max; i++ { - var meInt, otherInt int + var currInt, otherInt int - if len(meTab) > i { - meInt, _ = strconv.Atoi(meTab[i]) + if len(currTab) > i { + currInt, _ = strconv.Atoi(currTab[i]) } if len(otherTab) > i { otherInt, _ = strconv.Atoi(otherTab[i]) } - if meInt > otherInt { + if currInt > otherInt { return 1 } - if otherInt > meInt { + if otherInt > currInt { return -1 } } return 0 } -func (me Version) LessThan(other Version) bool { - return me.compareTo(other) == -1 +// LessThan checks if a version is less than another version +func (v Version) LessThan(other Version) bool { + return v.compareTo(other) == -1 } -func (me Version) LessThanOrEqualTo(other Version) bool { - return me.compareTo(other) <= 0 +// LessThanOrEqualTo checks if a version is less than or equal to another +func (v Version) LessThanOrEqualTo(other Version) bool { + return v.compareTo(other) <= 0 } -func (me Version) GreaterThan(other Version) bool { - return me.compareTo(other) == 1 +// GreaterThan checks if a version is greater than another one +func (v Version) GreaterThan(other Version) bool { + return v.compareTo(other) == 1 } -func (me Version) GreaterThanOrEqualTo(other Version) bool { - return me.compareTo(other) >= 0 +// GreaterThanOrEqualTo checks ia version is greater than or equal to another +func (v Version) GreaterThanOrEqualTo(other Version) bool { + return v.compareTo(other) >= 0 } -func (me Version) Equal(other Version) bool { - return me.compareTo(other) == 0 +// Equal checks if a version is equal to another +func (v Version) Equal(other Version) bool { + return v.compareTo(other) == 0 } From 115436e038a83ae24b0e89aefd8309133b013e1c Mon Sep 17 00:00:00 2001 From: Erik Hollensbe Date: Tue, 30 Sep 2014 15:41:43 -0700 Subject: [PATCH 009/592] docker save: Do not save to a terminal. Docker-DCO-1.1-Signed-off-by: Erik Hollensbe (github: erikh) --- api/client/commands.go | 4 +++ integration-cli/docker_cli_save_load_test.go | 31 ++++++++++++++++++++ 2 files changed, 35 insertions(+) diff --git a/api/client/commands.go b/api/client/commands.go index ba2e416d70..2747ca1b69 100644 --- a/api/client/commands.go +++ b/api/client/commands.go @@ -5,6 +5,7 @@ import ( "bytes" "encoding/base64" "encoding/json" + "errors" "fmt" "io" "io/ioutil" @@ -2365,7 +2366,10 @@ func (cli *DockerCli) CmdSave(args ...string) error { if err != nil { return err } + } else if cli.isTerminalOut { + return errors.New("Cowardly refusing to save to a terminal. 
Use the -o flag or redirect.") } + if len(cmd.Args()) == 1 { image := cmd.Arg(0) if err := cli.stream("GET", "/images/"+image+"/get", nil, output, nil); err != nil { diff --git a/integration-cli/docker_cli_save_load_test.go b/integration-cli/docker_cli_save_load_test.go index 8632247b7d..b5e89c53a1 100644 --- a/integration-cli/docker_cli_save_load_test.go +++ b/integration-cli/docker_cli_save_load_test.go @@ -1,6 +1,7 @@ package main import ( + "bytes" "fmt" "io/ioutil" "os" @@ -8,6 +9,8 @@ import ( "path/filepath" "reflect" "testing" + + "github.com/docker/docker/vendor/src/github.com/kr/pty" ) // save a repo and try to load it using stdout @@ -60,6 +63,34 @@ func TestSaveAndLoadRepoStdout(t *testing.T) { logDone("save - save a repo using stdout") logDone("load - load a repo using stdout") + + pty, tty, err := pty.Open() + if err != nil { + t.Fatalf("Could not open pty: %v", err) + } + cmd := exec.Command(dockerBinary, "save", repoName) + cmd.Stdin = tty + cmd.Stdout = tty + cmd.Stderr = tty + if err := cmd.Start(); err != nil { + t.Fatalf("start err: %v", err) + } + if err := cmd.Wait(); err == nil { + t.Fatal("did not break writing to a TTY") + } + + buf := make([]byte, 1024) + + n, err := pty.Read(buf) + if err != nil { + t.Fatal("could not read tty output") + } + + if !bytes.Contains(buf[:n], []byte("Cowardly refusing")) { + t.Fatal("help output is not being yielded", out) + } + + logDone("save - do not save to a tty") } func TestSaveSingleTag(t *testing.T) { From 39fe2a3e4e059c4235f2e1692e240058fccc0ea3 Mon Sep 17 00:00:00 2001 From: unclejack Date: Mon, 6 Oct 2014 22:00:58 +0300 Subject: [PATCH 010/592] pkg/truncindex: lint and add comments Docker-DCO-1.1-Signed-off-by: Cristian Staretu (github: unclejack) --- api/server/server.go | 2 +- pkg/truncindex/truncindex.go | 32 ++++++++++++++++++++------------ 2 files changed, 21 insertions(+), 13 deletions(-) diff --git a/api/server/server.go b/api/server/server.go index 27fe305106..1fa2cf3f19 100644 --- a/api/server/server.go +++ b/api/server/server.go @@ -92,7 +92,7 @@ func httpError(w http.ResponseWriter, err error) { // FIXME: this is brittle and should not be necessary. // If we need to differentiate between different possible error types, we should // create appropriate error types with clearly defined meaning. 
- if strings.Contains(err.Error(), "No such") { + if strings.Contains(err.Error(), "no such") { statusCode = http.StatusNotFound } else if strings.Contains(err.Error(), "Bad parameter") { statusCode = http.StatusBadRequest diff --git a/pkg/truncindex/truncindex.go b/pkg/truncindex/truncindex.go index 89aa88d6b7..c5b71752b5 100644 --- a/pkg/truncindex/truncindex.go +++ b/pkg/truncindex/truncindex.go @@ -10,7 +10,9 @@ import ( ) var ( - ErrNoID = errors.New("prefix can't be empty") + // ErrNoID is thrown when attempting to use empty prefixes + ErrNoID = errors.New("prefix can't be empty") + errDuplicateID = errors.New("multiple IDs were found") ) func init() { @@ -27,56 +29,62 @@ type TruncIndex struct { ids map[string]struct{} } +// NewTruncIndex creates a new TruncIndex and initializes with a list of IDs func NewTruncIndex(ids []string) (idx *TruncIndex) { idx = &TruncIndex{ ids: make(map[string]struct{}), trie: patricia.NewTrie(), } for _, id := range ids { - idx.addId(id) + idx.addID(id) } return } -func (idx *TruncIndex) addId(id string) error { +func (idx *TruncIndex) addID(id string) error { if strings.Contains(id, " ") { - return fmt.Errorf("Illegal character: ' '") + return fmt.Errorf("illegal character: ' '") } if id == "" { return ErrNoID } if _, exists := idx.ids[id]; exists { - return fmt.Errorf("Id already exists: '%s'", id) + return fmt.Errorf("id already exists: '%s'", id) } idx.ids[id] = struct{}{} if inserted := idx.trie.Insert(patricia.Prefix(id), struct{}{}); !inserted { - return fmt.Errorf("Failed to insert id: %s", id) + return fmt.Errorf("failed to insert id: %s", id) } return nil } +// Add adds a new ID to the TruncIndex func (idx *TruncIndex) Add(id string) error { idx.Lock() defer idx.Unlock() - if err := idx.addId(id); err != nil { + if err := idx.addID(id); err != nil { return err } return nil } +// Delete removes an ID from the TruncIndex. If there are multiple IDs +// with the given prefix, an error is thrown. func (idx *TruncIndex) Delete(id string) error { idx.Lock() defer idx.Unlock() if _, exists := idx.ids[id]; !exists || id == "" { - return fmt.Errorf("No such id: '%s'", id) + return fmt.Errorf("no such id: '%s'", id) } delete(idx.ids, id) if deleted := idx.trie.Delete(patricia.Prefix(id)); !deleted { - return fmt.Errorf("No such id: '%s'", id) + return fmt.Errorf("no such id: '%s'", id) } return nil } +// Get retrieves an ID from the TruncIndex. If there are multiple IDs +// with the given prefix, an error is thrown. 
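// A minimal usage sketch with two hypothetical IDs, illustrating the prefix
// lookup and the ambiguity error described above (assumes only the exported
// NewTruncIndex/Get API shown in this patch):
//
//	idx := NewTruncIndex([]string{"abcdef0123456789", "abcd00ffeeddccbb"})
//	full, _ := idx.Get("abcdef") // unambiguous prefix, returns the full ID
//	_, err := idx.Get("abcd")    // prefix shared by both IDs, returns an error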
func (idx *TruncIndex) Get(s string) (string, error) { idx.RLock() defer idx.RUnlock() @@ -90,17 +98,17 @@ func (idx *TruncIndex) Get(s string) (string, error) { if id != "" { // we haven't found the ID if there are two or more IDs id = "" - return fmt.Errorf("we've found two entries") + return errDuplicateID } id = string(prefix) return nil } if err := idx.trie.VisitSubtree(patricia.Prefix(s), subTreeVisitFunc); err != nil { - return "", fmt.Errorf("No such id: %s", s) + return "", fmt.Errorf("no such id: %s", s) } if id != "" { return id, nil } - return "", fmt.Errorf("No such id: %s", s) + return "", fmt.Errorf("no such id: %s", s) } From d202ff2ece6812f90af60292b88a9a20adb807ee Mon Sep 17 00:00:00 2001 From: unclejack Date: Mon, 6 Oct 2014 22:19:41 +0300 Subject: [PATCH 011/592] pkg/units: lint Docker-DCO-1.1-Signed-off-by: Cristian Staretu (github: unclejack) --- pkg/units/size.go | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/pkg/units/size.go b/pkg/units/size.go index ea39bbddf7..853b555862 100644 --- a/pkg/units/size.go +++ b/pkg/units/size.go @@ -10,6 +10,7 @@ import ( // See: http://en.wikipedia.org/wiki/Binary_prefix const ( // Decimal + KB = 1000 MB = 1000 * KB GB = 1000 * MB @@ -17,6 +18,7 @@ const ( PB = 1000 * TB // Binary + KiB = 1024 MiB = 1024 * KiB GiB = 1024 * MiB @@ -52,7 +54,7 @@ func FromHumanSize(size string) (int64, error) { return parseSize(size, decimalMap) } -// Parses a human-readable string representing an amount of RAM +// RAMInBytes parses a human-readable string representing an amount of RAM // in bytes, kibibytes, mebibytes, gibibytes, or tebibytes and // returns the number of bytes, or -1 if the string is unparseable. // Units are case-insensitive, and the 'b' suffix is optional. @@ -64,7 +66,7 @@ func RAMInBytes(size string) (int64, error) { func parseSize(sizeStr string, uMap unitMap) (int64, error) { matches := sizeRegex.FindStringSubmatch(sizeStr) if len(matches) != 3 { - return -1, fmt.Errorf("Invalid size: '%s'", sizeStr) + return -1, fmt.Errorf("invalid size: '%s'", sizeStr) } size, err := strconv.ParseInt(matches[1], 10, 0) From d1a85078b552bf35e76a4e12fca6ecbdecb1c3b6 Mon Sep 17 00:00:00 2001 From: unclejack Date: Mon, 6 Oct 2014 22:27:56 +0300 Subject: [PATCH 012/592] pkg/timeutils: lint and add comments Docker-DCO-1.1-Signed-off-by: Cristian Staretu (github: unclejack) --- pkg/timeutils/json.go | 9 ++++++--- 1 file changed, 6 insertions(+), 3 deletions(-) diff --git a/pkg/timeutils/json.go b/pkg/timeutils/json.go index 19f107bffe..8043d69d18 100644 --- a/pkg/timeutils/json.go +++ b/pkg/timeutils/json.go @@ -6,18 +6,21 @@ import ( ) const ( - // Define our own version of RFC339Nano because we want one + // RFC3339NanoFixed is our own version of RFC339Nano because we want one // that pads the nano seconds part with zeros to ensure // the timestamps are aligned in the logs. RFC3339NanoFixed = "2006-01-02T15:04:05.000000000Z07:00" - JSONFormat = `"` + time.RFC3339Nano + `"` + // JSONFormat is the format used by FastMarshalJSON + JSONFormat = `"` + time.RFC3339Nano + `"` ) +// FastMarshalJSON avoids one of the extra allocations that +// time.MarshalJSON is making. func FastMarshalJSON(t time.Time) (string, error) { if y := t.Year(); y < 0 || y >= 10000 { // RFC 3339 is clear that years are 4 digits exactly. // See golang.org/issue/4556#c15 for more discussion. 
- return "", errors.New("Time.MarshalJSON: year outside of range [0,9999]") + return "", errors.New("time.MarshalJSON: year outside of range [0,9999]") } return t.Format(JSONFormat), nil } From ae3b59c1715840ba322fbe19002994e717b10b48 Mon Sep 17 00:00:00 2001 From: unclejack Date: Mon, 6 Oct 2014 22:34:39 +0300 Subject: [PATCH 013/592] registry: lint Docker-DCO-1.1-Signed-off-by: Cristian Staretu (github: unclejack) --- registry/auth.go | 7 +++--- registry/endpoint.go | 4 ++-- registry/registry_mock_test.go | 22 +++++++++--------- registry/registry_test.go | 41 ++++++++++++++++++---------------- registry/session.go | 5 +++-- 5 files changed, 41 insertions(+), 38 deletions(-) diff --git a/registry/auth.go b/registry/auth.go index 906a37dde7..ba370f4bc7 100644 --- a/registry/auth.go +++ b/registry/auth.go @@ -224,12 +224,11 @@ func Login(authConfig *AuthConfig, factory *utils.HTTPRequestFactory) (string, e return "", fmt.Errorf("Login: Account is not Active. Please check your e-mail for a confirmation link.") } return "", fmt.Errorf("Login: Account is not Active. Please see the documentation of the registry %s for instructions how to activate it.", serverAddress) - } else { - return "", fmt.Errorf("Login: %s (Code: %d; Headers: %s)", body, resp.StatusCode, resp.Header) } - } else { - return "", fmt.Errorf("Registration: %s", reqBody) + return "", fmt.Errorf("Login: %s (Code: %d; Headers: %s)", body, resp.StatusCode, resp.Header) } + return "", fmt.Errorf("Registration: %s", reqBody) + } else if reqStatusCode == 401 { // This case would happen with private registries where /v1/users is // protected, so people can use `docker login` as an auth check. diff --git a/registry/endpoint.go b/registry/endpoint.go index 5313a8079f..58311d32d1 100644 --- a/registry/endpoint.go +++ b/registry/endpoint.go @@ -13,7 +13,7 @@ import ( ) // scans string for api version in the URL path. returns the trimmed hostname, if version found, string and API version. 
-func scanForApiVersion(hostname string) (string, APIVersion) { +func scanForAPIVersion(hostname string) (string, APIVersion) { var ( chunks []string apiVersionStr string @@ -43,7 +43,7 @@ func NewEndpoint(hostname string) (*Endpoint, error) { if !strings.HasPrefix(hostname, "http") { hostname = "https://" + hostname } - trimmedHostname, endpoint.Version = scanForApiVersion(hostname) + trimmedHostname, endpoint.Version = scanForAPIVersion(hostname) endpoint.URL, err = url.Parse(trimmedHostname) if err != nil { return nil, err diff --git a/registry/registry_mock_test.go b/registry/registry_mock_test.go index 379dc78f47..967d8b2615 100644 --- a/registry/registry_mock_test.go +++ b/registry/registry_mock_test.go @@ -19,7 +19,7 @@ import ( ) var ( - testHttpServer *httptest.Server + testHTTPServer *httptest.Server testLayers = map[string]map[string]string{ "77dbf71da1d00e3fbddc480176eac8994025630c6590d11cfc8fe1209c2a1d20": { "json": `{"id":"77dbf71da1d00e3fbddc480176eac8994025630c6590d11cfc8fe1209c2a1d20", @@ -99,7 +99,7 @@ func init() { // /v2/ r.HandleFunc("/v2/version", handlerGetPing).Methods("GET") - testHttpServer = httptest.NewServer(handlerAccessLog(r)) + testHTTPServer = httptest.NewServer(handlerAccessLog(r)) } func handlerAccessLog(handler http.Handler) http.Handler { @@ -111,7 +111,7 @@ func handlerAccessLog(handler http.Handler) http.Handler { } func makeURL(req string) string { - return testHttpServer.URL + req + return testHTTPServer.URL + req } func writeHeaders(w http.ResponseWriter) { @@ -198,8 +198,8 @@ func handlerGetImage(w http.ResponseWriter, r *http.Request) { return } writeHeaders(w) - layer_size := len(layer["layer"]) - w.Header().Add("X-Docker-Size", strconv.Itoa(layer_size)) + layerSize := len(layer["layer"]) + w.Header().Add("X-Docker-Size", strconv.Itoa(layerSize)) io.WriteString(w, layer[vars["action"]]) } @@ -208,16 +208,16 @@ func handlerPutImage(w http.ResponseWriter, r *http.Request) { return } vars := mux.Vars(r) - image_id := vars["image_id"] + imageID := vars["image_id"] action := vars["action"] - layer, exists := testLayers[image_id] + layer, exists := testLayers[imageID] if !exists { if action != "json" { http.NotFound(w, r) return } layer = make(map[string]string) - testLayers[image_id] = layer + testLayers[imageID] = layer } if checksum := r.Header.Get("X-Docker-Checksum"); checksum != "" { if checksum != layer["checksum_simple"] && checksum != layer["checksum_tarsum"] { @@ -301,7 +301,7 @@ func handlerUsers(w http.ResponseWriter, r *http.Request) { } func handlerImages(w http.ResponseWriter, r *http.Request) { - u, _ := url.Parse(testHttpServer.URL) + u, _ := url.Parse(testHTTPServer.URL) w.Header().Add("X-Docker-Endpoints", fmt.Sprintf("%s , %s ", u.Host, "test.example.com")) w.Header().Add("X-Docker-Token", fmt.Sprintf("FAKE-SESSION-%d", time.Now().UnixNano())) if r.Method == "PUT" { @@ -317,9 +317,9 @@ func handlerImages(w http.ResponseWriter, r *http.Request) { return } images := []map[string]string{} - for image_id, layer := range testLayers { + for imageID, layer := range testLayers { image := make(map[string]string) - image["id"] = image_id + image["id"] = imageID image["checksum"] = layer["checksum_tarsum"] image["Tag"] = "latest" images = append(images, image) diff --git a/registry/registry_test.go b/registry/registry_test.go index ab4178126a..fdf714e800 100644 --- a/registry/registry_test.go +++ b/registry/registry_test.go @@ -11,9 +11,12 @@ import ( ) var ( - IMAGE_ID = "42d718c941f5c532ac049bf0b0ab53f0062f09a03afd4aa4a02c098e46032b9d" - 
TOKEN = []string{"fake-token"} - REPO = "foo42/bar" + token = []string{"fake-token"} +) + +const ( + imageID = "42d718c941f5c532ac049bf0b0ab53f0062f09a03afd4aa4a02c098e46032b9d" + REPO = "foo42/bar" ) func spawnTestRegistrySession(t *testing.T) *Session { @@ -43,27 +46,27 @@ func TestPingRegistryEndpoint(t *testing.T) { func TestGetRemoteHistory(t *testing.T) { r := spawnTestRegistrySession(t) - hist, err := r.GetRemoteHistory(IMAGE_ID, makeURL("/v1/"), TOKEN) + hist, err := r.GetRemoteHistory(imageID, makeURL("/v1/"), token) if err != nil { t.Fatal(err) } assertEqual(t, len(hist), 2, "Expected 2 images in history") - assertEqual(t, hist[0], IMAGE_ID, "Expected "+IMAGE_ID+"as first ancestry") + assertEqual(t, hist[0], imageID, "Expected "+imageID+"as first ancestry") assertEqual(t, hist[1], "77dbf71da1d00e3fbddc480176eac8994025630c6590d11cfc8fe1209c2a1d20", "Unexpected second ancestry") } func TestLookupRemoteImage(t *testing.T) { r := spawnTestRegistrySession(t) - found := r.LookupRemoteImage(IMAGE_ID, makeURL("/v1/"), TOKEN) + found := r.LookupRemoteImage(imageID, makeURL("/v1/"), token) assertEqual(t, found, true, "Expected remote lookup to succeed") - found = r.LookupRemoteImage("abcdef", makeURL("/v1/"), TOKEN) + found = r.LookupRemoteImage("abcdef", makeURL("/v1/"), token) assertEqual(t, found, false, "Expected remote lookup to fail") } func TestGetRemoteImageJSON(t *testing.T) { r := spawnTestRegistrySession(t) - json, size, err := r.GetRemoteImageJSON(IMAGE_ID, makeURL("/v1/"), TOKEN) + json, size, err := r.GetRemoteImageJSON(imageID, makeURL("/v1/"), token) if err != nil { t.Fatal(err) } @@ -72,7 +75,7 @@ func TestGetRemoteImageJSON(t *testing.T) { t.Fatal("Expected non-empty json") } - _, _, err = r.GetRemoteImageJSON("abcdef", makeURL("/v1/"), TOKEN) + _, _, err = r.GetRemoteImageJSON("abcdef", makeURL("/v1/"), token) if err == nil { t.Fatal("Expected image not found error") } @@ -80,7 +83,7 @@ func TestGetRemoteImageJSON(t *testing.T) { func TestGetRemoteImageLayer(t *testing.T) { r := spawnTestRegistrySession(t) - data, err := r.GetRemoteImageLayer(IMAGE_ID, makeURL("/v1/"), TOKEN, 0) + data, err := r.GetRemoteImageLayer(imageID, makeURL("/v1/"), token, 0) if err != nil { t.Fatal(err) } @@ -88,7 +91,7 @@ func TestGetRemoteImageLayer(t *testing.T) { t.Fatal("Expected non-nil data result") } - _, err = r.GetRemoteImageLayer("abcdef", makeURL("/v1/"), TOKEN, 0) + _, err = r.GetRemoteImageLayer("abcdef", makeURL("/v1/"), token, 0) if err == nil { t.Fatal("Expected image not found error") } @@ -96,14 +99,14 @@ func TestGetRemoteImageLayer(t *testing.T) { func TestGetRemoteTags(t *testing.T) { r := spawnTestRegistrySession(t) - tags, err := r.GetRemoteTags([]string{makeURL("/v1/")}, REPO, TOKEN) + tags, err := r.GetRemoteTags([]string{makeURL("/v1/")}, REPO, token) if err != nil { t.Fatal(err) } assertEqual(t, len(tags), 1, "Expected one tag") - assertEqual(t, tags["latest"], IMAGE_ID, "Expected tag latest to map to "+IMAGE_ID) + assertEqual(t, tags["latest"], imageID, "Expected tag latest to map to "+imageID) - _, err = r.GetRemoteTags([]string{makeURL("/v1/")}, "foo42/baz", TOKEN) + _, err = r.GetRemoteTags([]string{makeURL("/v1/")}, "foo42/baz", token) if err == nil { t.Fatal("Expected error when fetching tags for bogus repo") } @@ -111,11 +114,11 @@ func TestGetRemoteTags(t *testing.T) { func TestGetRepositoryData(t *testing.T) { r := spawnTestRegistrySession(t) - parsedUrl, err := url.Parse(makeURL("/v1/")) + parsedURL, err := url.Parse(makeURL("/v1/")) if err != nil { 
t.Fatal(err) } - host := "http://" + parsedUrl.Host + "/v1/" + host := "http://" + parsedURL.Host + "/v1/" data, err := r.GetRepositoryData("foo42/bar") if err != nil { t.Fatal(err) @@ -137,7 +140,7 @@ func TestPushImageJSONRegistry(t *testing.T) { Checksum: "sha256:1ac330d56e05eef6d438586545ceff7550d3bdcb6b19961f12c5ba714ee1bb37", } - err := r.PushImageJSONRegistry(imgData, []byte{0x42, 0xdf, 0x0}, makeURL("/v1/"), TOKEN) + err := r.PushImageJSONRegistry(imgData, []byte{0x42, 0xdf, 0x0}, makeURL("/v1/"), token) if err != nil { t.Fatal(err) } @@ -146,7 +149,7 @@ func TestPushImageJSONRegistry(t *testing.T) { func TestPushImageLayerRegistry(t *testing.T) { r := spawnTestRegistrySession(t) layer := strings.NewReader("") - _, _, err := r.PushImageLayerRegistry(IMAGE_ID, layer, makeURL("/v1/"), TOKEN, []byte{}) + _, _, err := r.PushImageLayerRegistry(imageID, layer, makeURL("/v1/"), token, []byte{}) if err != nil { t.Fatal(err) } @@ -180,7 +183,7 @@ func TestResolveRepositoryName(t *testing.T) { func TestPushRegistryTag(t *testing.T) { r := spawnTestRegistrySession(t) - err := r.PushRegistryTag("foo42/bar", IMAGE_ID, "stable", makeURL("/v1/"), TOKEN) + err := r.PushRegistryTag("foo42/bar", imageID, "stable", makeURL("/v1/"), token) if err != nil { t.Fatal(err) } diff --git a/registry/session.go b/registry/session.go index 5067b8d5de..ff0be343d5 100644 --- a/registry/session.go +++ b/registry/session.go @@ -3,6 +3,7 @@ package registry import ( "bytes" "crypto/sha256" + // this is required for some certificates _ "crypto/sha512" "encoding/hex" "encoding/json" @@ -243,11 +244,11 @@ func (r *Session) GetRemoteTags(registries []string, repository string, token [] func buildEndpointsList(headers []string, indexEp string) ([]string, error) { var endpoints []string - parsedUrl, err := url.Parse(indexEp) + parsedURL, err := url.Parse(indexEp) if err != nil { return nil, err } - var urlScheme = parsedUrl.Scheme + var urlScheme = parsedURL.Scheme // The Registry's URL scheme has to match the Index' for _, ep := range headers { epList := strings.Split(ep, ",") From 9e45069e0ab8023bb7c8bcbda9a525fb6932dcf8 Mon Sep 17 00:00:00 2001 From: unclejack Date: Mon, 6 Oct 2014 22:57:27 +0300 Subject: [PATCH 014/592] pkg/graphdb: some linting Docker-DCO-1.1-Signed-off-by: Cristian Staretu (github: unclejack) --- pkg/graphdb/graphdb.go | 22 +++++++++++----------- 1 file changed, 11 insertions(+), 11 deletions(-) diff --git a/pkg/graphdb/graphdb.go b/pkg/graphdb/graphdb.go index 59873fefb3..450bd508eb 100644 --- a/pkg/graphdb/graphdb.go +++ b/pkg/graphdb/graphdb.go @@ -131,8 +131,8 @@ func (db *Database) Set(fullPath, id string) (*Entity, error) { if _, err := db.conn.Exec("BEGIN EXCLUSIVE"); err != nil { return nil, err } - var entityId string - if err := db.conn.QueryRow("SELECT id FROM entity WHERE id = ?;", id).Scan(&entityId); err != nil { + var entityID string + if err := db.conn.QueryRow("SELECT id FROM entity WHERE id = ?;", id).Scan(&entityID); err != nil { if err == sql.ErrNoRows { if _, err := db.conn.Exec("INSERT INTO entity (id) VALUES(?);", id); err != nil { rollback() @@ -320,14 +320,14 @@ func (db *Database) RefPaths(id string) Edges { for rows.Next() { var name string - var parentId string - if err := rows.Scan(&name, &parentId); err != nil { + var parentID string + if err := rows.Scan(&name, &parentID); err != nil { return refs } refs = append(refs, &Edge{ EntityID: id, Name: name, - ParentID: parentId, + ParentID: parentID, }) } return refs @@ -443,11 +443,11 @@ func (db *Database) children(e 
*Entity, name string, depth int, entities []WalkM defer rows.Close() for rows.Next() { - var entityId, entityName string - if err := rows.Scan(&entityId, &entityName); err != nil { + var entityID, entityName string + if err := rows.Scan(&entityID, &entityName); err != nil { return nil, err } - child := &Entity{entityId} + child := &Entity{entityID} edge := &Edge{ ParentID: e.id, Name: entityName, @@ -490,11 +490,11 @@ func (db *Database) parents(e *Entity) (parents []string, err error) { defer rows.Close() for rows.Next() { - var parentId string - if err := rows.Scan(&parentId); err != nil { + var parentID string + if err := rows.Scan(&parentID); err != nil { return nil, err } - parents = append(parents, parentId) + parents = append(parents, parentID) } return parents, nil From 5e6f16e34264fa81205c8becbdcd401823261056 Mon Sep 17 00:00:00 2001 From: Lei Jitang Date: Tue, 30 Sep 2014 16:57:17 +0800 Subject: [PATCH 015/592] Fix the bug of tag a existed tag name of a repository. Signed-off-by: Lei Jitang --- builder/job.go | 2 +- graph/tags.go | 6 +++--- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/builder/job.go b/builder/job.go index 555232c9ae..ae501acb5d 100644 --- a/builder/job.go +++ b/builder/job.go @@ -124,7 +124,7 @@ func (b *BuilderJob) CmdBuild(job *engine.Job) engine.Status { } if repoName != "" { - b.Daemon.Repositories().Set(repoName, tag, id, false) + b.Daemon.Repositories().Set(repoName, tag, id, true) } return engine.StatusOK } diff --git a/graph/tags.go b/graph/tags.go index 31c65ced5c..6e4e63148a 100644 --- a/graph/tags.go +++ b/graph/tags.go @@ -218,11 +218,11 @@ func (store *TagStore) Set(repoName, tag, imageName string, force bool) error { var repo Repository if r, exists := store.Repositories[repoName]; exists { repo = r + if old, exists := store.Repositories[repoName][tag]; exists && !force { + return fmt.Errorf("Conflict: Tag %s is already set to image %s, if you want to replace it, please use -f option", tag, old) + } } else { repo = make(map[string]string) - if old, exists := store.Repositories[repoName]; exists && !force { - return fmt.Errorf("Conflict: Tag %s:%s is already set to %s", repoName, tag, old) - } store.Repositories[repoName] = repo } repo[tag] = img.ID From f1c319f77cc4ba97ab56a7b0d1e18a4cf12db7ba Mon Sep 17 00:00:00 2001 From: James Turnbull Date: Sat, 11 Oct 2014 16:30:36 -0400 Subject: [PATCH 016/592] Minor changes to SSHd example Docker-DCO-1.1-Signed-off-by: James Turnbull (github: jamtur01) --- docs/sources/examples/running_ssh_service.md | 11 ++++++----- 1 file changed, 6 insertions(+), 5 deletions(-) diff --git a/docs/sources/examples/running_ssh_service.md b/docs/sources/examples/running_ssh_service.md index 9f87fb726d..445cfe5257 100644 --- a/docs/sources/examples/running_ssh_service.md +++ b/docs/sources/examples/running_ssh_service.md @@ -46,7 +46,8 @@ the container's port 22 is mapped to: And now you can ssh as `root` on the container's IP address (you can find it with `docker inspect`) or on port `49154` of the Docker daemon's host IP address -(`ip address` or `ifconfig` can tell you that): +(`ip address` or `ifconfig` can tell you that) or `localhost` if on the +Docker daemon host: $ ssh root@192.168.1.2 -p 49154 # The password is ``screencast``. 
@@ -55,15 +56,15 @@ with `docker inspect`) or on port `49154` of the Docker daemon's host IP address ## Environment variables Using the `sshd` daemon to spawn shells makes it complicated to pass environment -variables to the user's shell via the simple Docker mechanisms, as `sshd` scrubs +variables to the user's shell via the normal Docker mechanisms, as `sshd` scrubs the environment before it starts the shell. -If you're setting values in the Dockerfile using `ENV`, you'll need to push them -to a shell initialisation file like the `/etc/profile` example in the Dockerfile +If you're setting values in the `Dockerfile` using `ENV`, you'll need to push them +to a shell initialization file like the `/etc/profile` example in the `Dockerfile` above. If you need to pass`docker run -e ENV=value` values, you will need to write a -short script to do the same before you start `sshd -D` - and then replace the +short script to do the same before you start `sshd -D` and then replace the `CMD` with that script. ## Clean up From da42ae536cf86dafbdad88901b9322ea7317f154 Mon Sep 17 00:00:00 2001 From: Vincent Batts Date: Sun, 12 Oct 2014 22:26:42 -0400 Subject: [PATCH 017/592] client: even cleaner use of Transport First off, sorry for the noise. This is a cleaner step of #8508 Found more of a root cause of the open file handles. After more testing I found that the open file descriptors will still occur for TCP:// connections to the daemon, causing client and/or daemon to fail. The issue was instantiating a new http.Transport on _ever_ client request. So each instance held the prior connection alive, but was only ever used once. By moving it out to the initilization of DockerCli, we can now have reuse of idled connections. Simplifies the garbage overhead of the client too, though that's not usually a deal. Signed-off-by: Vincent Batts --- api/client/cli.go | 18 ++++++++++++++++++ api/client/utils.go | 19 +------------------ 2 files changed, 19 insertions(+), 18 deletions(-) diff --git a/api/client/cli.go b/api/client/cli.go index 6bc3fc3507..70eae6e4b4 100644 --- a/api/client/cli.go +++ b/api/client/cli.go @@ -5,10 +5,13 @@ import ( "encoding/json" "fmt" "io" + "net" + "net/http" "os" "reflect" "strings" "text/template" + "time" flag "github.com/docker/docker/pkg/mflag" "github.com/docker/docker/pkg/term" @@ -34,6 +37,7 @@ type DockerCli struct { isTerminalIn bool // isTerminalOut describes if client's STDOUT is a TTY isTerminalOut bool + transport *http.Transport } var funcMap = template.FuncMap{ @@ -131,6 +135,19 @@ func NewDockerCli(in io.ReadCloser, out, err io.Writer, key libtrust.PrivateKey, err = out } + // The transport is created here for reuse during the client session + tr := &http.Transport{ + TLSClientConfig: tlsConfig, + Dial: func(dial_network, dial_addr string) (net.Conn, error) { + // Why 32? 
See issue 8035 + return net.DialTimeout(proto, addr, 32*time.Second) + }, + } + if proto == "unix" { + // no need in compressing for local communications + tr.DisableCompression = true + } + return &DockerCli{ proto: proto, addr: addr, @@ -144,5 +161,6 @@ func NewDockerCli(in io.ReadCloser, out, err io.Writer, key libtrust.PrivateKey, isTerminalOut: isTerminalOut, tlsConfig: tlsConfig, scheme: scheme, + transport: tr, } } diff --git a/api/client/utils.go b/api/client/utils.go index 58b730bd1b..11e39729af 100644 --- a/api/client/utils.go +++ b/api/client/utils.go @@ -8,7 +8,6 @@ import ( "fmt" "io" "io/ioutil" - "net" "net/http" "net/url" "os" @@ -16,7 +15,6 @@ import ( "strconv" "strings" "syscall" - "time" "github.com/docker/docker/api" "github.com/docker/docker/dockerversion" @@ -33,22 +31,7 @@ var ( ) func (cli *DockerCli) HTTPClient() *http.Client { - tr := &http.Transport{ - TLSClientConfig: cli.tlsConfig, - Dial: func(network, addr string) (net.Conn, error) { - // Why 32? See issue 8035 - return net.DialTimeout(cli.proto, cli.addr, 32*time.Second) - }, - } - if cli.proto == "unix" { - // XXX workaround for net/http Transport which caches connections, but is - // intended for tcp connections, not unix sockets. - tr.DisableKeepAlives = true - - // no need in compressing for local communications - tr.DisableCompression = true - } - return &http.Client{Transport: tr} + return &http.Client{Transport: cli.transport} } func (cli *DockerCli) encodeData(data interface{}) (*bytes.Buffer, error) { From 2709c4677c252a07d49a24f7583adcc17b38fa14 Mon Sep 17 00:00:00 2001 From: Sven Dowideit Date: Wed, 8 Oct 2014 13:10:31 +1000 Subject: [PATCH 018/592] Add info on --device flag permissions ':rwm' Signed-off-by: Sven Dowideit Docker-DCO-1.1-Signed-off-by: Sven Dowideit (github: SvenDowideit) --- docs/man/docker-create.1.md | 2 +- docs/man/docker-run.1.md | 3 ++- docs/sources/reference/commandline/cli.md | 31 ++++++++++++++++++++--- docs/sources/reference/run.md | 20 +++++++++++++++ runconfig/parse.go | 2 +- 5 files changed, 52 insertions(+), 6 deletions(-) diff --git a/docs/man/docker-create.1.md b/docs/man/docker-create.1.md index c5ed0349c4..00934347e3 100644 --- a/docs/man/docker-create.1.md +++ b/docs/man/docker-create.1.md @@ -61,7 +61,7 @@ docker-create - Create a new container CPUs in which to allow execution (0-3, 0,1) **--device**=[] - Add a host device to the container (e.g. --device=/dev/sdc:/dev/xvdc) + Add a host device to the container (e.g. --device=/dev/sdc:/dev/xvdc:rwm) **--dns-search**=[] Set custom DNS search domains diff --git a/docs/man/docker-run.1.md b/docs/man/docker-run.1.md index e3d846749d..32777b7f0e 100644 --- a/docs/man/docker-run.1.md +++ b/docs/man/docker-run.1.md @@ -98,8 +98,9 @@ the detached mode, then you cannot use the **-rm** option. When attached in the tty mode, you can detach from a running container without stopping the process by pressing the keys CTRL-P CTRL-Q. + **--device**=[] - Add a host device to the container (e.g. --device=/dev/sdc:/dev/xvdc) + Add a host device to the container (e.g. --device=/dev/sdc:/dev/xvdc:rwm) **--dns-search**=[] Set custom DNS search domains diff --git a/docs/sources/reference/commandline/cli.md b/docs/sources/reference/commandline/cli.md index 2722aaa4ef..45631a7e53 100644 --- a/docs/sources/reference/commandline/cli.md +++ b/docs/sources/reference/commandline/cli.md @@ -417,7 +417,7 @@ Creates a new container. 
--cap-drop=[] Drop Linux capabilities --cidfile="" Write the container ID to the file --cpuset="" CPUs in which to allow execution (0-3, 0,1) - --device=[] Add a host device to the container (e.g. --device=/dev/sdc:/dev/xvdc) + --device=[] Add a host device to the container (e.g. --device=/dev/sdc:/dev/xvdc:rwm) --dns=[] Set custom DNS servers --dns-search=[] Set custom DNS search domains -e, --env=[] Set environment variables @@ -457,6 +457,8 @@ container at any point. This is useful when you want to set up a container configuration ahead of time so that it is ready to start when you need it. +Please see the [run command](#run) section for more details. + #### Example $ sudo docker create -t -i fedora bash @@ -1115,7 +1117,7 @@ removed before the image is removed. --cidfile="" Write the container ID to the file --cpuset="" CPUs in which to allow execution (0-3, 0,1) -d, --detach=false Detached mode: run the container in the background and print the new container ID - --device=[] Add a host device to the container (e.g. --device=/dev/sdc:/dev/xvdc) + --device=[] Add a host device to the container (e.g. --device=/dev/sdc:/dev/xvdc:rwm) --dns=[] Set custom DNS servers --dns-search=[] Set custom DNS search domains -e, --env=[] Set environment variables @@ -1324,8 +1326,31 @@ option enables that. For example, a specific block storage device or loop device or audio device can be added to an otherwise unprivileged container (without the `--privileged` flag) and have the application directly access it. +By default, the container will be able to `read`, `write` and `mknod` these devices. +This can be overridden using a third `:rwm` set of options to each `--device` +flag: + + +``` + $ sudo docker run --device=/dev/sda:/dev/xvdc --rm -it ubuntu fdisk /dev/xvdc + + Command (m for help): q + $ sudo docker run --device=/dev/sda:/dev/xvdc:r --rm -it ubuntu fdisk /dev/xvdc + You will not be able to write the partition table. + + Command (m for help): q + + $ sudo docker run --device=/dev/sda:/dev/xvdc --rm -it ubuntu fdisk /dev/xvdc + + Command (m for help): q + + $ sudo docker run --device=/dev/sda:/dev/xvdc:m --rm -it ubuntu fdisk /dev/xvdc + fdisk: unable to open /dev/xvdc: Operation not permitted +``` + **Note:** -> `--device` cannot be safely used with ephemeral devices. Block devices that may be removed should not be added to untrusted containers with `--device`. +> `--device` cannot be safely used with ephemeral devices. Block devices that +> may be removed should not be added to untrusted containers with `--device`. **A complete example:** diff --git a/docs/sources/reference/run.md b/docs/sources/reference/run.md index c72d28a000..8b7d6da2e1 100644 --- a/docs/sources/reference/run.md +++ b/docs/sources/reference/run.md @@ -308,6 +308,26 @@ will be accessible within the container. $ sudo docker run --device=/dev/snd:/dev/snd ... +By default, the container will be able to `read`, `write`, and `mknod` these devices. +This can be overridden using a third `:rwm` set of options to each `--device` flag: + + +``` + $ sudo docker run --device=/dev/sda:/dev/xvdc --rm -it ubuntu fdisk /dev/xvdc + + Command (m for help): q + $ sudo docker run --device=/dev/sda:/dev/xvdc:r --rm -it ubuntu fdisk /dev/xvdc + You will not be able to write the partition table. + + Command (m for help): q + + $ sudo docker run --device=/dev/sda:/dev/xvdc:w --rm -it ubuntu fdisk /dev/xvdc + crash.... 
+ + $ sudo docker run --device=/dev/sda:/dev/xvdc:m --rm -it ubuntu fdisk /dev/xvdc + fdisk: unable to open /dev/xvdc: Operation not permitted +``` + In addition to `--privileged`, the operator can have fine grain control over the capabilities using `--cap-add` and `--cap-drop`. By default, Docker has a default list of capabilities that are kept. Both flags support the value `all`, so if the diff --git a/runconfig/parse.go b/runconfig/parse.go index 42ec68898d..3a8cdd3350 100644 --- a/runconfig/parse.go +++ b/runconfig/parse.go @@ -65,7 +65,7 @@ func Parse(cmd *flag.FlagSet, args []string, sysInfo *sysinfo.SysInfo) (*Config, cmd.Var(&flAttach, []string{"a", "-attach"}, "Attach to STDIN, STDOUT or STDERR.") cmd.Var(&flVolumes, []string{"v", "-volume"}, "Bind mount a volume (e.g., from the host: -v /host:/container, from Docker: -v /container)") cmd.Var(&flLinks, []string{"#link", "-link"}, "Add link to another container in the form of name:alias") - cmd.Var(&flDevices, []string{"-device"}, "Add a host device to the container (e.g. --device=/dev/sdc:/dev/xvdc)") + cmd.Var(&flDevices, []string{"-device"}, "Add a host device to the container (e.g. --device=/dev/sdc:/dev/xvdc:rwm)") cmd.Var(&flEnv, []string{"e", "-env"}, "Set environment variables") cmd.Var(&flEnvFile, []string{"-env-file"}, "Read in a line delimited file of environment variables") From 380fe94614cecad6af85263af17245c5fc4103e8 Mon Sep 17 00:00:00 2001 From: Sven Dowideit Date: Tue, 14 Oct 2014 09:16:17 +1000 Subject: [PATCH 019/592] Link directly to the 'latest' release alias Docker-DCO-1.1-Signed-off-by: Sven Dowideit (github: SvenDowideit) --- docs/sources/installation/mac.md | 6 +++--- docs/sources/installation/windows.md | 4 ++-- 2 files changed, 5 insertions(+), 5 deletions(-) diff --git a/docs/sources/installation/mac.md b/docs/sources/installation/mac.md index 0707c56b7b..89fed17115 100644 --- a/docs/sources/installation/mac.md +++ b/docs/sources/installation/mac.md @@ -22,8 +22,8 @@ virtual machine (using VirtualBox) that's all set up to run the Docker daemon. ## Installation 1. Download the latest release of the [Docker for OS X Installer]( - https://github.com/boot2docker/osx-installer/releases) (Look for the green - Boot2Docker-x.x.x.pkg button near the bottom of the page.) + https://github.com/boot2docker/osx-installer/releases/latest) (Look for the + green Boot2Docker-x.x.x.pkg button near the bottom of the page.) 2. Run the installer by double-clicking the downloaded package, which will install a VirtualBox VM, Docker itself, and the Boot2Docker management tool. @@ -55,7 +55,7 @@ for more information. ## Upgrading 1. Download the latest release of the [Docker for OS X Installer]( - https://github.com/boot2docker/osx-installer/releases) + https://github.com/boot2docker/osx-installer/releases/latest) 2. If Boot2Docker is currently running, stop it with `boot2docker stop`. Then, run the installer package, which will update Docker and the Boot2Docker management tool. diff --git a/docs/sources/installation/windows.md b/docs/sources/installation/windows.md index 6220cd6b6e..667ce2935d 100644 --- a/docs/sources/installation/windows.md +++ b/docs/sources/installation/windows.md @@ -21,7 +21,7 @@ virtual machine and runs the Docker daemon. ## Installation -1. Download the latest release of the [Docker for Windows Installer](https://github.com/boot2docker/windows-installer/releases) +1. Download the latest release of the [Docker for Windows Installer](https://github.com/boot2docker/windows-installer/releases/latest) 2. 
Run the installer, which will install VirtualBox, MSYS-git, the boot2docker Linux ISO, and the Boot2Docker management tool. ![](/installation/images/windows-installer.png) @@ -37,7 +37,7 @@ and the Boot2Docker management tool. ## Upgrading 1. Download the latest release of the [Docker for Windows Installer]( - https://github.com/boot2docker/windows-installer/releases) + https://github.com/boot2docker/windows-installer/releases/latest) 2. Run the installer, which will update the Boot2Docker management tool. From 6b285c4cd6fe4a28c73a7137fe47753510befd3f Mon Sep 17 00:00:00 2001 From: Sven Dowideit Date: Tue, 14 Oct 2014 11:52:10 +1000 Subject: [PATCH 020/592] Move registry search API docs into the registry API document. Docker-DCO-1.1-Signed-off-by: Sven Dowideit (github: SvenDowideit) --- docs/sources/reference/api/docker-io_api.md | 41 --------------------- docs/sources/reference/api/registry_api.md | 41 +++++++++++++++++++++ 2 files changed, 41 insertions(+), 41 deletions(-) diff --git a/docs/sources/reference/api/docker-io_api.md b/docs/sources/reference/api/docker-io_api.md index c21781a42a..a7557bacb5 100644 --- a/docs/sources/reference/api/docker-io_api.md +++ b/docs/sources/reference/api/docker-io_api.md @@ -503,44 +503,3 @@ Status Codes: - **401** – Unauthorized - **403** – Account is not Active - **404** – User not found - -## Search - -If you need to search the index, this is the endpoint you would use. - -`GET /v1/search` - -Search the Index given a search term. It accepts - - [GET](http://www.w3.org/Protocols/rfc2616/rfc2616-sec9.html#sec9.3) - only. - -**Example request**: - - GET /v1/search?q=search_term HTTP/1.1 - Host: index.docker.io - Accept: application/json - -**Example response**: - - HTTP/1.1 200 OK - Vary: Accept - Content-Type: application/json - - {"query":"search_term", - "num_results": 3, - "results" : [ - {"name": "ubuntu", "description": "An ubuntu image..."}, - {"name": "centos", "description": "A centos image..."}, - {"name": "fedora", "description": "A fedora image..."} - ] - } - -Query Parameters: - -- **q** – what you want to search for - -Status Codes: - -- **200** – no error -- **500** – server error diff --git a/docs/sources/reference/api/registry_api.md b/docs/sources/reference/api/registry_api.md index 1ae37dba6d..8fe24cf6fb 100644 --- a/docs/sources/reference/api/registry_api.md +++ b/docs/sources/reference/api/registry_api.md @@ -494,6 +494,47 @@ Status Codes: - **401** – Requires authorization - **404** – Repository not found +## Search + +If you need to search the index, this is the endpoint you would use. + +`GET /v1/search` + +Search the Index given a search term. It accepts + + [GET](http://www.w3.org/Protocols/rfc2616/rfc2616-sec9.html#sec9.3) + only. 
+ +**Example request**: + + GET /v1/search?q=search_term HTTP/1.1 + Host: index.docker.io + Accept: application/json + +**Example response**: + + HTTP/1.1 200 OK + Vary: Accept + Content-Type: application/json + + {"query":"search_term", + "num_results": 3, + "results" : [ + {"name": "ubuntu", "description": "An ubuntu image..."}, + {"name": "centos", "description": "A centos image..."}, + {"name": "fedora", "description": "A fedora image..."} + ] + } + +Query Parameters: + +- **q** – what you want to search for + +Status Codes: + +- **200** – no error +- **500** – server error + ## Status ### Status check for registry From ae4689f14d59ece06bbd2a08a9687dc075e42d6d Mon Sep 17 00:00:00 2001 From: Victor Vieux Date: Tue, 14 Oct 2014 03:54:32 +0000 Subject: [PATCH 021/592] add BytesSize in pkg/units Signed-off-by: Victor Vieux --- pkg/units/size.go | 18 +++++++++++++----- pkg/units/size_test.go | 10 ++++++++++ 2 files changed, 23 insertions(+), 5 deletions(-) diff --git a/pkg/units/size.go b/pkg/units/size.go index ea39bbddf7..eb2d88715a 100644 --- a/pkg/units/size.go +++ b/pkg/units/size.go @@ -32,18 +32,26 @@ var ( sizeRegex = regexp.MustCompile(`^(\d+)([kKmMgGtTpP])?[bB]?$`) ) -var unitAbbrs = [...]string{"B", "kB", "MB", "GB", "TB", "PB", "EB", "ZB", "YB"} +var decimapAbbrs = []string{"B", "kB", "MB", "GB", "TB", "PB", "EB", "ZB", "YB"} +var binaryAbbrs = []string{"B", "KiB", "MiB", "GiB", "TiB", "PiB", "EiB", "ZiB", "YiB"} // HumanSize returns a human-readable approximation of a size // using SI standard (eg. "44kB", "17MB") func HumanSize(size int64) string { + return intToString(float64(size), 1000.0, decimapAbbrs) +} + +func BytesSize(size float64) string { + return intToString(size, 1024.0, binaryAbbrs) +} + +func intToString(size, unit float64, _map []string) string { i := 0 - sizef := float64(size) - for sizef >= 1000.0 { - sizef = sizef / 1000.0 + for size >= unit { + size = size / unit i++ } - return fmt.Sprintf("%.4g %s", sizef, unitAbbrs[i]) + return fmt.Sprintf("%.4g %s", size, _map[i]) } // FromHumanSize returns an integer from a human-readable specification of a diff --git a/pkg/units/size_test.go b/pkg/units/size_test.go index 8dae7e716b..5b329fcf68 100644 --- a/pkg/units/size_test.go +++ b/pkg/units/size_test.go @@ -7,6 +7,16 @@ import ( "testing" ) +func TestBytesSize(t *testing.T) { + assertEquals(t, "1 KiB", BytesSize(1024)) + assertEquals(t, "1 MiB", BytesSize(1024*1024)) + assertEquals(t, "1 MiB", BytesSize(1048576)) + assertEquals(t, "2 MiB", BytesSize(2*MiB)) + assertEquals(t, "3.42 GiB", BytesSize(3.42*GiB)) + assertEquals(t, "5.372 TiB", BytesSize(5.372*TiB)) + assertEquals(t, "2.22 PiB", BytesSize(2.22*PiB)) +} + func TestHumanSize(t *testing.T) { assertEquals(t, "1 kB", HumanSize(1000)) assertEquals(t, "1.024 kB", HumanSize(1024)) From ba311ee58a7114270d7853ebae5f1e86efdc8005 Mon Sep 17 00:00:00 2001 From: Harald Albers Date: Tue, 14 Oct 2014 17:45:29 +0200 Subject: [PATCH 022/592] Fix support for --env-file in bash completion Signed-off-by: Harald Albers --- contrib/completion/bash/docker | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/contrib/completion/bash/docker b/contrib/completion/bash/docker index cc16d4825f..dbe7c71442 100755 --- a/contrib/completion/bash/docker +++ b/contrib/completion/bash/docker @@ -264,10 +264,10 @@ _docker_create() { case "$cur" in -*) - COMPREPLY=( $( compgen -W "-n --networking --privileged -P --publish-all -i --interactive -t --tty --cidfile --entrypoint -h --hostname -m --memory -u --user -w --workdir 
-c --cpu-shares --name -a --attach -v --volume --link -e --env -p --publish --expose --dns --volumes-from --lxc-conf" -- "$cur" ) ) + COMPREPLY=( $( compgen -W "-n --networking --privileged -P --publish-all -i --interactive -t --tty --cidfile --entrypoint -h --hostname -m --memory -u --user -w --workdir -c --cpu-shares --name -a --attach -v --volume --link -e --env --env-file -p --publish --expose --dns --volumes-from --lxc-conf" -- "$cur" ) ) ;; *) - local counter=$(__docker_pos_first_nonflag '--cidfile|--volumes-from|-v|--volume|-e|--env|--entrypoint|-h|--hostname|-m|--memory|-u|--user|-w|--workdir|-c|--cpu-shares|-n|--name|-a|--attach|--link|-p|--publish|--expose|--dns|--lxc-conf') + local counter=$(__docker_pos_first_nonflag '--cidfile|--volumes-from|-v|--volume|-e|--env|--env-file|--entrypoint|-h|--hostname|-m|--memory|-u|--user|-w|--workdir|-c|--cpu-shares|-n|--name|-a|--attach|--link|-p|--publish|--expose|--dns|--lxc-conf') if [ $cword -eq $counter ]; then __docker_image_repos_and_tags_and_ids @@ -595,11 +595,11 @@ _docker_run() { case "$cur" in -*) - COMPREPLY=( $( compgen -W "--rm -d --detach -n --networking --privileged -P --publish-all -i --interactive -t --tty --cidfile --entrypoint -h --hostname -m --memory -u --user -w --workdir --cpuset -c --cpu-shares --sig-proxy --name -a --attach -v --volume --link -e --env -p --publish --expose --dns --volumes-from --lxc-conf --security-opt" -- "$cur" ) ) + COMPREPLY=( $( compgen -W "--rm -d --detach -n --networking --privileged -P --publish-all -i --interactive -t --tty --cidfile --entrypoint -h --hostname -m --memory -u --user -w --workdir --cpuset -c --cpu-shares --sig-proxy --name -a --attach -v --volume --link -e --env --env-file -p --publish --expose --dns --volumes-from --lxc-conf --security-opt" -- "$cur" ) ) ;; *) - local counter=$(__docker_pos_first_nonflag '--cidfile|--volumes-from|-v|--volume|-e|--env|--entrypoint|-h|--hostname|-m|--memory|-u|--user|-w|--workdir|--cpuset|-c|--cpu-shares|-n|--name|-a|--attach|--link|-p|--publish|--expose|--dns|--lxc-conf|--security-opt') + local counter=$(__docker_pos_first_nonflag '--cidfile|--volumes-from|-v|--volume|-e|--env|--env-file|--entrypoint|-h|--hostname|-m|--memory|-u|--user|-w|--workdir|--cpuset|-c|--cpu-shares|-n|--name|-a|--attach|--link|-p|--publish|--expose|--dns|--lxc-conf|--security-opt') if [ $cword -eq $counter ]; then __docker_image_repos_and_tags_and_ids From 91f33fcaaaa47ef60acc4a8cfa7a2be41edd724e Mon Sep 17 00:00:00 2001 From: Jessica Frazelle Date: Tue, 14 Oct 2014 10:58:35 -0700 Subject: [PATCH 023/592] Replace '%s' in create tests Docker-DCO-1.1-Signed-off-by: Jessica Frazelle (github: jfrazelle) --- integration-cli/docker_cli_create_test.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/integration-cli/docker_cli_create_test.go b/integration-cli/docker_cli_create_test.go index 226a3f5a75..a3d20bdbf5 100644 --- a/integration-cli/docker_cli_create_test.go +++ b/integration-cli/docker_cli_create_test.go @@ -107,7 +107,7 @@ func TestCreateEchoStdout(t *testing.T) { errorOut(err, t, out) if out != "test123\n" { - t.Errorf("container should've printed 'test123', got '%s'", out) + t.Errorf("container should've printed 'test123', got %q", out) } deleteAllContainers() From a44296603f8188278241cc8d8a353952ee50ec54 Mon Sep 17 00:00:00 2001 From: Jessica Frazelle Date: Tue, 14 Oct 2014 10:59:01 -0700 Subject: [PATCH 024/592] Replace '%s' in diff tests Docker-DCO-1.1-Signed-off-by: Jessica Frazelle (github: jfrazelle) --- 
integration-cli/docker_cli_diff_test.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/integration-cli/docker_cli_diff_test.go b/integration-cli/docker_cli_diff_test.go index 726f23491c..c6bf2bbd18 100644 --- a/integration-cli/docker_cli_diff_test.go +++ b/integration-cli/docker_cli_diff_test.go @@ -85,7 +85,7 @@ func TestDiffEnsureOnlyKmsgAndPtmx(t *testing.T) { for _, line := range strings.Split(out, "\n") { if line != "" && !expected[line] { - t.Errorf("'%s' is shown in the diff but shouldn't", line) + t.Errorf("%q is shown in the diff but shouldn't", line) } } From be31a66b7a1f015d4f1448baebd7c7eb2c23e506 Mon Sep 17 00:00:00 2001 From: Jessica Frazelle Date: Tue, 14 Oct 2014 10:59:11 -0700 Subject: [PATCH 025/592] Replace '%s' in rm tests Docker-DCO-1.1-Signed-off-by: Jessica Frazelle (github: jfrazelle) --- integration-cli/docker_cli_rm_test.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/integration-cli/docker_cli_rm_test.go b/integration-cli/docker_cli_rm_test.go index 6c8dc38089..22ecc363bd 100644 --- a/integration-cli/docker_cli_rm_test.go +++ b/integration-cli/docker_cli_rm_test.go @@ -102,7 +102,7 @@ func TestRmContainerOrphaning(t *testing.T) { t.Fatalf("%v: %s", err, out) } if !strings.Contains(out, img1) { - t.Fatalf("Orphaned container (could not find '%s' in docker images): %s", img1, out) + t.Fatalf("Orphaned container (could not find %q in docker images): %s", img1, out) } deleteAllContainers() From c091397d518408a7a9c493f066dc47438d588985 Mon Sep 17 00:00:00 2001 From: Jessica Frazelle Date: Tue, 14 Oct 2014 10:59:19 -0700 Subject: [PATCH 026/592] Replace '%s' in run tests Docker-DCO-1.1-Signed-off-by: Jessica Frazelle (github: jfrazelle) --- integration-cli/docker_cli_run_test.go | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/integration-cli/docker_cli_run_test.go b/integration-cli/docker_cli_run_test.go index d50f6f3443..6bef936369 100644 --- a/integration-cli/docker_cli_run_test.go +++ b/integration-cli/docker_cli_run_test.go @@ -1182,7 +1182,7 @@ func TestRunModeHostname(t *testing.T) { t.Fatal(err) } if actual := strings.Trim(out, "\r\n"); actual != hostname { - t.Fatalf("expected %q, but says: '%s'", hostname, actual) + t.Fatalf("expected %q, but says: %q", hostname, actual) } deleteAllContainers() @@ -1357,11 +1357,11 @@ func TestRunDnsOptionsBasedOnHostResolvConf(t *testing.T) { actualSearch := resolvconf.GetSearchDomains([]byte(out)) if len(actualSearch) != len(hostSearch) { - t.Fatalf("expected %q search domain(s), but it has: '%s'", len(hostSearch), len(actualSearch)) + t.Fatalf("expected %q search domain(s), but it has: %q", len(hostSearch), len(actualSearch)) } for i := range actualSearch { if actualSearch[i] != hostSearch[i] { - t.Fatalf("expected %q domain, but says: '%s'", actualSearch[i], hostSearch[i]) + t.Fatalf("expected %q domain, but says: %q", actualSearch[i], hostSearch[i]) } } @@ -1373,11 +1373,11 @@ func TestRunDnsOptionsBasedOnHostResolvConf(t *testing.T) { actualNameservers := resolvconf.GetNameservers([]byte(out)) if len(actualNameservers) != len(hostNamservers) { - t.Fatalf("expected %q nameserver(s), but it has: '%s'", len(hostNamservers), len(actualNameservers)) + t.Fatalf("expected %q nameserver(s), but it has: %q", len(hostNamservers), len(actualNameservers)) } for i := range actualNameservers { if actualNameservers[i] != hostNamservers[i] { - t.Fatalf("expected %q nameserver, but says: '%s'", actualNameservers[i], hostNamservers[i]) + t.Fatalf("expected %q 
nameserver, but says: %q", actualNameservers[i], hostNamservers[i]) } } @@ -1421,7 +1421,7 @@ func TestRunDnsOptionsBasedOnHostResolvConf(t *testing.T) { } for i := range actualSearch { if actualSearch[i] != hostSearch[i] { - t.Fatalf("expected %q domain, but says: '%s'", actualSearch[i], hostSearch[i]) + t.Fatalf("expected %q domain, but says: %q", actualSearch[i], hostSearch[i]) } } From 949ab477103c93d6d4779b35efbd647541eb1b14 Mon Sep 17 00:00:00 2001 From: Jessica Frazelle Date: Tue, 14 Oct 2014 10:59:38 -0700 Subject: [PATCH 027/592] Replace '%s' in test utils Docker-DCO-1.1-Signed-off-by: Jessica Frazelle (github: jfrazelle) --- integration-cli/docker_utils.go | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/integration-cli/docker_utils.go b/integration-cli/docker_utils.go index 17370eb707..b8058f60b3 100644 --- a/integration-cli/docker_utils.go +++ b/integration-cli/docker_utils.go @@ -44,7 +44,7 @@ func NewDaemon(t *testing.T) *Daemon { dir := filepath.Join(dest, fmt.Sprintf("daemon%d", time.Now().Unix())) daemonFolder, err := filepath.Abs(dir) if err != nil { - t.Fatalf("Could not make '%s' an absolute path: %v", dir, err) + t.Fatalf("Could not make %q an absolute path: %v", dir, err) } if err := os.MkdirAll(filepath.Join(daemonFolder, "graph"), 0600); err != nil { @@ -317,7 +317,7 @@ func imageExists(image string) error { inspectCmd := exec.Command(dockerBinary, "inspect", image) exitCode, err := runCommand(inspectCmd) if exitCode != 0 && err == nil { - err = fmt.Errorf("couldn't find image '%s'", image) + err = fmt.Errorf("couldn't find image %q", image) } return err } @@ -328,7 +328,7 @@ func pullImageIfNotExist(image string) (err error) { _, exitCode, err := runCommandWithOutput(pullCmd) if err != nil || exitCode != 0 { - err = fmt.Errorf("image '%s' wasn't found locally and it couldn't be pulled: %s", image, err) + err = fmt.Errorf("image %q wasn't found locally and it couldn't be pulled: %s", image, err) } } return @@ -341,7 +341,7 @@ func cmd(t *testing.T, args ...string) (string, int, error) { func dockerCmd(t *testing.T, args ...string) (string, int, error) { out, status, err := runCommandWithOutput(exec.Command(dockerBinary, args...)) - errorOut(err, t, fmt.Sprintf("'%s' failed with errors: %v (%v)", strings.Join(args, " "), err, out)) + errorOut(err, t, fmt.Sprintf("%q failed with errors: %v (%v)", strings.Join(args, " "), err, out)) return out, status, err } @@ -349,7 +349,7 @@ func dockerCmd(t *testing.T, args ...string) (string, int, error) { func dockerCmdWithTimeout(timeout time.Duration, args ...string) (string, int, error) { out, status, err := runCommandWithOutputAndTimeout(exec.Command(dockerBinary, args...), timeout) if err != nil { - return out, status, fmt.Errorf("'%s' failed with errors: %v : %q)", strings.Join(args, " "), err, out) + return out, status, fmt.Errorf("%q failed with errors: %v : %q)", strings.Join(args, " "), err, out) } return out, status, err } @@ -360,7 +360,7 @@ func dockerCmdInDir(t *testing.T, path string, args ...string) (string, int, err dockerCommand.Dir = path out, status, err := runCommandWithOutput(dockerCommand) if err != nil { - return out, status, fmt.Errorf("'%s' failed with errors: %v : %q)", strings.Join(args, " "), err, out) + return out, status, fmt.Errorf("%q failed with errors: %v : %q)", strings.Join(args, " "), err, out) } return out, status, err } @@ -371,7 +371,7 @@ func dockerCmdInDirWithTimeout(timeout time.Duration, path string, args ...strin dockerCommand.Dir = path out, status, 
err := runCommandWithOutputAndTimeout(dockerCommand, timeout) if err != nil { - return out, status, fmt.Errorf("'%s' failed with errors: %v : %q)", strings.Join(args, " "), err, out) + return out, status, fmt.Errorf("%q failed with errors: %v : %q)", strings.Join(args, " "), err, out) } return out, status, err } @@ -521,7 +521,7 @@ func getContainerState(t *testing.T, id string) (int, bool, error) { ) out, exitCode, err := dockerCmd(t, "inspect", "--format={{.State.Running}} {{.State.ExitCode}}", id) if err != nil || exitCode != 0 { - return 0, false, fmt.Errorf("'%s' doesn't exist: %s", id, err) + return 0, false, fmt.Errorf("%q doesn't exist: %s", id, err) } out = strings.Trim(out, "\n") From fbaa41b5aa920ca752481e1b94aeff29857cb76d Mon Sep 17 00:00:00 2001 From: Alexandr Morozov Date: Tue, 14 Oct 2014 11:17:41 -0700 Subject: [PATCH 028/592] Add logDone for TestBuildAddSingleFileToNonExistDir Signed-off-by: Alexandr Morozov --- integration-cli/docker_cli_build_test.go | 2 ++ 1 file changed, 2 insertions(+) diff --git a/integration-cli/docker_cli_build_test.go b/integration-cli/docker_cli_build_test.go index e456d579ab..399de8a6b2 100644 --- a/integration-cli/docker_cli_build_test.go +++ b/integration-cli/docker_cli_build_test.go @@ -509,6 +509,8 @@ RUN [ $(ls -l /exists | awk '{print $3":"$4}') = 'dockerio:dockerio' ]`, if _, err := buildImageFromContext(name, ctx, true); err != nil { t.Fatal(err) } + + logDone("build - add single file to non-existing dir") } func TestBuildAddDirContentToRoot(t *testing.T) { From fcfe80f63397c0a8bdd6605798c0639ec3dcda56 Mon Sep 17 00:00:00 2001 From: Alexandr Morozov Date: Tue, 14 Oct 2014 11:47:01 -0700 Subject: [PATCH 029/592] Minor fix of tests names Signed-off-by: Alexandr Morozov --- integration-cli/docker_cli_build_test.go | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/integration-cli/docker_cli_build_test.go b/integration-cli/docker_cli_build_test.go index 399de8a6b2..810010a264 100644 --- a/integration-cli/docker_cli_build_test.go +++ b/integration-cli/docker_cli_build_test.go @@ -488,8 +488,8 @@ func TestBuildCopyWildcardCache(t *testing.T) { logDone("build - copy wild card cache") } -func TestBuildAddSingleFileToNonExistDir(t *testing.T) { - name := "testaddsinglefiletononexistdir" +func TestBuildAddSingleFileToNonExistingDir(t *testing.T) { + name := "testaddsinglefiletononexistingdir" defer deleteImages(name) ctx, err := fakeContext(`FROM busybox RUN echo 'dockerio:x:1001:1001::/bin:/bin/false' >> /etc/passwd @@ -536,8 +536,8 @@ RUN [ $(ls -l /exists | awk '{print $3":"$4}') = 'dockerio:dockerio' ]`, logDone("build - add directory contents to root") } -func TestBuildAddDirContentToExistDir(t *testing.T) { - name := "testadddircontenttoexistdir" +func TestBuildAddDirContentToExistingDir(t *testing.T) { + name := "testadddircontenttoexistingdir" defer deleteImages(name) ctx, err := fakeContext(`FROM busybox RUN echo 'dockerio:x:1001:1001::/bin:/bin/false' >> /etc/passwd From 74d0485885cbb0e5f7561bd31399e5c13209d8d6 Mon Sep 17 00:00:00 2001 From: Fred Lifton Date: Wed, 15 Oct 2014 16:08:07 -0700 Subject: [PATCH 030/592] Expanded release notes Made it clear signed images is a preview feature and added a little more info about how the feature works. 
Docker-DCO-1.1-Signed-off-by: Fred Lifton (github: fredlf) --- docs/sources/index.md | 20 +++++++++++++------- 1 file changed, 13 insertions(+), 7 deletions(-) diff --git a/docs/sources/index.md b/docs/sources/index.md index 7e60b0dc6d..53f9f96073 100644 --- a/docs/sources/index.md +++ b/docs/sources/index.md @@ -110,14 +110,20 @@ ability to configure things like volumes or port mappings before the container is started. For example, in a rapid-response scaling situation, you could use `create` to prepare and stage ten containers in anticipation of heavy loads. -*New provenance features* - -Official images are now signed by Docker, Inc. to improve your confidence and -security. Look for the blue ribbons on the [Docker Hub](https://hub.docker.com/). -The Docker Engine has been updated to automatically verify that a given Official -Repo has a current, valid signature. If no valid signature is detected, Docker -Engine will use a prior image. +*Tech preview of new provenance features* +This release offers a sneak peek at new image signing capabilities that are +currently under development. Soon, these capabilities will allow any image +author to sign their images to certify they have not been tampered with. For +this release, Official images are now signed by Docker, Inc. Not only does this +demonstrate the new functionality, we hope it will improve your confidence in +the security of Official images. Look for the blue ribbons denoting signed +images on the [Docker Hub](https://hub.docker.com/). +The Docker Engine has been updated to automatically verify that a given +Official Repo has a current, valid signature. When pulling a signed image, +you'll see a message stating `the image you are pulling has been verified`. If +no valid signature is detected, Docker Engine will fall back to pulling a +regular, unsigned image. 
*Other improvements & changes* From 7a062b2b8f7751fbb926e6ddc9f7df8a1b281eb6 Mon Sep 17 00:00:00 2001 From: "Daniel, Dao Quang Minh" Date: Wed, 15 Oct 2014 22:39:51 -0400 Subject: [PATCH 031/592] Avoid fallback to SSL protocols < TLS1.0 Signed-off-by: Tibor Vass Docker-DCO-1.1-Signed-off-by: Daniel, Dao Quang Minh (github: dqminh) --- api/server/server.go | 2 ++ docker/docker.go | 2 ++ registry/registry.go | 6 +++++- 3 files changed, 9 insertions(+), 1 deletion(-) diff --git a/api/server/server.go b/api/server/server.go index 897dd6142f..93b8b60a8f 100644 --- a/api/server/server.go +++ b/api/server/server.go @@ -1439,6 +1439,8 @@ func ListenAndServe(proto, addr string, job *engine.Job) error { tlsConfig := &tls.Config{ NextProtos: []string{"http/1.1"}, Certificates: []tls.Certificate{cert}, + // Avoid fallback on insecure SSL protocols + MinVersion: tls.VersionTLS10, } if job.GetenvBool("TlsVerify") { certPool := x509.NewCertPool() diff --git a/docker/docker.go b/docker/docker.go index 37cd155bb7..f0cbb6f6ab 100644 --- a/docker/docker.go +++ b/docker/docker.go @@ -93,6 +93,8 @@ func main() { } tlsConfig.Certificates = []tls.Certificate{cert} } + // Avoid fallback to SSL protocols < TLS1.0 + tlsConfig.MinVersion = tls.VersionTLS10 } if *flTls || *flTlsVerify { diff --git a/registry/registry.go b/registry/registry.go index fd74b7514e..0c648a94b2 100644 --- a/registry/registry.go +++ b/registry/registry.go @@ -36,7 +36,11 @@ const ( ) func newClient(jar http.CookieJar, roots *x509.CertPool, cert *tls.Certificate, timeout TimeoutType) *http.Client { - tlsConfig := tls.Config{RootCAs: roots} + tlsConfig := tls.Config{ + RootCAs: roots, + // Avoid fallback to SSL protocols < TLS1.0 + MinVersion: tls.VersionTLS10, + } if cert != nil { tlsConfig.Certificates = append(tlsConfig.Certificates, *cert) From f6140060e3b7fc8e6f577ca95be0eb5bf8cd1fd4 Mon Sep 17 00:00:00 2001 From: Huayi Zhang Date: Thu, 16 Oct 2014 18:08:15 +0800 Subject: [PATCH 032/592] Fix commit api document The commit message field should be `comment`, not `m` https://github.com/docker/docker/blob/master/api/server/server.go#L478 Signed-off-by: Huayi Zhang --- docs/sources/reference/api/docker_remote_api_v1.12.md | 4 ++-- docs/sources/reference/api/docker_remote_api_v1.13.md | 4 ++-- docs/sources/reference/api/docker_remote_api_v1.14.md | 4 ++-- docs/sources/reference/api/docker_remote_api_v1.15.md | 6 +++--- 4 files changed, 9 insertions(+), 9 deletions(-) diff --git a/docs/sources/reference/api/docker_remote_api_v1.12.md b/docs/sources/reference/api/docker_remote_api_v1.12.md index 8b245f5e9c..0d547f279b 100644 --- a/docs/sources/reference/api/docker_remote_api_v1.12.md +++ b/docs/sources/reference/api/docker_remote_api_v1.12.md @@ -1208,7 +1208,7 @@ Create a new image from a container's changes **Example request**: - POST /commit?container=44c004db4b17&m=message&repo=myrepo HTTP/1.1 + POST /commit?container=44c004db4b17&comment=message&repo=myrepo HTTP/1.1 Content-Type: application/json { @@ -1256,7 +1256,7 @@ Query Parameters: - **container** – source container - **repo** – repository - **tag** – tag -- **m** – commit message +- **comment** – commit message - **author** – author (e.g., "John Hannibal Smith <[hannibal@a-team.com](mailto:hannibal%40a-team.com)>") diff --git a/docs/sources/reference/api/docker_remote_api_v1.13.md b/docs/sources/reference/api/docker_remote_api_v1.13.md index 1a25da18ae..b752d5c01e 100644 --- a/docs/sources/reference/api/docker_remote_api_v1.13.md +++ 
b/docs/sources/reference/api/docker_remote_api_v1.13.md @@ -1197,7 +1197,7 @@ Create a new image from a container's changes **Example request**: - POST /commit?container=44c004db4b17&m=message&repo=myrepo HTTP/1.1 + POST /commit?container=44c004db4b17&comment=message&repo=myrepo HTTP/1.1 Content-Type: application/json { @@ -1245,7 +1245,7 @@ Query Parameters: - **container** – source container - **repo** – repository - **tag** – tag -- **m** – commit message +- **comment** – commit message - **author** – author (e.g., "John Hannibal Smith <[hannibal@a-team.com](mailto:hannibal%40a-team.com)>") diff --git a/docs/sources/reference/api/docker_remote_api_v1.14.md b/docs/sources/reference/api/docker_remote_api_v1.14.md index 0c806bdd2e..6806afae06 100644 --- a/docs/sources/reference/api/docker_remote_api_v1.14.md +++ b/docs/sources/reference/api/docker_remote_api_v1.14.md @@ -1202,7 +1202,7 @@ Create a new image from a container's changes **Example request**: - POST /commit?container=44c004db4b17&m=message&repo=myrepo HTTP/1.1 + POST /commit?container=44c004db4b17&comment=message&repo=myrepo HTTP/1.1 Content-Type: application/json { @@ -1250,7 +1250,7 @@ Query Parameters: - **container** – source container - **repo** – repository - **tag** – tag -- **m** – commit message +- **comment** – commit message - **author** – author (e.g., "John Hannibal Smith <[hannibal@a-team.com](mailto:hannibal%40a-team.com)>") diff --git a/docs/sources/reference/api/docker_remote_api_v1.15.md b/docs/sources/reference/api/docker_remote_api_v1.15.md index e23fa0ff30..cf8f6d3cc3 100644 --- a/docs/sources/reference/api/docker_remote_api_v1.15.md +++ b/docs/sources/reference/api/docker_remote_api_v1.15.md @@ -399,7 +399,7 @@ Status Codes: `GET /containers/(id)/resize?h=&w=` -Resize the TTY of container `id` +Resize the TTY of container `id` **Example request**: @@ -1237,7 +1237,7 @@ Create a new image from a container's changes **Example request**: - POST /commit?container=44c004db4b17&m=message&repo=myrepo HTTP/1.1 + POST /commit?container=44c004db4b17&comment=message&repo=myrepo HTTP/1.1 Content-Type: application/json { @@ -1285,7 +1285,7 @@ Query Parameters: - **container** – source container - **repo** – repository - **tag** – tag -- **m** – commit message +- **comment** – commit message - **author** – author (e.g., "John Hannibal Smith <[hannibal@a-team.com](mailto:hannibal%40a-team.com)>") From d040de6dd4660041dd37792a9a702cf8317a71d9 Mon Sep 17 00:00:00 2001 From: Tianon Gravi Date: Thu, 16 Oct 2014 10:14:26 -0600 Subject: [PATCH 033/592] Fix more missing HOME references Signed-off-by: Andrew Page --- hack/release.sh | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/hack/release.sh b/hack/release.sh index da832e8bb8..1174e0cc4c 100755 --- a/hack/release.sh +++ b/hack/release.sh @@ -270,7 +270,7 @@ EOF done # Upload keys - s3cmd sync /.gnupg/ s3://$BUCKET/ubuntu/.gnupg/ + s3cmd sync $HOME/.gnupg/ s3://$BUCKET/ubuntu/.gnupg/ gpg --armor --export releasedocker > bundles/$VERSION/ubuntu/gpg s3cmd --acl-public put bundles/$VERSION/ubuntu/gpg s3://$BUCKET/gpg @@ -355,8 +355,8 @@ release_test() { setup_gpg() { # Make sure that we have our keys - mkdir -p /.gnupg/ - s3cmd sync s3://$BUCKET/ubuntu/.gnupg/ /.gnupg/ || true + mkdir -p $HOME/.gnupg/ + s3cmd sync s3://$BUCKET/ubuntu/.gnupg/ $HOME/.gnupg/ || true gpg --list-keys releasedocker >/dev/null || { gpg --gen-key --batch < Date: Thu, 16 Oct 2014 17:22:42 +0000 Subject: [PATCH 034/592] Change version to 1.3.0-dev Signed-off-by: Michael Crosby --- 
VERSION | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/VERSION b/VERSION index f0bb29e763..b6bb93f7c7 100644 --- a/VERSION +++ b/VERSION @@ -1 +1 @@ -1.3.0 +1.3.0-dev From 143f438e6e7a4c8205af59ba6e9f52cb450f1cce Mon Sep 17 00:00:00 2001 From: Tobias Gesellchen Date: Thu, 16 Oct 2014 19:36:36 +0200 Subject: [PATCH 035/592] fix minor docs error Signed-off-by: Tobias Gesellchen --- docs/sources/index.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/docs/sources/index.md b/docs/sources/index.md index 53f9f96073..0db731c8f0 100644 --- a/docs/sources/index.md +++ b/docs/sources/index.md @@ -128,6 +128,6 @@ regular, unsigned image. *Other improvements & changes* We've added a new security options flag that lets you set SELinux and AppArmor -labels and profiles. This means you'll longer have to use `docker run ---privileged on kernels that support SE Linux or AppArmor. +labels and profiles. This means you'll no longer have to use `docker run +--privileged` on kernels that support SE Linux or AppArmor. From 3893e220e840054dfdadba637372bc30f683fe58 Mon Sep 17 00:00:00 2001 From: Jessica Frazelle Date: Thu, 16 Oct 2014 11:39:22 -0700 Subject: [PATCH 036/592] Setting iptables=false should propagate to ip-masq=false Signed-off-by: Jessica Frazelle --- daemon/daemon.go | 2 +- integration-cli/docker_cli_daemon_test.go | 10 ++++++++++ 2 files changed, 11 insertions(+), 1 deletion(-) diff --git a/daemon/daemon.go b/daemon/daemon.go index 235788c684..caf0c8745f 100644 --- a/daemon/daemon.go +++ b/daemon/daemon.go @@ -731,7 +731,7 @@ func NewDaemonFromDirectory(config *Config, eng *engine.Engine) (*Daemon, error) return nil, fmt.Errorf("You specified --iptables=false with --icc=false. ICC uses iptables to function. Please set --icc or --iptables to true.") } if !config.EnableIptables && config.EnableIpMasq { - return nil, fmt.Errorf("You specified --iptables=false with --ipmasq=true. IP masquerading uses iptables to function. Please set --ipmasq to false or --iptables to true.") + config.EnableIpMasq = false } config.DisableNetwork = config.BridgeIface == disableNetworkBridge diff --git a/integration-cli/docker_cli_daemon_test.go b/integration-cli/docker_cli_daemon_test.go index 906680dc6d..6160e57e94 100644 --- a/integration-cli/docker_cli_daemon_test.go +++ b/integration-cli/docker_cli_daemon_test.go @@ -82,3 +82,13 @@ func TestDaemonRestartWithVolumesRefs(t *testing.T) { logDone("daemon - volume refs are restored") } + +func TestDaemonStartIptablesFalse(t *testing.T) { + d := NewDaemon(t) + if err := d.Start("--iptables=false"); err != nil { + t.Fatalf("we should have been able to start the daemon with passing iptables=false: %v", err) + } + d.Stop() + + logDone("daemon - started daemon with iptables=false") +} From b8b9930a8ab95cfe32731e014a57690f154d01fa Mon Sep 17 00:00:00 2001 From: Ryan Detzel Date: Thu, 16 Oct 2014 15:57:38 -0400 Subject: [PATCH 037/592] Docs syntax fix the flags must come before the container name. --- docs/sources/reference/commandline/cli.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/sources/reference/commandline/cli.md b/docs/sources/reference/commandline/cli.md index 86f02b6cf1..ec4f24df51 100644 --- a/docs/sources/reference/commandline/cli.md +++ b/docs/sources/reference/commandline/cli.md @@ -640,7 +640,7 @@ This will create a container named `ubuntu_bash` and start a Bash session. This will create a new file `/tmp/execWorks` inside the running container `ubuntu_bash`, in the background. 
- $ sudo docker exec ubuntu_bash -it bash + $ sudo docker exec -it ubuntu_bash bash This will create a new Bash session in the container `ubuntu_bash`. From d8cd8165a94dc60d1f7a1053bb789fd302252279 Mon Sep 17 00:00:00 2001 From: Phil Estes Date: Tue, 14 Oct 2014 17:32:25 -0400 Subject: [PATCH 038/592] Migrate container GET API tests from integration to integration-cli An initial start to migration of the API tests from integration to the integration-cli model. Docker-DCO-1.1-Signed-off-by: Phil Estes (github: estesp) --- integration-cli/docker_api_containers_test.go | 122 ++++++++++++++++ integration/api_test.go | 134 ------------------ 2 files changed, 122 insertions(+), 134 deletions(-) create mode 100644 integration-cli/docker_api_containers_test.go diff --git a/integration-cli/docker_api_containers_test.go b/integration-cli/docker_api_containers_test.go new file mode 100644 index 0000000000..89b7ab1fb9 --- /dev/null +++ b/integration-cli/docker_api_containers_test.go @@ -0,0 +1,122 @@ +package main + +import ( + "bytes" + "encoding/json" + "io" + "os/exec" + "testing" + + "github.com/docker/docker/vendor/src/code.google.com/p/go/src/pkg/archive/tar" +) + +func TestContainerApiGetAll(t *testing.T) { + startCount, err := getContainerCount() + if err != nil { + t.Fatalf("Cannot query container count: %v", err) + } + + runCmd := exec.Command(dockerBinary, "run", "-d", "busybox", "true") + out, _, err := runCommandWithOutput(runCmd) + if err != nil { + t.Fatalf("Error on container creation: %v, output: %q", err, out) + } + + testContainerId := stripTrailingCharacters(out) + + body, err := sockRequest("GET", "/containers/json?all=1") + if err != nil { + t.Fatalf("GET all containers sockRequest failed: %v", err) + } + + var inspectJSON []map[string]interface{} + if err = json.Unmarshal(body, &inspectJSON); err != nil { + t.Fatalf("unable to unmarshal response body: %v", err) + } + + if len(inspectJSON) != startCount+1 { + t.Fatalf("Expected %d container(s), %d found (started with: %d)", startCount+1, len(inspectJSON), startCount) + } + if id, _ := inspectJSON[0]["Id"]; id != testContainerId { + t.Fatalf("Container ID mismatch. 
Expected: %s, received: %s\n", testContainerId, id) + } + + deleteAllContainers() + + logDone("container REST API - check GET json/all=1") +} + +func TestContainerApiGetExport(t *testing.T) { + runCmd := exec.Command(dockerBinary, "run", "-d", "busybox", "touch", "/test") + out, _, err := runCommandWithOutput(runCmd) + if err != nil { + t.Fatalf("Error on container creation: %v, output: %q", err, out) + } + + testContainerId := stripTrailingCharacters(out) + + body, err := sockRequest("GET", "/containers/"+testContainerId+"/export") + if err != nil { + t.Fatalf("GET containers/export sockRequest failed: %v", err) + } + + found := false + for tarReader := tar.NewReader(bytes.NewReader(body)); ; { + h, err := tarReader.Next() + if err != nil { + if err == io.EOF { + break + } + t.Fatal(err) + } + if h.Name == "test" { + found = true + break + } + } + + if !found { + t.Fatalf("The created test file has not been found in the exported image") + } + deleteAllContainers() + + logDone("container REST API - check GET containers/export") +} + +func TestContainerApiGetChanges(t *testing.T) { + runCmd := exec.Command(dockerBinary, "run", "-d", "busybox", "rm", "/etc/passwd") + out, _, err := runCommandWithOutput(runCmd) + if err != nil { + t.Fatalf("Error on container creation: %v, output: %q", err, out) + } + + testContainerId := stripTrailingCharacters(out) + + body, err := sockRequest("GET", "/containers/"+testContainerId+"/changes") + if err != nil { + t.Fatalf("GET containers/changes sockRequest failed: %v", err) + } + + changes := []struct { + Kind int + Path string + }{} + if err = json.Unmarshal(body, &changes); err != nil { + t.Fatalf("unable to unmarshal response body: %v", err) + } + + // Check the changelog for removal of /etc/passwd + success := false + for _, elem := range changes { + if elem.Path == "/etc/passwd" && elem.Kind == 2 { + success = true + } + } + if !success { + t.Fatalf("/etc/passwd has been removed but is not present in the diff") + } + + deleteAllContainers() + + logDone("container REST API - check GET containers/changes") +} diff --git a/integration/api_test.go b/integration/api_test.go index 8fa295e7b1..6bb340d53b 100644 --- a/integration/api_test.go +++ b/integration/api_test.go @@ -21,100 +21,6 @@ import ( "github.com/docker/docker/vendor/src/code.google.com/p/go/src/pkg/archive/tar" ) -func TestGetContainersJSON(t *testing.T) { - eng := NewTestEngine(t) - defer mkDaemonFromEngine(eng, t).Nuke() - - job := eng.Job("containers") - job.SetenvBool("all", true) - outs, err := job.Stdout.AddTable() - if err != nil { - t.Fatal(err) - } - if err := job.Run(); err != nil { - t.Fatal(err) - } - beginLen := len(outs.Data) - - containerID := createTestContainer(eng, &runconfig.Config{ - Image: unitTestImageID, - Cmd: []string{"echo", "test"}, - }, t) - - if containerID == "" { - t.Fatalf("Received empty container ID") - } - - req, err := http.NewRequest("GET", "/containers/json?all=1", nil) - if err != nil { - t.Fatal(err) - } - - r := httptest.NewRecorder() - if err := server.ServeRequest(eng, api.APIVERSION, r, req); err != nil { - t.Fatal(err) - } - assertHttpNotError(r, t) - containers := engine.NewTable("", 0) - if _, err := containers.ReadListFrom(r.Body.Bytes()); err != nil { - t.Fatal(err) - } - if len(containers.Data) != beginLen+1 { - t.Fatalf("Expected %d container, %d found (started with: %d)", beginLen+1, len(containers.Data), beginLen) - } - if id := containers.Data[0].Get("Id"); id != containerID { - t.Fatalf("Container ID mismatch. 
Expected: %s, received: %s\n", containerID, id) - } -} - -func TestGetContainersExport(t *testing.T) { - eng := NewTestEngine(t) - defer mkDaemonFromEngine(eng, t).Nuke() - - // Create a container and remove a file - containerID := createTestContainer(eng, - &runconfig.Config{ - Image: unitTestImageID, - Cmd: []string{"touch", "/test"}, - }, - t, - ) - containerRun(eng, containerID, t) - - r := httptest.NewRecorder() - - req, err := http.NewRequest("GET", "/containers/"+containerID+"/export", nil) - if err != nil { - t.Fatal(err) - } - if err := server.ServeRequest(eng, api.APIVERSION, r, req); err != nil { - t.Fatal(err) - } - assertHttpNotError(r, t) - - if r.Code != http.StatusOK { - t.Fatalf("%d OK expected, received %d\n", http.StatusOK, r.Code) - } - - found := false - for tarReader := tar.NewReader(r.Body); ; { - h, err := tarReader.Next() - if err != nil { - if err == io.EOF { - break - } - t.Fatal(err) - } - if h.Name == "test" { - found = true - break - } - } - if !found { - t.Fatalf("The created test file has not been found in the exported image") - } -} - func TestSaveImageAndThenLoad(t *testing.T) { eng := NewTestEngine(t) defer mkDaemonFromEngine(eng, t).Nuke() @@ -186,46 +92,6 @@ func TestSaveImageAndThenLoad(t *testing.T) { } } -func TestGetContainersChanges(t *testing.T) { - eng := NewTestEngine(t) - defer mkDaemonFromEngine(eng, t).Nuke() - - // Create a container and remove a file - containerID := createTestContainer(eng, - &runconfig.Config{ - Image: unitTestImageID, - Cmd: []string{"/bin/rm", "/etc/passwd"}, - }, - t, - ) - containerRun(eng, containerID, t) - - r := httptest.NewRecorder() - req, err := http.NewRequest("GET", "/containers/"+containerID+"/changes", nil) - if err != nil { - t.Fatal(err) - } - if err := server.ServeRequest(eng, api.APIVERSION, r, req); err != nil { - t.Fatal(err) - } - assertHttpNotError(r, t) - outs := engine.NewTable("", 0) - if _, err := outs.ReadListFrom(r.Body.Bytes()); err != nil { - t.Fatal(err) - } - - // Check the changelog - success := false - for _, elem := range outs.Data { - if elem.Get("Path") == "/etc/passwd" && elem.GetInt("Kind") == 2 { - success = true - } - } - if !success { - t.Fatalf("/etc/passwd as been removed but is not present in the diff") - } -} - func TestGetContainersTop(t *testing.T) { eng := NewTestEngine(t) defer mkDaemonFromEngine(eng, t).Nuke() From 9edf96782470deb15deec3be07e3988164454148 Mon Sep 17 00:00:00 2001 From: Doug Davis Date: Wed, 8 Oct 2014 20:34:20 -0700 Subject: [PATCH 039/592] Add failing testcase for single quotes in CMD Closes #5701 This is due to @SvenDowideit comment at: https://github.com/docker/docker/issues/5701#issuecomment-58133541 where he asked for a testcase showing the error case. Signed-off-by: Doug Davis --- integration-cli/docker_cli_build_test.go | 22 ++++++++++++++++++++++ 1 file changed, 22 insertions(+) diff --git a/integration-cli/docker_cli_build_test.go b/integration-cli/docker_cli_build_test.go index e456d579ab..b82b40a71d 100644 --- a/integration-cli/docker_cli_build_test.go +++ b/integration-cli/docker_cli_build_test.go @@ -2740,3 +2740,25 @@ func TestBuildExoticShellInterpolation(t *testing.T) { logDone("build - exotic shell interpolation") } + +func TestBuildVerifySingleQuoteFails(t *testing.T) { + // This testcase is supposed to generate an error because the + // JSON array we're passing in on the CMD uses single quotes instead + // of double quotes (per the JSON spec). 
This means we interpret it + // as a "string" insead of "JSON array" and pass it on to "sh -c" and + // it should barf on it. + name := "testbuildsinglequotefails" + defer deleteImages(name) + + _, err := buildImage(name, + `FROM busybox + CMD [ '/bin/sh', '-c', 'echo hi' ]`, + true) + _, _, err = runCommandWithOutput(exec.Command(dockerBinary, "run", name)) + + if err == nil { + t.Fatal("The image was not supposed to be able to run") + } + + logDone("build - verify single quotes fail") +} From 3182ee5c9bc13f67858c3676a560427d4882f12c Mon Sep 17 00:00:00 2001 From: Jessica Frazelle Date: Tue, 14 Oct 2014 11:26:53 -0700 Subject: [PATCH 040/592] Cleanup errorOut resp in commit test Docker-DCO-1.1-Signed-off-by: Jessica Frazelle (github: jfrazelle) Cleanup errorOut resp in commit test Docker-DCO-1.1-Signed-off-by: Jessica Frazelle (github: jfrazelle) --- integration-cli/docker_cli_commit_test.go | 42 ++++++++++++++--------- 1 file changed, 26 insertions(+), 16 deletions(-) diff --git a/integration-cli/docker_cli_commit_test.go b/integration-cli/docker_cli_commit_test.go index 46b998693d..ddc7a2e041 100644 --- a/integration-cli/docker_cli_commit_test.go +++ b/integration-cli/docker_cli_commit_test.go @@ -1,7 +1,6 @@ package main import ( - "fmt" "os/exec" "strings" "testing" @@ -10,23 +9,29 @@ import ( func TestCommitAfterContainerIsDone(t *testing.T) { runCmd := exec.Command(dockerBinary, "run", "-i", "-a", "stdin", "busybox", "echo", "foo") out, _, _, err := runCommandWithStdoutStderr(runCmd) - errorOut(err, t, fmt.Sprintf("failed to run container: %v %v", out, err)) + if err != nil { + t.Fatalf("failed to run container: %s, %v", out, err) + } cleanedContainerID := stripTrailingCharacters(out) waitCmd := exec.Command(dockerBinary, "wait", cleanedContainerID) - _, _, err = runCommandWithOutput(waitCmd) - errorOut(err, t, fmt.Sprintf("error thrown while waiting for container: %s", out)) + if _, _, err = runCommandWithOutput(waitCmd); err != nil { + t.Fatalf("error thrown while waiting for container: %s, %v", out, err) + } commitCmd := exec.Command(dockerBinary, "commit", cleanedContainerID) out, _, err = runCommandWithOutput(commitCmd) - errorOut(err, t, fmt.Sprintf("failed to commit container to image: %v %v", out, err)) + if err != nil { + t.Fatalf("failed to commit container to image: %s, %v", out, err) + } cleanedImageID := stripTrailingCharacters(out) inspectCmd := exec.Command(dockerBinary, "inspect", cleanedImageID) - out, _, err = runCommandWithOutput(inspectCmd) - errorOut(err, t, fmt.Sprintf("failed to inspect image: %v %v", out, err)) + if out, _, err = runCommandWithOutput(inspectCmd); err != nil { + t.Fatalf("failed to inspect image: %s, %v", out, err) + } deleteContainer(cleanedContainerID) deleteImages(cleanedImageID) @@ -37,23 +42,29 @@ func TestCommitAfterContainerIsDone(t *testing.T) { func TestCommitWithoutPause(t *testing.T) { runCmd := exec.Command(dockerBinary, "run", "-i", "-a", "stdin", "busybox", "echo", "foo") out, _, _, err := runCommandWithStdoutStderr(runCmd) - errorOut(err, t, fmt.Sprintf("failed to run container: %v %v", out, err)) + if err != nil { + t.Fatalf("failed to run container: %s, %v", out, err) + } cleanedContainerID := stripTrailingCharacters(out) waitCmd := exec.Command(dockerBinary, "wait", cleanedContainerID) - _, _, err = runCommandWithOutput(waitCmd) - errorOut(err, t, fmt.Sprintf("error thrown while waiting for container: %s", out)) + if _, _, err = runCommandWithOutput(waitCmd); err != nil { + t.Fatalf("error thrown while waiting for container: 
%s, %v", out, err) + } commitCmd := exec.Command(dockerBinary, "commit", "-p=false", cleanedContainerID) out, _, err = runCommandWithOutput(commitCmd) - errorOut(err, t, fmt.Sprintf("failed to commit container to image: %v %v", out, err)) + if err != nil { + t.Fatalf("failed to commit container to image: %s, %v", out, err) + } cleanedImageID := stripTrailingCharacters(out) inspectCmd := exec.Command(dockerBinary, "inspect", cleanedImageID) - out, _, err = runCommandWithOutput(inspectCmd) - errorOut(err, t, fmt.Sprintf("failed to inspect image: %v %v", out, err)) + if out, _, err = runCommandWithOutput(inspectCmd); err != nil { + t.Fatalf("failed to inspect image: %s, %v", out, err) + } deleteContainer(cleanedContainerID) deleteImages(cleanedImageID) @@ -81,7 +92,7 @@ func TestCommitNewFile(t *testing.T) { t.Fatal(err, out) } if actual := strings.Trim(out, "\r\n"); actual != "koye" { - t.Fatalf("expected output koye received %s", actual) + t.Fatalf("expected output koye received %q", actual) } deleteAllContainers() @@ -92,7 +103,6 @@ func TestCommitNewFile(t *testing.T) { func TestCommitTTY(t *testing.T) { cmd := exec.Command(dockerBinary, "run", "-t", "--name", "tty", "busybox", "/bin/ls") - if _, err := runCommand(cmd); err != nil { t.Fatal(err) } @@ -105,7 +115,6 @@ func TestCommitTTY(t *testing.T) { imageID = strings.Trim(imageID, "\r\n") cmd = exec.Command(dockerBinary, "run", "ttytest", "/bin/ls") - if _, err := runCommand(cmd); err != nil { t.Fatal(err) } @@ -124,6 +133,7 @@ func TestCommitWithHostBindMount(t *testing.T) { if err != nil { t.Fatal(imageID, err) } + imageID = strings.Trim(imageID, "\r\n") cmd = exec.Command(dockerBinary, "run", "bindtest", "true") From 88d65cbdf4881c0e56556d9c8efbf60410f0645f Mon Sep 17 00:00:00 2001 From: Jessica Frazelle Date: Tue, 14 Oct 2014 12:23:10 -0700 Subject: [PATCH 041/592] Cleanup errorOut resp in port test Docker-DCO-1.1-Signed-off-by: Jessica Frazelle (github: jfrazelle) --- integration-cli/docker_cli_port_test.go | 50 ++++++++++++++++++------- 1 file changed, 36 insertions(+), 14 deletions(-) diff --git a/integration-cli/docker_cli_port_test.go b/integration-cli/docker_cli_port_test.go index ba986b9ac6..1ea7374e85 100644 --- a/integration-cli/docker_cli_port_test.go +++ b/integration-cli/docker_cli_port_test.go @@ -11,12 +11,16 @@ func TestPortList(t *testing.T) { // one port runCmd := exec.Command(dockerBinary, "run", "-d", "-p", "9876:80", "busybox", "top") out, _, err := runCommandWithOutput(runCmd) - errorOut(err, t, out) + if err != nil { + t.Fatal(out, err) + } firstID := stripTrailingCharacters(out) runCmd = exec.Command(dockerBinary, "port", firstID, "80") out, _, err = runCommandWithOutput(runCmd) - errorOut(err, t, out) + if err != nil { + t.Fatal(out, err) + } if !assertPortList(t, out, []string{"0.0.0.0:9876"}) { t.Error("Port list is not correct") @@ -24,14 +28,17 @@ func TestPortList(t *testing.T) { runCmd = exec.Command(dockerBinary, "port", firstID) out, _, err = runCommandWithOutput(runCmd) - errorOut(err, t, out) + if err != nil { + t.Fatal(out, err) + } if !assertPortList(t, out, []string{"80/tcp -> 0.0.0.0:9876"}) { t.Error("Port list is not correct") } runCmd = exec.Command(dockerBinary, "rm", "-f", firstID) - out, _, err = runCommandWithOutput(runCmd) - errorOut(err, t, out) + if out, _, err = runCommandWithOutput(runCmd); err != nil { + t.Fatal(out, err) + } // three port runCmd = exec.Command(dockerBinary, "run", "-d", @@ -40,12 +47,16 @@ func TestPortList(t *testing.T) { "-p", "9878:82", "busybox", "top") out, _, 
err = runCommandWithOutput(runCmd) - errorOut(err, t, out) + if err != nil { + t.Fatal(out, err) + } ID := stripTrailingCharacters(out) runCmd = exec.Command(dockerBinary, "port", ID, "80") out, _, err = runCommandWithOutput(runCmd) - errorOut(err, t, out) + if err != nil { + t.Fatal(out, err) + } if !assertPortList(t, out, []string{"0.0.0.0:9876"}) { t.Error("Port list is not correct") @@ -53,7 +64,9 @@ func TestPortList(t *testing.T) { runCmd = exec.Command(dockerBinary, "port", ID) out, _, err = runCommandWithOutput(runCmd) - errorOut(err, t, out) + if err != nil { + t.Fatal(out, err) + } if !assertPortList(t, out, []string{ "80/tcp -> 0.0.0.0:9876", @@ -63,7 +76,9 @@ func TestPortList(t *testing.T) { } runCmd = exec.Command(dockerBinary, "rm", "-f", ID) out, _, err = runCommandWithOutput(runCmd) - errorOut(err, t, out) + if err != nil { + t.Fatal(out, err) + } // more and one port mapped to the same container port runCmd = exec.Command(dockerBinary, "run", "-d", @@ -73,12 +88,16 @@ func TestPortList(t *testing.T) { "-p", "9878:82", "busybox", "top") out, _, err = runCommandWithOutput(runCmd) - errorOut(err, t, out) + if err != nil { + t.Fatal(out, err) + } ID = stripTrailingCharacters(out) runCmd = exec.Command(dockerBinary, "port", ID, "80") out, _, err = runCommandWithOutput(runCmd) - errorOut(err, t, out) + if err != nil { + t.Fatal(out, err) + } if !assertPortList(t, out, []string{"0.0.0.0:9876", "0.0.0.0:9999"}) { t.Error("Port list is not correct") @@ -86,7 +105,9 @@ func TestPortList(t *testing.T) { runCmd = exec.Command(dockerBinary, "port", ID) out, _, err = runCommandWithOutput(runCmd) - errorOut(err, t, out) + if err != nil { + t.Fatal(out, err) + } if !assertPortList(t, out, []string{ "80/tcp -> 0.0.0.0:9876", @@ -96,8 +117,9 @@ func TestPortList(t *testing.T) { t.Error("Port list is not correct\n", out) } runCmd = exec.Command(dockerBinary, "rm", "-f", ID) - out, _, err = runCommandWithOutput(runCmd) - errorOut(err, t, out) + if out, _, err = runCommandWithOutput(runCmd); err != nil { + t.Fatal(out, err) + } deleteAllContainers() From b645df1d39f942919a117bdf9f9528e38681dc0b Mon Sep 17 00:00:00 2001 From: Jessica Frazelle Date: Tue, 14 Oct 2014 12:25:46 -0700 Subject: [PATCH 042/592] Cleanup errorOut resp in export test Docker-DCO-1.1-Signed-off-by: Jessica Frazelle (github: jfrazelle) --- integration-cli/docker_cli_export_import_test.go | 14 +++++++++----- 1 file changed, 9 insertions(+), 5 deletions(-) diff --git a/integration-cli/docker_cli_export_import_test.go b/integration-cli/docker_cli_export_import_test.go index b044cd8366..e1e95e436e 100644 --- a/integration-cli/docker_cli_export_import_test.go +++ b/integration-cli/docker_cli_export_import_test.go @@ -26,19 +26,23 @@ func TestExportContainerAndImportImage(t *testing.T) { exportCmdTemplate := `%v export %v > /tmp/testexp.tar` exportCmdFinal := fmt.Sprintf(exportCmdTemplate, dockerBinary, cleanedContainerID) exportCmd := exec.Command("bash", "-c", exportCmdFinal) - out, _, err = runCommandWithOutput(exportCmd) - errorOut(err, t, fmt.Sprintf("failed to export container: %v %v", out, err)) + if out, _, err = runCommandWithOutput(exportCmd); err != nil { + t.Fatalf("failed to export container: %s, %v", out, err) + } importCmdFinal := `cat /tmp/testexp.tar | docker import - repo/testexp:v1` importCmd := exec.Command("bash", "-c", importCmdFinal) out, _, err = runCommandWithOutput(importCmd) - errorOut(err, t, fmt.Sprintf("failed to import image: %v %v", out, err)) + if err != nil { + t.Fatalf("failed to import image: 
%s, %v", out, err) + } cleanedImageID := stripTrailingCharacters(out) inspectCmd = exec.Command(dockerBinary, "inspect", cleanedImageID) - out, _, err = runCommandWithOutput(inspectCmd) - errorOut(err, t, fmt.Sprintf("output should've been an image id: %v %v", out, err)) + if out, _, err = runCommandWithOutput(inspectCmd); err != nil { + t.Fatalf("output should've been an image id: %s, %v", out, err) + } deleteContainer(cleanedContainerID) deleteImages("repo/testexp:v1") From 475235ba7e5ad5a8c1f3bbbad6566bb646eac0f9 Mon Sep 17 00:00:00 2001 From: Jessica Frazelle Date: Tue, 14 Oct 2014 12:28:56 -0700 Subject: [PATCH 043/592] Cleanup errorOut resp in tag test Docker-DCO-1.1-Signed-off-by: Jessica Frazelle (github: jfrazelle) --- integration-cli/docker_cli_tag_test.go | 14 +++++++++----- 1 file changed, 9 insertions(+), 5 deletions(-) diff --git a/integration-cli/docker_cli_tag_test.go b/integration-cli/docker_cli_tag_test.go index 815416f208..71c643c349 100644 --- a/integration-cli/docker_cli_tag_test.go +++ b/integration-cli/docker_cli_tag_test.go @@ -13,8 +13,9 @@ func TestTagUnprefixedRepoByName(t *testing.T) { } tagCmd := exec.Command(dockerBinary, "tag", "busybox:latest", "testfoobarbaz") - out, _, err := runCommandWithOutput(tagCmd) - errorOut(err, t, fmt.Sprintf("%v %v", out, err)) + if out, _, err := runCommandWithOutput(tagCmd); err != nil { + t.Fatal(out, err) + } deleteImages("testfoobarbaz") @@ -25,12 +26,15 @@ func TestTagUnprefixedRepoByName(t *testing.T) { func TestTagUnprefixedRepoByID(t *testing.T) { getIDCmd := exec.Command(dockerBinary, "inspect", "-f", "{{.Id}}", "busybox") out, _, err := runCommandWithOutput(getIDCmd) - errorOut(err, t, fmt.Sprintf("failed to get the image ID of busybox: %v", err)) + if err != nil { + t.Fatalf("failed to get the image ID of busybox: %s, %v", out, err) + } cleanedImageID := stripTrailingCharacters(out) tagCmd := exec.Command(dockerBinary, "tag", cleanedImageID, "testfoobarbaz") - out, _, err = runCommandWithOutput(tagCmd) - errorOut(err, t, fmt.Sprintf("%s %s", out, err)) + if out, _, err = runCommandWithOutput(tagCmd); err != nil { + t.Fatal(out, err) + } deleteImages("testfoobarbaz") From 6b858b59ed6e1f594e5552e3f5e8f46351466d78 Mon Sep 17 00:00:00 2001 From: Jessica Frazelle Date: Tue, 14 Oct 2014 12:34:02 -0700 Subject: [PATCH 044/592] Cleanup errorOut resp in create test Docker-DCO-1.1-Signed-off-by: Jessica Frazelle (github: jfrazelle) --- integration-cli/docker_cli_create_test.go | 33 +++++++++++++++-------- 1 file changed, 22 insertions(+), 11 deletions(-) diff --git a/integration-cli/docker_cli_create_test.go b/integration-cli/docker_cli_create_test.go index a3d20bdbf5..d85fde1930 100644 --- a/integration-cli/docker_cli_create_test.go +++ b/integration-cli/docker_cli_create_test.go @@ -2,7 +2,6 @@ package main import ( "encoding/json" - "fmt" "os/exec" "testing" "time" @@ -12,13 +11,17 @@ import ( func TestCreateArgs(t *testing.T) { runCmd := exec.Command(dockerBinary, "create", "busybox", "command", "arg1", "arg2", "arg with space") out, _, _, err := runCommandWithStdoutStderr(runCmd) - errorOut(err, t, out) + if err != nil { + t.Fatal(out, err) + } cleanedContainerID := stripTrailingCharacters(out) inspectCmd := exec.Command(dockerBinary, "inspect", cleanedContainerID) - inspectOut, _, err := runCommandWithOutput(inspectCmd) - errorOut(err, t, fmt.Sprintf("out should've been a container id: %v %v", inspectOut, err)) + out, _, err = runCommandWithOutput(inspectCmd) + if err != nil { + t.Fatalf("out should've been a container 
id: %s, %v", out, err) + } containers := []struct { ID string @@ -27,7 +30,7 @@ func TestCreateArgs(t *testing.T) { Args []string Image string }{} - if err := json.Unmarshal([]byte(inspectOut), &containers); err != nil { + if err := json.Unmarshal([]byte(out), &containers); err != nil { t.Fatalf("Error inspecting the container: %s", err) } if len(containers) != 1 { @@ -60,20 +63,24 @@ func TestCreateArgs(t *testing.T) { func TestCreateHostConfig(t *testing.T) { runCmd := exec.Command(dockerBinary, "create", "-P", "busybox", "echo") out, _, _, err := runCommandWithStdoutStderr(runCmd) - errorOut(err, t, out) + if err != nil { + t.Fatal(out, err) + } cleanedContainerID := stripTrailingCharacters(out) inspectCmd := exec.Command(dockerBinary, "inspect", cleanedContainerID) - inspectOut, _, err := runCommandWithOutput(inspectCmd) - errorOut(err, t, fmt.Sprintf("out should've been a container id: %v %v", inspectOut, err)) + out, _, err = runCommandWithOutput(inspectCmd) + if err != nil { + t.Fatalf("out should've been a container id: %s, %v", out, err) + } containers := []struct { HostConfig *struct { PublishAllPorts bool } }{} - if err := json.Unmarshal([]byte(inspectOut), &containers); err != nil { + if err := json.Unmarshal([]byte(out), &containers); err != nil { t.Fatalf("Error inspecting the container: %s", err) } if len(containers) != 1 { @@ -98,13 +105,17 @@ func TestCreateHostConfig(t *testing.T) { func TestCreateEchoStdout(t *testing.T) { runCmd := exec.Command(dockerBinary, "create", "busybox", "echo", "test123") out, _, _, err := runCommandWithStdoutStderr(runCmd) - errorOut(err, t, out) + if err != nil { + t.Fatal(out, err) + } cleanedContainerID := stripTrailingCharacters(out) runCmd = exec.Command(dockerBinary, "start", "-ai", cleanedContainerID) out, _, _, err = runCommandWithStdoutStderr(runCmd) - errorOut(err, t, out) + if err != nil { + t.Fatal(out, err) + } if out != "test123\n" { t.Errorf("container should've printed 'test123', got %q", out) From 17842840ec0fc7a0b66050b97692250b74d91372 Mon Sep 17 00:00:00 2001 From: Jessica Frazelle Date: Tue, 14 Oct 2014 12:38:00 -0700 Subject: [PATCH 045/592] Cleanup errorOut resp in nat test Docker-DCO-1.1-Signed-off-by: Jessica Frazelle (github: jfrazelle) --- integration-cli/docker_cli_nat_test.go | 18 +++++++++++++----- 1 file changed, 13 insertions(+), 5 deletions(-) diff --git a/integration-cli/docker_cli_nat_test.go b/integration-cli/docker_cli_nat_test.go index 3f0fa2b272..01ebb73c74 100644 --- a/integration-cli/docker_cli_nat_test.go +++ b/integration-cli/docker_cli_nat_test.go @@ -26,17 +26,24 @@ func TestNetworkNat(t *testing.T) { runCmd := exec.Command(dockerBinary, "run", "-dt", "-p", "8080:8080", "busybox", "nc", "-lp", "8080") out, _, err := runCommandWithOutput(runCmd) - errorOut(err, t, fmt.Sprintf("run1 failed with errors: %v (%s)", err, out)) + if err != nil { + t.Fatal(out, err) + } cleanedContainerID := stripTrailingCharacters(out) runCmd = exec.Command(dockerBinary, "run", "busybox", "sh", "-c", fmt.Sprintf("echo hello world | nc -w 30 %s 8080", ifaceIP)) out, _, err = runCommandWithOutput(runCmd) - errorOut(err, t, fmt.Sprintf("run2 failed with errors: %v (%s)", err, out)) + if err != nil { + t.Fatal(out, err) + } runCmd = exec.Command(dockerBinary, "logs", cleanedContainerID) out, _, err = runCommandWithOutput(runCmd) - errorOut(err, t, fmt.Sprintf("failed to retrieve logs for container: %v %v", cleanedContainerID, err)) + if err != nil { + t.Fatalf("failed to retrieve logs for container: %s, %v", out, err) + } + 
out = strings.Trim(out, "\r\n") if expected := "hello world"; out != expected { @@ -44,8 +51,9 @@ func TestNetworkNat(t *testing.T) { } killCmd := exec.Command(dockerBinary, "kill", cleanedContainerID) - out, _, err = runCommandWithOutput(killCmd) - errorOut(err, t, fmt.Sprintf("failed to kill container: %v %v", out, err)) + if out, _, err = runCommandWithOutput(killCmd); err != nil { + t.Fatalf("failed to kill container: %s, %v", out, err) + } deleteAllContainers() logDone("network - make sure nat works through the host") From 0faf87598f19bd3520704ed18209afbbe6a6ee90 Mon Sep 17 00:00:00 2001 From: Jessica Frazelle Date: Tue, 14 Oct 2014 12:44:29 -0700 Subject: [PATCH 046/592] Cleanup errorOut resp in kill test Docker-DCO-1.1-Signed-off-by: Jessica Frazelle (github: jfrazelle) --- integration-cli/docker_cli_kill_test.go | 37 ++++++++++++++++--------- 1 file changed, 24 insertions(+), 13 deletions(-) diff --git a/integration-cli/docker_cli_kill_test.go b/integration-cli/docker_cli_kill_test.go index 6ee246f5ff..33135a3be7 100644 --- a/integration-cli/docker_cli_kill_test.go +++ b/integration-cli/docker_cli_kill_test.go @@ -1,7 +1,6 @@ package main import ( - "fmt" "os/exec" "strings" "testing" @@ -10,21 +9,27 @@ import ( func TestKillContainer(t *testing.T) { runCmd := exec.Command(dockerBinary, "run", "-d", "busybox", "sh", "-c", "sleep 10") out, _, err := runCommandWithOutput(runCmd) - errorOut(err, t, fmt.Sprintf("run failed with errors: %v", err)) + if err != nil { + t.Fatal(out, err) + } cleanedContainerID := stripTrailingCharacters(out) inspectCmd := exec.Command(dockerBinary, "inspect", cleanedContainerID) - inspectOut, _, err := runCommandWithOutput(inspectCmd) - errorOut(err, t, fmt.Sprintf("out should've been a container id: %v %v", inspectOut, err)) + if out, _, err = runCommandWithOutput(inspectCmd); err != nil { + t.Fatalf("out should've been a container id: %s, %v", out, err) + } killCmd := exec.Command(dockerBinary, "kill", cleanedContainerID) - out, _, err = runCommandWithOutput(killCmd) - errorOut(err, t, fmt.Sprintf("failed to kill container: %v %v", out, err)) + if out, _, err = runCommandWithOutput(killCmd); err != nil { + t.Fatalf("failed to kill container: %s, %v", out, err) + } listRunningContainersCmd := exec.Command(dockerBinary, "ps", "-q") out, _, err = runCommandWithOutput(listRunningContainersCmd) - errorOut(err, t, fmt.Sprintf("failed to list running containers: %v", err)) + if err != nil { + t.Fatalf("failed to list running containers: %s, %v", out, err) + } if strings.Contains(out, cleanedContainerID) { t.Fatal("killed container is still running") @@ -38,21 +43,27 @@ func TestKillContainer(t *testing.T) { func TestKillDifferentUserContainer(t *testing.T) { runCmd := exec.Command(dockerBinary, "run", "-u", "daemon", "-d", "busybox", "sh", "-c", "sleep 10") out, _, err := runCommandWithOutput(runCmd) - errorOut(err, t, fmt.Sprintf("run failed with errors: %v", err)) + if err != nil { + t.Fatal(out, err) + } cleanedContainerID := stripTrailingCharacters(out) inspectCmd := exec.Command(dockerBinary, "inspect", cleanedContainerID) - inspectOut, _, err := runCommandWithOutput(inspectCmd) - errorOut(err, t, fmt.Sprintf("out should've been a container id: %v %v", inspectOut, err)) + if out, _, err = runCommandWithOutput(inspectCmd); err != nil { + t.Fatalf("out should've been a container id: %s, %v", out, err) + } killCmd := exec.Command(dockerBinary, "kill", cleanedContainerID) - out, _, err = runCommandWithOutput(killCmd) - errorOut(err, t, fmt.Sprintf("failed to 
kill container: %v %v", out, err)) + if out, _, err = runCommandWithOutput(killCmd); err != nil { + t.Fatalf("failed to kill container: %s, %v", out, err) + } listRunningContainersCmd := exec.Command(dockerBinary, "ps", "-q") out, _, err = runCommandWithOutput(listRunningContainersCmd) - errorOut(err, t, fmt.Sprintf("failed to list running containers: %v", err)) + if err != nil { + t.Fatalf("failed to list running containers: %s, %v", out, err) + } if strings.Contains(out, cleanedContainerID) { t.Fatal("killed container is still running") From b59d5a9dd2e0e474f04ce4eb31de41a1484d97bf Mon Sep 17 00:00:00 2001 From: Jessica Frazelle Date: Tue, 14 Oct 2014 12:46:26 -0700 Subject: [PATCH 047/592] Cleanup errorOut resp in info test Docker-DCO-1.1-Signed-off-by: Jessica Frazelle (github: jfrazelle) --- integration-cli/docker_cli_info_test.go | 5 +---- 1 file changed, 1 insertion(+), 4 deletions(-) diff --git a/integration-cli/docker_cli_info_test.go b/integration-cli/docker_cli_info_test.go index 32aa3a2125..ac6fa5f0a2 100644 --- a/integration-cli/docker_cli_info_test.go +++ b/integration-cli/docker_cli_info_test.go @@ -1,7 +1,6 @@ package main import ( - "fmt" "os/exec" "strings" "testing" @@ -11,10 +10,8 @@ import ( func TestInfoEnsureSucceeds(t *testing.T) { versionCmd := exec.Command(dockerBinary, "info") out, exitCode, err := runCommandWithOutput(versionCmd) - errorOut(err, t, fmt.Sprintf("encountered error while running docker info: %v", err)) - if err != nil || exitCode != 0 { - t.Fatal("failed to execute docker info") + t.Fatal("failed to execute docker info: %s, %v", out, err) } stringsToCheck := []string{"Containers:", "Execution Driver:", "Kernel Version:"} From f7b3a6b292a1f8dc7cfffe2e93622ea2c3419623 Mon Sep 17 00:00:00 2001 From: Jessica Frazelle Date: Tue, 14 Oct 2014 12:47:35 -0700 Subject: [PATCH 048/592] Cleanup errorOut resp in history test Docker-DCO-1.1-Signed-off-by: Jessica Frazelle (github: jfrazelle) --- integration-cli/docker_cli_history_test.go | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/integration-cli/docker_cli_history_test.go b/integration-cli/docker_cli_history_test.go index 8b1a73b59c..c6355374dd 100644 --- a/integration-cli/docker_cli_history_test.go +++ b/integration-cli/docker_cli_history_test.go @@ -46,9 +46,8 @@ RUN echo "Z"`, } out, exitCode, err := runCommandWithOutput(exec.Command(dockerBinary, "history", "testbuildhistory")) - errorOut(err, t, fmt.Sprintf("image history failed: %v %v", out, err)) if err != nil || exitCode != 0 { - t.Fatal("failed to get image history") + t.Fatal("failed to get image history: %s, %v", out, err) } actualValues := strings.Split(out, "\n")[1:27] From 7d38ae7041357e58edee5c26f721e39be9ff1fbd Mon Sep 17 00:00:00 2001 From: Jessica Frazelle Date: Tue, 14 Oct 2014 12:50:35 -0700 Subject: [PATCH 049/592] Cleanup errorOut resp in links test Docker-DCO-1.1-Signed-off-by: Jessica Frazelle (github: jfrazelle) --- integration-cli/docker_cli_links_test.go | 11 +++++++---- 1 file changed, 7 insertions(+), 4 deletions(-) diff --git a/integration-cli/docker_cli_links_test.go b/integration-cli/docker_cli_links_test.go index da6f5ac220..f327a52ec7 100644 --- a/integration-cli/docker_cli_links_test.go +++ b/integration-cli/docker_cli_links_test.go @@ -1,7 +1,6 @@ package main import ( - "fmt" "io/ioutil" "os" "os/exec" @@ -14,7 +13,9 @@ import ( func TestLinksEtcHostsRegularFile(t *testing.T) { runCmd := exec.Command(dockerBinary, "run", "--net=host", "busybox", "ls", "-la", "/etc/hosts") out, _, _, err := 
runCommandWithStdoutStderr(runCmd) - errorOut(err, t, out) + if err != nil { + t.Fatal(out, err) + } if !strings.HasPrefix(out, "-") { t.Errorf("/etc/hosts should be a regular file") @@ -28,7 +29,9 @@ func TestLinksEtcHostsRegularFile(t *testing.T) { func TestLinksEtcHostsContentMatch(t *testing.T) { runCmd := exec.Command(dockerBinary, "run", "--net=host", "busybox", "cat", "/etc/hosts") out, _, _, err := runCommandWithStdoutStderr(runCmd) - errorOut(err, t, out) + if err != nil { + t.Fatal(out, err) + } hosts, err := ioutil.ReadFile("/etc/hosts") if os.IsNotExist(err) { @@ -51,7 +54,7 @@ func TestLinksPingUnlinkedContainers(t *testing.T) { if exitCode == 0 { t.Fatal("run ping did not fail") } else if exitCode != 1 { - errorOut(err, t, fmt.Sprintf("run ping failed with errors: %v", err)) + t.Fatalf("run ping failed with errors: %v", err) } logDone("links - ping unlinked container") From ac62c5439ac6197c3afca66db4489f96296097b6 Mon Sep 17 00:00:00 2001 From: Jessica Frazelle Date: Tue, 14 Oct 2014 12:56:13 -0700 Subject: [PATCH 050/592] Cleanup errorOut resp in diff test Docker-DCO-1.1-Signed-off-by: Jessica Frazelle (github: jfrazelle) --- integration-cli/docker_cli_diff_test.go | 44 ++++++++++++++++--------- 1 file changed, 28 insertions(+), 16 deletions(-) diff --git a/integration-cli/docker_cli_diff_test.go b/integration-cli/docker_cli_diff_test.go index c6bf2bbd18..4068140ce2 100644 --- a/integration-cli/docker_cli_diff_test.go +++ b/integration-cli/docker_cli_diff_test.go @@ -1,7 +1,6 @@ package main import ( - "fmt" "os/exec" "strings" "testing" @@ -11,14 +10,18 @@ import ( func TestDiffFilenameShownInOutput(t *testing.T) { containerCmd := `echo foo > /root/bar` runCmd := exec.Command(dockerBinary, "run", "-d", "busybox", "sh", "-c", containerCmd) - cid, _, err := runCommandWithOutput(runCmd) - errorOut(err, t, fmt.Sprintf("failed to start the container: %v", err)) + out, _, err := runCommandWithOutput(runCmd) + if err != nil { + t.Fatalf("failed to start the container: %s, %v", out, err) + } - cleanCID := stripTrailingCharacters(cid) + cleanCID := stripTrailingCharacters(out) diffCmd := exec.Command(dockerBinary, "diff", cleanCID) - out, _, err := runCommandWithOutput(diffCmd) - errorOut(err, t, fmt.Sprintf("failed to run diff: %v %v", out, err)) + out, _, err = runCommandWithOutput(diffCmd) + if err != nil { + t.Fatalf("failed to run diff: %s %v", out, err) + } found := false for _, line := range strings.Split(out, "\n") { @@ -44,14 +47,18 @@ func TestDiffEnsureDockerinitFilesAreIgnored(t *testing.T) { for i := 0; i < 20; i++ { containerCmd := `echo foo > /root/bar` runCmd := exec.Command(dockerBinary, "run", "-d", "busybox", "sh", "-c", containerCmd) - cid, _, err := runCommandWithOutput(runCmd) - errorOut(err, t, fmt.Sprintf("%s", err)) + out, _, err := runCommandWithOutput(runCmd) + if err != nil { + t.Fatal(out, err) + } - cleanCID := stripTrailingCharacters(cid) + cleanCID := stripTrailingCharacters(out) diffCmd := exec.Command(dockerBinary, "diff", cleanCID) - out, _, err := runCommandWithOutput(diffCmd) - errorOut(err, t, fmt.Sprintf("failed to run diff: %v %v", out, err)) + out, _, err = runCommandWithOutput(diffCmd) + if err != nil { + t.Fatalf("failed to run diff: %s, %v", out, err) + } deleteContainer(cleanCID) @@ -67,13 +74,18 @@ func TestDiffEnsureDockerinitFilesAreIgnored(t *testing.T) { func TestDiffEnsureOnlyKmsgAndPtmx(t *testing.T) { runCmd := exec.Command(dockerBinary, "run", "-d", "busybox", "sleep", "0") - cid, _, err := runCommandWithOutput(runCmd) - 
errorOut(err, t, fmt.Sprintf("%s", err)) - cleanCID := stripTrailingCharacters(cid) + out, _, err := runCommandWithOutput(runCmd) + if err != nil { + t.Fatal(out, err) + } + + cleanCID := stripTrailingCharacters(out) diffCmd := exec.Command(dockerBinary, "diff", cleanCID) - out, _, err := runCommandWithOutput(diffCmd) - errorOut(err, t, fmt.Sprintf("failed to run diff: %v %v", out, err)) + out, _, err = runCommandWithOutput(diffCmd) + if err != nil { + t.Fatalf("failed to run diff: %s, %v", out, err) + } deleteContainer(cleanCID) expected := map[string]bool{ From 73eadbc6a1af80dfb1e933644a9a2927004e6d0f Mon Sep 17 00:00:00 2001 From: Jessica Frazelle Date: Tue, 14 Oct 2014 12:59:54 -0700 Subject: [PATCH 051/592] Cleanup errorOut resp in docker_utils.go Docker-DCO-1.1-Signed-off-by: Jessica Frazelle (github: jfrazelle) --- integration-cli/docker_utils.go | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/integration-cli/docker_utils.go b/integration-cli/docker_utils.go index b8058f60b3..c3e5361713 100644 --- a/integration-cli/docker_utils.go +++ b/integration-cli/docker_utils.go @@ -341,7 +341,9 @@ func cmd(t *testing.T, args ...string) (string, int, error) { func dockerCmd(t *testing.T, args ...string) (string, int, error) { out, status, err := runCommandWithOutput(exec.Command(dockerBinary, args...)) - errorOut(err, t, fmt.Sprintf("%q failed with errors: %v (%v)", strings.Join(args, " "), err, out)) + if err != nil { + t.Fatalf("%q failed with errors: %s, %v", strings.Join(args, " "), out, err) + } return out, status, err } From ef787eb824896471485522d343c62a6edb5c947f Mon Sep 17 00:00:00 2001 From: Jessica Frazelle Date: Tue, 14 Oct 2014 13:01:08 -0700 Subject: [PATCH 052/592] Cleanup errorOut resp in search tests Docker-DCO-1.1-Signed-off-by: Jessica Frazelle (github: jfrazelle) --- integration-cli/docker_cli_search_test.go | 5 +---- 1 file changed, 1 insertion(+), 4 deletions(-) diff --git a/integration-cli/docker_cli_search_test.go b/integration-cli/docker_cli_search_test.go index e8b9efdc19..946c34dc9c 100644 --- a/integration-cli/docker_cli_search_test.go +++ b/integration-cli/docker_cli_search_test.go @@ -1,7 +1,6 @@ package main import ( - "fmt" "os/exec" "strings" "testing" @@ -11,10 +10,8 @@ import ( func TestSearchOnCentralRegistry(t *testing.T) { searchCmd := exec.Command(dockerBinary, "search", "busybox") out, exitCode, err := runCommandWithOutput(searchCmd) - errorOut(err, t, fmt.Sprintf("encountered error while searching: %v", err)) - if err != nil || exitCode != 0 { - t.Fatal("failed to search on the central registry") + t.Fatal("failed to search on the central registry: %s, %v", out, err) } if !strings.Contains(out, "Busybox base image.") { From d33f2bdb114a9fd3c1413dbd4667440153df9431 Mon Sep 17 00:00:00 2001 From: Jessica Frazelle Date: Tue, 14 Oct 2014 13:04:37 -0700 Subject: [PATCH 053/592] Cleanup errorOut resp in ps tests Docker-DCO-1.1-Signed-off-by: Jessica Frazelle (github: jfrazelle) --- integration-cli/docker_cli_ps_test.go | 114 +++++++++++++++++++------- 1 file changed, 84 insertions(+), 30 deletions(-) diff --git a/integration-cli/docker_cli_ps_test.go b/integration-cli/docker_cli_ps_test.go index 4bcf95bf79..8be4dfb16f 100644 --- a/integration-cli/docker_cli_ps_test.go +++ b/integration-cli/docker_cli_ps_test.go @@ -10,34 +10,45 @@ import ( func TestPsListContainers(t *testing.T) { runCmd := exec.Command(dockerBinary, "run", "-d", "busybox", "top") out, _, err := runCommandWithOutput(runCmd) - errorOut(err, t, out) + if err != nil { + 
t.Fatal(out, err) + } firstID := stripTrailingCharacters(out) runCmd = exec.Command(dockerBinary, "run", "-d", "busybox", "top") out, _, err = runCommandWithOutput(runCmd) - errorOut(err, t, out) + if err != nil { + t.Fatal(out, err) + } secondID := stripTrailingCharacters(out) // not long running runCmd = exec.Command(dockerBinary, "run", "-d", "busybox", "true") out, _, err = runCommandWithOutput(runCmd) - errorOut(err, t, out) + if err != nil { + t.Fatal(out, err) + } thirdID := stripTrailingCharacters(out) runCmd = exec.Command(dockerBinary, "run", "-d", "busybox", "top") out, _, err = runCommandWithOutput(runCmd) - errorOut(err, t, out) + if err != nil { + t.Fatal(out, err) + } fourthID := stripTrailingCharacters(out) // make sure third one is not running runCmd = exec.Command(dockerBinary, "wait", thirdID) - out, _, err = runCommandWithOutput(runCmd) - errorOut(err, t, out) + if out, _, err = runCommandWithOutput(runCmd); err != nil { + t.Fatal(out, err) + } // all runCmd = exec.Command(dockerBinary, "ps", "-a") out, _, err = runCommandWithOutput(runCmd) - errorOut(err, t, out) + if err != nil { + t.Fatal(out, err) + } if !assertContainerList(out, []string{fourthID, thirdID, secondID, firstID}) { t.Error("Container list is not in the correct order") @@ -46,7 +57,9 @@ func TestPsListContainers(t *testing.T) { // running runCmd = exec.Command(dockerBinary, "ps") out, _, err = runCommandWithOutput(runCmd) - errorOut(err, t, out) + if err != nil { + t.Fatal(out, err) + } if !assertContainerList(out, []string{fourthID, secondID, firstID}) { t.Error("Container list is not in the correct order") @@ -57,7 +70,9 @@ func TestPsListContainers(t *testing.T) { // limit runCmd = exec.Command(dockerBinary, "ps", "-n=2", "-a") out, _, err = runCommandWithOutput(runCmd) - errorOut(err, t, out) + if err != nil { + t.Fatal(out, err) + } expected := []string{fourthID, thirdID} if !assertContainerList(out, expected) { @@ -66,7 +81,9 @@ func TestPsListContainers(t *testing.T) { runCmd = exec.Command(dockerBinary, "ps", "-n=2") out, _, err = runCommandWithOutput(runCmd) - errorOut(err, t, out) + if err != nil { + t.Fatal(out, err) + } if !assertContainerList(out, expected) { t.Error("Container list is not in the correct order") @@ -75,7 +92,9 @@ func TestPsListContainers(t *testing.T) { // since runCmd = exec.Command(dockerBinary, "ps", "--since", firstID, "-a") out, _, err = runCommandWithOutput(runCmd) - errorOut(err, t, out) + if err != nil { + t.Fatal(out, err) + } expected = []string{fourthID, thirdID, secondID} if !assertContainerList(out, expected) { @@ -84,7 +103,9 @@ func TestPsListContainers(t *testing.T) { runCmd = exec.Command(dockerBinary, "ps", "--since", firstID) out, _, err = runCommandWithOutput(runCmd) - errorOut(err, t, out) + if err != nil { + t.Fatal(out, err) + } if !assertContainerList(out, expected) { t.Error("Container list is not in the correct order") @@ -93,7 +114,9 @@ func TestPsListContainers(t *testing.T) { // before runCmd = exec.Command(dockerBinary, "ps", "--before", thirdID, "-a") out, _, err = runCommandWithOutput(runCmd) - errorOut(err, t, out) + if err != nil { + t.Fatal(out, err) + } expected = []string{secondID, firstID} if !assertContainerList(out, expected) { @@ -102,7 +125,9 @@ func TestPsListContainers(t *testing.T) { runCmd = exec.Command(dockerBinary, "ps", "--before", thirdID) out, _, err = runCommandWithOutput(runCmd) - errorOut(err, t, out) + if err != nil { + t.Fatal(out, err) + } if !assertContainerList(out, expected) { t.Error("Container list is not in 
the correct order") @@ -111,7 +136,9 @@ func TestPsListContainers(t *testing.T) { // since & before runCmd = exec.Command(dockerBinary, "ps", "--since", firstID, "--before", fourthID, "-a") out, _, err = runCommandWithOutput(runCmd) - errorOut(err, t, out) + if err != nil { + t.Fatal(out, err) + } expected = []string{thirdID, secondID} if !assertContainerList(out, expected) { @@ -120,7 +147,9 @@ func TestPsListContainers(t *testing.T) { runCmd = exec.Command(dockerBinary, "ps", "--since", firstID, "--before", fourthID) out, _, err = runCommandWithOutput(runCmd) - errorOut(err, t, out) + if err != nil { + t.Fatal(out, err) + } if !assertContainerList(out, expected) { t.Error("Container list is not in the correct order") } @@ -128,7 +157,9 @@ func TestPsListContainers(t *testing.T) { // since & limit runCmd = exec.Command(dockerBinary, "ps", "--since", firstID, "-n=2", "-a") out, _, err = runCommandWithOutput(runCmd) - errorOut(err, t, out) + if err != nil { + t.Fatal(out, err) + } expected = []string{fourthID, thirdID} if !assertContainerList(out, expected) { @@ -137,7 +168,9 @@ func TestPsListContainers(t *testing.T) { runCmd = exec.Command(dockerBinary, "ps", "--since", firstID, "-n=2") out, _, err = runCommandWithOutput(runCmd) - errorOut(err, t, out) + if err != nil { + t.Fatal(out, err) + } if !assertContainerList(out, expected) { t.Error("Container list is not in the correct order") @@ -146,7 +179,9 @@ func TestPsListContainers(t *testing.T) { // before & limit runCmd = exec.Command(dockerBinary, "ps", "--before", fourthID, "-n=1", "-a") out, _, err = runCommandWithOutput(runCmd) - errorOut(err, t, out) + if err != nil { + t.Fatal(out, err) + } expected = []string{thirdID} if !assertContainerList(out, expected) { @@ -155,7 +190,9 @@ func TestPsListContainers(t *testing.T) { runCmd = exec.Command(dockerBinary, "ps", "--before", fourthID, "-n=1") out, _, err = runCommandWithOutput(runCmd) - errorOut(err, t, out) + if err != nil { + t.Fatal(out, err) + } if !assertContainerList(out, expected) { t.Error("Container list is not in the correct order") @@ -164,7 +201,9 @@ func TestPsListContainers(t *testing.T) { // since & before & limit runCmd = exec.Command(dockerBinary, "ps", "--since", firstID, "--before", fourthID, "-n=1", "-a") out, _, err = runCommandWithOutput(runCmd) - errorOut(err, t, out) + if err != nil { + t.Fatal(out, err) + } expected = []string{thirdID} if !assertContainerList(out, expected) { @@ -173,7 +212,9 @@ func TestPsListContainers(t *testing.T) { runCmd = exec.Command(dockerBinary, "ps", "--since", firstID, "--before", fourthID, "-n=1") out, _, err = runCommandWithOutput(runCmd) - errorOut(err, t, out) + if err != nil { + t.Fatal(out, err) + } if !assertContainerList(out, expected) { t.Error("Container list is not in the correct order") @@ -205,7 +246,9 @@ func TestPsListContainersSize(t *testing.T) { name := "test_size" runCmd := exec.Command(dockerBinary, "run", "--name", name, "busybox", "sh", "-c", "echo 1 > test") out, _, err := runCommandWithOutput(runCmd) - errorOut(err, t, out) + if err != nil { + t.Fatal(out, err) + } id, err := getIDByName(name) if err != nil { t.Fatal(err) @@ -222,7 +265,9 @@ func TestPsListContainersSize(t *testing.T) { case <-time.After(3 * time.Second): t.Fatalf("Calling \"docker ps -s\" timed out!") } - errorOut(err, t, out) + if err != nil { + t.Fatal(out, err) + } lines := strings.Split(strings.Trim(out, "\n "), "\n") sizeIndex := strings.Index(lines[0], "SIZE") idIndex := strings.Index(lines[0], "CONTAINER ID") @@ -247,24 +292,31 @@ 
func TestPsListContainersFilterStatus(t *testing.T) { // start exited container runCmd := exec.Command(dockerBinary, "run", "-d", "busybox") out, _, err := runCommandWithOutput(runCmd) - errorOut(err, t, out) + if err != nil { + t.Fatal(out, err) + } firstID := stripTrailingCharacters(out) // make sure the exited cintainer is not running runCmd = exec.Command(dockerBinary, "wait", firstID) - out, _, err = runCommandWithOutput(runCmd) - errorOut(err, t, out) + if out, _, err = runCommandWithOutput(runCmd); err != nil { + t.Fatal(out, err) + } // start running container runCmd = exec.Command(dockerBinary, "run", "-d", "busybox", "sh", "-c", "sleep 360") out, _, err = runCommandWithOutput(runCmd) - errorOut(err, t, out) + if err != nil { + t.Fatal(out, err) + } secondID := stripTrailingCharacters(out) // filter containers by exited runCmd = exec.Command(dockerBinary, "ps", "-a", "-q", "--filter=status=exited") out, _, err = runCommandWithOutput(runCmd) - errorOut(err, t, out) + if err != nil { + t.Fatal(out, err) + } containerOut := strings.TrimSpace(out) if containerOut != firstID[:12] { t.Fatalf("Expected id %s, got %s for exited filter, output: %q", firstID[:12], containerOut, out) @@ -272,7 +324,9 @@ func TestPsListContainersFilterStatus(t *testing.T) { runCmd = exec.Command(dockerBinary, "ps", "-a", "-q", "--filter=status=running") out, _, err = runCommandWithOutput(runCmd) - errorOut(err, t, out) + if err != nil { + t.Fatal(out, err) + } containerOut = strings.TrimSpace(out) if containerOut != secondID[:12] { t.Fatalf("Expected id %s, got %s for running filter, output: %q", secondID[:12], containerOut, out) From ac24cabd9d05fe8843a382bb55fe5f2dcd4b9da2 Mon Sep 17 00:00:00 2001 From: Jessica Frazelle Date: Tue, 14 Oct 2014 13:06:06 -0700 Subject: [PATCH 054/592] Cleanup errorOut resp in inspect tests Docker-DCO-1.1-Signed-off-by: Jessica Frazelle (github: jfrazelle) --- integration-cli/docker_api_inspect_test.go | 5 +++-- integration-cli/docker_cli_inspect_test.go | 5 +++-- 2 files changed, 6 insertions(+), 4 deletions(-) diff --git a/integration-cli/docker_api_inspect_test.go b/integration-cli/docker_api_inspect_test.go index 42258d7aae..112299484c 100644 --- a/integration-cli/docker_api_inspect_test.go +++ b/integration-cli/docker_api_inspect_test.go @@ -2,7 +2,6 @@ package main import ( "encoding/json" - "fmt" "os/exec" "testing" ) @@ -10,7 +9,9 @@ import ( func TestInspectApiContainerResponse(t *testing.T) { runCmd := exec.Command(dockerBinary, "run", "-d", "busybox", "true") out, _, err := runCommandWithOutput(runCmd) - errorOut(err, t, fmt.Sprintf("failed to create a container: %v %v", out, err)) + if err != nil { + t.Fatalf("failed to create a container: %s, %v", out, err) + } cleanedContainerID := stripTrailingCharacters(out) diff --git a/integration-cli/docker_cli_inspect_test.go b/integration-cli/docker_cli_inspect_test.go index 30a722047a..bb99818bf9 100644 --- a/integration-cli/docker_cli_inspect_test.go +++ b/integration-cli/docker_cli_inspect_test.go @@ -10,13 +10,14 @@ func TestInspectImage(t *testing.T) { imageTest := "scratch" imageTestID := "511136ea3c5a64f264b78b5433614aec563103b4d4702f3ba7d4d2698e22c158" imagesCmd := exec.Command(dockerBinary, "inspect", "--format='{{.Id}}'", imageTest) - out, exitCode, err := runCommandWithOutput(imagesCmd) if exitCode != 0 || err != nil { - t.Fatalf("failed to inspect image") + t.Fatalf("failed to inspect image: %s, %v", out, err) } + if id := strings.TrimSuffix(out, "\n"); id != imageTestID { t.Fatalf("Expected id: %s for image: %s 
but received id: %s", imageTestID, imageTest, id) } + logDone("inspect - inspect an image") } From 0e3744ec8958faf88026338dcb096198399059e8 Mon Sep 17 00:00:00 2001 From: Jessica Frazelle Date: Tue, 14 Oct 2014 13:09:19 -0700 Subject: [PATCH 055/592] Cleanup errorOut resp in rmi tests Docker-DCO-1.1-Signed-off-by: Jessica Frazelle (github: jfrazelle) --- integration-cli/docker_cli_rmi_test.go | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/integration-cli/docker_cli_rmi_test.go b/integration-cli/docker_cli_rmi_test.go index 4fb150bab8..5cb126f822 100644 --- a/integration-cli/docker_cli_rmi_test.go +++ b/integration-cli/docker_cli_rmi_test.go @@ -1,7 +1,6 @@ package main import ( - "fmt" "os/exec" "strings" "testing" @@ -13,7 +12,9 @@ func TestRmiWithContainerFails(t *testing.T) { // create a container runCmd := exec.Command(dockerBinary, "run", "-d", "busybox", "true") out, _, err := runCommandWithOutput(runCmd) - errorOut(err, t, fmt.Sprintf("failed to create a container: %v %v", out, err)) + if err != nil { + t.Fatalf("failed to create a container: %s, %v", out, err) + } cleanedContainerID := stripTrailingCharacters(out) From 0c87424007aa1220842acca26cbadc7c1a05bef9 Mon Sep 17 00:00:00 2001 From: Jessica Frazelle Date: Tue, 14 Oct 2014 13:22:06 -0700 Subject: [PATCH 056/592] Cleanup errorOut resp in save tests Docker-DCO-1.1-Signed-off-by: Jessica Frazelle (github: jfrazelle) --- integration-cli/docker_cli_save_load_test.go | 124 ++++++++++++------- 1 file changed, 78 insertions(+), 46 deletions(-) diff --git a/integration-cli/docker_cli_save_load_test.go b/integration-cli/docker_cli_save_load_test.go index 8632247b7d..8bc0ec1c53 100644 --- a/integration-cli/docker_cli_save_load_test.go +++ b/integration-cli/docker_cli_save_load_test.go @@ -14,40 +14,50 @@ import ( func TestSaveAndLoadRepoStdout(t *testing.T) { runCmd := exec.Command(dockerBinary, "run", "-d", "busybox", "true") out, _, err := runCommandWithOutput(runCmd) - errorOut(err, t, fmt.Sprintf("failed to create a container: %v %v", out, err)) + if err != nil { + t.Fatalf("failed to create a container: %s, %v", out, err) + } cleanedContainerID := stripTrailingCharacters(out) repoName := "foobar-save-load-test" inspectCmd := exec.Command(dockerBinary, "inspect", cleanedContainerID) - out, _, err = runCommandWithOutput(inspectCmd) - errorOut(err, t, fmt.Sprintf("output should've been a container id: %v %v", cleanedContainerID, err)) + if out, _, err = runCommandWithOutput(inspectCmd); err != nil { + t.Fatalf("output should've been a container id: %s, %v", out, err) + } commitCmd := exec.Command(dockerBinary, "commit", cleanedContainerID, repoName) - out, _, err = runCommandWithOutput(commitCmd) - errorOut(err, t, fmt.Sprintf("failed to commit container: %v %v", out, err)) + if out, _, err = runCommandWithOutput(commitCmd); err != nil { + t.Fatalf("failed to commit container: %s, %v", out, err) + } inspectCmd = exec.Command(dockerBinary, "inspect", repoName) before, _, err := runCommandWithOutput(inspectCmd) - errorOut(err, t, fmt.Sprintf("the repo should exist before saving it: %v %v", before, err)) + if err != nil { + t.Fatalf("the repo should exist before saving it: %s, %v", before, err) + } saveCmdTemplate := `%v save %v > /tmp/foobar-save-load-test.tar` saveCmdFinal := fmt.Sprintf(saveCmdTemplate, dockerBinary, repoName) saveCmd := exec.Command("bash", "-c", saveCmdFinal) - out, _, err = runCommandWithOutput(saveCmd) - errorOut(err, t, fmt.Sprintf("failed to save repo: %v %v", out, err)) + if out, _, err 
= runCommandWithOutput(saveCmd); err != nil { + t.Fatalf("failed to save repo: %s, %v", out, err) + } deleteImages(repoName) loadCmdFinal := `cat /tmp/foobar-save-load-test.tar | docker load` loadCmd := exec.Command("bash", "-c", loadCmdFinal) - out, _, err = runCommandWithOutput(loadCmd) - errorOut(err, t, fmt.Sprintf("failed to load repo: %v %v", out, err)) + if out, _, err = runCommandWithOutput(loadCmd); err != nil { + t.Fatalf("failed to load repo: %s, %v", out, err) + } inspectCmd = exec.Command(dockerBinary, "inspect", repoName) after, _, err := runCommandWithOutput(inspectCmd) - errorOut(err, t, fmt.Sprintf("the repo should exist after loading it: %v %v", after, err)) + if err != nil { + t.Fatalf("the repo should exist after loading it: %s %v", after, err) + } if before != after { t.Fatalf("inspect is not the same after a save / load") @@ -67,20 +77,24 @@ func TestSaveSingleTag(t *testing.T) { tagCmdFinal := fmt.Sprintf("%v tag busybox:latest %v:latest", dockerBinary, repoName) tagCmd := exec.Command("bash", "-c", tagCmdFinal) - out, _, err := runCommandWithOutput(tagCmd) - errorOut(err, t, fmt.Sprintf("failed to tag repo: %v %v", out, err)) + if out, _, err := runCommandWithOutput(tagCmd); err != nil { + t.Fatalf("failed to tag repo: %s, %v", out, err) + } idCmdFinal := fmt.Sprintf("%v images -q --no-trunc %v", dockerBinary, repoName) idCmd := exec.Command("bash", "-c", idCmdFinal) - out, _, err = runCommandWithOutput(idCmd) - errorOut(err, t, fmt.Sprintf("failed to get repo ID: %v %v", out, err)) + out, _, err := runCommandWithOutput(idCmd) + if err != nil { + t.Fatalf("failed to get repo ID: %s, %v", out, err) + } cleanedImageID := stripTrailingCharacters(out) saveCmdFinal := fmt.Sprintf("%v save %v:latest | tar t | grep -E '(^repositories$|%v)'", dockerBinary, repoName, cleanedImageID) saveCmd := exec.Command("bash", "-c", saveCmdFinal) - out, _, err = runCommandWithOutput(saveCmd) - errorOut(err, t, fmt.Sprintf("failed to save repo with image ID and 'repositories' file: %v %v", out, err)) + if out, _, err = runCommandWithOutput(saveCmd); err != nil { + t.Fatalf("failed to save repo with image ID and 'repositories' file: %s, %v", out, err) + } deleteImages(repoName) @@ -92,27 +106,33 @@ func TestSaveImageId(t *testing.T) { tagCmdFinal := fmt.Sprintf("%v tag scratch:latest %v:latest", dockerBinary, repoName) tagCmd := exec.Command("bash", "-c", tagCmdFinal) - out, _, err := runCommandWithOutput(tagCmd) - errorOut(err, t, fmt.Sprintf("failed to tag repo: %v %v", out, err)) + if out, _, err := runCommandWithOutput(tagCmd); err != nil { + t.Fatalf("failed to tag repo: %s, %v", out, err) + } idLongCmdFinal := fmt.Sprintf("%v images -q --no-trunc %v", dockerBinary, repoName) idLongCmd := exec.Command("bash", "-c", idLongCmdFinal) - out, _, err = runCommandWithOutput(idLongCmd) - errorOut(err, t, fmt.Sprintf("failed to get repo ID: %v %v", out, err)) + out, _, err := runCommandWithOutput(idLongCmd) + if err != nil { + t.Fatalf("failed to get repo ID: %s, %v", out, err) + } cleanedLongImageID := stripTrailingCharacters(out) idShortCmdFinal := fmt.Sprintf("%v images -q %v", dockerBinary, repoName) idShortCmd := exec.Command("bash", "-c", idShortCmdFinal) out, _, err = runCommandWithOutput(idShortCmd) - errorOut(err, t, fmt.Sprintf("failed to get repo short ID: %v %v", out, err)) + if err != nil { + t.Fatalf("failed to get repo short ID: %s, %v", out, err) + } cleanedShortImageID := stripTrailingCharacters(out) saveCmdFinal := fmt.Sprintf("%v save %v | tar t | grep %v", dockerBinary, 
cleanedShortImageID, cleanedLongImageID) saveCmd := exec.Command("bash", "-c", saveCmdFinal) - out, _, err = runCommandWithOutput(saveCmd) - errorOut(err, t, fmt.Sprintf("failed to save repo with image ID: %v %v", out, err)) + if out, _, err = runCommandWithOutput(saveCmd); err != nil { + t.Fatalf("failed to save repo with image ID: %s, %v", out, err) + } deleteImages(repoName) @@ -123,40 +143,50 @@ func TestSaveImageId(t *testing.T) { func TestSaveAndLoadRepoFlags(t *testing.T) { runCmd := exec.Command(dockerBinary, "run", "-d", "busybox", "true") out, _, err := runCommandWithOutput(runCmd) - errorOut(err, t, fmt.Sprintf("failed to create a container: %v %v", out, err)) + if err != nil { + t.Fatalf("failed to create a container: %s, %v", out, err) + } cleanedContainerID := stripTrailingCharacters(out) repoName := "foobar-save-load-test" inspectCmd := exec.Command(dockerBinary, "inspect", cleanedContainerID) - out, _, err = runCommandWithOutput(inspectCmd) - errorOut(err, t, fmt.Sprintf("output should've been a container id: %v %v", cleanedContainerID, err)) + if out, _, err = runCommandWithOutput(inspectCmd); err != nil { + t.Fatalf("output should've been a container id: %s, %v", out, err) + } commitCmd := exec.Command(dockerBinary, "commit", cleanedContainerID, repoName) - out, _, err = runCommandWithOutput(commitCmd) - errorOut(err, t, fmt.Sprintf("failed to commit container: %v %v", out, err)) + if out, _, err = runCommandWithOutput(commitCmd); err != nil { + t.Fatalf("failed to commit container: %s, %v", out, err) + } inspectCmd = exec.Command(dockerBinary, "inspect", repoName) before, _, err := runCommandWithOutput(inspectCmd) - errorOut(err, t, fmt.Sprintf("the repo should exist before saving it: %v %v", before, err)) + if err != nil { + t.Fatalf("the repo should exist before saving it: %s, %v", before, err) + } saveCmdTemplate := `%v save -o /tmp/foobar-save-load-test.tar %v` saveCmdFinal := fmt.Sprintf(saveCmdTemplate, dockerBinary, repoName) saveCmd := exec.Command("bash", "-c", saveCmdFinal) - out, _, err = runCommandWithOutput(saveCmd) - errorOut(err, t, fmt.Sprintf("failed to save repo: %v %v", out, err)) + if out, _, err = runCommandWithOutput(saveCmd); err != nil { + t.Fatalf("failed to save repo: %s, %v", out, err) + } deleteImages(repoName) loadCmdFinal := `docker load -i /tmp/foobar-save-load-test.tar` loadCmd := exec.Command("bash", "-c", loadCmdFinal) - out, _, err = runCommandWithOutput(loadCmd) - errorOut(err, t, fmt.Sprintf("failed to load repo: %v %v", out, err)) + if out, _, err = runCommandWithOutput(loadCmd); err != nil { + t.Fatalf("failed to load repo: %s, %v", out, err) + } inspectCmd = exec.Command(dockerBinary, "inspect", repoName) after, _, err := runCommandWithOutput(inspectCmd) - errorOut(err, t, fmt.Sprintf("the repo should exist after loading it: %v %v", after, err)) + if err != nil { + t.Fatalf("the repo should exist after loading it: %s, %v", after, err) + } if before != after { t.Fatalf("inspect is not the same after a save / load") @@ -177,18 +207,21 @@ func TestSaveMultipleNames(t *testing.T) { // Make one image tagCmdFinal := fmt.Sprintf("%v tag scratch:latest %v-one:latest", dockerBinary, repoName) tagCmd := exec.Command("bash", "-c", tagCmdFinal) - out, _, err := runCommandWithOutput(tagCmd) - errorOut(err, t, fmt.Sprintf("failed to tag repo: %v %v", out, err)) + if out, _, err := runCommandWithOutput(tagCmd); err != nil { + t.Fatalf("failed to tag repo: %s, %v", out, err) + } // Make two images tagCmdFinal = fmt.Sprintf("%v tag scratch:latest 
%v-two:latest", dockerBinary, repoName) tagCmd = exec.Command("bash", "-c", tagCmdFinal) - out, _, err = runCommandWithOutput(tagCmd) - errorOut(err, t, fmt.Sprintf("failed to tag repo: %v %v", out, err)) + if out, _, err := runCommandWithOutput(tagCmd); err != nil { + t.Fatalf("failed to tag repo: %s, %v", out, err) + } saveCmdFinal := fmt.Sprintf("%v save %v-one %v-two:latest | tar xO repositories | grep -q -E '(-one|-two)'", dockerBinary, repoName, repoName) saveCmd := exec.Command("bash", "-c", saveCmdFinal) - out, _, err = runCommandWithOutput(saveCmd) - errorOut(err, t, fmt.Sprintf("failed to save multiple repos: %v %v", out, err)) + if out, _, err := runCommandWithOutput(saveCmd); err != nil { + t.Fatalf("failed to save multiple repos: %s, %v", out, err) + } deleteImages(repoName) @@ -202,12 +235,12 @@ func TestSaveDirectoryPermissions(t *testing.T) { name := "save-directory-permissions" tmpDir, err := ioutil.TempDir("", "save-layers-with-directories") - extractionDirectory := filepath.Join(tmpDir, "image-extraction-dir") - os.Mkdir(extractionDirectory, 0777) - if err != nil { t.Errorf("failed to create temporary directory: %s", err) } + extractionDirectory := filepath.Join(tmpDir, "image-extraction-dir") + os.Mkdir(extractionDirectory, 0777) + defer os.RemoveAll(tmpDir) defer deleteImages(name) _, err = buildImage(name, @@ -221,8 +254,7 @@ func TestSaveDirectoryPermissions(t *testing.T) { saveCmdFinal := fmt.Sprintf("%s save %s | tar -xf - -C %s", dockerBinary, name, extractionDirectory) saveCmd := exec.Command("bash", "-c", saveCmdFinal) - out, _, err := runCommandWithOutput(saveCmd) - if err != nil { + if out, _, err := runCommandWithOutput(saveCmd); err != nil { t.Errorf("failed to save and extract image: %s", out) } From b1e3c9e9cd49591f8efcbcd00d45bcd98fbe2831 Mon Sep 17 00:00:00 2001 From: Jessica Frazelle Date: Tue, 14 Oct 2014 13:51:12 -0700 Subject: [PATCH 057/592] Cleanup errorOut resp in build tests Docker-DCO-1.1-Signed-off-by: Jessica Frazelle (github: jfrazelle) --- integration-cli/docker_cli_build_test.go | 170 +++++++++++------------ 1 file changed, 79 insertions(+), 91 deletions(-) diff --git a/integration-cli/docker_cli_build_test.go b/integration-cli/docker_cli_build_test.go index e456d579ab..c248477c4c 100644 --- a/integration-cli/docker_cli_build_test.go +++ b/integration-cli/docker_cli_build_test.go @@ -20,7 +20,9 @@ func TestBuildOnBuildForbiddenMaintainerInSourceImage(t *testing.T) { defer deleteImages(name) createCmd := exec.Command(dockerBinary, "create", "busybox", "true") out, _, _, err := runCommandWithStdoutStderr(createCmd) - errorOut(err, t, out) + if err != nil { + t.Fatal(out, err) + } cleanedContainerID := stripTrailingCharacters(out) @@ -49,7 +51,9 @@ func TestBuildOnBuildForbiddenFromInSourceImage(t *testing.T) { defer deleteImages(name) createCmd := exec.Command(dockerBinary, "create", "busybox", "true") out, _, _, err := runCommandWithStdoutStderr(createCmd) - errorOut(err, t, out) + if err != nil { + t.Fatal(out, err) + } cleanedContainerID := stripTrailingCharacters(out) @@ -78,7 +82,9 @@ func TestBuildOnBuildForbiddenChainedInSourceImage(t *testing.T) { defer deleteImages(name) createCmd := exec.Command(dockerBinary, "create", "busybox", "true") out, _, _, err := runCommandWithStdoutStderr(createCmd) - errorOut(err, t, out) + if err != nil { + t.Fatal(out, err) + } cleanedContainerID := stripTrailingCharacters(out) @@ -299,11 +305,8 @@ RUN [ $(ls -l /exists/exists_file | awk '{print $3":"$4}') = 'dockerio:dockerio' func 
TestBuildCopyAddMultipleFiles(t *testing.T) { buildDirectory := filepath.Join(workingDirectory, "build_tests", "TestCopy") - out, exitCode, err := dockerCmdInDir(t, buildDirectory, "build", "-t", "testaddimg", "MultipleFiles") - errorOut(err, t, fmt.Sprintf("build failed to complete: %v %v", out, err)) - - if err != nil || exitCode != 0 { - t.Fatal("failed to build the image") + if out, _, err := dockerCmdInDir(t, buildDirectory, "build", "-t", "testaddimg", "MultipleFiles"); err != nil { + t.Fatalf("build failed to complete: %s, %v", out, err) } deleteImages("testaddimg") @@ -620,11 +623,8 @@ func TestBuildCopySingleFileToRoot(t *testing.T) { t.Fatal(err) } f.Close() - out, exitCode, err := dockerCmdInDir(t, buildDirectory, "build", "-t", "testcopyimg", ".") - errorOut(err, t, fmt.Sprintf("build failed to complete: %v %v", out, err)) - - if err != nil || exitCode != 0 { - t.Fatal("failed to build the image") + if out, _, err := dockerCmdInDir(t, buildDirectory, "build", "-t", "testcopyimg", "."); err != nil { + t.Fatalf("build failed to complete: %s, %v", out, err) } deleteImages("testcopyimg") @@ -650,9 +650,8 @@ func TestBuildCopySingleFileToWorkdir(t *testing.T) { t.Fatal(err) } f.Close() - _, exitCode, err := dockerCmdInDirWithTimeout(5*time.Second, buildDirectory, "build", "-t", "testcopyimg", ".") - if err != nil || exitCode != 0 { - t.Fatalf("build failed: %s", err) + if out, _, err := dockerCmdInDirWithTimeout(5*time.Second, buildDirectory, "build", "-t", "testcopyimg", "."); err != nil { + t.Fatalf("build failed to complete: %s, %v", out, err) } deleteImages("testcopyimg") @@ -662,11 +661,8 @@ func TestBuildCopySingleFileToWorkdir(t *testing.T) { func TestBuildCopySingleFileToExistDir(t *testing.T) { buildDirectory := filepath.Join(workingDirectory, "build_tests", "TestCopy") - out, exitCode, err := dockerCmdInDir(t, buildDirectory, "build", "-t", "testcopyimg", "SingleFileToExistDir") - errorOut(err, t, fmt.Sprintf("build failed to complete: %v %v", out, err)) - - if err != nil || exitCode != 0 { - t.Fatal("failed to build the image") + if out, _, err := dockerCmdInDir(t, buildDirectory, "build", "-t", "testcopyimg", "SingleFileToExistDir"); err != nil { + t.Fatalf("build failed to complete: %s, %v", out, err) } deleteImages("testcopyimg") @@ -676,11 +672,8 @@ func TestBuildCopySingleFileToExistDir(t *testing.T) { func TestBuildCopySingleFileToNonExistDir(t *testing.T) { buildDirectory := filepath.Join(workingDirectory, "build_tests", "TestCopy") - out, exitCode, err := dockerCmdInDir(t, buildDirectory, "build", "-t", "testcopyimg", "SingleFileToNonExistDir") - errorOut(err, t, fmt.Sprintf("build failed to complete: %v %v", out, err)) - - if err != nil || exitCode != 0 { - t.Fatal("failed to build the image") + if out, _, err := dockerCmdInDir(t, buildDirectory, "build", "-t", "testcopyimg", "SingleFileToNonExistDir"); err != nil { + t.Fatalf("build failed to complete: %s, %v", out, err) } deleteImages("testcopyimg") @@ -690,11 +683,8 @@ func TestBuildCopySingleFileToNonExistDir(t *testing.T) { func TestBuildCopyDirContentToRoot(t *testing.T) { buildDirectory := filepath.Join(workingDirectory, "build_tests", "TestCopy") - out, exitCode, err := dockerCmdInDir(t, buildDirectory, "build", "-t", "testcopyimg", "DirContentToRoot") - errorOut(err, t, fmt.Sprintf("build failed to complete: %v %v", out, err)) - - if err != nil || exitCode != 0 { - t.Fatal("failed to build the image") + if out, _, err := dockerCmdInDir(t, buildDirectory, "build", "-t", "testcopyimg", "DirContentToRoot"); 
err != nil { + t.Fatalf("build failed to complete: %s, %v", out, err) } deleteImages("testcopyimg") @@ -704,11 +694,8 @@ func TestBuildCopyDirContentToRoot(t *testing.T) { func TestBuildCopyDirContentToExistDir(t *testing.T) { buildDirectory := filepath.Join(workingDirectory, "build_tests", "TestCopy") - out, exitCode, err := dockerCmdInDir(t, buildDirectory, "build", "-t", "testcopyimg", "DirContentToExistDir") - errorOut(err, t, fmt.Sprintf("build failed to complete: %v %v", out, err)) - - if err != nil || exitCode != 0 { - t.Fatal("failed to build the image") + if out, _, err := dockerCmdInDir(t, buildDirectory, "build", "-t", "testcopyimg", "DirContentToExistDir"); err != nil { + t.Fatalf("build failed to complete: %s, %v", out, err) } deleteImages("testcopyimg") @@ -737,11 +724,8 @@ func TestBuildCopyWholeDirToRoot(t *testing.T) { t.Fatal(err) } f.Close() - out, exitCode, err := dockerCmdInDir(t, buildDirectory, "build", "-t", "testcopyimg", ".") - errorOut(err, t, fmt.Sprintf("build failed to complete: %v %v", out, err)) - - if err != nil || exitCode != 0 { - t.Fatal("failed to build the image") + if out, _, err := dockerCmdInDir(t, buildDirectory, "build", "-t", "testcopyimg", "."); err != nil { + t.Fatalf("build failed to complete: %s, %v", out, err) } deleteImages("testcopyimg") @@ -751,11 +735,8 @@ func TestBuildCopyWholeDirToRoot(t *testing.T) { func TestBuildCopyEtcToRoot(t *testing.T) { buildDirectory := filepath.Join(workingDirectory, "build_tests", "TestCopy") - out, exitCode, err := dockerCmdInDir(t, buildDirectory, "build", "-t", "testcopyimg", "EtcToRoot") - errorOut(err, t, fmt.Sprintf("build failed to complete: %v %v", out, err)) - - if err != nil || exitCode != 0 { - t.Fatal("failed to build the image") + if out, _, err := dockerCmdInDir(t, buildDirectory, "build", "-t", "testcopyimg", "EtcToRoot"); err != nil { + t.Fatalf("build failed to complete: %s, %v", out, err) } deleteImages("testcopyimg") @@ -766,9 +747,7 @@ func TestBuildCopyDisallowRemote(t *testing.T) { buildDirectory := filepath.Join(workingDirectory, "build_tests", "TestCopy") buildCmd := exec.Command(dockerBinary, "build", "-t", "testcopyimg", "DisallowRemote") buildCmd.Dir = buildDirectory - out, exitCode, err := runCommandWithOutput(buildCmd) - - if err == nil || exitCode == 0 { + if out, _, err := runCommandWithOutput(buildCmd); err == nil { t.Fatalf("building the image should've failed; output: %s", out) } @@ -790,14 +769,16 @@ func TestBuildWithInaccessibleFilesInContext(t *testing.T) { // This is used to ensure we detect inaccessible files early during build in the cli client pathToFileWithoutReadAccess := filepath.Join(ctx.Dir, "fileWithoutReadAccess") - err = os.Chown(pathToFileWithoutReadAccess, 0, 0) - errorOut(err, t, fmt.Sprintf("failed to chown file to root: %s", err)) - err = os.Chmod(pathToFileWithoutReadAccess, 0700) - errorOut(err, t, fmt.Sprintf("failed to chmod file to 700: %s", err)) + if err = os.Chown(pathToFileWithoutReadAccess, 0, 0); err != nil { + t.Fatalf("failed to chown file to root: %s", err) + } + if err = os.Chmod(pathToFileWithoutReadAccess, 0700); err != nil { + t.Fatalf("failed to chmod file to 700: %s", err) + } buildCmd := exec.Command("su", "unprivilegeduser", "-c", fmt.Sprintf("%s build -t %s .", dockerBinary, name)) buildCmd.Dir = ctx.Dir - out, exitCode, err := runCommandWithOutput(buildCmd) - if err == nil || exitCode == 0 { + out, _, err := runCommandWithOutput(buildCmd) + if err == nil { t.Fatalf("build should have failed: %s %s", err, out) } @@ -822,17 +803,20 
@@ func TestBuildWithInaccessibleFilesInContext(t *testing.T) { pathToDirectoryWithoutReadAccess := filepath.Join(ctx.Dir, "directoryWeCantStat") pathToFileInDirectoryWithoutReadAccess := filepath.Join(pathToDirectoryWithoutReadAccess, "bar") - err = os.Chown(pathToDirectoryWithoutReadAccess, 0, 0) - errorOut(err, t, fmt.Sprintf("failed to chown directory to root: %s", err)) - err = os.Chmod(pathToDirectoryWithoutReadAccess, 0444) - errorOut(err, t, fmt.Sprintf("failed to chmod directory to 755: %s", err)) - err = os.Chmod(pathToFileInDirectoryWithoutReadAccess, 0700) - errorOut(err, t, fmt.Sprintf("failed to chmod file to 444: %s", err)) + if err = os.Chown(pathToDirectoryWithoutReadAccess, 0, 0); err != nil { + t.Fatalf("failed to chown directory to root: %s", err) + } + if err = os.Chmod(pathToDirectoryWithoutReadAccess, 0444); err != nil { + t.Fatalf("failed to chmod directory to 755: %s", err) + } + if err = os.Chmod(pathToFileInDirectoryWithoutReadAccess, 0700); err != nil { + t.Fatalf("failed to chmod file to 444: %s", err) + } buildCmd := exec.Command("su", "unprivilegeduser", "-c", fmt.Sprintf("%s build -t %s .", dockerBinary, name)) buildCmd.Dir = ctx.Dir - out, exitCode, err := runCommandWithOutput(buildCmd) - if err == nil || exitCode == 0 { + out, _, err := runCommandWithOutput(buildCmd) + if err == nil { t.Fatalf("build should have failed: %s %s", err, out) } @@ -878,17 +862,19 @@ func TestBuildWithInaccessibleFilesInContext(t *testing.T) { // This is used to ensure we don't try to add inaccessible files when they are ignored by a .dockerignore pattern pathToDirectoryWithoutReadAccess := filepath.Join(ctx.Dir, "directoryWeCantStat") pathToFileInDirectoryWithoutReadAccess := filepath.Join(pathToDirectoryWithoutReadAccess, "bar") - err = os.Chown(pathToDirectoryWithoutReadAccess, 0, 0) - errorOut(err, t, fmt.Sprintf("failed to chown directory to root: %s", err)) - err = os.Chmod(pathToDirectoryWithoutReadAccess, 0444) - errorOut(err, t, fmt.Sprintf("failed to chmod directory to 755: %s", err)) - err = os.Chmod(pathToFileInDirectoryWithoutReadAccess, 0700) - errorOut(err, t, fmt.Sprintf("failed to chmod file to 444: %s", err)) + if err = os.Chown(pathToDirectoryWithoutReadAccess, 0, 0); err != nil { + t.Fatalf("failed to chown directory to root: %s", err) + } + if err = os.Chmod(pathToDirectoryWithoutReadAccess, 0444); err != nil { + t.Fatalf("failed to chmod directory to 755: %s", err) + } + if err = os.Chmod(pathToFileInDirectoryWithoutReadAccess, 0700); err != nil { + t.Fatalf("failed to chmod file to 444: %s", err) + } buildCmd := exec.Command("su", "unprivilegeduser", "-c", fmt.Sprintf("%s build -t %s .", dockerBinary, name)) buildCmd.Dir = ctx.Dir - out, exitCode, err := runCommandWithOutput(buildCmd) - if err != nil || exitCode != 0 { + if out, _, err := runCommandWithOutput(buildCmd); err != nil { t.Fatalf("build should have worked: %s %s", err, out) } @@ -913,10 +899,8 @@ func TestBuildForceRm(t *testing.T) { buildCmd := exec.Command(dockerBinary, "build", "-t", name, "--force-rm", ".") buildCmd.Dir = ctx.Dir - _, exitCode, err := runCommandWithOutput(buildCmd) - - if err == nil || exitCode == 0 { - t.Fatal("failed to build the image") + if out, _, err := runCommandWithOutput(buildCmd); err == nil { + t.Fatal("failed to build the image: %s, %v", out, err) } containerCountAfter, err := getContainerCount() @@ -945,9 +929,9 @@ func TestBuildRm(t *testing.T) { t.Fatalf("failed to get the container count: %s", err) } - out, exitCode, err := dockerCmdInDir(t, ctx.Dir, 
"build", "--rm", "-t", name, ".") + out, _, err := dockerCmdInDir(t, ctx.Dir, "build", "--rm", "-t", name, ".") - if err != nil || exitCode != 0 { + if err != nil { t.Fatal("failed to build the image", out) } @@ -968,9 +952,9 @@ func TestBuildRm(t *testing.T) { t.Fatalf("failed to get the container count: %s", err) } - out, exitCode, err := dockerCmdInDir(t, ctx.Dir, "build", "-t", name, ".") + out, _, err := dockerCmdInDir(t, ctx.Dir, "build", "-t", name, ".") - if err != nil || exitCode != 0 { + if err != nil { t.Fatal("failed to build the image", out) } @@ -991,9 +975,9 @@ func TestBuildRm(t *testing.T) { t.Fatalf("failed to get the container count: %s", err) } - out, exitCode, err := dockerCmdInDir(t, ctx.Dir, "build", "--rm=false", "-t", name, ".") + out, _, err := dockerCmdInDir(t, ctx.Dir, "build", "--rm=false", "-t", name, ".") - if err != nil || exitCode != 0 { + if err != nil { t.Fatal("failed to build the image", out) } @@ -1335,7 +1319,9 @@ func TestBuildOnBuildLimitedInheritence(t *testing.T) { } out1, _, err := dockerCmdInDir(t, ctx.Dir, "build", "-t", name1, ".") - errorOut(err, t, fmt.Sprintf("build failed to complete: %v %v", out1, err)) + if err != nil { + t.Fatalf("build failed to complete: %s, %v", out1, err) + } defer deleteImages(name1) } { @@ -1349,7 +1335,9 @@ func TestBuildOnBuildLimitedInheritence(t *testing.T) { } out2, _, err = dockerCmdInDir(t, ctx.Dir, "build", "-t", name2, ".") - errorOut(err, t, fmt.Sprintf("build failed to complete: %v %v", out2, err)) + if err != nil { + t.Fatalf("build failed to complete: %s, %v", out2, err) + } defer deleteImages(name2) } { @@ -1363,7 +1351,10 @@ func TestBuildOnBuildLimitedInheritence(t *testing.T) { } out3, _, err = dockerCmdInDir(t, ctx.Dir, "build", "-t", name3, ".") - errorOut(err, t, fmt.Sprintf("build failed to complete: %v %v", out3, err)) + if err != nil { + t.Fatalf("build failed to complete: %s, %v", out3, err) + } + defer deleteImages(name3) } @@ -1763,8 +1754,7 @@ CMD ["cat", "/foo"]`, defer deleteImages(name) buildCmd.Stdin = context - out, exitCode, err := runCommandWithOutput(buildCmd) - if err != nil || exitCode != 0 { + if out, _, err := runCommandWithOutput(buildCmd); err != nil { t.Fatalf("build failed to complete: %v %v", out, err) } logDone(fmt.Sprintf("build - build an image with a context tar, compression: %v", compression)) @@ -1782,13 +1772,11 @@ func TestBuildNoContext(t *testing.T) { buildCmd := exec.Command(dockerBinary, "build", "-t", "nocontext", "-") buildCmd.Stdin = strings.NewReader("FROM busybox\nCMD echo ok\n") - out, exitCode, err := runCommandWithOutput(buildCmd) - if err != nil || exitCode != 0 { + if out, _, err := runCommandWithOutput(buildCmd); err != nil { t.Fatalf("build failed to complete: %v %v", out, err) } - out, exitCode, err = cmd(t, "run", "nocontext") - if out != "ok\n" { + if out, _, err := cmd(t, "run", "nocontext"); out != "ok\n" || err != nil { t.Fatalf("run produced invalid output: %q, expected %q", out, "ok") } @@ -2717,7 +2705,7 @@ func TestBuildExoticShellInterpolation(t *testing.T) { _, err := buildImage(name, ` FROM busybox - + ENV SOME_VAR a.b.c RUN [ "$SOME_VAR" = 'a.b.c' ] From 764030ec92f8cf8b47079c26d7c1393bd963fbf4 Mon Sep 17 00:00:00 2001 From: Jessica Frazelle Date: Tue, 14 Oct 2014 13:59:44 -0700 Subject: [PATCH 058/592] Cleanup errorOut resp in top tests Docker-DCO-1.1-Signed-off-by: Jessica Frazelle (github: jfrazelle) --- integration-cli/docker_cli_top_test.go | 61 ++++++++++++++++---------- 1 file changed, 39 insertions(+), 22 deletions(-) diff 
--git a/integration-cli/docker_cli_top_test.go b/integration-cli/docker_cli_top_test.go index f3ff15bceb..de0d3d2e89 100644 --- a/integration-cli/docker_cli_top_test.go +++ b/integration-cli/docker_cli_top_test.go @@ -1,7 +1,6 @@ package main import ( - "fmt" "os/exec" "strings" "testing" @@ -10,17 +9,21 @@ import ( func TestTopMultipleArgs(t *testing.T) { runCmd := exec.Command(dockerBinary, "run", "-i", "-d", "busybox", "sleep", "20") out, _, err := runCommandWithOutput(runCmd) - errorOut(err, t, fmt.Sprintf("failed to start the container: %v", err)) + if err != nil { + t.Fatalf("failed to start the container: %s, %v", out, err) + } cleanedContainerID := stripTrailingCharacters(out) defer deleteContainer(cleanedContainerID) topCmd := exec.Command(dockerBinary, "top", cleanedContainerID, "-o", "pid") out, _, err = runCommandWithOutput(topCmd) - errorOut(err, t, fmt.Sprintf("failed to run top: %v %v", out, err)) + if err != nil { + t.Fatalf("failed to run top: %s, %v", out, err) + } if !strings.Contains(out, "PID") { - errorOut(nil, t, fmt.Sprintf("did not see PID after top -o pid")) + t.Fatalf("did not see PID after top -o pid: %s", out) } logDone("top - multiple arguments") @@ -29,27 +32,34 @@ func TestTopMultipleArgs(t *testing.T) { func TestTopNonPrivileged(t *testing.T) { runCmd := exec.Command(dockerBinary, "run", "-i", "-d", "busybox", "sleep", "20") out, _, err := runCommandWithOutput(runCmd) - errorOut(err, t, fmt.Sprintf("failed to start the container: %v", err)) + if err != nil { + t.Fatalf("failed to start the container: %s, %v", out, err) + } cleanedContainerID := stripTrailingCharacters(out) topCmd := exec.Command(dockerBinary, "top", cleanedContainerID) - out, _, err = runCommandWithOutput(topCmd) - errorOut(err, t, fmt.Sprintf("failed to run top: %v %v", out, err)) + out1, _, err := runCommandWithOutput(topCmd) + if err != nil { + t.Fatalf("failed to run top: %s, %v", out1, err) + } topCmd = exec.Command(dockerBinary, "top", cleanedContainerID) - out2, _, err2 := runCommandWithOutput(topCmd) - errorOut(err2, t, fmt.Sprintf("failed to run top: %v %v", out2, err2)) + out2, _, err := runCommandWithOutput(topCmd) + if err != nil { + t.Fatalf("failed to run top: %s, %v", out2, err) + } killCmd := exec.Command(dockerBinary, "kill", cleanedContainerID) - _, err = runCommand(killCmd) - errorOut(err, t, fmt.Sprintf("failed to kill container: %v", err)) + if out, _, err = runCommandWithOutput(killCmd); err != nil { + t.Fatalf("failed to kill container: %s, %v", out, err) + } deleteContainer(cleanedContainerID) - if !strings.Contains(out, "sleep 20") && !strings.Contains(out2, "sleep 20") { + if !strings.Contains(out1, "sleep 20") && !strings.Contains(out2, "sleep 20") { t.Fatal("top should've listed `sleep 20` in the process list, but failed twice") - } else if !strings.Contains(out, "sleep 20") { + } else if !strings.Contains(out1, "sleep 20") { t.Fatal("top should've listed `sleep 20` in the process list, but failed the first time") } else if !strings.Contains(out2, "sleep 20") { t.Fatal("top should've listed `sleep 20` in the process list, but failed the second itime") @@ -61,27 +71,34 @@ func TestTopNonPrivileged(t *testing.T) { func TestTopPrivileged(t *testing.T) { runCmd := exec.Command(dockerBinary, "run", "--privileged", "-i", "-d", "busybox", "sleep", "20") out, _, err := runCommandWithOutput(runCmd) - errorOut(err, t, fmt.Sprintf("failed to start the container: %v", err)) + if err != nil { + t.Fatalf("failed to start the container: %s, %v", out, err) + } cleanedContainerID 
:= stripTrailingCharacters(out)

 	topCmd := exec.Command(dockerBinary, "top", cleanedContainerID)
-	out, _, err = runCommandWithOutput(topCmd)
-	errorOut(err, t, fmt.Sprintf("failed to run top: %v %v", out, err))
+	out1, _, err := runCommandWithOutput(topCmd)
+	if err != nil {
+		t.Fatalf("failed to run top: %s, %v", out1, err)
+	}

 	topCmd = exec.Command(dockerBinary, "top", cleanedContainerID)
-	out2, _, err2 := runCommandWithOutput(topCmd)
-	errorOut(err2, t, fmt.Sprintf("failed to run top: %v %v", out2, err2))
+	out2, _, err := runCommandWithOutput(topCmd)
+	if err != nil {
+		t.Fatalf("failed to run top: %s, %v", out2, err)
+	}

 	killCmd := exec.Command(dockerBinary, "kill", cleanedContainerID)
-	_, err = runCommand(killCmd)
-	errorOut(err, t, fmt.Sprintf("failed to kill container: %v", err))
+	if out, _, err = runCommandWithOutput(killCmd); err != nil {
+		t.Fatalf("failed to kill container: %s, %v", out, err)
+	}

 	deleteContainer(cleanedContainerID)

-	if !strings.Contains(out, "sleep 20") && !strings.Contains(out2, "sleep 20") {
+	if !strings.Contains(out1, "sleep 20") && !strings.Contains(out2, "sleep 20") {
 		t.Fatal("top should've listed `sleep 20` in the process list, but failed twice")
-	} else if !strings.Contains(out, "sleep 20") {
+	} else if !strings.Contains(out1, "sleep 20") {
 		t.Fatal("top should've listed `sleep 20` in the process list, but failed the first time")
 	} else if !strings.Contains(out2, "sleep 20") {
 		t.Fatal("top should've listed `sleep 20` in the process list, but failed the second time")

From f013506aad8b7c09b3c2d3307dadfa4b2d365d40 Mon Sep 17 00:00:00 2001
From: Jessica Frazelle
Date: Tue, 14 Oct 2014 14:02:15 -0700
Subject: [PATCH 059/592] Cleanup errorOut resp in push tests

Docker-DCO-1.1-Signed-off-by: Jessica Frazelle (github: jfrazelle)
---
 integration-cli/docker_cli_push_test.go | 21 +++++++--------------
 1 file changed, 7 insertions(+), 14 deletions(-)

diff --git a/integration-cli/docker_cli_push_test.go b/integration-cli/docker_cli_push_test.go
index 160bb9e286..5db359bf2d 100644
--- a/integration-cli/docker_cli_push_test.go
+++ b/integration-cli/docker_cli_push_test.go
@@ -15,22 +15,17 @@ func TestPushBusyboxImage(t *testing.T) {
 	// tag the image to upload it to the private registry
 	repoName := fmt.Sprintf("%v/busybox", privateRegistryURL)
 	tagCmd := exec.Command(dockerBinary, "tag", "busybox", repoName)
-	out, exitCode, err := runCommandWithOutput(tagCmd)
-	errorOut(err, t, fmt.Sprintf("%v %v", out, err))
-
-	if err != nil || exitCode != 0 {
-		t.Fatal("image tagging failed")
+	if out, _, err := runCommandWithOutput(tagCmd); err != nil {
+		t.Fatalf("image tagging failed: %s, %v", out, err)
 	}

 	pushCmd := exec.Command(dockerBinary, "push", repoName)
-	out, exitCode, err = runCommandWithOutput(pushCmd)
-	errorOut(err, t, fmt.Sprintf("%v %v", out, err))
+	if out, _, err := runCommandWithOutput(pushCmd); err != nil {
+		t.Fatalf("pushing the image to the private registry has failed: %s, %v", out, err)
+	}

 	deleteImages(repoName)

-	if err != nil || exitCode != 0 {
-		t.Fatal("pushing the image to the private registry has failed")
-	}
 	logDone("push - push busybox to private registry")
 }

@@ -39,10 +34,8 @@ func TestPushUnprefixedRepo(t *testing.T) {
 	// skip this test until we're able to use a registry
 	t.Skip()
 	pushCmd := exec.Command(dockerBinary, "push", "busybox")
-	_, exitCode, err := runCommandWithOutput(pushCmd)
-
-	if err == nil || exitCode == 0 {
-		t.Fatal("pushing an unprefixed repo didn't result in a non-zero exit status")
+	if out, _, err := 
runCommandWithOutput(pushCmd); err == nil { + t.Fatal("pushing an unprefixed repo didn't result in a non-zero exit status: %s", out) } logDone("push - push unprefixed busybox repo --> must fail") } From 50c3e7537c055a73a71999cb3ae9f2c6aa57efce Mon Sep 17 00:00:00 2001 From: Jessica Frazelle Date: Tue, 14 Oct 2014 14:05:37 -0700 Subject: [PATCH 060/592] Cleanup errorOut resp in version tests Docker-DCO-1.1-Signed-off-by: Jessica Frazelle (github: jfrazelle) --- integration-cli/docker_cli_version_test.go | 9 +++------ 1 file changed, 3 insertions(+), 6 deletions(-) diff --git a/integration-cli/docker_cli_version_test.go b/integration-cli/docker_cli_version_test.go index 7f1838e5d9..bb9942593d 100644 --- a/integration-cli/docker_cli_version_test.go +++ b/integration-cli/docker_cli_version_test.go @@ -1,7 +1,6 @@ package main import ( - "fmt" "os/exec" "strings" "testing" @@ -10,11 +9,9 @@ import ( // ensure docker version works func TestVersionEnsureSucceeds(t *testing.T) { versionCmd := exec.Command(dockerBinary, "version") - out, exitCode, err := runCommandWithOutput(versionCmd) - errorOut(err, t, fmt.Sprintf("encountered error while running docker version: %v", err)) - - if err != nil || exitCode != 0 { - t.Fatal("failed to execute docker version") + out, _, err := runCommandWithOutput(versionCmd) + if err != nil { + t.Fatal("failed to execute docker version: %s, %v", out, err) } stringsToCheck := []string{ From dd248dee3c889fe376a89adcc9f9d19cafb9bf69 Mon Sep 17 00:00:00 2001 From: Jessica Frazelle Date: Tue, 14 Oct 2014 14:07:30 -0700 Subject: [PATCH 061/592] Cleanup errorOut resp in images tests Docker-DCO-1.1-Signed-off-by: Jessica Frazelle (github: jfrazelle) --- integration-cli/docker_cli_images_test.go | 9 ++++++--- 1 file changed, 6 insertions(+), 3 deletions(-) diff --git a/integration-cli/docker_cli_images_test.go b/integration-cli/docker_cli_images_test.go index 5a7207cec5..ad06cb2eb8 100644 --- a/integration-cli/docker_cli_images_test.go +++ b/integration-cli/docker_cli_images_test.go @@ -1,7 +1,6 @@ package main import ( - "fmt" "os/exec" "strings" "testing" @@ -11,7 +10,9 @@ import ( func TestImagesEnsureImageIsListed(t *testing.T) { imagesCmd := exec.Command(dockerBinary, "images") out, _, err := runCommandWithOutput(imagesCmd) - errorOut(err, t, fmt.Sprintf("listing images failed with errors: %v", err)) + if err != nil { + t.Fatalf("listing images failed with errors: %s, %v", out, err) + } if !strings.Contains(out, "busybox") { t.Fatal("images should've listed busybox") @@ -46,7 +47,9 @@ func TestImagesOrderedByCreationDate(t *testing.T) { } out, _, err := runCommandWithOutput(exec.Command(dockerBinary, "images", "-q", "--no-trunc")) - errorOut(err, t, fmt.Sprintf("listing images failed with errors: %v", err)) + if err != nil { + t.Fatalf("listing images failed with errors: %s, %v", out, err) + } imgs := strings.Split(out, "\n") if imgs[0] != id3 { t.Fatalf("First image must be %s, got %s", id3, imgs[0]) From eeb009729430f0858f56be37011325f3f9486386 Mon Sep 17 00:00:00 2001 From: Jessica Frazelle Date: Tue, 14 Oct 2014 14:09:40 -0700 Subject: [PATCH 062/592] Cleanup errorOut resp pull tests Docker-DCO-1.1-Signed-off-by: Jessica Frazelle (github: jfrazelle) --- integration-cli/docker_cli_pull_test.go | 14 ++++---------- 1 file changed, 4 insertions(+), 10 deletions(-) diff --git a/integration-cli/docker_cli_pull_test.go b/integration-cli/docker_cli_pull_test.go index cadabde815..7ad6f13710 100644 --- a/integration-cli/docker_cli_pull_test.go +++ 
b/integration-cli/docker_cli_pull_test.go @@ -1,7 +1,6 @@ package main import ( - "fmt" "os/exec" "testing" ) @@ -11,11 +10,8 @@ import ( // pulling an image from the central registry should work func TestPullImageFromCentralRegistry(t *testing.T) { pullCmd := exec.Command(dockerBinary, "pull", "scratch") - out, exitCode, err := runCommandWithOutput(pullCmd) - errorOut(err, t, fmt.Sprintf("%s %s", out, err)) - - if err != nil || exitCode != 0 { - t.Fatal("pulling the scratch image from the registry has failed") + if out, _, err := runCommandWithOutput(pullCmd); err != nil { + t.Fatal("pulling the scratch image from the registry has failed: %s, %v", out, err) } logDone("pull - pull scratch") } @@ -23,10 +19,8 @@ func TestPullImageFromCentralRegistry(t *testing.T) { // pulling a non-existing image from the central registry should return a non-zero exit code func TestPullNonExistingImage(t *testing.T) { pullCmd := exec.Command(dockerBinary, "pull", "fooblahblah1234") - _, exitCode, err := runCommandWithOutput(pullCmd) - - if err == nil || exitCode == 0 { - t.Fatal("expected non-zero exit status when pulling non-existing image") + if out, _, err := runCommandWithOutput(pullCmd); err == nil { + t.Fatal("expected non-zero exit status when pulling non-existing image: %s", out) } logDone("pull - pull fooblahblah1234 (non-existing image)") } From 6f1c8ff4c4fd1f001917c3af89907731561642a6 Mon Sep 17 00:00:00 2001 From: Jessica Frazelle Date: Tue, 14 Oct 2014 14:13:07 -0700 Subject: [PATCH 063/592] Cleanup errorOut resp exec tests Docker-DCO-1.1-Signed-off-by: Jessica Frazelle (github: jfrazelle) --- integration-cli/docker_cli_exec_test.go | 30 ++++++++++++++++--------- 1 file changed, 19 insertions(+), 11 deletions(-) diff --git a/integration-cli/docker_cli_exec_test.go b/integration-cli/docker_cli_exec_test.go index 0e012aa4c0..2a9e30e688 100644 --- a/integration-cli/docker_cli_exec_test.go +++ b/integration-cli/docker_cli_exec_test.go @@ -10,13 +10,15 @@ import ( func TestExec(t *testing.T) { runCmd := exec.Command(dockerBinary, "run", "-d", "--name", "testing", "busybox", "sh", "-c", "echo test > /tmp/file && sleep 100") - out, _, _, err := runCommandWithStdoutStderr(runCmd) - errorOut(err, t, out) + if out, _, _, err := runCommandWithStdoutStderr(runCmd); err != nil { + t.Fatal(out, err) + } execCmd := exec.Command(dockerBinary, "exec", "testing", "cat", "/tmp/file") - - out, _, err = runCommandWithOutput(execCmd) - errorOut(err, t, out) + out, _, err := runCommandWithOutput(execCmd) + if err != nil { + t.Fatal(out, err) + } out = strings.Trim(out, "\r\n") @@ -31,8 +33,9 @@ func TestExec(t *testing.T) { func TestExecInteractive(t *testing.T) { runCmd := exec.Command(dockerBinary, "run", "-d", "--name", "testing", "busybox", "sh", "-c", "echo test > /tmp/file && sleep 100") - out, _, _, err := runCommandWithStdoutStderr(runCmd) - errorOut(err, t, out) + if out, _, _, err := runCommandWithStdoutStderr(runCmd); err != nil { + t.Fatal(out, err) + } execCmd := exec.Command(dockerBinary, "exec", "-i", "testing", "sh") stdin, err := execCmd.StdinPipe() @@ -84,17 +87,22 @@ func TestExecInteractive(t *testing.T) { func TestExecAfterContainerRestart(t *testing.T) { runCmd := exec.Command(dockerBinary, "run", "-d", "busybox", "top") out, _, err := runCommandWithOutput(runCmd) - errorOut(err, t, out) + if err != nil { + t.Fatal(out, err) + } cleanedContainerID := stripTrailingCharacters(out) runCmd = exec.Command(dockerBinary, "restart", cleanedContainerID) - out, _, err = runCommandWithOutput(runCmd) - 
errorOut(err, t, out) + if out, _, err = runCommandWithOutput(runCmd); err != nil { + t.Fatal(out, err) + } runCmd = exec.Command(dockerBinary, "exec", cleanedContainerID, "echo", "hello") out, _, err = runCommandWithOutput(runCmd) - errorOut(err, t, out) + if err != nil { + t.Fatal(out, err) + } outStr := strings.TrimSpace(out) if outStr != "hello" { From 9f52d8e6e786e62aa63b06d26782120fd25daf31 Mon Sep 17 00:00:00 2001 From: Jessica Frazelle Date: Tue, 14 Oct 2014 14:18:55 -0700 Subject: [PATCH 064/592] Cleanup errorOut resp restart tests Docker-DCO-1.1-Signed-off-by: Jessica Frazelle (github: jfrazelle) --- integration-cli/docker_cli_restart_test.go | 64 +++++++++++++++------- 1 file changed, 45 insertions(+), 19 deletions(-) diff --git a/integration-cli/docker_cli_restart_test.go b/integration-cli/docker_cli_restart_test.go index 7dc1819fe3..3a390ef2c3 100644 --- a/integration-cli/docker_cli_restart_test.go +++ b/integration-cli/docker_cli_restart_test.go @@ -10,29 +10,37 @@ import ( func TestRestartStoppedContainer(t *testing.T) { runCmd := exec.Command(dockerBinary, "run", "-d", "busybox", "echo", "foobar") out, _, err := runCommandWithOutput(runCmd) - errorOut(err, t, out) + if err != nil { + t.Fatal(out, err) + } cleanedContainerID := stripTrailingCharacters(out) runCmd = exec.Command(dockerBinary, "wait", cleanedContainerID) - out, _, err = runCommandWithOutput(runCmd) - errorOut(err, t, out) + if out, _, err = runCommandWithOutput(runCmd); err != nil { + t.Fatal(out, err) + } runCmd = exec.Command(dockerBinary, "logs", cleanedContainerID) out, _, err = runCommandWithOutput(runCmd) - errorOut(err, t, out) + if err != nil { + t.Fatal(out, err) + } if out != "foobar\n" { t.Errorf("container should've printed 'foobar'") } runCmd = exec.Command(dockerBinary, "restart", cleanedContainerID) - out, _, err = runCommandWithOutput(runCmd) - errorOut(err, t, out) + if out, _, err = runCommandWithOutput(runCmd); err != nil { + t.Fatal(out, err) + } runCmd = exec.Command(dockerBinary, "logs", cleanedContainerID) out, _, err = runCommandWithOutput(runCmd) - errorOut(err, t, out) + if err != nil { + t.Fatal(out, err) + } if out != "foobar\nfoobar\n" { t.Errorf("container should've printed 'foobar' twice") @@ -46,7 +54,9 @@ func TestRestartStoppedContainer(t *testing.T) { func TestRestartRunningContainer(t *testing.T) { runCmd := exec.Command(dockerBinary, "run", "-d", "busybox", "sh", "-c", "echo foobar && sleep 30 && echo 'should not print this'") out, _, err := runCommandWithOutput(runCmd) - errorOut(err, t, out) + if err != nil { + t.Fatal(out, err) + } cleanedContainerID := stripTrailingCharacters(out) @@ -54,19 +64,24 @@ func TestRestartRunningContainer(t *testing.T) { runCmd = exec.Command(dockerBinary, "logs", cleanedContainerID) out, _, err = runCommandWithOutput(runCmd) - errorOut(err, t, out) + if err != nil { + t.Fatal(out, err) + } if out != "foobar\n" { t.Errorf("container should've printed 'foobar'") } runCmd = exec.Command(dockerBinary, "restart", "-t", "1", cleanedContainerID) - out, _, err = runCommandWithOutput(runCmd) - errorOut(err, t, out) + if out, _, err = runCommandWithOutput(runCmd); err != nil { + t.Fatal(out, err) + } runCmd = exec.Command(dockerBinary, "logs", cleanedContainerID) out, _, err = runCommandWithOutput(runCmd) - errorOut(err, t, out) + if err != nil { + t.Fatal(out, err) + } time.Sleep(1 * time.Second) @@ -83,13 +98,17 @@ func TestRestartRunningContainer(t *testing.T) { func TestRestartWithVolumes(t *testing.T) { runCmd := exec.Command(dockerBinary, "run", 
"-d", "-v", "/test", "busybox", "top") out, _, err := runCommandWithOutput(runCmd) - errorOut(err, t, out) + if err != nil { + t.Fatal(out, err) + } cleanedContainerID := stripTrailingCharacters(out) runCmd = exec.Command(dockerBinary, "inspect", "--format", "{{ len .Volumes }}", cleanedContainerID) out, _, err = runCommandWithOutput(runCmd) - errorOut(err, t, out) + if err != nil { + t.Fatal(out, err) + } if out = strings.Trim(out, " \n\r"); out != "1" { t.Errorf("expect 1 volume received %s", out) @@ -97,15 +116,20 @@ func TestRestartWithVolumes(t *testing.T) { runCmd = exec.Command(dockerBinary, "inspect", "--format", "{{ .Volumes }}", cleanedContainerID) volumes, _, err := runCommandWithOutput(runCmd) - errorOut(err, t, volumes) + if err != nil { + t.Fatal(volumes, err) + } runCmd = exec.Command(dockerBinary, "restart", cleanedContainerID) - out, _, err = runCommandWithOutput(runCmd) - errorOut(err, t, out) + if out, _, err = runCommandWithOutput(runCmd); err != nil { + t.Fatal(out, err) + } runCmd = exec.Command(dockerBinary, "inspect", "--format", "{{ len .Volumes }}", cleanedContainerID) out, _, err = runCommandWithOutput(runCmd) - errorOut(err, t, out) + if err != nil { + t.Fatal(out, err) + } if out = strings.Trim(out, " \n\r"); out != "1" { t.Errorf("expect 1 volume after restart received %s", out) @@ -113,7 +137,9 @@ func TestRestartWithVolumes(t *testing.T) { runCmd = exec.Command(dockerBinary, "inspect", "--format", "{{ .Volumes }}", cleanedContainerID) volumesAfterRestart, _, err := runCommandWithOutput(runCmd) - errorOut(err, t, volumesAfterRestart) + if err != nil { + t.Fatal(volumesAfterRestart, err) + } if volumes != volumesAfterRestart { volumes = strings.Trim(volumes, " \n\r") From 842d4b6b0afb0380afeea6d1638edb8913ae70bd Mon Sep 17 00:00:00 2001 From: Jessica Frazelle Date: Tue, 14 Oct 2014 14:22:49 -0700 Subject: [PATCH 065/592] Cleanup errorOut resp log tests Docker-DCO-1.1-Signed-off-by: Jessica Frazelle (github: jfrazelle) --- integration-cli/docker_cli_logs_test.go | 68 ++++++++++++++++++------- 1 file changed, 51 insertions(+), 17 deletions(-) diff --git a/integration-cli/docker_cli_logs_test.go b/integration-cli/docker_cli_logs_test.go index 2407291cdb..d6d3f9320f 100644 --- a/integration-cli/docker_cli_logs_test.go +++ b/integration-cli/docker_cli_logs_test.go @@ -16,14 +16,18 @@ func TestLogsContainerSmallerThanPage(t *testing.T) { testLen := 32767 runCmd := exec.Command(dockerBinary, "run", "-d", "busybox", "sh", "-c", fmt.Sprintf("for i in $(seq 1 %d); do echo -n =; done; echo", testLen)) out, _, _, err := runCommandWithStdoutStderr(runCmd) - errorOut(err, t, fmt.Sprintf("run failed with errors: %v", err)) + if err != nil { + t.Fatalf("run failed with errors: %s, %v", out, err) + } cleanedContainerID := stripTrailingCharacters(out) exec.Command(dockerBinary, "wait", cleanedContainerID).Run() logsCmd := exec.Command(dockerBinary, "logs", cleanedContainerID) out, _, _, err = runCommandWithStdoutStderr(logsCmd) - errorOut(err, t, fmt.Sprintf("failed to log container: %v %v", out, err)) + if err != nil { + t.Fatalf("failed to log container: %s, %v", out, err) + } if len(out) != testLen+1 { t.Fatalf("Expected log length of %d, received %d\n", testLen+1, len(out)) @@ -39,14 +43,18 @@ func TestLogsContainerBiggerThanPage(t *testing.T) { testLen := 32768 runCmd := exec.Command(dockerBinary, "run", "-d", "busybox", "sh", "-c", fmt.Sprintf("for i in $(seq 1 %d); do echo -n =; done; echo", testLen)) out, _, _, err := runCommandWithStdoutStderr(runCmd) - errorOut(err, 
t, fmt.Sprintf("run failed with errors: %v", err)) + if err != nil { + t.Fatalf("run failed with errors: %s, %v", out, err) + } cleanedContainerID := stripTrailingCharacters(out) exec.Command(dockerBinary, "wait", cleanedContainerID).Run() logsCmd := exec.Command(dockerBinary, "logs", cleanedContainerID) out, _, _, err = runCommandWithStdoutStderr(logsCmd) - errorOut(err, t, fmt.Sprintf("failed to log container: %v %v", out, err)) + if err != nil { + t.Fatalf("failed to log container: %s, %v", out, err) + } if len(out) != testLen+1 { t.Fatalf("Expected log length of %d, received %d\n", testLen+1, len(out)) @@ -62,14 +70,18 @@ func TestLogsContainerMuchBiggerThanPage(t *testing.T) { testLen := 33000 runCmd := exec.Command(dockerBinary, "run", "-d", "busybox", "sh", "-c", fmt.Sprintf("for i in $(seq 1 %d); do echo -n =; done; echo", testLen)) out, _, _, err := runCommandWithStdoutStderr(runCmd) - errorOut(err, t, fmt.Sprintf("run failed with errors: %v", err)) + if err != nil { + t.Fatalf("run failed with errors: %s, %v", out, err) + } cleanedContainerID := stripTrailingCharacters(out) exec.Command(dockerBinary, "wait", cleanedContainerID).Run() logsCmd := exec.Command(dockerBinary, "logs", cleanedContainerID) out, _, _, err = runCommandWithStdoutStderr(logsCmd) - errorOut(err, t, fmt.Sprintf("failed to log container: %v %v", out, err)) + if err != nil { + t.Fatalf("failed to log container: %s, %v", out, err) + } if len(out) != testLen+1 { t.Fatalf("Expected log length of %d, received %d\n", testLen+1, len(out)) @@ -85,14 +97,18 @@ func TestLogsTimestamps(t *testing.T) { runCmd := exec.Command(dockerBinary, "run", "-d", "busybox", "sh", "-c", fmt.Sprintf("for i in $(seq 1 %d); do echo =; done;", testLen)) out, _, _, err := runCommandWithStdoutStderr(runCmd) - errorOut(err, t, fmt.Sprintf("run failed with errors: %v", err)) + if err != nil { + t.Fatalf("run failed with errors: %s, %v", out, err) + } cleanedContainerID := stripTrailingCharacters(out) exec.Command(dockerBinary, "wait", cleanedContainerID).Run() logsCmd := exec.Command(dockerBinary, "logs", "-t", cleanedContainerID) out, _, _, err = runCommandWithStdoutStderr(logsCmd) - errorOut(err, t, fmt.Sprintf("failed to log container: %v %v", out, err)) + if err != nil { + t.Fatalf("failed to log container: %s, %v", out, err) + } lines := strings.Split(out, "\n") @@ -124,14 +140,18 @@ func TestLogsSeparateStderr(t *testing.T) { runCmd := exec.Command(dockerBinary, "run", "-d", "busybox", "sh", "-c", fmt.Sprintf("echo %s 1>&2", msg)) out, _, _, err := runCommandWithStdoutStderr(runCmd) - errorOut(err, t, fmt.Sprintf("run failed with errors: %v", err)) + if err != nil { + t.Fatalf("run failed with errors: %s, %v", out, err) + } cleanedContainerID := stripTrailingCharacters(out) exec.Command(dockerBinary, "wait", cleanedContainerID).Run() logsCmd := exec.Command(dockerBinary, "logs", cleanedContainerID) stdout, stderr, _, err := runCommandWithStdoutStderr(logsCmd) - errorOut(err, t, fmt.Sprintf("failed to log container: %v %v", out, err)) + if err != nil { + t.Fatalf("failed to log container: %s, %v", out, err) + } if stdout != "" { t.Fatalf("Expected empty stdout stream, got %v", stdout) @@ -152,14 +172,18 @@ func TestLogsStderrInStdout(t *testing.T) { runCmd := exec.Command(dockerBinary, "run", "-d", "-t", "busybox", "sh", "-c", fmt.Sprintf("echo %s 1>&2", msg)) out, _, _, err := runCommandWithStdoutStderr(runCmd) - errorOut(err, t, fmt.Sprintf("run failed with errors: %v", err)) + if err != nil { + t.Fatalf("run failed with errors: %s, 
%v", out, err) + } cleanedContainerID := stripTrailingCharacters(out) exec.Command(dockerBinary, "wait", cleanedContainerID).Run() logsCmd := exec.Command(dockerBinary, "logs", cleanedContainerID) stdout, stderr, _, err := runCommandWithStdoutStderr(logsCmd) - errorOut(err, t, fmt.Sprintf("failed to log container: %v %v", out, err)) + if err != nil { + t.Fatalf("failed to log container: %s, %v", out, err) + } if stderr != "" { t.Fatalf("Expected empty stderr stream, got %v", stdout) @@ -180,14 +204,18 @@ func TestLogsTail(t *testing.T) { runCmd := exec.Command(dockerBinary, "run", "-d", "busybox", "sh", "-c", fmt.Sprintf("for i in $(seq 1 %d); do echo =; done;", testLen)) out, _, _, err := runCommandWithStdoutStderr(runCmd) - errorOut(err, t, fmt.Sprintf("run failed with errors: %v", err)) + if err != nil { + t.Fatalf("run failed with errors: %s, %v", out, err) + } cleanedContainerID := stripTrailingCharacters(out) exec.Command(dockerBinary, "wait", cleanedContainerID).Run() logsCmd := exec.Command(dockerBinary, "logs", "--tail", "5", cleanedContainerID) out, _, _, err = runCommandWithStdoutStderr(logsCmd) - errorOut(err, t, fmt.Sprintf("failed to log container: %v %v", out, err)) + if err != nil { + t.Fatalf("failed to log container: %s, %v", out, err) + } lines := strings.Split(out, "\n") @@ -197,7 +225,9 @@ func TestLogsTail(t *testing.T) { logsCmd = exec.Command(dockerBinary, "logs", "--tail", "all", cleanedContainerID) out, _, _, err = runCommandWithStdoutStderr(logsCmd) - errorOut(err, t, fmt.Sprintf("failed to log container: %v %v", out, err)) + if err != nil { + t.Fatalf("failed to log container: %s, %v", out, err) + } lines = strings.Split(out, "\n") @@ -207,7 +237,9 @@ func TestLogsTail(t *testing.T) { logsCmd = exec.Command(dockerBinary, "logs", "--tail", "random", cleanedContainerID) out, _, _, err = runCommandWithStdoutStderr(logsCmd) - errorOut(err, t, fmt.Sprintf("failed to log container: %v %v", out, err)) + if err != nil { + t.Fatalf("failed to log container: %s, %v", out, err) + } lines = strings.Split(out, "\n") @@ -223,7 +255,9 @@ func TestLogsFollowStopped(t *testing.T) { runCmd := exec.Command(dockerBinary, "run", "-d", "busybox", "echo", "hello") out, _, _, err := runCommandWithStdoutStderr(runCmd) - errorOut(err, t, fmt.Sprintf("run failed with errors: %v", err)) + if err != nil { + t.Fatalf("run failed with errors: %s, %v", out, err) + } cleanedContainerID := stripTrailingCharacters(out) exec.Command(dockerBinary, "wait", cleanedContainerID).Run() From 66ac7420a90c832bacd0cb1832bf934b5420a311 Mon Sep 17 00:00:00 2001 From: Jessica Frazelle Date: Tue, 14 Oct 2014 14:40:28 -0700 Subject: [PATCH 066/592] Cleanup errorOut resp run tests Docker-DCO-1.1-Signed-off-by: Jessica Frazelle (github: jfrazelle) --- integration-cli/docker_cli_run_test.go | 50 ++++++++++++++------------ 1 file changed, 28 insertions(+), 22 deletions(-) diff --git a/integration-cli/docker_cli_run_test.go b/integration-cli/docker_cli_run_test.go index 6bef936369..34a500de8c 100644 --- a/integration-cli/docker_cli_run_test.go +++ b/integration-cli/docker_cli_run_test.go @@ -141,8 +141,6 @@ func TestRunPingGoogle(t *testing.T) { t.Fatalf("failed to run container: %v, output: %q", err, out) } - errorOut(err, t, "container should've been able to ping 8.8.8.8") - deleteAllContainers() logDone("run - ping 8.8.8.8") @@ -152,11 +150,8 @@ func TestRunPingGoogle(t *testing.T) { // some versions of lxc might make this test fail func TestRunExitCodeZero(t *testing.T) { runCmd := exec.Command(dockerBinary, 
"run", "busybox", "true") - exitCode, err := runCommand(runCmd) - errorOut(err, t, fmt.Sprintf("%s", err)) - - if exitCode != 0 { - t.Errorf("container should've exited with exit code 0") + if out, _, err := runCommandWithOutput(runCmd); err != nil { + t.Errorf("container should've exited with exit code 0: %s, %v", out, err) } deleteAllContainers() @@ -193,26 +188,31 @@ func TestRunStdinPipe(t *testing.T) { out = stripTrailingCharacters(out) inspectCmd := exec.Command(dockerBinary, "inspect", out) - inspectOut, _, err := runCommandWithOutput(inspectCmd) - errorOut(err, t, fmt.Sprintf("out should've been a container id: %s %s", out, inspectOut)) + if out, _, err := runCommandWithOutput(inspectCmd); err != nil { + t.Fatalf("out should've been a container id: %s %v", out, err) + } waitCmd := exec.Command(dockerBinary, "wait", out) - _, _, err = runCommandWithOutput(waitCmd) - errorOut(err, t, fmt.Sprintf("error thrown while waiting for container: %s", out)) + if waitOut, _, err := runCommandWithOutput(waitCmd); err != nil { + t.Fatalf("error thrown while waiting for container: %s, %v", waitOut, err) + } logsCmd := exec.Command(dockerBinary, "logs", out) - containerLogs, _, err := runCommandWithOutput(logsCmd) - errorOut(err, t, fmt.Sprintf("error thrown while trying to get container logs: %s", err)) + logsOut, _, err := runCommandWithOutput(logsCmd) + if err != nil { + t.Fatalf("error thrown while trying to get container logs: %s, %v", logsOut, err) + } - containerLogs = stripTrailingCharacters(containerLogs) + containerLogs := stripTrailingCharacters(logsOut) if containerLogs != "blahblah" { t.Errorf("logs didn't print the container's logs %s", containerLogs) } rmCmd := exec.Command(dockerBinary, "rm", out) - _, _, err = runCommandWithOutput(rmCmd) - errorOut(err, t, fmt.Sprintf("rm failed to remove container %s", err)) + if out, _, err = runCommandWithOutput(rmCmd); err != nil { + t.Fatalf("rm failed to remove container: %s, %v", out, err) + } deleteAllContainers() @@ -230,16 +230,20 @@ func TestRunDetachedContainerIDPrinting(t *testing.T) { out = stripTrailingCharacters(out) inspectCmd := exec.Command(dockerBinary, "inspect", out) - inspectOut, _, err := runCommandWithOutput(inspectCmd) - errorOut(err, t, fmt.Sprintf("out should've been a container id: %s %s", out, inspectOut)) + if inspectOut, _, err := runCommandWithOutput(inspectCmd); err != nil { + t.Fatalf("out should've been a container id: %s %v", inspectOut, err) + } waitCmd := exec.Command(dockerBinary, "wait", out) - _, _, err = runCommandWithOutput(waitCmd) - errorOut(err, t, fmt.Sprintf("error thrown while waiting for container: %s", out)) + if waitOut, _, err := runCommandWithOutput(waitCmd); err != nil { + t.Fatalf("error thrown while waiting for container: %s, %v", waitOut, err) + } rmCmd := exec.Command(dockerBinary, "rm", out) rmOut, _, err := runCommandWithOutput(rmCmd) - errorOut(err, t, "rm failed to remove container") + if err != nil { + t.Fatalf("rm failed to remove container: %s, %v", rmOut, err) + } rmOut = stripTrailingCharacters(rmOut) if rmOut != out { @@ -267,7 +271,9 @@ func TestRunWorkingDirectory(t *testing.T) { runCmd = exec.Command(dockerBinary, "run", "--workdir", "/root", "busybox", "pwd") out, _, _, err = runCommandWithStdoutStderr(runCmd) - errorOut(err, t, out) + if err != nil { + t.Fatal(out, err) + } out = stripTrailingCharacters(out) From 5af7facf18565100d52fa198a695ab98a9669825 Mon Sep 17 00:00:00 2001 From: Jessica Frazelle Date: Tue, 14 Oct 2014 15:11:21 -0700 Subject: [PATCH 067/592] Cleanup 
errorOut remove errorOut functions Docker-DCO-1.1-Signed-off-by: Jessica Frazelle (github: jfrazelle) --- integration-cli/utils.go | 13 ------------- 1 file changed, 13 deletions(-) diff --git a/integration-cli/utils.go b/integration-cli/utils.go index f3f128e329..e99e45591b 100644 --- a/integration-cli/utils.go +++ b/integration-cli/utils.go @@ -13,7 +13,6 @@ import ( "reflect" "strings" "syscall" - "testing" "time" "github.com/docker/docker/vendor/src/code.google.com/p/go/src/pkg/archive/tar" @@ -113,18 +112,6 @@ func stripTrailingCharacters(target string) string { return target } -func errorOut(err error, t *testing.T, message string) { - if err != nil { - t.Fatal(message) - } -} - -func errorOutOnNonNilError(err error, t *testing.T, message string) { - if err == nil { - t.Fatalf(message) - } -} - func nLines(s string) int { return strings.Count(s, "\n") } From b61ad60675999439bc2d937afbb0c3c82f6f7b06 Mon Sep 17 00:00:00 2001 From: Fred Lifton Date: Wed, 15 Oct 2014 12:21:18 -0700 Subject: [PATCH 068/592] Edits and additions to release notes. Docker-DCO-1.1-Signed-off-by: Fred Lifton (github: fredlf) Added link for CVEs. Docker-DCO-1.1-Signed-off-by: Fred Lifton (github: fredlf) --- docs/sources/index.md | 33 +++++++++++++++++++++++++-------- 1 file changed, 25 insertions(+), 8 deletions(-) diff --git a/docs/sources/index.md b/docs/sources/index.md index 0db731c8f0..a2ee2f396a 100644 --- a/docs/sources/index.md +++ b/docs/sources/index.md @@ -77,8 +77,8 @@ The [Understanding Docker section](introduction/understanding-docker.md) will he ### Installation Guides -The [installation section](/installation/#installation) will show you how to install -Docker on a variety of platforms. +The [installation section](/installation/#installation) will show you how to +install Docker on a variety of platforms. ### Docker User Guide @@ -91,14 +91,17 @@ implementation, check out the [Docker User Guide](/userguide/). **Version 1.3.0** This version fixes a number of bugs and issues and adds new functions and other -improvements. These include: +improvements. The [GitHub 1.3 milestone](https://github.com/docker/docker/issues?q=milestone%3A1.3.0+) has +more detailed information. Major additions and changes include: *New command: `docker exec`* The new `docker exec` command lets you run a process in an existing, active container. The command has APIs for both the daemon and the client. With -`docker exec`, you'll be able to do things like add or remove devices from running containers, debug running containers, and run commands that are not -part of the container's static specification. +`docker exec`, you'll be able to do things like add or remove devices from +running containers, debug running containers, and run commands that are not +part of the container's static specification. Details in the [command line +reference](/reference/commandline/). *New command: `docker create`* @@ -109,6 +112,7 @@ provides more control over management of the container lifecycle, giving you the ability to configure things like volumes or port mappings before the container is started. For example, in a rapid-response scaling situation, you could use `create` to prepare and stage ten containers in anticipation of heavy loads. +Details in the [command line reference](/reference/commandline/). *Tech preview of new provenance features* @@ -127,7 +131,20 @@ regular, unsigned image. *Other improvements & changes* -We've added a new security options flag that lets you set SELinux and AppArmor -labels and profiles. 
This means you'll no longer have to use `docker run ---privileged` on kernels that support SE Linux or AppArmor. +* We've added a new security options flag to the `docker run` command, +`--security-opt`, that lets you set SELinux and AppArmor labels and profiles. +This means you'll no longer have to use `docker run --privileged` on kernels +that support SE Linux or AppArmor. For more information, see the +[run reference page](/reference/run). +* A new flag, `--add-host`, has been added to `docker run` that lets you add +lines to `/etc/hosts`. This allows you to specify different name +resolution for the container than it would get via DNS. For more information, +see the [run reference page](/reference/run). + +* You can now set a `DOCKER_TLS_VERIFY` environment variable to secure +connections by default (rather than having to pass the `--tlsverify` flag on +every call). For more information, see the [https guide](/articles/https). + +* Three security issues have been addressed in this release: [CVE-2014-5280, +CVE-2014-5270, and CVE-2014-5282](https://groups.google.com/forum/#!msg/docker-announce/aQoVmQlcE0A/smPuBNYf8VwJ). From d79defb7478973a0b554bc775efb1bf716fbcb1f Mon Sep 17 00:00:00 2001 From: Fred Lifton Date: Wed, 15 Oct 2014 12:21:18 -0700 Subject: [PATCH 069/592] Edits and additions to release notes. Docker-DCO-1.1-Signed-off-by: Fred Lifton (github: fredlf) Added link for CVEs. Docker-DCO-1.1-Signed-off-by: Fred Lifton (github: fredlf) Fixed some URLs. Docker-DCO-1.1-Signed-off-by: Fred Lifton (github: fredlf) --- docs/sources/index.md | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/docs/sources/index.md b/docs/sources/index.md index a2ee2f396a..bcec387f6b 100644 --- a/docs/sources/index.md +++ b/docs/sources/index.md @@ -101,7 +101,7 @@ container. The command has APIs for both the daemon and the client. With `docker exec`, you'll be able to do things like add or remove devices from running containers, debug running containers, and run commands that are not part of the container's static specification. Details in the [command line -reference](/reference/commandline/). +reference](/reference/commandline/cli/#exec). *New command: `docker create`* @@ -112,7 +112,7 @@ provides more control over management of the container lifecycle, giving you the ability to configure things like volumes or port mappings before the container is started. For example, in a rapid-response scaling situation, you could use `create` to prepare and stage ten containers in anticipation of heavy loads. -Details in the [command line reference](/reference/commandline/). +Details in the [command line reference](/reference/commandline/cli/#create). *Tech preview of new provenance features* @@ -135,12 +135,12 @@ regular, unsigned image. `--security-opt`, that lets you set SELinux and AppArmor labels and profiles. This means you'll no longer have to use `docker run --privileged` on kernels that support SE Linux or AppArmor. For more information, see the -[run reference page](/reference/run). +[command line reference](/reference/commandline/cli/#run). * A new flag, `--add-host`, has been added to `docker run` that lets you add lines to `/etc/hosts`. This allows you to specify different name resolution for the container than it would get via DNS. For more information, -see the [run reference page](/reference/run). +see the [command line reference](/reference/commandline/cli/#run). 
* You can now set a `DOCKER_TLS_VERIFY` environment variable to secure connections by default (rather than having to pass the `--tlsverify` flag on From 210db1234dfb8d279bc1e5e2c8438dc8db2ee27d Mon Sep 17 00:00:00 2001 From: Albert Zhang Date: Fri, 17 Oct 2014 09:41:23 +0800 Subject: [PATCH 070/592] Fix lxc driver build issue on Mac OS X Docker-DCO-1.1-Signed-off-by: Albert Zhang (github: zhgwenming) --- daemon/execdriver/lxc/lxc_init_unsupported.go | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/daemon/execdriver/lxc/lxc_init_unsupported.go b/daemon/execdriver/lxc/lxc_init_unsupported.go index b3f2ae68eb..6a5904a4d8 100644 --- a/daemon/execdriver/lxc/lxc_init_unsupported.go +++ b/daemon/execdriver/lxc/lxc_init_unsupported.go @@ -2,12 +2,10 @@ package lxc -import "github.com/docker/docker/daemon/execdriver" - func setHostname(hostname string) error { panic("Not supported on darwin") } -func finalizeNamespace(args *execdriver.InitArgs) error { +func finalizeNamespace(args *InitArgs) error { panic("Not supported on darwin") } From 4e8f69bfe06897054611ee3c7fb41e5babce2224 Mon Sep 17 00:00:00 2001 From: Pierre Date: Thu, 16 Oct 2014 22:16:29 -0400 Subject: [PATCH 071/592] Update dockerimages.md The container ID in the text didn't match the container ID from the figure above it. --- docs/sources/userguide/dockerimages.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/sources/userguide/dockerimages.md b/docs/sources/userguide/dockerimages.md index a0a30408c6..382eef2410 100644 --- a/docs/sources/userguide/dockerimages.md +++ b/docs/sources/userguide/dockerimages.md @@ -457,7 +457,7 @@ Next we can see each instruction in the `Dockerfile` being executed step-by-step. We can see that each step creates a new container, runs the instruction inside that container and then commits that change - just like the `docker commit` work flow we saw earlier. When all the -instructions have executed we're left with the `324104cde6ad` image +instructions have executed we're left with the `97feabe5d2ed` image (also helpfully tagged as `ouruser/sinatra:v2`) and all intermediate containers will get removed to clean things up. From 7cff8db4a381f68d503fe828fb0fb35cf979741c Mon Sep 17 00:00:00 2001 From: Blake Geno Date: Fri, 17 Oct 2014 10:49:21 -0400 Subject: [PATCH 072/592] Removed unused function layerArchive Signed-off-by: Blake Geno --- integration/runtime_test.go | 9 --------- 1 file changed, 9 deletions(-) diff --git a/integration/runtime_test.go b/integration/runtime_test.go index 6720485e28..b17d132f8a 100644 --- a/integration/runtime_test.go +++ b/integration/runtime_test.go @@ -79,15 +79,6 @@ func cleanup(eng *engine.Engine, t *testing.T) error { return nil } -func layerArchive(tarfile string) (io.Reader, error) { - // FIXME: need to close f somewhere - f, err := os.Open(tarfile) - if err != nil { - return nil, err - } - return f, nil -} - func init() { // Always use the same driver (vfs) for all integration tests. // To test other drivers, we need a dedicated driver validation suite. 
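The 1.3.0 release notes above describe `docker exec` and `docker create` only in prose. As a rough sketch of the workflow they describe (not taken from any patch in this series), the two commands might be used together like this; the container name `web`, the port mapping, and the `busybox` image are placeholders:

```bash
# Stage a container ahead of time with docker create; nothing runs yet,
# only the new container ID is printed ("web" and busybox are placeholders).
docker create --name web -p 8080:80 busybox top

# Start the pre-created container later, e.g. when load actually arrives.
docker start web

# Run an additional process inside the already-running container...
docker exec web ps

# ...or open an interactive shell in it for debugging.
docker exec -it web sh
```

The `docker exec <container> cat <file>` pattern is the same one the exec integration tests later in this series drive through `exec.Command`.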
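The remaining bullets cover `--security-opt`, `--add-host`, and `DOCKER_TLS_VERIFY`. A hedged sketch of how those might be exercised follows; the label value, host entry, daemon address, and certificate path are illustrative assumptions, and the exact `--security-opt` value syntax should be confirmed against the run reference for this release:

```bash
# Opt out of SELinux label separation for one container instead of using
# --privileged (value syntax assumed from the 1.3-era docs; adjust to your policy).
docker run --security-opt label:disable busybox true

# Add a custom /etc/hosts entry so the container resolves "dbhost" without DNS
# (the name and address are placeholders).
docker run --add-host dbhost:10.0.0.5 busybox cat /etc/hosts

# Verify TLS on every connection by default instead of passing --tlsverify each time.
# DOCKER_HOST and DOCKER_CERT_PATH below are assumed values for a TLS-enabled daemon.
export DOCKER_HOST=tcp://127.0.0.1:2376
export DOCKER_CERT_PATH=$HOME/.docker
export DOCKER_TLS_VERIFY=1
docker info
```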
From ad136e1ae3175bd30c87ccbde8e31f0e422ff5ec Mon Sep 17 00:00:00 2001 From: Alexandr Morozov Date: Fri, 17 Oct 2014 11:06:05 -0700 Subject: [PATCH 073/592] Don't write pull output to stdout on container creating Fixes #8632 Signed-off-by: Alexandr Morozov --- api/client/commands.go | 9 +++++++-- integration-cli/docker_cli_run_test.go | 16 ++++++++++++++++ 2 files changed, 23 insertions(+), 2 deletions(-) diff --git a/api/client/commands.go b/api/client/commands.go index 6c4e5c55fe..2c44bb63c5 100644 --- a/api/client/commands.go +++ b/api/client/commands.go @@ -1986,6 +1986,10 @@ func (cli *DockerCli) CmdTag(args ...string) error { } func (cli *DockerCli) pullImage(image string) error { + return cli.pullImageCustomOut(image, cli.out) +} + +func (cli *DockerCli) pullImageCustomOut(image string, out io.Writer) error { v := url.Values{} repos, tag := parsers.ParseRepositoryTag(image) // pull only the image tagged 'latest' if no tag was specified @@ -2014,7 +2018,7 @@ func (cli *DockerCli) pullImage(image string) error { registryAuthHeader := []string{ base64.URLEncoding.EncodeToString(buf), } - if err = cli.stream("POST", "/images/create?"+v.Encode(), nil, cli.out, map[string][]string{"X-Registry-Auth": registryAuthHeader}); err != nil { + if err = cli.stream("POST", "/images/create?"+v.Encode(), nil, out, map[string][]string{"X-Registry-Auth": registryAuthHeader}); err != nil { return err } return nil @@ -2081,7 +2085,8 @@ func (cli *DockerCli) createContainer(config *runconfig.Config, hostConfig *runc if statusCode == 404 { fmt.Fprintf(cli.err, "Unable to find image '%s' locally\n", config.Image) - if err = cli.pullImage(config.Image); err != nil { + // we don't want to write to stdout anything apart from container.ID + if err = cli.pullImageCustomOut(config.Image, cli.err); err != nil { return nil, err } // Retry diff --git a/integration-cli/docker_cli_run_test.go b/integration-cli/docker_cli_run_test.go index 34a500de8c..81ed693b2b 100644 --- a/integration-cli/docker_cli_run_test.go +++ b/integration-cli/docker_cli_run_test.go @@ -2,6 +2,7 @@ package main import ( "bufio" + "bytes" "fmt" "io/ioutil" "net" @@ -2380,3 +2381,18 @@ func TestRunVolumesNotRecreatedOnStart(t *testing.T) { logDone("run - volumes not recreated on start") } + +func TestRunNoOutputFromPullInStdout(t *testing.T) { + defer deleteAllContainers() + // just run with unknown image + cmd := exec.Command(dockerBinary, "run", "asdfsg") + stdout := bytes.NewBuffer(nil) + cmd.Stdout = stdout + if err := cmd.Run(); err == nil { + t.Fatal("Run with unknown image should fail") + } + if stdout.Len() != 0 { + t.Fatalf("Stdout contains output from pull: %s", stdout) + } + logDone("run - no output from pull in stdout") +} From e45c92153d0c21308fe5983aa76182cc03ef398e Mon Sep 17 00:00:00 2001 From: Alexandr Morozov Date: Thu, 2 Oct 2014 11:33:12 -0700 Subject: [PATCH 074/592] Rewrite TestBuildCopyAddMultipleFiles to not use fixtures Signed-off-by: Alexandr Morozov --- .../TestCopy/MultipleFiles/Dockerfile | 17 --------- .../TestCopy/MultipleFiles/test_file1 | 0 .../TestCopy/MultipleFiles/test_file2 | 0 .../TestCopy/MultipleFiles/test_file3 | 0 .../TestCopy/MultipleFiles/test_file4 | 0 integration-cli/docker_cli_build_test.go | 37 ++++++++++++++++--- 6 files changed, 32 insertions(+), 22 deletions(-) delete mode 100644 integration-cli/build_tests/TestCopy/MultipleFiles/Dockerfile delete mode 100644 integration-cli/build_tests/TestCopy/MultipleFiles/test_file1 delete mode 100644 
integration-cli/build_tests/TestCopy/MultipleFiles/test_file2 delete mode 100644 integration-cli/build_tests/TestCopy/MultipleFiles/test_file3 delete mode 100644 integration-cli/build_tests/TestCopy/MultipleFiles/test_file4 diff --git a/integration-cli/build_tests/TestCopy/MultipleFiles/Dockerfile b/integration-cli/build_tests/TestCopy/MultipleFiles/Dockerfile deleted file mode 100644 index 4143e65962..0000000000 --- a/integration-cli/build_tests/TestCopy/MultipleFiles/Dockerfile +++ /dev/null @@ -1,17 +0,0 @@ -FROM busybox -RUN echo 'dockerio:x:1001:1001::/bin:/bin/false' >> /etc/passwd -RUN echo 'dockerio:x:1001:' >> /etc/group -RUN mkdir /exists -RUN touch /exists/exists_file -RUN chown -R dockerio.dockerio /exists -COPY test_file1 test_file2 /exists/ -ADD test_file3 test_file4 https://docker.com/robots.txt /exists/ -RUN [ $(ls -l / | grep exists | awk '{print $3":"$4}') = 'dockerio:dockerio' ] -RUN [ $(ls -l /exists/test_file1 | awk '{print $3":"$4}') = 'root:root' ] -RUN [ $(ls -l /exists/test_file2 | awk '{print $3":"$4}') = 'root:root' ] - -RUN [ $(ls -l /exists/test_file3 | awk '{print $3":"$4}') = 'root:root' ] -RUN [ $(ls -l /exists/test_file4 | awk '{print $3":"$4}') = 'root:root' ] -RUN [ $(ls -l /exists/robots.txt | awk '{print $3":"$4}') = 'root:root' ] - -RUN [ $(ls -l /exists/exists_file | awk '{print $3":"$4}') = 'dockerio:dockerio' ] diff --git a/integration-cli/build_tests/TestCopy/MultipleFiles/test_file1 b/integration-cli/build_tests/TestCopy/MultipleFiles/test_file1 deleted file mode 100644 index e69de29bb2..0000000000 diff --git a/integration-cli/build_tests/TestCopy/MultipleFiles/test_file2 b/integration-cli/build_tests/TestCopy/MultipleFiles/test_file2 deleted file mode 100644 index e69de29bb2..0000000000 diff --git a/integration-cli/build_tests/TestCopy/MultipleFiles/test_file3 b/integration-cli/build_tests/TestCopy/MultipleFiles/test_file3 deleted file mode 100644 index e69de29bb2..0000000000 diff --git a/integration-cli/build_tests/TestCopy/MultipleFiles/test_file4 b/integration-cli/build_tests/TestCopy/MultipleFiles/test_file4 deleted file mode 100644 index e69de29bb2..0000000000 diff --git a/integration-cli/docker_cli_build_test.go b/integration-cli/docker_cli_build_test.go index 76105fbf6a..f7d6d932a2 100644 --- a/integration-cli/docker_cli_build_test.go +++ b/integration-cli/docker_cli_build_test.go @@ -304,13 +304,40 @@ RUN [ $(ls -l /exists/exists_file | awk '{print $3":"$4}') = 'dockerio:dockerio' } func TestBuildCopyAddMultipleFiles(t *testing.T) { - buildDirectory := filepath.Join(workingDirectory, "build_tests", "TestCopy") - if out, _, err := dockerCmdInDir(t, buildDirectory, "build", "-t", "testaddimg", "MultipleFiles"); err != nil { - t.Fatalf("build failed to complete: %s, %v", out, err) + name := "testcopymultiplefilestofile" + defer deleteImages(name) + ctx, err := fakeContext(`FROM busybox +RUN echo 'dockerio:x:1001:1001::/bin:/bin/false' >> /etc/passwd +RUN echo 'dockerio:x:1001:' >> /etc/group +RUN mkdir /exists +RUN touch /exists/exists_file +RUN chown -R dockerio.dockerio /exists +COPY test_file1 test_file2 /exists/ +ADD test_file3 test_file4 https://docker.com/robots.txt /exists/ +RUN [ $(ls -l / | grep exists | awk '{print $3":"$4}') = 'dockerio:dockerio' ] +RUN [ $(ls -l /exists/test_file1 | awk '{print $3":"$4}') = 'root:root' ] +RUN [ $(ls -l /exists/test_file2 | awk '{print $3":"$4}') = 'root:root' ] + +RUN [ $(ls -l /exists/test_file3 | awk '{print $3":"$4}') = 'root:root' ] +RUN [ $(ls -l /exists/test_file4 | awk '{print $3":"$4}') = 
'root:root' ]
+RUN [ $(ls -l /exists/robots.txt | awk '{print $3":"$4}') = 'root:root' ]
+
+RUN [ $(ls -l /exists/exists_file | awk '{print $3":"$4}') = 'dockerio:dockerio' ]
+`,
+ map[string]string{
+ "test_file1": "test1",
+ "test_file2": "test2",
+ "test_file3": "test3",
+ "test_file4": "test4",
+ })
+ defer ctx.Close()
+ if err != nil {
+ t.Fatal(err)
 }
- deleteImages("testaddimg")
-
+ if _, err := buildImageFromContext(name, ctx, true); err != nil {
+ t.Fatal(err)
+ }
 logDone("build - multiple file copy/add tests")
 }

From 83c5dced10c4e2a40990e927ccc15a1e0428529c Mon Sep 17 00:00:00 2001
From: Alexandr Morozov
Date: Tue, 14 Oct 2014 10:07:04 -0700
Subject: [PATCH 075/592] Rewrite TestBuildCopySingleFileToRoot to not use fixtures

Signed-off-by: Alexandr Morozov
---
 .../TestCopy/SingleFileToRoot/Dockerfile | 9 -----
 integration-cli/docker_cli_build_test.go | 34 +++++++++----------
 2 files changed, 16 insertions(+), 27 deletions(-)
 delete mode 100644 integration-cli/build_tests/TestCopy/SingleFileToRoot/Dockerfile

diff --git a/integration-cli/build_tests/TestCopy/SingleFileToRoot/Dockerfile b/integration-cli/build_tests/TestCopy/SingleFileToRoot/Dockerfile
deleted file mode 100644
index 38fd09026d..0000000000
--- a/integration-cli/build_tests/TestCopy/SingleFileToRoot/Dockerfile
+++ /dev/null
@@ -1,9 +0,0 @@
-FROM busybox
-RUN echo 'dockerio:x:1001:1001::/bin:/bin/false' >> /etc/passwd
-RUN echo 'dockerio:x:1001:' >> /etc/group
-RUN touch /exists
-RUN chown dockerio.dockerio /exists
-COPY test_file /
-RUN [ $(ls -l /test_file | awk '{print $3":"$4}') = 'root:root' ]
-RUN [ $(ls -l /test_file | awk '{print $1}') = '-rw-r--r--' ]
-RUN [ $(ls -l /exists | awk '{print $3":"$4}') = 'dockerio:dockerio' ]
diff --git a/integration-cli/docker_cli_build_test.go b/integration-cli/docker_cli_build_test.go
index f7d6d932a2..4d379664d7 100644
--- a/integration-cli/docker_cli_build_test.go
+++ b/integration-cli/docker_cli_build_test.go
@@ -636,28 +636,26 @@ ADD . 
/`, } func TestBuildCopySingleFileToRoot(t *testing.T) { - testDirName := "SingleFileToRoot" - sourceDirectory := filepath.Join(workingDirectory, "build_tests", "TestCopy", testDirName) - buildDirectory, err := ioutil.TempDir("", "test-build-add") - defer os.RemoveAll(buildDirectory) - - err = copyWithCP(sourceDirectory, buildDirectory) - if err != nil { - t.Fatalf("failed to copy files to temporary directory: %s", err) - } - - buildDirectory = filepath.Join(buildDirectory, testDirName) - f, err := os.OpenFile(filepath.Join(buildDirectory, "test_file"), os.O_CREATE, 0644) + name := "testcopysinglefiletoroot" + defer deleteImages(name) + ctx, err := fakeContext(`FROM busybox +RUN echo 'dockerio:x:1001:1001::/bin:/bin/false' >> /etc/passwd +RUN echo 'dockerio:x:1001:' >> /etc/group +RUN touch /exists +RUN chown dockerio.dockerio /exists +COPY test_file / +RUN [ $(ls -l /test_file | awk '{print $3":"$4}') = 'root:root' ] +RUN [ $(ls -l /test_file | awk '{print $1}') = '-rw-r--r--' ] +RUN [ $(ls -l /exists | awk '{print $3":"$4}') = 'dockerio:dockerio' ]`, + map[string]string{ + "test_file": "test1", + }) if err != nil { t.Fatal(err) } - f.Close() - if out, _, err := dockerCmdInDir(t, buildDirectory, "build", "-t", "testcopyimg", "."); err != nil { - t.Fatalf("build failed to complete: %s, %v", out, err) + if _, err := buildImageFromContext(name, ctx, true); err != nil { + t.Fatal(err) } - - deleteImages("testcopyimg") - logDone("build - copy single file to root") } From d41cba6aed1ffdda9c3e3441d720dab9afc3eb66 Mon Sep 17 00:00:00 2001 From: Alexandr Morozov Date: Tue, 14 Oct 2014 10:12:11 -0700 Subject: [PATCH 076/592] Rewrite TestBuildCopySingleFileToWorkdir to not use fixtures Signed-off-by: Alexandr Morozov --- .../TestCopy/SingleFileToWorkdir/Dockerfile | 2 -- integration-cli/docker_cli_build_test.go | 36 +++++++++---------- 2 files changed, 18 insertions(+), 20 deletions(-) delete mode 100644 integration-cli/build_tests/TestCopy/SingleFileToWorkdir/Dockerfile diff --git a/integration-cli/build_tests/TestCopy/SingleFileToWorkdir/Dockerfile b/integration-cli/build_tests/TestCopy/SingleFileToWorkdir/Dockerfile deleted file mode 100644 index ba2d797e35..0000000000 --- a/integration-cli/build_tests/TestCopy/SingleFileToWorkdir/Dockerfile +++ /dev/null @@ -1,2 +0,0 @@ -FROM busybox -COPY test_file . diff --git a/integration-cli/docker_cli_build_test.go b/integration-cli/docker_cli_build_test.go index 4d379664d7..511e2dda06 100644 --- a/integration-cli/docker_cli_build_test.go +++ b/integration-cli/docker_cli_build_test.go @@ -661,28 +661,28 @@ RUN [ $(ls -l /exists | awk '{print $3":"$4}') = 'dockerio:dockerio' ]`, // Issue #3960: "ADD src ." 
hangs - adapted for COPY func TestBuildCopySingleFileToWorkdir(t *testing.T) { - testDirName := "SingleFileToWorkdir" - sourceDirectory := filepath.Join(workingDirectory, "build_tests", "TestCopy", testDirName) - buildDirectory, err := ioutil.TempDir("", "test-build-add") - defer os.RemoveAll(buildDirectory) - - err = copyWithCP(sourceDirectory, buildDirectory) - if err != nil { - t.Fatalf("failed to copy files to temporary directory: %s", err) - } - - buildDirectory = filepath.Join(buildDirectory, testDirName) - f, err := os.OpenFile(filepath.Join(buildDirectory, "test_file"), os.O_CREATE, 0644) + name := "testcopysinglefiletoworkdir" + defer deleteImages(name) + ctx, err := fakeContext(`FROM busybox +COPY test_file .`, + map[string]string{ + "test_file": "test1", + }) if err != nil { t.Fatal(err) } - f.Close() - if out, _, err := dockerCmdInDirWithTimeout(5*time.Second, buildDirectory, "build", "-t", "testcopyimg", "."); err != nil { - t.Fatalf("build failed to complete: %s, %v", out, err) + done := make(chan struct{}) + go func() { + if _, err := buildImageFromContext(name, ctx, true); err != nil { + t.Fatal(err) + } + close(done) + }() + select { + case <-time.After(5 * time.Second): + t.Fatal("Build with adding to workdir timed out") + case <-done: } - - deleteImages("testcopyimg") - logDone("build - copy single file to workdir") } From 2248109ff8607264ba40d4a4ac24bad66462d105 Mon Sep 17 00:00:00 2001 From: Alexandr Morozov Date: Tue, 14 Oct 2014 10:15:45 -0700 Subject: [PATCH 077/592] Rewrite TestBuildCopySingleFileToExistDir to not use fixtures Signed-off-by: Alexandr Morozov --- .../TestCopy/SingleFileToExistDir/Dockerfile | 10 ------- .../TestCopy/SingleFileToExistDir/test_file | 0 integration-cli/docker_cli_build_test.go | 26 ++++++++++++++----- 3 files changed, 20 insertions(+), 16 deletions(-) delete mode 100644 integration-cli/build_tests/TestCopy/SingleFileToExistDir/Dockerfile delete mode 100644 integration-cli/build_tests/TestCopy/SingleFileToExistDir/test_file diff --git a/integration-cli/build_tests/TestCopy/SingleFileToExistDir/Dockerfile b/integration-cli/build_tests/TestCopy/SingleFileToExistDir/Dockerfile deleted file mode 100644 index 3edfe661d4..0000000000 --- a/integration-cli/build_tests/TestCopy/SingleFileToExistDir/Dockerfile +++ /dev/null @@ -1,10 +0,0 @@ -FROM busybox -RUN echo 'dockerio:x:1001:1001::/bin:/bin/false' >> /etc/passwd -RUN echo 'dockerio:x:1001:' >> /etc/group -RUN mkdir /exists -RUN touch /exists/exists_file -RUN chown -R dockerio.dockerio /exists -COPY test_file /exists/ -RUN [ $(ls -l / | grep exists | awk '{print $3":"$4}') = 'dockerio:dockerio' ] -RUN [ $(ls -l /exists/test_file | awk '{print $3":"$4}') = 'root:root' ] -RUN [ $(ls -l /exists/exists_file | awk '{print $3":"$4}') = 'dockerio:dockerio' ] diff --git a/integration-cli/build_tests/TestCopy/SingleFileToExistDir/test_file b/integration-cli/build_tests/TestCopy/SingleFileToExistDir/test_file deleted file mode 100644 index e69de29bb2..0000000000 diff --git a/integration-cli/docker_cli_build_test.go b/integration-cli/docker_cli_build_test.go index 511e2dda06..3026a38887 100644 --- a/integration-cli/docker_cli_build_test.go +++ b/integration-cli/docker_cli_build_test.go @@ -687,13 +687,27 @@ COPY test_file .`, } func TestBuildCopySingleFileToExistDir(t *testing.T) { - buildDirectory := filepath.Join(workingDirectory, "build_tests", "TestCopy") - if out, _, err := dockerCmdInDir(t, buildDirectory, "build", "-t", "testcopyimg", "SingleFileToExistDir"); err != nil { - t.Fatalf("build 
failed to complete: %s, %v", out, err) + name := "testcopysinglefiletoexistdir" + defer deleteImages(name) + ctx, err := fakeContext(`FROM busybox +RUN echo 'dockerio:x:1001:1001::/bin:/bin/false' >> /etc/passwd +RUN echo 'dockerio:x:1001:' >> /etc/group +RUN mkdir /exists +RUN touch /exists/exists_file +RUN chown -R dockerio.dockerio /exists +COPY test_file /exists/ +RUN [ $(ls -l / | grep exists | awk '{print $3":"$4}') = 'dockerio:dockerio' ] +RUN [ $(ls -l /exists/test_file | awk '{print $3":"$4}') = 'root:root' ] +RUN [ $(ls -l /exists/exists_file | awk '{print $3":"$4}') = 'dockerio:dockerio' ]`, + map[string]string{ + "test_file": "test1", + }) + if err != nil { + t.Fatal(err) + } + if _, err := buildImageFromContext(name, ctx, true); err != nil { + t.Fatal(err) } - - deleteImages("testcopyimg") - logDone("build - copy single file to existing dir") } From 24d83afd5203f93a22a3d95a212e9ec295aadc8b Mon Sep 17 00:00:00 2001 From: Alexandr Morozov Date: Tue, 14 Oct 2014 10:19:45 -0700 Subject: [PATCH 078/592] Rewrite TestBuildCopySingleFileToNonExistDir to not use fixtures Signed-off-by: Alexandr Morozov --- .../SingleFileToNonExistDir/Dockerfile | 9 ------- .../SingleFileToNonExistDir/test_file | 0 integration-cli/docker_cli_build_test.go | 25 ++++++++++++++----- 3 files changed, 19 insertions(+), 15 deletions(-) delete mode 100644 integration-cli/build_tests/TestCopy/SingleFileToNonExistDir/Dockerfile delete mode 100644 integration-cli/build_tests/TestCopy/SingleFileToNonExistDir/test_file diff --git a/integration-cli/build_tests/TestCopy/SingleFileToNonExistDir/Dockerfile b/integration-cli/build_tests/TestCopy/SingleFileToNonExistDir/Dockerfile deleted file mode 100644 index 33b65a62c7..0000000000 --- a/integration-cli/build_tests/TestCopy/SingleFileToNonExistDir/Dockerfile +++ /dev/null @@ -1,9 +0,0 @@ -FROM busybox -RUN echo 'dockerio:x:1001:1001::/bin:/bin/false' >> /etc/passwd -RUN echo 'dockerio:x:1001:' >> /etc/group -RUN touch /exists -RUN chown dockerio.dockerio /exists -COPY test_file /test_dir/ -RUN [ $(ls -l / | grep test_dir | awk '{print $3":"$4}') = 'root:root' ] -RUN [ $(ls -l /test_dir/test_file | awk '{print $3":"$4}') = 'root:root' ] -RUN [ $(ls -l /exists | awk '{print $3":"$4}') = 'dockerio:dockerio' ] diff --git a/integration-cli/build_tests/TestCopy/SingleFileToNonExistDir/test_file b/integration-cli/build_tests/TestCopy/SingleFileToNonExistDir/test_file deleted file mode 100644 index e69de29bb2..0000000000 diff --git a/integration-cli/docker_cli_build_test.go b/integration-cli/docker_cli_build_test.go index 3026a38887..66d7657f91 100644 --- a/integration-cli/docker_cli_build_test.go +++ b/integration-cli/docker_cli_build_test.go @@ -712,13 +712,26 @@ RUN [ $(ls -l /exists/exists_file | awk '{print $3":"$4}') = 'dockerio:dockerio' } func TestBuildCopySingleFileToNonExistDir(t *testing.T) { - buildDirectory := filepath.Join(workingDirectory, "build_tests", "TestCopy") - if out, _, err := dockerCmdInDir(t, buildDirectory, "build", "-t", "testcopyimg", "SingleFileToNonExistDir"); err != nil { - t.Fatalf("build failed to complete: %s, %v", out, err) + name := "testcopysinglefiletononexistdir" + defer deleteImages(name) + ctx, err := fakeContext(`FROM busybox +RUN echo 'dockerio:x:1001:1001::/bin:/bin/false' >> /etc/passwd +RUN echo 'dockerio:x:1001:' >> /etc/group +RUN touch /exists +RUN chown dockerio.dockerio /exists +COPY test_file /test_dir/ +RUN [ $(ls -l / | grep test_dir | awk '{print $3":"$4}') = 'root:root' ] +RUN [ $(ls -l /test_dir/test_file | awk '{print 
$3":"$4}') = 'root:root' ] +RUN [ $(ls -l /exists | awk '{print $3":"$4}') = 'dockerio:dockerio' ]`, + map[string]string{ + "test_file": "test1", + }) + if err != nil { + t.Fatal(err) + } + if _, err := buildImageFromContext(name, ctx, true); err != nil { + t.Fatal(err) } - - deleteImages("testcopyimg") - logDone("build - copy single file to non-existing dir") } From 832618afc6e6a8c489afb40f94340f158f9680c7 Mon Sep 17 00:00:00 2001 From: Alexandr Morozov Date: Tue, 14 Oct 2014 10:22:06 -0700 Subject: [PATCH 079/592] Rewrite TestBuildCopyDirContentToRoot to not use fixtures Signed-off-by: Alexandr Morozov --- .../TestCopy/DirContentToRoot/Dockerfile | 8 ------- .../DirContentToRoot/test_dir/test_file | 0 integration-cli/docker_cli_build_test.go | 24 ++++++++++++++----- 3 files changed, 18 insertions(+), 14 deletions(-) delete mode 100644 integration-cli/build_tests/TestCopy/DirContentToRoot/Dockerfile delete mode 100644 integration-cli/build_tests/TestCopy/DirContentToRoot/test_dir/test_file diff --git a/integration-cli/build_tests/TestCopy/DirContentToRoot/Dockerfile b/integration-cli/build_tests/TestCopy/DirContentToRoot/Dockerfile deleted file mode 100644 index 45df77e563..0000000000 --- a/integration-cli/build_tests/TestCopy/DirContentToRoot/Dockerfile +++ /dev/null @@ -1,8 +0,0 @@ -FROM busybox -RUN echo 'dockerio:x:1001:1001::/bin:/bin/false' >> /etc/passwd -RUN echo 'dockerio:x:1001:' >> /etc/group -RUN touch /exists -RUN chown dockerio.dockerio exists -COPY test_dir / -RUN [ $(ls -l /test_file | awk '{print $3":"$4}') = 'root:root' ] -RUN [ $(ls -l /exists | awk '{print $3":"$4}') = 'dockerio:dockerio' ] diff --git a/integration-cli/build_tests/TestCopy/DirContentToRoot/test_dir/test_file b/integration-cli/build_tests/TestCopy/DirContentToRoot/test_dir/test_file deleted file mode 100644 index e69de29bb2..0000000000 diff --git a/integration-cli/docker_cli_build_test.go b/integration-cli/docker_cli_build_test.go index 66d7657f91..6293cb8a1a 100644 --- a/integration-cli/docker_cli_build_test.go +++ b/integration-cli/docker_cli_build_test.go @@ -736,13 +736,25 @@ RUN [ $(ls -l /exists | awk '{print $3":"$4}') = 'dockerio:dockerio' ]`, } func TestBuildCopyDirContentToRoot(t *testing.T) { - buildDirectory := filepath.Join(workingDirectory, "build_tests", "TestCopy") - if out, _, err := dockerCmdInDir(t, buildDirectory, "build", "-t", "testcopyimg", "DirContentToRoot"); err != nil { - t.Fatalf("build failed to complete: %s, %v", out, err) + name := "testcopydircontenttoroot" + defer deleteImages(name) + ctx, err := fakeContext(`FROM busybox +RUN echo 'dockerio:x:1001:1001::/bin:/bin/false' >> /etc/passwd +RUN echo 'dockerio:x:1001:' >> /etc/group +RUN touch /exists +RUN chown dockerio.dockerio exists +COPY test_dir / +RUN [ $(ls -l /test_file | awk '{print $3":"$4}') = 'root:root' ] +RUN [ $(ls -l /exists | awk '{print $3":"$4}') = 'dockerio:dockerio' ]`, + map[string]string{ + "test_dir/test_file": "test1", + }) + if err != nil { + t.Fatal(err) + } + if _, err := buildImageFromContext(name, ctx, true); err != nil { + t.Fatal(err) } - - deleteImages("testcopyimg") - logDone("build - copy directory contents to root") } From 4a029259ff41c37eb72a6f221f9725f66491139f Mon Sep 17 00:00:00 2001 From: Alexandr Morozov Date: Tue, 14 Oct 2014 10:26:20 -0700 Subject: [PATCH 080/592] Rewrite TestBuildCopyDirContentToExistDir to not use fixtures Signed-off-by: Alexandr Morozov --- .../TestCopy/DirContentToExistDir/Dockerfile | 10 ------- .../DirContentToExistDir/test_dir/test_file | 0 
integration-cli/docker_cli_build_test.go | 26 ++++++++++++++----- 3 files changed, 20 insertions(+), 16 deletions(-) delete mode 100644 integration-cli/build_tests/TestCopy/DirContentToExistDir/Dockerfile delete mode 100644 integration-cli/build_tests/TestCopy/DirContentToExistDir/test_dir/test_file diff --git a/integration-cli/build_tests/TestCopy/DirContentToExistDir/Dockerfile b/integration-cli/build_tests/TestCopy/DirContentToExistDir/Dockerfile deleted file mode 100644 index d63e8538bb..0000000000 --- a/integration-cli/build_tests/TestCopy/DirContentToExistDir/Dockerfile +++ /dev/null @@ -1,10 +0,0 @@ -FROM busybox -RUN echo 'dockerio:x:1001:1001::/bin:/bin/false' >> /etc/passwd -RUN echo 'dockerio:x:1001:' >> /etc/group -RUN mkdir /exists -RUN touch /exists/exists_file -RUN chown -R dockerio.dockerio /exists -COPY test_dir/ /exists/ -RUN [ $(ls -l / | grep exists | awk '{print $3":"$4}') = 'dockerio:dockerio' ] -RUN [ $(ls -l /exists/exists_file | awk '{print $3":"$4}') = 'dockerio:dockerio' ] -RUN [ $(ls -l /exists/test_file | awk '{print $3":"$4}') = 'root:root' ] diff --git a/integration-cli/build_tests/TestCopy/DirContentToExistDir/test_dir/test_file b/integration-cli/build_tests/TestCopy/DirContentToExistDir/test_dir/test_file deleted file mode 100644 index e69de29bb2..0000000000 diff --git a/integration-cli/docker_cli_build_test.go b/integration-cli/docker_cli_build_test.go index 6293cb8a1a..146ab15ae7 100644 --- a/integration-cli/docker_cli_build_test.go +++ b/integration-cli/docker_cli_build_test.go @@ -759,13 +759,27 @@ RUN [ $(ls -l /exists | awk '{print $3":"$4}') = 'dockerio:dockerio' ]`, } func TestBuildCopyDirContentToExistDir(t *testing.T) { - buildDirectory := filepath.Join(workingDirectory, "build_tests", "TestCopy") - if out, _, err := dockerCmdInDir(t, buildDirectory, "build", "-t", "testcopyimg", "DirContentToExistDir"); err != nil { - t.Fatalf("build failed to complete: %s, %v", out, err) + name := "testcopydircontenttoexistdir" + defer deleteImages(name) + ctx, err := fakeContext(`FROM busybox +RUN echo 'dockerio:x:1001:1001::/bin:/bin/false' >> /etc/passwd +RUN echo 'dockerio:x:1001:' >> /etc/group +RUN mkdir /exists +RUN touch /exists/exists_file +RUN chown -R dockerio.dockerio /exists +COPY test_dir/ /exists/ +RUN [ $(ls -l / | grep exists | awk '{print $3":"$4}') = 'dockerio:dockerio' ] +RUN [ $(ls -l /exists/exists_file | awk '{print $3":"$4}') = 'dockerio:dockerio' ] +RUN [ $(ls -l /exists/test_file | awk '{print $3":"$4}') = 'root:root' ]`, + map[string]string{ + "test_dir/test_file": "test1", + }) + if err != nil { + t.Fatal(err) + } + if _, err := buildImageFromContext(name, ctx, true); err != nil { + t.Fatal(err) } - - deleteImages("testcopyimg") - logDone("build - copy directory contents to existing dir") } From 6582ea574cfcd6cc8e3b1abebe7bfcc7aee05ece Mon Sep 17 00:00:00 2001 From: Alexandr Morozov Date: Tue, 14 Oct 2014 10:29:22 -0700 Subject: [PATCH 081/592] Rewrite TestBuildCopyWholeDirToRoot to not use fixtures Signed-off-by: Alexandr Morozov --- .../TestCopy/WholeDirToRoot/Dockerfile | 11 ----- integration-cli/docker_cli_build_test.go | 40 +++++++++---------- 2 files changed, 18 insertions(+), 33 deletions(-) delete mode 100644 integration-cli/build_tests/TestCopy/WholeDirToRoot/Dockerfile diff --git a/integration-cli/build_tests/TestCopy/WholeDirToRoot/Dockerfile b/integration-cli/build_tests/TestCopy/WholeDirToRoot/Dockerfile deleted file mode 100644 index 91be29fe7a..0000000000 --- 
a/integration-cli/build_tests/TestCopy/WholeDirToRoot/Dockerfile +++ /dev/null @@ -1,11 +0,0 @@ -FROM busybox -RUN echo 'dockerio:x:1001:1001::/bin:/bin/false' >> /etc/passwd -RUN echo 'dockerio:x:1001:' >> /etc/group -RUN touch /exists -RUN chown dockerio.dockerio exists -COPY test_dir /test_dir -RUN [ $(ls -l / | grep test_dir | awk '{print $3":"$4}') = 'root:root' ] -RUN [ $(ls -l / | grep test_dir | awk '{print $1}') = 'drwxr-xr-x' ] -RUN [ $(ls -l /test_dir/test_file | awk '{print $3":"$4}') = 'root:root' ] -RUN [ $(ls -l /test_dir/test_file | awk '{print $1}') = '-rw-r--r--' ] -RUN [ $(ls -l /exists | awk '{print $3":"$4}') = 'dockerio:dockerio' ] diff --git a/integration-cli/docker_cli_build_test.go b/integration-cli/docker_cli_build_test.go index 146ab15ae7..26901afb5c 100644 --- a/integration-cli/docker_cli_build_test.go +++ b/integration-cli/docker_cli_build_test.go @@ -784,32 +784,28 @@ RUN [ $(ls -l /exists/test_file | awk '{print $3":"$4}') = 'root:root' ]`, } func TestBuildCopyWholeDirToRoot(t *testing.T) { - testDirName := "WholeDirToRoot" - sourceDirectory := filepath.Join(workingDirectory, "build_tests", "TestCopy", testDirName) - buildDirectory, err := ioutil.TempDir("", "test-build-add") - defer os.RemoveAll(buildDirectory) - - err = copyWithCP(sourceDirectory, buildDirectory) - if err != nil { - t.Fatalf("failed to copy files to temporary directory: %s", err) - } - - buildDirectory = filepath.Join(buildDirectory, testDirName) - testDir := filepath.Join(buildDirectory, "test_dir") - if err := os.MkdirAll(testDir, 0755); err != nil { - t.Fatal(err) - } - f, err := os.OpenFile(filepath.Join(testDir, "test_file"), os.O_CREATE, 0644) + name := "testcopywholedirtoroot" + defer deleteImages(name) + ctx, err := fakeContext(`FROM busybox +RUN echo 'dockerio:x:1001:1001::/bin:/bin/false' >> /etc/passwd +RUN echo 'dockerio:x:1001:' >> /etc/group +RUN touch /exists +RUN chown dockerio.dockerio exists +COPY test_dir /test_dir +RUN [ $(ls -l / | grep test_dir | awk '{print $3":"$4}') = 'root:root' ] +RUN [ $(ls -l / | grep test_dir | awk '{print $1}') = 'drwxr-xr-x' ] +RUN [ $(ls -l /test_dir/test_file | awk '{print $3":"$4}') = 'root:root' ] +RUN [ $(ls -l /test_dir/test_file | awk '{print $1}') = '-rw-r--r--' ] +RUN [ $(ls -l /exists | awk '{print $3":"$4}') = 'dockerio:dockerio' ]`, + map[string]string{ + "test_dir/test_file": "test1", + }) if err != nil { t.Fatal(err) } - f.Close() - if out, _, err := dockerCmdInDir(t, buildDirectory, "build", "-t", "testcopyimg", "."); err != nil { - t.Fatalf("build failed to complete: %s, %v", out, err) + if _, err := buildImageFromContext(name, ctx, true); err != nil { + t.Fatal(err) } - - deleteImages("testcopyimg") - logDone("build - copy whole directory to root") } From c8a5d56fd789b50b78ad58c07a535aaf01af77f7 Mon Sep 17 00:00:00 2001 From: Alexandr Morozov Date: Tue, 14 Oct 2014 10:32:00 -0700 Subject: [PATCH 082/592] Rewrite TestBuildCopyEtcToRoot to not use fixtures Signed-off-by: Alexandr Morozov --- .../build_tests/TestCopy/EtcToRoot/Dockerfile | 2 -- integration-cli/docker_cli_build_test.go | 17 ++++++++++++----- 2 files changed, 12 insertions(+), 7 deletions(-) delete mode 100644 integration-cli/build_tests/TestCopy/EtcToRoot/Dockerfile diff --git a/integration-cli/build_tests/TestCopy/EtcToRoot/Dockerfile b/integration-cli/build_tests/TestCopy/EtcToRoot/Dockerfile deleted file mode 100644 index b4f319f80f..0000000000 --- a/integration-cli/build_tests/TestCopy/EtcToRoot/Dockerfile +++ /dev/null @@ -1,2 +0,0 @@ -FROM scratch -COPY . 
/ diff --git a/integration-cli/docker_cli_build_test.go b/integration-cli/docker_cli_build_test.go index 26901afb5c..1c6b6b5ac3 100644 --- a/integration-cli/docker_cli_build_test.go +++ b/integration-cli/docker_cli_build_test.go @@ -810,12 +810,19 @@ RUN [ $(ls -l /exists | awk '{print $3":"$4}') = 'dockerio:dockerio' ]`, } func TestBuildCopyEtcToRoot(t *testing.T) { - buildDirectory := filepath.Join(workingDirectory, "build_tests", "TestCopy") - if out, _, err := dockerCmdInDir(t, buildDirectory, "build", "-t", "testcopyimg", "EtcToRoot"); err != nil { - t.Fatalf("build failed to complete: %s, %v", out, err) + name := "testcopyetctoroot" + defer deleteImages(name) + ctx, err := fakeContext(`FROM scratch +COPY . /`, + map[string]string{ + "etc/test_file": "test1", + }) + if err != nil { + t.Fatal(err) + } + if _, err := buildImageFromContext(name, ctx, true); err != nil { + t.Fatal(err) } - - deleteImages("testcopyimg") logDone("build - copy etc directory to root") } From 98e7608b4cec04463781ccb314259873bbb56a16 Mon Sep 17 00:00:00 2001 From: Alexandr Morozov Date: Tue, 14 Oct 2014 10:37:05 -0700 Subject: [PATCH 083/592] Rewrite TestBuildCopyDisallowRemote to not use fixtures Signed-off-by: Alexandr Morozov --- .../build_tests/TestCopy/DisallowRemote/Dockerfile | 2 -- integration-cli/docker_cli_build_test.go | 14 +++++++------- 2 files changed, 7 insertions(+), 9 deletions(-) delete mode 100644 integration-cli/build_tests/TestCopy/DisallowRemote/Dockerfile diff --git a/integration-cli/build_tests/TestCopy/DisallowRemote/Dockerfile b/integration-cli/build_tests/TestCopy/DisallowRemote/Dockerfile deleted file mode 100644 index e6bc0c0dd2..0000000000 --- a/integration-cli/build_tests/TestCopy/DisallowRemote/Dockerfile +++ /dev/null @@ -1,2 +0,0 @@ -FROM busybox -COPY https://index.docker.io/robots.txt / diff --git a/integration-cli/docker_cli_build_test.go b/integration-cli/docker_cli_build_test.go index 1c6b6b5ac3..ad8f22123b 100644 --- a/integration-cli/docker_cli_build_test.go +++ b/integration-cli/docker_cli_build_test.go @@ -827,14 +827,14 @@ COPY . 
/`,
 }
 func TestBuildCopyDisallowRemote(t *testing.T) {
- buildDirectory := filepath.Join(workingDirectory, "build_tests", "TestCopy")
- buildCmd := exec.Command(dockerBinary, "build", "-t", "testcopyimg", "DisallowRemote")
- buildCmd.Dir = buildDirectory
- if out, _, err := runCommandWithOutput(buildCmd); err == nil {
- t.Fatalf("building the image should've failed; output: %s", out)
+ name := "testcopydisallowremote"
+ defer deleteImages(name)
+ _, out, err := buildImageWithOut(name, `FROM scratch
+COPY https://index.docker.io/robots.txt /`,
+ true)
+ if err == nil || !strings.Contains(out, "Source can't be a URL for COPY") {
+ t.Fatalf("Error should be about disallowed remote source, got err: %s, out: %q", err, out)
 }
-
- deleteImages("testcopyimg")
 logDone("build - copy - disallow copy from remote")
 }

From 93d3c8e71d0ca8f33d905c455c0520c423fd7eb9 Mon Sep 17 00:00:00 2001
From: Alexandr Morozov
Date: Tue, 14 Oct 2014 10:40:07 -0700
Subject: [PATCH 084/592] Just remove integration-cli/build_tests/TestCopy/MultipleFilesToFile because it wasn't used

Signed-off-by: Alexandr Morozov
---
 .../build_tests/TestCopy/MultipleFilesToFile/Dockerfile | 7 -------
 .../build_tests/TestCopy/MultipleFilesToFile/test_file1 | 0
 .../build_tests/TestCopy/MultipleFilesToFile/test_file2 | 0
 .../build_tests/TestCopy/MultipleFilesToFile/test_file3 | 0
 4 files changed, 7 deletions(-)
 delete mode 100644 integration-cli/build_tests/TestCopy/MultipleFilesToFile/Dockerfile
 delete mode 100644 integration-cli/build_tests/TestCopy/MultipleFilesToFile/test_file1
 delete mode 100644 integration-cli/build_tests/TestCopy/MultipleFilesToFile/test_file2
 delete mode 100644 integration-cli/build_tests/TestCopy/MultipleFilesToFile/test_file3

diff --git a/integration-cli/build_tests/TestCopy/MultipleFilesToFile/Dockerfile b/integration-cli/build_tests/TestCopy/MultipleFilesToFile/Dockerfile
deleted file mode 100644
index 520d356c72..0000000000
--- a/integration-cli/build_tests/TestCopy/MultipleFilesToFile/Dockerfile
+++ /dev/null
@@ -1,7 +0,0 @@
-FROM busybox
-RUN echo 'dockerio:x:1001:1001::/bin:/bin/false' >> /etc/passwd
-RUN echo 'dockerio:x:1001:' >> /etc/group
-RUN mkdir /exists
-RUN chown -R dockerio.dockerio /exists
-COPY test_file1 /exists/
-ADD test_file2 test_file3 /exists/test_file1
diff --git a/integration-cli/build_tests/TestCopy/MultipleFilesToFile/test_file1 b/integration-cli/build_tests/TestCopy/MultipleFilesToFile/test_file1
deleted file mode 100644
index e69de29bb2..0000000000
diff --git a/integration-cli/build_tests/TestCopy/MultipleFilesToFile/test_file2 b/integration-cli/build_tests/TestCopy/MultipleFilesToFile/test_file2
deleted file mode 100644
index e69de29bb2..0000000000
diff --git a/integration-cli/build_tests/TestCopy/MultipleFilesToFile/test_file3 b/integration-cli/build_tests/TestCopy/MultipleFilesToFile/test_file3
deleted file mode 100644
index e69de29bb2..0000000000

From 6f09d064bd438ab4425d6105f40887f02bb9e97e Mon Sep 17 00:00:00 2001
From: Alexandr Morozov
Date: Mon, 6 Oct 2014 09:41:22 -0700
Subject: [PATCH 085/592] Use logs instead of attach for builder

Signed-off-by: Alexandr Morozov
---
 builder/internals.go | 23 ++++++++---------------
 integration-cli/docker_cli_build_test.go | 19 +++++++++++++++++++
 2 files changed, 27 insertions(+), 15 deletions(-)

diff --git a/builder/internals.go b/builder/internals.go
index 5fd03f7745..20f3380fb8 100644
--- a/builder/internals.go
+++ b/builder/internals.go
@@ -24,7 +24,6 @@ import (
 "github.com/docker/docker/pkg/archive"
 "github.com/docker/docker/pkg/log"
"github.com/docker/docker/pkg/parsers" - "github.com/docker/docker/pkg/promise" "github.com/docker/docker/pkg/symlink" "github.com/docker/docker/pkg/system" "github.com/docker/docker/pkg/tarsum" @@ -512,25 +511,19 @@ func (b *Builder) create() (*daemon.Container, error) { } func (b *Builder) run(c *daemon.Container) error { - var errCh chan error - if b.Verbose { - errCh = promise.Go(func() error { - // FIXME: call the 'attach' job so that daemon.Attach can be made private - // - // FIXME (LK4D4): Also, maybe makes sense to call "logs" job, it is like attach - // but without hijacking for stdin. Also, with attach there can be race - // condition because of some output already was printed before it. - return <-b.Daemon.Attach(&c.StreamConfig, c.Config.OpenStdin, c.Config.StdinOnce, c.Config.Tty, nil, nil, b.OutStream, b.ErrStream) - }) - } - //start the container if err := c.Start(); err != nil { return err } - if errCh != nil { - if err := <-errCh; err != nil { + if b.Verbose { + logsJob := b.Engine.Job("logs", c.ID) + logsJob.Setenv("follow", "1") + logsJob.Setenv("stdout", "1") + logsJob.Setenv("stderr", "1") + logsJob.Stdout.Add(b.OutStream) + logsJob.Stderr.Add(b.ErrStream) + if err := logsJob.Run(); err != nil { return err } } diff --git a/integration-cli/docker_cli_build_test.go b/integration-cli/docker_cli_build_test.go index 76105fbf6a..c9b9db6b3c 100644 --- a/integration-cli/docker_cli_build_test.go +++ b/integration-cli/docker_cli_build_test.go @@ -2752,3 +2752,22 @@ func TestBuildVerifySingleQuoteFails(t *testing.T) { logDone("build - verify single quotes fail") } + +func TestBuildVerboseOut(t *testing.T) { + name := "testbuildverboseout" + defer deleteImages(name) + + _, out, err := buildImageWithOut(name, + `FROM busybox +RUN echo 123`, + false) + + if err != nil { + t.Fatal(err) + } + if !strings.Contains(out, "\n123\n") { + t.Fatalf("Output should contain %q: %q", "123", out) + } + + logDone("build - verbose output from commands") +} From 2db1caee4f23e81107b2647c06b4c677f6ecd7a1 Mon Sep 17 00:00:00 2001 From: Alexandr Morozov Date: Mon, 6 Oct 2014 11:57:18 -0700 Subject: [PATCH 086/592] Make daemon.Attach private Signed-off-by: Alexandr Morozov --- daemon/attach.go | 10 ++-------- daemon/exec.go | 2 +- 2 files changed, 3 insertions(+), 9 deletions(-) diff --git a/daemon/attach.go b/daemon/attach.go index 7ccaadf442..e115dac2e0 100644 --- a/daemon/attach.go +++ b/daemon/attach.go @@ -103,7 +103,7 @@ func (daemon *Daemon) ContainerAttach(job *engine.Job) engine.Status { cStderr = job.Stderr } - <-daemon.Attach(&container.StreamConfig, container.Config.OpenStdin, container.Config.StdinOnce, container.Config.Tty, cStdin, cStdinCloser, cStdout, cStderr) + <-daemon.attach(&container.StreamConfig, container.Config.OpenStdin, container.Config.StdinOnce, container.Config.Tty, cStdin, cStdinCloser, cStdout, cStderr) // If we are in stdinonce mode, wait for the process to end // otherwise, simply return if container.Config.StdinOnce && !container.Config.Tty { @@ -113,13 +113,7 @@ func (daemon *Daemon) ContainerAttach(job *engine.Job) engine.Status { return engine.StatusOK } -// FIXME: this should be private, and every outside subsystem -// should go through the "container_attach" job. But that would require -// that job to be properly documented, as well as the relationship between -// Attach and ContainerAttach. -// -// This method is in use by builder/builder.go. 
-func (daemon *Daemon) Attach(streamConfig *StreamConfig, openStdin, stdinOnce, tty bool, stdin io.ReadCloser, stdinCloser io.Closer, stdout io.Writer, stderr io.Writer) chan error { +func (daemon *Daemon) attach(streamConfig *StreamConfig, openStdin, stdinOnce, tty bool, stdin io.ReadCloser, stdinCloser io.Closer, stdout io.Writer, stderr io.Writer) chan error { var ( cStdout, cStderr io.ReadCloser nJobs int diff --git a/daemon/exec.go b/daemon/exec.go index 0ab1c0bf5f..a6113b0fca 100644 --- a/daemon/exec.go +++ b/daemon/exec.go @@ -204,7 +204,7 @@ func (d *Daemon) ContainerExecStart(job *engine.Job) engine.Status { execConfig.StreamConfig.stdinPipe = ioutils.NopWriteCloser(ioutil.Discard) // Silently drop stdin } - attachErr := d.Attach(&execConfig.StreamConfig, execConfig.OpenStdin, false, execConfig.ProcessConfig.Tty, cStdin, cStdinCloser, cStdout, cStderr) + attachErr := d.attach(&execConfig.StreamConfig, execConfig.OpenStdin, false, execConfig.ProcessConfig.Tty, cStdin, cStdinCloser, cStdout, cStderr) execErr := make(chan error) From df0e0c76831bed08cf5e08ac9a1abebf6739da23 Mon Sep 17 00:00:00 2001 From: Erik Hollensbe Date: Fri, 17 Oct 2014 19:15:07 +0000 Subject: [PATCH 087/592] builder: fix escaping for ENV variables. Docker-DCO-1.1-Signed-off-by: Erik Hollensbe (github: erikh) --- builder/support.go | 14 +++++- integration-cli/docker_cli_build_test.go | 55 ++++++++++++++++++++++++ 2 files changed, 68 insertions(+), 1 deletion(-) diff --git a/builder/support.go b/builder/support.go index a084190f2c..6c7ac4096e 100644 --- a/builder/support.go +++ b/builder/support.go @@ -10,13 +10,25 @@ var ( // `\$` - match literal $ // `[[:alnum:]_]+` - match things like `$SOME_VAR` // `{[[:alnum:]_]+}` - match things like `${SOME_VAR}` - tokenEnvInterpolation = regexp.MustCompile(`(\\\\+|[^\\]|\b|\A)\$([[:alnum:]_]+|{[[:alnum:]_]+})`) + tokenEnvInterpolation = regexp.MustCompile(`(\\|\\\\+|[^\\]|\b|\A)\$([[:alnum:]_]+|{[[:alnum:]_]+})`) // this intentionally punts on more exotic interpolations like ${SOME_VAR%suffix} and lets the shell handle those directly ) // handle environment replacement. Used in dispatcher. 
func (b *Builder) replaceEnv(str string) string { for _, match := range tokenEnvInterpolation.FindAllString(str, -1) { + idx := strings.Index(match, "\\$") + if idx != -1 { + if idx+2 >= len(match) { + str = strings.Replace(str, match, "\\$", -1) + continue + } + + stripped := match[idx+2:] + str = strings.Replace(str, match, "$"+stripped, -1) + continue + } + match = match[strings.Index(match, "$"):] matchKey := strings.Trim(match, "${}") diff --git a/integration-cli/docker_cli_build_test.go b/integration-cli/docker_cli_build_test.go index c248477c4c..7f805ba396 100644 --- a/integration-cli/docker_cli_build_test.go +++ b/integration-cli/docker_cli_build_test.go @@ -15,6 +15,61 @@ import ( "github.com/docker/docker/pkg/archive" ) +func TestBuildEnvEscapes(t *testing.T) { + name := "testbuildenvescapes" + defer deleteAllContainers() + defer deleteImages(name) + _, err := buildImage(name, + ` + FROM busybox + ENV TEST foo + CMD echo \$ + `, + true) + + out, _, err := runCommandWithOutput(exec.Command(dockerBinary, "run", "-t", name)) + + if err != nil { + t.Fatal(err) + } + + if strings.TrimSpace(out) != "$" { + t.Fatalf("Env TEST was not overwritten with bar when foo was supplied to dockerfile: was %q", strings.TrimSpace(out)) + } + + logDone("build - env should handle \\$ properly") +} + +func TestBuildEnvOverwrite(t *testing.T) { + name := "testbuildenvoverwrite" + defer deleteAllContainers() + defer deleteImages(name) + + _, err := buildImage(name, + ` + FROM busybox + ENV TEST foo + CMD echo \${TEST} + `, + true) + + if err != nil { + t.Fatal(err) + } + + out, _, err := runCommandWithOutput(exec.Command(dockerBinary, "run", "-e", "TEST=bar", "-t", name)) + + if err != nil { + t.Fatal(err) + } + + if strings.TrimSpace(out) != "bar" { + t.Fatalf("Env TEST was not overwritten with bar when foo was supplied to dockerfile: was %q", strings.TrimSpace(out)) + } + + logDone("build - env should overwrite builder ENV during run") +} + func TestBuildOnBuildForbiddenMaintainerInSourceImage(t *testing.T) { name := "testbuildonbuildforbiddenmaintainerinsourceimage" defer deleteImages(name) From c980fe09b77700fcaf47459e91a149876b7abef9 Mon Sep 17 00:00:00 2001 From: Doug Davis Date: Wed, 8 Oct 2014 14:55:02 -0700 Subject: [PATCH 088/592] Add a testcase to make sure we don't squash tabs or convert them to spaces This is in response to @SvenDowideit asking if we had a "tab" testcase in https://github.com/docker/docker/issues/2315#issuecomment-58133508 I couldn't find one so I'm adding one Closes #2315 Signed-off-by: Doug Davis --- integration-cli/docker_cli_build_test.go | 19 +++++++++++++++++++ 1 file changed, 19 insertions(+) diff --git a/integration-cli/docker_cli_build_test.go b/integration-cli/docker_cli_build_test.go index 8f3f008c13..12b8e00b6f 100644 --- a/integration-cli/docker_cli_build_test.go +++ b/integration-cli/docker_cli_build_test.go @@ -2907,3 +2907,22 @@ RUN echo 123`, logDone("build - verbose output from commands") } + +func TestBuildWithTabs(t *testing.T) { + name := "testbuildwithtabs" + defer deleteImages(name) + _, err := buildImage(name, + "FROM busybox\nRUN echo\tone\t\ttwo", true) + if err != nil { + t.Fatal(err) + } + res, err := inspectFieldJSON(name, "ContainerConfig.Cmd") + if err != nil { + t.Fatal(err) + } + expected := "[\"/bin/sh\",\"-c\",\"echo\\u0009one\\u0009\\u0009two\"]" + if res != expected { + t.Fatalf("Missing tabs.\nGot:%s\nExp:%s", res, expected) + } + logDone("build - with tabs") +} From dbe24a048bc557d34925e5ca9f7dd99f57d1d9e1 Mon Sep 17 00:00:00 2001 From: 
Amit Bakshi Date: Mon, 4 Aug 2014 12:32:12 -0700 Subject: [PATCH 089/592] install.sh: Fix for Amazon's Linux AMI Use /etc/os-release to determine distro Contents of /etc/os-release on Amazon Linux AMI 2014.09: NAME="Amazon Linux AMI" VERSION="2014.09" ID="amzn" ID_LIKE="rhel fedora" VERSION_ID="2014.09" PRETTY_NAME="Amazon Linux AMI 2014.09" ANSI_COLOR="0;33" CPE_NAME="cpe:/o:amazon:linux:2014.09:ga" HOME_URL="http://aws.amazon.com/amazon-linux-ami/" Signed-off-by: Amit Bakshi Signed-off-by: Jessica Frazelle --- hack/install.sh | 20 +++++++++++++++----- 1 file changed, 15 insertions(+), 5 deletions(-) diff --git a/hack/install.sh b/hack/install.sh index 9652e4672d..e1ba23d82f 100755 --- a/hack/install.sh +++ b/hack/install.sh @@ -75,13 +75,23 @@ fi if [ -z "$lsb_dist" ] && [ -r /etc/fedora-release ]; then lsb_dist='Fedora' fi +if [ -z "$lsb_dist" ] && [ -r /etc/os-release ]; then + lsb_dist="$(. /etc/os-release && echo "$NAME" | cut -d' ' -f1)" +fi case "$lsb_dist" in - Fedora) - ( - set -x - $sh_c 'sleep 3; yum -y -q install docker-io' - ) + Amazon|Fedora) + if [ "$lsb_dist" = 'Amazon' ]; then + ( + set -x + $sh_c 'sleep 3; yum -y -q install docker' + ) + else + ( + set -x + $sh_c 'sleep 3; yum -y -q install docker-io' + ) + fi if command_exists docker && [ -e /var/run/docker.sock ]; then ( set -x From b7249317c1a703df8a4d7b113cfc839ebbc56a29 Mon Sep 17 00:00:00 2001 From: Jessica Frazelle Date: Fri, 17 Oct 2014 16:21:09 -0700 Subject: [PATCH 090/592] check ID in os-release instead Docker-DCO-1.1-Signed-off-by: Jessica Frazelle (github: jfrazelle) --- hack/install.sh | 15 ++++++++------- 1 file changed, 8 insertions(+), 7 deletions(-) diff --git a/hack/install.sh b/hack/install.sh index e1ba23d82f..395d59a6e6 100755 --- a/hack/install.sh +++ b/hack/install.sh @@ -70,18 +70,19 @@ if [ -z "$lsb_dist" ] && [ -r /etc/lsb-release ]; then lsb_dist="$(. /etc/lsb-release && echo "$DISTRIB_ID")" fi if [ -z "$lsb_dist" ] && [ -r /etc/debian_version ]; then - lsb_dist='Debian' + lsb_dist='debian' fi if [ -z "$lsb_dist" ] && [ -r /etc/fedora-release ]; then - lsb_dist='Fedora' + lsb_dist='fedora' fi if [ -z "$lsb_dist" ] && [ -r /etc/os-release ]; then - lsb_dist="$(. /etc/os-release && echo "$NAME" | cut -d' ' -f1)" + lsb_dist="$(. /etc/os-release && echo "$ID")" fi +lsb_dist="$(echo "$lsb_dist" | tr '[:upper:]' '[:lower:]')" case "$lsb_dist" in - Amazon|Fedora) - if [ "$lsb_dist" = 'Amazon' ]; then + amzn|fedora) + if [ "$lsb_dist" = 'amzn' ]; then ( set -x $sh_c 'sleep 3; yum -y -q install docker' @@ -111,7 +112,7 @@ case "$lsb_dist" in exit 0 ;; - Ubuntu|Debian|LinuxMint) + ubuntu|debian|linuxmint) export DEBIAN_FRONTEND=noninteractive did_apt_get_update= @@ -188,7 +189,7 @@ case "$lsb_dist" in exit 0 ;; - Gentoo) + gentoo) if [ "$url" = "https://test.docker.com/" ]; then echo >&2 echo >&2 ' You appear to be trying to install the latest nightly build in Gentoo.' From 91502ba66bfc2a3edafb2ddf7f27393250bfa459 Mon Sep 17 00:00:00 2001 From: Marc Tamsky Date: Fri, 17 Oct 2014 16:58:07 -0700 Subject: [PATCH 091/592] Add mention of e2fsprogs to runtime dependencies. 
Signed-off-by: Marc Tamsky (github: tamsky) --- hack/PACKAGERS.md | 1 + 1 file changed, 1 insertion(+) diff --git a/hack/PACKAGERS.md b/hack/PACKAGERS.md index 265f7d676b..823b1721b9 100644 --- a/hack/PACKAGERS.md +++ b/hack/PACKAGERS.md @@ -267,6 +267,7 @@ installed and available at runtime: * iptables version 1.4 or later * procps (or similar provider of a "ps" executable) +* e2fsprogs version 1.4.12 or later (in use: mkfs.ext4, mkfs.xfs, tune2fs) * XZ Utils version 4.9 or later * a [properly mounted](https://github.com/tianon/cgroupfs-mount/blob/master/cgroupfs-mount) From 65edb07065e9e8a08090a4ac88cf449b7faaff09 Mon Sep 17 00:00:00 2001 From: Phil Estes Date: Sat, 18 Oct 2014 22:47:48 -0400 Subject: [PATCH 092/592] Return container exit code with start -a/-i Addresses #8555 Docker-DCO-1.1-Signed-off-by: Phil Estes --- api/client/commands.go | 15 ++++++++--- integration-cli/docker_cli_start_test.go | 32 ++++++++++++++++++++++++ 2 files changed, 44 insertions(+), 3 deletions(-) diff --git a/api/client/commands.go b/api/client/commands.go index 6c4e5c55fe..8312b08c33 100644 --- a/api/client/commands.go +++ b/api/client/commands.go @@ -619,13 +619,13 @@ func (cli *DockerCli) CmdStart(args ...string) error { return fmt.Errorf("You cannot start and attach multiple containers at once.") } - steam, _, err := cli.call("GET", "/containers/"+cmd.Arg(0)+"/json", nil, false) + stream, _, err := cli.call("GET", "/containers/"+cmd.Arg(0)+"/json", nil, false) if err != nil { return err } env := engine.Env{} - if err := env.Decode(steam); err != nil { + if err := env.Decode(stream); err != nil { return err } config := env.GetSubEnv("Config") @@ -681,7 +681,16 @@ func (cli *DockerCli) CmdStart(args ...string) error { log.Errorf("Error monitoring TTY size: %s", err) } } - return <-cErr + if attchErr := <-cErr; attchErr != nil { + return attchErr + } + _, status, err := getExitCode(cli, cmd.Arg(0)) + if err != nil { + return err + } + if status != 0 { + return &utils.StatusError{StatusCode: status} + } } return nil } diff --git a/integration-cli/docker_cli_start_test.go b/integration-cli/docker_cli_start_test.go index 18ad96aef1..af0a785185 100644 --- a/integration-cli/docker_cli_start_test.go +++ b/integration-cli/docker_cli_start_test.go @@ -1,7 +1,9 @@ package main import ( + "fmt" "os/exec" + "strings" "testing" "time" ) @@ -36,3 +38,33 @@ func TestStartAttachReturnsOnError(t *testing.T) { logDone("start - error on start with attach exits") } + +// gh#8555: Exit code should be passed through when using start -a +func TestStartAttachCorrectExitCode(t *testing.T) { + defer deleteAllContainers() + + runCmd := exec.Command(dockerBinary, "run", "-d", "busybox", "sh", "-c", "sleep 2; exit 1") + out, _, _, err := runCommandWithStdoutStderr(runCmd) + if err != nil { + t.Fatalf("failed to run container: %v, output: %q", err, out) + } + + out = stripTrailingCharacters(out) + + // make sure the container has exited before trying the "start -a" + waitCmd := exec.Command(dockerBinary, "wait", out) + if out, _, err = runCommandWithOutput(waitCmd); err != nil { + t.Fatal(out, err) + } + + startCmd := exec.Command(dockerBinary, "start", "-a", out) + startOut, exitCode, err := runCommandWithOutput(startCmd) + if err != nil && !strings.Contains("exit status 1", fmt.Sprintf("%s", err)) { + t.Fatalf("start command failed unexpectedly with error: %v, output: %q", err, startOut) + } + if exitCode != 1 { + t.Fatalf("start -a did not respond with proper exit code: expected 1, got %d", exitCode) + } + + logDone("start - 
correct exit code returned with -a") +} From d2cd8e77a6ce83fdfafd32828610e8cd3ed258eb Mon Sep 17 00:00:00 2001 From: Phil Estes Date: Sun, 19 Oct 2014 02:47:53 -0400 Subject: [PATCH 093/592] Clean up comment sections and fix typos in CmdRun Docker-DCO-1.1-Signed-off-by: Phil Estes (github: estesp) --- api/client/commands.go | 18 +++++++++--------- 1 file changed, 9 insertions(+), 9 deletions(-) diff --git a/api/client/commands.go b/api/client/commands.go index 2c44bb63c5..8618d89f5e 100644 --- a/api/client/commands.go +++ b/api/client/commands.go @@ -2186,7 +2186,7 @@ func (cli *DockerCli) CmdRun(args ...string) error { config.StdinOnce = false } - // Disable flSigProxy in case on TTY + // Disable flSigProxy when in TTY mode sigProxy := *flSigProxy if config.Tty { sigProxy = false @@ -2208,7 +2208,7 @@ func (cli *DockerCli) CmdRun(args ...string) error { ) if !config.AttachStdout && !config.AttachStderr { - // Make this asynchrone in order to let the client write to stdin before having to read the ID + // Make this asynchronous to allow the client to write to stdin before having to read the ID waitDisplayId = make(chan struct{}) go func() { defer close(waitDisplayId) @@ -2220,7 +2220,7 @@ func (cli *DockerCli) CmdRun(args ...string) error { return ErrConflictRestartPolicyAndAutoRemove } - // We need to instanciate the chan because the select needs it. It can + // We need to instantiate the chan because the select needs it. It can // be closed but can't be uninitialized. hijacked := make(chan io.Closer) @@ -2267,8 +2267,8 @@ func (cli *DockerCli) CmdRun(args ...string) error { // Acknowledge the hijack before starting select { case closer := <-hijacked: - // Make sure that hijack gets closed when returning. (result - // in closing hijack chan and freeing server's goroutines. + // Make sure that the hijack gets closed when returning (results + // in closing the hijack chan and freeing server's goroutines) if closer != nil { defer closer.Close() } @@ -2320,15 +2320,15 @@ func (cli *DockerCli) CmdRun(args ...string) error { return err } } else { + // No Autoremove: Simply retrieve the exit code if !config.Tty { - // In non-tty mode, we can't dettach, so we know we need to wait. + // In non-TTY mode, we can't detach, so we must wait for container exit if status, err = waitForExit(cli, runResult.Get("Id")); err != nil { return err } } else { - // In TTY mode, there is a race. If the process dies too slowly, the state can be update after the getExitCode call - // and result in a wrong exit code. 
- // No Autoremove: Simply retrieve the exit code + // In TTY mode, there is a race: if the process dies too slowly, the state could + // be updated after the getExitCode call and result in the wrong exit code being reported if _, status, err = getExitCode(cli, runResult.Get("Id")); err != nil { return err } From 18141652ba4654458e0cd78dbabe6fc6cc1729ee Mon Sep 17 00:00:00 2001 From: Madhu Venugopal Date: Sun, 19 Oct 2014 10:06:58 -0700 Subject: [PATCH 094/592] Fixed a minor docs issue in a Dockerfile Example The Dockerfile Instruction to create the .vnc directory results in a failure : -storepasswd failed for file: /root/.vnc/passwd Signed-off-by: Madhu Venugopal --- docs/sources/reference/builder.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/sources/reference/builder.md b/docs/sources/reference/builder.md index 2678a87a19..2f36942ce6 100644 --- a/docs/sources/reference/builder.md +++ b/docs/sources/reference/builder.md @@ -642,7 +642,7 @@ For example you might add something like this: # Install vnc, xvfb in order to create a 'fake' display and firefox RUN apt-get update && apt-get install -y x11vnc xvfb firefox - RUN mkdir /.vnc + RUN mkdir ~/.vnc # Setup a password RUN x11vnc -storepasswd 1234 ~/.vnc/passwd # Autostart firefox (might not be the best way, but it does the trick) From c496f24157afd81c9a26f5746175236485e97fa7 Mon Sep 17 00:00:00 2001 From: Lei Jitang Date: Mon, 20 Oct 2014 11:10:31 +0800 Subject: [PATCH 095/592] Add docker tag tests. Signed-off-by: Lei Jitang --- integration-cli/docker_cli_tag_test.go | 40 ++++++++++++++++++++++++++ 1 file changed, 40 insertions(+) diff --git a/integration-cli/docker_cli_tag_test.go b/integration-cli/docker_cli_tag_test.go index 815416f208..00228c0963 100644 --- a/integration-cli/docker_cli_tag_test.go +++ b/integration-cli/docker_cli_tag_test.go @@ -3,6 +3,7 @@ package main import ( "fmt" "os/exec" + "strings" "testing" ) @@ -88,3 +89,42 @@ func TestTagValidPrefixedRepo(t *testing.T) { logDone(logMessage) } } + +// tag an image with an existed tag name without -f option should fail +func TestTagExistedNameWithoutForce(t *testing.T) { + if err := pullImageIfNotExist("busybox:latest"); err != nil { + t.Fatal("couldn't find the busybox:latest image locally and failed to pull it") + } + + tagCmd := exec.Command(dockerBinary, "tag", "busybox:latest", "busybox:test") + if out, _, err := runCommandWithOutput(tagCmd); err != nil { + t.Fatal(out, err) + } + tagCmd = exec.Command(dockerBinary, "tag", "busybox:latest", "busybox:test") + out, _, err := runCommandWithOutput(tagCmd) + if err == nil || !strings.Contains(out, "Conflict: Tag test is already set to image") { + t.Fatal("tag busybox busybox:test should have failed,because busybox:test is existed") + } + deleteImages("busybox:test") + + logDone("tag - busybox with an existed tag name without -f option --> must fail") +} + +// tag an image with an existed tag name with -f option should work +func TestTagExistedNameWithForce(t *testing.T) { + if err := pullImageIfNotExist("busybox:latest"); err != nil { + t.Fatal("couldn't find the busybox:latest image locally and failed to pull it") + } + + tagCmd := exec.Command(dockerBinary, "tag", "busybox:latest", "busybox:test") + if out, _, err := runCommandWithOutput(tagCmd); err != nil { + t.Fatal(out, err) + } + tagCmd = exec.Command(dockerBinary, "tag", "-f", "busybox:latest", "busybox:test") + if out, _, err := runCommandWithOutput(tagCmd); err != nil { + t.Fatal(out, err) + } + deleteImages("busybox:test") + + 
logDone("tag - busybox with an existed tag name with -f option work") +} From 3d10d50c6d85e2d20e10bb925f3a3fc3212d52ea Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=E5=B0=B9=E5=90=89=E5=B3=B0?= Date: Mon, 20 Oct 2014 14:17:33 +0800 Subject: [PATCH 096/592] small typo --- docs/sources/userguide/usingdocker.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/sources/userguide/usingdocker.md b/docs/sources/userguide/usingdocker.md index e64db0bc2e..e6564d588c 100644 --- a/docs/sources/userguide/usingdocker.md +++ b/docs/sources/userguide/usingdocker.md @@ -25,7 +25,7 @@ The `docker` client is pretty simple. Each action you can take with Docker is a command and each command can take a series of flags and arguments. - # Usage: [sudo] docker [flags] [command] [arguments] .. + # Usage: [sudo] docker [command] [flags] [arguments] .. # Example: $ sudo docker run -i -t ubuntu /bin/bash From ed7934fd63ed52ad0086aaac62203e58e304bcb7 Mon Sep 17 00:00:00 2001 From: Vincent Bernat Date: Mon, 20 Oct 2014 15:33:17 +0200 Subject: [PATCH 097/592] zsh: update zsh completion for docker command zsh completion is updated with the content of felixr/docker-zsh-completion. The major change since the last merge is the addition of exec/create (but they were already present in the docker repository) as well as pause/unpause/logout/events and the use of short/long options when they are available. Some missing options were also added. 12f00abd7178 Add completion for `exec' 4e2faa075f9a Merge `run' and `create' commands. 34134de077de Add missing long/short options for most commands. d09f62339ab5 Add completion for `pause' and `unpause' e4754c3b3b9d Add completion for `logout' e0935eb3d5d2 Add completion for `events' dae353cb9afb Add completion for `create` Docker-DCO-1.1-Signed-off-by: Vincent Bernat (github: vincentbernat) --- contrib/completion/zsh/_docker | 148 ++++++++++++++++++--------------- 1 file changed, 79 insertions(+), 69 deletions(-) diff --git a/contrib/completion/zsh/_docker b/contrib/completion/zsh/_docker index aff59ee77c..4c0937e10c 100644 --- a/contrib/completion/zsh/_docker +++ b/contrib/completion/zsh/_docker @@ -195,17 +195,18 @@ __docker_subcommand () { ;; (build) _arguments \ - '--force-rm[Always remove intermediate containers, even after unsuccessful builds]' \ + '--force-rm[Always remove intermediate containers]' \ '--no-cache[Do not use cache when building the image]' \ - '-q[Suppress verbose build output]' \ + {-q,--quiet}'[Suppress verbose build output]' \ '--rm[Remove intermediate containers after a successful build]' \ - '-t:repository:__docker_repositories_with_tags' \ + {-t,--tag=-}'[Repository, name and tag to be applied]:repository:__docker_repositories_with_tags' \ ':path or URL:_directories' ;; (commit) _arguments \ - '--author=-[Author]:author: ' \ - '-m[Commit message]:message: ' \ + {-a,--author=-}'[Author]:author: ' \ + {-m,--message=-}'[Commit message]:message: ' \ + {-p,--pause}'[Pause container during commit]' \ '--run=-[Configuration automatically applied when the image is run]:configuration: ' \ ':container:__docker_containers' \ ':repository:__docker_repositories_with_tags' @@ -224,60 +225,40 @@ __docker_subcommand () { ;; esac ;; - (create) - _arguments \ - '-P[Publish all exposed ports to the host]' \ - '-a[Attach to stdin, stdout or stderr]' \ - '-c=-[CPU shares (relative weight)]:CPU shares:(0 10 100 200 500 800 1000)' \ - '--cidfile=-[Write the container ID to the file]:CID file:_files' \ - '*--dns=-[Set custom dns servers]:dns server: ' \ - '*-e=-[Set 
environment variables]:environment variable: ' \ - '--entrypoint=-[Overwrite the default entrypoint of the image]:entry point: ' \ - '*--expose=-[Expose a port from the container without publishing it]: ' \ - '-h=-[Container host name]:hostname:_hosts' \ - '-i[Keep stdin open even if not attached]' \ - '--link=-[Add link to another container]:link:->link' \ - '--lxc-conf=-[Add custom lxc options]:lxc options: ' \ - '-m=-[Memory limit (in bytes)]:limit: ' \ - '--name=-[Container name]:name: ' \ - '*-p=-[Expose a container'"'"'s port to the host]:port:_ports' \ - '--privileged[Give extended privileges to this container]' \ - '-t[Allocate a pseudo-tty]' \ - '-u=-[Username or UID]:user:_users' \ - '*-v=-[Bind mount a volume (e.g. from the host: -v /host:/container, from docker: -v /container)]:volume: '\ - '--volumes-from=-[Mount volumes from the specified container]:volume: ' \ - '-w=-[Working directory inside the container]:directory:_directories' \ - '(-):images:__docker_images' \ - '(-):command: _command_names -e' \ - '*::arguments: _normal' (diff|export) _arguments '*:containers:__docker_containers' ;; + (events) + _arguments \ + '--since=-[Events created since this timestamp]:timestamp: ' \ + '--until=-[Events created until this timestamp]:timestamp: ' + ;; (exec) _arguments \ - '-d[Detached mode: leave the container running in the background]' \ - '-i[Keep stdin open even if not attached]' \ - '-t[Allocate a pseudo-tty]' \ + {-d,--detach}'[Detached mode: leave the container running in the background]' \ + {-i,--interactive}'[Keep stdin open even if not attached]' \ + {-t,--tty}'[Allocate a pseudo-tty]' \ ':containers:__docker_runningcontainers' ;; (history) _arguments \ '--no-trunc[Do not truncate output]' \ - '-q[Only show numeric IDs]' \ + {-q,--quiet}'[Only show numeric IDs]' \ '*:images:__docker_images' ;; (images) _arguments \ - '-a[Show all images]' \ + {-a,--all}'[Show all images]' \ + '*'{-f,--filter=-}'[Filter values]:filter: ' \ '--no-trunc[Do not truncate output]' \ - '-q[Only show numeric IDs]' \ + {-q,--quiet}'[Only show numeric IDs]' \ '--tree[Output graph in tree format]' \ '--viz[Output graph in graphviz format]' \ ':repository:__docker_repositories' ;; (inspect) _arguments \ - '--format=-[Format the output using the given go template]:template: ' \ + {-f,--format=-}'[Format the output using the given go template]:template: ' \ '*:containers:__docker_containers' ;; (import) @@ -298,20 +279,29 @@ __docker_subcommand () { '3:file:_files' ;; (kill) - _arguments '*:containers:__docker_runningcontainers' + _arguments \ + {-s,--signal=-}'[Signal to send]:signal:_signals' \ + '*:containers:__docker_runningcontainers' ;; (load) + _arguments \ + {-i,--input=-}'[Read from tar archive file]:tar:_files' ;; (login) _arguments \ - '-e[Email]:email: ' \ - '-p[Password]:password: ' \ - '-u[Username]:username: ' \ + {-e,--email=-}'[Email]:email: ' \ + {-p,--password=-}'[Password]:password: ' \ + {-u,--user=-}'[Username]:username: ' \ + ':server: ' + ;; + (logout) + _arguments \ ':server: ' ;; (logs) _arguments \ - '-f[Follow log output]' \ + {-f,--follow}'[Follow log output]' \ + {-t,--timestamps}'[Show timestamps]' \ '*:containers:__docker_containers' ;; (port) @@ -319,24 +309,32 @@ __docker_subcommand () { '1:containers:__docker_runningcontainers' \ '2:port:_ports' ;; + (pause|unpause) + _arguments \ + '1:containers:__docker_runningcontainers' + ;; (start) _arguments \ - '-a[Attach container'"'"'s stdout/stderr and forward all signals]' \ - '-i[Attach container'"'"'s stding]' \ + 
{-a,--attach}'[Attach container'"'"'s stdout/stderr and forward all signals]' \ + {-i,--interactive}'[Attach container'"'"'s stding]' \ '*:containers:__docker_stoppedcontainers' ;; (rm) _arguments \ - '--link[Remove the specified link and not the underlying container]' \ - '-v[Remove the volumes associated to the container]' \ + {-f,--force}'[Force removal]' \ + {-l,--link}'[Remove the specified link and not the underlying container]' \ + {-v,--volumes}'[Remove the volumes associated to the container]' \ '*:containers:__docker_stoppedcontainers' ;; (rmi) _arguments \ + {-f,--force}'[Force removal]' \ + '--no-prune[Do not delete untagged parents]' \ '*:images:__docker_images' ;; (restart|stop) - _arguments '-t[Number of seconds to try to stop for before killing the container]:seconds to before killing:(1 5 10 30 60)' \ + _arguments \ + {-t,--time=-}'[Number of seconds to try to stop for before killing the container]:seconds to before killing:(1 5 10 30 60)' \ '*:containers:__docker_runningcontainers' ;; (top) @@ -352,47 +350,58 @@ __docker_subcommand () { ;; (ps) _arguments \ - '-a[Show all containers]' \ + {-a,--all}'[Show all containers]' \ '--before=-[Show only container created before...]:containers:__docker_containers' \ - '-l[Show only the latest created container]' \ + '*'{-f,--filter=-}'[Filter values]:filter: ' \ + {-l,--latest}'[Show only the latest created container]' \ '-n[Show n last created containers, include non-running one]:n:(1 5 10 25 50)' \ '--no-trunc[Do not truncate output]' \ - '-q[Only show numeric IDs]' \ - '-s[Display sizes]' \ + {-q,--quiet}'[Only show numeric IDs]' \ + {-s,--size}'[Display sizes]' \ '--since=-[Show only containers created since...]:containers:__docker_containers' ;; (tag) _arguments \ - '-f[force]'\ + {-f,--force}'[force]'\ ':image:__docker_images'\ ':repository:__docker_repositories_with_tags' ;; - (run) + (create|run) _arguments \ - '-P[Publish all exposed ports to the host]' \ - '-a[Attach to stdin, stdout or stderr]' \ - '-c[CPU shares (relative weight)]:CPU shares:(0 10 100 200 500 800 1000)' \ + {-a,--attach}'[Attach to stdin, stdout or stderr]' \ + '*--add-host=-[Add a custom host-to-IP mapping]:host\:ip mapping: ' \ + {-c,--cpu-shares=-}'[CPU shares (relative weight)]:CPU shares:(0 10 100 200 500 800 1000)' \ + '*--cap-add=-[Add Linux capabilities]:capability: ' \ + '*--cap-drop=-[Drop Linux capabilities]:capability: ' \ '--cidfile=-[Write the container ID to the file]:CID file:_files' \ - '-d[Detached mode: leave the container running in the background]' \ + '--cpuset=-[CPUs in which to allow execution]:CPU set: ' \ + {-d,--detach}'[Detached mode: leave the container running in the background]' \ + '*--device=-[Add a host device to the container]:device:_files' \ '*--dns=-[Set custom dns servers]:dns server: ' \ - '*-e[Set environment variables]:environment variable: ' \ + '*--dns-search=-[Set custom DNS search domains]:dns domains: ' \ + '*'{-e,--environment=-}'[Set environment variables]:environment variable: ' \ '--entrypoint=-[Overwrite the default entrypoint of the image]:entry point: ' \ + '*--env-file=-[Read environment variables from a file]:environment file:_files' \ '*--expose=-[Expose a port from the container without publishing it]: ' \ - '-h[Container host name]:hostname:_hosts' \ - '-i[Keep stdin open even if not attached]' \ - '--link=-[Add link to another container]:link:->link' \ - '--lxc-conf=-[Add custom lxc options]:lxc options: ' \ + {-h,--hostname=-}'[Container host name]:hostname:_hosts' \ + 
{-i,--interactive}'[Keep stdin open even if not attached]' \ + '*--link=-[Add link to another container]:link:->link' \ + '*--lxc-conf=-[Add custom lxc options]:lxc options: ' \ '-m[Memory limit (in bytes)]:limit: ' \ '--name=-[Container name]:name: ' \ - '*-p[Expose a container'"'"'s port to the host]:port:_ports' \ + '--net=-[Network mode]:network mode:(bridge none container: host)' \ + {-P,--publish-all}'[Publish all exposed ports]' \ + '*'{-p,--publish=-}'[Expose a container'"'"'s port to the host]:port:_ports' \ '--privileged[Give extended privileges to this container]' \ + '--restart=-[Restart policy]:restart policy:(no on-failure always)' \ '--rm[Remove intermediate containers when it exits]' \ + '*--security-opt=-[Security options]:security option: ' \ '--sig-proxy[Proxify all received signal]' \ - '-t[Allocate a pseudo-tty]' \ - '-u[Username or UID]:user:_users' \ - '*-v[Bind mount a volume (e.g. from the host: -v /host:/container, from docker: -v /container)]:volume: '\ - '--volumes-from=-[Mount volumes from the specified container]:volume: ' \ - '-w[Working directory inside the container]:directory:_directories' \ + {-t,--tty}'[Allocate a pseudo-tty]' \ + {-u,--user=-}'[Username or UID]:user:_users' \ + '*-v[Bind mount a volume]:volume: '\ + '*--volumes-from=-[Mount volumes from the specified container]:volume: ' \ + {-w,--workdir=-}'[Working directory inside the container]:directory:_directories' \ '(-):images:__docker_images' \ '(-):command: _command_names -e' \ '*::arguments: _normal' @@ -416,6 +425,7 @@ __docker_subcommand () { ;; (save) _arguments \ + {-o,--output=-}'[Write to file]:file:_files' \ ':images:__docker_images' ;; (wait) From 69fe3e1a3493e53acb2da7220764bd3807415ea2 Mon Sep 17 00:00:00 2001 From: Dan Walsh Date: Tue, 14 Oct 2014 09:19:45 -0400 Subject: [PATCH 098/592] On Red Hat Registry Servers we return 404 on certification errors. We do this to prevent leakage of information, we don't want people to be able to probe for existing content. According to RFC 2616, "This status code (404) is commonly used when the server does not wish to reveal exactly why the request has been refused, or when no other response i is applicable." https://www.ietf.org/rfc/rfc2616.txt 10.4.4 403 Forbidden The server understood the request, but is refusing to fulfill it. Authorization will not help and the request SHOULD NOT be repeated. If the request method was not HEAD and the server wishes to make public why the request has not been fulfilled, it SHOULD describe the reason for the refusal in the entity. If the server does not wish to make this information available to the client, the status code 404 (Not Found) can be used instead. 10.4.5 404 Not Found The server has not found anything matching the Request-URI. No indication is given of whether the condition is temporary or permanent. The 410 (Gone) status code SHOULD be used if the server knows, through some internally configurable mechanism, that an old resource is permanently unavailable and has no forwarding address. This status code is commonly used when the server does not wish to reveal exactly why the request has been refused, or when no other response is applicable. When docker is running through its certificates, it should continue trying with a new certificate even if it gets back a 404 error code. 
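A hedged, simplified sketch of what that per-certificate retry condition looks like after this change (the real code is `doRequest` in registry/registry.go, shown in the diff below; `certs`, `jar`, `pool`, `timeout` and `req` come from the surrounding function):

```
// Sketch only: a 403, 404 or 5xx response (or a transport error) means
// "try the next client certificate"; the last certificate's result is
// returned no matter what it was.
for i, cert := range certs {
	client := newClient(jar, pool, cert, timeout)
	res, err := client.Do(req)
	retriable := err != nil ||
		res.StatusCode == 403 ||
		res.StatusCode == 404 ||
		res.StatusCode >= 500
	if i == len(certs)-1 || !retriable {
		return res, client, err
	}
}
```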
Docker-DCO-1.1-Signed-off-by: Dan Walsh (github: rhatdan) --- docs/sources/articles/certificates.md | 2 +- registry/registry.go | 5 ++++- 2 files changed, 5 insertions(+), 2 deletions(-) diff --git a/docs/sources/articles/certificates.md b/docs/sources/articles/certificates.md index 90d3f1b356..e031676402 100644 --- a/docs/sources/articles/certificates.md +++ b/docs/sources/articles/certificates.md @@ -31,7 +31,7 @@ repository. > **Note:** > If there are multiple certificates, each will be tried in alphabetical -> order. If there is an authentication error (e.g., 403, 5xx, etc.), Docker +> order. If there is an authentication error (e.g., 403, 404, 5xx, etc.), Docker > will continue to try with the next certificate. Our example is set up like this: diff --git a/registry/registry.go b/registry/registry.go index 0c648a94b2..d1315ed4b6 100644 --- a/registry/registry.go +++ b/registry/registry.go @@ -147,7 +147,10 @@ func doRequest(req *http.Request, jar http.CookieJar, timeout TimeoutType) (*htt client := newClient(jar, pool, cert, timeout) res, err := client.Do(req) // If this is the last cert, otherwise, continue to next cert if 403 or 5xx - if i == len(certs)-1 || err == nil && res.StatusCode != 403 && res.StatusCode < 500 { + if i == len(certs)-1 || err == nil && + res.StatusCode != 403 && + res.StatusCode != 404 && + res.StatusCode < 500 { return res, client, err } } From 16346253537267b42bbf35e81c0139b4d0aee43c Mon Sep 17 00:00:00 2001 From: Srini Brahmaroutu Date: Mon, 13 Oct 2014 06:12:44 +0000 Subject: [PATCH 099/592] Adding capability to filter by name, id or status to list containers api Closes #7599 Signed-off-by: Srini Brahmaroutu --- daemon/list.go | 19 +++++---- integration-cli/docker_cli_ps_test.go | 60 +++++++++++++++++++++++++++ pkg/parsers/filters/parse.go | 20 +++++++++ 3 files changed, 92 insertions(+), 7 deletions(-) diff --git a/daemon/list.go b/daemon/list.go index 35effa4344..347d3c20d8 100644 --- a/daemon/list.go +++ b/daemon/list.go @@ -28,7 +28,6 @@ func (daemon *Daemon) Containers(job *engine.Job) engine.Status { size = job.GetenvBool("size") psFilters filters.Args filt_exited []int - filt_status []string ) outs := engine.NewTable("Created", 0) @@ -46,8 +45,6 @@ func (daemon *Daemon) Containers(job *engine.Job) engine.Status { } } - filt_status, _ = psFilters["status"] - names := map[string][]string{} daemon.ContainerGraph().Walk("/", func(p string, e *graphdb.Entity) error { names[e.ID()] = append(names[e.ID()], p) @@ -76,6 +73,15 @@ func (daemon *Daemon) Containers(job *engine.Job) engine.Status { if !container.Running && !all && n <= 0 && since == "" && before == "" { return nil } + + if !psFilters.Match("name", container.Name) { + return nil + } + + if !psFilters.Match("id", container.ID) { + return nil + } + if before != "" && !foundBefore { if container.ID == beforeCont.ID { foundBefore = true @@ -102,10 +108,9 @@ func (daemon *Daemon) Containers(job *engine.Job) engine.Status { return nil } } - for _, status := range filt_status { - if container.State.StateString() != strings.ToLower(status) { - return nil - } + + if !psFilters.Match("status", container.State.StateString()) { + return nil } displayed++ out := &engine.Env{} diff --git a/integration-cli/docker_cli_ps_test.go b/integration-cli/docker_cli_ps_test.go index 8be4dfb16f..3874fa70b5 100644 --- a/integration-cli/docker_cli_ps_test.go +++ b/integration-cli/docker_cli_ps_test.go @@ -336,3 +336,63 @@ func TestPsListContainersFilterStatus(t *testing.T) { logDone("ps - test ps filter status") } + +func 
TestPsListContainersFilterID(t *testing.T) { + // start container + runCmd := exec.Command(dockerBinary, "run", "-d", "busybox") + out, _, err := runCommandWithOutput(runCmd) + if err != nil { + t.Fatal(out, err) + } + firstID := stripTrailingCharacters(out) + + // start another container + runCmd = exec.Command(dockerBinary, "run", "-d", "busybox", "sh", "-c", "sleep 360") + if out, _, err = runCommandWithOutput(runCmd); err != nil { + t.Fatal(out, err) + } + + // filter containers by id + runCmd = exec.Command(dockerBinary, "ps", "-a", "-q", "--filter=id="+firstID) + if out, _, err = runCommandWithOutput(runCmd); err != nil { + t.Fatal(out, err) + } + containerOut := strings.TrimSpace(out) + if containerOut != firstID[:12] { + t.Fatalf("Expected id %s, got %s for exited filter, output: %q", firstID[:12], containerOut, out) + } + + deleteAllContainers() + + logDone("ps - test ps filter id") +} + +func TestPsListContainersFilterName(t *testing.T) { + // start container + runCmd := exec.Command(dockerBinary, "run", "-d", "--name=a_name_to_match", "busybox") + out, _, err := runCommandWithOutput(runCmd) + if err != nil { + t.Fatal(out, err) + } + firstID := stripTrailingCharacters(out) + + // start another container + runCmd = exec.Command(dockerBinary, "run", "-d", "--name=b_name_to_match", "busybox", "sh", "-c", "sleep 360") + if out, _, err = runCommandWithOutput(runCmd); err != nil { + t.Fatal(out, err) + } + + // filter containers by name + runCmd = exec.Command(dockerBinary, "ps", "-a", "-q", "--filter=name=a_name_to_match") + if out, _, err = runCommandWithOutput(runCmd); err != nil { + t.Fatal(out, err) + } + containerOut := strings.TrimSpace(out) + if containerOut != firstID[:12] { + t.Fatalf("Expected id %s, got %s for exited filter, output: %q", firstID[:12], containerOut, out) + } + + deleteAllContainers() + + logDone("ps - test ps filter name") +} diff --git a/pkg/parsers/filters/parse.go b/pkg/parsers/filters/parse.go index 27c7132e8e..403959223c 100644 --- a/pkg/parsers/filters/parse.go +++ b/pkg/parsers/filters/parse.go @@ -3,6 +3,7 @@ package filters import ( "encoding/json" "errors" + "regexp" "strings" ) @@ -61,3 +62,22 @@ func FromParam(p string) (Args, error) { } return args, nil } + +func (filters Args) Match(field, source string) bool { + fieldValues := filters[field] + + //do not filter if there is no filter set or cannot determine filter + if len(fieldValues) == 0 { + return true + } + for _, name2match := range fieldValues { + match, err := regexp.MatchString(name2match, source) + if err != nil { + continue + } + if match { + return true + } + } + return false +} From 964f9965c75b89f95060c62ba512ed6ceb525992 Mon Sep 17 00:00:00 2001 From: Brian Goff Date: Mon, 20 Oct 2014 15:27:26 -0400 Subject: [PATCH 100/592] Clean volume paths Fixes #8659 Signed-off-by: Brian Goff --- daemon/volumes.go | 3 ++ integration-cli/docker_cli_run_test.go | 50 ++++++++++++++++++++++++++ integration-cli/docker_test_vars.go | 6 ++-- integration-cli/docker_utils.go | 10 ++++++ volumes/repository.go | 5 +-- 5 files changed, 70 insertions(+), 4 deletions(-) diff --git a/daemon/volumes.go b/daemon/volumes.go index c7a8d7bfcb..b34d9678cb 100644 --- a/daemon/volumes.go +++ b/daemon/volumes.go @@ -133,6 +133,7 @@ func (container *Container) parseVolumeMountConfig() (map[string]*Mount, error) // Get the rest of the volumes for path := range container.Config.Volumes { // Check if this is already added as a bind-mount + path = filepath.Clean(path) if _, exists := mounts[path]; exists { continue } @@ 
-182,6 +183,8 @@ func parseBindMountSpec(spec string) (string, string, bool, error) { return "", "", false, fmt.Errorf("cannot bind mount volume: %s volume paths must be absolute.", path) } + path = filepath.Clean(path) + mountToPath = filepath.Clean(mountToPath) return path, mountToPath, writable, nil } diff --git a/integration-cli/docker_cli_run_test.go b/integration-cli/docker_cli_run_test.go index 81ed693b2b..eeb3601e7a 100644 --- a/integration-cli/docker_cli_run_test.go +++ b/integration-cli/docker_cli_run_test.go @@ -2396,3 +2396,53 @@ func TestRunNoOutputFromPullInStdout(t *testing.T) { } logDone("run - no output from pull in stdout") } + +func TestRunVolumesCleanPaths(t *testing.T) { + defer deleteAllContainers() + + if _, err := buildImage("run_volumes_clean_paths", + `FROM busybox + VOLUME /foo/`, + true); err != nil { + t.Fatal(err) + } + defer deleteImages("run_volumes_clean_paths") + + cmd := exec.Command(dockerBinary, "run", "-v", "/foo", "-v", "/bar/", "--name", "dark_helmet", "run_volumes_clean_paths") + if out, _, err := runCommandWithOutput(cmd); err != nil { + t.Fatal(err, out) + } + + out, err := inspectFieldMap("dark_helmet", "Volumes", "/foo/") + if err != nil { + t.Fatal(err) + } + if out != "" { + t.Fatalf("Found unexpected volume entry for '/foo/' in volumes\n%q", out) + } + + out, err = inspectFieldMap("dark_helmet", "Volumes", "/foo") + if err != nil { + t.Fatal(err) + } + if !strings.Contains(out, volumesStoragePath) { + t.Fatalf("Volume was not defined for /foo\n%q", out) + } + + out, err = inspectFieldMap("dark_helmet", "Volumes", "/bar/") + if err != nil { + t.Fatal(err) + } + if out != "" { + t.Fatalf("Found unexpected volume entry for '/bar/' in volumes\n%q", out) + } + out, err = inspectFieldMap("dark_helmet", "Volumes", "/bar") + if err != nil { + t.Fatal(err) + } + if !strings.Contains(out, volumesStoragePath) { + t.Fatalf("Volume was not defined for /bar\n%q", out) + } + + logDone("run - volume paths are cleaned") +} diff --git a/integration-cli/docker_test_vars.go b/integration-cli/docker_test_vars.go index fdbcf073ec..23903a39a9 100644 --- a/integration-cli/docker_test_vars.go +++ b/integration-cli/docker_test_vars.go @@ -16,8 +16,10 @@ var ( // the private registry to use for tests privateRegistryURL = "127.0.0.1:5000" - execDriverPath = "/var/lib/docker/execdriver/native" - volumesConfigPath = "/var/lib/docker/volumes" + dockerBasePath = "/var/lib/docker" + execDriverPath = dockerBasePath + "/execdriver/native" + volumesConfigPath = dockerBasePath + "/volumes" + volumesStoragePath = dockerBasePath + "/vfs/dir" workingDirectory string ) diff --git a/integration-cli/docker_utils.go b/integration-cli/docker_utils.go index c3e5361713..109014db74 100644 --- a/integration-cli/docker_utils.go +++ b/integration-cli/docker_utils.go @@ -509,6 +509,16 @@ func inspectFieldJSON(name, field string) (string, error) { return strings.TrimSpace(out), nil } +func inspectFieldMap(name, path, field string) (string, error) { + format := fmt.Sprintf("{{index .%s %q}}", path, field) + inspectCmd := exec.Command(dockerBinary, "inspect", "-f", format, name) + out, exitCode, err := runCommandWithOutput(inspectCmd) + if err != nil || exitCode != 0 { + return "", fmt.Errorf("failed to inspect %s: %s", name, out) + } + return strings.TrimSpace(out), nil +} + func getIDByName(name string) (string, error) { return inspectField(name, "Id") } diff --git a/volumes/repository.go b/volumes/repository.go index e765d944c2..2383f34a93 100644 --- a/volumes/repository.go +++ 
b/volumes/repository.go @@ -55,6 +55,7 @@ func (r *Repository) newVolume(path string, writable bool) (*Volume, error) { return nil, err } } + path = filepath.Clean(path) path, err = filepath.EvalSymlinks(path) if err != nil { @@ -126,7 +127,7 @@ func (r *Repository) get(path string) *Volume { if err != nil { return nil } - return r.volumes[path] + return r.volumes[filepath.Clean(path)] } func (r *Repository) Add(volume *Volume) error { @@ -160,7 +161,7 @@ func (r *Repository) Delete(path string) error { if err != nil { return err } - volume := r.get(path) + volume := r.get(filepath.Clean(path)) if volume == nil { return fmt.Errorf("Volume %s does not exist", path) } From fdd2abe7b34f15fbe5ec64309e9d700ba2e66e65 Mon Sep 17 00:00:00 2001 From: Gleb M Borisov Date: Tue, 21 Oct 2014 03:45:45 +0400 Subject: [PATCH 101/592] Use dual-stack Dialer when talking to registy Signed-off-by: Gleb M. Borisov --- registry/registry.go | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/registry/registry.go b/registry/registry.go index d1315ed4b6..0b3ec12bf3 100644 --- a/registry/registry.go +++ b/registry/registry.go @@ -56,7 +56,9 @@ func newClient(jar http.CookieJar, roots *x509.CertPool, cert *tls.Certificate, case ConnectTimeout: httpTransport.Dial = func(proto string, addr string) (net.Conn, error) { // Set the connect timeout to 5 seconds - conn, err := net.DialTimeout(proto, addr, 5*time.Second) + d := net.Dialer{Timeout: 5 * time.Second, DualStack: true} + + conn, err := d.Dial(proto, addr) if err != nil { return nil, err } @@ -66,7 +68,9 @@ func newClient(jar http.CookieJar, roots *x509.CertPool, cert *tls.Certificate, } case ReceiveTimeout: httpTransport.Dial = func(proto string, addr string) (net.Conn, error) { - conn, err := net.Dial(proto, addr) + d := net.Dialer{DualStack: true} + + conn, err := d.Dial(proto, addr) if err != nil { return nil, err } From ef98fe0763024abd90bd5a573fec816895ee92e4 Mon Sep 17 00:00:00 2001 From: Brian Goff Date: Wed, 24 Sep 2014 09:07:11 -0400 Subject: [PATCH 102/592] Make container.Copy support volumes Fixes #1992 Right now when you `docker cp` a path which is in a volume, the cp itself works, however you end up getting files that are in the container's fs rather than the files in the volume (which is not in the container's fs). This makes it so when you `docker cp` a path that is in a volume it follows the volume to the real path on the host. 
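In other words, the heart of the change is a small check in `container.Copy`, restated here for orientation (the actual hunk appears in the diff below): if the requested resource sits under a volume's mount point, the tar stream is produced from the volume's host directory instead of the container filesystem. The `[1:]` strips `MountToPath`'s leading slash, since the resource path is compared without one.

```
// Check if this is actually in a volume
for _, mnt := range container.VolumeMounts() {
	if len(mnt.MountToPath) > 0 && strings.HasPrefix(resource, mnt.MountToPath[1:]) {
		return mnt.Export(resource)
	}
}
```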
archive.go has been modified so that when you do `docker cp mydata:/foo .`, and /foo is the volume, the outputed folder is called "foo" instead of the volume ID (because we are telling it to tar up `/var/lib/docker/vfs/dir/` and not "foo", but the user would be expecting "foo", not the ID Signed-off-by: Brian Goff --- daemon/container.go | 10 ++- daemon/volumes.go | 13 ++++ integration-cli/docker_cli_cp_test.go | 107 ++++++++++++++++++++++++++ pkg/archive/archive.go | 11 +++ volumes/volume.go | 37 +++++++++ 5 files changed, 176 insertions(+), 2 deletions(-) diff --git a/daemon/container.go b/daemon/container.go index 6fd4507972..e5c9fadace 100644 --- a/daemon/container.go +++ b/daemon/container.go @@ -826,19 +826,25 @@ func (container *Container) Copy(resource string) (io.ReadCloser, error) { return nil, err } - var filter []string - basePath, err := container.getResourcePath(resource) if err != nil { container.Unmount() return nil, err } + // Check if this is actually in a volume + for _, mnt := range container.VolumeMounts() { + if len(mnt.MountToPath) > 0 && strings.HasPrefix(resource, mnt.MountToPath[1:]) { + return mnt.Export(resource) + } + } + stat, err := os.Stat(basePath) if err != nil { container.Unmount() return nil, err } + var filter []string if !stat.IsDir() { d, f := path.Split(basePath) basePath = d diff --git a/daemon/volumes.go b/daemon/volumes.go index c7a8d7bfcb..056d32b548 100644 --- a/daemon/volumes.go +++ b/daemon/volumes.go @@ -2,6 +2,7 @@ package daemon import ( "fmt" + "io" "io/ioutil" "os" "path/filepath" @@ -24,6 +25,18 @@ type Mount struct { copyData bool } +func (mnt *Mount) Export(resource string) (io.ReadCloser, error) { + var name string + if resource == mnt.MountToPath[1:] { + name = filepath.Base(resource) + } + path, err := filepath.Rel(mnt.MountToPath[1:], resource) + if err != nil { + return nil, err + } + return mnt.volume.Export(path, name) +} + func (container *Container) prepareVolumes() error { if container.Volumes == nil || len(container.Volumes) == 0 { container.Volumes = make(map[string]string) diff --git a/integration-cli/docker_cli_cp_test.go b/integration-cli/docker_cli_cp_test.go index aecc68edb4..b89ddde0b4 100644 --- a/integration-cli/docker_cli_cp_test.go +++ b/integration-cli/docker_cli_cp_test.go @@ -1,6 +1,7 @@ package main import ( + "bytes" "fmt" "io/ioutil" "os" @@ -371,3 +372,109 @@ func TestCpUnprivilegedUser(t *testing.T) { logDone("cp - unprivileged user") } + +func TestCpVolumePath(t *testing.T) { + tmpDir, err := ioutil.TempDir("", "cp-test-volumepath") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(tmpDir) + outDir, err := ioutil.TempDir("", "cp-test-volumepath-out") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(outDir) + _, err = os.Create(tmpDir + "/test") + if err != nil { + t.Fatal(err) + } + + out, exitCode, err := cmd(t, "run", "-d", "-v", "/foo", "-v", tmpDir+"/test:/test", "-v", tmpDir+":/baz", "busybox", "/bin/sh", "-c", "touch /foo/bar") + if err != nil || exitCode != 0 { + t.Fatal("failed to create a container", out, err) + } + + cleanedContainerID := stripTrailingCharacters(out) + defer deleteContainer(cleanedContainerID) + + out, _, err = cmd(t, "wait", cleanedContainerID) + if err != nil || stripTrailingCharacters(out) != "0" { + t.Fatal("failed to set up container", out, err) + } + + // Copy actual volume path + _, _, err = cmd(t, "cp", cleanedContainerID+":/foo", outDir) + if err != nil { + t.Fatalf("couldn't copy from volume path: %s:%s %v", cleanedContainerID, "/foo", err) + } + stat, 
err := os.Stat(outDir + "/foo") + if err != nil { + t.Fatal(err) + } + if !stat.IsDir() { + t.Fatal("expected copied content to be dir") + } + stat, err = os.Stat(outDir + "/foo/bar") + if err != nil { + t.Fatal(err) + } + if stat.IsDir() { + t.Fatal("Expected file `bar` to be a file") + } + + // Copy file nested in volume + _, _, err = cmd(t, "cp", cleanedContainerID+":/foo/bar", outDir) + if err != nil { + t.Fatalf("couldn't copy from volume path: %s:%s %v", cleanedContainerID, "/foo", err) + } + stat, err = os.Stat(outDir + "/bar") + if err != nil { + t.Fatal(err) + } + if stat.IsDir() { + t.Fatal("Expected file `bar` to be a file") + } + + // Copy Bind-mounted dir + _, _, err = cmd(t, "cp", cleanedContainerID+":/baz", outDir) + if err != nil { + t.Fatalf("couldn't copy from bind-mounted volume path: %s:%s %v", cleanedContainerID, "/baz", err) + } + stat, err = os.Stat(outDir + "/baz") + if err != nil { + t.Fatal(err) + } + if !stat.IsDir() { + t.Fatal("Expected `baz` to be a dir") + } + + // Copy file nested in bind-mounted dir + _, _, err = cmd(t, "cp", cleanedContainerID+":/baz/test", outDir) + fb, err := ioutil.ReadFile(outDir + "/baz/test") + if err != nil { + t.Fatal(err) + } + fb2, err := ioutil.ReadFile(tmpDir + "/test") + if err != nil { + t.Fatal(err) + } + if !bytes.Equal(fb, fb2) { + t.Fatalf("Expected copied file to be duplicate of bind-mounted file") + } + + // Copy bind-mounted file + _, _, err = cmd(t, "cp", cleanedContainerID+":/test", outDir) + fb, err = ioutil.ReadFile(outDir + "/test") + if err != nil { + t.Fatal(err) + } + fb2, err = ioutil.ReadFile(tmpDir + "/test") + if err != nil { + t.Fatal(err) + } + if !bytes.Equal(fb, fb2) { + t.Fatalf("Expected copied file to be duplicate of bind-mounted file") + } + + logDone("cp - volume path") +} diff --git a/pkg/archive/archive.go b/pkg/archive/archive.go index 7d9103e103..98149160b3 100644 --- a/pkg/archive/archive.go +++ b/pkg/archive/archive.go @@ -34,6 +34,7 @@ type ( Excludes []string Compression Compression NoLchown bool + Name string } ) @@ -359,6 +360,7 @@ func TarWithOptions(srcPath string, options *TarOptions) (io.ReadCloser, error) twBuf := pools.BufioWriter32KPool.Get(nil) defer pools.BufioWriter32KPool.Put(twBuf) + var renamedRelFilePath string // For when tar.Options.Name is set for _, include := range options.Includes { filepath.Walk(filepath.Join(srcPath, include), func(filePath string, f os.FileInfo, err error) error { if err != nil { @@ -384,6 +386,15 @@ func TarWithOptions(srcPath string, options *TarOptions) (io.ReadCloser, error) return nil } + // Rename the base resource + if options.Name != "" && filePath == srcPath+"/"+filepath.Base(relFilePath) { + renamedRelFilePath = relFilePath + } + // Set this to make sure the items underneath also get renamed + if options.Name != "" { + relFilePath = strings.Replace(relFilePath, renamedRelFilePath, options.Name, 1) + } + if err := addTarFile(filePath, relFilePath, tw, twBuf); err != nil { log.Debugf("Can't add file %s to tar: %s", srcPath, err) } diff --git a/volumes/volume.go b/volumes/volume.go index e2d7a726db..73cbb3640d 100644 --- a/volumes/volume.go +++ b/volumes/volume.go @@ -2,11 +2,14 @@ package volumes import ( "encoding/json" + "io" "io/ioutil" "os" + "path" "path/filepath" "sync" + "github.com/docker/docker/pkg/archive" "github.com/docker/docker/pkg/symlink" ) @@ -21,6 +24,35 @@ type Volume struct { lock sync.Mutex } +func (v *Volume) Export(resource, name string) (io.ReadCloser, error) { + if v.IsBindMount && filepath.Base(resource) == name { + 
name = "" + } + + basePath, err := v.getResourcePath(resource) + if err != nil { + return nil, err + } + stat, err := os.Stat(basePath) + if err != nil { + return nil, err + } + var filter []string + if !stat.IsDir() { + d, f := path.Split(basePath) + basePath = d + filter = []string{f} + } else { + filter = []string{path.Base(basePath)} + basePath = path.Dir(basePath) + } + return archive.TarWithOptions(basePath, &archive.TarOptions{ + Compression: archive.Uncompressed, + Name: name, + Includes: filter, + }) +} + func (v *Volume) IsDir() (bool, error) { stat, err := os.Stat(v.Path) if err != nil { @@ -137,3 +169,8 @@ func (v *Volume) getRootResourcePath(path string) (string, error) { cleanPath := filepath.Join("/", path) return symlink.FollowSymlinkInScope(filepath.Join(v.configPath, cleanPath), v.configPath) } + +func (v *Volume) getResourcePath(path string) (string, error) { + cleanPath := filepath.Join("/", path) + return symlink.FollowSymlinkInScope(filepath.Join(v.Path, cleanPath), v.Path) +} From 9e137ceb578bd427124d4937bc6e440ae5267bf6 Mon Sep 17 00:00:00 2001 From: Sven Dowideit Date: Tue, 21 Oct 2014 10:48:58 +1000 Subject: [PATCH 103/592] Link to run reference from cli doc Docker-DCO-1.1-Signed-off-by: Sven Dowideit (github: SvenDowideit) --- docs/sources/reference/commandline/cli.md | 3 ++ docs/sources/reference/run.md | 36 +++++++++++------------ 2 files changed, 21 insertions(+), 18 deletions(-) diff --git a/docs/sources/reference/commandline/cli.md b/docs/sources/reference/commandline/cli.md index 131413c8fe..4ede5e9ccf 100644 --- a/docs/sources/reference/commandline/cli.md +++ b/docs/sources/reference/commandline/cli.md @@ -1226,6 +1226,9 @@ specified image, and then `starts` it using the specified command. That is, previous changes intact using `docker start`. See `docker ps -a` to view a list of all containers. +There is detailed infortmation about `docker run` in the [Docker run reference]( +/reference/run/). + The `docker run` command can be used in combination with `docker commit` to [*change the command that a container runs*](#commit-an-existing-container). diff --git a/docs/sources/reference/run.md b/docs/sources/reference/run.md index 2183ee957c..88e3f5d491 100644 --- a/docs/sources/reference/run.md +++ b/docs/sources/reference/run.md @@ -1,8 +1,8 @@ -page_title: Docker Run Reference +page_title: Docker run reference page_description: Configure containers at runtime page_keywords: docker, run, configure, runtime -# Docker Run Reference +# Docker run reference **Docker runs processes in isolated containers**. When an operator executes `docker run`, she starts a process with its own file system, @@ -14,7 +14,7 @@ the container from the image. That's the main reason [*run*](/reference/commandline/cli/#run) has more options than any other `docker` command. -## General Form +## General form The basic `docker run` command takes this form: @@ -39,7 +39,7 @@ behavior, allowing them to override all defaults set by the developer during `docker build` and nearly all the defaults set by the Docker runtime itself. -## Operator Exclusive Options +## Operator exclusive options Only the operator (the person executing `docker run`) can set the following options. @@ -55,7 +55,7 @@ following options. 
- [Runtime Constraints on CPU and Memory](#runtime-constraints-on-cpu-and-memory) - [Runtime Privilege, Linux Capabilities, and LXC Configuration](#runtime-privilege-linux-capabilities-and-lxc-configuration) -## Detached vs Foreground +## Detached vs foreground When starting a Docker container, you must first decide if you want to run the container in the background in a "detached" mode or in the @@ -97,7 +97,7 @@ For interactive processes (like a shell) you will typically want a tty as well as persistent standard input (`STDIN`), so you'll use `-i -t` together in most interactive cases. -## Container Identification +## Container identification ### Name (–-name) @@ -116,7 +116,7 @@ add meaning to a container since you can use this name when defining other place you need to identify a container). This works for both background and foreground Docker containers. -### PID Equivalent +### PID equivalent Finally, to help with automation, you can have Docker write the container ID out to a file of your choosing. This is similar to how some @@ -131,7 +131,7 @@ While not strictly a means of identifying a container, you can specify a version image you'd like to run the container with by adding `image[:tag]` to the command. For example, `docker run ubuntu:14.04`. -## Network Settings +## Network settings --dns=[] : Set custom dns servers for the container --net="bridge" : Set the Network mode for the container @@ -213,7 +213,7 @@ container itself as well as `localhost` and a few other common things. The ::1 localhost ip6-localhost ip6-loopback 86.75.30.9 db-static -## Clean Up (–-rm) +## Clean up (–-rm) By default a container's file system persists even after the container exits. This makes debugging a lot easier (since you can inspect the @@ -225,7 +225,7 @@ the container exits**, you can add the `--rm` flag: --rm=false: Automatically remove the container when it exits (incompatible with -d) -## Security Configuration +## Security configuration --security-opt="label:user:USER" : Set the label user for the container --security-opt="label:role:ROLE" : Set the label role for the container --security-opt="label:type:TYPE" : Set the label type for the container @@ -261,7 +261,7 @@ Note: You would have to write policy defining a `svirt_apache_t` type. -## Runtime Constraints on CPU and Memory +## Runtime constraints on CPU and memory The operator can also adjust the performance parameters of the container: @@ -279,7 +279,7 @@ get the same proportion of CPU cycles, but you can tell the kernel to give more shares of CPU time to one or more containers when you start them via Docker. -## Runtime Privilege, Linux Capabilities, and LXC Configuration +## Runtime privilege, Linux capabilities, and LXC configuration --cap-add: Add Linux capabilities --cap-drop: Drop Linux capabilities @@ -347,7 +347,7 @@ Note that in the future, a given host's docker daemon may not use LXC, so this is an implementation-specific configuration meant for operators already familiar with using LXC directly. -## Overriding Dockerfile Image Defaults +## Overriding Dockerfile image defaults When a developer builds an image from a [*Dockerfile*](/reference/builder/#dockerbuilder) or when she commits it, the developer can set a number of default parameters @@ -367,7 +367,7 @@ Dockerfile instruction and how the operator can override that setting. 
- [USER](#user) - [WORKDIR](#workdir) -## CMD (Default Command or Options) +## CMD (default command or options) Recall the optional `COMMAND` in the Docker commandline: @@ -383,7 +383,7 @@ image), you can override that `CMD` instruction just by specifying a new If the image also specifies an `ENTRYPOINT` then the `CMD` or `COMMAND` get appended as arguments to the `ENTRYPOINT`. -## ENTRYPOINT (Default Command to Execute at Runtime) +## ENTRYPOINT (default command to execute at runtime) --entrypoint="": Overwrite the default entrypoint set by the image @@ -406,7 +406,7 @@ or two examples of how to pass more parameters to that ENTRYPOINT: $ sudo docker run -i -t --entrypoint /bin/bash example/redis -c ls -l $ sudo docker run -i -t --entrypoint /usr/bin/redis-cli example/redis --help -## EXPOSE (Incoming Ports) +## EXPOSE (incoming ports) The Dockerfile doesn't give much control over networking, only providing the `EXPOSE` instruction to give a hint to the operator about what @@ -446,7 +446,7 @@ then the client container can access the exposed port via a private networking interface. Docker will set some environment variables in the client container to help indicate which interface and port to use. -## ENV (Environment Variables) +## ENV (environment variables) When a new container is created, Docker will set the following environment variables automatically: @@ -555,7 +555,7 @@ mechanism to communicate with a linked container by its alias: If you restart the source container (`servicename` in this case), the recipient container's `/etc/hosts` entry will be automatically updated. -## VOLUME (Shared Filesystems) +## VOLUME (shared filesystems) -v=[]: Create a bind mount with: [host-dir]:[container-dir]:[rw|ro]. If "container-dir" is missing, then docker creates a new volume. From d0f12a0f7379b95e848c773fa16dd706a49197c6 Mon Sep 17 00:00:00 2001 From: Zach Borboa Date: Wed, 1 Oct 2014 18:26:36 -0700 Subject: [PATCH 104/592] Fix typo Signed-off-by: Zach Borboa --- pkg/namesgenerator/names-generator.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pkg/namesgenerator/names-generator.go b/pkg/namesgenerator/names-generator.go index ebb5850bda..beb8a95f06 100644 --- a/pkg/namesgenerator/names-generator.go +++ b/pkg/namesgenerator/names-generator.go @@ -56,7 +56,7 @@ var ( // Mary Leakey - British paleoanthropologist who discovered the first fossilized Proconsul skull - http://en.wikipedia.org/wiki/Mary_Leakey // Muhammad ibn Jābir al-Ḥarrānī al-Battānī was a founding father of astronomy. http://en.wikipedia.org/wiki/Mu%E1%B8%A5ammad_ibn_J%C4%81bir_al-%E1%B8%A4arr%C4%81n%C4%AB_al-Batt%C4%81n%C4%AB // Niels Bohr is the father of quantum theory. http://en.wikipedia.org/wiki/Niels_Bohr. - // Nikola Tesla invented the AC electric system and every gaget ever used by a James Bond villain. http://en.wikipedia.org/wiki/Nikola_Tesla + // Nikola Tesla invented the AC electric system and every gadget ever used by a James Bond villain. http://en.wikipedia.org/wiki/Nikola_Tesla // Pierre de Fermat pioneered several aspects of modern mathematics. http://en.wikipedia.org/wiki/Pierre_de_Fermat // Rachel Carson - American marine biologist and conservationist, her book Silent Spring and other writings are credited with advancing the global environmental movement. http://en.wikipedia.org/wiki/Rachel_Carson // Radia Perlman is a software designer and network engineer and most famous for her invention of the spanning-tree protocol (STP). 
http://en.wikipedia.org/wiki/Radia_Perlman From 6ed610fb8014d500e001bb0677f0e1af0dc9312d Mon Sep 17 00:00:00 2001 From: Sven Dowideit Date: Tue, 21 Oct 2014 15:59:23 +1000 Subject: [PATCH 105/592] DOCKER_VERSION and docker-version havn't been implemented. So far, it looks like the declarations are not used, and so its safer not to confuse people into thinking they do something. Docker-DCO-1.1-Signed-off-by: Sven Dowideit (github: SvenDowideit) --- Dockerfile | 1 - builder/parser/testfiles/docker/Dockerfile | 1 - builder/parser/testfiles/docker/result | 1 - contrib/desktop-integration/chromium/Dockerfile | 2 -- contrib/desktop-integration/gparted/Dockerfile | 2 -- contrib/host-integration/Dockerfile.dev | 2 -- docs/sources/examples/nodejs_web_app.md | 7 +------ 7 files changed, 1 insertion(+), 15 deletions(-) diff --git a/Dockerfile b/Dockerfile index 34b0460c07..9fffa63f30 100644 --- a/Dockerfile +++ b/Dockerfile @@ -23,7 +23,6 @@ # the case. Therefore, you don't have to disable it anymore. # -docker-version 0.6.1 FROM ubuntu:14.04 MAINTAINER Tianon Gravi (@tianon) diff --git a/builder/parser/testfiles/docker/Dockerfile b/builder/parser/testfiles/docker/Dockerfile index 68f8f0b78b..1c173126ae 100644 --- a/builder/parser/testfiles/docker/Dockerfile +++ b/builder/parser/testfiles/docker/Dockerfile @@ -23,7 +23,6 @@ # the case. Therefore, you don't have to disable it anymore. # -docker-version 0.6.1 FROM ubuntu:14.04 MAINTAINER Tianon Gravi (@tianon) diff --git a/builder/parser/testfiles/docker/result b/builder/parser/testfiles/docker/result index 915e2ef17b..3ab006ec40 100644 --- a/builder/parser/testfiles/docker/result +++ b/builder/parser/testfiles/docker/result @@ -1,4 +1,3 @@ -(docker-version) (from "ubuntu:14.04") (maintainer "Tianon Gravi (@tianon)") (run "apt-get update && DEBIAN_FRONTEND=noninteractive apt-get install -yq apt-utils aufs-tools automake btrfs-tools build-essential curl dpkg-sig git iptables libapparmor-dev libcap-dev libsqlite3-dev lxc=1.0* mercurial pandoc parallel reprepro ruby1.9.1 ruby1.9.1-dev s3cmd=1.1.0* --no-install-recommends") diff --git a/contrib/desktop-integration/chromium/Dockerfile b/contrib/desktop-integration/chromium/Dockerfile index 0e0a7ce90e..5cacd1f999 100644 --- a/contrib/desktop-integration/chromium/Dockerfile +++ b/contrib/desktop-integration/chromium/Dockerfile @@ -20,8 +20,6 @@ # docker run --volumes-from chromium-data -v /tmp/.X11-unix:/tmp/.X11-unix \ # -e DISPLAY=unix$DISPLAY chromium -DOCKER_VERSION 1.3 - # Base docker image FROM debian:jessie MAINTAINER Jessica Frazelle diff --git a/contrib/desktop-integration/gparted/Dockerfile b/contrib/desktop-integration/gparted/Dockerfile index 6db1d24098..e76e658973 100644 --- a/contrib/desktop-integration/gparted/Dockerfile +++ b/contrib/desktop-integration/gparted/Dockerfile @@ -17,8 +17,6 @@ # -e DISPLAY=unix$DISPLAY gparted # -DOCKER-VERSION 1.3 - # Base docker image FROM debian:jessie MAINTAINER Jessica Frazelle diff --git a/contrib/host-integration/Dockerfile.dev b/contrib/host-integration/Dockerfile.dev index 1c0fbd8323..c8df852899 100644 --- a/contrib/host-integration/Dockerfile.dev +++ b/contrib/host-integration/Dockerfile.dev @@ -2,8 +2,6 @@ # This Dockerfile will create an image that allows to generate upstart and # systemd scripts (more to come) # -# docker-version 0.6.2 -# FROM ubuntu:12.10 MAINTAINER Guillaume J. 
Charmes diff --git a/docs/sources/examples/nodejs_web_app.md b/docs/sources/examples/nodejs_web_app.md index d634251fb8..3a9183e325 100644 --- a/docs/sources/examples/nodejs_web_app.md +++ b/docs/sources/examples/nodejs_web_app.md @@ -59,12 +59,8 @@ Create an empty file called `Dockerfile`: touch Dockerfile Open the `Dockerfile` in your favorite text editor -and add the following line that defines the version of Docker the image -requires to build (this example uses Docker 0.3.4): - # DOCKER-VERSION 0.3.4 - -Next, define the parent image you want to use to build your own image on +Define the parent image you want to use to build your own image on top of. Here, we'll use [CentOS](https://registry.hub.docker.com/_/centos/) (tag: `centos6`) available on the [Docker Hub](https://hub.docker.com/): @@ -108,7 +104,6 @@ defines your runtime, i.e. `node`, and the path to our app, i.e. `src/index.js` Your `Dockerfile` should now look like this: - # DOCKER-VERSION 0.3.4 FROM centos:centos6 # Enable EPEL for Node.js From 7cf322dffc5e9a4ea495ec08e0b0594cad01da92 Mon Sep 17 00:00:00 2001 From: Qiang Huang Date: Tue, 21 Oct 2014 15:00:25 +0800 Subject: [PATCH 106/592] daemon: resolve the graphdriver to show graphdriver is not always specified when the log printed, because it's provided in another thread. This patch will fix this. Signed-off-by: Qiang Huang --- docker/daemon.go | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/docker/daemon.go b/docker/daemon.go index 2f65878472..4a591ec5ea 100644 --- a/docker/daemon.go +++ b/docker/daemon.go @@ -45,6 +45,13 @@ func mainDaemon() { if err != nil { log.Fatal(err) } + log.Infof("docker daemon: %s %s; execdriver: %s; graphdriver: %s", + dockerversion.VERSION, + dockerversion.GITCOMMIT, + d.ExecutionDriver().Name(), + d.GraphDriver().String(), + ) + if err := d.Install(eng); err != nil { log.Fatal(err) } @@ -58,13 +65,6 @@ func mainDaemon() { log.Fatal(err) } }() - // TODO actually have a resolved graphdriver to show? - log.Infof("docker daemon: %s %s; execdriver: %s; graphdriver: %s", - dockerversion.VERSION, - dockerversion.GITCOMMIT, - daemonCfg.ExecDriver, - daemonCfg.GraphDriver, - ) // Serve api job := eng.Job("serveapi", flHosts...) From 36dae27fa26fe58efaf68296169cd2c6ba6dfcfe Mon Sep 17 00:00:00 2001 From: Andy Wilson Date: Tue, 21 Oct 2014 10:06:57 -0500 Subject: [PATCH 107/592] fix cli dashes in boot2docker resizing article The CLI commands had long dashes that won't work on most terminals when copy pasting. Signed-off-by: wilsaj --- docs/sources/articles/b2d_volume_resize.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/docs/sources/articles/b2d_volume_resize.md b/docs/sources/articles/b2d_volume_resize.md index 7d6790965e..1b39b49eda 100644 --- a/docs/sources/articles/b2d_volume_resize.md +++ b/docs/sources/articles/b2d_volume_resize.md @@ -28,7 +28,7 @@ it. Using the command line VirtualBox tools, clone the VMDK image to a VDI image: - $ vboxmanage clonehd /full/path/to/boot2docker-hd.vmdk /full/path/to/.vdi -—format VDI -—variant Standard + $ vboxmanage clonehd /full/path/to/boot2docker-hd.vmdk /full/path/to/.vdi --format VDI --variant Standard ## 3. Resize the VDI volume @@ -36,7 +36,7 @@ Choose a size that will be appropriate for your needs. If you’re spinning up a lot of containers, or your containers are particularly large, larger will be better: - $ vboxmanage modifyhd /full/path/to/.vdi —-resize + $ vboxmanage modifyhd /full/path/to/.vdi --resize ## 4. 
Download a disk partitioning tool ISO From 5df2c878a1b2baeb22538bb66be631b8da33236a Mon Sep 17 00:00:00 2001 From: Philipp Weissensteiner Date: Tue, 21 Oct 2014 20:17:20 +0200 Subject: [PATCH 108/592] Update container linking documentation As of 1.3 `docker ps` no longer shows links between containers. This updates the documentation to reflect that change. sudo docker docker inspect -f "{{ .HostConfig.Links }}" web Signed-off-by: Philipp Weissensteiner --- docs/sources/userguide/dockerlinks.md | 15 +++++---------- 1 file changed, 5 insertions(+), 10 deletions(-) diff --git a/docs/sources/userguide/dockerlinks.md b/docs/sources/userguide/dockerlinks.md index ce14bfa12a..631f4bdea5 100644 --- a/docs/sources/userguide/dockerlinks.md +++ b/docs/sources/userguide/dockerlinks.md @@ -151,18 +151,13 @@ earlier. The `--link` flag takes the form: Where `name` is the name of the container we're linking to and `alias` is an alias for the link name. You'll see how that alias gets used shortly. -Next, look at the names of your linked containers by filtering the full output of -`docker ps` to the last column (NAMES) using `docker ps --no-trunc | awk '{print $NF}'`. +Next, inspect your linked containers with `docker inspect`: - $ sudo docker ps --no-trunc | awk '{print $NF}' - NAMES - db, web/db - web + $ sudo docker inspect -f "{{ .HostConfig.Links }}" web + [/db:/web/db] -You can see your named containers, `db` and `web`, and you can see that the `db` -container also shows `web/db` in the `NAMES` column. This tells you that the -`web` container is linked to the `db` container, which allows it to access information -about the `db` container. +You can see that the `web` container is now linked to the `db` container +`web/db`. Which allows it to access information about the `db` container. So what does linking the containers actually do? You've learned that a link creates a source container that can provide information about itself to a recipient container. In From a34831f0168156ded7ecf96a1734c2735fede1ca Mon Sep 17 00:00:00 2001 From: Erik Hollensbe Date: Tue, 21 Oct 2014 19:26:20 +0000 Subject: [PATCH 109/592] builder: handle cases where onbuild is not uppercase. 
Docker-DCO-1.1-Signed-off-by: Erik Hollensbe (github: erikh) --- builder/dispatchers.go | 3 +- integration-cli/docker_cli_build_test.go | 35 ++++++++++++++++++++++++ 2 files changed, 37 insertions(+), 1 deletion(-) diff --git a/builder/dispatchers.go b/builder/dispatchers.go index 82bb6ce5fd..0c2a580872 100644 --- a/builder/dispatchers.go +++ b/builder/dispatchers.go @@ -11,6 +11,7 @@ import ( "fmt" "io/ioutil" "path/filepath" + "regexp" "strings" "github.com/docker/docker/nat" @@ -129,7 +130,7 @@ func onbuild(b *Builder, args []string, attributes map[string]bool, original str return fmt.Errorf("%s isn't allowed as an ONBUILD trigger", triggerInstruction) } - original = strings.TrimSpace(strings.TrimLeft(original, "ONBUILD")) + original = regexp.MustCompile(`(?i)^\s*ONBUILD\s*`).ReplaceAllString(original, "") b.Config.OnBuild = append(b.Config.OnBuild, original) return b.commit("", b.Config.Cmd, fmt.Sprintf("ONBUILD %s", original)) diff --git a/integration-cli/docker_cli_build_test.go b/integration-cli/docker_cli_build_test.go index 12b8e00b6f..c909b14f0d 100644 --- a/integration-cli/docker_cli_build_test.go +++ b/integration-cli/docker_cli_build_test.go @@ -15,6 +15,41 @@ import ( "github.com/docker/docker/pkg/archive" ) +func TestBuildOnBuildLowercase(t *testing.T) { + name := "testbuildonbuildlowercase" + name2 := "testbuildonbuildlowercase2" + + defer deleteImages(name, name2) + + _, err := buildImage(name, + ` + FROM busybox + onbuild run echo quux + `, true) + + if err != nil { + t.Fatal(err) + } + + _, out, err := buildImageWithOut(name2, fmt.Sprintf(` + FROM %s + `, name), true) + + if err != nil { + t.Fatal(err) + } + + if !strings.Contains(out, "quux") { + t.Fatalf("Did not receive the expected echo text, got %s", out) + } + + if strings.Contains(out, "ONBUILD ONBUILD") { + t.Fatalf("Got an ONBUILD ONBUILD error with no error: got %s", out) + } + + logDone("build - handle case-insensitive onbuild statement") +} + func TestBuildEnvEscapes(t *testing.T) { name := "testbuildenvescapes" defer deleteAllContainers() From ba5370c116e3879c88736d3456586ec5703f581b Mon Sep 17 00:00:00 2001 From: Michael Crosby Date: Tue, 21 Oct 2014 22:48:32 +0000 Subject: [PATCH 110/592] Fix racy integration tests Do not run containers in the background in the integration tests if you depend on the run completing. It is better especially if you just want to ensure that the run has completed with a `true` to just run in foreground and use a known name for the container to query it after it has stopped. The failures can be reproduced on most machines by giving your dind container one core and a cpushare. 
docker run -c 200 --cpuset 0 -ti --rm --privileged -e DOCKER_GRAPHDRIVER=vfs docker hack/make.sh binary test-integration-cli Signed-off-by: Michael Crosby --- integration-cli/docker_api_containers_test.go | 28 +++++++++---------- 1 file changed, 14 insertions(+), 14 deletions(-) diff --git a/integration-cli/docker_api_containers_test.go b/integration-cli/docker_api_containers_test.go index 89b7ab1fb9..605c24bf91 100644 --- a/integration-cli/docker_api_containers_test.go +++ b/integration-cli/docker_api_containers_test.go @@ -16,20 +16,21 @@ func TestContainerApiGetAll(t *testing.T) { t.Fatalf("Cannot query container count: %v", err) } - runCmd := exec.Command(dockerBinary, "run", "-d", "busybox", "true") + name := "getall" + runCmd := exec.Command(dockerBinary, "run", "--name", name, "busybox", "true") out, _, err := runCommandWithOutput(runCmd) if err != nil { t.Fatalf("Error on container creation: %v, output: %q", err, out) } - testContainerId := stripTrailingCharacters(out) - body, err := sockRequest("GET", "/containers/json?all=1") if err != nil { t.Fatalf("GET all containers sockRequest failed: %v", err) } - var inspectJSON []map[string]interface{} + var inspectJSON []struct { + Names []string + } if err = json.Unmarshal(body, &inspectJSON); err != nil { t.Fatalf("unable to unmarshal response body: %v", err) } @@ -37,8 +38,9 @@ func TestContainerApiGetAll(t *testing.T) { if len(inspectJSON) != startCount+1 { t.Fatalf("Expected %d container(s), %d found (started with: %d)", startCount+1, len(inspectJSON), startCount) } - if id, _ := inspectJSON[0]["Id"]; id != testContainerId { - t.Fatalf("Container ID mismatch. Expected: %s, received: %s\n", testContainerId, id) + + if actual := inspectJSON[0].Names[0]; actual != "/"+name { + t.Fatalf("Container Name mismatch. 
Expected: %q, received: %q\n", "/"+name, actual) } deleteAllContainers() @@ -47,15 +49,14 @@ func TestContainerApiGetAll(t *testing.T) { } func TestContainerApiGetExport(t *testing.T) { - runCmd := exec.Command(dockerBinary, "run", "-d", "busybox", "touch", "/test") + name := "exportcontainer" + runCmd := exec.Command(dockerBinary, "run", "--name", name, "busybox", "touch", "/test") out, _, err := runCommandWithOutput(runCmd) if err != nil { t.Fatalf("Error on container creation: %v, output: %q", err, out) } - testContainerId := stripTrailingCharacters(out) - - body, err := sockRequest("GET", "/containers/"+testContainerId+"/export") + body, err := sockRequest("GET", "/containers/"+name+"/export") if err != nil { t.Fatalf("GET containers/export sockRequest failed: %v", err) } @@ -84,15 +85,14 @@ func TestContainerApiGetExport(t *testing.T) { } func TestContainerApiGetChanges(t *testing.T) { - runCmd := exec.Command(dockerBinary, "run", "-d", "busybox", "rm", "/etc/passwd") + name := "changescontainer" + runCmd := exec.Command(dockerBinary, "run", "--name", name, "busybox", "rm", "/etc/passwd") out, _, err := runCommandWithOutput(runCmd) if err != nil { t.Fatalf("Error on container creation: %v, output: %q", err, out) } - testContainerId := stripTrailingCharacters(out) - - body, err := sockRequest("GET", "/containers/"+testContainerId+"/changes") + body, err := sockRequest("GET", "/containers/"+name+"/changes") if err != nil { t.Fatalf("GET containers/changes sockRequest failed: %v", err) } From 373fd1ce16f5699afc55d595ec6a4e684b7483bc Mon Sep 17 00:00:00 2001 From: Jessica Frazelle Date: Tue, 21 Oct 2014 16:42:21 -0700 Subject: [PATCH 111/592] Fixes #8690 Docker-DCO-1.1-Signed-off-by: Jessica Frazelle (github: jfrazelle) --- graph/graph.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/graph/graph.go b/graph/graph.go index 00c0324ea8..d5d4fcdab1 100644 --- a/graph/graph.go +++ b/graph/graph.go @@ -72,7 +72,7 @@ func (graph *Graph) restore() error { // FIXME: Implement error subclass instead of looking at the error text // Note: This is the way golang implements os.IsNotExists on Plan9 func (graph *Graph) IsNotExist(err error) bool { - return err != nil && (strings.Contains(err.Error(), "does not exist") || strings.Contains(err.Error(), "No such")) + return err != nil && (strings.Contains(strings.ToLower(err.Error()), "does not exist") || strings.Contains(strings.ToLower(err.Error()), "no such")) } // Exists returns true if an image is registered at the given id. 
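The one-line change above makes `IsNotExist` match error text case-insensitively, so messages such as "Does not exist" and "no such file or directory" coming from different code paths are treated the same way. A minimal standalone sketch of that idea (the error strings here are made up for illustration):

```go
package main

import (
	"errors"
	"fmt"
	"strings"
)

// isNotExist mirrors the matching above: lowercase the error text once,
// then look for either substring, so capitalization no longer matters.
func isNotExist(err error) bool {
	if err == nil {
		return false
	}
	msg := strings.ToLower(err.Error())
	return strings.Contains(msg, "does not exist") || strings.Contains(msg, "no such")
}

func main() {
	// Hypothetical error strings; different code paths capitalize differently.
	fmt.Println(isNotExist(errors.New("Entity does not exist")))     // true
	fmt.Println(isNotExist(errors.New("No such file or directory"))) // true
	fmt.Println(isNotExist(errors.New("permission denied")))         // false
}
```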
From 64cb7725381740986022eb4633c8f91be3dd7b4f Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Torstein=20Huseb=C3=B8?= Date: Wed, 22 Oct 2014 16:48:37 +0200 Subject: [PATCH 112/592] Correct url from github.com/dotcloud/docker{,-py} to github.com/docker/docker{,-py} MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Signed-off-by: Torstein Husebø --- CONTRIBUTING.md | 2 +- docs/sources/reference/api/remote_api_client_libraries.md | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index 3ed8bf9d43..c65f8d1070 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -25,7 +25,7 @@ When considering a design proposal, we are looking for: * A description of the problem this design proposal solves * An issue -- not a pull request -- that describes what you will take action on * Please prefix your issue with `Proposal:` in the title -* Please review [the existing Proposals](https://github.com/dotcloud/docker/issues?direction=asc&labels=Proposal&page=1&sort=created&state=open) +* Please review [the existing Proposals](https://github.com/docker/docker/issues?direction=asc&labels=Proposal&page=1&sort=created&state=open) before reporting a new issue. You can always pair with someone if you both have the same idea. diff --git a/docs/sources/reference/api/remote_api_client_libraries.md b/docs/sources/reference/api/remote_api_client_libraries.md index 71bd2ebfc1..bff2fa30cf 100644 --- a/docs/sources/reference/api/remote_api_client_libraries.md +++ b/docs/sources/reference/api/remote_api_client_libraries.md @@ -131,7 +131,7 @@ will add the libraries here. Python docker-py - https://github.com/dotcloud/docker-py + https://github.com/docker/docker-py Active From 5572dbb7504c951f4ddd2710a4037844a95caa6a Mon Sep 17 00:00:00 2001 From: Andy Goldstein Date: Fri, 3 Oct 2014 13:38:44 -0400 Subject: [PATCH 113/592] Fix stdout premature EOF Never close attached stream before both stdout and stderr have written all their buffered contents. Remove stdinCloser because it is not needed any more as the stream is closed anyway after attach has finished. 
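The underlying race is general: when the writer shared with the client is closed as soon as the stdin copy ends, any stdout or stderr output still sitting in buffers is dropped. The fix keeps every stream open until each copier has drained. A rough standalone sketch of that pattern, not the daemon's actual helper:

```go
package attachsketch

import "io"

// drainAll copies each source to its destination and returns only after
// every copy has finished, so no buffered output is lost to an early close.
// dsts and srcs are assumed to have the same length.
func drainAll(dsts []io.Writer, srcs []io.Reader) error {
	errs := make(chan error, len(srcs))
	for i := range srcs {
		go func(dst io.Writer, src io.Reader) {
			_, err := io.Copy(dst, src)
			errs <- err
		}(dsts[i], srcs[i])
	}
	var first error
	for i := 0; i < len(srcs); i++ {
		// Closing the destinations here, before all copies finish, would
		// cause the premature EOF this patch fixes.
		if err := <-errs; err != nil && err != io.ErrClosedPipe && first == nil {
			first = err
		}
	}
	return first
}
```

In the daemon itself the same idea appears as the loop that waits on the errors channel for all nJobs copiers before attach returns.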
Fixes #3631 Signed-off-by: Andy Goldstein --- daemon/attach.go | 23 ++------------ daemon/exec.go | 5 ++- integration-cli/docker_cli_run_test.go | 44 ++++++++++++++++++++++++++ 3 files changed, 49 insertions(+), 23 deletions(-) diff --git a/daemon/attach.go b/daemon/attach.go index e115dac2e0..0dc8789f37 100644 --- a/daemon/attach.go +++ b/daemon/attach.go @@ -83,7 +83,6 @@ func (daemon *Daemon) ContainerAttach(job *engine.Job) engine.Status { var ( cStdin io.ReadCloser cStdout, cStderr io.Writer - cStdinCloser io.Closer ) if stdin { @@ -94,7 +93,6 @@ func (daemon *Daemon) ContainerAttach(job *engine.Job) engine.Status { io.Copy(w, job.Stdin) }() cStdin = r - cStdinCloser = job.Stdin } if stdout { cStdout = job.Stdout @@ -103,7 +101,7 @@ func (daemon *Daemon) ContainerAttach(job *engine.Job) engine.Status { cStderr = job.Stderr } - <-daemon.attach(&container.StreamConfig, container.Config.OpenStdin, container.Config.StdinOnce, container.Config.Tty, cStdin, cStdinCloser, cStdout, cStderr) + <-daemon.attach(&container.StreamConfig, container.Config.OpenStdin, container.Config.StdinOnce, container.Config.Tty, cStdin, cStdout, cStderr) // If we are in stdinonce mode, wait for the process to end // otherwise, simply return if container.Config.StdinOnce && !container.Config.Tty { @@ -113,7 +111,7 @@ func (daemon *Daemon) ContainerAttach(job *engine.Job) engine.Status { return engine.StatusOK } -func (daemon *Daemon) attach(streamConfig *StreamConfig, openStdin, stdinOnce, tty bool, stdin io.ReadCloser, stdinCloser io.Closer, stdout io.Writer, stderr io.Writer) chan error { +func (daemon *Daemon) attach(streamConfig *StreamConfig, openStdin, stdinOnce, tty bool, stdin io.ReadCloser, stdout io.Writer, stderr io.Writer) chan error { var ( cStdout, cStderr io.ReadCloser nJobs int @@ -130,10 +128,10 @@ func (daemon *Daemon) attach(streamConfig *StreamConfig, openStdin, stdinOnce, t go func() { log.Debugf("attach: stdin: begin") defer log.Debugf("attach: stdin: end") - // No matter what, when stdin is closed (io.Copy unblock), close stdout and stderr if stdinOnce && !tty { defer cStdin.Close() } else { + // No matter what, when stdin is closed (io.Copy unblock), close stdout and stderr defer func() { if cStdout != nil { cStdout.Close() @@ -173,9 +171,6 @@ func (daemon *Daemon) attach(streamConfig *StreamConfig, openStdin, stdinOnce, t if stdinOnce && stdin != nil { defer stdin.Close() } - if stdinCloser != nil { - defer stdinCloser.Close() - } _, err := io.Copy(stdout, cStdout) if err == io.ErrClosedPipe { err = nil @@ -189,9 +184,6 @@ func (daemon *Daemon) attach(streamConfig *StreamConfig, openStdin, stdinOnce, t } else { // Point stdout of container to a no-op writer. go func() { - if stdinCloser != nil { - defer stdinCloser.Close() - } if cStdout, err := streamConfig.StdoutPipe(); err != nil { log.Errorf("attach: stdout pipe: %s", err) } else { @@ -213,9 +205,6 @@ func (daemon *Daemon) attach(streamConfig *StreamConfig, openStdin, stdinOnce, t if stdinOnce && stdin != nil { defer stdin.Close() } - if stdinCloser != nil { - defer stdinCloser.Close() - } _, err := io.Copy(stderr, cStderr) if err == io.ErrClosedPipe { err = nil @@ -229,10 +218,6 @@ func (daemon *Daemon) attach(streamConfig *StreamConfig, openStdin, stdinOnce, t } else { // Point stderr at a no-op writer. 
go func() { - if stdinCloser != nil { - defer stdinCloser.Close() - } - if cStderr, err := streamConfig.StderrPipe(); err != nil { log.Errorf("attach: stdout pipe: %s", err) } else { @@ -251,8 +236,6 @@ func (daemon *Daemon) attach(streamConfig *StreamConfig, openStdin, stdinOnce, t } }() - // FIXME: how to clean up the stdin goroutine without the unwanted side effect - // of closing the passed stdin? Add an intermediary io.Pipe? for i := 0; i < nJobs; i++ { log.Debugf("attach: waiting for job %d/%d", i+1, nJobs) if err := <-errors; err != nil { diff --git a/daemon/exec.go b/daemon/exec.go index a6113b0fca..809002a8c9 100644 --- a/daemon/exec.go +++ b/daemon/exec.go @@ -155,7 +155,6 @@ func (d *Daemon) ContainerExecStart(job *engine.Job) engine.Status { var ( cStdin io.ReadCloser cStdout, cStderr io.Writer - cStdinCloser io.Closer execName = job.Args[0] ) @@ -183,10 +182,10 @@ func (d *Daemon) ContainerExecStart(job *engine.Job) engine.Status { r, w := io.Pipe() go func() { defer w.Close() + defer log.Debugf("Closing buffered stdin pipe") io.Copy(w, job.Stdin) }() cStdin = r - cStdinCloser = job.Stdin } if execConfig.OpenStdout { cStdout = job.Stdout @@ -204,7 +203,7 @@ func (d *Daemon) ContainerExecStart(job *engine.Job) engine.Status { execConfig.StreamConfig.stdinPipe = ioutils.NopWriteCloser(ioutil.Discard) // Silently drop stdin } - attachErr := d.attach(&execConfig.StreamConfig, execConfig.OpenStdin, false, execConfig.ProcessConfig.Tty, cStdin, cStdinCloser, cStdout, cStderr) + attachErr := d.attach(&execConfig.StreamConfig, execConfig.OpenStdin, false, execConfig.ProcessConfig.Tty, cStdin, cStdout, cStderr) execErr := make(chan error) diff --git a/integration-cli/docker_cli_run_test.go b/integration-cli/docker_cli_run_test.go index eeb3601e7a..ae2e8d3ba3 100644 --- a/integration-cli/docker_cli_run_test.go +++ b/integration-cli/docker_cli_run_test.go @@ -4,6 +4,7 @@ import ( "bufio" "bytes" "fmt" + "io" "io/ioutil" "net" "os" @@ -2446,3 +2447,46 @@ func TestRunVolumesCleanPaths(t *testing.T) { logDone("run - volume paths are cleaned") } + +// Regression test for #3631 +func TestRunSlowStdoutConsumer(t *testing.T) { + defer deleteAllContainers() + + c := exec.Command("/bin/bash", "-c", dockerBinary+` run --rm -i busybox /bin/sh -c "dd if=/dev/zero of=/foo bs=1024 count=2000 &>/dev/null; catv /foo"`) + + stdout, err := c.StdoutPipe() + if err != nil { + t.Fatal(err) + } + + if err := c.Start(); err != nil { + t.Fatal(err) + } + n, err := consumeSlow(stdout, 10000, 5*time.Millisecond) + if err != nil { + t.Fatal(err) + } + + expected := 2 * 1024 * 2000 + if n != expected { + t.Fatalf("Expected %d, got %d", expected, n) + } + + logDone("run - slow consumer") +} + +func consumeSlow(reader io.Reader, chunkSize int, interval time.Duration) (n int, err error) { + buffer := make([]byte, chunkSize) + for { + var readBytes int + readBytes, err = reader.Read(buffer) + n += readBytes + if err != nil { + if err == io.EOF { + err = nil + } + return + } + time.Sleep(interval) + } +} From e0c9d7b654221a0d4e5a310b0f9a0adb9ef69aa0 Mon Sep 17 00:00:00 2001 From: Andrea Luzzardi Date: Mon, 13 Oct 2014 20:41:22 -0700 Subject: [PATCH 114/592] Add MemInfo to the system pkg. MemInfo provides a simple API to get memory information from the system. 
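As the new files below show, the package exposes a single call, `ReadMemInfo`, which parses `/proc/meminfo` on Linux, converts the values from kB to bytes, and returns `ErrNotSupportedPlatform` on other platforms. A short usage sketch (import path taken from the repository layout in this patch):

```go
package main

import (
	"fmt"
	"log"

	"github.com/docker/docker/pkg/system"
)

func main() {
	// Values in MemInfo are already converted from the kB figures in
	// /proc/meminfo to plain bytes.
	info, err := system.ReadMemInfo()
	if err != nil {
		log.Fatal(err) // e.g. ErrNotSupportedPlatform on non-Linux builds
	}
	fmt.Printf("mem:  total=%d free=%d\n", info.MemTotal, info.MemFree)
	fmt.Printf("swap: total=%d free=%d\n", info.SwapTotal, info.SwapFree)
}
```

The accompanying test feeds a synthetic /proc/meminfo through parseMemInfo to check the kB-to-byte conversion and the handling of malformed entries.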
Signed-off-by: Andrea Luzzardi --- pkg/system/meminfo.go | 17 ++++++++ pkg/system/meminfo_linux.go | 67 +++++++++++++++++++++++++++++++ pkg/system/meminfo_linux_test.go | 37 +++++++++++++++++ pkg/system/meminfo_unsupported.go | 7 ++++ 4 files changed, 128 insertions(+) create mode 100644 pkg/system/meminfo.go create mode 100644 pkg/system/meminfo_linux.go create mode 100644 pkg/system/meminfo_linux_test.go create mode 100644 pkg/system/meminfo_unsupported.go diff --git a/pkg/system/meminfo.go b/pkg/system/meminfo.go new file mode 100644 index 0000000000..3b6e947e67 --- /dev/null +++ b/pkg/system/meminfo.go @@ -0,0 +1,17 @@ +package system + +// MemInfo contains memory statistics of the host system. +type MemInfo struct { + // Total usable RAM (i.e. physical RAM minus a few reserved bits and the + // kernel binary code). + MemTotal int64 + + // Amount of free memory. + MemFree int64 + + // Total amount of swap space available. + SwapTotal int64 + + // Amount of swap space that is currently unused. + SwapFree int64 +} diff --git a/pkg/system/meminfo_linux.go b/pkg/system/meminfo_linux.go new file mode 100644 index 0000000000..b7de3ff776 --- /dev/null +++ b/pkg/system/meminfo_linux.go @@ -0,0 +1,67 @@ +package system + +import ( + "bufio" + "errors" + "io" + "os" + "strconv" + "strings" + + "github.com/docker/docker/pkg/units" +) + +var ( + ErrMalformed = errors.New("malformed file") +) + +// Retrieve memory statistics of the host system and parse them into a MemInfo +// type. +func ReadMemInfo() (*MemInfo, error) { + file, err := os.Open("/proc/meminfo") + if err != nil { + return nil, err + } + defer file.Close() + return parseMemInfo(file) +} + +func parseMemInfo(reader io.Reader) (*MemInfo, error) { + meminfo := &MemInfo{} + scanner := bufio.NewScanner(reader) + for scanner.Scan() { + // Expected format: ["MemTotal:", "1234", "kB"] + parts := strings.Fields(scanner.Text()) + + // Sanity checks: Skip malformed entries. + if len(parts) < 3 || parts[2] != "kB" { + continue + } + + // Convert to bytes. + size, err := strconv.Atoi(parts[1]) + if err != nil { + continue + } + bytes := int64(size) * units.KiB + + switch parts[0] { + case "MemTotal:": + meminfo.MemTotal = bytes + case "MemFree:": + meminfo.MemFree = bytes + case "SwapTotal:": + meminfo.SwapTotal = bytes + case "SwapFree:": + meminfo.SwapFree = bytes + } + + } + + // Handle errors that may have occurred during the reading of the file. 
+ if err := scanner.Err(); err != nil { + return nil, err + } + + return meminfo, nil +} diff --git a/pkg/system/meminfo_linux_test.go b/pkg/system/meminfo_linux_test.go new file mode 100644 index 0000000000..377405ea69 --- /dev/null +++ b/pkg/system/meminfo_linux_test.go @@ -0,0 +1,37 @@ +package system + +import ( + "strings" + "testing" + + "github.com/docker/docker/pkg/units" +) + +func TestMemInfo(t *testing.T) { + const input = ` + MemTotal: 1 kB + MemFree: 2 kB + SwapTotal: 3 kB + SwapFree: 4 kB + Malformed1: + Malformed2: 1 + Malformed3: 2 MB + Malformed4: X kB + ` + meminfo, err := parseMemInfo(strings.NewReader(input)) + if err != nil { + t.Fatal(err) + } + if meminfo.MemTotal != 1*units.KiB { + t.Fatalf("Unexpected MemTotal: %d", meminfo.MemTotal) + } + if meminfo.MemFree != 2*units.KiB { + t.Fatalf("Unexpected MemFree: %d", meminfo.MemFree) + } + if meminfo.SwapTotal != 3*units.KiB { + t.Fatalf("Unexpected SwapTotal: %d", meminfo.SwapTotal) + } + if meminfo.SwapFree != 4*units.KiB { + t.Fatalf("Unexpected SwapFree: %d", meminfo.SwapFree) + } +} diff --git a/pkg/system/meminfo_unsupported.go b/pkg/system/meminfo_unsupported.go new file mode 100644 index 0000000000..63b8b16e05 --- /dev/null +++ b/pkg/system/meminfo_unsupported.go @@ -0,0 +1,7 @@ +// +build !linux + +package system + +func ReadMemInfo() (*MemInfo, error) { + return nil, ErrNotSupportedPlatform +} From 93e17b2ecadc4a409fc55b1460bc2721a4345716 Mon Sep 17 00:00:00 2001 From: Jessica Frazelle Date: Wed, 22 Oct 2014 15:51:14 -0700 Subject: [PATCH 115/592] TestRunRedirectStdout kept failing with timed out. Docker-DCO-1.1-Signed-off-by: Jessica Frazelle (github: jfrazelle) --- integration-cli/docker_cli_run_test.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/integration-cli/docker_cli_run_test.go b/integration-cli/docker_cli_run_test.go index eeb3601e7a..c4cf8820bd 100644 --- a/integration-cli/docker_cli_run_test.go +++ b/integration-cli/docker_cli_run_test.go @@ -2266,7 +2266,7 @@ func TestRunRedirectStdout(t *testing.T) { }() select { - case <-time.After(time.Second): + case <-time.After(2 * time.Second): t.Fatal("command timeout") case <-ch: } From 0390d04d8a236c427f553ead4328f6b4947a61ef Mon Sep 17 00:00:00 2001 From: Sven Dowideit Date: Wed, 22 Oct 2014 12:30:59 +1000 Subject: [PATCH 116/592] Rewrite ENTRYPOINT documentation covering all the combinations with examples. Docker-DCO-1.1-Signed-off-by: Sven Dowideit (github: SvenDowideit) --- docs/sources/reference/builder.md | 217 +++++++++++++++++++++++++----- 1 file changed, 186 insertions(+), 31 deletions(-) diff --git a/docs/sources/reference/builder.md b/docs/sources/reference/builder.md index 2f36942ce6..4bb02e3e21 100644 --- a/docs/sources/reference/builder.md +++ b/docs/sources/reference/builder.md @@ -460,43 +460,140 @@ The copy obeys the following rules: ENTRYPOINT has two forms: - `ENTRYPOINT ["executable", "param1", "param2"]` - (*exec* form, the preferred form) + (the preferred *exec* form) - `ENTRYPOINT command param1 param2` (*shell* form) -There can only be one `ENTRYPOINT` in a `Dockerfile`. If you have more -than one `ENTRYPOINT`, then only the last one in the `Dockerfile` will -have an effect. +An `ENTRYPOINT` allows you to configure a container that will run as an executable. -An `ENTRYPOINT` helps you to configure a container that you can run as -an executable. That is, when you specify an `ENTRYPOINT`, then the whole -container runs as if it was just that executable. 
+For example, the following will start nginx with its default content, listening +on port 80: -Unlike the behavior of the `CMD` instruction, The `ENTRYPOINT` -instruction adds an entry command that will **not** be overwritten when -arguments are passed to `docker run`. This allows arguments to be passed -to the entry point, i.e. `docker run -d` will pass the `-d` -argument to the entry point. + docker run -i -t --rm -p 80:80 nginx -You can specify parameters either in the `ENTRYPOINT` JSON array (as in -"like an exec" above), or by using a `CMD` instruction. Parameters in -the `ENTRYPOINT` instruction will not be overridden by the `docker run` -arguments, but parameters specified via a `CMD` instruction will be -overridden by `docker run` arguments. +Command line arguments to `docker run ` will be appended after all +elements in an *exec* form `ENTRYPOINT`, and will override all elements specified +using `CMD`. +This allows arguments to be passed to the entry point, i.e., `docker run -d` +will pass the `-d` argument to the entry point. +You can override the `ENTRYPOINT` instruction using the `docker run --entrypoint` +flag. -Like a `CMD`, you can specify a plain string for the `ENTRYPOINT` and it -will execute in `/bin/sh -c`: +The *shell* form prevents any `CMD` or `run` command line arguments from being +used, but has the disadvantage that your `ENTRYPOINT` will be started as a +subcommand of `/bin/sh -c`, which does not pass signals. +This means that the executable will not be the container's `PID 1` - and +will _not_ receive Unix signals - so your executable will not receive a +`SIGTERM` from `docker stop `. + +Only the last `ENTRYPOINT` instruction in the `Dockerfile` will have an effect. + +### Exec form ENTRYPOINT example + +You can use the *exec* form of `ENTRYPOINT` to set fairly stable default commands +and arguments and then use either form of `CMD` to set additional defaults that +are more likely to be changed. FROM ubuntu - ENTRYPOINT ls -l + ENTRYPOINT ["top", "-b"] + CMD ["-c"] -For example, that `Dockerfile`'s image will *always* take a directory as -an input and return a directory listing. If you wanted to make this -optional but default, you could use a `CMD` instruction: +When you run the container, you can see that `top` is the only process: - FROM ubuntu - CMD ["-l"] - ENTRYPOINT ["ls"] + $ docker run -it --rm --name test top -H + top - 08:25:00 up 7:27, 0 users, load average: 0.00, 0.01, 0.05 + Threads: 1 total, 1 running, 0 sleeping, 0 stopped, 0 zombie + %Cpu(s): 0.1 us, 0.1 sy, 0.0 ni, 99.7 id, 0.0 wa, 0.0 hi, 0.0 si, 0.0 st + KiB Mem: 2056668 total, 1616832 used, 439836 free, 99352 buffers + KiB Swap: 1441840 total, 0 used, 1441840 free. 1324440 cached Mem + + PID USER PR NI VIRT RES SHR S %CPU %MEM TIME+ COMMAND + 1 root 20 0 19744 2336 2080 R 0.0 0.1 0:00.04 top + +To examine the result further, you can use `docker exec`: + + $ docker exec -it test ps aux + USER PID %CPU %MEM VSZ RSS TTY STAT START TIME COMMAND + root 1 2.6 0.1 19752 2352 ? Ss+ 08:24 0:00 top -b -H + root 7 0.0 0.1 15572 2164 ? R+ 08:25 0:00 ps aux + +And you can gracefully request `top` to shut down using `docker stop test`. 
+ +If you need to write a starter script for a single executable, you can ensure that +the final executable receives the Unix signals by using `exec` and `gosu` +(see [the Dockerfile best practices](/articles/dockerfile_best-practices/#entrypoint) +for more details): + +```bash +#!/bin/bash +set -e + +if [ "$1" = 'postgres' ]; then + chown -R postgres "$PGDATA" + + if [ -z "$(ls -A "$PGDATA")" ]; then + gosu postgres initdb + fi + + exec gosu postgres "$@" +fi + +exec "$@" +``` + +Lastly, if you need to do some extra cleanup (or communicate with other containers) +on shutdown, or are co-ordinating more than one executable, you may need to ensure +that the `ENTRYPOINT` script receives the Unix signals, passes them on, and then +does some more work: + +``` +#!/bin/sh +# Note: I've written this using sh so it works in the busybox container too + +# USE the trap if you need to also do manual cleanup after the service is stopped, +# or need to start multiple services in the one container +trap "echo TRAPed signal" HUP INT QUIT KILL TERM + +# start service in background here +/usr/sbin/apachectl start + +echo "[hit enter key to exit] or run 'docker stop '" +read + +# stop service and clean up here +echo "stopping apache" +/usr/sbin/apachectl stop + +echo "exited $0" +``` + +If you run this image with `docker run -it --rm -p 80:80 --name test apache`, +you can then examine the container's processes with `docker exec`, or `docker top`, +and then ask the script to stop Apache: + +```bash +$ docker exec -it test ps aux +USER PID %CPU %MEM VSZ RSS TTY STAT START TIME COMMAND +root 1 0.1 0.0 4448 692 ? Ss+ 00:42 0:00 /bin/sh /run.sh 123 cmd cmd2 +root 19 0.0 0.2 71304 4440 ? Ss 00:42 0:00 /usr/sbin/apache2 -k start +www-data 20 0.2 0.2 360468 6004 ? Sl 00:42 0:00 /usr/sbin/apache2 -k start +www-data 21 0.2 0.2 360468 6000 ? Sl 00:42 0:00 /usr/sbin/apache2 -k start +root 81 0.0 0.1 15572 2140 ? R+ 00:44 0:00 ps aux +$ docker top test +PID USER COMMAND +10035 root {run.sh} /bin/sh /run.sh 123 cmd cmd2 +10054 root /usr/sbin/apache2 -k start +10055 33 /usr/sbin/apache2 -k start +10056 33 /usr/sbin/apache2 -k start +$ /usr/bin/time docker stop test +test +real 0m 0.27s +user 0m 0.03s +sys 0m 0.03s +``` + +> **Note:** you can over ride the `ENTRYPOINT` setting using `--entrypoint`, +> but this can only set the binary to *exec* (no `sh -c` will be used). > **Note**: > The *exec* form is parsed as a JSON array, which means that @@ -505,13 +602,71 @@ optional but default, you could use a `CMD` instruction: > **Note**: > Unlike the *shell* form, the *exec* form does not invoke a command shell. > This means that normal shell processing does not happen. For example, -> `CMD [ "echo", "$HOME" ]` will not do variable substitution on `$HOME`. +> `ENTRYPOINT [ "echo", "$HOME" ]` will not do variable substitution on `$HOME`. > If you want shell processing then either use the *shell* form or execute -> a shell directly, for example: `CMD [ "sh", "-c", "echo", "$HOME" ]`. +> a shell directly, for example: `ENTRYPOINT [ "sh", "-c", "echo", "$HOME" ]`. +> Variables that are defined in the `Dockerfile`using `ENV`, will be substituted by +> the `Dockerfile` parser. -> **Note**: -> It is preferable to use the JSON array format for specifying -> `ENTRYPOINT` instructions. +### Shell form ENTRYPOINT example + +You can specify a plain string for the `ENTRYPOINT` and it will execute in `/bin/sh -c`. 
+This form will use shell processing to substitute shell environment variables, +and will ignore any `CMD` or `docker run` command line arguments. +To ensure that `docker stop` will signal any long running `ENTRYPOINT` executable +correctly, you need to remember to start it with `exec`: + + FROM ubuntu + ENTRYPOINT exec top -b + +When you run this image, you'll see the single `PID 1` process: + + $ docker run -it --rm --name test top + Mem: 1704520K used, 352148K free, 0K shrd, 0K buff, 140368121167873K cached + CPU: 5% usr 0% sys 0% nic 94% idle 0% io 0% irq 0% sirq + Load average: 0.08 0.03 0.05 2/98 6 + PID PPID USER STAT VSZ %VSZ %CPU COMMAND + 1 0 root R 3164 0% 0% top -b + +Which will exit cleanly on `docker stop`: + + $ /usr/bin/time docker stop test + test + real 0m 0.20s + user 0m 0.02s + sys 0m 0.04s + +If you forget to add `exec` to the beginning of your `ENTRYPOINT`: + + FROM ubuntu + ENTRYPOINT top -b + CMD --ignored-param1 + +You can then run it (giving it a name for the next step): + + $ docker run -it --name test top --ignored-param2 + Mem: 1704184K used, 352484K free, 0K shrd, 0K buff, 140621524238337K cached + CPU: 9% usr 2% sys 0% nic 88% idle 0% io 0% irq 0% sirq + Load average: 0.01 0.02 0.05 2/101 7 + PID PPID USER STAT VSZ %VSZ %CPU COMMAND + 1 0 root S 3168 0% 0% /bin/sh -c top -b cmd cmd2 + 7 1 root R 3164 0% 0% top -b + +You can see from the output of `top` that the specified `ENTRYPOINT` is not `PID 1`. + +If you then run `docker stop test`, the container will not exit cleanly - the +`stop` command will be forced to send a `SIGKILL` after the timeout: + + $ docker exec -it test ps aux + PID USER COMMAND + 1 root /bin/sh -c top -b cmd cmd2 + 7 root top -b + 8 root ps aux + $ /usr/bin/time docker stop test + test + real 0m 10.19s + user 0m 0.04s + sys 0m 0.03s ## VOLUME From fb6ee865a949905f678aa7c7066c809664a8a4aa Mon Sep 17 00:00:00 2001 From: "Daniel, Dao Quang Minh" Date: Tue, 30 Sep 2014 04:30:58 -0400 Subject: [PATCH 117/592] save start error into State.Error when a container failed to start, saves the error message into State.Error so that it can be retrieved when calling `docker inspect` instead of having to look at the log Docker-DCO-1.1-Signed-off-by: Daniel, Dao Quang Minh (github: dqminh) --- daemon/container.go | 2 ++ daemon/state.go | 9 ++++++ integration-cli/docker_cli_start_test.go | 41 ++++++++++++++++++++++++ 3 files changed, 52 insertions(+) diff --git a/daemon/container.go b/daemon/container.go index e5c9fadace..f9500dff76 100644 --- a/daemon/container.go +++ b/daemon/container.go @@ -297,6 +297,8 @@ func (container *Container) Start() (err error) { // setup has been cleaned up properly defer func() { if err != nil { + container.setError(err) + container.toDisk() container.cleanup() } }() diff --git a/daemon/state.go b/daemon/state.go index b7dc149959..2dd57bd94b 100644 --- a/daemon/state.go +++ b/daemon/state.go @@ -15,6 +15,7 @@ type State struct { Restarting bool Pid int ExitCode int + Error string // contains last known error when starting the container StartedAt time.Time FinishedAt time.Time waitChan chan struct{} @@ -137,6 +138,7 @@ func (s *State) SetRunning(pid int) { } func (s *State) setRunning(pid int) { + s.Error = "" s.Running = true s.Paused = false s.Restarting = false @@ -179,6 +181,13 @@ func (s *State) SetRestarting(exitCode int) { s.Unlock() } +// setError sets the container's error state. 
This is useful when we want to +// know the error that occurred when container transits to another state +// when inspecting it +func (s *State) setError(err error) { + s.Error = err.Error() +} + func (s *State) IsRestarting() bool { s.Lock() res := s.Restarting diff --git a/integration-cli/docker_cli_start_test.go b/integration-cli/docker_cli_start_test.go index af0a785185..72c0bfc4ef 100644 --- a/integration-cli/docker_cli_start_test.go +++ b/integration-cli/docker_cli_start_test.go @@ -68,3 +68,44 @@ func TestStartAttachCorrectExitCode(t *testing.T) { logDone("start - correct exit code returned with -a") } + +func TestStartRecordError(t *testing.T) { + defer deleteAllContainers() + + // when container runs successfully, we should not have state.Error + cmd(t, "run", "-d", "-p", "9999:9999", "--name", "test", "busybox", "top") + stateErr, err := inspectField("test", "State.Error") + if err != nil { + t.Fatalf("Failed to inspect %q state's error, got error %q", "test", err) + } + if stateErr != "" { + t.Fatalf("Expected to not have state error but got state.Error(%q)", stateErr) + } + + // Expect this to fail and records error because of ports conflict + out, _, err := runCommandWithOutput(exec.Command(dockerBinary, "run", "-d", "--name", "test2", "-p", "9999:9999", "busybox", "top")) + if err == nil { + t.Fatalf("Expected error but got none, output %q", out) + } + stateErr, err = inspectField("test2", "State.Error") + if err != nil { + t.Fatalf("Failed to inspect %q state's error, got error %q", "test2", err) + } + expected := "port is already allocated" + if stateErr == "" || !strings.Contains(stateErr, expected) { + t.Fatalf("State.Error(%q) does not include %q", stateErr, expected) + } + + // Expect the conflict to be resolved when we stop the initial container + cmd(t, "stop", "test") + cmd(t, "start", "test2") + stateErr, err = inspectField("test2", "State.Error") + if err != nil { + t.Fatalf("Failed to inspect %q state's error, got error %q", "test", err) + } + if stateErr != "" { + t.Fatalf("Expected to not have state error but got state.Error(%q)", stateErr) + } + + logDone("start - set state error when start fails") +} From 9451cf39eff037eccb04319c1e601d08495cab3c Mon Sep 17 00:00:00 2001 From: shuai-z Date: Tue, 21 Oct 2014 12:24:01 +0800 Subject: [PATCH 118/592] Port number 49153(BeginPortRange) would be returned twice, causing duplication and potential errors. If we first request port 49153 (BeginPortRange) explicitly, and later some time request the next free port (of same ip/proto) by calling RequestPort() with port number 0, we will again get 49153 returned, even if it's currently in use. Because findPort() blindly retured BeginPortRange the first run, without checking if it has already been taken. 
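The one-line fix below seeds `last` with `EndPortRange`, so even the very first dynamic request wraps around to `BeginPortRange` and consults the in-use map like every later request, instead of returning `BeginPortRange` unconditionally. A simplified, self-contained sketch of that wrap-around scan (a toy model, not the allocator's exact code):

```go
package main

import (
	"errors"
	"fmt"
)

const (
	beginPortRange = 49153
	endPortRange   = 65535
)

type portMap struct {
	p    map[int]struct{}
	last int
}

func newPortMap() *portMap {
	// Seeding last with the end of the range makes the first dynamic
	// request start scanning at beginPortRange and check the map.
	return &portMap{p: map[int]struct{}{}, last: endPortRange}
}

func (pm *portMap) findPort() (int, error) {
	for port := pm.last + 1; port != pm.last; port++ {
		if port > endPortRange {
			port = beginPortRange
		}
		if _, inUse := pm.p[port]; !inUse {
			pm.p[port] = struct{}{}
			pm.last = port
			return port, nil
		}
	}
	return 0, errors.New("no free ports in range")
}

func main() {
	pm := newPortMap()
	pm.p[beginPortRange] = struct{}{} // 49153 was already requested explicitly
	port, _ := pm.findPort()
	fmt.Println(port) // prints 49154, not 49153 a second time
}
```

The TestNoDuplicateBPR test added in the follow-up commit exercises exactly this scenario.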
Signed-off-by: shuai-z --- daemon/networkdriver/portallocator/portallocator.go | 7 +------ 1 file changed, 1 insertion(+), 6 deletions(-) diff --git a/daemon/networkdriver/portallocator/portallocator.go b/daemon/networkdriver/portallocator/portallocator.go index d4fcc6e725..c6e8a66e8d 100644 --- a/daemon/networkdriver/portallocator/portallocator.go +++ b/daemon/networkdriver/portallocator/portallocator.go @@ -15,6 +15,7 @@ type portMap struct { func newPortMap() *portMap { return &portMap{ p: map[int]struct{}{}, + last: EndPortRange, } } @@ -135,12 +136,6 @@ func ReleaseAll() error { } func (pm *portMap) findPort() (int, error) { - if pm.last == 0 { - pm.p[BeginPortRange] = struct{}{} - pm.last = BeginPortRange - return BeginPortRange, nil - } - for port := pm.last + 1; port != pm.last; port++ { if port > EndPortRange { port = BeginPortRange From 2c2edabca5471fa969358cb98619f277d585e76c Mon Sep 17 00:00:00 2001 From: shuai-z Date: Tue, 21 Oct 2014 13:27:47 +0800 Subject: [PATCH 119/592] added test, gofmtd Signed-off-by: shuai-z --- .../networkdriver/portallocator/portallocator.go | 2 +- .../portallocator/portallocator_test.go | 16 ++++++++++++++++ 2 files changed, 17 insertions(+), 1 deletion(-) diff --git a/daemon/networkdriver/portallocator/portallocator.go b/daemon/networkdriver/portallocator/portallocator.go index c6e8a66e8d..e5dd077a9e 100644 --- a/daemon/networkdriver/portallocator/portallocator.go +++ b/daemon/networkdriver/portallocator/portallocator.go @@ -14,7 +14,7 @@ type portMap struct { func newPortMap() *portMap { return &portMap{ - p: map[int]struct{}{}, + p: map[int]struct{}{}, last: EndPortRange, } } diff --git a/daemon/networkdriver/portallocator/portallocator_test.go b/daemon/networkdriver/portallocator/portallocator_test.go index 9869c332e9..3fb218502c 100644 --- a/daemon/networkdriver/portallocator/portallocator_test.go +++ b/daemon/networkdriver/portallocator/portallocator_test.go @@ -214,3 +214,19 @@ func TestPortAllocation(t *testing.T) { t.Fatal("Requesting a dynamic port should never allocate a used port") } } + +func TestNoDuplicateBPR(t *testing.T) { + defer reset() + + if port, err := RequestPort(defaultIP, "tcp", BeginPortRange); err != nil { + t.Fatal(err) + } else if port != BeginPortRange { + t.Fatalf("Expected port %d got %d", BeginPortRange, port) + } + + if port, err := RequestPort(defaultIP, "tcp", 0); err != nil { + t.Fatal(err) + } else if port == BeginPortRange { + t.Fatalf("Acquire(0) allocated the same port twice: %d", port) + } +} From 9b430f4ec1083d2e73abe4269a85c8cdd91a5ade Mon Sep 17 00:00:00 2001 From: Phil Estes Date: Thu, 23 Oct 2014 13:34:06 -0400 Subject: [PATCH 120/592] Fix error string mapping to HTTP response code to ignore case Docker-DCO-1.1-Signed-off-by: Phil Estes (github: estesp) --- api/server/server.go | 15 ++++++++------- 1 file changed, 8 insertions(+), 7 deletions(-) diff --git a/api/server/server.go b/api/server/server.go index a3edbdc636..ffad992caf 100644 --- a/api/server/server.go +++ b/api/server/server.go @@ -92,17 +92,18 @@ func httpError(w http.ResponseWriter, err error) { // FIXME: this is brittle and should not be necessary. // If we need to differentiate between different possible error types, we should // create appropriate error types with clearly defined meaning. 
- if strings.Contains(err.Error(), "no such") { + errStr := strings.ToLower(err.Error()) + if strings.Contains(errStr, "no such") { statusCode = http.StatusNotFound - } else if strings.Contains(err.Error(), "Bad parameter") { + } else if strings.Contains(errStr, "bad parameter") { statusCode = http.StatusBadRequest - } else if strings.Contains(err.Error(), "Conflict") { + } else if strings.Contains(errStr, "conflict") { statusCode = http.StatusConflict - } else if strings.Contains(err.Error(), "Impossible") { + } else if strings.Contains(errStr, "impossible") { statusCode = http.StatusNotAcceptable - } else if strings.Contains(err.Error(), "Wrong login/password") { + } else if strings.Contains(errStr, "wrong login/password") { statusCode = http.StatusUnauthorized - } else if strings.Contains(err.Error(), "hasn't been activated") { + } else if strings.Contains(errStr, "hasn't been activated") { statusCode = http.StatusForbidden } @@ -1050,7 +1051,7 @@ func postContainersCopy(eng *engine.Engine, version version.Version, w http.Resp w.Header().Set("Content-Type", "application/x-tar") if err := job.Run(); err != nil { log.Errorf("%s", err.Error()) - if strings.Contains(err.Error(), "No such container") { + if strings.Contains(strings.ToLower(err.Error()), "no such container") { w.WriteHeader(http.StatusNotFound) } else if strings.Contains(err.Error(), "no such file or directory") { return fmt.Errorf("Could not find the file %s in container %s", origResource, vars["name"]) From 8b40d385b58f118cbda9f62da9cc88f91afd1f26 Mon Sep 17 00:00:00 2001 From: Fred Lifton Date: Thu, 23 Oct 2014 10:54:40 -0700 Subject: [PATCH 121/592] Removed James' email address Changed email address for abuse reports from James to abuse@ --- CONTRIBUTING.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index c65f8d1070..94c99e3d9e 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -310,7 +310,7 @@ do need a fair way to deal with people who are making our community suck. will be addressed immediately and are not subject to 3 strikes or forgiveness. -* Contact james@docker.com to report abuse or appeal violations. In the case of +* Contact abuse@docker.com to report abuse or appeal violations. In the case of appeals, we know that mistakes happen, and we'll work with you to come up with a fair solution if there has been a misunderstanding. From 3a1eafb4889041be13f0e4e450b5eaf0ae7938c8 Mon Sep 17 00:00:00 2001 From: Alex Gaynor Date: Thu, 23 Oct 2014 11:06:36 -0700 Subject: [PATCH 122/592] Replace fast with quickly Technically an adverb should be used there :-) --- CONTRIBUTING.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index 94c99e3d9e..b6a05de26a 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -62,7 +62,7 @@ docs](http://docs.docker.com/contributing/devenvironment/). ### Pull requests are always welcome We are always thrilled to receive pull requests, and do our best to -process them as fast as possible. Not sure if that typo is worth a pull +process them as quickly as possible. Not sure if that typo is worth a pull request? Do it! We will appreciate it. 
If your pull request is not accepted on the first try, don't be From 6589044b5b84f82a71a756708b4a77b0bc49db42 Mon Sep 17 00:00:00 2001 From: Phil Estes Date: Thu, 23 Oct 2014 14:13:11 -0400 Subject: [PATCH 123/592] Fix volume test using "find" to properly call find with /hello path Docker-DCO-1.1-Signed-off-by: Phil Estes (github: estesp) --- integration-cli/docker_cli_run_test.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/integration-cli/docker_cli_run_test.go b/integration-cli/docker_cli_run_test.go index c4cf8820bd..927b48e59a 100644 --- a/integration-cli/docker_cli_run_test.go +++ b/integration-cli/docker_cli_run_test.go @@ -1611,7 +1611,7 @@ func TestRunCopyVolumeContent(t *testing.T) { } // Test that the content is copied from the image to the volume - cmd := exec.Command(dockerBinary, "run", "--rm", "-v", "/hello", name, "sh", "-c", "find", "/hello") + cmd := exec.Command(dockerBinary, "run", "--rm", "-v", "/hello", name, "find", "/hello") out, _, err := runCommandWithOutput(cmd) if err != nil { t.Fatal(err, out) From ee17b93df9ef2150d0ef25e077f1f87637a54508 Mon Sep 17 00:00:00 2001 From: Phil Estes Date: Thu, 23 Oct 2014 14:30:39 -0400 Subject: [PATCH 124/592] Up test timeout to 10s based on recent drone.io timeout failures Docker-DCO-1.1-Signed-off-by: Phil Estes (github: estesp) --- integration-cli/docker_cli_run_test.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/integration-cli/docker_cli_run_test.go b/integration-cli/docker_cli_run_test.go index c4cf8820bd..db63e3408d 100644 --- a/integration-cli/docker_cli_run_test.go +++ b/integration-cli/docker_cli_run_test.go @@ -2266,7 +2266,7 @@ func TestRunRedirectStdout(t *testing.T) { }() select { - case <-time.After(2 * time.Second): + case <-time.After(10 * time.Second): t.Fatal("command timeout") case <-ch: } From 291b84610fc87622c7116ac71328c1ce3181a6db Mon Sep 17 00:00:00 2001 From: Michael Crosby Date: Thu, 23 Oct 2014 19:00:05 +0000 Subject: [PATCH 125/592] Update libcontainer to aab3f6d17f2f56606f07f3a6eb6 Signed-off-by: Michael Crosby --- hack/vendor.sh | 2 +- ...{CONTRIBUTORS_GUIDE.md => CONTRIBUTING.md} | 57 ++++++++++- .../github.com/docker/libcontainer/Dockerfile | 5 +- .../libcontainer/cgroups/fs/apply_raw.go | 13 +++ .../cgroups/systemd/apply_nosystemd.go | 4 + .../cgroups/systemd/apply_systemd.go | 6 ++ .../docker/libcontainer/console/console.go | 4 +- .../docker/libcontainer/integration/doc.go | 2 + .../libcontainer/integration/exec_test.go | 38 ++++++++ .../libcontainer/integration/init_test.go | 39 ++++++++ .../libcontainer/integration/template_test.go | 64 +++++++++++++ .../libcontainer/integration/utils_test.go | 95 +++++++++++++++++++ .../libcontainer/netlink/netlink_linux.go | 11 ++- .../netlink/netlink_linux_test.go | 2 +- .../netlink/netlink_unsupported.go | 2 +- .../docker/libcontainer/network/network.go | 4 +- .../docker/libcontainer/network/types.go | 5 + .../docker/libcontainer/network/veth.go | 11 ++- .../docker/libcontainer/network/veth_test.go | 6 +- .../libcontainer/system/syscall_linux_386.go | 24 +++++ .../system/syscall_linux_amd64.go | 24 +++++ .../libcontainer/system/syscall_linux_arm.go | 24 +++++ .../docker/libcontainer/utils/utils_test.go | 15 +++ .../xattr/{xattr.go => xattr_linux.go} | 0 .../libcontainer/xattr/xattr_unsupported.go | 15 +++ 25 files changed, 452 insertions(+), 20 deletions(-) rename vendor/src/github.com/docker/libcontainer/{CONTRIBUTORS_GUIDE.md => CONTRIBUTING.md} (84%) create mode 100644 
vendor/src/github.com/docker/libcontainer/integration/doc.go create mode 100644 vendor/src/github.com/docker/libcontainer/integration/exec_test.go create mode 100644 vendor/src/github.com/docker/libcontainer/integration/init_test.go create mode 100644 vendor/src/github.com/docker/libcontainer/integration/template_test.go create mode 100644 vendor/src/github.com/docker/libcontainer/integration/utils_test.go create mode 100644 vendor/src/github.com/docker/libcontainer/system/syscall_linux_386.go create mode 100644 vendor/src/github.com/docker/libcontainer/system/syscall_linux_amd64.go create mode 100644 vendor/src/github.com/docker/libcontainer/system/syscall_linux_arm.go create mode 100644 vendor/src/github.com/docker/libcontainer/utils/utils_test.go rename vendor/src/github.com/docker/libcontainer/xattr/{xattr.go => xattr_linux.go} (100%) create mode 100644 vendor/src/github.com/docker/libcontainer/xattr/xattr_unsupported.go diff --git a/hack/vendor.sh b/hack/vendor.sh index 7ecb1a5cd4..d43aba9ce6 100755 --- a/hack/vendor.sh +++ b/hack/vendor.sh @@ -64,7 +64,7 @@ if [ "$1" = '--go' ]; then mv tmp-tar src/code.google.com/p/go/src/pkg/archive/tar fi -clone git github.com/docker/libcontainer 8d1d0ba38a7348c5cfdc05aea3be34d75aadc8de +clone git github.com/docker/libcontainer aab3f6d17f2f56606f07f3a6eb6b693303f75812 # see src/github.com/docker/libcontainer/update-vendor.sh which is the "source of truth" for libcontainer deps (just like this file) rm -rf src/github.com/docker/libcontainer/vendor eval "$(grep '^clone ' src/github.com/docker/libcontainer/update-vendor.sh | grep -v 'github.com/codegangsta/cli')" diff --git a/vendor/src/github.com/docker/libcontainer/CONTRIBUTORS_GUIDE.md b/vendor/src/github.com/docker/libcontainer/CONTRIBUTING.md similarity index 84% rename from vendor/src/github.com/docker/libcontainer/CONTRIBUTORS_GUIDE.md rename to vendor/src/github.com/docker/libcontainer/CONTRIBUTING.md index 07bf22a031..667cc5a63f 100644 --- a/vendor/src/github.com/docker/libcontainer/CONTRIBUTORS_GUIDE.md +++ b/vendor/src/github.com/docker/libcontainer/CONTRIBUTING.md @@ -6,7 +6,7 @@ feels wrong or incomplete. ## Reporting Issues -When reporting [issues](https://github.com/docker/libcontainer/issues) +When reporting [issues](https://github.com/docker/libcontainer/issues) on GitHub please include your host OS (Ubuntu 12.04, Fedora 19, etc), the output of `uname -a`. Please include the steps required to reproduce the problem if possible and applicable. @@ -14,7 +14,60 @@ This information will help us review and fix your issue faster. ## Development Environment -*Add instructions on setting up the development environment.* +### Requirements + +For best results, use a Linux development environment. +The following packages are required to compile libcontainer natively. + +- Golang 1.3 +- GCC +- git +- cgutils + +You can develop on OSX, but you are limited to Dockerfile-based builds only. + +### Building libcontainer from Dockerfile + + make all + +This is the easiest way of building libcontainer. +As this build is done using Docker, you can even run this from [OSX](https://github.com/boot2docker/boot2docker) + +### Testing changes with "nsinit" + + make sh + +This will create an container that runs `nsinit exec sh` on a busybox rootfs with the configuration from ['minimal.json'](https://github.com/docker/libcontainer/blob/master/sample_configs/minimal.json). +Like the previous command, you can run this on OSX too! 
+ +### Building libcontainer directly + +> Note: You should add the `vendor` directory to your GOPATH to use the vendored libraries + + ./update-vendor.sh + go get -d ./... + make direct-build + # Run the tests + make direct-test-short | egrep --color 'FAIL|$' + # Run all the test + make direct-test | egrep --color 'FAIL|$' + +### Testing Changes with "nsinit" directly + +To test a change: + + # Install nsinit + make direct-install + + # Optional, add a docker0 bridge + ip link add docker0 type bridge + ifconfig docker0 172.17.0.1/16 up + + mkdir testfs + curl -sSL https://github.com/jpetazzo/docker-busybox/raw/buildroot-2014.02/rootfs.tar | tar -xC testfs + cd testfs + cp container.json + nsinit exec sh ## Contribution Guidelines diff --git a/vendor/src/github.com/docker/libcontainer/Dockerfile b/vendor/src/github.com/docker/libcontainer/Dockerfile index 65bf5731d2..96d8f35255 100644 --- a/vendor/src/github.com/docker/libcontainer/Dockerfile +++ b/vendor/src/github.com/docker/libcontainer/Dockerfile @@ -3,6 +3,9 @@ FROM crosbymichael/golang RUN apt-get update && apt-get install -y gcc make RUN go get code.google.com/p/go.tools/cmd/cover +ENV GOPATH $GOPATH:/go/src/github.com/docker/libcontainer/vendor +RUN go get github.com/docker/docker/pkg/term + # setup a playground for us to spawn containers in RUN mkdir /busybox && \ curl -sSL 'https://github.com/jpetazzo/docker-busybox/raw/buildroot-2014.02/rootfs.tar' | tar -xC /busybox @@ -14,8 +17,6 @@ COPY . /go/src/github.com/docker/libcontainer WORKDIR /go/src/github.com/docker/libcontainer RUN cp sample_configs/minimal.json /busybox/container.json -ENV GOPATH $GOPATH:/go/src/github.com/docker/libcontainer/vendor - RUN go get -d -v ./... RUN make direct-install diff --git a/vendor/src/github.com/docker/libcontainer/cgroups/fs/apply_raw.go b/vendor/src/github.com/docker/libcontainer/cgroups/fs/apply_raw.go index 133241e472..599ab57272 100644 --- a/vendor/src/github.com/docker/libcontainer/cgroups/fs/apply_raw.go +++ b/vendor/src/github.com/docker/libcontainer/cgroups/fs/apply_raw.go @@ -73,6 +73,19 @@ func Apply(c *cgroups.Cgroup, pid int) (cgroups.ActiveCgroup, error) { return d, nil } +// Symmetrical public function to update device based cgroups. Also available +// in the systemd implementation. 
+func ApplyDevices(c *cgroups.Cgroup, pid int) error { + d, err := getCgroupData(c, pid) + if err != nil { + return err + } + + devices := subsystems["devices"] + + return devices.Set(d) +} + func Cleanup(c *cgroups.Cgroup) error { d, err := getCgroupData(c, 0) if err != nil { diff --git a/vendor/src/github.com/docker/libcontainer/cgroups/systemd/apply_nosystemd.go b/vendor/src/github.com/docker/libcontainer/cgroups/systemd/apply_nosystemd.go index 685591090b..42a09e3feb 100644 --- a/vendor/src/github.com/docker/libcontainer/cgroups/systemd/apply_nosystemd.go +++ b/vendor/src/github.com/docker/libcontainer/cgroups/systemd/apply_nosystemd.go @@ -20,6 +20,10 @@ func GetPids(c *cgroups.Cgroup) ([]int, error) { return nil, fmt.Errorf("Systemd not supported") } +func ApplyDevices(c *cgroups.Cgroup, pid int) error { + return fmt.Errorf("Systemd not supported") +} + func Freeze(c *cgroups.Cgroup, state cgroups.FreezerState) error { return fmt.Errorf("Systemd not supported") } diff --git a/vendor/src/github.com/docker/libcontainer/cgroups/systemd/apply_systemd.go b/vendor/src/github.com/docker/libcontainer/cgroups/systemd/apply_systemd.go index 7af4818e23..1f84a9c6f2 100644 --- a/vendor/src/github.com/docker/libcontainer/cgroups/systemd/apply_systemd.go +++ b/vendor/src/github.com/docker/libcontainer/cgroups/systemd/apply_systemd.go @@ -327,6 +327,12 @@ func joinDevices(c *cgroups.Cgroup, pid int) error { return nil } +// Symmetrical public function to update device based cgroups. Also available +// in the fs implementation. +func ApplyDevices(c *cgroups.Cgroup, pid int) error { + return joinDevices(c, pid) +} + func joinMemory(c *cgroups.Cgroup, pid int) error { memorySwap := c.MemorySwap diff --git a/vendor/src/github.com/docker/libcontainer/console/console.go b/vendor/src/github.com/docker/libcontainer/console/console.go index 346f537d53..438e670420 100644 --- a/vendor/src/github.com/docker/libcontainer/console/console.go +++ b/vendor/src/github.com/docker/libcontainer/console/console.go @@ -67,14 +67,14 @@ func OpenAndDup(consolePath string) error { // Unlockpt unlocks the slave pseudoterminal device corresponding to the master pseudoterminal referred to by f. // Unlockpt should be called before opening the slave side of a pseudoterminal. func Unlockpt(f *os.File) error { - var u int + var u int32 return Ioctl(f.Fd(), syscall.TIOCSPTLCK, uintptr(unsafe.Pointer(&u))) } // Ptsname retrieves the name of the first available pts for the given master. 
func Ptsname(f *os.File) (string, error) { - var n int + var n int32 if err := Ioctl(f.Fd(), syscall.TIOCGPTN, uintptr(unsafe.Pointer(&n))); err != nil { return "", err diff --git a/vendor/src/github.com/docker/libcontainer/integration/doc.go b/vendor/src/github.com/docker/libcontainer/integration/doc.go new file mode 100644 index 0000000000..87545bc99c --- /dev/null +++ b/vendor/src/github.com/docker/libcontainer/integration/doc.go @@ -0,0 +1,2 @@ +// integration is used for integration testing of libcontainer +package integration diff --git a/vendor/src/github.com/docker/libcontainer/integration/exec_test.go b/vendor/src/github.com/docker/libcontainer/integration/exec_test.go new file mode 100644 index 0000000000..9609918943 --- /dev/null +++ b/vendor/src/github.com/docker/libcontainer/integration/exec_test.go @@ -0,0 +1,38 @@ +package integration + +import ( + "strings" + "testing" +) + +func TestExecPS(t *testing.T) { + if testing.Short() { + return + } + + rootfs, err := newRootFs() + if err != nil { + t.Fatal(err) + } + defer remove(rootfs) + + config := newTemplateConfig(rootfs) + buffers, exitCode, err := runContainer(config, "", "ps") + if err != nil { + t.Fatal(err) + } + + if exitCode != 0 { + t.Fatalf("exit code not 0. code %d stderr %q", exitCode, buffers.Stderr) + } + + lines := strings.Split(buffers.Stdout.String(), "\n") + if len(lines) < 2 { + t.Fatalf("more than one process running for output %q", buffers.Stdout.String()) + } + expected := `1 root ps` + actual := strings.Trim(lines[1], "\n ") + if actual != expected { + t.Fatalf("expected output %q but received %q", expected, actual) + } +} diff --git a/vendor/src/github.com/docker/libcontainer/integration/init_test.go b/vendor/src/github.com/docker/libcontainer/integration/init_test.go new file mode 100644 index 0000000000..a0570f3245 --- /dev/null +++ b/vendor/src/github.com/docker/libcontainer/integration/init_test.go @@ -0,0 +1,39 @@ +package integration + +import ( + "log" + "os" + "runtime" + + "github.com/docker/libcontainer/namespaces" + "github.com/docker/libcontainer/syncpipe" +) + +// init runs the libcontainer initialization code because of the busybox style needs +// to work around the go runtime and the issues with forking +func init() { + if len(os.Args) < 2 || os.Args[1] != "init" { + return + } + runtime.LockOSThread() + + container, err := loadConfig() + if err != nil { + log.Fatal(err) + } + + rootfs, err := os.Getwd() + if err != nil { + log.Fatal(err) + } + + syncPipe, err := syncpipe.NewSyncPipeFromFd(0, 3) + if err != nil { + log.Fatalf("unable to create sync pipe: %s", err) + } + + if err := namespaces.Init(container, rootfs, "", syncPipe, os.Args[3:]); err != nil { + log.Fatalf("unable to initialize for container: %s", err) + } + os.Exit(1) +} diff --git a/vendor/src/github.com/docker/libcontainer/integration/template_test.go b/vendor/src/github.com/docker/libcontainer/integration/template_test.go new file mode 100644 index 0000000000..1805eba980 --- /dev/null +++ b/vendor/src/github.com/docker/libcontainer/integration/template_test.go @@ -0,0 +1,64 @@ +package integration + +import ( + "github.com/docker/libcontainer" + "github.com/docker/libcontainer/cgroups" + "github.com/docker/libcontainer/devices" +) + +// newTemplateConfig returns a base template for running a container +// +// it uses a network strategy of just setting a loopback interface +// and the default setup for devices +func newTemplateConfig(rootfs string) *libcontainer.Config { + return &libcontainer.Config{ + RootFs: rootfs, + 
Tty: false, + Capabilities: []string{ + "CHOWN", + "DAC_OVERRIDE", + "FSETID", + "FOWNER", + "MKNOD", + "NET_RAW", + "SETGID", + "SETUID", + "SETFCAP", + "SETPCAP", + "NET_BIND_SERVICE", + "SYS_CHROOT", + "KILL", + "AUDIT_WRITE", + }, + Namespaces: map[string]bool{ + "NEWNS": true, + "NEWUTS": true, + "NEWIPC": true, + "NEWPID": true, + "NEWNET": true, + }, + Cgroups: &cgroups.Cgroup{ + Parent: "integration", + AllowAllDevices: false, + AllowedDevices: devices.DefaultAllowedDevices, + }, + + MountConfig: &libcontainer.MountConfig{ + DeviceNodes: devices.DefaultAutoCreatedDevices, + }, + Hostname: "integration", + Env: []string{ + "HOME=/root", + "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin", + "HOSTNAME=integration", + "TERM=xterm", + }, + Networks: []*libcontainer.Network{ + { + Type: "loopback", + Address: "127.0.0.1/0", + Gateway: "localhost", + }, + }, + } +} diff --git a/vendor/src/github.com/docker/libcontainer/integration/utils_test.go b/vendor/src/github.com/docker/libcontainer/integration/utils_test.go new file mode 100644 index 0000000000..6393fb9982 --- /dev/null +++ b/vendor/src/github.com/docker/libcontainer/integration/utils_test.go @@ -0,0 +1,95 @@ +package integration + +import ( + "bytes" + "encoding/json" + "fmt" + "io/ioutil" + "os" + "os/exec" + "path/filepath" + + "github.com/docker/libcontainer" + "github.com/docker/libcontainer/namespaces" +) + +func newStdBuffers() *stdBuffers { + return &stdBuffers{ + Stdin: bytes.NewBuffer(nil), + Stdout: bytes.NewBuffer(nil), + Stderr: bytes.NewBuffer(nil), + } +} + +type stdBuffers struct { + Stdin *bytes.Buffer + Stdout *bytes.Buffer + Stderr *bytes.Buffer +} + +func writeConfig(config *libcontainer.Config) error { + f, err := os.OpenFile(filepath.Join(config.RootFs, "container.json"), os.O_CREATE|os.O_RDWR|os.O_TRUNC, 0700) + if err != nil { + return err + } + defer f.Close() + return json.NewEncoder(f).Encode(config) +} + +func loadConfig() (*libcontainer.Config, error) { + f, err := os.Open(filepath.Join(os.Getenv("data_path"), "container.json")) + if err != nil { + return nil, err + } + defer f.Close() + + var container *libcontainer.Config + if err := json.NewDecoder(f).Decode(&container); err != nil { + return nil, err + } + return container, nil +} + +// newRootFs creates a new tmp directory and copies the busybox root filesystem +func newRootFs() (string, error) { + dir, err := ioutil.TempDir("", "") + if err != nil { + return "", err + } + if err := os.MkdirAll(dir, 0700); err != nil { + return "", err + } + if err := copyBusybox(dir); err != nil { + return "", nil + } + return dir, nil +} + +func remove(dir string) { + os.RemoveAll(dir) +} + +// copyBusybox copies the rootfs for a busybox container created for the test image +// into the new directory for the specific test +func copyBusybox(dest string) error { + out, err := exec.Command("sh", "-c", fmt.Sprintf("cp -R /busybox/* %s/", dest)).CombinedOutput() + if err != nil { + return fmt.Errorf("copy error %q: %q", err, out) + } + return nil +} + +// runContainer runs the container with the specific config and arguments +// +// buffers are returned containing the STDOUT and STDERR output for the run +// along with the exit code and any go error +func runContainer(config *libcontainer.Config, console string, args ...string) (buffers *stdBuffers, exitCode int, err error) { + if err := writeConfig(config); err != nil { + return nil, -1, err + } + + buffers = newStdBuffers() + exitCode, err = namespaces.Exec(config, buffers.Stdin, buffers.Stdout, 
buffers.Stderr, + console, config.RootFs, args, namespaces.DefaultCreateCommand, nil) + return +} diff --git a/vendor/src/github.com/docker/libcontainer/netlink/netlink_linux.go b/vendor/src/github.com/docker/libcontainer/netlink/netlink_linux.go index 3083cf907a..c858b1129e 100644 --- a/vendor/src/github.com/docker/libcontainer/netlink/netlink_linux.go +++ b/vendor/src/github.com/docker/libcontainer/netlink/netlink_linux.go @@ -681,7 +681,7 @@ func NetworkChangeName(iface *net.Interface, newName string) error { // Add a new VETH pair link on the host // This is identical to running: ip link add name $name type veth peer name $peername -func NetworkCreateVethPair(name1, name2 string) error { +func NetworkCreateVethPair(name1, name2 string, txQueueLen int) error { s, err := getNetlinkSocket() if err != nil { return err @@ -696,6 +696,11 @@ func NetworkCreateVethPair(name1, name2 string) error { nameData := newRtAttr(syscall.IFLA_IFNAME, zeroTerminated(name1)) wb.AddData(nameData) + txqLen := make([]byte, 4) + native.PutUint32(txqLen, uint32(txQueueLen)) + txqData := newRtAttr(syscall.IFLA_TXQLEN, txqLen) + wb.AddData(txqData) + nest1 := newRtAttr(syscall.IFLA_LINKINFO, nil) newRtAttrChild(nest1, IFLA_INFO_KIND, zeroTerminated("veth")) nest2 := newRtAttrChild(nest1, IFLA_INFO_DATA, nil) @@ -704,6 +709,10 @@ func NetworkCreateVethPair(name1, name2 string) error { newIfInfomsgChild(nest3, syscall.AF_UNSPEC) newRtAttrChild(nest3, syscall.IFLA_IFNAME, zeroTerminated(name2)) + txqLen2 := make([]byte, 4) + native.PutUint32(txqLen2, uint32(txQueueLen)) + newRtAttrChild(nest3, syscall.IFLA_TXQLEN, txqLen2) + wb.AddData(nest1) if err := s.Send(wb); err != nil { diff --git a/vendor/src/github.com/docker/libcontainer/netlink/netlink_linux_test.go b/vendor/src/github.com/docker/libcontainer/netlink/netlink_linux_test.go index 88c2e04a3a..0320c47221 100644 --- a/vendor/src/github.com/docker/libcontainer/netlink/netlink_linux_test.go +++ b/vendor/src/github.com/docker/libcontainer/netlink/netlink_linux_test.go @@ -290,7 +290,7 @@ func TestCreateVethPair(t *testing.T) { name2 = "veth2" ) - if err := NetworkCreateVethPair(name1, name2); err != nil { + if err := NetworkCreateVethPair(name1, name2, 0); err != nil { t.Fatalf("Could not create veth pair %s %s: %s", name1, name2, err) } defer NetworkLinkDel(name1) diff --git a/vendor/src/github.com/docker/libcontainer/netlink/netlink_unsupported.go b/vendor/src/github.com/docker/libcontainer/netlink/netlink_unsupported.go index f6e84adf7e..747cd1d80a 100644 --- a/vendor/src/github.com/docker/libcontainer/netlink/netlink_unsupported.go +++ b/vendor/src/github.com/docker/libcontainer/netlink/netlink_unsupported.go @@ -47,7 +47,7 @@ func NetworkSetMTU(iface *net.Interface, mtu int) error { return ErrNotImplemented } -func NetworkCreateVethPair(name1, name2 string) error { +func NetworkCreateVethPair(name1, name2 string, txQueueLen int) error { return ErrNotImplemented } diff --git a/vendor/src/github.com/docker/libcontainer/network/network.go b/vendor/src/github.com/docker/libcontainer/network/network.go index 014ba74315..2c3499b6d6 100644 --- a/vendor/src/github.com/docker/libcontainer/network/network.go +++ b/vendor/src/github.com/docker/libcontainer/network/network.go @@ -32,8 +32,8 @@ func ChangeInterfaceName(old, newName string) error { return netlink.NetworkChangeName(iface, newName) } -func CreateVethPair(name1, name2 string) error { - return netlink.NetworkCreateVethPair(name1, name2) +func CreateVethPair(name1, name2 string, txQueueLen int) error { + 
return netlink.NetworkCreateVethPair(name1, name2, txQueueLen) } func SetInterfaceInNamespacePid(name string, nsPid int) error { diff --git a/vendor/src/github.com/docker/libcontainer/network/types.go b/vendor/src/github.com/docker/libcontainer/network/types.go index 383e27c81a..ea0741be1c 100644 --- a/vendor/src/github.com/docker/libcontainer/network/types.go +++ b/vendor/src/github.com/docker/libcontainer/network/types.go @@ -36,6 +36,11 @@ type Network struct { // container's interfaces if a pair is created, specifically in the case of type veth // Note: This does not apply to loopback interfaces. Mtu int `json:"mtu,omitempty"` + + // TxQueueLen sets the tx_queuelen value for the interface and will be mirrored on both the host and + // container's interfaces if a pair is created, specifically in the case of type veth + // Note: This does not apply to loopback interfaces. + TxQueueLen int `json:"txqueuelen,omitempty"` } // Struct describing the network specific runtime state that will be maintained by libcontainer for all running containers diff --git a/vendor/src/github.com/docker/libcontainer/network/veth.go b/vendor/src/github.com/docker/libcontainer/network/veth.go index e5185de7c7..3d7dc8729e 100644 --- a/vendor/src/github.com/docker/libcontainer/network/veth.go +++ b/vendor/src/github.com/docker/libcontainer/network/veth.go @@ -19,8 +19,9 @@ const defaultDevice = "eth0" func (v *Veth) Create(n *Network, nspid int, networkState *NetworkState) error { var ( - bridge = n.Bridge - prefix = n.VethPrefix + bridge = n.Bridge + prefix = n.VethPrefix + txQueueLen = n.TxQueueLen ) if bridge == "" { return fmt.Errorf("bridge is not specified") @@ -28,7 +29,7 @@ func (v *Veth) Create(n *Network, nspid int, networkState *NetworkState) error { if prefix == "" { return fmt.Errorf("veth prefix is not specified") } - name1, name2, err := createVethPair(prefix) + name1, name2, err := createVethPair(prefix, txQueueLen) if err != nil { return err } @@ -96,7 +97,7 @@ func (v *Veth) Initialize(config *Network, networkState *NetworkState) error { // createVethPair will automatically generage two random names for // the veth pair and ensure that they have been created -func createVethPair(prefix string) (name1 string, name2 string, err error) { +func createVethPair(prefix string, txQueueLen int) (name1 string, name2 string, err error) { for i := 0; i < 10; i++ { if name1, err = utils.GenerateRandomName(prefix, 7); err != nil { return @@ -106,7 +107,7 @@ func createVethPair(prefix string) (name1 string, name2 string, err error) { return } - if err = CreateVethPair(name1, name2); err != nil { + if err = CreateVethPair(name1, name2, txQueueLen); err != nil { if err == netlink.ErrInterfaceExists { continue } diff --git a/vendor/src/github.com/docker/libcontainer/network/veth_test.go b/vendor/src/github.com/docker/libcontainer/network/veth_test.go index e09a6042c7..b92b284eb0 100644 --- a/vendor/src/github.com/docker/libcontainer/network/veth_test.go +++ b/vendor/src/github.com/docker/libcontainer/network/veth_test.go @@ -15,7 +15,7 @@ func TestGenerateVethNames(t *testing.T) { prefix := "veth" - name1, name2, err := createVethPair(prefix) + name1, name2, err := createVethPair(prefix, 0) if err != nil { t.Fatal(err) } @@ -36,13 +36,13 @@ func TestCreateDuplicateVethPair(t *testing.T) { prefix := "veth" - name1, name2, err := createVethPair(prefix) + name1, name2, err := createVethPair(prefix, 0) if err != nil { t.Fatal(err) } // retry to create the name interfaces and make sure that we get the correct error - 
err = CreateVethPair(name1, name2) + err = CreateVethPair(name1, name2, 0) if err == nil { t.Fatal("expected error to not be nil with duplicate interface") } diff --git a/vendor/src/github.com/docker/libcontainer/system/syscall_linux_386.go b/vendor/src/github.com/docker/libcontainer/system/syscall_linux_386.go new file mode 100644 index 0000000000..2fcbf21309 --- /dev/null +++ b/vendor/src/github.com/docker/libcontainer/system/syscall_linux_386.go @@ -0,0 +1,24 @@ +// +build linux,386 +package system + +import ( + "syscall" +) + +// Setuid sets the uid of the calling thread to the specified uid. +func Setuid(uid int) (err error) { + _, _, e1 := syscall.RawSyscall(syscall.SYS_SETUID, uintptr(uid), 0, 0) + if e1 != 0 { + err = e1 + } + return +} + +// Setgid sets the gid of the calling thread to the specified gid. +func Setgid(gid int) (err error) { + _, _, e1 := syscall.RawSyscall(syscall.SYS_SETGID32, uintptr(gid), 0, 0) + if e1 != 0 { + err = e1 + } + return +} diff --git a/vendor/src/github.com/docker/libcontainer/system/syscall_linux_amd64.go b/vendor/src/github.com/docker/libcontainer/system/syscall_linux_amd64.go new file mode 100644 index 0000000000..0a346c3b9c --- /dev/null +++ b/vendor/src/github.com/docker/libcontainer/system/syscall_linux_amd64.go @@ -0,0 +1,24 @@ +// +build linux,amd64 +package system + +import ( + "syscall" +) + +// Setuid sets the uid of the calling thread to the specified uid. +func Setuid(uid int) (err error) { + _, _, e1 := syscall.RawSyscall(syscall.SYS_SETUID, uintptr(uid), 0, 0) + if e1 != 0 { + err = e1 + } + return +} + +// Setgid sets the gid of the calling thread to the specified gid. +func Setgid(gid int) (err error) { + _, _, e1 := syscall.RawSyscall(syscall.SYS_SETGID, uintptr(gid), 0, 0) + if e1 != 0 { + err = e1 + } + return +} diff --git a/vendor/src/github.com/docker/libcontainer/system/syscall_linux_arm.go b/vendor/src/github.com/docker/libcontainer/system/syscall_linux_arm.go new file mode 100644 index 0000000000..faf1799577 --- /dev/null +++ b/vendor/src/github.com/docker/libcontainer/system/syscall_linux_arm.go @@ -0,0 +1,24 @@ +// +build linux,arm +package system + +import ( + "syscall" +) + +// Setuid sets the uid of the calling thread to the specified uid. +func Setuid(uid int) (err error) { + _, _, e1 := syscall.RawSyscall(syscall.SYS_SETUID, uintptr(uid), 0, 0) + if e1 != 0 { + err = e1 + } + return +} + +// Setgid sets the gid of the calling thread to the specified gid. 
+func Setgid(gid int) (err error) { + _, _, e1 := syscall.RawSyscall(syscall.SYS_SETGID32, uintptr(gid), 0, 0) + if e1 != 0 { + err = e1 + } + return +} diff --git a/vendor/src/github.com/docker/libcontainer/utils/utils_test.go b/vendor/src/github.com/docker/libcontainer/utils/utils_test.go new file mode 100644 index 0000000000..41ef1aa3df --- /dev/null +++ b/vendor/src/github.com/docker/libcontainer/utils/utils_test.go @@ -0,0 +1,15 @@ +package utils + +import "testing" + +func TestGenerateName(t *testing.T) { + name, err := GenerateRandomName("veth", 5) + if err != nil { + t.Fatal(err) + } + + expected := 5 + len("veth") + if len(name) != 5+len("veth") { + t.Fatalf("expected name to be %d chars but received %d", expected, len(name)) + } +} diff --git a/vendor/src/github.com/docker/libcontainer/xattr/xattr.go b/vendor/src/github.com/docker/libcontainer/xattr/xattr_linux.go similarity index 100% rename from vendor/src/github.com/docker/libcontainer/xattr/xattr.go rename to vendor/src/github.com/docker/libcontainer/xattr/xattr_linux.go diff --git a/vendor/src/github.com/docker/libcontainer/xattr/xattr_unsupported.go b/vendor/src/github.com/docker/libcontainer/xattr/xattr_unsupported.go new file mode 100644 index 0000000000..821dea3be1 --- /dev/null +++ b/vendor/src/github.com/docker/libcontainer/xattr/xattr_unsupported.go @@ -0,0 +1,15 @@ +// +build !linux + +package xattr + +func Listxattr(path string) ([]string, error) { + return nil, ErrNotSupportedPlatform +} + +func Getxattr(path, attr string) (string, error) { + return "", ErrNotSupportedPlatform +} + +func Setxattr(path, xattr, value string) error { + return ErrNotSupportedPlatform +} From 1262b5f605e8f897acb2b9ee261edbab7f1f8947 Mon Sep 17 00:00:00 2001 From: Phil Estes Date: Sun, 5 Oct 2014 00:21:59 -0400 Subject: [PATCH 126/592] Gracefully handle network bridge without IP association at startup Addresses #8444 Docker-DCO-1.1-Signed-off-by: Phil Estes (github: estesp) --- daemon/networkdriver/bridge/driver.go | 20 ++++++++----- integration-cli/docker_cli_daemon_test.go | 36 +++++++++++++++++++++++ 2 files changed, 49 insertions(+), 7 deletions(-) diff --git a/daemon/networkdriver/bridge/driver.go b/daemon/networkdriver/bridge/driver.go index e05a2c21a5..44d864e709 100644 --- a/daemon/networkdriver/bridge/driver.go +++ b/daemon/networkdriver/bridge/driver.go @@ -4,6 +4,7 @@ import ( "fmt" "io/ioutil" "net" + "os" "strings" "sync" @@ -104,8 +105,8 @@ func InitDriver(job *engine.Job) engine.Status { if !usingDefaultBridge { return job.Error(err) } - // If the iface is not found, try to create it - if err := createBridge(bridgeIP); err != nil { + // If the bridge interface is not found (or has no address), try to create it and/or add an address + if err := configureBridge(bridgeIP); err != nil { return job.Error(err) } @@ -251,10 +252,12 @@ func setupIPTables(addr net.Addr, icc, ipmasq bool) error { return nil } -// CreateBridgeIface creates a network bridge interface on the host system with the name `ifaceName`, -// and attempts to configure it with an address which doesn't conflict with any other interface on the host. -// If it can't find an address which doesn't conflict, it will return an error. 
-func createBridge(bridgeIP string) error { +// configureBridge attempts to create and configure a network bridge interface named `ifaceName` on the host +// If bridgeIP is empty, it will try to find a non-conflicting IP from the Docker-specified private ranges +// If the bridge `ifaceName` already exists, it will only perform the IP address association with the existing +// bridge (fixes issue #8444) +// If an address which doesn't conflict with existing interfaces can't be found, an error is returned. +func configureBridge(bridgeIP string) error { nameservers := []string{} resolvConf, _ := resolvconf.Get() // we don't check for an error here, because we don't really care @@ -295,7 +298,10 @@ func createBridge(bridgeIP string) error { log.Debugf("Creating bridge %s with network %s", bridgeIface, ifaceAddr) if err := createBridgeIface(bridgeIface); err != nil { - return err + // the bridge may already exist, therefore we can ignore an "exists" error + if !os.IsExist(err) { + return err + } } iface, err := net.InterfaceByName(bridgeIface) diff --git a/integration-cli/docker_cli_daemon_test.go b/integration-cli/docker_cli_daemon_test.go index 6160e57e94..9d238c15ee 100644 --- a/integration-cli/docker_cli_daemon_test.go +++ b/integration-cli/docker_cli_daemon_test.go @@ -3,6 +3,7 @@ package main import ( "encoding/json" "os" + "os/exec" "strings" "testing" ) @@ -92,3 +93,38 @@ func TestDaemonStartIptablesFalse(t *testing.T) { logDone("daemon - started daemon with iptables=false") } + +// Issue #8444: If docker0 bridge is modified (intentionally or unintentionally) and +// no longer has an IP associated, we should gracefully handle that case and associate +// an IP with it rather than fail daemon start +func TestDaemonStartBridgeWithoutIPAssociation(t *testing.T) { + d := NewDaemon(t) + // rather than depending on brctl commands to verify docker0 is created and up + // let's start the daemon and stop it, and then make a modification to run the + // actual test + if err := d.Start(); err != nil { + t.Fatalf("Could not start daemon: %v", err) + } + if err := d.Stop(); err != nil { + t.Fatalf("Could not stop daemon: %v", err) + } + + // now we will remove the ip from docker0 and then try starting the daemon + ipCmd := exec.Command("ip", "addr", "flush", "dev", "docker0") + stdout, stderr, _, err := runCommandWithStdoutStderr(ipCmd) + if err != nil { + t.Fatalf("failed to remove docker0 IP association: %v, stdout: %q, stderr: %q", err, stdout, stderr) + } + + if err := d.Start(); err != nil { + warning := "**WARNING: Docker bridge network in bad state--delete docker0 bridge interface to fix" + t.Fatalf("Could not start daemon when docker0 has no IP address: %v\n%s", err, warning) + } + + // cleanup - stop the daemon if test passed + if err := d.Stop(); err != nil { + t.Fatalf("Could not stop daemon: %v", err) + } + + logDone("daemon - successful daemon start when bridge has no IP association") +} From 78a272ce14a43f81a79f1b948d5cfd120405f8c9 Mon Sep 17 00:00:00 2001 From: Srini Brahmaroutu Date: Thu, 23 Oct 2014 20:30:18 +0000 Subject: [PATCH 127/592] Allowing resize tty to only work when container is started Addresses #8728 Signed-off-by: Srini Brahmaroutu --- daemon/container.go | 3 ++ integration-cli/docker_api_resize_test.go | 53 +++++++++++++++++++++++ integration-cli/docker_utils.go | 3 +- 3 files changed, 58 insertions(+), 1 deletion(-) create mode 100644 integration-cli/docker_api_resize_test.go diff --git a/daemon/container.go b/daemon/container.go index e5c9fadace..f3db268633 100644 --- 
a/daemon/container.go +++ b/daemon/container.go @@ -691,6 +691,9 @@ func (container *Container) Restart(seconds int) error { } func (container *Container) Resize(h, w int) error { + if !container.IsRunning() { + return fmt.Errorf("Cannot resize container %s, container is not running", container.ID) + } return container.command.ProcessConfig.Terminal.Resize(h, w) } diff --git a/integration-cli/docker_api_resize_test.go b/integration-cli/docker_api_resize_test.go new file mode 100644 index 0000000000..3595999a71 --- /dev/null +++ b/integration-cli/docker_api_resize_test.go @@ -0,0 +1,53 @@ +package main + +import ( + "os/exec" + "strings" + "testing" +) + +func TestResizeApiResponse(t *testing.T) { + runCmd := exec.Command(dockerBinary, "run", "-d", "busybox", "true") + out, _, err := runCommandWithOutput(runCmd) + if err != nil { + t.Fatalf(out, err) + } + defer deleteAllContainers() + cleanedContainerID := stripTrailingCharacters(out) + + endpoint := "/containers/" + cleanedContainerID + "/resize?h=40&w=40" + _, err = sockRequest("POST", endpoint) + if err != nil { + t.Fatalf("resize Request failed %v", err) + } + + logDone("container resize - when started") +} + +func TestResizeApiResponseWhenContainerNotStarted(t *testing.T) { + runCmd := exec.Command(dockerBinary, "run", "-d", "busybox", "true") + out, _, err := runCommandWithOutput(runCmd) + if err != nil { + t.Fatalf(out, err) + } + defer deleteAllContainers() + cleanedContainerID := stripTrailingCharacters(out) + + // make sure the exited cintainer is not running + runCmd = exec.Command(dockerBinary, "wait", cleanedContainerID) + out, _, err = runCommandWithOutput(runCmd) + if err != nil { + t.Fatalf(out, err) + } + + endpoint := "/containers/" + cleanedContainerID + "/resize?h=40&w=40" + body, err := sockRequest("POST", endpoint) + if err == nil { + t.Fatalf("resize should fail when container is not started") + } + if !strings.Contains(string(body), "Cannot resize container") && !strings.Contains(string(body), cleanedContainerID) { + t.Fatalf("resize should fail with message 'Cannot resize container' but instead received %s", string(body)) + } + + logDone("container resize - when not started should not resize") +} diff --git a/integration-cli/docker_utils.go b/integration-cli/docker_utils.go index 109014db74..b9660d20b6 100644 --- a/integration-cli/docker_utils.go +++ b/integration-cli/docker_utils.go @@ -254,7 +254,8 @@ func sockRequest(method, endpoint string) ([]byte, error) { } defer resp.Body.Close() if resp.StatusCode != http.StatusOK { - return nil, fmt.Errorf("received status != 200 OK: %s", resp.Status) + body, _ := ioutil.ReadAll(resp.Body) + return body, fmt.Errorf("received status != 200 OK: %s", resp.Status) } return ioutil.ReadAll(resp.Body) From 3e10b93106dea94e5747ab32fe4ac765aa22f9bc Mon Sep 17 00:00:00 2001 From: Michael Hudson-Doyle Date: Wed, 1 Oct 2014 13:37:30 +1300 Subject: [PATCH 128/592] Use code generation to set IAMSTATIC instead of -X Signed-off-by: Michael Hudson-Doyle --- .gitignore | 1 + hack/make.sh | 3 ++- hack/make/binary | 11 +++++++++++ hack/make/dynbinary | 9 +++++++++ 4 files changed, 23 insertions(+), 1 deletion(-) diff --git a/.gitignore b/.gitignore index 2a86e41caf..6f6cfa1682 100644 --- a/.gitignore +++ b/.gitignore @@ -27,3 +27,4 @@ docs/AWS_S3_BUCKET docs/GIT_BRANCH docs/VERSION docs/GITCOMMIT +dockerversion/static.go diff --git a/hack/make.sh b/hack/make.sh index d6da3057fa..5d0ddd879f 100755 --- a/hack/make.sh +++ b/hack/make.sh @@ -94,6 +94,8 @@ if [ -z "$DOCKER_CLIENTONLY" ]; then 
DOCKER_BUILDTAGS+=" daemon" fi +rm -f dockerversion/static.go + # Use these flags when compiling the tests and final binary LDFLAGS=' -w @@ -115,7 +117,6 @@ TESTFLAGS+=" -test.timeout=${TIMEOUT}" EXTLDFLAGS_STATIC_DOCKER="$EXTLDFLAGS_STATIC -lpthread -Wl,--unresolved-symbols=ignore-in-object-files" LDFLAGS_STATIC_DOCKER=" $LDFLAGS_STATIC - -X $DOCKER_PKG/dockerversion.IAMSTATIC true -extldflags \"$EXTLDFLAGS_STATIC_DOCKER\" " diff --git a/hack/make/binary b/hack/make/binary index b97069a856..2c71f201b9 100755 --- a/hack/make/binary +++ b/hack/make/binary @@ -3,6 +3,17 @@ set -e DEST=$1 +: ${IAMSTATIC:=true} + +cat > dockerversion/static.go < dockerversion/static.go < Date: Wed, 1 Oct 2014 14:56:45 +1300 Subject: [PATCH 129/592] use code generation for GITCOMMIT/VERSION too Signed-off-by: Michael Hudson-Doyle --- .gitignore | 1 + hack/make.sh | 17 +++++++++++------ 2 files changed, 12 insertions(+), 6 deletions(-) diff --git a/.gitignore b/.gitignore index 6f6cfa1682..d7dae009d6 100644 --- a/.gitignore +++ b/.gitignore @@ -28,3 +28,4 @@ docs/GIT_BRANCH docs/VERSION docs/GITCOMMIT dockerversion/static.go +dockerversion/details.go diff --git a/hack/make.sh b/hack/make.sh index 5d0ddd879f..eda96b5e26 100755 --- a/hack/make.sh +++ b/hack/make.sh @@ -94,14 +94,19 @@ if [ -z "$DOCKER_CLIENTONLY" ]; then DOCKER_BUILDTAGS+=" daemon" fi -rm -f dockerversion/static.go +rm -f dockerversion/static.go dockerversion/details.go +cat > dockerversion/details.go < Date: Wed, 8 Oct 2014 15:31:43 +1300 Subject: [PATCH 130/592] finally, use code generation for INITSHA1 & INITPATH too Signed-off-by: Michael Hudson-Doyle --- .gitignore | 1 + hack/make.sh | 2 +- hack/make/dynbinary | 11 ++++++++++- hack/make/dyntest-integration | 12 +++++++++--- hack/make/dyntest-unit | 12 +++++++++--- 5 files changed, 30 insertions(+), 8 deletions(-) diff --git a/.gitignore b/.gitignore index d7dae009d6..21df574df6 100644 --- a/.gitignore +++ b/.gitignore @@ -29,3 +29,4 @@ docs/VERSION docs/GITCOMMIT dockerversion/static.go dockerversion/details.go +dockerversion/init.go diff --git a/hack/make.sh b/hack/make.sh index eda96b5e26..a841c13424 100755 --- a/hack/make.sh +++ b/hack/make.sh @@ -94,7 +94,7 @@ if [ -z "$DOCKER_CLIENTONLY" ]; then DOCKER_BUILDTAGS+=" daemon" fi -rm -f dockerversion/static.go dockerversion/details.go +rm -f dockerversion/static.go dockerversion/details.go dockerversion/init.go cat > dockerversion/details.go < dockerversion/init.go < dockerversion/init.go < dockerversion/init.go < Date: Fri, 24 Oct 2014 00:22:21 +0000 Subject: [PATCH 131/592] do not send hostconfig at start, as we do on create now Signed-off-by: Victor Vieux --- api/client/commands.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/api/client/commands.go b/api/client/commands.go index f4ced5ecff..d657d47608 100644 --- a/api/client/commands.go +++ b/api/client/commands.go @@ -2289,7 +2289,7 @@ func (cli *DockerCli) CmdRun(args ...string) error { } //start the container - if _, _, err = readBody(cli.call("POST", "/containers/"+runResult.Get("Id")+"/start", hostConfig, false)); err != nil { + if _, _, err = readBody(cli.call("POST", "/containers/"+runResult.Get("Id")+"/start", nil, false)); err != nil { return err } From cdd6e97910c50e5766c8bec2121f906e9ee2d119 Mon Sep 17 00:00:00 2001 From: Erik Hollensbe Date: Fri, 24 Oct 2014 00:23:25 +0000 Subject: [PATCH 132/592] builder: some small fixups + fix a bug where empty entrypoints would not override inheritance. 
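The entrypoint fix in this patch hinges on distinguishing an unset entrypoint (`nil`) from an explicitly empty one (`[]string{}`), so that `ENTRYPOINT []` in a child Dockerfile can clear the value inherited from the parent image (see the `runconfig/merge.go` hunk below). A minimal standalone sketch of that distinction, using hypothetical names rather than Docker's actual types:

```go
package main

import "fmt"

// merge copies the image entrypoint into the user config only when the
// user never set one (nil). An explicitly empty slice means "clear it".
func merge(userEntrypoint, imageEntrypoint []string) []string {
	if userEntrypoint == nil {
		return imageEntrypoint
	}
	return userEntrypoint
}

func main() {
	inherited := []string{"/bin/echo"}
	fmt.Println(merge(nil, inherited))        // [/bin/echo]: unset, so inherit
	fmt.Println(merge([]string{}, inherited)) // []: ENTRYPOINT [] overrides with empty
}
```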
Docker-DCO-1.1-Signed-off-by: Erik Hollensbe (github: erikh) --- builder/dispatchers.go | 10 +++--- builder/evaluator.go | 2 +- builder/parser/parser.go | 5 +-- integration-cli/docker_cli_build_test.go | 43 ++++++++++++++++++++++++ runconfig/merge.go | 5 ++- 5 files changed, 56 insertions(+), 9 deletions(-) diff --git a/builder/dispatchers.go b/builder/dispatchers.go index 0c2a580872..e585c4021e 100644 --- a/builder/dispatchers.go +++ b/builder/dispatchers.go @@ -195,7 +195,7 @@ func run(b *Builder, args []string, attributes map[string]bool, original string) defer func(cmd []string) { b.Config.Cmd = cmd }(cmd) - log.Debugf("Command to be executed: %v", b.Config.Cmd) + log.Debugf("[BUILDER] Command to be executed: %v", b.Config.Cmd) hit, err := b.probeCache() if err != nil { @@ -261,14 +261,14 @@ func entrypoint(b *Builder, args []string, attributes map[string]bool, original parsed := handleJsonArgs(args, attributes) switch { - case len(parsed) == 0: - // ENTYRPOINT [] - b.Config.Entrypoint = nil case attributes["json"]: // ENTRYPOINT ["echo", "hi"] b.Config.Entrypoint = parsed + case len(parsed) == 0: + // ENTRYPOINT [] + b.Config.Entrypoint = nil default: - // ENTYRPOINT echo hi + // ENTRYPOINT echo hi b.Config.Entrypoint = []string{"/bin/sh", "-c", parsed[0]} } diff --git a/builder/evaluator.go b/builder/evaluator.go index 4122616350..aed8d29335 100644 --- a/builder/evaluator.go +++ b/builder/evaluator.go @@ -149,7 +149,7 @@ func (b *Builder) Run(context io.Reader) (string, error) { b.dockerfile = ast // some initializations that would not have been supplied by the caller. - b.Config = &runconfig.Config{Entrypoint: []string{}, Cmd: nil} + b.Config = &runconfig.Config{} b.TmpContainers = map[string]struct{}{} for i, n := range b.dockerfile.Children { diff --git a/builder/parser/parser.go b/builder/parser/parser.go index 5e8bcb5a9c..6b0ab7ab8c 100644 --- a/builder/parser/parser.go +++ b/builder/parser/parser.go @@ -87,10 +87,11 @@ func parseLine(line string) (string, *Node, error) { if sexp.Value != "" || sexp.Next != nil || sexp.Children != nil { node.Next = sexp - node.Attributes = attrs - node.Original = line } + node.Attributes = attrs + node.Original = line + return "", node, nil } diff --git a/integration-cli/docker_cli_build_test.go b/integration-cli/docker_cli_build_test.go index c909b14f0d..80d4ad1488 100644 --- a/integration-cli/docker_cli_build_test.go +++ b/integration-cli/docker_cli_build_test.go @@ -1429,6 +1429,49 @@ func TestBuildExpose(t *testing.T) { logDone("build - expose") } +func TestBuildEmptyEntrypointInheritance(t *testing.T) { + name := "testbuildentrypointinheritance" + name2 := "testbuildentrypointinheritance2" + defer deleteImages(name, name2) + + _, err := buildImage(name, + `FROM busybox + ENTRYPOINT ["/bin/echo"]`, + true) + if err != nil { + t.Fatal(err) + } + res, err := inspectField(name, "Config.Entrypoint") + if err != nil { + t.Fatal(err) + } + + expected := "[/bin/echo]" + if res != expected { + t.Fatalf("Entrypoint %s, expected %s", res, expected) + } + + _, err = buildImage(name2, + fmt.Sprintf(`FROM %s + ENTRYPOINT []`, name), + true) + if err != nil { + t.Fatal(err) + } + res, err = inspectField(name2, "Config.Entrypoint") + if err != nil { + t.Fatal(err) + } + + expected = "[]" + + if res != expected { + t.Fatalf("Entrypoint %s, expected %s", res, expected) + } + + logDone("build - empty entrypoint inheritance") +} + func TestBuildEmptyEntrypoint(t *testing.T) { name := "testbuildentrypoint" defer deleteImages(name) diff --git 
a/runconfig/merge.go b/runconfig/merge.go index 0c60d1df0b..64950bf625 100644 --- a/runconfig/merge.go +++ b/runconfig/merge.go @@ -88,7 +88,10 @@ func Merge(userConf, imageConf *Config) error { if len(userConf.Cmd) == 0 { userConf.Cmd = imageConf.Cmd } - userConf.Entrypoint = imageConf.Entrypoint + + if userConf.Entrypoint == nil { + userConf.Entrypoint = imageConf.Entrypoint + } } if userConf.WorkingDir == "" { userConf.WorkingDir = imageConf.WorkingDir From 4bae6235c0aec3a0e3805b046a08fc7e6aadd46b Mon Sep 17 00:00:00 2001 From: Sven Dowideit Date: Thu, 23 Oct 2014 11:45:21 +1000 Subject: [PATCH 133/592] Add --dryrun to allow testing to the docs-release site, and then fix the double dollar mistake Docker-DCO-1.1-Signed-off-by: Sven Dowideit (github: SvenDowideit) --- Makefile | 2 +- docs/release.sh | 10 +++++++--- 2 files changed, 8 insertions(+), 4 deletions(-) diff --git a/Makefile b/Makefile index 842cc18e71..2d1c79ac2e 100644 --- a/Makefile +++ b/Makefile @@ -34,7 +34,7 @@ docs-shell: docs-build $(DOCKER_RUN_DOCS) -p $(if $(DOCSPORT),$(DOCSPORT):)8000 "$(DOCKER_DOCS_IMAGE)" bash docs-release: docs-build - $(DOCKER_RUN_DOCS) -e BUILD_ROOT "$(DOCKER_DOCS_IMAGE)" ./release.sh + $(DOCKER_RUN_DOCS) -e OPTIONS -e BUILD_ROOT "$(DOCKER_DOCS_IMAGE)" ./release.sh test: build $(DOCKER_RUN_DOCKER) hack/make.sh binary cross test-unit test-integration test-integration-cli diff --git a/docs/release.sh b/docs/release.sh index cdb1a94c82..41881fc05b 100755 --- a/docs/release.sh +++ b/docs/release.sh @@ -14,6 +14,8 @@ If you're publishing the current release's documentation, also set `BUILD_ROOT=y make AWS_S3_BUCKET=docs-stage.docker.com docs-release will then push the documentation site to your s3 bucket. + + Note: you can add `OPTIONS=--dryrun` to see what will be done without sending to the server EOF exit 1 } @@ -22,7 +24,7 @@ EOF VERSION=$(cat VERSION) -if [ "$$AWS_S3_BUCKET" == "docs.docker.com" ]; then +if [ "$AWS_S3_BUCKET" == "docs.docker.com" ]; then if [ "${VERSION%-dev}" != "$VERSION" ]; then echo "Please do not push '-dev' documentation to docs.docker.com ($VERSION)" exit 1 @@ -96,7 +98,7 @@ upload_current_documentation() { done include="--include *.$i $include" echo "uploading *.$i" - run="aws s3 sync --profile $BUCKET --cache-control \"max-age=3600\" --acl public-read \ + run="aws s3 sync $OPTIONS --profile $BUCKET --cache-control \"max-age=3600\" --acl public-read \ $include \ --exclude *.text* \ --exclude *.*~ \ @@ -118,7 +120,9 @@ upload_current_documentation() { done } -setup_s3 +if [ "$OPTIONS" != "--dryrun" ]; then + setup_s3 +fi # Default to only building the version specific docs so we don't clober the latest by accident with old versions if [ "$BUILD_ROOT" == "yes" ]; then From 7f1ea7129e7b87e60ea4b1e4449b0541f6f432c9 Mon Sep 17 00:00:00 2001 From: Sven Dowideit Date: Fri, 24 Oct 2014 12:41:19 +1000 Subject: [PATCH 134/592] Add a link to some documentation about exec. Docker-DCO-1.1-Signed-off-by: Sven Dowideit (github: SvenDowideit) --- docs/sources/articles/dockerfile_best-practices.md | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/docs/sources/articles/dockerfile_best-practices.md b/docs/sources/articles/dockerfile_best-practices.md index 31f932d651..c403c2f286 100644 --- a/docs/sources/articles/dockerfile_best-practices.md +++ b/docs/sources/articles/dockerfile_best-practices.md @@ -313,8 +313,9 @@ beginning user will then be forced to learn about `ENTRYPOINT` and `--entrypoint`. 
In order to avoid a situation where commands are run without clear visibility -to the user, make sure your script ends with something like `exec "$@"`. After -the entrypoint completes, the script will transparently bootstrap the command +to the user, make sure your script ends with something like `exec "$@"` (see +[the exec builtin command](http://wiki.bash-hackers.org/commands/builtin/exec)). +After the entrypoint completes, the script will transparently bootstrap the command invoked by the user, making what has been run clear to the user (for example, `docker run -it mysql mysqld --some --flags` will transparently run `mysqld --some --flags` after `ENTRYPOINT` runs `initdb`). From 417fde550e30e6e2b8853f73f6c85f25b9c6a80f Mon Sep 17 00:00:00 2001 From: Mathias Monnerville Date: Fri, 24 Oct 2014 09:09:20 +0200 Subject: [PATCH 135/592] dockerimages.md: typo fix --- docs/sources/userguide/dockerimages.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/sources/userguide/dockerimages.md b/docs/sources/userguide/dockerimages.md index 382eef2410..fe40313773 100644 --- a/docs/sources/userguide/dockerimages.md +++ b/docs/sources/userguide/dockerimages.md @@ -65,7 +65,7 @@ So when we run a container we refer to a tagged image like so: $ sudo docker run -t -i ubuntu:14.04 /bin/bash -If instead we wanted to build an Ubuntu 12.04 image we'd use: +If instead we wanted to run an Ubuntu 12.04 image we'd use: $ sudo docker run -t -i ubuntu:12.04 /bin/bash From ab03e898ff22e1bdb2452cfc2d9aad1313c97fc9 Mon Sep 17 00:00:00 2001 From: Nicolas De loof Date: Sat, 18 Oct 2014 12:58:20 +0200 Subject: [PATCH 136/592] Document alternate command to check encrypted TCP socket as curl command seems to fail on OSX Mavericks Signed-off-by: Nicolas De Loof --- docs/sources/reference/api/docker_remote_api.md | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/docs/sources/reference/api/docker_remote_api.md b/docs/sources/reference/api/docker_remote_api.md index 39c83743bf..8033478303 100644 --- a/docs/sources/reference/api/docker_remote_api.md +++ b/docs/sources/reference/api/docker_remote_api.md @@ -8,8 +8,10 @@ page_keywords: API, Docker, rcli, REST, documentation and the client must have `root` access to interact with the daemon. - If the Docker daemon is set to use an encrypted TCP socket (`--tls`, or `--tlsverify`) as with Boot2Docker 1.3.0, then you need to add extra - parameters to `curl` when making test API requests: + parameters to `curl` or `wget` when making test API requests: `curl --insecure --cert ~/.docker/cert.pem --key ~/.docker/key.pem https://boot2docker:2376/images/json` + or + `wget --no-check-certificate --certificate=$DOCKER_CERT_PATH/cert.pem --private-key=$DOCKER_CERT_PATH/key.pem https://boot2docker:2376/images/json -O - -q` - If a group named `docker` exists on your system, docker will apply ownership of the socket to the group. - The API tends to be REST, but for some complex commands, like attach From 0fdf7839a21390f5813589195639caf594a3ddc2 Mon Sep 17 00:00:00 2001 From: Vincent Batts Date: Fri, 26 Sep 2014 11:55:23 -0400 Subject: [PATCH 137/592] archive: tests and benchmarks for hardlinks Adding moar information, so benchmark comparisons can be moar comparative. 
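The test helpers added in this patch verify hardlinks by reading link counts and inode numbers out of `syscall.Stat_t`. A minimal, Linux-only sketch of the underlying check (a hypothetical helper, not part of the patch itself):

```go
package main

import (
	"fmt"
	"io/ioutil"
	"os"
	"path/filepath"
	"syscall"
)

// sameInode reports whether two paths refer to the same inode on the same
// device, i.e. whether one is a hardlink of the other.
func sameInode(a, b string) (bool, error) {
	sa, err := os.Stat(a)
	if err != nil {
		return false, err
	}
	sb, err := os.Stat(b)
	if err != nil {
		return false, err
	}
	sta, ok := sa.Sys().(*syscall.Stat_t)
	if !ok {
		return false, fmt.Errorf("expected *syscall.Stat_t, got %T", sa.Sys())
	}
	stb, ok := sb.Sys().(*syscall.Stat_t)
	if !ok {
		return false, fmt.Errorf("expected *syscall.Stat_t, got %T", sb.Sys())
	}
	return sta.Dev == stb.Dev && sta.Ino == stb.Ino, nil
}

func main() {
	dir, _ := ioutil.TempDir("", "hardlink-test")
	defer os.RemoveAll(dir)

	a := filepath.Join(dir, "file-0")
	b := filepath.Join(dir, "file-0-link")
	ioutil.WriteFile(a, []byte("fooo"), 0700)
	os.Link(a, b)

	fmt.Println(sameInode(a, b)) // true <nil>
}
```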
Signed-off-by: Vincent Batts --- pkg/archive/archive_test.go | 111 ++++++++++++++++++++++++++++++++++-- 1 file changed, 107 insertions(+), 4 deletions(-) diff --git a/pkg/archive/archive_test.go b/pkg/archive/archive_test.go index b46f953228..900fff5f01 100644 --- a/pkg/archive/archive_test.go +++ b/pkg/archive/archive_test.go @@ -8,6 +8,7 @@ import ( "os" "os/exec" "path" + "syscall" "testing" "time" @@ -63,6 +64,50 @@ func TestCmdStreamGood(t *testing.T) { } } +func TestTarFiles(t *testing.T) { + // try without hardlinks + if err := checkNoChanges(1000, false); err != nil { + t.Fatal(err) + } + // try with hardlinks + if err := checkNoChanges(1000, true); err != nil { + t.Fatal(err) + } +} + +func checkNoChanges(fileNum int, hardlinks bool) error { + srcDir, err := ioutil.TempDir("", "docker-test-srcDir") + if err != nil { + return err + } + defer os.RemoveAll(srcDir) + + destDir, err := ioutil.TempDir("", "docker-test-destDir") + if err != nil { + return err + } + defer os.RemoveAll(destDir) + + _, err = prepareUntarSourceDirectory(fileNum, srcDir, hardlinks) + if err != nil { + return err + } + + err = TarUntar(srcDir, destDir) + if err != nil { + return err + } + + changes, err := ChangesDirs(destDir, srcDir) + if err != nil { + return err + } + if len(changes) > 0 { + return fmt.Errorf("with %d files and %v hardlinks: expected 0 changes, got %d", fileNum, hardlinks, len(changes)) + } + return nil +} + func tarUntar(t *testing.T, origin string, options *TarOptions) ([]Change, error) { archive, err := TarWithOptions(origin, options) if err != nil { @@ -204,13 +249,42 @@ func TestUntarUstarGnuConflict(t *testing.T) { } } -func prepareUntarSourceDirectory(numberOfFiles int, targetPath string) (int, error) { +func getNlink(path string) (uint64, error) { + stat, err := os.Stat(path) + if err != nil { + return 0, err + } + statT, ok := stat.Sys().(*syscall.Stat_t) + if !ok { + return 0, fmt.Errorf("expected type *syscall.Stat_t, got %t", stat.Sys()) + } + return statT.Nlink, nil +} + +func getInode(path string) (uint64, error) { + stat, err := os.Stat(path) + if err != nil { + return 0, err + } + statT, ok := stat.Sys().(*syscall.Stat_t) + if !ok { + return 0, fmt.Errorf("expected type *syscall.Stat_t, got %t", stat.Sys()) + } + return statT.Ino, nil +} + +func prepareUntarSourceDirectory(numberOfFiles int, targetPath string, makeLinks bool) (int, error) { fileData := []byte("fooo") for n := 0; n < numberOfFiles; n++ { fileName := fmt.Sprintf("file-%d", n) if err := ioutil.WriteFile(path.Join(targetPath, fileName), fileData, 0700); err != nil { return 0, err } + if makeLinks { + if err := os.Link(path.Join(targetPath, fileName), path.Join(targetPath, fileName+"-link")); err != nil { + return 0, err + } + } } totalSize := numberOfFiles * len(fileData) return totalSize, nil @@ -226,14 +300,43 @@ func BenchmarkTarUntar(b *testing.B) { b.Fatal(err) } target := path.Join(tempDir, "dest") - n, err := prepareUntarSourceDirectory(100, origin) + n, err := prepareUntarSourceDirectory(100, origin, false) if err != nil { b.Fatal(err) } - b.ResetTimer() - b.SetBytes(int64(n)) defer os.RemoveAll(origin) defer os.RemoveAll(tempDir) + + b.ResetTimer() + b.SetBytes(int64(n)) + for n := 0; n < b.N; n++ { + err := TarUntar(origin, target) + if err != nil { + b.Fatal(err) + } + os.RemoveAll(target) + } +} + +func BenchmarkTarUntarWithLinks(b *testing.B) { + origin, err := ioutil.TempDir("", "docker-test-untar-origin") + if err != nil { + b.Fatal(err) + } + tempDir, err := ioutil.TempDir("", 
"docker-test-untar-destination") + if err != nil { + b.Fatal(err) + } + target := path.Join(tempDir, "dest") + n, err := prepareUntarSourceDirectory(100, origin, true) + if err != nil { + b.Fatal(err) + } + defer os.RemoveAll(origin) + defer os.RemoveAll(tempDir) + + b.ResetTimer() + b.SetBytes(int64(n)) for n := 0; n < b.N; n++ { err := TarUntar(origin, target) if err != nil { From ca6cc6560987a87b8f3b4fe4c3c227d692c49117 Mon Sep 17 00:00:00 2001 From: Andrea Luzzardi Date: Thu, 23 Oct 2014 17:23:36 -0700 Subject: [PATCH 138/592] Bump API to v1.16. Signed-off-by: Andrea Luzzardi --- api/common.go | 2 +- docs/mkdocs.yml | 1 + .../reference/api/docker_remote_api.md | 14 +- .../reference/api/docker_remote_api_v1.16.md | 1573 +++++++++++++++++ 4 files changed, 1586 insertions(+), 4 deletions(-) create mode 100644 docs/sources/reference/api/docker_remote_api_v1.16.md diff --git a/api/common.go b/api/common.go index 3eecaa0455..7470df6ccd 100644 --- a/api/common.go +++ b/api/common.go @@ -12,7 +12,7 @@ import ( ) const ( - APIVERSION version.Version = "1.15" + APIVERSION version.Version = "1.16" DEFAULTHTTPHOST = "127.0.0.1" DEFAULTUNIXSOCKET = "/var/run/docker.sock" ) diff --git a/docs/mkdocs.yml b/docs/mkdocs.yml index 5ea8d56e60..ca99701d8d 100755 --- a/docs/mkdocs.yml +++ b/docs/mkdocs.yml @@ -112,6 +112,7 @@ pages: - ['reference/api/registry_api_client_libraries.md', 'Reference', 'Docker Registry API Client Libraries'] - ['reference/api/hub_registry_spec.md', 'Reference', 'Docker Hub and Registry Spec'] - ['reference/api/docker_remote_api.md', 'Reference', 'Docker Remote API'] +- ['reference/api/docker_remote_api_v1.16.md', 'Reference', 'Docker Remote API v1.16'] - ['reference/api/docker_remote_api_v1.15.md', 'Reference', 'Docker Remote API v1.15'] - ['reference/api/docker_remote_api_v1.14.md', 'Reference', 'Docker Remote API v1.14'] - ['reference/api/docker_remote_api_v1.13.md', 'Reference', 'Docker Remote API v1.13'] diff --git a/docs/sources/reference/api/docker_remote_api.md b/docs/sources/reference/api/docker_remote_api.md index 39c83743bf..4dd7fff688 100644 --- a/docs/sources/reference/api/docker_remote_api.md +++ b/docs/sources/reference/api/docker_remote_api.md @@ -28,13 +28,21 @@ page_keywords: API, Docker, rcli, REST, documentation Client applications need to take this into account to ensure they will not break when talking to newer Docker daemons. -The current version of the API is v1.15 +The current version of the API is v1.16 Calling `/info` is the same as calling -`/v1.15/info`. +`/v1.16/info`. You can still call an old version of the API using -`/v1.14/info`. +`/v1.15/info`. + +## v1.16 + +### Full Documentation + +[*Docker Remote API v1.16*](/reference/api/docker_remote_api_v1.16/) + +### What's new ## v1.15 diff --git a/docs/sources/reference/api/docker_remote_api_v1.16.md b/docs/sources/reference/api/docker_remote_api_v1.16.md new file mode 100644 index 0000000000..887bebcee1 --- /dev/null +++ b/docs/sources/reference/api/docker_remote_api_v1.16.md @@ -0,0 +1,1573 @@ +page_title: Remote API v1.16 +page_description: API Documentation for Docker +page_keywords: API, Docker, rcli, REST, documentation + +# Docker Remote API v1.16 + +## 1. Brief introduction + + - The Remote API has replaced `rcli`. + - The daemon listens on `unix:///var/run/docker.sock` but you can + [Bind Docker to another host/port or a Unix socket]( + /articles/basics/#bind-docker-to-another-hostport-or-a-unix-socket). 
+ - The API tends to be REST, but for some complex commands, like `attach` + or `pull`, the HTTP connection is hijacked to transport `STDOUT`, + `STDIN` and `STDERR`. + +# 2. Endpoints + +## 2.1 Containers + +### List containers + +`GET /containers/json` + +List containers + +**Example request**: + + GET /containers/json?all=1&before=8dfafdbc3a40&size=1 HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + [ + { + "Id": "8dfafdbc3a40", + "Image": "base:latest", + "Command": "echo 1", + "Created": 1367854155, + "Status": "Exit 0", + "Ports":[{"PrivatePort": 2222, "PublicPort": 3333, "Type": "tcp"}], + "SizeRw":12288, + "SizeRootFs":0 + }, + { + "Id": "9cd87474be90", + "Image": "base:latest", + "Command": "echo 222222", + "Created": 1367854155, + "Status": "Exit 0", + "Ports":[], + "SizeRw":12288, + "SizeRootFs":0 + }, + { + "Id": "3176a2479c92", + "Image": "base:latest", + "Command": "echo 3333333333333333", + "Created": 1367854154, + "Status": "Exit 0", + "Ports":[], + "SizeRw":12288, + "SizeRootFs":0 + }, + { + "Id": "4cb07b47f9fb", + "Image": "base:latest", + "Command": "echo 444444444444444444444444444444444", + "Created": 1367854152, + "Status": "Exit 0", + "Ports":[], + "SizeRw":12288, + "SizeRootFs":0 + } + ] + +Query Parameters: + +- **all** – 1/True/true or 0/False/false, Show all containers. + Only running containers are shown by default (i.e., this defaults to false) +- **limit** – Show `limit` last created + containers, include non-running ones. +- **since** – Show only containers created since Id, include + non-running ones. +- **before** – Show only containers created before Id, include + non-running ones. +- **size** – 1/True/true or 0/False/false, Show the containers + sizes + +Status Codes: + +- **200** – no error +- **400** – bad parameter +- **500** – server error + +### Create a container + +`POST /containers/create` + +Create a container + +**Example request**: + + POST /containers/create HTTP/1.1 + Content-Type: application/json + + { + "Hostname":"", + "Domainname": "", + "User":"", + "Memory":0, + "MemorySwap":0, + "CpuShares": 512, + "Cpuset": "0,1", + "AttachStdin":false, + "AttachStdout":true, + "AttachStderr":true, + "PortSpecs":null, + "Tty":false, + "OpenStdin":false, + "StdinOnce":false, + "Env":null, + "Cmd":[ + "date" + ], + "Image":"base", + "Volumes":{ + "/tmp": {} + }, + "WorkingDir":"", + "NetworkDisabled": false, + "ExposedPorts":{ + "22/tcp": {} + }, + "RestartPolicy": { "Name": "always" } + } + +**Example response**: + + HTTP/1.1 201 Created + Content-Type: application/json + + { + "Id":"e90e34656806" + "Warnings":[] + } + +Json Parameters: + +- **RestartPolicy** – The behavior to apply when the container exits. The + value is an object with a `Name` property of either `"always"` to + always restart or `"on-failure"` to restart only when the container + exit code is non-zero. If `on-failure` is used, `MaximumRetryCount` + controls the number of times to retry before giving up. + The default is not to restart. (optional) +- **Volumes** – An object mapping mountpoint paths (strings) inside the + container to empty objects. +- **config** – the container's configuration + +Query Parameters: + +- **name** – Assign the specified name to the container. Must + match `/?[a-zA-Z0-9_-]+`. 
+ +Status Codes: + +- **201** – no error +- **404** – no such container +- **406** – impossible to attach (container not running) +- **500** – server error + +### Inspect a container + +`GET /containers/(id)/json` + +Return low-level information on the container `id` + + +**Example request**: + + GET /containers/4fa6e0f0c678/json HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + { + "Id": "4fa6e0f0c6786287e131c3852c58a2e01cc697a68231826813597e4994f1d6e2", + "Created": "2013-05-07T14:51:42.041847+02:00", + "Path": "date", + "Args": [], + "Config": { + "Hostname": "4fa6e0f0c678", + "User": "", + "Memory": 0, + "MemorySwap": 0, + "AttachStdin": false, + "AttachStdout": true, + "AttachStderr": true, + "PortSpecs": null, + "Tty": false, + "OpenStdin": false, + "StdinOnce": false, + "Env": null, + "Cmd": [ + "date" + ], + "Dns": null, + "Image": "base", + "Volumes": {}, + "VolumesFrom": "", + "WorkingDir":"" + + }, + "State": { + "Running": false, + "Pid": 0, + "ExitCode": 0, + "StartedAt": "2013-05-07T14:51:42.087658+02:01360", + "Ghost": false + }, + "Image": "b750fe79269d2ec9a3c593ef05b4332b1d1a02a62b4accb2c21d589ff2f5f2dc", + "NetworkSettings": { + "IpAddress": "", + "IpPrefixLen": 0, + "Gateway": "", + "Bridge": "", + "PortMapping": null + }, + "SysInitPath": "/home/kitty/go/src/github.com/docker/docker/bin/docker", + "ResolvConfPath": "/etc/resolv.conf", + "Volumes": {}, + "HostConfig": { + "Binds": null, + "ContainerIDFile": "", + "LxcConf": [], + "Privileged": false, + "PortBindings": { + "80/tcp": [ + { + "HostIp": "0.0.0.0", + "HostPort": "49153" + } + ] + }, + "Links": ["/name:alias"], + "PublishAllPorts": false, + "CapAdd: ["NET_ADMIN"], + "CapDrop: ["MKNOD"] + } + } + +Status Codes: + +- **200** – no error +- **404** – no such container +- **500** – server error + +### List processes running inside a container + +`GET /containers/(id)/top` + +List processes running inside the container `id` + +**Example request**: + + GET /containers/4fa6e0f0c678/top HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + { + "Titles":[ + "USER", + "PID", + "%CPU", + "%MEM", + "VSZ", + "RSS", + "TTY", + "STAT", + "START", + "TIME", + "COMMAND" + ], + "Processes":[ + ["root","20147","0.0","0.1","18060","1864","pts/4","S","10:06","0:00","bash"], + ["root","20271","0.0","0.0","4312","352","pts/4","S+","10:07","0:00","sleep","10"] + ] + } + +Query Parameters: + +- **ps_args** – ps arguments to use (e.g., aux) + +Status Codes: + +- **200** – no error +- **404** – no such container +- **500** – server error + +### Get container logs + +`GET /containers/(id)/logs` + +Get stdout and stderr logs from the container ``id`` + +**Example request**: + + GET /containers/4fa6e0f0c678/logs?stderr=1&stdout=1×tamps=1&follow=1&tail=10 HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/vnd.docker.raw-stream + + {{ STREAM }} + +Query Parameters: + +- **follow** – 1/True/true or 0/False/false, return stream. Default false +- **stdout** – 1/True/true or 0/False/false, show stdout log. Default false +- **stderr** – 1/True/true or 0/False/false, show stderr log. Default false +- **timestamps** – 1/True/true or 0/False/false, print timestamps for + every log line. Default false +- **tail** – Output specified number of lines at the end of logs: `all` or ``. 
Default all + +Status Codes: + +- **200** – no error +- **404** – no such container +- **500** – server error + +### Inspect changes on a container's filesystem + +`GET /containers/(id)/changes` + +Inspect changes on container `id`'s filesystem + +**Example request**: + + GET /containers/4fa6e0f0c678/changes HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + [ + { + "Path":"/dev", + "Kind":0 + }, + { + "Path":"/dev/kmsg", + "Kind":1 + }, + { + "Path":"/test", + "Kind":1 + } + ] + +Status Codes: + +- **200** – no error +- **404** – no such container +- **500** – server error + +### Export a container + +`GET /containers/(id)/export` + +Export the contents of container `id` + +**Example request**: + + GET /containers/4fa6e0f0c678/export HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/octet-stream + + {{ TAR STREAM }} + +Status Codes: + +- **200** – no error +- **404** – no such container +- **500** – server error + +### Resize a container TTY + +`GET /containers/(id)/resize?h=&w=` + +Resize the TTY of container `id` + +**Example request**: + + GET /containers/4fa6e0f0c678/resize?h=40&w=80 HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Length: 0 + Content-Type: text/plain; charset=utf-8 + +Status Codes: + +- **200** – no error +- **404** – No such container +- **500** – bad file descriptor + +### Start a container + +`POST /containers/(id)/start` + +Start the container `id` + +**Example request**: + + POST /containers/(id)/start HTTP/1.1 + Content-Type: application/json + + { + "Binds":["/tmp:/tmp"], + "Links":["redis3:redis"], + "LxcConf":{"lxc.utsname":"docker"}, + "PortBindings":{ "22/tcp": [{ "HostPort": "11022" }] }, + "PublishAllPorts":false, + "Privileged":false, + "Dns": ["8.8.8.8"], + "VolumesFrom": ["parent", "other:ro"], + "CapAdd": ["NET_ADMIN"], + "CapDrop": ["MKNOD"] + } + +**Example response**: + + HTTP/1.1 204 No Content + +Json Parameters: + +- **Binds** – A list of volume bindings for this container. Each volume + binding is a string of the form `container_path` (to create a new + volume for the container), `host_path:container_path` (to bind-mount + a host path into the container), or `host_path:container_path:ro` + (to make the bind-mount read-only inside the container). 
+- **hostConfig** – the container's host configuration (optional) + +Status Codes: + +- **204** – no error +- **304** – container already started +- **404** – no such container +- **500** – server error + +### Stop a container + +`POST /containers/(id)/stop` + +Stop the container `id` + +**Example request**: + + POST /containers/e90e34656806/stop?t=5 HTTP/1.1 + +**Example response**: + + HTTP/1.1 204 No Content + +Query Parameters: + +- **t** – number of seconds to wait before killing the container + +Status Codes: + +- **204** – no error +- **304** – container already stopped +- **404** – no such container +- **500** – server error + +### Restart a container + +`POST /containers/(id)/restart` + +Restart the container `id` + +**Example request**: + + POST /containers/e90e34656806/restart?t=5 HTTP/1.1 + +**Example response**: + + HTTP/1.1 204 No Content + +Query Parameters: + +- **t** – number of seconds to wait before killing the container + +Status Codes: + +- **204** – no error +- **404** – no such container +- **500** – server error + +### Kill a container + +`POST /containers/(id)/kill` + +Kill the container `id` + +**Example request**: + + POST /containers/e90e34656806/kill HTTP/1.1 + +**Example response**: + + HTTP/1.1 204 No Content + +Query Parameters + +- **signal** - Signal to send to the container: integer or string like "SIGINT". + When not set, SIGKILL is assumed and the call will waits for the container to exit. + +Status Codes: + +- **204** – no error +- **404** – no such container +- **500** – server error + +### Pause a container + +`POST /containers/(id)/pause` + +Pause the container `id` + +**Example request**: + + POST /containers/e90e34656806/pause HTTP/1.1 + +**Example response**: + + HTTP/1.1 204 No Content + +Status Codes: + +- **204** – no error +- **404** – no such container +- **500** – server error + +### Unpause a container + +`POST /containers/(id)/unpause` + +Unpause the container `id` + +**Example request**: + + POST /containers/e90e34656806/unpause HTTP/1.1 + +**Example response**: + + HTTP/1.1 204 No Content + +Status Codes: + +- **204** – no error +- **404** – no such container +- **500** – server error + +### Attach to a container + +`POST /containers/(id)/attach` + +Attach to the container `id` + +**Example request**: + + POST /containers/16253994b7c4/attach?logs=1&stream=0&stdout=1 HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/vnd.docker.raw-stream + + {{ STREAM }} + +Query Parameters: + +- **logs** – 1/True/true or 0/False/false, return logs. Default false +- **stream** – 1/True/true or 0/False/false, return stream. + Default false +- **stdin** – 1/True/true or 0/False/false, if stream=true, attach + to stdin. Default false +- **stdout** – 1/True/true or 0/False/false, if logs=true, return + stdout log, if stream=true, attach to stdout. Default false +- **stderr** – 1/True/true or 0/False/false, if logs=true, return + stderr log, if stream=true, attach to stderr. Default false + +Status Codes: + +- **200** – no error +- **400** – bad parameter +- **404** – no such container +- **500** – server error + + **Stream details**: + + When using the TTY setting is enabled in + [`POST /containers/create` + ](../docker_remote_api_v1.9/#post--containers-create "POST /containers/create"), + the stream is the raw data from the process PTY and client's stdin. + When the TTY is disabled, then the stream is multiplexed to separate + stdout and stderr. + + The format is a **Header** and a **Payload** (frame). 
+ + **HEADER** + + The header will contain the information on which stream write the + stream (stdout or stderr). It also contain the size of the + associated frame encoded on the last 4 bytes (uint32). + + It is encoded on the first 8 bytes like this: + + header := [8]byte{STREAM_TYPE, 0, 0, 0, SIZE1, SIZE2, SIZE3, SIZE4} + + `STREAM_TYPE` can be: + +- 0: stdin (will be written on stdout) +- 1: stdou +- 2: stderr + + `SIZE1, SIZE2, SIZE3, SIZE4` are the 4 bytes of + the uint32 size encoded as big endian. + + **PAYLOAD** + + The payload is the raw stream. + + **IMPLEMENTATION** + + The simplest way to implement the Attach protocol is the following: + + 1. Read 8 bytes + 2. chose stdout or stderr depending on the first byte + 3. Extract the frame size from the last 4 byets + 4. Read the extracted size and output it on the correct output + 5. Goto 1 + +### Wait a container + +`POST /containers/(id)/wait` + +Block until container `id` stops, then returns the exit code + +**Example request**: + + POST /containers/16253994b7c4/wait HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + {"StatusCode":0} + +Status Codes: + +- **200** – no error +- **404** – no such container +- **500** – server error + +### Remove a container + +`DELETE /containers/(id)` + +Remove the container `id` from the filesystem + +**Example request**: + + DELETE /containers/16253994b7c4?v=1 HTTP/1.1 + +**Example response**: + + HTTP/1.1 204 No Content + +Query Parameters: + +- **v** – 1/True/true or 0/False/false, Remove the volumes + associated to the container. Default false +- **force** - 1/True/true or 0/False/false, Kill then remove the container. + Default false + +Status Codes: + +- **204** – no error +- **400** – bad parameter +- **404** – no such container +- **500** – server error + +### Copy files or folders from a container + +`POST /containers/(id)/copy` + +Copy files or folders of container `id` + +**Example request**: + + POST /containers/4fa6e0f0c678/copy HTTP/1.1 + Content-Type: application/json + + { + "Resource":"test.txt" + } + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/x-tar + + {{ TAR STREAM }} + +Status Codes: + +- **200** – no error +- **404** – no such container +- **500** – server error + +## 2.2 Images + +### List Images + +`GET /images/json` + +**Example request**: + + GET /images/json?all=0 HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + [ + { + "RepoTags": [ + "ubuntu:12.04", + "ubuntu:precise", + "ubuntu:latest" + ], + "Id": "8dbd9e392a964056420e5d58ca5cc376ef18e2de93b5cc90e868a1bbc8318c1c", + "Created": 1365714795, + "Size": 131506275, + "VirtualSize": 131506275 + }, + { + "RepoTags": [ + "ubuntu:12.10", + "ubuntu:quantal" + ], + "ParentId": "27cf784147099545", + "Id": "b750fe79269d2ec9a3c593ef05b4332b1d1a02a62b4accb2c21d589ff2f5f2dc", + "Created": 1364102658, + "Size": 24653, + "VirtualSize": 180116135 + } + ] + + +Query Parameters: + +- **all** – 1/True/true or 0/False/false, default false +- **filters** – a json encoded value of the filters (a map[string][]string) to process on the images list. 
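The attach framing described in the "Attach to a container" section above (an 8-byte header carrying the stream type plus a big-endian uint32 payload size) can be demultiplexed with a short loop. A minimal sketch, reading from stdin as a stand-in for the hijacked connection and leaving partial-read handling to `io.ReadFull`:

```go
package main

import (
	"encoding/binary"
	"io"
	"os"
)

// demux reads multiplexed attach frames from r and copies each payload
// to stdout or stderr according to the stream type byte in the header.
func demux(r io.Reader) error {
	header := make([]byte, 8)
	for {
		if _, err := io.ReadFull(r, header); err != nil {
			if err == io.EOF {
				return nil
			}
			return err
		}
		var out io.Writer = os.Stdout
		if header[0] == 2 { // 2 = stderr; 0 (stdin) and 1 (stdout) go to stdout
			out = os.Stderr
		}
		size := binary.BigEndian.Uint32(header[4:8])
		if _, err := io.CopyN(out, r, int64(size)); err != nil {
			return err
		}
	}
}

func main() {
	if err := demux(os.Stdin); err != nil {
		panic(err)
	}
}
```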
+ +### Create an image + +`POST /images/create` + +Create an image, either by pulling it from the registry or by importing it + +**Example request**: + + POST /images/create?fromImage=base HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + {"status":"Pulling..."} + {"status":"Pulling", "progress":"1 B/ 100 B", "progressDetail":{"current":1, "total":100}} + {"error":"Invalid..."} + ... + + When using this endpoint to pull an image from the registry, the + `X-Registry-Auth` header can be used to include + a base64-encoded AuthConfig object. + +Query Parameters: + +- **fromImage** – name of the image to pull +- **fromSrc** – source to import, - means stdin +- **repo** – repository +- **tag** – tag +- **registry** – the registry to pull from + + Request Headers: + +- **X-Registry-Auth** – base64-encoded AuthConfig object + +Status Codes: + +- **200** – no error +- **500** – server error + + + +### Inspect an image + +`GET /images/(name)/json` + +Return low-level information on the image `name` + +**Example request**: + + GET /images/base/json HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + { + "Created":"2013-03-23T22:24:18.818426-07:00", + "Container":"3d67245a8d72ecf13f33dffac9f79dcdf70f75acb84d308770391510e0c23ad0", + "ContainerConfig": + { + "Hostname":"", + "User":"", + "Memory":0, + "MemorySwap":0, + "AttachStdin":false, + "AttachStdout":false, + "AttachStderr":false, + "PortSpecs":null, + "Tty":true, + "OpenStdin":true, + "StdinOnce":false, + "Env":null, + "Cmd": ["/bin/bash"], + "Dns":null, + "Image":"base", + "Volumes":null, + "VolumesFrom":"", + "WorkingDir":"" + }, + "Id":"b750fe79269d2ec9a3c593ef05b4332b1d1a02a62b4accb2c21d589ff2f5f2dc", + "Parent":"27cf784147099545", + "Size": 6824592 + } + +Status Codes: + +- **200** – no error +- **404** – no such image +- **500** – server error + +### Get the history of an image + +`GET /images/(name)/history` + +Return the history of the image `name` + +**Example request**: + + GET /images/base/history HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + [ + { + "Id":"b750fe79269d", + "Created":1364102658, + "CreatedBy":"/bin/bash" + }, + { + "Id":"27cf78414709", + "Created":1364068391, + "CreatedBy":"" + } + ] + +Status Codes: + +- **200** – no error +- **404** – no such image +- **500** – server error + +### Push an image on the registry + +`POST /images/(name)/push` + +Push the image `name` on the registry + +**Example request**: + + POST /images/test/push HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + {"status":"Pushing..."} + {"status":"Pushing", "progress":"1/? (n/a)", "progressDetail":{"current":1}}} + {"error":"Invalid..."} + ... + + If you wish to push an image on to a private registry, that image must already have been tagged + into a repository which references that registry host name and port. This repository name should + then be used in the URL. This mirrors the flow of the CLI. + +**Example request**: + + POST /images/registry.acme.com:5000/test/push HTTP/1.1 + + +Query Parameters: + +- **tag** – the tag to associate with the image on the registry, optional + +Request Headers: + +- **X-Registry-Auth** – include a base64-encoded AuthConfig + object. 
+ +Status Codes: + +- **200** – no error +- **404** – no such image +- **500** – server error + +### Tag an image into a repository + +`POST /images/(name)/tag` + +Tag the image `name` into a repository + +**Example request**: + + POST /images/test/tag?repo=myrepo&force=0&tag=v42 HTTP/1.1 + +**Example response**: + + HTTP/1.1 201 OK + +Query Parameters: + +- **repo** – The repository to tag in +- **force** – 1/True/true or 0/False/false, default false +- **tag** - The new tag name + +Status Codes: + +- **201** – no error +- **400** – bad parameter +- **404** – no such image +- **409** – conflict +- **500** – server error + +### Remove an image + +`DELETE /images/(name)` + +Remove the image `name` from the filesystem + +**Example request**: + + DELETE /images/test HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-type: application/json + + [ + {"Untagged":"3e2f21a89f"}, + {"Deleted":"3e2f21a89f"}, + {"Deleted":"53b4f83ac9"} + ] + +Query Parameters: + +- **force** – 1/True/true or 0/False/false, default false +- **noprune** – 1/True/true or 0/False/false, default false + +Status Codes: + +- **200** – no error +- **404** – no such image +- **409** – conflict +- **500** – server error + +### Search images + +`GET /images/search` + +Search for an image on [Docker Hub](https://hub.docker.com). + +> **Note**: +> The response keys have changed from API v1.6 to reflect the JSON +> sent by the registry server to the docker daemon's request. + +**Example request**: + + GET /images/search?term=sshd HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + [ + { + "description": "", + "is_official": false, + "is_automated": false, + "name": "wma55/u1210sshd", + "star_count": 0 + }, + { + "description": "", + "is_official": false, + "is_automated": false, + "name": "jdswinbank/sshd", + "star_count": 0 + }, + { + "description": "", + "is_official": false, + "is_automated": false, + "name": "vgauthier/sshd", + "star_count": 0 + } + ... + ] + +Query Parameters: + +- **term** – term to search + +Status Codes: + +- **200** – no error +- **500** – server error + +## 2.3 Misc + +### Build an image from Dockerfile via stdin + +`POST /build` + +Build an image from Dockerfile via stdin + +**Example request**: + + POST /build HTTP/1.1 + + {{ TAR STREAM }} + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + {"stream":"Step 1..."} + {"stream":"..."} + {"error":"Error...", "errorDetail":{"code": 123, "message": "Error..."}} + + The stream must be a tar archive compressed with one of the + following algorithms: identity (no compression), gzip, bzip2, xz. + + The archive must include a file called `Dockerfile` + at its root. It may include any number of other files, + which will be accessible in the build context (See the [*ADD build + command*](/reference/builder/#dockerbuilder)). + +Query Parameters: + +- **t** – repository name (and optionally a tag) to be applied to + the resulting image in case of success +- **q** – suppress verbose build output +- **nocache** – do not use the cache when building the image +- **rm** - remove intermediate containers after a successful build (default behavior) +- **forcerm - always remove intermediate containers (includes rm) + + Request Headers: + +- **Content-type** – should be set to `"application/tar"`. 
+- **X-Registry-Config** – base64-encoded ConfigFile object
+
+Status Codes:
+
+- **200** – no error
+- **500** – server error
+
+### Check auth configuration
+
+`POST /auth`
+
+Get the default username and email
+
+**Example request**:
+
+    POST /auth HTTP/1.1
+    Content-Type: application/json
+
+    {
+        "username":"hannibal",
+        "password":"xxxx",
+        "email":"hannibal@a-team.com",
+        "serveraddress":"https://index.docker.io/v1/"
+    }
+
+**Example response**:
+
+    HTTP/1.1 200 OK
+
+Status Codes:
+
+- **200** – no error
+- **204** – no error
+- **500** – server error
+
+### Display system-wide information
+
+`GET /info`
+
+Display system-wide information
+
+**Example request**:
+
+    GET /info HTTP/1.1
+
+**Example response**:
+
+    HTTP/1.1 200 OK
+    Content-Type: application/json
+
+    {
+        "Containers":11,
+        "Images":16,
+        "Driver":"btrfs",
+        "ExecutionDriver":"native-0.1",
+        "KernelVersion":"3.12.0-1-amd64",
+        "Debug":false,
+        "NFd": 11,
+        "NGoroutines":21,
+        "NEventsListener":0,
+        "InitPath":"/usr/bin/docker",
+        "IndexServerAddress":["https://index.docker.io/v1/"],
+        "MemoryLimit":true,
+        "SwapLimit":false,
+        "IPv4Forwarding":true
+    }
+
+Status Codes:
+
+- **200** – no error
+- **500** – server error
+
+### Show the docker version information
+
+`GET /version`
+
+Show the docker version information
+
+**Example request**:
+
+    GET /version HTTP/1.1
+
+**Example response**:
+
+    HTTP/1.1 200 OK
+    Content-Type: application/json
+
+    {
+        "ApiVersion":"1.12",
+        "Version":"0.2.2",
+        "GitCommit":"5a2a5cc+CHANGES",
+        "GoVersion":"go1.0.3"
+    }
+
+Status Codes:
+
+- **200** – no error
+- **500** – server error
+
+### Ping the docker server
+
+`GET /_ping`
+
+Ping the docker server
+
+**Example request**:
+
+    GET /_ping HTTP/1.1
+
+**Example response**:
+
+    HTTP/1.1 200 OK
+    Content-Type: text/plain
+
+    OK
+
+Status Codes:
+
+- **200** - no error
+- **500** - server error
+
+### Create a new image from a container's changes
+
+`POST /commit`
+
+Create a new image from a container's changes
+
+**Example request**:
+
+    POST /commit?container=44c004db4b17&comment=message&repo=myrepo HTTP/1.1
+    Content-Type: application/json
+
+    {
+        "Hostname":"",
+        "Domainname": "",
+        "User":"",
+        "Memory":0,
+        "MemorySwap":0,
+        "CpuShares": 512,
+        "Cpuset": "0,1",
+        "AttachStdin":false,
+        "AttachStdout":true,
+        "AttachStderr":true,
+        "PortSpecs":null,
+        "Tty":false,
+        "OpenStdin":false,
+        "StdinOnce":false,
+        "Env":null,
+        "Cmd":[
+                "date"
+        ],
+        "Volumes":{
+                "/tmp": {}
+        },
+        "WorkingDir":"",
+        "NetworkDisabled": false,
+        "ExposedPorts":{
+                "22/tcp": {}
+        }
+    }
+
+**Example response**:
+
+    HTTP/1.1 201 Created
+    Content-Type: application/vnd.docker.raw-stream
+
+    {"Id":"596069db4bf5"}
+
+Json Parameters:
+
+- **config** – the container's configuration
+
+Query Parameters:
+
+- **container** – source container
+- **repo** – repository
+- **tag** – tag
+- **comment** – commit message
+- **author** – author (e.g., "John Hannibal Smith
+  <[hannibal@a-team.com](mailto:hannibal%40a-team.com)>")
+
+Status Codes:
+
+- **201** – no error
+- **404** – no such container
+- **500** – server error
+
+### Monitor Docker's events
+
+`GET /events`
+
+Get container events from docker, either in real time via streaming, or via
+polling (using since).
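+
+For illustration only (this sketch is not part of the API reference), the example below shows one
+way a Go client might consume the streaming form of this endpoint. The daemon address is an
+assumption, and the `event` struct simply mirrors the fields of the example response shown
+further down; adjust both to your setup.
+
+    package main
+
+    import (
+        "encoding/json"
+        "fmt"
+        "net/http"
+    )
+
+    type event struct {
+        Status string `json:"status"`
+        ID     string `json:"id"`
+        From   string `json:"from"`
+        Time   int64  `json:"time"`
+    }
+
+    func main() {
+        // Assumes a daemon reachable on localhost:2375; adjust to your -H setting.
+        resp, err := http.Get("http://localhost:2375/events?since=1374067924")
+        if err != nil {
+            panic(err)
+        }
+        defer resp.Body.Close()
+
+        // The body is a stream of JSON objects, one per event, until the connection closes.
+        dec := json.NewDecoder(resp.Body)
+        for {
+            var ev event
+            if err := dec.Decode(&ev); err != nil {
+                break // io.EOF once the daemon closes the stream
+            }
+            fmt.Printf("%s %s (from %s) at %d\n", ev.Status, ev.ID, ev.From, ev.Time)
+        }
+    }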
+ +Docker containers will report the following events: + + create, destroy, die, export, kill, pause, restart, start, stop, unpause + +and Docker images will report: + + untag, delete + +**Example request**: + + GET /events?since=1374067924 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + {"status":"create","id":"dfdf82bd3881","from":"base:latest","time":1374067924} + {"status":"start","id":"dfdf82bd3881","from":"base:latest","time":1374067924} + {"status":"stop","id":"dfdf82bd3881","from":"base:latest","time":1374067966} + {"status":"destroy","id":"dfdf82bd3881","from":"base:latest","time":1374067970} + +Query Parameters: + +- **since** – timestamp used for polling +- **until** – timestamp used for polling + +Status Codes: + +- **200** – no error +- **500** – server error + +### Get a tarball containing all images in a repository + +`GET /images/(name)/get` + +Get a tarball containing all images and metadata for the repository specified +by `name`. + +If `name` is a specific name and tag (e.g. ubuntu:latest), then only that image +(and its parents) are returned. If `name` is an image ID, similarly only tha +image (and its parents) are returned, but with the exclusion of the +'repositories' file in the tarball, as there were no image names referenced. + +See the [image tarball format](#image-tarball-format) for more details. + +**Example request** + + GET /images/ubuntu/get + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/x-tar + + Binary data stream + +Status Codes: + +- **200** – no error +- **500** – server error + +### Get a tarball containing all images. + +`GET /images/get` + +Get a tarball containing all images and metadata for one or more repositories. + +For each value of the `names` parameter: if it is a specific name and tag (e.g. +ubuntu:latest), then only that image (and its parents) are returned; if it is +an image ID, similarly only that image (and its parents) are returned and there +would be no names referenced in the 'repositories' file for this image ID. + +See the [image tarball format](#image-tarball-format) for more details. + +**Example request** + + GET /images/get?names=myname%2Fmyapp%3Alatest&names=busybox + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/x-tar + + Binary data stream + +Status Codes: + +- **200** – no error +- **500** – server error + +### Load a tarball with a set of images and tags into docker + +`POST /images/load` + +Load a set of images and tags into the docker repository. +See the [image tarball format](#image-tarball-format) for more details. + +**Example request** + + POST /images/load + + Tarball in body + +**Example response**: + + HTTP/1.1 200 OK + +Status Codes: + +- **200** – no error +- **500** – server error + +### Image tarball format + +An image tarball contains one directory per image layer (named using its long ID), +each containing three files: + +1. `VERSION`: currently `1.0` - the file format version +2. `json`: detailed layer information, similar to `docker inspect layer_id` +3. `layer.tar`: A tarfile containing the filesystem changes in this layer + +The `layer.tar` file will contain `aufs` style `.wh..wh.aufs` files and directories +for storing attribute changes and deletions. + +If the tarball defines a repository, there will also be a `repositories` file at +the root that contains a list of repository and tag names mapped to layer IDs. 
+
+```
+{"hello-world":
+   {"latest":"565a9d68a73f6706862bfe8409a7f659776d4d60a8d096eb4a3cbce6999cc2a1"}
+}
+```
+
+### Exec Create
+
+`POST /containers/(id)/exec`
+
+Sets up an exec instance in a running container `id`
+
+**Example request**:
+
+    POST /containers/e90e34656806/exec HTTP/1.1
+    Content-Type: application/json
+
+    {
+        "AttachStdin":false,
+        "AttachStdout":true,
+        "AttachStderr":true,
+        "Tty":false,
+        "Cmd":[
+                "date"
+        ],
+        "Container":"e90e34656806"
+    }
+
+**Example response**:
+
+    HTTP/1.1 201 Created
+    Content-Type: application/json
+
+    {
+        "Id":"f90e34656806"
+    }
+
+Json Parameters:
+
+- **execConfig** – exec configuration.
+
+Status Codes:
+
+- **201** – no error
+- **404** – no such container
+
+### Exec Start
+
+`POST /exec/(id)/start`
+
+Starts a previously set up exec instance `id`. If `detach` is true, this API returns after
+starting the `exec` command. Otherwise, this API sets up an interactive session with the `exec` command.
+
+**Example request**:
+
+    POST /exec/e90e34656806/start HTTP/1.1
+    Content-Type: application/json
+
+    {
+        "Detach":false,
+        "Tty":false
+    }
+
+**Example response**:
+
+    HTTP/1.1 201 Created
+    Content-Type: application/json
+
+    {{ STREAM }}
+
+Json Parameters:
+
+- **execConfig** – exec configuration.
+
+Status Codes:
+
+- **201** – no error
+- **404** – no such exec instance
+
+**Stream details**:
+Similar to the stream behavior of the `POST /containers/(id)/attach` API
+
+### Exec Resize
+
+`POST /exec/(id)/resize`
+
+Resizes the tty session used by the exec command `id`.
+This API is valid only if `tty` was specified as part of creating and starting the exec command.
+
+**Example request**:
+
+    POST /exec/e90e34656806/resize HTTP/1.1
+    Content-Type: text/plain
+
+**Example response**:
+
+    HTTP/1.1 201 Created
+    Content-Type: text/plain
+
+Query Parameters:
+
+- **h** – height of tty session
+- **w** – width of tty session
+
+Status Codes:
+
+- **201** – no error
+- **404** – no such exec instance
+
+# 3. Going further
+
+## 3.1 Inside `docker run`
+
+As an example, the `docker run` command line makes the following API calls:
+
+- Create the container
+
+- If the status code is 404, it means the image doesn't exist:
+    - Try to pull it
+    - Then retry to create the container
+
+- Start the container
+
+- If you are not in detached mode:
+    - Attach to the container, using logs=1 (to have stdout and
+      stderr from the container's start) and stream=1
+
+- If in detached mode or only stdin is attached:
+    - Display the container's id
+
+## 3.2 Hijacking
+
+In this version of the API, `/attach` uses hijacking to transport stdin,
+stdout, and stderr on the same socket. This might change in the future.
+
+## 3.3 CORS Requests
+
+To enable cross-origin requests to the remote API, add the flag
+`--api-enable-cors` when running docker in daemon mode.
+
+    $ docker -d -H="192.168.1.9:2375" --api-enable-cors

From cc45b13ad4e4384f016764c15a52eda95eb548a4 Mon Sep 17 00:00:00 2001
From: Fred Lifton
Date: Fri, 24 Oct 2014 12:00:47 -0700
Subject: [PATCH 139/592] Tweaks to Dockerfile tutorial

Made a few tweaks to Dockerfile tutorial links and removed some cruft
from the tutorial itself.
Docker-DCO-1.1-Signed-off-by: Fred Lifton (github: fredlf) --- docs/sources/reference/builder.md | 3 ++- docs/sources/userguide/dockerimages.md | 4 +++- docs/sources/userguide/level2.md | 7 ++----- 3 files changed, 7 insertions(+), 7 deletions(-) diff --git a/docs/sources/reference/builder.md b/docs/sources/reference/builder.md index 4bb02e3e21..0dc5962661 100644 --- a/docs/sources/reference/builder.md +++ b/docs/sources/reference/builder.md @@ -14,7 +14,8 @@ successively. This page discusses the specifics of all the instructions you can use in your `Dockerfile`. To further help you write a clear, readable, maintainable `Dockerfile`, we've also written a [`Dockerfile` Best Practices -guide](/articles/dockerfile_best-practices). +guide](/articles/dockerfile_best-practices). Lastly, you can test your +Dockerfile knowledge with the [Dockerfile tutorial](/userguide/level1). ## Usage diff --git a/docs/sources/userguide/dockerimages.md b/docs/sources/userguide/dockerimages.md index 382eef2410..e68905c34a 100644 --- a/docs/sources/userguide/dockerimages.md +++ b/docs/sources/userguide/dockerimages.md @@ -168,7 +168,6 @@ update and create images. 1. We can update a container created from an image and commit the results to an image. 2. We can use a `Dockerfile` to specify instructions to create an image. -To learn more, check out the [Dockerfile tutorial](/userguide/level1). ### Updating and committing an image @@ -539,6 +538,9 @@ Until now we've seen how to build individual applications inside Docker containers. Now learn how to build whole application stacks with Docker by linking together multiple Docker containers. +Test your Dockerfile knowledge with the +[Dockerfile tutorial](/userguide/level1). + Go to [Linking Containers Together](/userguide/dockerlinks). diff --git a/docs/sources/userguide/level2.md b/docs/sources/userguide/level2.md index c4f2a2802c..8d16ee8a45 100644 --- a/docs/sources/userguide/level2.md +++ b/docs/sources/userguide/level2.md @@ -89,9 +89,6 @@ RUN apt-get install -y
## What's next?

-Thanks for going through our tutorial! We will be posting Level 3 shortly. Follow us on twitter
- - -

-

In the meantime, check out this blog post by Michael Crosby that describes Dockerfile Best Practices.

+Thanks for going through our tutorial! We will be posting Level 3 in the future.
+
+ Back to the Docs!
\ No newline at end of file

From a7aedca4a1afe72d63346b67b577437f175776b9 Mon Sep 17 00:00:00 2001
From: Jessica Frazelle
Date: Fri, 24 Oct 2014 11:04:54 -0700
Subject: [PATCH 140/592] Client should use go log package.

Docker-DCO-1.1-Signed-off-by: Jessica Frazelle (github: jfrazelle)
---
 docker/client.go | 2 +-
 docker/docker.go | 4 ++--
 2 files changed, 3 insertions(+), 3 deletions(-)

diff --git a/docker/client.go b/docker/client.go
index 58a67067bf..cde1a6d3dc 100644
--- a/docker/client.go
+++ b/docker/client.go
@@ -3,7 +3,7 @@
 package main

 import (
-	"github.com/docker/docker/pkg/log"
+	"log" // see gh#8745, client needs to use go log pkg
 )

 const CanDaemon = false
diff --git a/docker/docker.go b/docker/docker.go
index cb780b2443..de13c7829c 100644
--- a/docker/docker.go
+++ b/docker/docker.go
@@ -5,13 +5,13 @@ import (
 	"crypto/x509"
 	"fmt"
 	"io/ioutil"
+	"log" // see gh#8745, client needs to use go log pkg
 	"os"
 	"strings"

 	"github.com/docker/docker/api"
 	"github.com/docker/docker/api/client"
 	"github.com/docker/docker/dockerversion"
-	"github.com/docker/docker/pkg/log"
 	flag "github.com/docker/docker/pkg/mflag"
 	"github.com/docker/docker/reexec"
 	"github.com/docker/docker/utils"
@@ -106,7 +106,7 @@ func main() {
 	if err := cli.Cmd(flag.Args()...); err != nil {
 		if sterr, ok := err.(*utils.StatusError); ok {
 			if sterr.Status != "" {
-				log.Infof("%s", sterr.Status)
+				log.Println(sterr.Status)
 			}
 			os.Exit(sterr.StatusCode)
 		}

From fcae37402aac7372414fd4947b7a280bec398557 Mon Sep 17 00:00:00 2001
From: Fred Lifton
Date: Fri, 24 Oct 2014 13:17:14 -0700
Subject: [PATCH 141/592] Add back Best Practices link.

Docker-DCO-1.1-Signed-off-by: Fred Lifton (github: fredlf)
---
 docs/sources/userguide/level2.md | 2 ++
 1 file changed, 2 insertions(+)

diff --git a/docs/sources/userguide/level2.md b/docs/sources/userguide/level2.md
index 8d16ee8a45..31043b73f6 100644
--- a/docs/sources/userguide/level2.md
+++ b/docs/sources/userguide/level2.md
@@ -91,4 +91,6 @@ RUN apt-get install -y

Thanks for going through our tutorial! We will be posting Level 3 in the future. +To improve your Dockerfile writing skills even further, visit the Dockerfile best practices page. + Back to the Docs! \ No newline at end of file From 3eba719400d5016aa0cbecbed1e222f810237b5b Mon Sep 17 00:00:00 2001 From: Vincent Batts Date: Fri, 3 Oct 2014 15:46:42 -0400 Subject: [PATCH 142/592] registry/endpoint: make it testable Signed-off-by: Vincent Batts --- registry/endpoint.go | 27 +++++++++++++++++---------- registry/endpoint_test.go | 27 +++++++++++++++++++++++++++ 2 files changed, 44 insertions(+), 10 deletions(-) create mode 100644 registry/endpoint_test.go diff --git a/registry/endpoint.go b/registry/endpoint.go index 58311d32d1..99f5257854 100644 --- a/registry/endpoint.go +++ b/registry/endpoint.go @@ -35,16 +35,7 @@ func scanForAPIVersion(hostname string) (string, APIVersion) { } func NewEndpoint(hostname string) (*Endpoint, error) { - var ( - endpoint Endpoint - trimmedHostname string - err error - ) - if !strings.HasPrefix(hostname, "http") { - hostname = "https://" + hostname - } - trimmedHostname, endpoint.Version = scanForAPIVersion(hostname) - endpoint.URL, err = url.Parse(trimmedHostname) + endpoint, err := newEndpoint(hostname) if err != nil { return nil, err } @@ -59,6 +50,22 @@ func NewEndpoint(hostname string) (*Endpoint, error) { } } + return endpoint, nil +} +func newEndpoint(hostname string) (*Endpoint, error) { + var ( + endpoint Endpoint + trimmedHostname string + err error + ) + if !strings.HasPrefix(hostname, "http") { + hostname = "https://" + hostname + } + trimmedHostname, endpoint.Version = scanForAPIVersion(hostname) + endpoint.URL, err = url.Parse(trimmedHostname) + if err != nil { + return nil, err + } return &endpoint, nil } diff --git a/registry/endpoint_test.go b/registry/endpoint_test.go new file mode 100644 index 0000000000..0ec1220d9c --- /dev/null +++ b/registry/endpoint_test.go @@ -0,0 +1,27 @@ +package registry + +import "testing" + +func TestEndpointParse(t *testing.T) { + testData := []struct { + str string + expected string + }{ + {IndexServerAddress(), IndexServerAddress()}, + {"http://0.0.0.0:5000", "http://0.0.0.0:5000/v1/"}, + {"0.0.0.0:5000", "https://0.0.0.0:5000/v1/"}, + } + for _, td := range testData { + e, err := newEndpoint(td.str) + if err != nil { + t.Errorf("%q: %s", td.str, err) + } + if e == nil { + t.Logf("something's fishy, endpoint for %q is nil", td.str) + continue + } + if e.String() != td.expected { + t.Errorf("expected %q, got %q", td.expected, e.String()) + } + } +} From 6229a364324ba603789bc38607bb628177280ebf Mon Sep 17 00:00:00 2001 From: Alexandr Morozov Date: Fri, 24 Oct 2014 09:38:36 -0700 Subject: [PATCH 143/592] Vendor logrus logging framework Signed-off-by: Alexandr Morozov --- hack/vendor.sh | 2 + .../src/github.com/Sirupsen/logrus/.gitignore | 1 + .../github.com/Sirupsen/logrus/.travis.yml | 7 + vendor/src/github.com/Sirupsen/logrus/LICENSE | 21 ++ .../src/github.com/Sirupsen/logrus/README.md | 336 ++++++++++++++++++ .../src/github.com/Sirupsen/logrus/entry.go | 242 +++++++++++++ .../Sirupsen/logrus/examples/basic/basic.go | 29 ++ .../Sirupsen/logrus/examples/hook/hook.go | 35 ++ .../github.com/Sirupsen/logrus/exported.go | 177 +++++++++ .../github.com/Sirupsen/logrus/formatter.go | 54 +++ .../github.com/Sirupsen/logrus/hook_test.go | 122 +++++++ .../src/github.com/Sirupsen/logrus/hooks.go | 34 ++ .../logrus/hooks/airbrake/airbrake.go | 54 +++ .../Sirupsen/logrus/hooks/syslog/README.md | 20 ++ 
.../Sirupsen/logrus/hooks/syslog/syslog.go | 59 +++ .../logrus/hooks/syslog/syslog_test.go | 26 ++ .../Sirupsen/logrus/json_formatter.go | 19 + .../src/github.com/Sirupsen/logrus/logger.go | 161 +++++++++ .../src/github.com/Sirupsen/logrus/logrus.go | 72 ++++ .../github.com/Sirupsen/logrus/logrus_test.go | 173 +++++++++ .../Sirupsen/logrus/terminal_darwin.go | 12 + .../Sirupsen/logrus/terminal_freebsd.go | 20 ++ .../Sirupsen/logrus/terminal_linux.go | 12 + .../Sirupsen/logrus/terminal_notwindows.go | 21 ++ .../Sirupsen/logrus/terminal_windows.go | 27 ++ .../Sirupsen/logrus/text_formatter.go | 86 +++++ 26 files changed, 1822 insertions(+) create mode 100644 vendor/src/github.com/Sirupsen/logrus/.gitignore create mode 100644 vendor/src/github.com/Sirupsen/logrus/.travis.yml create mode 100644 vendor/src/github.com/Sirupsen/logrus/LICENSE create mode 100644 vendor/src/github.com/Sirupsen/logrus/README.md create mode 100644 vendor/src/github.com/Sirupsen/logrus/entry.go create mode 100644 vendor/src/github.com/Sirupsen/logrus/examples/basic/basic.go create mode 100644 vendor/src/github.com/Sirupsen/logrus/examples/hook/hook.go create mode 100644 vendor/src/github.com/Sirupsen/logrus/exported.go create mode 100644 vendor/src/github.com/Sirupsen/logrus/formatter.go create mode 100644 vendor/src/github.com/Sirupsen/logrus/hook_test.go create mode 100644 vendor/src/github.com/Sirupsen/logrus/hooks.go create mode 100644 vendor/src/github.com/Sirupsen/logrus/hooks/airbrake/airbrake.go create mode 100644 vendor/src/github.com/Sirupsen/logrus/hooks/syslog/README.md create mode 100644 vendor/src/github.com/Sirupsen/logrus/hooks/syslog/syslog.go create mode 100644 vendor/src/github.com/Sirupsen/logrus/hooks/syslog/syslog_test.go create mode 100644 vendor/src/github.com/Sirupsen/logrus/json_formatter.go create mode 100644 vendor/src/github.com/Sirupsen/logrus/logger.go create mode 100644 vendor/src/github.com/Sirupsen/logrus/logrus.go create mode 100644 vendor/src/github.com/Sirupsen/logrus/logrus_test.go create mode 100644 vendor/src/github.com/Sirupsen/logrus/terminal_darwin.go create mode 100644 vendor/src/github.com/Sirupsen/logrus/terminal_freebsd.go create mode 100644 vendor/src/github.com/Sirupsen/logrus/terminal_linux.go create mode 100644 vendor/src/github.com/Sirupsen/logrus/terminal_notwindows.go create mode 100644 vendor/src/github.com/Sirupsen/logrus/terminal_windows.go create mode 100644 vendor/src/github.com/Sirupsen/logrus/text_formatter.go diff --git a/hack/vendor.sh b/hack/vendor.sh index d43aba9ce6..2dd12c6497 100755 --- a/hack/vendor.sh +++ b/hack/vendor.sh @@ -53,6 +53,8 @@ clone hg code.google.com/p/gosqlite 74691fb6f837 clone git github.com/docker/libtrust d273ef2565ca +clone git github.com/Sirupsen/logrus v0.5.1 + # get Go tip's archive/tar, for xattr support and improved performance # TODO after Go 1.4 drops, bump our minimum supported version and drop this vendored dep if [ "$1" = '--go' ]; then diff --git a/vendor/src/github.com/Sirupsen/logrus/.gitignore b/vendor/src/github.com/Sirupsen/logrus/.gitignore new file mode 100644 index 0000000000..66be63a005 --- /dev/null +++ b/vendor/src/github.com/Sirupsen/logrus/.gitignore @@ -0,0 +1 @@ +logrus diff --git a/vendor/src/github.com/Sirupsen/logrus/.travis.yml b/vendor/src/github.com/Sirupsen/logrus/.travis.yml new file mode 100644 index 0000000000..2efbc54a17 --- /dev/null +++ b/vendor/src/github.com/Sirupsen/logrus/.travis.yml @@ -0,0 +1,7 @@ +language: go +go: + - 1.1 + - 1.2 + - tip +before_script: + - go get 
github.com/stretchr/testify diff --git a/vendor/src/github.com/Sirupsen/logrus/LICENSE b/vendor/src/github.com/Sirupsen/logrus/LICENSE new file mode 100644 index 0000000000..f090cb42f3 --- /dev/null +++ b/vendor/src/github.com/Sirupsen/logrus/LICENSE @@ -0,0 +1,21 @@ +The MIT License (MIT) + +Copyright (c) 2014 Simon Eskildsen + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. diff --git a/vendor/src/github.com/Sirupsen/logrus/README.md b/vendor/src/github.com/Sirupsen/logrus/README.md new file mode 100644 index 0000000000..6843fcc0e8 --- /dev/null +++ b/vendor/src/github.com/Sirupsen/logrus/README.md @@ -0,0 +1,336 @@ +# Logrus :walrus: [![Build Status](https://travis-ci.org/Sirupsen/logrus.svg?branch=master)](https://travis-ci.org/Sirupsen/logrus) + +Logrus is a structured logger for Go (golang), completely API compatible with +the standard library logger. [Godoc][godoc]. **Please note the Logrus API is not +yet stable (pre 1.0), the core API is unlikely change much but please version +control your Logrus to make sure you aren't fetching latest `master` on every +build.** + +Nicely color-coded in development (when a TTY is attached, otherwise just +plain text): + +![Colored](http://i.imgur.com/PY7qMwd.png) + +With `log.Formatter = new(logrus.JSONFormatter)`, for easy parsing by logstash +or Splunk: + +```json +{"animal":"walrus","level":"info","msg":"A group of walrus emerges from the +ocean","size":10,"time":"2014-03-10 19:57:38.562264131 -0400 EDT"} + +{"level":"warning","msg":"The group's number increased tremendously!", +"number":122,"omg":true,"time":"2014-03-10 19:57:38.562471297 -0400 EDT"} + +{"animal":"walrus","level":"info","msg":"A giant walrus appears!", +"size":10,"time":"2014-03-10 19:57:38.562500591 -0400 EDT"} + +{"animal":"walrus","level":"info","msg":"Tremendously sized cow enters the ocean.", +"size":9,"time":"2014-03-10 19:57:38.562527896 -0400 EDT"} + +{"level":"fatal","msg":"The ice breaks!","number":100,"omg":true, +"time":"2014-03-10 19:57:38.562543128 -0400 EDT"} +``` + +With the default `log.Formatter = new(logrus.TextFormatter)` when a TTY is not +attached, the output is compatible with the +[l2met](http://r.32k.io/l2met-introduction) format: + +```text +time="2014-04-20 15:36:23.830442383 -0400 EDT" level="info" msg="A group of walrus emerges from the ocean" animal="walrus" size=10 +time="2014-04-20 15:36:23.830584199 -0400 EDT" level="warning" msg="The group's number increased tremendously!" 
omg=true number=122 +time="2014-04-20 15:36:23.830596521 -0400 EDT" level="info" msg="A giant walrus appears!" animal="walrus" size=10 +time="2014-04-20 15:36:23.830611837 -0400 EDT" level="info" msg="Tremendously sized cow enters the ocean." animal="walrus" size=9 +time="2014-04-20 15:36:23.830626464 -0400 EDT" level="fatal" msg="The ice breaks!" omg=true number=100 +``` + +#### Example + +The simplest way to use Logrus is simply the package-level exported logger: + +```go +package main + +import ( + log "github.com/Sirupsen/logrus" +) + +func main() { + log.WithFields(log.Fields{ + "animal": "walrus", + }).Info("A walrus appears") +} +``` + +Note that it's completely api-compatible with the stdlib logger, so you can +replace your `log` imports everywhere with `log "github.com/Sirupsen/logrus"` +and you'll now have the flexibility of Logrus. You can customize it all you +want: + +```go +package main + +import ( + "os" + log "github.com/Sirupsen/logrus" + "github.com/Sirupsen/logrus/hooks/airbrake" +) + +func init() { + // Log as JSON instead of the default ASCII formatter. + log.SetFormatter(&log.JSONFormatter{}) + + // Use the Airbrake hook to report errors that have Error severity or above to + // an exception tracker. You can create custom hooks, see the Hooks section. + log.AddHook(logrus_airbrake.AirbrakeHook) + + // Output to stderr instead of stdout, could also be a file. + log.SetOutput(os.Stderr) + + // Only log the warning severity or above. + log.SetLevel(log.WarnLevel) +} + +func main() { + log.WithFields(log.Fields{ + "animal": "walrus", + "size": 10, + }).Info("A group of walrus emerges from the ocean") + + log.WithFields(log.Fields{ + "omg": true, + "number": 122, + }).Warn("The group's number increased tremendously!") + + log.WithFields(log.Fields{ + "omg": true, + "number": 100, + }).Fatal("The ice breaks!") +} +``` + +For more advanced usage such as logging to multiple locations from the same +application, you can also create an instance of the `logrus` Logger: + +```go +package main + +import ( + "github.com/Sirupsen/logrus" +) + +// Create a new instance of the logger. You can have any number of instances. +var log = logrus.New() + +func main() { + // The API for setting attributes is a little different than the package level + // exported logger. See Godoc. + log.Out = os.Stderr + + log.WithFields(log.Fields{ + "animal": "walrus", + "size": 10, + }).Info("A group of walrus emerges from the ocean") +} +``` + +#### Fields + +Logrus encourages careful, structured logging though logging fields instead of +long, unparseable error messages. For example, instead of: `log.Fatalf("Failed +to send event %s to topic %s with key %d")`, you should log the much more +discoverable: + +```go +log.WithFields(log.Fields{ + "event": event, + "topic": topic, + "key": key, +}).Fatal("Failed to send event") +``` + +We've found this API forces you to think about logging in a way that produces +much more useful logging messages. We've been in countless situations where just +a single added field to a log statement that was already there would've saved us +hours. The `WithFields` call is optional. + +In general, with Logrus using any of the `printf`-family functions should be +seen as a hint you should add a field, however, you can still use the +`printf`-family functions with Logrus. + +#### Hooks + +You can add hooks for logging levels. For example to send errors to an exception +tracking service on `Error`, `Fatal` and `Panic`, info to StatsD or log to +multiple places simultaneously, e.g. 
syslog. + +```go +// Not the real implementation of the Airbrake hook. Just a simple sample. +import ( + log "github.com/Sirupsen/logrus" +) + +func init() { + log.AddHook(new(AirbrakeHook)) +} + +type AirbrakeHook struct{} + +// `Fire()` takes the entry that the hook is fired for. `entry.Data[]` contains +// the fields for the entry. See the Fields section of the README. +func (hook *AirbrakeHook) Fire(entry *logrus.Entry) error { + err := airbrake.Notify(entry.Data["error"].(error)) + if err != nil { + log.WithFields(log.Fields{ + "source": "airbrake", + "endpoint": airbrake.Endpoint, + }).Info("Failed to send error to Airbrake") + } + + return nil +} + +// `Levels()` returns a slice of `Levels` the hook is fired for. +func (hook *AirbrakeHook) Levels() []log.Level { + return []log.Level{ + log.ErrorLevel, + log.FatalLevel, + log.PanicLevel, + } +} +``` + +Logrus comes with built-in hooks. Add those, or your custom hook, in `init`: + +```go +import ( + log "github.com/Sirupsen/logrus" + "github.com/Sirupsen/logrus/hooks/airbrake" + "github.com/Sirupsen/logrus/hooks/syslog" +) + +func init() { + log.AddHook(new(logrus_airbrake.AirbrakeHook)) + log.AddHook(logrus_syslog.NewSyslogHook("udp", "localhost:514", syslog.LOG_INFO, "")) +} +``` + +* [`github.com/Sirupsen/logrus/hooks/airbrake`](https://github.com/Sirupsen/logrus/blob/master/hooks/airbrake/airbrake.go). + Send errors to an exception tracking service compatible with the Airbrake API. + Uses [`airbrake-go`](https://github.com/tobi/airbrake-go) behind the scenes. + +* [`github.com/Sirupsen/logrus/hooks/syslog`](https://github.com/Sirupsen/logrus/blob/master/hooks/syslog/syslog.go). + Send errors to remote syslog server. + Uses standard library `log/syslog` behind the scenes. + +#### Level logging + +Logrus has six logging levels: Debug, Info, Warning, Error, Fatal and Panic. + +```go +log.Debug("Useful debugging information.") +log.Info("Something noteworthy happened!") +log.Warn("You should probably take a look at this.") +log.Error("Something failed but I'm not quitting.") +// Calls os.Exit(1) after logging +log.Fatal("Bye.") +// Calls panic() after logging +log.Panic("I'm bailing.") +``` + +You can set the logging level on a `Logger`, then it will only log entries with +that severity or anything above it: + +```go +// Will log anything that is info or above (warn, error, fatal, panic). Default. +log.SetLevel(log.InfoLevel) +``` + +It may be useful to set `log.Level = logrus.DebugLevel` in a debug or verbose +environment if your application has that. + +#### Entries + +Besides the fields added with `WithField` or `WithFields` some fields are +automatically added to all logging events: + +1. `time`. The timestamp when the entry was created. +2. `msg`. The logging message passed to `{Info,Warn,Error,Fatal,Panic}` after + the `AddFields` call. E.g. `Failed to send event.` +3. `level`. The logging level. E.g. `info`. + +#### Environments + +Logrus has no notion of environment. + +If you wish for hooks and formatters to only be used in specific environments, +you should handle that yourself. 
For example, if your application has a global +variable `Environment`, which is a string representation of the environment you +could do: + +```go +import ( + log "github.com/Sirupsen/logrus" +) + +init() { + // do something here to set environment depending on an environment variable + // or command-line flag + if Environment == "production" { + log.SetFormatter(logrus.JSONFormatter) + } else { + // The TextFormatter is default, you don't actually have to do this. + log.SetFormatter(logrus.TextFormatter) + } +} +``` + +This configuration is how `logrus` was intended to be used, but JSON in +production is mostly only useful if you do log aggregation with tools like +Splunk or Logstash. + +#### Formatters + +The built-in logging formatters are: + +* `logrus.TextFormatter`. Logs the event in colors if stdout is a tty, otherwise + without colors. + * *Note:* to force colored output when there is no TTY, set the `ForceColors` + field to `true`. To force no colored output even if there is a TTY set the + `DisableColors` field to `true` +* `logrus.JSONFormatter`. Logs fields as JSON. + +Third party logging formatters: + +* [`zalgo`](https://github.com/aybabtme/logzalgo): invoking the P͉̫o̳̼̊w̖͈̰͎e̬͔̭͂r͚̼̹̲ ̫͓͉̳͈ō̠͕͖̚f̝͍̠ ͕̲̞͖͑Z̖̫̤̫ͪa͉̬͈̗l͖͎g̳̥o̰̥̅!̣͔̲̻͊̄ ̙̘̦̹̦. + +You can define your formatter by implementing the `Formatter` interface, +requiring a `Format` method. `Format` takes an `*Entry`. `entry.Data` is a +`Fields` type (`map[string]interface{}`) with all your fields as well as the +default ones (see Entries section above): + +```go +type MyJSONFormatter struct { +} + +log.SetFormatter(new(MyJSONFormatter)) + +func (f *JSONFormatter) Format(entry *Entry) ([]byte, error) { + // Note this doesn't include Time, Level and Message which are available on + // the Entry. Consult `godoc` on information about those fields or read the + // source of the official loggers. + serialized, err := json.Marshal(entry.Data) + if err != nil { + return nil, fmt.Errorf("Failed to marshal fields to JSON, %v", err) + } + return append(serialized, '\n'), nil +} +``` + +#### Rotation + +Log rotation is not provided with Logrus. Log rotation should be done by an +external program (like `logrotated(8)`) that can compress and delete old log +entries. It should not be a feature of the application-level logger. + + +[godoc]: https://godoc.org/github.com/Sirupsen/logrus diff --git a/vendor/src/github.com/Sirupsen/logrus/entry.go b/vendor/src/github.com/Sirupsen/logrus/entry.go new file mode 100644 index 0000000000..44ff0566c9 --- /dev/null +++ b/vendor/src/github.com/Sirupsen/logrus/entry.go @@ -0,0 +1,242 @@ +package logrus + +import ( + "bytes" + "fmt" + "io" + "os" + "time" +) + +// An entry is the final or intermediate Logrus logging entry. It containts all +// the fields passed with WithField{,s}. It's finally logged when Debug, Info, +// Warn, Error, Fatal or Panic is called on it. These objects can be reused and +// passed around as much as you wish to avoid field duplication. +type Entry struct { + Logger *Logger + + // Contains all the fields set by the user. 
+ Data Fields + + // Time at which the log entry was created + Time time.Time + + // Level the log entry was logged at: Debug, Info, Warn, Error, Fatal or Panic + Level Level + + // Message passed to Debug, Info, Warn, Error, Fatal or Panic + Message string +} + +var baseTimestamp time.Time + +func NewEntry(logger *Logger) *Entry { + return &Entry{ + Logger: logger, + // Default is three fields, give a little extra room + Data: make(Fields, 5), + } +} + +// Returns a reader for the entry, which is a proxy to the formatter. +func (entry *Entry) Reader() (*bytes.Buffer, error) { + serialized, err := entry.Logger.Formatter.Format(entry) + return bytes.NewBuffer(serialized), err +} + +// Returns the string representation from the reader and ultimately the +// formatter. +func (entry *Entry) String() (string, error) { + reader, err := entry.Reader() + if err != nil { + return "", err + } + + return reader.String(), err +} + +// Add a single field to the Entry. +func (entry *Entry) WithField(key string, value interface{}) *Entry { + return entry.WithFields(Fields{key: value}) +} + +// Add a map of fields to the Entry. +func (entry *Entry) WithFields(fields Fields) *Entry { + data := Fields{} + for k, v := range entry.Data { + data[k] = v + } + for k, v := range fields { + data[k] = v + } + return &Entry{Logger: entry.Logger, Data: data} +} + +func (entry *Entry) log(level Level, msg string) string { + entry.Time = time.Now() + entry.Level = level + entry.Message = msg + + if err := entry.Logger.Hooks.Fire(level, entry); err != nil { + fmt.Fprintf(os.Stderr, "Failed to fire hook", err) + } + + reader, err := entry.Reader() + if err != nil { + fmt.Fprintf(os.Stderr, "Failed to obtain reader, %v", err) + } + + entry.Logger.mu.Lock() + defer entry.Logger.mu.Unlock() + + _, err = io.Copy(entry.Logger.Out, reader) + if err != nil { + fmt.Fprintf(os.Stderr, "Failed to write to log, %v", err) + } + + return reader.String() +} + +func (entry *Entry) Debug(args ...interface{}) { + if entry.Logger.Level >= DebugLevel { + entry.log(DebugLevel, fmt.Sprint(args...)) + } +} + +func (entry *Entry) Print(args ...interface{}) { + entry.Info(args...) +} + +func (entry *Entry) Info(args ...interface{}) { + if entry.Logger.Level >= InfoLevel { + entry.log(InfoLevel, fmt.Sprint(args...)) + } +} + +func (entry *Entry) Warn(args ...interface{}) { + if entry.Logger.Level >= WarnLevel { + entry.log(WarnLevel, fmt.Sprint(args...)) + } +} + +func (entry *Entry) Error(args ...interface{}) { + if entry.Logger.Level >= ErrorLevel { + entry.log(ErrorLevel, fmt.Sprint(args...)) + } +} + +func (entry *Entry) Fatal(args ...interface{}) { + if entry.Logger.Level >= FatalLevel { + entry.log(FatalLevel, fmt.Sprint(args...)) + } + os.Exit(1) +} + +func (entry *Entry) Panic(args ...interface{}) { + if entry.Logger.Level >= PanicLevel { + msg := entry.log(PanicLevel, fmt.Sprint(args...)) + panic(msg) + } + panic(fmt.Sprint(args...)) +} + +// Entry Printf family functions + +func (entry *Entry) Debugf(format string, args ...interface{}) { + if entry.Logger.Level >= DebugLevel { + entry.Debug(fmt.Sprintf(format, args...)) + } +} + +func (entry *Entry) Infof(format string, args ...interface{}) { + if entry.Logger.Level >= InfoLevel { + entry.Info(fmt.Sprintf(format, args...)) + } +} + +func (entry *Entry) Printf(format string, args ...interface{}) { + entry.Infof(format, args...) 
+} + +func (entry *Entry) Warnf(format string, args ...interface{}) { + if entry.Logger.Level >= WarnLevel { + entry.Warn(fmt.Sprintf(format, args...)) + } +} + +func (entry *Entry) Warningf(format string, args ...interface{}) { + entry.Warnf(format, args...) +} + +func (entry *Entry) Errorf(format string, args ...interface{}) { + if entry.Logger.Level >= ErrorLevel { + entry.Error(fmt.Sprintf(format, args...)) + } +} + +func (entry *Entry) Fatalf(format string, args ...interface{}) { + if entry.Logger.Level >= FatalLevel { + entry.Fatal(fmt.Sprintf(format, args...)) + } +} + +func (entry *Entry) Panicf(format string, args ...interface{}) { + if entry.Logger.Level >= PanicLevel { + entry.Panic(fmt.Sprintf(format, args...)) + } +} + +// Entry Println family functions + +func (entry *Entry) Debugln(args ...interface{}) { + if entry.Logger.Level >= DebugLevel { + entry.Debug(entry.sprintlnn(args...)) + } +} + +func (entry *Entry) Infoln(args ...interface{}) { + if entry.Logger.Level >= InfoLevel { + entry.Info(entry.sprintlnn(args...)) + } +} + +func (entry *Entry) Println(args ...interface{}) { + entry.Infoln(args...) +} + +func (entry *Entry) Warnln(args ...interface{}) { + if entry.Logger.Level >= WarnLevel { + entry.Warn(entry.sprintlnn(args...)) + } +} + +func (entry *Entry) Warningln(args ...interface{}) { + entry.Warnln(args...) +} + +func (entry *Entry) Errorln(args ...interface{}) { + if entry.Logger.Level >= ErrorLevel { + entry.Error(entry.sprintlnn(args...)) + } +} + +func (entry *Entry) Fatalln(args ...interface{}) { + if entry.Logger.Level >= FatalLevel { + entry.Fatal(entry.sprintlnn(args...)) + } +} + +func (entry *Entry) Panicln(args ...interface{}) { + if entry.Logger.Level >= PanicLevel { + entry.Panic(entry.sprintlnn(args...)) + } +} + +// Sprintlnn => Sprint no newline. This is to get the behavior of how +// fmt.Sprintln where spaces are always added between operands, regardless of +// their type. Instead of vendoring the Sprintln implementation to spare a +// string allocation, we do the simplest thing. +func (entry *Entry) sprintlnn(args ...interface{}) string { + msg := fmt.Sprintln(args...) 
+ return msg[:len(msg)-1] +} diff --git a/vendor/src/github.com/Sirupsen/logrus/examples/basic/basic.go b/vendor/src/github.com/Sirupsen/logrus/examples/basic/basic.go new file mode 100644 index 0000000000..35945509c3 --- /dev/null +++ b/vendor/src/github.com/Sirupsen/logrus/examples/basic/basic.go @@ -0,0 +1,29 @@ +package main + +import ( + "github.com/Sirupsen/logrus" +) + +var log = logrus.New() + +func init() { + log.Formatter = new(logrus.JSONFormatter) + log.Formatter = new(logrus.TextFormatter) // default +} + +func main() { + log.WithFields(logrus.Fields{ + "animal": "walrus", + "size": 10, + }).Info("A group of walrus emerges from the ocean") + + log.WithFields(logrus.Fields{ + "omg": true, + "number": 122, + }).Warn("The group's number increased tremendously!") + + log.WithFields(logrus.Fields{ + "omg": true, + "number": 100, + }).Fatal("The ice breaks!") +} diff --git a/vendor/src/github.com/Sirupsen/logrus/examples/hook/hook.go b/vendor/src/github.com/Sirupsen/logrus/examples/hook/hook.go new file mode 100644 index 0000000000..42e7a4c982 --- /dev/null +++ b/vendor/src/github.com/Sirupsen/logrus/examples/hook/hook.go @@ -0,0 +1,35 @@ +package main + +import ( + "github.com/Sirupsen/logrus" + "github.com/Sirupsen/logrus/hooks/airbrake" + "github.com/tobi/airbrake-go" +) + +var log = logrus.New() + +func init() { + log.Formatter = new(logrus.TextFormatter) // default + log.Hooks.Add(new(logrus_airbrake.AirbrakeHook)) +} + +func main() { + airbrake.Endpoint = "https://exceptions.whatever.com/notifier_api/v2/notices.xml" + airbrake.ApiKey = "whatever" + airbrake.Environment = "production" + + log.WithFields(logrus.Fields{ + "animal": "walrus", + "size": 10, + }).Info("A group of walrus emerges from the ocean") + + log.WithFields(logrus.Fields{ + "omg": true, + "number": 122, + }).Warn("The group's number increased tremendously!") + + log.WithFields(logrus.Fields{ + "omg": true, + "number": 100, + }).Fatal("The ice breaks!") +} diff --git a/vendor/src/github.com/Sirupsen/logrus/exported.go b/vendor/src/github.com/Sirupsen/logrus/exported.go new file mode 100644 index 0000000000..383ce93d4d --- /dev/null +++ b/vendor/src/github.com/Sirupsen/logrus/exported.go @@ -0,0 +1,177 @@ +package logrus + +import ( + "io" +) + +var ( + // std is the name of the standard logger in stdlib `log` + std = New() +) + +// SetOutput sets the standard logger output. +func SetOutput(out io.Writer) { + std.mu.Lock() + defer std.mu.Unlock() + std.Out = out +} + +// SetFormatter sets the standard logger formatter. +func SetFormatter(formatter Formatter) { + std.mu.Lock() + defer std.mu.Unlock() + std.Formatter = formatter +} + +// SetLevel sets the standard logger level. +func SetLevel(level Level) { + std.mu.Lock() + defer std.mu.Unlock() + std.Level = level +} + +// AddHook adds a hook to the standard logger hooks. +func AddHook(hook Hook) { + std.mu.Lock() + defer std.mu.Unlock() + std.Hooks.Add(hook) +} + +// WithField creates an entry from the standard logger and adds a field to +// it. If you want multiple fields, use `WithFields`. +// +// Note that it doesn't log until you call Debug, Print, Info, Warn, Fatal +// or Panic on the Entry it returns. +func WithField(key string, value interface{}) *Entry { + return std.WithField(key, value) +} + +// WithFields creates an entry from the standard logger and adds multiple +// fields to it. This is simply a helper for `WithField`, invoking it +// once for each field. 
+// +// Note that it doesn't log until you call Debug, Print, Info, Warn, Fatal +// or Panic on the Entry it returns. +func WithFields(fields Fields) *Entry { + return std.WithFields(fields) +} + +// Debug logs a message at level Debug on the standard logger. +func Debug(args ...interface{}) { + std.Debug(args...) +} + +// Print logs a message at level Info on the standard logger. +func Print(args ...interface{}) { + std.Print(args...) +} + +// Info logs a message at level Info on the standard logger. +func Info(args ...interface{}) { + std.Info(args...) +} + +// Warn logs a message at level Warn on the standard logger. +func Warn(args ...interface{}) { + std.Warn(args...) +} + +// Warning logs a message at level Warn on the standard logger. +func Warning(args ...interface{}) { + std.Warning(args...) +} + +// Error logs a message at level Error on the standard logger. +func Error(args ...interface{}) { + std.Error(args...) +} + +// Panic logs a message at level Panic on the standard logger. +func Panic(args ...interface{}) { + std.Panic(args...) +} + +// Fatal logs a message at level Fatal on the standard logger. +func Fatal(args ...interface{}) { + std.Fatal(args...) +} + +// Debugf logs a message at level Debugf on the standard logger. +func Debugf(format string, args ...interface{}) { + std.Debugf(format, args...) +} + +// Printf logs a message at level Info on the standard logger. +func Printf(format string, args ...interface{}) { + std.Printf(format, args...) +} + +// Infof logs a message at level Info on the standard logger. +func Infof(format string, args ...interface{}) { + std.Infof(format, args...) +} + +// Warnf logs a message at level Warn on the standard logger. +func Warnf(format string, args ...interface{}) { + std.Warnf(format, args...) +} + +// Warningf logs a message at level Warn on the standard logger. +func Warningf(format string, args ...interface{}) { + std.Warningf(format, args...) +} + +// Errorf logs a message at level Error on the standard logger. +func Errorf(format string, args ...interface{}) { + std.Errorf(format, args...) +} + +// Panicf logs a message at level Pancf on the standard logger. +func Panicf(format string, args ...interface{}) { + std.Panicf(format, args...) +} + +// Fatalf logs a message at level Fatal on the standard logger. +func Fatalf(format string, args ...interface{}) { + std.Fatalf(format, args...) +} + +// Debugln logs a message at level Debug on the standard logger. +func Debugln(args ...interface{}) { + std.Debugln(args...) +} + +// Println logs a message at level Info on the standard logger. +func Println(args ...interface{}) { + std.Println(args...) +} + +// Infoln logs a message at level Info on the standard logger. +func Infoln(args ...interface{}) { + std.Infoln(args...) +} + +// Warnln logs a message at level Warn on the standard logger. +func Warnln(args ...interface{}) { + std.Warnln(args...) +} + +// Warningln logs a message at level Warn on the standard logger. +func Warningln(args ...interface{}) { + std.Warningln(args...) +} + +// Errorln logs a message at level Error on the standard logger. +func Errorln(args ...interface{}) { + std.Errorln(args...) +} + +// Panicln logs a message at level Panic on the standard logger. +func Panicln(args ...interface{}) { + std.Panicln(args...) +} + +// Fatalln logs a message at level Fatal on the standard logger. +func Fatalln(args ...interface{}) { + std.Fatalln(args...) 
+} diff --git a/vendor/src/github.com/Sirupsen/logrus/formatter.go b/vendor/src/github.com/Sirupsen/logrus/formatter.go new file mode 100644 index 0000000000..fc0ebd7a97 --- /dev/null +++ b/vendor/src/github.com/Sirupsen/logrus/formatter.go @@ -0,0 +1,54 @@ +package logrus + +import ( + "time" +) + +// The Formatter interface is used to implement a custom Formatter. It takes an +// `Entry`. It exposes all the fields, including the default ones: +// +// * `entry.Data["msg"]`. The message passed from Info, Warn, Error .. +// * `entry.Data["time"]`. The timestamp. +// * `entry.Data["level"]. The level the entry was logged at. +// +// Any additional fields added with `WithField` or `WithFields` are also in +// `entry.Data`. Format is expected to return an array of bytes which are then +// logged to `logger.Out`. +type Formatter interface { + Format(*Entry) ([]byte, error) +} + +// This is to not silently overwrite `time`, `msg` and `level` fields when +// dumping it. If this code wasn't there doing: +// +// logrus.WithField("level", 1).Info("hello") +// +// Would just silently drop the user provided level. Instead with this code +// it'll logged as: +// +// {"level": "info", "fields.level": 1, "msg": "hello", "time": "..."} +// +// It's not exported because it's still using Data in an opionated way. It's to +// avoid code duplication between the two default formatters. +func prefixFieldClashes(entry *Entry) { + _, ok := entry.Data["time"] + if ok { + entry.Data["fields.time"] = entry.Data["time"] + } + + entry.Data["time"] = entry.Time.Format(time.RFC3339) + + _, ok = entry.Data["msg"] + if ok { + entry.Data["fields.msg"] = entry.Data["msg"] + } + + entry.Data["msg"] = entry.Message + + _, ok = entry.Data["level"] + if ok { + entry.Data["fields.level"] = entry.Data["level"] + } + + entry.Data["level"] = entry.Level.String() +} diff --git a/vendor/src/github.com/Sirupsen/logrus/hook_test.go b/vendor/src/github.com/Sirupsen/logrus/hook_test.go new file mode 100644 index 0000000000..13f34cb6f8 --- /dev/null +++ b/vendor/src/github.com/Sirupsen/logrus/hook_test.go @@ -0,0 +1,122 @@ +package logrus + +import ( + "testing" + + "github.com/stretchr/testify/assert" +) + +type TestHook struct { + Fired bool +} + +func (hook *TestHook) Fire(entry *Entry) error { + hook.Fired = true + return nil +} + +func (hook *TestHook) Levels() []Level { + return []Level{ + DebugLevel, + InfoLevel, + WarnLevel, + ErrorLevel, + FatalLevel, + PanicLevel, + } +} + +func TestHookFires(t *testing.T) { + hook := new(TestHook) + + LogAndAssertJSON(t, func(log *Logger) { + log.Hooks.Add(hook) + assert.Equal(t, hook.Fired, false) + + log.Print("test") + }, func(fields Fields) { + assert.Equal(t, hook.Fired, true) + }) +} + +type ModifyHook struct { +} + +func (hook *ModifyHook) Fire(entry *Entry) error { + entry.Data["wow"] = "whale" + return nil +} + +func (hook *ModifyHook) Levels() []Level { + return []Level{ + DebugLevel, + InfoLevel, + WarnLevel, + ErrorLevel, + FatalLevel, + PanicLevel, + } +} + +func TestHookCanModifyEntry(t *testing.T) { + hook := new(ModifyHook) + + LogAndAssertJSON(t, func(log *Logger) { + log.Hooks.Add(hook) + log.WithField("wow", "elephant").Print("test") + }, func(fields Fields) { + assert.Equal(t, fields["wow"], "whale") + }) +} + +func TestCanFireMultipleHooks(t *testing.T) { + hook1 := new(ModifyHook) + hook2 := new(TestHook) + + LogAndAssertJSON(t, func(log *Logger) { + log.Hooks.Add(hook1) + log.Hooks.Add(hook2) + + log.WithField("wow", "elephant").Print("test") + }, func(fields Fields) { + 
assert.Equal(t, fields["wow"], "whale") + assert.Equal(t, hook2.Fired, true) + }) +} + +type ErrorHook struct { + Fired bool +} + +func (hook *ErrorHook) Fire(entry *Entry) error { + hook.Fired = true + return nil +} + +func (hook *ErrorHook) Levels() []Level { + return []Level{ + ErrorLevel, + } +} + +func TestErrorHookShouldntFireOnInfo(t *testing.T) { + hook := new(ErrorHook) + + LogAndAssertJSON(t, func(log *Logger) { + log.Hooks.Add(hook) + log.Info("test") + }, func(fields Fields) { + assert.Equal(t, hook.Fired, false) + }) +} + +func TestErrorHookShouldFireOnError(t *testing.T) { + hook := new(ErrorHook) + + LogAndAssertJSON(t, func(log *Logger) { + log.Hooks.Add(hook) + log.Error("test") + }, func(fields Fields) { + assert.Equal(t, hook.Fired, true) + }) +} diff --git a/vendor/src/github.com/Sirupsen/logrus/hooks.go b/vendor/src/github.com/Sirupsen/logrus/hooks.go new file mode 100644 index 0000000000..0da2b3653f --- /dev/null +++ b/vendor/src/github.com/Sirupsen/logrus/hooks.go @@ -0,0 +1,34 @@ +package logrus + +// A hook to be fired when logging on the logging levels returned from +// `Levels()` on your implementation of the interface. Note that this is not +// fired in a goroutine or a channel with workers, you should handle such +// functionality yourself if your call is non-blocking and you don't wish for +// the logging calls for levels returned from `Levels()` to block. +type Hook interface { + Levels() []Level + Fire(*Entry) error +} + +// Internal type for storing the hooks on a logger instance. +type levelHooks map[Level][]Hook + +// Add a hook to an instance of logger. This is called with +// `log.Hooks.Add(new(MyHook))` where `MyHook` implements the `Hook` interface. +func (hooks levelHooks) Add(hook Hook) { + for _, level := range hook.Levels() { + hooks[level] = append(hooks[level], hook) + } +} + +// Fire all the hooks for the passed level. Used by `entry.log` to fire +// appropriate hooks for a log entry. +func (hooks levelHooks) Fire(level Level, entry *Entry) error { + for _, hook := range hooks[level] { + if err := hook.Fire(entry); err != nil { + return err + } + } + + return nil +} diff --git a/vendor/src/github.com/Sirupsen/logrus/hooks/airbrake/airbrake.go b/vendor/src/github.com/Sirupsen/logrus/hooks/airbrake/airbrake.go new file mode 100644 index 0000000000..880d21ecdc --- /dev/null +++ b/vendor/src/github.com/Sirupsen/logrus/hooks/airbrake/airbrake.go @@ -0,0 +1,54 @@ +package logrus_airbrake + +import ( + "github.com/Sirupsen/logrus" + "github.com/tobi/airbrake-go" +) + +// AirbrakeHook to send exceptions to an exception-tracking service compatible +// with the Airbrake API. You must set: +// * airbrake.Endpoint +// * airbrake.ApiKey +// * airbrake.Environment (only sends exceptions when set to "production") +// +// Before using this hook, to send an error. Entries that trigger an Error, +// Fatal or Panic should now include an "error" field to send to Airbrake. 
+type AirbrakeHook struct{} + +func (hook *AirbrakeHook) Fire(entry *logrus.Entry) error { + if entry.Data["error"] == nil { + entry.Logger.WithFields(logrus.Fields{ + "source": "airbrake", + "endpoint": airbrake.Endpoint, + }).Warn("Exceptions sent to Airbrake must have an 'error' key with the error") + return nil + } + + err, ok := entry.Data["error"].(error) + if !ok { + entry.Logger.WithFields(logrus.Fields{ + "source": "airbrake", + "endpoint": airbrake.Endpoint, + }).Warn("Exceptions sent to Airbrake must have an `error` key of type `error`") + return nil + } + + airErr := airbrake.Notify(err) + if airErr != nil { + entry.Logger.WithFields(logrus.Fields{ + "source": "airbrake", + "endpoint": airbrake.Endpoint, + "error": airErr, + }).Warn("Failed to send error to Airbrake") + } + + return nil +} + +func (hook *AirbrakeHook) Levels() []logrus.Level { + return []logrus.Level{ + logrus.ErrorLevel, + logrus.FatalLevel, + logrus.PanicLevel, + } +} diff --git a/vendor/src/github.com/Sirupsen/logrus/hooks/syslog/README.md b/vendor/src/github.com/Sirupsen/logrus/hooks/syslog/README.md new file mode 100644 index 0000000000..cd706bc1b1 --- /dev/null +++ b/vendor/src/github.com/Sirupsen/logrus/hooks/syslog/README.md @@ -0,0 +1,20 @@ +# Syslog Hooks for Logrus :walrus: + +## Usage + +```go +import ( + "log/syslog" + "github.com/Sirupsen/logrus" + "github.com/Sirupsen/logrus/hooks/syslog" +) + +func main() { + log := logrus.New() + hook, err := logrus_syslog.NewSyslogHook("udp", "localhost:514", syslog.LOG_INFO, "") + + if err == nil { + log.Hooks.Add(hook) + } +} +``` \ No newline at end of file diff --git a/vendor/src/github.com/Sirupsen/logrus/hooks/syslog/syslog.go b/vendor/src/github.com/Sirupsen/logrus/hooks/syslog/syslog.go new file mode 100644 index 0000000000..2a18ce6130 --- /dev/null +++ b/vendor/src/github.com/Sirupsen/logrus/hooks/syslog/syslog.go @@ -0,0 +1,59 @@ +package logrus_syslog + +import ( + "fmt" + "github.com/Sirupsen/logrus" + "log/syslog" + "os" +) + +// SyslogHook to send logs via syslog. +type SyslogHook struct { + Writer *syslog.Writer + SyslogNetwork string + SyslogRaddr string +} + +// Creates a hook to be added to an instance of logger. 
This is called with +// `hook, err := NewSyslogHook("udp", "localhost:514", syslog.LOG_DEBUG, "")` +// `if err == nil { log.Hooks.Add(hook) }` +func NewSyslogHook(network, raddr string, priority syslog.Priority, tag string) (*SyslogHook, error) { + w, err := syslog.Dial(network, raddr, priority, tag) + return &SyslogHook{w, network, raddr}, err +} + +func (hook *SyslogHook) Fire(entry *logrus.Entry) error { + line, err := entry.String() + if err != nil { + fmt.Fprintf(os.Stderr, "Unable to read entry, %v", err) + return err + } + + switch entry.Data["level"] { + case "panic": + return hook.Writer.Crit(line) + case "fatal": + return hook.Writer.Crit(line) + case "error": + return hook.Writer.Err(line) + case "warn": + return hook.Writer.Warning(line) + case "info": + return hook.Writer.Info(line) + case "debug": + return hook.Writer.Debug(line) + default: + return nil + } +} + +func (hook *SyslogHook) Levels() []logrus.Level { + return []logrus.Level{ + logrus.PanicLevel, + logrus.FatalLevel, + logrus.ErrorLevel, + logrus.WarnLevel, + logrus.InfoLevel, + logrus.DebugLevel, + } +} diff --git a/vendor/src/github.com/Sirupsen/logrus/hooks/syslog/syslog_test.go b/vendor/src/github.com/Sirupsen/logrus/hooks/syslog/syslog_test.go new file mode 100644 index 0000000000..42762dc10d --- /dev/null +++ b/vendor/src/github.com/Sirupsen/logrus/hooks/syslog/syslog_test.go @@ -0,0 +1,26 @@ +package logrus_syslog + +import ( + "github.com/Sirupsen/logrus" + "log/syslog" + "testing" +) + +func TestLocalhostAddAndPrint(t *testing.T) { + log := logrus.New() + hook, err := NewSyslogHook("udp", "localhost:514", syslog.LOG_INFO, "") + + if err != nil { + t.Errorf("Unable to connect to local syslog.") + } + + log.Hooks.Add(hook) + + for _, level := range hook.Levels() { + if len(log.Hooks[level]) != 1 { + t.Errorf("SyslogHook was not added. The length of log.Hooks[%v]: %v", level, len(log.Hooks[level])) + } + } + + log.Info("Congratulations!") +} diff --git a/vendor/src/github.com/Sirupsen/logrus/json_formatter.go b/vendor/src/github.com/Sirupsen/logrus/json_formatter.go new file mode 100644 index 0000000000..c0e2d18436 --- /dev/null +++ b/vendor/src/github.com/Sirupsen/logrus/json_formatter.go @@ -0,0 +1,19 @@ +package logrus + +import ( + "encoding/json" + "fmt" +) + +type JSONFormatter struct { +} + +func (f *JSONFormatter) Format(entry *Entry) ([]byte, error) { + prefixFieldClashes(entry) + + serialized, err := json.Marshal(entry.Data) + if err != nil { + return nil, fmt.Errorf("Failed to marshal fields to JSON, %v", err) + } + return append(serialized, '\n'), nil +} diff --git a/vendor/src/github.com/Sirupsen/logrus/logger.go b/vendor/src/github.com/Sirupsen/logrus/logger.go new file mode 100644 index 0000000000..7374fe365d --- /dev/null +++ b/vendor/src/github.com/Sirupsen/logrus/logger.go @@ -0,0 +1,161 @@ +package logrus + +import ( + "io" + "os" + "sync" +) + +type Logger struct { + // The logs are `io.Copy`'d to this in a mutex. It's common to set this to a + // file, or leave it default which is `os.Stdout`. You can also set this to + // something more adventorous, such as logging to Kafka. + Out io.Writer + // Hooks for the logger instance. These allow firing events based on logging + // levels and log entries. For example, to send errors to an error tracking + // service, log to StatsD or dump the core on fatal errors. + Hooks levelHooks + // All log entries pass through the formatter before logged to Out. 
The + // included formatters are `TextFormatter` and `JSONFormatter` for which + // TextFormatter is the default. In development (when a TTY is attached) it + // logs with colors, but to a file it wouldn't. You can easily implement your + // own that implements the `Formatter` interface, see the `README` or included + // formatters for examples. + Formatter Formatter + // The logging level the logger should log at. This is typically (and defaults + // to) `logrus.Info`, which allows Info(), Warn(), Error() and Fatal() to be + // logged. `logrus.Debug` is useful in + Level Level + // Used to sync writing to the log. + mu sync.Mutex +} + +// Creates a new logger. Configuration should be set by changing `Formatter`, +// `Out` and `Hooks` directly on the default logger instance. You can also just +// instantiate your own: +// +// var log = &Logger{ +// Out: os.Stderr, +// Formatter: new(JSONFormatter), +// Hooks: make(levelHooks), +// Level: logrus.Debug, +// } +// +// It's recommended to make this a global instance called `log`. +func New() *Logger { + return &Logger{ + Out: os.Stdout, + Formatter: new(TextFormatter), + Hooks: make(levelHooks), + Level: InfoLevel, + } +} + +// Adds a field to the log entry, note that you it doesn't log until you call +// Debug, Print, Info, Warn, Fatal or Panic. It only creates a log entry. +// Ff you want multiple fields, use `WithFields`. +func (logger *Logger) WithField(key string, value interface{}) *Entry { + return NewEntry(logger).WithField(key, value) +} + +// Adds a struct of fields to the log entry. All it does is call `WithField` for +// each `Field`. +func (logger *Logger) WithFields(fields Fields) *Entry { + return NewEntry(logger).WithFields(fields) +} + +func (logger *Logger) Debugf(format string, args ...interface{}) { + NewEntry(logger).Debugf(format, args...) +} + +func (logger *Logger) Infof(format string, args ...interface{}) { + NewEntry(logger).Infof(format, args...) +} + +func (logger *Logger) Printf(format string, args ...interface{}) { + NewEntry(logger).Printf(format, args...) +} + +func (logger *Logger) Warnf(format string, args ...interface{}) { + NewEntry(logger).Warnf(format, args...) +} + +func (logger *Logger) Warningf(format string, args ...interface{}) { + NewEntry(logger).Warnf(format, args...) +} + +func (logger *Logger) Errorf(format string, args ...interface{}) { + NewEntry(logger).Errorf(format, args...) +} + +func (logger *Logger) Fatalf(format string, args ...interface{}) { + NewEntry(logger).Fatalf(format, args...) +} + +func (logger *Logger) Panicf(format string, args ...interface{}) { + NewEntry(logger).Panicf(format, args...) +} + +func (logger *Logger) Debug(args ...interface{}) { + NewEntry(logger).Debug(args...) +} + +func (logger *Logger) Info(args ...interface{}) { + NewEntry(logger).Info(args...) +} + +func (logger *Logger) Print(args ...interface{}) { + NewEntry(logger).Info(args...) +} + +func (logger *Logger) Warn(args ...interface{}) { + NewEntry(logger).Warn(args...) +} + +func (logger *Logger) Warning(args ...interface{}) { + NewEntry(logger).Warn(args...) +} + +func (logger *Logger) Error(args ...interface{}) { + NewEntry(logger).Error(args...) +} + +func (logger *Logger) Fatal(args ...interface{}) { + NewEntry(logger).Fatal(args...) +} + +func (logger *Logger) Panic(args ...interface{}) { + NewEntry(logger).Panic(args...) +} + +func (logger *Logger) Debugln(args ...interface{}) { + NewEntry(logger).Debugln(args...) 
+} + +func (logger *Logger) Infoln(args ...interface{}) { + NewEntry(logger).Infoln(args...) +} + +func (logger *Logger) Println(args ...interface{}) { + NewEntry(logger).Println(args...) +} + +func (logger *Logger) Warnln(args ...interface{}) { + NewEntry(logger).Warnln(args...) +} + +func (logger *Logger) Warningln(args ...interface{}) { + NewEntry(logger).Warnln(args...) +} + +func (logger *Logger) Errorln(args ...interface{}) { + NewEntry(logger).Errorln(args...) +} + +func (logger *Logger) Fatalln(args ...interface{}) { + NewEntry(logger).Fatalln(args...) +} + +func (logger *Logger) Panicln(args ...interface{}) { + NewEntry(logger).Panicln(args...) +} diff --git a/vendor/src/github.com/Sirupsen/logrus/logrus.go b/vendor/src/github.com/Sirupsen/logrus/logrus.go new file mode 100644 index 0000000000..79df39cb71 --- /dev/null +++ b/vendor/src/github.com/Sirupsen/logrus/logrus.go @@ -0,0 +1,72 @@ +package logrus + +import ( + "log" +) + +// Fields type, used to pass to `WithFields`. +type Fields map[string]interface{} + +// Level type +type Level uint8 + +// Convert the Level to a string. E.g. PanicLevel becomes "panic". +func (level Level) String() string { + switch level { + case DebugLevel: + return "debug" + case InfoLevel: + return "info" + case WarnLevel: + return "warning" + case ErrorLevel: + return "error" + case FatalLevel: + return "fatal" + case PanicLevel: + return "panic" + } + + return "unknown" +} + +// These are the different logging levels. You can set the logging level to log +// on your instance of logger, obtained with `logrus.New()`. +const ( + // PanicLevel level, highest level of severity. Logs and then calls panic with the + // message passed to Debug, Info, ... + PanicLevel Level = iota + // FatalLevel level. Logs and then calls `os.Exit(1)`. It will exit even if the + // logging level is set to Panic. + FatalLevel + // ErrorLevel level. Logs. Used for errors that should definitely be noted. + // Commonly used for hooks to send errors to an error tracking service. + ErrorLevel + // WarnLevel level. Non-critical entries that deserve eyes. + WarnLevel + // InfoLevel level. General operational entries about what's going on inside the + // application. + InfoLevel + // DebugLevel level. Usually only enabled when debugging. Very verbose logging. + DebugLevel +) + +// Won't compile if StdLogger can't be realized by a log.Logger +var _ StdLogger = &log.Logger{} + +// StdLogger is what your logrus-enabled library should take, that way +// it'll accept a stdlib logger and a logrus logger. There's no standard +// interface, this is the closest we get, unfortunately. 
+type StdLogger interface { + Print(...interface{}) + Printf(string, ...interface{}) + Println(...interface{}) + + Fatal(...interface{}) + Fatalf(string, ...interface{}) + Fatalln(...interface{}) + + Panic(...interface{}) + Panicf(string, ...interface{}) + Panicln(...interface{}) +} diff --git a/vendor/src/github.com/Sirupsen/logrus/logrus_test.go b/vendor/src/github.com/Sirupsen/logrus/logrus_test.go new file mode 100644 index 0000000000..6202300366 --- /dev/null +++ b/vendor/src/github.com/Sirupsen/logrus/logrus_test.go @@ -0,0 +1,173 @@ +package logrus + +import ( + "bytes" + "encoding/json" + "testing" + + "github.com/stretchr/testify/assert" +) + +func LogAndAssertJSON(t *testing.T, log func(*Logger), assertions func(fields Fields)) { + var buffer bytes.Buffer + var fields Fields + + logger := New() + logger.Out = &buffer + logger.Formatter = new(JSONFormatter) + + log(logger) + + err := json.Unmarshal(buffer.Bytes(), &fields) + assert.Nil(t, err) + + assertions(fields) +} + +func TestPrint(t *testing.T) { + LogAndAssertJSON(t, func(log *Logger) { + log.Print("test") + }, func(fields Fields) { + assert.Equal(t, fields["msg"], "test") + assert.Equal(t, fields["level"], "info") + }) +} + +func TestInfo(t *testing.T) { + LogAndAssertJSON(t, func(log *Logger) { + log.Info("test") + }, func(fields Fields) { + assert.Equal(t, fields["msg"], "test") + assert.Equal(t, fields["level"], "info") + }) +} + +func TestWarn(t *testing.T) { + LogAndAssertJSON(t, func(log *Logger) { + log.Warn("test") + }, func(fields Fields) { + assert.Equal(t, fields["msg"], "test") + assert.Equal(t, fields["level"], "warning") + }) +} + +func TestInfolnShouldAddSpacesBetweenStrings(t *testing.T) { + LogAndAssertJSON(t, func(log *Logger) { + log.Infoln("test", "test") + }, func(fields Fields) { + assert.Equal(t, fields["msg"], "test test") + }) +} + +func TestInfolnShouldAddSpacesBetweenStringAndNonstring(t *testing.T) { + LogAndAssertJSON(t, func(log *Logger) { + log.Infoln("test", 10) + }, func(fields Fields) { + assert.Equal(t, fields["msg"], "test 10") + }) +} + +func TestInfolnShouldAddSpacesBetweenTwoNonStrings(t *testing.T) { + LogAndAssertJSON(t, func(log *Logger) { + log.Infoln(10, 10) + }, func(fields Fields) { + assert.Equal(t, fields["msg"], "10 10") + }) +} + +func TestInfoShouldAddSpacesBetweenTwoNonStrings(t *testing.T) { + LogAndAssertJSON(t, func(log *Logger) { + log.Infoln(10, 10) + }, func(fields Fields) { + assert.Equal(t, fields["msg"], "10 10") + }) +} + +func TestInfoShouldNotAddSpacesBetweenStringAndNonstring(t *testing.T) { + LogAndAssertJSON(t, func(log *Logger) { + log.Info("test", 10) + }, func(fields Fields) { + assert.Equal(t, fields["msg"], "test10") + }) +} + +func TestInfoShouldNotAddSpacesBetweenStrings(t *testing.T) { + LogAndAssertJSON(t, func(log *Logger) { + log.Info("test", "test") + }, func(fields Fields) { + assert.Equal(t, fields["msg"], "testtest") + }) +} + +func TestWithFieldsShouldAllowAssignments(t *testing.T) { + var buffer bytes.Buffer + var fields Fields + + logger := New() + logger.Out = &buffer + logger.Formatter = new(JSONFormatter) + + localLog := logger.WithFields(Fields{ + "key1": "value1", + }) + + localLog.WithField("key2", "value2").Info("test") + err := json.Unmarshal(buffer.Bytes(), &fields) + assert.Nil(t, err) + + assert.Equal(t, "value2", fields["key2"]) + assert.Equal(t, "value1", fields["key1"]) + + buffer = bytes.Buffer{} + fields = Fields{} + localLog.Info("test") + err = json.Unmarshal(buffer.Bytes(), &fields) + assert.Nil(t, err) + + _, ok := 
fields["key2"] + assert.Equal(t, false, ok) + assert.Equal(t, "value1", fields["key1"]) +} + +func TestUserSuppliedFieldDoesNotOverwriteDefaults(t *testing.T) { + LogAndAssertJSON(t, func(log *Logger) { + log.WithField("msg", "hello").Info("test") + }, func(fields Fields) { + assert.Equal(t, fields["msg"], "test") + }) +} + +func TestUserSuppliedMsgFieldHasPrefix(t *testing.T) { + LogAndAssertJSON(t, func(log *Logger) { + log.WithField("msg", "hello").Info("test") + }, func(fields Fields) { + assert.Equal(t, fields["msg"], "test") + assert.Equal(t, fields["fields.msg"], "hello") + }) +} + +func TestUserSuppliedTimeFieldHasPrefix(t *testing.T) { + LogAndAssertJSON(t, func(log *Logger) { + log.WithField("time", "hello").Info("test") + }, func(fields Fields) { + assert.Equal(t, fields["fields.time"], "hello") + }) +} + +func TestUserSuppliedLevelFieldHasPrefix(t *testing.T) { + LogAndAssertJSON(t, func(log *Logger) { + log.WithField("level", 1).Info("test") + }, func(fields Fields) { + assert.Equal(t, fields["level"], "info") + assert.Equal(t, fields["fields.level"], 1) + }) +} + +func TestConvertLevelToString(t *testing.T) { + assert.Equal(t, "debug", DebugLevel.String()) + assert.Equal(t, "info", InfoLevel.String()) + assert.Equal(t, "warning", WarnLevel.String()) + assert.Equal(t, "error", ErrorLevel.String()) + assert.Equal(t, "fatal", FatalLevel.String()) + assert.Equal(t, "panic", PanicLevel.String()) +} diff --git a/vendor/src/github.com/Sirupsen/logrus/terminal_darwin.go b/vendor/src/github.com/Sirupsen/logrus/terminal_darwin.go new file mode 100644 index 0000000000..8fe02a4aec --- /dev/null +++ b/vendor/src/github.com/Sirupsen/logrus/terminal_darwin.go @@ -0,0 +1,12 @@ +// Based on ssh/terminal: +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package logrus + +import "syscall" + +const ioctlReadTermios = syscall.TIOCGETA + +type Termios syscall.Termios diff --git a/vendor/src/github.com/Sirupsen/logrus/terminal_freebsd.go b/vendor/src/github.com/Sirupsen/logrus/terminal_freebsd.go new file mode 100644 index 0000000000..0428ee5d52 --- /dev/null +++ b/vendor/src/github.com/Sirupsen/logrus/terminal_freebsd.go @@ -0,0 +1,20 @@ +/* + Go 1.2 doesn't include Termios for FreeBSD. This should be added in 1.3 and this could be merged with terminal_darwin. +*/ +package logrus + +import ( + "syscall" +) + +const ioctlReadTermios = syscall.TIOCGETA + +type Termios struct { + Iflag uint32 + Oflag uint32 + Cflag uint32 + Lflag uint32 + Cc [20]uint8 + Ispeed uint32 + Ospeed uint32 +} diff --git a/vendor/src/github.com/Sirupsen/logrus/terminal_linux.go b/vendor/src/github.com/Sirupsen/logrus/terminal_linux.go new file mode 100644 index 0000000000..a2c0b40db6 --- /dev/null +++ b/vendor/src/github.com/Sirupsen/logrus/terminal_linux.go @@ -0,0 +1,12 @@ +// Based on ssh/terminal: +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package logrus + +import "syscall" + +const ioctlReadTermios = syscall.TCGETS + +type Termios syscall.Termios diff --git a/vendor/src/github.com/Sirupsen/logrus/terminal_notwindows.go b/vendor/src/github.com/Sirupsen/logrus/terminal_notwindows.go new file mode 100644 index 0000000000..276447bd5c --- /dev/null +++ b/vendor/src/github.com/Sirupsen/logrus/terminal_notwindows.go @@ -0,0 +1,21 @@ +// Based on ssh/terminal: +// Copyright 2011 The Go Authors. 
All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build linux,!appengine darwin freebsd + +package logrus + +import ( + "syscall" + "unsafe" +) + +// IsTerminal returns true if the given file descriptor is a terminal. +func IsTerminal() bool { + fd := syscall.Stdout + var termios Termios + _, _, err := syscall.Syscall6(syscall.SYS_IOCTL, uintptr(fd), ioctlReadTermios, uintptr(unsafe.Pointer(&termios)), 0, 0, 0) + return err == 0 +} diff --git a/vendor/src/github.com/Sirupsen/logrus/terminal_windows.go b/vendor/src/github.com/Sirupsen/logrus/terminal_windows.go new file mode 100644 index 0000000000..2e09f6f7e3 --- /dev/null +++ b/vendor/src/github.com/Sirupsen/logrus/terminal_windows.go @@ -0,0 +1,27 @@ +// Based on ssh/terminal: +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build windows + +package logrus + +import ( + "syscall" + "unsafe" +) + +var kernel32 = syscall.NewLazyDLL("kernel32.dll") + +var ( + procGetConsoleMode = kernel32.NewProc("GetConsoleMode") +) + +// IsTerminal returns true if the given file descriptor is a terminal. +func IsTerminal() bool { + fd := syscall.Stdout + var st uint32 + r, _, e := syscall.Syscall(procGetConsoleMode.Addr(), 2, uintptr(fd), uintptr(unsafe.Pointer(&st)), 0) + return r != 0 && e == 0 +} diff --git a/vendor/src/github.com/Sirupsen/logrus/text_formatter.go b/vendor/src/github.com/Sirupsen/logrus/text_formatter.go new file mode 100644 index 0000000000..4b93690e7d --- /dev/null +++ b/vendor/src/github.com/Sirupsen/logrus/text_formatter.go @@ -0,0 +1,86 @@ +package logrus + +import ( + "bytes" + "fmt" + "sort" + "strings" + "time" +) + +const ( + nocolor = 0 + red = 31 + green = 32 + yellow = 33 + blue = 34 +) + +func init() { + baseTimestamp = time.Now() +} + +func miniTS() int { + return int(time.Since(baseTimestamp) / time.Second) +} + +type TextFormatter struct { + // Set to true to bypass checking for a TTY before outputting colors. 
+ ForceColors bool + DisableColors bool +} + +func (f *TextFormatter) Format(entry *Entry) ([]byte, error) { + b := &bytes.Buffer{} + + prefixFieldClashes(entry) + + if (f.ForceColors || IsTerminal()) && !f.DisableColors { + levelText := strings.ToUpper(entry.Data["level"].(string))[0:4] + + levelColor := blue + + if entry.Data["level"] == "warning" { + levelColor = yellow + } else if entry.Data["level"] == "error" || + entry.Data["level"] == "fatal" || + entry.Data["level"] == "panic" { + levelColor = red + } + + fmt.Fprintf(b, "\x1b[%dm%s\x1b[0m[%04d] %-44s ", levelColor, levelText, miniTS(), entry.Data["msg"]) + + keys := make([]string, 0) + for k, _ := range entry.Data { + if k != "level" && k != "time" && k != "msg" { + keys = append(keys, k) + } + } + sort.Strings(keys) + for _, k := range keys { + v := entry.Data[k] + fmt.Fprintf(b, " \x1b[%dm%s\x1b[0m=%v", levelColor, k, v) + } + } else { + f.AppendKeyValue(b, "time", entry.Data["time"].(string)) + f.AppendKeyValue(b, "level", entry.Data["level"].(string)) + f.AppendKeyValue(b, "msg", entry.Data["msg"].(string)) + + for key, value := range entry.Data { + if key != "time" && key != "level" && key != "msg" { + f.AppendKeyValue(b, key, value) + } + } + } + + b.WriteByte('\n') + return b.Bytes(), nil +} + +func (f *TextFormatter) AppendKeyValue(b *bytes.Buffer, key, value interface{}) { + if _, ok := value.(string); ok { + fmt.Fprintf(b, "%v=%q ", key, value) + } else { + fmt.Fprintf(b, "%v=%v ", key, value) + } +} From 7c62cee51edc91634046b4faa6c6f1841cd53ec1 Mon Sep 17 00:00:00 2001 From: Alexandr Morozov Date: Fri, 24 Oct 2014 10:12:35 -0700 Subject: [PATCH 144/592] Use logrus everywhere for logging Fixed #8761 Signed-off-by: Alexandr Morozov --- api/client/commands.go | 2 +- api/client/hijack.go | 2 +- api/client/utils.go | 2 +- api/common.go | 2 +- api/server/server.go | 2 +- builder/dispatchers.go | 2 +- builder/evaluator.go | 2 +- builder/internals.go | 2 +- daemon/attach.go | 2 +- daemon/container.go | 2 +- daemon/daemon.go | 7 +-- daemon/daemon_aufs.go | 2 +- daemon/delete.go | 2 +- daemon/exec.go | 2 +- daemon/execdriver/lxc/driver.go | 2 +- daemon/graphdriver/aufs/aufs.go | 2 +- daemon/graphdriver/aufs/mount.go | 2 +- .../graphdriver/devmapper/attach_loopback.go | 2 +- daemon/graphdriver/devmapper/deviceset.go | 2 +- daemon/graphdriver/devmapper/devmapper.go | 2 +- daemon/graphdriver/devmapper/driver.go | 2 +- daemon/graphdriver/fsdiff.go | 2 +- daemon/info.go | 2 +- daemon/logs.go | 2 +- daemon/monitor.go | 2 +- daemon/networkdriver/bridge/driver.go | 2 +- daemon/networkdriver/portmapper/mapper.go | 2 +- daemon/volumes.go | 2 +- docker/daemon.go | 2 +- docker/docker.go | 3 ++ docker/log.go | 16 +++++++ graph/export.go | 2 +- graph/graph.go | 2 +- graph/load.go | 2 +- graph/pull.go | 2 +- graph/push.go | 2 +- graph/service.go | 2 +- image/image.go | 2 +- integration/commands_test.go | 2 +- integration/runtime_test.go | 2 +- integration/utils_test.go | 43 ++++++++++--------- pkg/archive/archive.go | 2 +- pkg/archive/changes.go | 2 +- pkg/broadcastwriter/broadcastwriter.go | 2 +- pkg/fileutils/fileutils.go | 2 +- pkg/httputils/resumablerequestreader.go | 2 +- pkg/iptables/iptables.go | 2 +- pkg/log/log.go | 9 ++++ pkg/signal/trap.go | 2 +- pkg/stdcopy/stdcopy.go | 2 +- pkg/tarsum/tarsum.go | 2 +- registry/endpoint.go | 2 +- registry/registry_mock_test.go | 2 +- registry/session.go | 2 +- registry/session_v2.go | 2 +- runconfig/merge.go | 2 +- trust/service.go | 2 +- trust/trusts.go | 2 +- utils/http.go | 2 +- utils/utils.go | 
2 +- volumes/repository.go | 2 +- 61 files changed, 111 insertions(+), 79 deletions(-) create mode 100644 docker/log.go diff --git a/api/client/commands.go b/api/client/commands.go index f4ced5ecff..cccdfec851 100644 --- a/api/client/commands.go +++ b/api/client/commands.go @@ -22,6 +22,7 @@ import ( "text/template" "time" + log "github.com/Sirupsen/logrus" "github.com/docker/docker/api" "github.com/docker/docker/dockerversion" "github.com/docker/docker/engine" @@ -29,7 +30,6 @@ import ( "github.com/docker/docker/nat" "github.com/docker/docker/opts" "github.com/docker/docker/pkg/archive" - "github.com/docker/docker/pkg/log" flag "github.com/docker/docker/pkg/mflag" "github.com/docker/docker/pkg/parsers" "github.com/docker/docker/pkg/parsers/filters" diff --git a/api/client/hijack.go b/api/client/hijack.go index 00170a4a37..adc012bace 100644 --- a/api/client/hijack.go +++ b/api/client/hijack.go @@ -11,9 +11,9 @@ import ( "runtime" "strings" + log "github.com/Sirupsen/logrus" "github.com/docker/docker/api" "github.com/docker/docker/dockerversion" - "github.com/docker/docker/pkg/log" "github.com/docker/docker/pkg/promise" "github.com/docker/docker/pkg/stdcopy" "github.com/docker/docker/pkg/term" diff --git a/api/client/utils.go b/api/client/utils.go index 11e39729af..b3cd30975b 100644 --- a/api/client/utils.go +++ b/api/client/utils.go @@ -16,10 +16,10 @@ import ( "strings" "syscall" + log "github.com/Sirupsen/logrus" "github.com/docker/docker/api" "github.com/docker/docker/dockerversion" "github.com/docker/docker/engine" - "github.com/docker/docker/pkg/log" "github.com/docker/docker/pkg/stdcopy" "github.com/docker/docker/pkg/term" "github.com/docker/docker/registry" diff --git a/api/common.go b/api/common.go index 7470df6ccd..b151552412 100644 --- a/api/common.go +++ b/api/common.go @@ -5,8 +5,8 @@ import ( "mime" "strings" + log "github.com/Sirupsen/logrus" "github.com/docker/docker/engine" - "github.com/docker/docker/pkg/log" "github.com/docker/docker/pkg/parsers" "github.com/docker/docker/pkg/version" ) diff --git a/api/server/server.go b/api/server/server.go index ffad992caf..d77a6c22a2 100644 --- a/api/server/server.go +++ b/api/server/server.go @@ -23,10 +23,10 @@ import ( "github.com/docker/libcontainer/user" "github.com/gorilla/mux" + log "github.com/Sirupsen/logrus" "github.com/docker/docker/api" "github.com/docker/docker/engine" "github.com/docker/docker/pkg/listenbuffer" - "github.com/docker/docker/pkg/log" "github.com/docker/docker/pkg/parsers" "github.com/docker/docker/pkg/stdcopy" "github.com/docker/docker/pkg/systemd" diff --git a/builder/dispatchers.go b/builder/dispatchers.go index 0c2a580872..fcbbbbdc2d 100644 --- a/builder/dispatchers.go +++ b/builder/dispatchers.go @@ -14,8 +14,8 @@ import ( "regexp" "strings" + log "github.com/Sirupsen/logrus" "github.com/docker/docker/nat" - "github.com/docker/docker/pkg/log" flag "github.com/docker/docker/pkg/mflag" "github.com/docker/docker/runconfig" ) diff --git a/builder/evaluator.go b/builder/evaluator.go index 4122616350..d84bd852f9 100644 --- a/builder/evaluator.go +++ b/builder/evaluator.go @@ -27,10 +27,10 @@ import ( "path" "strings" + log "github.com/Sirupsen/logrus" "github.com/docker/docker/builder/parser" "github.com/docker/docker/daemon" "github.com/docker/docker/engine" - "github.com/docker/docker/pkg/log" "github.com/docker/docker/pkg/tarsum" "github.com/docker/docker/registry" "github.com/docker/docker/runconfig" diff --git a/builder/internals.go b/builder/internals.go index 20f3380fb8..d8093507d3 100644 --- 
a/builder/internals.go +++ b/builder/internals.go @@ -18,11 +18,11 @@ import ( "syscall" "time" + log "github.com/Sirupsen/logrus" "github.com/docker/docker/builder/parser" "github.com/docker/docker/daemon" imagepkg "github.com/docker/docker/image" "github.com/docker/docker/pkg/archive" - "github.com/docker/docker/pkg/log" "github.com/docker/docker/pkg/parsers" "github.com/docker/docker/pkg/symlink" "github.com/docker/docker/pkg/system" diff --git a/daemon/attach.go b/daemon/attach.go index e115dac2e0..de583846ee 100644 --- a/daemon/attach.go +++ b/daemon/attach.go @@ -9,7 +9,7 @@ import ( "github.com/docker/docker/engine" "github.com/docker/docker/pkg/ioutils" "github.com/docker/docker/pkg/jsonlog" - "github.com/docker/docker/pkg/log" + log "github.com/Sirupsen/logrus" "github.com/docker/docker/pkg/promise" "github.com/docker/docker/utils" ) diff --git a/daemon/container.go b/daemon/container.go index 67bc6942fb..1a0d943d3f 100644 --- a/daemon/container.go +++ b/daemon/container.go @@ -25,7 +25,7 @@ import ( "github.com/docker/docker/pkg/archive" "github.com/docker/docker/pkg/broadcastwriter" "github.com/docker/docker/pkg/ioutils" - "github.com/docker/docker/pkg/log" + log "github.com/Sirupsen/logrus" "github.com/docker/docker/pkg/networkfs/etchosts" "github.com/docker/docker/pkg/networkfs/resolvconf" "github.com/docker/docker/pkg/promise" diff --git a/daemon/daemon.go b/daemon/daemon.go index caf0c8745f..658d578e4e 100644 --- a/daemon/daemon.go +++ b/daemon/daemon.go @@ -14,6 +14,7 @@ import ( "github.com/docker/libcontainer/label" + log "github.com/Sirupsen/logrus" "github.com/docker/docker/daemon/execdriver" "github.com/docker/docker/daemon/execdriver/execdrivers" "github.com/docker/docker/daemon/execdriver/lxc" @@ -29,7 +30,6 @@ import ( "github.com/docker/docker/pkg/broadcastwriter" "github.com/docker/docker/pkg/graphdb" "github.com/docker/docker/pkg/ioutils" - "github.com/docker/docker/pkg/log" "github.com/docker/docker/pkg/namesgenerator" "github.com/docker/docker/pkg/parsers" "github.com/docker/docker/pkg/parsers/kernel" @@ -304,7 +304,7 @@ func (daemon *Daemon) restore() error { ) if !debug { - log.Infof("Loading containers: ") + log.Infof("Loading containers: start.") } dir, err := ioutil.ReadDir(daemon.repository) if err != nil { @@ -392,7 +392,8 @@ func (daemon *Daemon) restore() error { } if !debug { - log.Infof(": done.") + fmt.Println() + log.Infof("Loading containers: done.") } return nil diff --git a/daemon/daemon_aufs.go b/daemon/daemon_aufs.go index a370a4ce3c..6b84d40727 100644 --- a/daemon/daemon_aufs.go +++ b/daemon/daemon_aufs.go @@ -6,7 +6,7 @@ import ( "github.com/docker/docker/daemon/graphdriver" "github.com/docker/docker/daemon/graphdriver/aufs" "github.com/docker/docker/graph" - "github.com/docker/docker/pkg/log" + log "github.com/Sirupsen/logrus" ) // Given the graphdriver ad, if it is aufs, then migrate it. 
diff --git a/daemon/delete.go b/daemon/delete.go index 77be926c1c..b382f6d843 100644 --- a/daemon/delete.go +++ b/daemon/delete.go @@ -6,7 +6,7 @@ import ( "path" "github.com/docker/docker/engine" - "github.com/docker/docker/pkg/log" + log "github.com/Sirupsen/logrus" ) func (daemon *Daemon) ContainerRm(job *engine.Job) engine.Status { diff --git a/daemon/exec.go b/daemon/exec.go index a6113b0fca..473a6a0d15 100644 --- a/daemon/exec.go +++ b/daemon/exec.go @@ -14,7 +14,7 @@ import ( "github.com/docker/docker/engine" "github.com/docker/docker/pkg/broadcastwriter" "github.com/docker/docker/pkg/ioutils" - "github.com/docker/docker/pkg/log" + log "github.com/Sirupsen/logrus" "github.com/docker/docker/pkg/promise" "github.com/docker/docker/runconfig" "github.com/docker/docker/utils" diff --git a/daemon/execdriver/lxc/driver.go b/daemon/execdriver/lxc/driver.go index 0809b05c1e..ca8573c30f 100644 --- a/daemon/execdriver/lxc/driver.go +++ b/daemon/execdriver/lxc/driver.go @@ -18,7 +18,7 @@ import ( "github.com/kr/pty" "github.com/docker/docker/daemon/execdriver" - "github.com/docker/docker/pkg/log" + log "github.com/Sirupsen/logrus" "github.com/docker/docker/pkg/term" "github.com/docker/docker/utils" "github.com/docker/libcontainer/cgroups" diff --git a/daemon/graphdriver/aufs/aufs.go b/daemon/graphdriver/aufs/aufs.go index 8e3ae0b181..a60b8c7f30 100644 --- a/daemon/graphdriver/aufs/aufs.go +++ b/daemon/graphdriver/aufs/aufs.go @@ -32,7 +32,7 @@ import ( "github.com/docker/docker/daemon/graphdriver" "github.com/docker/docker/pkg/archive" - "github.com/docker/docker/pkg/log" + log "github.com/Sirupsen/logrus" mountpk "github.com/docker/docker/pkg/mount" "github.com/docker/docker/utils" "github.com/docker/libcontainer/label" diff --git a/daemon/graphdriver/aufs/mount.go b/daemon/graphdriver/aufs/mount.go index fa74e05b07..bb935f6919 100644 --- a/daemon/graphdriver/aufs/mount.go +++ b/daemon/graphdriver/aufs/mount.go @@ -4,7 +4,7 @@ import ( "os/exec" "syscall" - "github.com/docker/docker/pkg/log" + log "github.com/Sirupsen/logrus" ) func Unmount(target string) error { diff --git a/daemon/graphdriver/devmapper/attach_loopback.go b/daemon/graphdriver/devmapper/attach_loopback.go index 9cfa18a4d3..dce5b23ee8 100644 --- a/daemon/graphdriver/devmapper/attach_loopback.go +++ b/daemon/graphdriver/devmapper/attach_loopback.go @@ -7,7 +7,7 @@ import ( "os" "syscall" - "github.com/docker/docker/pkg/log" + log "github.com/Sirupsen/logrus" ) func stringToLoopName(src string) [LoNameSize]uint8 { diff --git a/daemon/graphdriver/devmapper/deviceset.go b/daemon/graphdriver/devmapper/deviceset.go index b3b5c84399..6c16a2798f 100644 --- a/daemon/graphdriver/devmapper/deviceset.go +++ b/daemon/graphdriver/devmapper/deviceset.go @@ -19,7 +19,7 @@ import ( "time" "github.com/docker/docker/daemon/graphdriver" - "github.com/docker/docker/pkg/log" + log "github.com/Sirupsen/logrus" "github.com/docker/docker/pkg/parsers" "github.com/docker/docker/pkg/units" "github.com/docker/libcontainer/label" diff --git a/daemon/graphdriver/devmapper/devmapper.go b/daemon/graphdriver/devmapper/devmapper.go index 42cba76230..cd281f0bb3 100644 --- a/daemon/graphdriver/devmapper/devmapper.go +++ b/daemon/graphdriver/devmapper/devmapper.go @@ -9,7 +9,7 @@ import ( "runtime" "syscall" - "github.com/docker/docker/pkg/log" + log "github.com/Sirupsen/logrus" ) type DevmapperLogger interface { diff --git a/daemon/graphdriver/devmapper/driver.go b/daemon/graphdriver/devmapper/driver.go index 8f9de85d4e..8c5b148fab 100644 --- 
a/daemon/graphdriver/devmapper/driver.go +++ b/daemon/graphdriver/devmapper/driver.go @@ -9,7 +9,7 @@ import ( "path" "github.com/docker/docker/daemon/graphdriver" - "github.com/docker/docker/pkg/log" + log "github.com/Sirupsen/logrus" "github.com/docker/docker/pkg/mount" "github.com/docker/docker/pkg/units" ) diff --git a/daemon/graphdriver/fsdiff.go b/daemon/graphdriver/fsdiff.go index 5e9d32c1c8..fcfa908853 100644 --- a/daemon/graphdriver/fsdiff.go +++ b/daemon/graphdriver/fsdiff.go @@ -6,7 +6,7 @@ import ( "github.com/docker/docker/pkg/archive" "github.com/docker/docker/pkg/ioutils" - "github.com/docker/docker/pkg/log" + log "github.com/Sirupsen/logrus" "github.com/docker/docker/utils" ) diff --git a/daemon/info.go b/daemon/info.go index 3d3c9ba6ca..1bf1b2176b 100644 --- a/daemon/info.go +++ b/daemon/info.go @@ -6,7 +6,7 @@ import ( "github.com/docker/docker/dockerversion" "github.com/docker/docker/engine" - "github.com/docker/docker/pkg/log" + log "github.com/Sirupsen/logrus" "github.com/docker/docker/pkg/parsers/kernel" "github.com/docker/docker/pkg/parsers/operatingsystem" "github.com/docker/docker/registry" diff --git a/daemon/logs.go b/daemon/logs.go index b4df401efd..31d6fd5223 100644 --- a/daemon/logs.go +++ b/daemon/logs.go @@ -10,7 +10,7 @@ import ( "github.com/docker/docker/engine" "github.com/docker/docker/pkg/jsonlog" - "github.com/docker/docker/pkg/log" + log "github.com/Sirupsen/logrus" "github.com/docker/docker/pkg/tailfile" "github.com/docker/docker/pkg/timeutils" ) diff --git a/daemon/monitor.go b/daemon/monitor.go index b5dd741012..c0943465e1 100644 --- a/daemon/monitor.go +++ b/daemon/monitor.go @@ -7,7 +7,7 @@ import ( "time" "github.com/docker/docker/daemon/execdriver" - "github.com/docker/docker/pkg/log" + log "github.com/Sirupsen/logrus" "github.com/docker/docker/runconfig" ) diff --git a/daemon/networkdriver/bridge/driver.go b/daemon/networkdriver/bridge/driver.go index 44d864e709..08b955d03b 100644 --- a/daemon/networkdriver/bridge/driver.go +++ b/daemon/networkdriver/bridge/driver.go @@ -14,7 +14,7 @@ import ( "github.com/docker/docker/daemon/networkdriver/portmapper" "github.com/docker/docker/engine" "github.com/docker/docker/pkg/iptables" - "github.com/docker/docker/pkg/log" + log "github.com/Sirupsen/logrus" "github.com/docker/docker/pkg/networkfs/resolvconf" "github.com/docker/docker/pkg/parsers/kernel" "github.com/docker/libcontainer/netlink" diff --git a/daemon/networkdriver/portmapper/mapper.go b/daemon/networkdriver/portmapper/mapper.go index 24ca0d892f..b7bc90244f 100644 --- a/daemon/networkdriver/portmapper/mapper.go +++ b/daemon/networkdriver/portmapper/mapper.go @@ -8,7 +8,7 @@ import ( "github.com/docker/docker/daemon/networkdriver/portallocator" "github.com/docker/docker/pkg/iptables" - "github.com/docker/docker/pkg/log" + log "github.com/Sirupsen/logrus" ) type mapping struct { diff --git a/daemon/volumes.go b/daemon/volumes.go index d4ded29161..84de15a609 100644 --- a/daemon/volumes.go +++ b/daemon/volumes.go @@ -12,7 +12,7 @@ import ( "github.com/docker/docker/daemon/execdriver" "github.com/docker/docker/pkg/archive" - "github.com/docker/docker/pkg/log" + log "github.com/Sirupsen/logrus" "github.com/docker/docker/pkg/symlink" "github.com/docker/docker/volumes" ) diff --git a/docker/daemon.go b/docker/daemon.go index 2f65878472..dd0baa5fa4 100644 --- a/docker/daemon.go +++ b/docker/daemon.go @@ -3,6 +3,7 @@ package main import ( + log "github.com/Sirupsen/logrus" "github.com/docker/docker/builder" "github.com/docker/docker/builtins" 
"github.com/docker/docker/daemon" @@ -10,7 +11,6 @@ import ( _ "github.com/docker/docker/daemon/execdriver/native" "github.com/docker/docker/dockerversion" "github.com/docker/docker/engine" - "github.com/docker/docker/pkg/log" flag "github.com/docker/docker/pkg/mflag" "github.com/docker/docker/pkg/signal" ) diff --git a/docker/docker.go b/docker/docker.go index de13c7829c..6d0979723a 100644 --- a/docker/docker.go +++ b/docker/docker.go @@ -28,6 +28,7 @@ func main() { if reexec.Init() { return } + flag.Parse() // FIXME: validate daemon flags here @@ -39,6 +40,8 @@ func main() { os.Setenv("DEBUG", "1") } + initLogging(*flDebug) + if len(flHosts) == 0 { defaultHost := os.Getenv("DOCKER_HOST") if defaultHost == "" || *flDaemon { diff --git a/docker/log.go b/docker/log.go new file mode 100644 index 0000000000..a245aed1fb --- /dev/null +++ b/docker/log.go @@ -0,0 +1,16 @@ +package main + +import ( + "os" + + log "github.com/Sirupsen/logrus" +) + +func initLogging(debug bool) { + log.SetOutput(os.Stderr) + if debug { + log.SetLevel(log.DebugLevel) + } else { + log.SetLevel(log.InfoLevel) + } +} diff --git a/graph/export.go b/graph/export.go index 86dc5a342a..591dcff67a 100644 --- a/graph/export.go +++ b/graph/export.go @@ -9,7 +9,7 @@ import ( "github.com/docker/docker/engine" "github.com/docker/docker/pkg/archive" - "github.com/docker/docker/pkg/log" + log "github.com/Sirupsen/logrus" "github.com/docker/docker/pkg/parsers" ) diff --git a/graph/graph.go b/graph/graph.go index d5d4fcdab1..07fdd5b48d 100644 --- a/graph/graph.go +++ b/graph/graph.go @@ -16,7 +16,7 @@ import ( "github.com/docker/docker/dockerversion" "github.com/docker/docker/image" "github.com/docker/docker/pkg/archive" - "github.com/docker/docker/pkg/log" + log "github.com/Sirupsen/logrus" "github.com/docker/docker/pkg/truncindex" "github.com/docker/docker/runconfig" "github.com/docker/docker/utils" diff --git a/graph/load.go b/graph/load.go index 753f31d2c9..112e7668ed 100644 --- a/graph/load.go +++ b/graph/load.go @@ -10,7 +10,7 @@ import ( "github.com/docker/docker/engine" "github.com/docker/docker/image" "github.com/docker/docker/pkg/archive" - "github.com/docker/docker/pkg/log" + log "github.com/Sirupsen/logrus" ) // Loads a set of images into the repository. This is the complementary of ImageExport. 
diff --git a/graph/pull.go b/graph/pull.go index 5d7e84ed72..c858d9b498 100644 --- a/graph/pull.go +++ b/graph/pull.go @@ -14,7 +14,7 @@ import ( "github.com/docker/docker/engine" "github.com/docker/docker/image" - "github.com/docker/docker/pkg/log" + log "github.com/Sirupsen/logrus" "github.com/docker/docker/registry" "github.com/docker/docker/utils" "github.com/docker/libtrust" diff --git a/graph/push.go b/graph/push.go index 3511245b30..0a291d580f 100644 --- a/graph/push.go +++ b/graph/push.go @@ -9,7 +9,7 @@ import ( "github.com/docker/docker/engine" "github.com/docker/docker/pkg/archive" - "github.com/docker/docker/pkg/log" + log "github.com/Sirupsen/logrus" "github.com/docker/docker/registry" "github.com/docker/docker/utils" ) diff --git a/graph/service.go b/graph/service.go index 1be986f8d5..3ed7184687 100644 --- a/graph/service.go +++ b/graph/service.go @@ -6,7 +6,7 @@ import ( "github.com/docker/docker/engine" "github.com/docker/docker/image" - "github.com/docker/docker/pkg/log" + log "github.com/Sirupsen/logrus" ) func (s *TagStore) Install(eng *engine.Engine) error { diff --git a/image/image.go b/image/image.go index fabd897d29..ccd77b5067 100644 --- a/image/image.go +++ b/image/image.go @@ -10,7 +10,7 @@ import ( "time" "github.com/docker/docker/pkg/archive" - "github.com/docker/docker/pkg/log" + log "github.com/Sirupsen/logrus" "github.com/docker/docker/runconfig" "github.com/docker/docker/utils" ) diff --git a/integration/commands_test.go b/integration/commands_test.go index 532e6f79fa..446fba3f92 100644 --- a/integration/commands_test.go +++ b/integration/commands_test.go @@ -11,7 +11,7 @@ import ( "github.com/docker/docker/api/client" "github.com/docker/docker/daemon" - "github.com/docker/docker/pkg/log" + log "github.com/Sirupsen/logrus" "github.com/docker/docker/pkg/term" "github.com/docker/docker/utils" "github.com/docker/libtrust" diff --git a/integration/runtime_test.go b/integration/runtime_test.go index b17d132f8a..00daf2c5c0 100644 --- a/integration/runtime_test.go +++ b/integration/runtime_test.go @@ -21,7 +21,7 @@ import ( "github.com/docker/docker/image" "github.com/docker/docker/nat" "github.com/docker/docker/pkg/ioutils" - "github.com/docker/docker/pkg/log" + log "github.com/Sirupsen/logrus" "github.com/docker/docker/reexec" "github.com/docker/docker/runconfig" "github.com/docker/docker/utils" diff --git a/integration/utils_test.go b/integration/utils_test.go index e1abfa72fc..1d6e3ec609 100644 --- a/integration/utils_test.go +++ b/integration/utils_test.go @@ -18,20 +18,23 @@ import ( "github.com/docker/docker/builtins" "github.com/docker/docker/daemon" "github.com/docker/docker/engine" - "github.com/docker/docker/pkg/log" flag "github.com/docker/docker/pkg/mflag" "github.com/docker/docker/pkg/sysinfo" "github.com/docker/docker/runconfig" "github.com/docker/docker/utils" ) +type Fataler interface { + Fatal(...interface{}) +} + // This file contains utility functions for docker's unit test suite. // It has to be named XXX_test.go, apparently, in other to access private functions // from other XXX_test.go functions. // Create a temporary daemon suitable for unit testing. // Call t.Fatal() at the first error. -func mkDaemon(f log.Fataler) *daemon.Daemon { +func mkDaemon(f Fataler) *daemon.Daemon { eng := newTestEngine(f, false, "") return mkDaemonFromEngine(eng, f) // FIXME: @@ -40,7 +43,7 @@ func mkDaemon(f log.Fataler) *daemon.Daemon { // [...] 
} -func createNamedTestContainer(eng *engine.Engine, config *runconfig.Config, f log.Fataler, name string) (shortId string) { +func createNamedTestContainer(eng *engine.Engine, config *runconfig.Config, f Fataler, name string) (shortId string) { job := eng.Job("create", name) if err := job.ImportEnv(config); err != nil { f.Fatal(err) @@ -53,23 +56,23 @@ func createNamedTestContainer(eng *engine.Engine, config *runconfig.Config, f lo return engine.Tail(outputBuffer, 1) } -func createTestContainer(eng *engine.Engine, config *runconfig.Config, f log.Fataler) (shortId string) { +func createTestContainer(eng *engine.Engine, config *runconfig.Config, f Fataler) (shortId string) { return createNamedTestContainer(eng, config, f, "") } -func startContainer(eng *engine.Engine, id string, t log.Fataler) { +func startContainer(eng *engine.Engine, id string, t Fataler) { job := eng.Job("start", id) if err := job.Run(); err != nil { t.Fatal(err) } } -func containerRun(eng *engine.Engine, id string, t log.Fataler) { +func containerRun(eng *engine.Engine, id string, t Fataler) { startContainer(eng, id, t) containerWait(eng, id, t) } -func containerFileExists(eng *engine.Engine, id, dir string, t log.Fataler) bool { +func containerFileExists(eng *engine.Engine, id, dir string, t Fataler) bool { c := getContainer(eng, id, t) if err := c.Mount(); err != nil { t.Fatal(err) @@ -84,7 +87,7 @@ func containerFileExists(eng *engine.Engine, id, dir string, t log.Fataler) bool return true } -func containerAttach(eng *engine.Engine, id string, t log.Fataler) (io.WriteCloser, io.ReadCloser) { +func containerAttach(eng *engine.Engine, id string, t Fataler) (io.WriteCloser, io.ReadCloser) { c := getContainer(eng, id, t) i, err := c.StdinPipe() if err != nil { @@ -97,31 +100,31 @@ func containerAttach(eng *engine.Engine, id string, t log.Fataler) (io.WriteClos return i, o } -func containerWait(eng *engine.Engine, id string, t log.Fataler) int { +func containerWait(eng *engine.Engine, id string, t Fataler) int { ex, _ := getContainer(eng, id, t).WaitStop(-1 * time.Second) return ex } -func containerWaitTimeout(eng *engine.Engine, id string, t log.Fataler) error { +func containerWaitTimeout(eng *engine.Engine, id string, t Fataler) error { _, err := getContainer(eng, id, t).WaitStop(500 * time.Millisecond) return err } -func containerKill(eng *engine.Engine, id string, t log.Fataler) { +func containerKill(eng *engine.Engine, id string, t Fataler) { if err := eng.Job("kill", id).Run(); err != nil { t.Fatal(err) } } -func containerRunning(eng *engine.Engine, id string, t log.Fataler) bool { +func containerRunning(eng *engine.Engine, id string, t Fataler) bool { return getContainer(eng, id, t).IsRunning() } -func containerAssertExists(eng *engine.Engine, id string, t log.Fataler) { +func containerAssertExists(eng *engine.Engine, id string, t Fataler) { getContainer(eng, id, t) } -func containerAssertNotExists(eng *engine.Engine, id string, t log.Fataler) { +func containerAssertNotExists(eng *engine.Engine, id string, t Fataler) { daemon := mkDaemonFromEngine(eng, t) if c := daemon.Get(id); c != nil { t.Fatal(fmt.Errorf("Container %s should not exist", id)) @@ -130,7 +133,7 @@ func containerAssertNotExists(eng *engine.Engine, id string, t log.Fataler) { // assertHttpNotError expect the given response to not have an error. // Otherwise the it causes the test to fail. 
-func assertHttpNotError(r *httptest.ResponseRecorder, t log.Fataler) { +func assertHttpNotError(r *httptest.ResponseRecorder, t Fataler) { // Non-error http status are [200, 400) if r.Code < http.StatusOK || r.Code >= http.StatusBadRequest { t.Fatal(fmt.Errorf("Unexpected http error: %v", r.Code)) @@ -139,14 +142,14 @@ func assertHttpNotError(r *httptest.ResponseRecorder, t log.Fataler) { // assertHttpError expect the given response to have an error. // Otherwise the it causes the test to fail. -func assertHttpError(r *httptest.ResponseRecorder, t log.Fataler) { +func assertHttpError(r *httptest.ResponseRecorder, t Fataler) { // Non-error http status are [200, 400) if !(r.Code < http.StatusOK || r.Code >= http.StatusBadRequest) { t.Fatal(fmt.Errorf("Unexpected http success code: %v", r.Code)) } } -func getContainer(eng *engine.Engine, id string, t log.Fataler) *daemon.Container { +func getContainer(eng *engine.Engine, id string, t Fataler) *daemon.Container { daemon := mkDaemonFromEngine(eng, t) c := daemon.Get(id) if c == nil { @@ -155,7 +158,7 @@ func getContainer(eng *engine.Engine, id string, t log.Fataler) *daemon.Containe return c } -func mkDaemonFromEngine(eng *engine.Engine, t log.Fataler) *daemon.Daemon { +func mkDaemonFromEngine(eng *engine.Engine, t Fataler) *daemon.Daemon { iDaemon := eng.Hack_GetGlobalVar("httpapi.daemon") if iDaemon == nil { panic("Legacy daemon field not set in engine") @@ -167,7 +170,7 @@ func mkDaemonFromEngine(eng *engine.Engine, t log.Fataler) *daemon.Daemon { return daemon } -func newTestEngine(t log.Fataler, autorestart bool, root string) *engine.Engine { +func newTestEngine(t Fataler, autorestart bool, root string) *engine.Engine { if root == "" { if dir, err := newTestDirectory(unitTestStoreBase); err != nil { t.Fatal(err) @@ -200,7 +203,7 @@ func newTestEngine(t log.Fataler, autorestart bool, root string) *engine.Engine return eng } -func NewTestEngine(t log.Fataler) *engine.Engine { +func NewTestEngine(t Fataler) *engine.Engine { return newTestEngine(t, false, "") } diff --git a/pkg/archive/archive.go b/pkg/archive/archive.go index 98149160b3..e4db63ab65 100644 --- a/pkg/archive/archive.go +++ b/pkg/archive/archive.go @@ -19,7 +19,7 @@ import ( "github.com/docker/docker/vendor/src/code.google.com/p/go/src/pkg/archive/tar" "github.com/docker/docker/pkg/fileutils" - "github.com/docker/docker/pkg/log" + log "github.com/Sirupsen/logrus" "github.com/docker/docker/pkg/pools" "github.com/docker/docker/pkg/promise" "github.com/docker/docker/pkg/system" diff --git a/pkg/archive/changes.go b/pkg/archive/changes.go index 5fbdcc90af..557b5db583 100644 --- a/pkg/archive/changes.go +++ b/pkg/archive/changes.go @@ -12,7 +12,7 @@ import ( "github.com/docker/docker/vendor/src/code.google.com/p/go/src/pkg/archive/tar" - "github.com/docker/docker/pkg/log" + log "github.com/Sirupsen/logrus" "github.com/docker/docker/pkg/pools" "github.com/docker/docker/pkg/system" ) diff --git a/pkg/broadcastwriter/broadcastwriter.go b/pkg/broadcastwriter/broadcastwriter.go index 1898302e79..a9ae1047dc 100644 --- a/pkg/broadcastwriter/broadcastwriter.go +++ b/pkg/broadcastwriter/broadcastwriter.go @@ -7,7 +7,7 @@ import ( "time" "github.com/docker/docker/pkg/jsonlog" - "github.com/docker/docker/pkg/log" + log "github.com/Sirupsen/logrus" ) // BroadcastWriter accumulate multiple io.WriteCloser by stream. 
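The integration-test changes above replace `log.Fataler` with a locally defined `Fataler` interface so the helpers no longer depend on pkg/log. Anything with a `Fatal(...interface{})` method satisfies it, which is what lets the same helpers accept both `*testing.T` and `*testing.B`. A small sketch of the idea (the helper names here are made up for illustration):

```go
package sketch

import "testing"

// Fataler mirrors the interface introduced in integration/utils_test.go.
type Fataler interface {
	Fatal(args ...interface{})
}

// requireNoError is a hypothetical helper: it only needs Fatal, so it
// accepts *testing.T, *testing.B, or any other Fataler implementation.
func requireNoError(f Fataler, err error) {
	if err != nil {
		f.Fatal(err)
	}
}

// Example use from a test (this would live in a _test.go file).
func runExample(t *testing.T) {
	requireNoError(t, nil)
}
```
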
diff --git a/pkg/fileutils/fileutils.go b/pkg/fileutils/fileutils.go index acc27f55b5..4e4a91b91a 100644 --- a/pkg/fileutils/fileutils.go +++ b/pkg/fileutils/fileutils.go @@ -1,7 +1,7 @@ package fileutils import ( - "github.com/docker/docker/pkg/log" + log "github.com/Sirupsen/logrus" "path/filepath" ) diff --git a/pkg/httputils/resumablerequestreader.go b/pkg/httputils/resumablerequestreader.go index 3cd1f49179..10edd43a98 100644 --- a/pkg/httputils/resumablerequestreader.go +++ b/pkg/httputils/resumablerequestreader.go @@ -6,7 +6,7 @@ import ( "net/http" "time" - "github.com/docker/docker/pkg/log" + log "github.com/Sirupsen/logrus" ) type resumableRequestReader struct { diff --git a/pkg/iptables/iptables.go b/pkg/iptables/iptables.go index b8d9e56705..53e6e1430c 100644 --- a/pkg/iptables/iptables.go +++ b/pkg/iptables/iptables.go @@ -9,7 +9,7 @@ import ( "strconv" "strings" - "github.com/docker/docker/pkg/log" + log "github.com/Sirupsen/logrus" ) type Action string diff --git a/pkg/log/log.go b/pkg/log/log.go index b06d958cb1..d636f763ef 100644 --- a/pkg/log/log.go +++ b/pkg/log/log.go @@ -8,9 +8,18 @@ import ( "strings" "time" + log "github.com/Sirupsen/logrus" "github.com/docker/docker/pkg/timeutils" ) +func init() { + log.SetOutput(os.Stderr) + log.SetLevel(log.InfoLevel) + if os.Getenv("DEBUG") != "" { + log.SetLevel(log.DebugLevel) + } +} + type priority int const ( diff --git a/pkg/signal/trap.go b/pkg/signal/trap.go index 42ddb4d277..9be82671a1 100644 --- a/pkg/signal/trap.go +++ b/pkg/signal/trap.go @@ -6,7 +6,7 @@ import ( "sync/atomic" "syscall" - "github.com/docker/docker/pkg/log" + log "github.com/Sirupsen/logrus" ) // Trap sets up a simplified signal "trap", appropriate for common diff --git a/pkg/stdcopy/stdcopy.go b/pkg/stdcopy/stdcopy.go index 79e15bc852..a61779ce53 100644 --- a/pkg/stdcopy/stdcopy.go +++ b/pkg/stdcopy/stdcopy.go @@ -5,7 +5,7 @@ import ( "errors" "io" - "github.com/docker/docker/pkg/log" + log "github.com/Sirupsen/logrus" ) const ( diff --git a/pkg/tarsum/tarsum.go b/pkg/tarsum/tarsum.go index 6581f3f234..88d603c45b 100644 --- a/pkg/tarsum/tarsum.go +++ b/pkg/tarsum/tarsum.go @@ -13,7 +13,7 @@ import ( "github.com/docker/docker/vendor/src/code.google.com/p/go/src/pkg/archive/tar" - "github.com/docker/docker/pkg/log" + log "github.com/Sirupsen/logrus" ) const ( diff --git a/registry/endpoint.go b/registry/endpoint.go index 58311d32d1..05b5c08be1 100644 --- a/registry/endpoint.go +++ b/registry/endpoint.go @@ -9,7 +9,7 @@ import ( "net/url" "strings" - "github.com/docker/docker/pkg/log" + log "github.com/Sirupsen/logrus" ) // scans string for api version in the URL path. returns the trimmed hostname, if version found, string and API version. 
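The `pkg/log` hunk above adds an `init()` that configures the shared logrus standard logger: stderr output, Info level by default, and Debug level when the `DEBUG` environment variable is set (the same policy `docker/log.go` applies through `initLogging`). A standalone sketch of the observable effect, assuming nothing else has changed the level:

```go
package main

import (
	"os"

	log "github.com/Sirupsen/logrus"
)

func main() {
	// Equivalent one-time setup; in the patch this lives in an init()
	// and in initLogging rather than in main.
	log.SetOutput(os.Stderr)
	log.SetLevel(log.InfoLevel)
	if os.Getenv("DEBUG") != "" {
		log.SetLevel(log.DebugLevel)
	}

	log.Debugf("emitted only when DEBUG is set")
	log.Infof("emitted at the default level")
}
```
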
diff --git a/registry/registry_mock_test.go b/registry/registry_mock_test.go index 967d8b2615..02884c6224 100644 --- a/registry/registry_mock_test.go +++ b/registry/registry_mock_test.go @@ -15,7 +15,7 @@ import ( "github.com/gorilla/mux" - "github.com/docker/docker/pkg/log" + log "github.com/Sirupsen/logrus" ) var ( diff --git a/registry/session.go b/registry/session.go index ff0be343d5..de97db3aea 100644 --- a/registry/session.go +++ b/registry/session.go @@ -18,7 +18,7 @@ import ( "time" "github.com/docker/docker/pkg/httputils" - "github.com/docker/docker/pkg/log" + log "github.com/Sirupsen/logrus" "github.com/docker/docker/pkg/tarsum" "github.com/docker/docker/utils" ) diff --git a/registry/session_v2.go b/registry/session_v2.go index c0bc19b337..20e9e2ee9c 100644 --- a/registry/session_v2.go +++ b/registry/session_v2.go @@ -8,7 +8,7 @@ import ( "net/url" "strconv" - "github.com/docker/docker/pkg/log" + log "github.com/Sirupsen/logrus" "github.com/docker/docker/utils" "github.com/gorilla/mux" ) diff --git a/runconfig/merge.go b/runconfig/merge.go index 0c60d1df0b..f084d70dd8 100644 --- a/runconfig/merge.go +++ b/runconfig/merge.go @@ -4,7 +4,7 @@ import ( "strings" "github.com/docker/docker/nat" - "github.com/docker/docker/pkg/log" + log "github.com/Sirupsen/logrus" ) func Merge(userConf, imageConf *Config) error { diff --git a/trust/service.go b/trust/service.go index c056ac7191..592515db18 100644 --- a/trust/service.go +++ b/trust/service.go @@ -5,7 +5,7 @@ import ( "time" "github.com/docker/docker/engine" - "github.com/docker/docker/pkg/log" + log "github.com/Sirupsen/logrus" "github.com/docker/libtrust" ) diff --git a/trust/trusts.go b/trust/trusts.go index a3c0f5f548..33354bd3ce 100644 --- a/trust/trusts.go +++ b/trust/trusts.go @@ -12,7 +12,7 @@ import ( "sync" "time" - "github.com/docker/docker/pkg/log" + log "github.com/Sirupsen/logrus" "github.com/docker/libtrust/trustgraph" ) diff --git a/utils/http.go b/utils/http.go index c877eefdd2..bcf1865e2e 100644 --- a/utils/http.go +++ b/utils/http.go @@ -5,7 +5,7 @@ import ( "net/http" "strings" - "github.com/docker/docker/pkg/log" + log "github.com/Sirupsen/logrus" ) // VersionInfo is used to model entities which has a version. diff --git a/utils/utils.go b/utils/utils.go index 792b80bd51..b495f442a1 100644 --- a/utils/utils.go +++ b/utils/utils.go @@ -23,7 +23,7 @@ import ( "github.com/docker/docker/dockerversion" "github.com/docker/docker/pkg/fileutils" "github.com/docker/docker/pkg/ioutils" - "github.com/docker/docker/pkg/log" + log "github.com/Sirupsen/logrus" ) type KeyValuePair struct { diff --git a/volumes/repository.go b/volumes/repository.go index 2383f34a93..eddb295150 100644 --- a/volumes/repository.go +++ b/volumes/repository.go @@ -8,7 +8,7 @@ import ( "sync" "github.com/docker/docker/daemon/graphdriver" - "github.com/docker/docker/pkg/log" + log "github.com/Sirupsen/logrus" "github.com/docker/docker/utils" ) From b3b40433451d8e76d02dc70d591027f680c9a3bf Mon Sep 17 00:00:00 2001 From: Alexandr Morozov Date: Fri, 24 Oct 2014 11:20:17 -0700 Subject: [PATCH 145/592] Use common logging in engine Signed-off-by: Alexandr Morozov --- engine/job.go | 12 ++++++++---- 1 file changed, 8 insertions(+), 4 deletions(-) diff --git a/engine/job.go b/engine/job.go index d032ff0215..ecd9441ff5 100644 --- a/engine/job.go +++ b/engine/job.go @@ -6,6 +6,8 @@ import ( "io" "strings" "time" + + log "github.com/Sirupsen/logrus" ) // A job is the fundamental unit of work in the docker engine. 
@@ -66,10 +68,12 @@ func (job *Job) Run() error { return fmt.Errorf("%s: job has already completed", job.Name) } // Log beginning and end of the job - job.Eng.Logf("+job %s", job.CallString()) - defer func() { - job.Eng.Logf("-job %s%s", job.CallString(), job.StatusString()) - }() + if job.Eng.Logging { + log.Infof("+job %s", job.CallString()) + defer func() { + log.Infof("-job %s%s", job.CallString(), job.StatusString()) + }() + } var errorMessage = bytes.NewBuffer(nil) job.Stderr.Add(errorMessage) if job.handler == nil { From b99dcb3c7ef6e66754354292f2ec0217341c8d2b Mon Sep 17 00:00:00 2001 From: Alexandr Morozov Date: Fri, 24 Oct 2014 13:47:52 -0700 Subject: [PATCH 146/592] Remove pkg/log Signed-off-by: Alexandr Morozov --- pkg/log/log.go | 123 -------------------------------------------- pkg/log/log_test.go | 39 -------------- 2 files changed, 162 deletions(-) delete mode 100644 pkg/log/log.go delete mode 100644 pkg/log/log_test.go diff --git a/pkg/log/log.go b/pkg/log/log.go deleted file mode 100644 index d636f763ef..0000000000 --- a/pkg/log/log.go +++ /dev/null @@ -1,123 +0,0 @@ -package log - -import ( - "fmt" - "io" - "os" - "runtime" - "strings" - "time" - - log "github.com/Sirupsen/logrus" - "github.com/docker/docker/pkg/timeutils" -) - -func init() { - log.SetOutput(os.Stderr) - log.SetLevel(log.InfoLevel) - if os.Getenv("DEBUG") != "" { - log.SetLevel(log.DebugLevel) - } -} - -type priority int - -const ( - errorFormat = "[%s] [%s] %s:%d %s\n" - logFormat = "[%s] [%s] %s\n" - - fatalPriority priority = iota - errorPriority - infoPriority - debugPriority -) - -// A common interface to access the Fatal method of -// both testing.B and testing.T. -type Fataler interface { - Fatal(args ...interface{}) -} - -func (p priority) String() string { - switch p { - case fatalPriority: - return "fatal" - case errorPriority: - return "error" - case infoPriority: - return "info" - case debugPriority: - return "debug" - } - - return "" -} - -var DefaultLogger = Logger{Out: os.Stdout, Err: os.Stderr} - -// Debug function, if the debug flag is set, then display. Do nothing otherwise -// If Docker is in damon mode, also send the debug info on the socket -func Debugf(format string, a ...interface{}) (int, error) { - return DefaultLogger.Debugf(format, a...) -} - -func Infof(format string, a ...interface{}) (int, error) { - return DefaultLogger.Infof(format, a...) -} - -func Errorf(format string, a ...interface{}) (int, error) { - return DefaultLogger.Errorf(format, a...) -} - -func Fatal(a ...interface{}) { - DefaultLogger.Fatalf("%s", a...) -} - -func Fatalf(format string, a ...interface{}) { - DefaultLogger.Fatalf(format, a...) -} - -type Logger struct { - Err io.Writer - Out io.Writer -} - -func (l Logger) Debugf(format string, a ...interface{}) (int, error) { - if os.Getenv("DEBUG") != "" { - return l.logf(l.Err, debugPriority, format, a...) - } - return 0, nil -} - -func (l Logger) Infof(format string, a ...interface{}) (int, error) { - return l.logf(l.Out, infoPriority, format, a...) -} - -func (l Logger) Errorf(format string, a ...interface{}) (int, error) { - return l.logf(l.Err, errorPriority, format, a...) -} - -func (l Logger) Fatalf(format string, a ...interface{}) { - l.logf(l.Err, fatalPriority, format, a...) 
- os.Exit(1) -} - -func (l Logger) logf(stream io.Writer, level priority, format string, a ...interface{}) (int, error) { - var prefix string - - if level <= errorPriority || level == debugPriority { - // Retrieve the stack infos - _, file, line, ok := runtime.Caller(2) - if !ok { - file = "" - line = -1 - } else { - file = file[strings.LastIndex(file, "/")+1:] - } - prefix = fmt.Sprintf(errorFormat, time.Now().Format(timeutils.RFC3339NanoFixed), level.String(), file, line, format) - } else { - prefix = fmt.Sprintf(logFormat, time.Now().Format(timeutils.RFC3339NanoFixed), level.String(), format) - } - - return fmt.Fprintf(stream, prefix, a...) -} diff --git a/pkg/log/log_test.go b/pkg/log/log_test.go deleted file mode 100644 index 4f5b3f82ed..0000000000 --- a/pkg/log/log_test.go +++ /dev/null @@ -1,39 +0,0 @@ -package log - -import ( - "bytes" - "regexp" - - "testing" -) - -var reRFC3339NanoFixed = "[0-9]{4}-[0-9]{2}-[0-9]{2}T[0-9]{2}:[0-9]{2}:[0-9]{2}.[0-9]{9}.([0-9]{2}:[0-9]{2})?" - -func TestLogFatalf(t *testing.T) { - var output *bytes.Buffer - - tests := []struct { - Level priority - Format string - Values []interface{} - ExpectedPattern string - }{ - {fatalPriority, "%d + %d = %d", []interface{}{1, 1, 2}, "\\[" + reRFC3339NanoFixed + "\\] \\[fatal\\] testing.go:\\d+ 1 \\+ 1 = 2"}, - {errorPriority, "%d + %d = %d", []interface{}{1, 1, 2}, "\\[" + reRFC3339NanoFixed + "\\] \\[error\\] testing.go:\\d+ 1 \\+ 1 = 2"}, - {infoPriority, "%d + %d = %d", []interface{}{1, 1, 2}, "\\[" + reRFC3339NanoFixed + "\\] \\[info\\] 1 \\+ 1 = 2"}, - {debugPriority, "%d + %d = %d", []interface{}{1, 1, 2}, "\\[" + reRFC3339NanoFixed + "\\] \\[debug\\] testing.go:\\d+ 1 \\+ 1 = 2"}, - } - - for i, test := range tests { - output = &bytes.Buffer{} - DefaultLogger.logf(output, test.Level, test.Format, test.Values...) 
- - expected := regexp.MustCompile(test.ExpectedPattern) - if !expected.MatchString(output.String()) { - t.Errorf("[%d] Log output does not match expected pattern:\n\tExpected: %s\n\tOutput: %s", - i, - expected.String(), - output.String()) - } - } -} From ee7dd44c017458c8fe0be8e09569b1238366dca3 Mon Sep 17 00:00:00 2001 From: Alexandr Morozov Date: Fri, 24 Oct 2014 15:11:48 -0700 Subject: [PATCH 147/592] Mass gofmt Signed-off-by: Alexandr Morozov --- daemon/attach.go | 2 +- daemon/container.go | 2 +- daemon/daemon_aufs.go | 2 +- daemon/delete.go | 2 +- daemon/exec.go | 2 +- daemon/execdriver/lxc/driver.go | 2 +- daemon/graphdriver/aufs/aufs.go | 2 +- daemon/graphdriver/devmapper/deviceset.go | 2 +- daemon/graphdriver/devmapper/driver.go | 2 +- daemon/graphdriver/fsdiff.go | 2 +- daemon/info.go | 2 +- daemon/logs.go | 2 +- daemon/monitor.go | 2 +- daemon/networkdriver/bridge/driver.go | 2 +- daemon/networkdriver/portmapper/mapper.go | 2 +- daemon/volumes.go | 2 +- graph/export.go | 2 +- graph/graph.go | 2 +- graph/load.go | 2 +- graph/pull.go | 2 +- graph/push.go | 2 +- graph/service.go | 2 +- image/image.go | 2 +- integration/commands_test.go | 2 +- integration/runtime_test.go | 2 +- pkg/archive/archive.go | 2 +- pkg/broadcastwriter/broadcastwriter.go | 2 +- registry/session.go | 2 +- runconfig/merge.go | 2 +- trust/service.go | 2 +- utils/utils.go | 2 +- volumes/repository.go | 2 +- 32 files changed, 32 insertions(+), 32 deletions(-) diff --git a/daemon/attach.go b/daemon/attach.go index de583846ee..f2cc6bd469 100644 --- a/daemon/attach.go +++ b/daemon/attach.go @@ -6,10 +6,10 @@ import ( "os" "time" + log "github.com/Sirupsen/logrus" "github.com/docker/docker/engine" "github.com/docker/docker/pkg/ioutils" "github.com/docker/docker/pkg/jsonlog" - log "github.com/Sirupsen/logrus" "github.com/docker/docker/pkg/promise" "github.com/docker/docker/utils" ) diff --git a/daemon/container.go b/daemon/container.go index 1a0d943d3f..a477f19f22 100644 --- a/daemon/container.go +++ b/daemon/container.go @@ -17,6 +17,7 @@ import ( "github.com/docker/libcontainer/devices" "github.com/docker/libcontainer/label" + log "github.com/Sirupsen/logrus" "github.com/docker/docker/daemon/execdriver" "github.com/docker/docker/engine" "github.com/docker/docker/image" @@ -25,7 +26,6 @@ import ( "github.com/docker/docker/pkg/archive" "github.com/docker/docker/pkg/broadcastwriter" "github.com/docker/docker/pkg/ioutils" - log "github.com/Sirupsen/logrus" "github.com/docker/docker/pkg/networkfs/etchosts" "github.com/docker/docker/pkg/networkfs/resolvconf" "github.com/docker/docker/pkg/promise" diff --git a/daemon/daemon_aufs.go b/daemon/daemon_aufs.go index 6b84d40727..7d4d3c32e9 100644 --- a/daemon/daemon_aufs.go +++ b/daemon/daemon_aufs.go @@ -3,10 +3,10 @@ package daemon import ( + log "github.com/Sirupsen/logrus" "github.com/docker/docker/daemon/graphdriver" "github.com/docker/docker/daemon/graphdriver/aufs" "github.com/docker/docker/graph" - log "github.com/Sirupsen/logrus" ) // Given the graphdriver ad, if it is aufs, then migrate it. 
diff --git a/daemon/delete.go b/daemon/delete.go index b382f6d843..55678f90a1 100644 --- a/daemon/delete.go +++ b/daemon/delete.go @@ -5,8 +5,8 @@ import ( "os" "path" - "github.com/docker/docker/engine" log "github.com/Sirupsen/logrus" + "github.com/docker/docker/engine" ) func (daemon *Daemon) ContainerRm(job *engine.Job) engine.Status { diff --git a/daemon/exec.go b/daemon/exec.go index 473a6a0d15..eae096e680 100644 --- a/daemon/exec.go +++ b/daemon/exec.go @@ -9,12 +9,12 @@ import ( "strings" "sync" + log "github.com/Sirupsen/logrus" "github.com/docker/docker/daemon/execdriver" "github.com/docker/docker/daemon/execdriver/lxc" "github.com/docker/docker/engine" "github.com/docker/docker/pkg/broadcastwriter" "github.com/docker/docker/pkg/ioutils" - log "github.com/Sirupsen/logrus" "github.com/docker/docker/pkg/promise" "github.com/docker/docker/runconfig" "github.com/docker/docker/utils" diff --git a/daemon/execdriver/lxc/driver.go b/daemon/execdriver/lxc/driver.go index ca8573c30f..7583a3e64f 100644 --- a/daemon/execdriver/lxc/driver.go +++ b/daemon/execdriver/lxc/driver.go @@ -17,8 +17,8 @@ import ( "github.com/kr/pty" - "github.com/docker/docker/daemon/execdriver" log "github.com/Sirupsen/logrus" + "github.com/docker/docker/daemon/execdriver" "github.com/docker/docker/pkg/term" "github.com/docker/docker/utils" "github.com/docker/libcontainer/cgroups" diff --git a/daemon/graphdriver/aufs/aufs.go b/daemon/graphdriver/aufs/aufs.go index a60b8c7f30..8ba097d45a 100644 --- a/daemon/graphdriver/aufs/aufs.go +++ b/daemon/graphdriver/aufs/aufs.go @@ -30,9 +30,9 @@ import ( "sync" "syscall" + log "github.com/Sirupsen/logrus" "github.com/docker/docker/daemon/graphdriver" "github.com/docker/docker/pkg/archive" - log "github.com/Sirupsen/logrus" mountpk "github.com/docker/docker/pkg/mount" "github.com/docker/docker/utils" "github.com/docker/libcontainer/label" diff --git a/daemon/graphdriver/devmapper/deviceset.go b/daemon/graphdriver/devmapper/deviceset.go index 6c16a2798f..fdfc089a82 100644 --- a/daemon/graphdriver/devmapper/deviceset.go +++ b/daemon/graphdriver/devmapper/deviceset.go @@ -18,8 +18,8 @@ import ( "syscall" "time" - "github.com/docker/docker/daemon/graphdriver" log "github.com/Sirupsen/logrus" + "github.com/docker/docker/daemon/graphdriver" "github.com/docker/docker/pkg/parsers" "github.com/docker/docker/pkg/units" "github.com/docker/libcontainer/label" diff --git a/daemon/graphdriver/devmapper/driver.go b/daemon/graphdriver/devmapper/driver.go index 8c5b148fab..53b8da436d 100644 --- a/daemon/graphdriver/devmapper/driver.go +++ b/daemon/graphdriver/devmapper/driver.go @@ -8,8 +8,8 @@ import ( "os" "path" - "github.com/docker/docker/daemon/graphdriver" log "github.com/Sirupsen/logrus" + "github.com/docker/docker/daemon/graphdriver" "github.com/docker/docker/pkg/mount" "github.com/docker/docker/pkg/units" ) diff --git a/daemon/graphdriver/fsdiff.go b/daemon/graphdriver/fsdiff.go index fcfa908853..269379bddf 100644 --- a/daemon/graphdriver/fsdiff.go +++ b/daemon/graphdriver/fsdiff.go @@ -4,9 +4,9 @@ import ( "fmt" "time" + log "github.com/Sirupsen/logrus" "github.com/docker/docker/pkg/archive" "github.com/docker/docker/pkg/ioutils" - log "github.com/Sirupsen/logrus" "github.com/docker/docker/utils" ) diff --git a/daemon/info.go b/daemon/info.go index 1bf1b2176b..b1c195d736 100644 --- a/daemon/info.go +++ b/daemon/info.go @@ -4,9 +4,9 @@ import ( "os" "runtime" + log "github.com/Sirupsen/logrus" "github.com/docker/docker/dockerversion" "github.com/docker/docker/engine" - log 
"github.com/Sirupsen/logrus" "github.com/docker/docker/pkg/parsers/kernel" "github.com/docker/docker/pkg/parsers/operatingsystem" "github.com/docker/docker/registry" diff --git a/daemon/logs.go b/daemon/logs.go index 31d6fd5223..a5fac2c3d9 100644 --- a/daemon/logs.go +++ b/daemon/logs.go @@ -8,9 +8,9 @@ import ( "os" "strconv" + log "github.com/Sirupsen/logrus" "github.com/docker/docker/engine" "github.com/docker/docker/pkg/jsonlog" - log "github.com/Sirupsen/logrus" "github.com/docker/docker/pkg/tailfile" "github.com/docker/docker/pkg/timeutils" ) diff --git a/daemon/monitor.go b/daemon/monitor.go index c0943465e1..cbb74c335b 100644 --- a/daemon/monitor.go +++ b/daemon/monitor.go @@ -6,8 +6,8 @@ import ( "sync" "time" - "github.com/docker/docker/daemon/execdriver" log "github.com/Sirupsen/logrus" + "github.com/docker/docker/daemon/execdriver" "github.com/docker/docker/runconfig" ) diff --git a/daemon/networkdriver/bridge/driver.go b/daemon/networkdriver/bridge/driver.go index 08b955d03b..c967aebb79 100644 --- a/daemon/networkdriver/bridge/driver.go +++ b/daemon/networkdriver/bridge/driver.go @@ -8,13 +8,13 @@ import ( "strings" "sync" + log "github.com/Sirupsen/logrus" "github.com/docker/docker/daemon/networkdriver" "github.com/docker/docker/daemon/networkdriver/ipallocator" "github.com/docker/docker/daemon/networkdriver/portallocator" "github.com/docker/docker/daemon/networkdriver/portmapper" "github.com/docker/docker/engine" "github.com/docker/docker/pkg/iptables" - log "github.com/Sirupsen/logrus" "github.com/docker/docker/pkg/networkfs/resolvconf" "github.com/docker/docker/pkg/parsers/kernel" "github.com/docker/libcontainer/netlink" diff --git a/daemon/networkdriver/portmapper/mapper.go b/daemon/networkdriver/portmapper/mapper.go index b7bc90244f..4bf8cd142c 100644 --- a/daemon/networkdriver/portmapper/mapper.go +++ b/daemon/networkdriver/portmapper/mapper.go @@ -6,9 +6,9 @@ import ( "net" "sync" + log "github.com/Sirupsen/logrus" "github.com/docker/docker/daemon/networkdriver/portallocator" "github.com/docker/docker/pkg/iptables" - log "github.com/Sirupsen/logrus" ) type mapping struct { diff --git a/daemon/volumes.go b/daemon/volumes.go index 84de15a609..0fd54144ed 100644 --- a/daemon/volumes.go +++ b/daemon/volumes.go @@ -10,9 +10,9 @@ import ( "strings" "syscall" + log "github.com/Sirupsen/logrus" "github.com/docker/docker/daemon/execdriver" "github.com/docker/docker/pkg/archive" - log "github.com/Sirupsen/logrus" "github.com/docker/docker/pkg/symlink" "github.com/docker/docker/volumes" ) diff --git a/graph/export.go b/graph/export.go index 591dcff67a..75314076ed 100644 --- a/graph/export.go +++ b/graph/export.go @@ -7,9 +7,9 @@ import ( "os" "path" + log "github.com/Sirupsen/logrus" "github.com/docker/docker/engine" "github.com/docker/docker/pkg/archive" - log "github.com/Sirupsen/logrus" "github.com/docker/docker/pkg/parsers" ) diff --git a/graph/graph.go b/graph/graph.go index 07fdd5b48d..75b1825034 100644 --- a/graph/graph.go +++ b/graph/graph.go @@ -12,11 +12,11 @@ import ( "syscall" "time" + log "github.com/Sirupsen/logrus" "github.com/docker/docker/daemon/graphdriver" "github.com/docker/docker/dockerversion" "github.com/docker/docker/image" "github.com/docker/docker/pkg/archive" - log "github.com/Sirupsen/logrus" "github.com/docker/docker/pkg/truncindex" "github.com/docker/docker/runconfig" "github.com/docker/docker/utils" diff --git a/graph/load.go b/graph/load.go index 112e7668ed..05e963daaa 100644 --- a/graph/load.go +++ b/graph/load.go @@ -7,10 +7,10 @@ import ( "os" 
"path" + log "github.com/Sirupsen/logrus" "github.com/docker/docker/engine" "github.com/docker/docker/image" "github.com/docker/docker/pkg/archive" - log "github.com/Sirupsen/logrus" ) // Loads a set of images into the repository. This is the complementary of ImageExport. diff --git a/graph/pull.go b/graph/pull.go index c858d9b498..9345d7d489 100644 --- a/graph/pull.go +++ b/graph/pull.go @@ -12,9 +12,9 @@ import ( "strings" "time" + log "github.com/Sirupsen/logrus" "github.com/docker/docker/engine" "github.com/docker/docker/image" - log "github.com/Sirupsen/logrus" "github.com/docker/docker/registry" "github.com/docker/docker/utils" "github.com/docker/libtrust" diff --git a/graph/push.go b/graph/push.go index 0a291d580f..a2bd7136f9 100644 --- a/graph/push.go +++ b/graph/push.go @@ -7,9 +7,9 @@ import ( "os" "path" + log "github.com/Sirupsen/logrus" "github.com/docker/docker/engine" "github.com/docker/docker/pkg/archive" - log "github.com/Sirupsen/logrus" "github.com/docker/docker/registry" "github.com/docker/docker/utils" ) diff --git a/graph/service.go b/graph/service.go index 3ed7184687..9b1509af29 100644 --- a/graph/service.go +++ b/graph/service.go @@ -4,9 +4,9 @@ import ( "fmt" "io" + log "github.com/Sirupsen/logrus" "github.com/docker/docker/engine" "github.com/docker/docker/image" - log "github.com/Sirupsen/logrus" ) func (s *TagStore) Install(eng *engine.Engine) error { diff --git a/image/image.go b/image/image.go index ccd77b5067..728a188a14 100644 --- a/image/image.go +++ b/image/image.go @@ -9,8 +9,8 @@ import ( "strconv" "time" - "github.com/docker/docker/pkg/archive" log "github.com/Sirupsen/logrus" + "github.com/docker/docker/pkg/archive" "github.com/docker/docker/runconfig" "github.com/docker/docker/utils" ) diff --git a/integration/commands_test.go b/integration/commands_test.go index 446fba3f92..b00c68641e 100644 --- a/integration/commands_test.go +++ b/integration/commands_test.go @@ -9,9 +9,9 @@ import ( "testing" "time" + log "github.com/Sirupsen/logrus" "github.com/docker/docker/api/client" "github.com/docker/docker/daemon" - log "github.com/Sirupsen/logrus" "github.com/docker/docker/pkg/term" "github.com/docker/docker/utils" "github.com/docker/libtrust" diff --git a/integration/runtime_test.go b/integration/runtime_test.go index 00daf2c5c0..d2aac17081 100644 --- a/integration/runtime_test.go +++ b/integration/runtime_test.go @@ -16,12 +16,12 @@ import ( "testing" "time" + log "github.com/Sirupsen/logrus" "github.com/docker/docker/daemon" "github.com/docker/docker/engine" "github.com/docker/docker/image" "github.com/docker/docker/nat" "github.com/docker/docker/pkg/ioutils" - log "github.com/Sirupsen/logrus" "github.com/docker/docker/reexec" "github.com/docker/docker/runconfig" "github.com/docker/docker/utils" diff --git a/pkg/archive/archive.go b/pkg/archive/archive.go index e4db63ab65..9c4d881cfa 100644 --- a/pkg/archive/archive.go +++ b/pkg/archive/archive.go @@ -18,8 +18,8 @@ import ( "github.com/docker/docker/vendor/src/code.google.com/p/go/src/pkg/archive/tar" - "github.com/docker/docker/pkg/fileutils" log "github.com/Sirupsen/logrus" + "github.com/docker/docker/pkg/fileutils" "github.com/docker/docker/pkg/pools" "github.com/docker/docker/pkg/promise" "github.com/docker/docker/pkg/system" diff --git a/pkg/broadcastwriter/broadcastwriter.go b/pkg/broadcastwriter/broadcastwriter.go index a9ae1047dc..232cf3dfc8 100644 --- a/pkg/broadcastwriter/broadcastwriter.go +++ b/pkg/broadcastwriter/broadcastwriter.go @@ -6,8 +6,8 @@ import ( "sync" "time" - 
"github.com/docker/docker/pkg/jsonlog" log "github.com/Sirupsen/logrus" + "github.com/docker/docker/pkg/jsonlog" ) // BroadcastWriter accumulate multiple io.WriteCloser by stream. diff --git a/registry/session.go b/registry/session.go index de97db3aea..0c5f01397a 100644 --- a/registry/session.go +++ b/registry/session.go @@ -17,8 +17,8 @@ import ( "strings" "time" - "github.com/docker/docker/pkg/httputils" log "github.com/Sirupsen/logrus" + "github.com/docker/docker/pkg/httputils" "github.com/docker/docker/pkg/tarsum" "github.com/docker/docker/utils" ) diff --git a/runconfig/merge.go b/runconfig/merge.go index f084d70dd8..98f70873db 100644 --- a/runconfig/merge.go +++ b/runconfig/merge.go @@ -3,8 +3,8 @@ package runconfig import ( "strings" - "github.com/docker/docker/nat" log "github.com/Sirupsen/logrus" + "github.com/docker/docker/nat" ) func Merge(userConf, imageConf *Config) error { diff --git a/trust/service.go b/trust/service.go index 592515db18..324a478f10 100644 --- a/trust/service.go +++ b/trust/service.go @@ -4,8 +4,8 @@ import ( "fmt" "time" - "github.com/docker/docker/engine" log "github.com/Sirupsen/logrus" + "github.com/docker/docker/engine" "github.com/docker/libtrust" ) diff --git a/utils/utils.go b/utils/utils.go index b495f442a1..061e3bc638 100644 --- a/utils/utils.go +++ b/utils/utils.go @@ -20,10 +20,10 @@ import ( "sync" "syscall" + log "github.com/Sirupsen/logrus" "github.com/docker/docker/dockerversion" "github.com/docker/docker/pkg/fileutils" "github.com/docker/docker/pkg/ioutils" - log "github.com/Sirupsen/logrus" ) type KeyValuePair struct { diff --git a/volumes/repository.go b/volumes/repository.go index eddb295150..d6612e7a34 100644 --- a/volumes/repository.go +++ b/volumes/repository.go @@ -7,8 +7,8 @@ import ( "path/filepath" "sync" - "github.com/docker/docker/daemon/graphdriver" log "github.com/Sirupsen/logrus" + "github.com/docker/docker/daemon/graphdriver" "github.com/docker/docker/utils" ) From 61f8001c23dc4fbd7b1113f20b89a0dbade9aa82 Mon Sep 17 00:00:00 2001 From: Andrea Luzzardi Date: Fri, 24 Oct 2014 15:48:23 -0700 Subject: [PATCH 148/592] Expose # of CPUs and memory available on docker info. 
Signed-off-by: Andrea Luzzardi --- api/client/commands.go | 7 +++++++ daemon/info.go | 8 ++++++++ docs/man/docker-info.1.md | 2 ++ docs/sources/reference/api/docker_remote_api.md | 6 ++++++ docs/sources/reference/api/docker_remote_api_v1.16.md | 2 ++ docs/sources/reference/commandline/cli.md | 2 ++ 6 files changed, 27 insertions(+) diff --git a/api/client/commands.go b/api/client/commands.go index f4ced5ecff..2a7190d3d7 100644 --- a/api/client/commands.go +++ b/api/client/commands.go @@ -481,6 +481,13 @@ func (cli *DockerCli) CmdInfo(args ...string) error { fmt.Fprintf(cli.out, "Kernel Version: %s\n", remoteInfo.Get("KernelVersion")) fmt.Fprintf(cli.out, "Operating System: %s\n", remoteInfo.Get("OperatingSystem")) + if remoteInfo.Exists("NCPU") { + fmt.Fprintf(cli.out, "CPUs: %d\n", remoteInfo.GetInt("NCPU")) + } + if remoteInfo.Exists("MemTotal") { + fmt.Fprintf(cli.out, "Total Memory: %s\n", units.BytesSize(float64(remoteInfo.GetInt64("MemTotal")))) + } + if remoteInfo.GetBool("Debug") || os.Getenv("DEBUG") != "" { fmt.Fprintf(cli.out, "Debug mode (server): %v\n", remoteInfo.GetBool("Debug")) fmt.Fprintf(cli.out, "Debug mode (client): %v\n", os.Getenv("DEBUG") != "") diff --git a/daemon/info.go b/daemon/info.go index 3d3c9ba6ca..79af8cc21b 100644 --- a/daemon/info.go +++ b/daemon/info.go @@ -9,6 +9,7 @@ import ( "github.com/docker/docker/pkg/log" "github.com/docker/docker/pkg/parsers/kernel" "github.com/docker/docker/pkg/parsers/operatingsystem" + "github.com/docker/docker/pkg/system" "github.com/docker/docker/registry" "github.com/docker/docker/utils" ) @@ -37,6 +38,11 @@ func (daemon *Daemon) CmdInfo(job *engine.Job) engine.Status { operatingSystem += " (containerized)" } + meminfo, err := system.ReadMemInfo() + if err != nil { + log.Errorf("Could not read system memory info: %v", err) + } + // if we still have the original dockerinit binary from before we copied it locally, let's return the path to that, since that's more intuitive (the copied path is trivial to derive by hand given VERSION) initPath := utils.DockerInitPath("") if initPath == "" { @@ -67,6 +73,8 @@ func (daemon *Daemon) CmdInfo(job *engine.Job) engine.Status { v.Set("IndexServerAddress", registry.IndexServerAddress()) v.Set("InitSha1", dockerversion.INITSHA1) v.Set("InitPath", initPath) + v.SetInt("NCPU", runtime.NumCPU()) + v.SetInt64("MemTotal", meminfo.MemTotal) if _, err := v.WriteTo(job.Stdout); err != nil { return job.Error(err) } diff --git a/docs/man/docker-info.1.md b/docs/man/docker-info.1.md index bf64a7b543..0547b44b07 100644 --- a/docs/man/docker-info.1.md +++ b/docs/man/docker-info.1.md @@ -37,6 +37,8 @@ Here is a sample output: Execution Driver: native-0.2 Kernel Version: 3.13.0-24-generic Operating System: Ubuntu 14.04 LTS + CPUs: 1 + Total Memory: 2 GiB # HISTORY April 2014, Originally compiled by William Henry (whenry at redhat dot com) diff --git a/docs/sources/reference/api/docker_remote_api.md b/docs/sources/reference/api/docker_remote_api.md index 4dd7fff688..5d144b9075 100644 --- a/docs/sources/reference/api/docker_remote_api.md +++ b/docs/sources/reference/api/docker_remote_api.md @@ -44,6 +44,12 @@ You can still call an old version of the API using ### What's new +`GET /info` + +**New!** +`info` now returns the number of CPUs available on the machine (`NCPU`) and +total memory available (`MemTotal`). 
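
As an aside, the two new fields are straightforward to consume from the `GET /info` response body. The snippet below is a minimal sketch, not part of this patch: the struct and program are illustrative only, and the sample values are taken from the v1.16 API document above.

```go
package main

import (
	"encoding/json"
	"fmt"
)

// Only the two fields added by this change; the real /info payload
// carries many more keys.
type info struct {
	NCPU     int   `json:"NCPU"`
	MemTotal int64 `json:"MemTotal"`
}

func main() {
	// Sample body matching the documented v1.16 response.
	body := []byte(`{"NCPU":1,"MemTotal":2099236864}`)

	var i info
	if err := json.Unmarshal(body, &i); err != nil {
		panic(err)
	}
	fmt.Printf("CPUs: %d\nTotal Memory: %d bytes\n", i.NCPU, i.MemTotal)
}
```
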
+ ## v1.15 ### Full Documentation diff --git a/docs/sources/reference/api/docker_remote_api_v1.16.md b/docs/sources/reference/api/docker_remote_api_v1.16.md index 887bebcee1..fbb89294e2 100644 --- a/docs/sources/reference/api/docker_remote_api_v1.16.md +++ b/docs/sources/reference/api/docker_remote_api_v1.16.md @@ -1164,6 +1164,8 @@ Display system-wide information "Driver":"btrfs", "ExecutionDriver":"native-0.1", "KernelVersion":"3.12.0-1-amd64" + "NCPU":1, + "MemTotal":2099236864, "Debug":false, "NFd": 11, "NGoroutines":21, diff --git a/docs/sources/reference/commandline/cli.md b/docs/sources/reference/commandline/cli.md index 4ede5e9ccf..e1beaeb29c 100644 --- a/docs/sources/reference/commandline/cli.md +++ b/docs/sources/reference/commandline/cli.md @@ -812,6 +812,8 @@ For example: Execution Driver: native-0.2 Kernel Version: 3.13.0-24-generic Operating System: Ubuntu 14.04 LTS + CPUs: 1 + Total Memory: 2 GiB Debug mode (server): false Debug mode (client): true Fds: 10 From d3ac9ea98e872fee808693c736bc5a465d6426e2 Mon Sep 17 00:00:00 2001 From: Lakshan Perera Date: Wed, 8 Oct 2014 04:09:08 +0000 Subject: [PATCH 149/592] Add HasValidGITPrefix to utils/utils.go This will allow us to use a common Git prefix check for both api/clients/commands.go and builder/job.go. Previous prefix check in build from Git (in builder/jobs.go) ignored valid prefixes such as "git@", "http://" or "https://". Signed-off-by: Lakshan Perera --- api/client/commands.go | 2 +- builder/job.go | 3 +-- utils/utils.go | 4 ++++ utils/utils_test.go | 21 +++++++++++++++++++++ 4 files changed, 27 insertions(+), 3 deletions(-) diff --git a/api/client/commands.go b/api/client/commands.go index 6c4e5c55fe..ab8ac96f03 100644 --- a/api/client/commands.go +++ b/api/client/commands.go @@ -116,7 +116,7 @@ func (cli *DockerCli) CmdBuild(args ...string) error { root := cmd.Arg(0) if utils.IsGIT(root) { remoteURL := cmd.Arg(0) - if !strings.HasPrefix(remoteURL, "git://") && !strings.HasPrefix(remoteURL, "git@") && !utils.IsURL(remoteURL) { + if !utils.ValidGitTransport(remoteURL) { remoteURL = "https://" + remoteURL } diff --git a/builder/job.go b/builder/job.go index 555232c9ae..4ce8cbe020 100644 --- a/builder/job.go +++ b/builder/job.go @@ -5,7 +5,6 @@ import ( "io/ioutil" "os" "os/exec" - "strings" "github.com/docker/docker/daemon" "github.com/docker/docker/engine" @@ -59,7 +58,7 @@ func (b *BuilderJob) CmdBuild(job *engine.Job) engine.Status { if remoteURL == "" { context = ioutil.NopCloser(job.Stdin) } else if utils.IsGIT(remoteURL) { - if !strings.HasPrefix(remoteURL, "git://") { + if !utils.ValidGitTransport(remoteURL) { remoteURL = "https://" + remoteURL } root, err := ioutil.TempDir("", "docker-build-git") diff --git a/utils/utils.go b/utils/utils.go index 792b80bd51..70ab420791 100644 --- a/utils/utils.go +++ b/utils/utils.go @@ -304,6 +304,10 @@ func IsGIT(str string) bool { return strings.HasPrefix(str, "git://") || strings.HasPrefix(str, "github.com/") || strings.HasPrefix(str, "git@github.com:") || (strings.HasSuffix(str, ".git") && IsURL(str)) } +func ValidGitTransport(str string) bool { + return strings.HasPrefix(str, "git://") || strings.HasPrefix(str, "git@") || IsURL(str) +} + var ( localHostRx = regexp.MustCompile(`(?m)^nameserver 127[^\n]+\n*`) ) diff --git a/utils/utils_test.go b/utils/utils_test.go index ce304482b8..6e2de7e041 100644 --- a/utils/utils_test.go +++ b/utils/utils_test.go @@ -97,3 +97,24 @@ func TestReadSymlinkedDirectoryToFile(t *testing.T) { t.Errorf("failed to remove symlink: %s", err) } } + +func 
TestValidGitTransport(t *testing.T) { + for _, url := range []string{ + "git://github.com/docker/docker", + "git@github.com:docker/docker.git", + "https://github.com/docker/docker.git", + "http://github.com/docker/docker.git", + } { + if ValidGitTransport(url) == false { + t.Fatalf("%q should be detected as valid Git prefix", url) + } + } + + for _, url := range []string{ + "github.com/docker/docker", + } { + if ValidGitTransport(url) == true { + t.Fatalf("%q should not be detected as valid Git prefix", url) + } + } +} From ce407ccff4fb4245384e50d86870d2e247cd43ec Mon Sep 17 00:00:00 2001 From: shuai-z Date: Sun, 26 Oct 2014 13:55:29 +0800 Subject: [PATCH 150/592] removed redundant Clean The doc (or src) says: The result is Cleaned. http://golang.org/pkg/path/filepath/#Join Signed-off-by: shuai-z --- pkg/symlink/fs.go | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/pkg/symlink/fs.go b/pkg/symlink/fs.go index da9c590675..d761732571 100644 --- a/pkg/symlink/fs.go +++ b/pkg/symlink/fs.go @@ -35,7 +35,6 @@ func FollowSymlinkInScope(link, root string) (string, error) { for _, p := range strings.Split(link, "/") { prev = filepath.Join(prev, p) - prev = filepath.Clean(prev) loopCounter := 0 for { @@ -72,7 +71,7 @@ func FollowSymlinkInScope(link, root string) (string, error) { } else { prev, _ = filepath.Abs(prev) - if prev = filepath.Clean(filepath.Join(filepath.Dir(prev), dest)); len(prev) < len(root) { + if prev = filepath.Join(filepath.Dir(prev), dest); len(prev) < len(root) { prev = filepath.Join(root, filepath.Base(dest)) } } From 6e208b7f1c8130735018940ff95bf8b3662cab72 Mon Sep 17 00:00:00 2001 From: Alan Thompson Date: Sun, 26 Oct 2014 19:57:52 -0700 Subject: [PATCH 151/592] Update faq.md Minor language clarification. --- docs/sources/faq.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/sources/faq.md b/docs/sources/faq.md index 531afc3ea7..5e16698436 100644 --- a/docs/sources/faq.md +++ b/docs/sources/faq.md @@ -144,7 +144,7 @@ Currently the recommended way to link containers is via the link primitive. You can see details of how to [work with links here](/userguide/dockerlinks). -Also of useful when enabling more flexible service portability is the +Also useful for more flexible service portability is the [Ambassador linking pattern](/articles/ambassador_pattern_linking/). ### How do I run more than one process in a Docker container? From 9a03652d9614e9d0a816bab2d2c672236d2d3d23 Mon Sep 17 00:00:00 2001 From: Joe Ferguson Date: Wed, 22 Oct 2014 15:14:02 -0700 Subject: [PATCH 152/592] Cleanup docs Dockerfile, so it matches best practices. Docker-DCO-1.1-Signed-off-by: Joseph Ferguson (github: yosifkit) --- docs/Dockerfile | 76 ++++++++++++++++++++++++++++--------------------- 1 file changed, 43 insertions(+), 33 deletions(-) diff --git a/docs/Dockerfile b/docs/Dockerfile index 3c58193b99..0e5d3ae60d 100644 --- a/docs/Dockerfile +++ b/docs/Dockerfile @@ -1,49 +1,59 @@ # # See the top level Makefile in https://github.com/docker/docker for usage. 
# -FROM debian:jessie -MAINTAINER Sven Dowideit (@SvenDowideit) +FROM debian:jessie +MAINTAINER Sven Dowideit (@SvenDowideit) -RUN apt-get update && apt-get install -y make python-pip python-setuptools vim-tiny git gettext python-dev libssl-dev +RUN apt-get update \ + && apt-get install -y \ + gettext \ + git \ + libssl-dev \ + make \ + python-dev \ + python-pip \ + python-setuptools \ + vim-tiny -RUN pip install mkdocs +RUN pip install mkdocs # add MarkdownTools to get transclusion # (future development) -#RUN easy_install -U setuptools -#RUN pip install MarkdownTools2 +#RUN easy_install -U setuptools +#RUN pip install MarkdownTools2 # this version works, the current versions fail in different ways -RUN pip install awscli==1.4.4 pyopenssl==0.12 - -# make sure the git clone is not an old cache - we've published old versions a few times now -ENV CACHE_BUST Jul2014 +RUN pip install awscli==1.4.4 pyopenssl==0.12 # get my sitemap.xml branch of mkdocs and use that for now -RUN git clone https://github.com/SvenDowideit/mkdocs &&\ - cd mkdocs/ &&\ - git checkout docker-markdown-merge &&\ - ./setup.py install +# commit hash of the newest commit of SvenDowideit/mkdocs on +# docker-markdown-merge branch, it is used to break docker cache +# see: https://github.com/SvenDowideit/mkdocs/tree/docker-markdown-merge +RUN git clone -b docker-markdown-merge https://github.com/SvenDowideit/mkdocs \ + && cd mkdocs/ \ + && git checkout ad32549c452963b8854951d6783f4736c0f7c5d5 \ + && ./setup.py install -ADD . /docs -ADD MAINTAINERS /docs/sources/humans.txt -WORKDIR /docs +COPY . /docs +COPY MAINTAINERS /docs/sources/humans.txt +WORKDIR /docs -RUN VERSION=$(cat /docs/VERSION) &&\ - MAJOR_MINOR="${VERSION%.*}" &&\ - for i in $(seq $MAJOR_MINOR -0.1 1.0) ; do echo "

  • Version v$i
  • " ; done > /docs/sources/versions.html_fragment &&\ - GIT_BRANCH=$(cat /docs/GIT_BRANCH) &&\ - GITCOMMIT=$(cat /docs/GITCOMMIT) &&\ - AWS_S3_BUCKET=$(cat /docs/AWS_S3_BUCKET) &&\ - BUILD_DATE=$(date) &&\ - sed -i "s/\$VERSION/$VERSION/g" /docs/theme/mkdocs/base.html &&\ - sed -i "s/\$MAJOR_MINOR/v$MAJOR_MINOR/g" /docs/theme/mkdocs/base.html &&\ - sed -i "s/\$GITCOMMIT/$GITCOMMIT/g" /docs/theme/mkdocs/base.html &&\ - sed -i "s/\$GIT_BRANCH/$GIT_BRANCH/g" /docs/theme/mkdocs/base.html &&\ - sed -i "s/\$BUILD_DATE/$BUILD_DATE/g" /docs/theme/mkdocs/base.html &&\ - sed -i "s/\$AWS_S3_BUCKET/$AWS_S3_BUCKET/g" /docs/theme/mkdocs/base.html +RUN VERSION=$(cat VERSION) \ + && MAJOR_MINOR="${VERSION%.*}" \ + && for i in $(seq $MAJOR_MINOR -0.1 1.0); do \ + echo "
  • Version v$i
  • "; \ + done > sources/versions.html_fragment \ + && GIT_BRANCH=$(cat GIT_BRANCH) \ + && GITCOMMIT=$(cat GITCOMMIT) \ + && AWS_S3_BUCKET=$(cat AWS_S3_BUCKET) \ + && BUILD_DATE=$(date) \ + && sed -i "s/\$VERSION/$VERSION/g" theme/mkdocs/base.html \ + && sed -i "s/\$MAJOR_MINOR/v$MAJOR_MINOR/g" theme/mkdocs/base.html \ + && sed -i "s/\$GITCOMMIT/$GITCOMMIT/g" .heme/mkdocs/base.html \ + && sed -i "s/\$GIT_BRANCH/$GIT_BRANCH/g" theme/mkdocs/base.html \ + && sed -i "s/\$BUILD_DATE/$BUILD_DATE/g" theme/mkdocs/base.html \ + && sed -i "s/\$AWS_S3_BUCKET/$AWS_S3_BUCKET/g" theme/mkdocs/base.html -# note, EXPOSE is only last because of https://github.com/docker/docker/issues/3525 -EXPOSE 8000 +EXPOSE 8000 -CMD ["mkdocs", "serve"] +CMD ["mkdocs", "serve"] From be49867cab663b5bdcf7804f3d2504f056db9db1 Mon Sep 17 00:00:00 2001 From: Erik Hollensbe Date: Sat, 25 Oct 2014 17:58:57 +0000 Subject: [PATCH 153/592] builder: handle escapes without swallowing all of them. Docker-DCO-1.1-Signed-off-by: Erik Hollensbe (github: erikh) --- builder/support.go | 3 +- integration-cli/docker_cli_build_test.go | 86 ++++++++++++++++++++++++ 2 files changed, 88 insertions(+), 1 deletion(-) diff --git a/builder/support.go b/builder/support.go index 6c7ac4096e..6833457f3a 100644 --- a/builder/support.go +++ b/builder/support.go @@ -24,8 +24,9 @@ func (b *Builder) replaceEnv(str string) string { continue } + prefix := match[:idx] stripped := match[idx+2:] - str = strings.Replace(str, match, "$"+stripped, -1) + str = strings.Replace(str, match, prefix+"$"+stripped, -1) continue } diff --git a/integration-cli/docker_cli_build_test.go b/integration-cli/docker_cli_build_test.go index 80d4ad1488..ec1cb5bcc1 100644 --- a/integration-cli/docker_cli_build_test.go +++ b/integration-cli/docker_cli_build_test.go @@ -15,6 +15,92 @@ import ( "github.com/docker/docker/pkg/archive" ) +func TestBuildHandleEscapes(t *testing.T) { + name := "testbuildhandleescapes" + + defer deleteImages(name) + + _, err := buildImage(name, + ` + FROM scratch + ENV FOO bar + VOLUME ${FOO} + `, true) + + if err != nil { + t.Fatal(err) + } + + var result map[string]map[string]struct{} + + res, err := inspectFieldJSON(name, "Config.Volumes") + if err != nil { + t.Fatal(err) + } + + if err = unmarshalJSON([]byte(res), &result); err != nil { + t.Fatal(err) + } + + if _, ok := result["bar"]; !ok { + t.Fatal("Could not find volume bar set from env foo in volumes table") + } + + _, err = buildImage(name, + ` + FROM scratch + ENV FOO bar + VOLUME \${FOO} + `, true) + + if err != nil { + t.Fatal(err) + } + + res, err = inspectFieldJSON(name, "Config.Volumes") + if err != nil { + t.Fatal(err) + } + + if err = unmarshalJSON([]byte(res), &result); err != nil { + t.Fatal(err) + } + + if _, ok := result["${FOO}"]; !ok { + t.Fatal("Could not find volume ${FOO} set from env foo in volumes table") + } + + // this test in particular provides *7* backslashes and expects 6 to come back. + // Like above, the first escape is swallowed and the rest are treated as + // literals, this one is just less obvious because of all the character noise. 
+ + _, err = buildImage(name, + ` + FROM scratch + ENV FOO bar + VOLUME \\\\\\\${FOO} + `, true) + + if err != nil { + t.Fatal(err) + } + + res, err = inspectFieldJSON(name, "Config.Volumes") + if err != nil { + t.Fatal(err) + } + + if err = unmarshalJSON([]byte(res), &result); err != nil { + t.Fatal(err) + } + + if _, ok := result[`\\\\\\${FOO}`]; !ok { + t.Fatal(`Could not find volume \\\\\\${FOO} set from env foo in volumes table`) + } + + logDone("build - handle escapes") +} + func TestBuildOnBuildLowercase(t *testing.T) { name := "testbuildonbuildlowercase" name2 := "testbuildonbuildlowercase2" From 4e74cd498b66e494b3336118a19c02000b282251 Mon Sep 17 00:00:00 2001 From: Erik Hollensbe Date: Sat, 25 Oct 2014 18:29:18 +0000 Subject: [PATCH 154/592] builder: whitelist verbs useful for environment replacement. Docker-DCO-1.1-Signed-off-by: Erik Hollensbe (github: erikh) --- builder/evaluator.go | 20 ++- integration-cli/docker_cli_build_test.go | 184 ++++++++++++++++++++++- 2 files changed, 201 insertions(+), 3 deletions(-) diff --git a/builder/evaluator.go b/builder/evaluator.go index d229270d0a..645038bb1d 100644 --- a/builder/evaluator.go +++ b/builder/evaluator.go @@ -41,6 +41,17 @@ var ( ErrDockerfileEmpty = errors.New("Dockerfile cannot be empty") ) +// Environment variable interpolation will happen on these statements only. +var replaceEnvAllowed = map[string]struct{}{ + "env": {}, + "add": {}, + "copy": {}, + "workdir": {}, + "expose": {}, + "volume": {}, + "user": {}, +} + var evaluateTable map[string]func(*Builder, []string, map[string]bool, string) error func init() { @@ -196,13 +207,18 @@ func (b *Builder) dispatch(stepN int, ast *parser.Node) error { if cmd == "onbuild" { ast = ast.Next.Children[0] - strs = append(strs, b.replaceEnv(ast.Value)) + strs = append(strs, ast.Value) msg += " " + ast.Value } for ast.Next != nil { ast = ast.Next - strs = append(strs, b.replaceEnv(ast.Value)) + var str string + str = ast.Value + if _, ok := replaceEnvAllowed[cmd]; ok { + str = b.replaceEnv(ast.Value) + } + strs = append(strs, str) msg += " " + ast.Value } diff --git a/integration-cli/docker_cli_build_test.go b/integration-cli/docker_cli_build_test.go index ec1cb5bcc1..d100b5a4a6 100644 --- a/integration-cli/docker_cli_build_test.go +++ b/integration-cli/docker_cli_build_test.go @@ -2,6 +2,7 @@ package main import ( "archive/tar" + "encoding/json" "fmt" "io/ioutil" "os" @@ -15,6 +16,186 @@ import ( "github.com/docker/docker/pkg/archive" ) +func TestBuildEnvironmentReplacementUser(t *testing.T) { + name := "testbuildenvironmentreplacement" + defer deleteImages(name) + + _, err := buildImage(name, ` + FROM scratch + ENV user foo + USER ${user} + `, true) + if err != nil { + t.Fatal(err) + } + + res, err := inspectFieldJSON(name, "Config.User") + if err != nil { + t.Fatal(err) + } + + if res != `"foo"` { + t.Fatal("User foo from environment not in Config.User on image") + } + + logDone("build - user environment replacement") +} + +func TestBuildEnvironmentReplacementVolume(t *testing.T) { + name := "testbuildenvironmentreplacement" + defer deleteImages(name) + + _, err := buildImage(name, ` + FROM scratch + ENV volume /quux + VOLUME ${volume} + `, true) + if err != nil { + t.Fatal(err) + } + + res, err := inspectFieldJSON(name, "Config.Volumes") + if err != nil { + t.Fatal(err) + } + + var volumes map[string]interface{} + + if err := json.Unmarshal([]byte(res), &volumes); err != nil { + t.Fatal(err) + } + + if _, ok := volumes["/quux"]; !ok { + t.Fatal("Volume /quux from environment not in 
Config.Volumes on image") + } + + logDone("build - volume environment replacement") +} + +func TestBuildEnvironmentReplacementExpose(t *testing.T) { + name := "testbuildenvironmentreplacement" + defer deleteImages(name) + + _, err := buildImage(name, ` + FROM scratch + ENV port 80 + EXPOSE ${port} + `, true) + if err != nil { + t.Fatal(err) + } + + res, err := inspectFieldJSON(name, "Config.ExposedPorts") + if err != nil { + t.Fatal(err) + } + + var exposedPorts map[string]interface{} + + if err := json.Unmarshal([]byte(res), &exposedPorts); err != nil { + t.Fatal(err) + } + + if _, ok := exposedPorts["80/tcp"]; !ok { + t.Fatal("Exposed port 80 from environment not in Config.ExposedPorts on image") + } + + logDone("build - expose environment replacement") +} + +func TestBuildEnvironmentReplacementWorkdir(t *testing.T) { + name := "testbuildenvironmentreplacement" + defer deleteImages(name) + + _, err := buildImage(name, ` + FROM busybox + ENV MYWORKDIR /work + RUN mkdir ${MYWORKDIR} + WORKDIR ${MYWORKDIR} + `, true) + + if err != nil { + t.Fatal(err) + } + + logDone("build - workdir environment replacement") +} + +func TestBuildEnvironmentReplacementAddCopy(t *testing.T) { + name := "testbuildenvironmentreplacement" + defer deleteImages(name) + + ctx, err := fakeContext(` + FROM scratch + ENV baz foo + ENV quux bar + ENV dot . + + ADD ${baz} ${dot} + COPY ${quux} ${dot} + `, + map[string]string{ + "foo": "test1", + "bar": "test2", + }) + + if err != nil { + t.Fatal(err) + } + + if _, err := buildImageFromContext(name, ctx, true); err != nil { + t.Fatal(err) + } + + logDone("build - add/copy environment replacement") +} + +func TestBuildEnvironmentReplacementEnv(t *testing.T) { + name := "testbuildenvironmentreplacement" + + defer deleteImages(name) + + _, err := buildImage(name, + ` + FROM scratch + ENV foo foo + ENV bar ${foo} + `, true) + + if err != nil { + t.Fatal(err) + } + + res, err := inspectFieldJSON(name, "Config.Env") + if err != nil { + t.Fatal(err) + } + + envResult := []string{} + + if err = unmarshalJSON([]byte(res), &envResult); err != nil { + t.Fatal(err) + } + + found := false + + for _, env := range envResult { + parts := strings.SplitN(env, "=", 2) + if parts[0] == "bar" { + found = true + if parts[1] != "foo" { + t.Fatal("Could not find replaced var for env `bar`: got %q instead of `foo`", parts[1]) + } + } + } + + if !found { + t.Fatal("Never found the `bar` env variable") + } + + logDone("build - env environment replacement") +} + func TestBuildHandleEscapes(t *testing.T) { name := "testbuildhandleescapes" @@ -170,7 +351,7 @@ func TestBuildEnvOverwrite(t *testing.T) { ` FROM busybox ENV TEST foo - CMD echo \${TEST} + CMD echo ${TEST} `, true) @@ -2618,6 +2799,7 @@ func TestBuildEnvUsage(t *testing.T) { name := "testbuildenvusage" defer deleteImages(name) dockerfile := `FROM busybox +ENV HOME /root ENV PATH $HOME/bin:$PATH ENV PATH /tmp:$PATH RUN [ "$PATH" = "/tmp:$HOME/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin" ] From b6db23cffe942b8d94c80d1e9b3f1f6fca87d139 Mon Sep 17 00:00:00 2001 From: Josh Hawn Date: Mon, 27 Oct 2014 11:38:22 -0700 Subject: [PATCH 155/592] Use archive.CopyWithTar in vfs.Create The vfs storage driver currently shells out to the `cp` binary on the host system to perform an 'archive' copy of the base image to a new directory. 
The archive option preserves the modified time of the files which are created but there was an issue where it was unable to preserve the modified time of copied symbolic links on some host systems with an outdated version of `cp`. This change no longer relies on the host system implementation and instead utilizes the `CopyWithTar` function found in `pkg/archive` which is used to copy from source to destination directory using a Tar archive, which should correctly preserve file attributes. Docker-DCO-1.1-Signed-off-by: Josh Hawn (github: jlhawn) --- daemon/graphdriver/vfs/driver.go | 18 ++---------------- 1 file changed, 2 insertions(+), 16 deletions(-) diff --git a/daemon/graphdriver/vfs/driver.go b/daemon/graphdriver/vfs/driver.go index a186060d03..1076eb38dd 100644 --- a/daemon/graphdriver/vfs/driver.go +++ b/daemon/graphdriver/vfs/driver.go @@ -8,6 +8,7 @@ import ( "path" "github.com/docker/docker/daemon/graphdriver" + "github.com/docker/docker/pkg/archive" "github.com/docker/libcontainer/label" ) @@ -46,21 +47,6 @@ func isGNUcoreutils() bool { return false } -func copyDir(src, dst string) error { - argv := make([]string, 0, 4) - - if isGNUcoreutils() { - argv = append(argv, "-aT", "--reflink=auto", src, dst) - } else { - argv = append(argv, "-a", src+"/.", dst+"/.") - } - - if output, err := exec.Command("cp", argv...).CombinedOutput(); err != nil { - return fmt.Errorf("Error VFS copying directory: %s (%s)", err, output) - } - return nil -} - func (d *Driver) Create(id, parent string) error { dir := d.dir(id) if err := os.MkdirAll(path.Dir(dir), 0700); err != nil { @@ -80,7 +66,7 @@ func (d *Driver) Create(id, parent string) error { if err != nil { return fmt.Errorf("%s: %s", parent, err) } - if err := copyDir(parentDir, dir); err != nil { + if err := archive.CopyWithTar(parentDir, dir); err != nil { return err } return nil From 0a37f836adef969e9e5aa9ce7b2c465f9441c1a5 Mon Sep 17 00:00:00 2001 From: Victor Vieux Date: Mon, 27 Oct 2014 17:45:38 +0000 Subject: [PATCH 156/592] update sysinfo to logrus Signed-off-by: Victor Vieux --- pkg/jsonlog/jsonlog.go | 3 ++- pkg/proxy/tcp_proxy.go | 3 ++- pkg/proxy/udp_proxy.go | 3 ++- pkg/sysinfo/sysinfo.go | 2 +- 4 files changed, 7 insertions(+), 4 deletions(-) diff --git a/pkg/jsonlog/jsonlog.go b/pkg/jsonlog/jsonlog.go index b0c61a803f..3a96d86f82 100644 --- a/pkg/jsonlog/jsonlog.go +++ b/pkg/jsonlog/jsonlog.go @@ -4,8 +4,9 @@ import ( "encoding/json" "fmt" "io" - "log" "time" + + log "github.com/Sirupsen/logrus" ) type JSONLog struct { diff --git a/pkg/proxy/tcp_proxy.go b/pkg/proxy/tcp_proxy.go index 1aa6d9fd70..eacf1427a3 100644 --- a/pkg/proxy/tcp_proxy.go +++ b/pkg/proxy/tcp_proxy.go @@ -2,9 +2,10 @@ package proxy import ( "io" - "log" "net" "syscall" + + log "github.com/Sirupsen/logrus" ) type TCPProxy struct { diff --git a/pkg/proxy/udp_proxy.go b/pkg/proxy/udp_proxy.go index ae6a7bbc42..f9f2d111e5 100644 --- a/pkg/proxy/udp_proxy.go +++ b/pkg/proxy/udp_proxy.go @@ -2,12 +2,13 @@ package proxy import ( "encoding/binary" - "log" "net" "strings" "sync" "syscall" "time" + + log "github.com/Sirupsen/logrus" ) const ( diff --git a/pkg/sysinfo/sysinfo.go b/pkg/sysinfo/sysinfo.go index 0c28719f61..001111f43d 100644 --- a/pkg/sysinfo/sysinfo.go +++ b/pkg/sysinfo/sysinfo.go @@ -2,10 +2,10 @@ package sysinfo import ( "io/ioutil" - "log" "os" "path" + log "github.com/Sirupsen/logrus" "github.com/docker/libcontainer/cgroups" ) From b65eb8d215576516b1d4bbb537968f08bbc8ff0e Mon Sep 17 00:00:00 2001 From: Igor Dolzhikov Date: Tue, 28 Oct 2014 01:04:36 
+0600 Subject: [PATCH 157/592] excluding unused transformation to []byte Signed-off-by: Igor Dolzhikov --- registry/session.go | 18 +++--------------- 1 file changed, 3 insertions(+), 15 deletions(-) diff --git a/registry/session.go b/registry/session.go index 0c5f01397a..8dbf136205 100644 --- a/registry/session.go +++ b/registry/session.go @@ -230,11 +230,7 @@ func (r *Session) GetRemoteTags(registries []string, repository string, token [] } result := make(map[string]string) - rawJSON, err := ioutil.ReadAll(res.Body) - if err != nil { - return nil, err - } - if err := json.Unmarshal(rawJSON, &result); err != nil { + if err := json.NewDecoder(res.Body).Decode(&result); err != nil { return nil, err } return result, nil @@ -305,12 +301,8 @@ func (r *Session) GetRepositoryData(remote string) (*RepositoryData, error) { endpoints = append(endpoints, fmt.Sprintf("%s://%s/v1/", r.indexEndpoint.URL.Scheme, req.URL.Host)) } - checksumsJSON, err := ioutil.ReadAll(res.Body) - if err != nil { - return nil, err - } remoteChecksums := []*ImgData{} - if err := json.Unmarshal(checksumsJSON, &remoteChecksums); err != nil { + if err := json.NewDecoder(res.Body).Decode(&remoteChecksums); err != nil { return nil, err } @@ -590,12 +582,8 @@ func (r *Session) SearchRepositories(term string) (*SearchResults, error) { if res.StatusCode != 200 { return nil, utils.NewHTTPRequestError(fmt.Sprintf("Unexepected status code %d", res.StatusCode), res) } - rawData, err := ioutil.ReadAll(res.Body) - if err != nil { - return nil, err - } result := new(SearchResults) - err = json.Unmarshal(rawData, result) + err = json.NewDecoder(res.Body).Decode(result) return result, err } From 24545c18c35620c211003561dc482d66ee6d0306 Mon Sep 17 00:00:00 2001 From: Erik Hollensbe Date: Mon, 27 Oct 2014 21:15:28 +0000 Subject: [PATCH 158/592] builder: Restore /bin/sh handling in CMD when entrypoint is specified with JSON Docker-DCO-1.1-Signed-off-by: Erik Hollensbe (github: erikh) --- builder/dispatchers.go | 2 +- integration-cli/docker_cli_build_test.go | 34 ++++++++++++++++++++++++ 2 files changed, 35 insertions(+), 1 deletion(-) diff --git a/builder/dispatchers.go b/builder/dispatchers.go index b3834c40b1..f2fdd35955 100644 --- a/builder/dispatchers.go +++ b/builder/dispatchers.go @@ -234,7 +234,7 @@ func run(b *Builder, args []string, attributes map[string]bool, original string) func cmd(b *Builder, args []string, attributes map[string]bool, original string) error { b.Config.Cmd = handleJsonArgs(args, attributes) - if !attributes["json"] && len(b.Config.Entrypoint) == 0 { + if !attributes["json"] { b.Config.Cmd = append([]string{"/bin/sh", "-c"}, b.Config.Cmd...) 
} diff --git a/integration-cli/docker_cli_build_test.go b/integration-cli/docker_cli_build_test.go index d100b5a4a6..a25d4f954f 100644 --- a/integration-cli/docker_cli_build_test.go +++ b/integration-cli/docker_cli_build_test.go @@ -16,6 +16,40 @@ import ( "github.com/docker/docker/pkg/archive" ) +func TestBuildShCmdJSONEntrypoint(t *testing.T) { + name := "testbuildshcmdjsonentrypoint" + defer deleteImages(name) + + _, err := buildImage( + name, + ` + FROM busybox + ENTRYPOINT ["/bin/echo"] + CMD echo test + `, + true) + + if err != nil { + t.Fatal(err) + } + + out, _, err := runCommandWithOutput( + exec.Command( + dockerBinary, + "run", + name)) + + if err != nil { + t.Fatal(err) + } + + if strings.TrimSpace(out) != "/bin/sh -c echo test" { + t.Fatal("CMD did not contain /bin/sh -c") + } + + logDone("build - CMD should always contain /bin/sh -c when specified without JSON") +} + func TestBuildEnvironmentReplacementUser(t *testing.T) { name := "testbuildenvironmentreplacement" defer deleteImages(name) From e377716b377ccc4854f810bbfc3e65a7858163ed Mon Sep 17 00:00:00 2001 From: Erik Hollensbe Date: Mon, 27 Oct 2014 20:05:29 +0000 Subject: [PATCH 159/592] builder: Update documentation WRT environment replacement Docker-DCO-1.1-Signed-off-by: Erik Hollensbe (github: erikh) --- docs/sources/reference/builder.md | 41 +++++++++++++++++++++++++++++++ 1 file changed, 41 insertions(+) diff --git a/docs/sources/reference/builder.md b/docs/sources/reference/builder.md index 4bb02e3e21..cf8e0a6e29 100644 --- a/docs/sources/reference/builder.md +++ b/docs/sources/reference/builder.md @@ -104,6 +104,47 @@ be treated as an argument. This allows statements like: Here is the set of instructions you can use in a `Dockerfile` for building images. +### Environment Replacement + +**Note:** prior to 1.3, `Dockerfile` environment variables were handled +similarly, in that they would be replaced as described below. However, there +was no formal definition on as to which instructions handled environment +replacement at the time. After 1.3 this behavior will be preserved and +canonical. + +Environment variables (declared with the `ENV` statement) can also be used in +certain instructions as variables to be interpreted by the `Dockerfile`. Escapes +are also handled for including variable-like syntax into a statement literally. + +Environment variables are notated in the `Dockerfile` either with +`$variable_name` or `${variable_name}`. They are treated equivalently and the +brace syntax is typically used to address issues with variable names with no +whitespace, like `${foo}_bar`. + +Escaping is possible by adding a `\` before the variable: `\$foo` or `\${foo}`, +for example, will translate to `$foo` and `${foo}` literals respectively. + +Example (parsed representation is displayed after the `#`): + + FROM busybox + ENV foo /bar + WORKDIR ${foo} # WORKDIR /bar + ADD . $foo # ADD . /bar + COPY \$foo /quux # COPY $foo /quux + +The instructions that handle environment variables in the `Dockerfile` are: + +* `ENV` +* `ADD` +* `COPY` +* `WORKDIR` +* `EXPOSE` +* `VOLUME` +* `USER` + +`ONBUILD` instructions are **NOT** supported for environment replacement, even +the instructions above. 
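
For reference, the substitution rule documented above can be modelled in a few lines of Go. This is only a sketch of the documented behaviour, not the builder's actual implementation; the regular expression and helper name are assumptions made for illustration.

```go
package main

import (
	"fmt"
	"regexp"
	"strings"
)

// Matches an optional escaping backslash followed by $name or ${name}.
var varRe = regexp.MustCompile(`\\?\$(\{[A-Za-z_][A-Za-z0-9_]*\}|[A-Za-z_][A-Za-z0-9_]*)`)

// expand applies the documented rule: $name and ${name} are replaced from
// env, while \$name and \${name} are kept as literals (minus the backslash).
func expand(s string, env map[string]string) string {
	return varRe.ReplaceAllStringFunc(s, func(m string) string {
		if strings.HasPrefix(m, `\`) {
			return m[1:] // escaped: drop the backslash, keep the literal text
		}
		name := strings.Trim(m[1:], "{}")
		return env[name]
	})
}

func main() {
	env := map[string]string{"foo": "/bar"}
	fmt.Println(expand("WORKDIR ${foo}", env))   // WORKDIR /bar
	fmt.Println(expand(`COPY \$foo /quux`, env)) // COPY $foo /quux
}
```
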
+ ## The `.dockerignore` file If a file named `.dockerignore` exists in the source repository, then it From 453552c8384929d8ae04dcf1c6954435c0111da0 Mon Sep 17 00:00:00 2001 From: Alexander Larsson Date: Tue, 19 Aug 2014 11:23:55 +0200 Subject: [PATCH 160/592] Add overlayfs graph backend This backend uses the overlayfs union filesystem for containers plus hard link file sharing for images. Each container/image can have a "root" subdirectory which is a plain filesystem hierarchy, or they can use overlayfs. If they use overlayfs there is a "upper" directory and a "lower-id" file, as well as "merged" and "work" directories. The "upper" directory has the upper layer of the overlay, and "lower-id" contains the id of the parent whose "root" directory shall be used as the lower layer in the overlay. The overlay itself is mounted in the "merged" directory, and the "work" dir is needed for overlayfs to work. When a overlay layer is created there are two cases, either the parent has a "root" dir, then we start out with a empty "upper" directory overlaid on the parents root. This is typically the case with the init layer of a container which is based on an image. If there is no "root" in the parent, we inherit the lower-id from the parent and start by making a copy if the parents "upper" dir. This is typically the case for a container layer which copies its parent -init upper layer. Additionally we also have a custom implementation of ApplyLayer which makes a recursive copy of the parent "root" layer using hardlinks to share file data, and then applies the layer on top of that. This means all chile images share file (but not directory) data with the parent. Docker-DCO-1.1-Signed-off-by: Alexander Larsson (github: alexlarsson) --- daemon/daemon_overlayfs.go | 7 + daemon/graphdriver/driver.go | 2 + daemon/graphdriver/overlayfs/copy.go | 157 ++++++++ daemon/graphdriver/overlayfs/overlayfs.go | 369 ++++++++++++++++++ .../graphdriver/overlayfs/overlayfs_test.go | 28 ++ 5 files changed, 563 insertions(+) create mode 100644 daemon/daemon_overlayfs.go create mode 100644 daemon/graphdriver/overlayfs/copy.go create mode 100644 daemon/graphdriver/overlayfs/overlayfs.go create mode 100644 daemon/graphdriver/overlayfs/overlayfs_test.go diff --git a/daemon/daemon_overlayfs.go b/daemon/daemon_overlayfs.go new file mode 100644 index 0000000000..e134b297a9 --- /dev/null +++ b/daemon/daemon_overlayfs.go @@ -0,0 +1,7 @@ +// +build !exclude_graphdriver_overlayfs + +package daemon + +import ( + _ "github.com/docker/docker/daemon/graphdriver/overlayfs" +) diff --git a/daemon/graphdriver/driver.go b/daemon/graphdriver/driver.go index 91040db97a..3eacd428cc 100644 --- a/daemon/graphdriver/driver.go +++ b/daemon/graphdriver/driver.go @@ -81,6 +81,8 @@ var ( "btrfs", "devicemapper", "vfs", + // experimental, has to be enabled manually for now + "overlayfs", } ErrNotSupported = errors.New("driver not supported") diff --git a/daemon/graphdriver/overlayfs/copy.go b/daemon/graphdriver/overlayfs/copy.go new file mode 100644 index 0000000000..4c8c6239ac --- /dev/null +++ b/daemon/graphdriver/overlayfs/copy.go @@ -0,0 +1,157 @@ +// +build linux + +package overlayfs + +import ( + "fmt" + "io" + "os" + "path/filepath" + "syscall" + + "github.com/docker/docker/pkg/system" +) + +type CopyFlags int + +const ( + CopyHardlink CopyFlags = 1 << iota +) + +func copyRegular(srcPath, dstPath string, mode os.FileMode) error { + srcFile, err := os.Open(srcPath) + if err != nil { + return err + } + defer srcFile.Close() + + dstFile, err := 
os.OpenFile(dstPath, os.O_WRONLY|os.O_CREATE, mode) + if err != nil { + return err + } + defer dstFile.Close() + + _, err = io.Copy(dstFile, srcFile) + + return err +} + +func copyXattr(srcPath, dstPath, attr string) error { + data, err := system.Lgetxattr(srcPath, attr) + if err != nil { + return err + } + if data != nil { + if err := system.Lsetxattr(dstPath, attr, data, 0); err != nil { + return err + } + } + return nil +} + +func copyDir(srcDir, dstDir string, flags CopyFlags) error { + err := filepath.Walk(srcDir, func(srcPath string, f os.FileInfo, err error) error { + if err != nil { + return err + } + + // Rebase path + relPath, err := filepath.Rel(srcDir, srcPath) + if err != nil { + return err + } + + dstPath := filepath.Join(dstDir, relPath) + if err != nil { + return err + } + + stat, ok := f.Sys().(*syscall.Stat_t) + if !ok { + return fmt.Errorf("Unable to get raw syscall.Stat_t data for %s", srcPath) + } + + switch f.Mode() & os.ModeType { + case 0: // Regular file + if flags&CopyHardlink != 0 { + if err := os.Link(srcPath, dstPath); err != nil { + return err + } + } else { + if err := copyRegular(srcPath, dstPath, f.Mode()); err != nil { + return err + } + } + + case os.ModeDir: + if err := os.Mkdir(dstPath, f.Mode()); err != nil && !os.IsExist(err) { + return err + } + + case os.ModeSymlink: + link, err := os.Readlink(srcPath) + if err != nil { + return err + } + + if err := os.Symlink(link, dstPath); err != nil { + return err + } + + case os.ModeNamedPipe: + fallthrough + case os.ModeSocket: + if err := syscall.Mkfifo(dstPath, stat.Mode); err != nil { + return err + } + + case os.ModeDevice: + if err := syscall.Mknod(dstPath, stat.Mode, int(stat.Rdev)); err != nil { + return err + } + + default: + return fmt.Errorf("Unknown file type for %s\n", srcPath) + } + + if err := os.Lchown(dstPath, int(stat.Uid), int(stat.Gid)); err != nil { + return err + } + + if err := copyXattr(srcPath, dstPath, "security.capability"); err != nil { + return err + } + + // We need to copy this attribute if it appears in an overlayfs upper layer, as + // this function is used to copy those. It is set by overlayfs if a directory + // is removed and then re-created and should not inherit anything from the + // same dir in the lower dir. + if err := copyXattr(srcPath, dstPath, "trusted.overlay.opaque"); err != nil { + return err + } + + isSymlink := f.Mode()&os.ModeSymlink != 0 + + // There is no LChmod, so ignore mode for symlink. 
Also, this + // must happen after chown, as that can modify the file mode + if !isSymlink { + if err := os.Chmod(dstPath, f.Mode()); err != nil { + return err + } + } + + ts := []syscall.Timespec{stat.Atim, stat.Mtim} + // syscall.UtimesNano doesn't support a NOFOLLOW flag atm, and + if !isSymlink { + if err := system.UtimesNano(dstPath, ts); err != nil { + return err + } + } else { + if err := system.LUtimesNano(dstPath, ts); err != nil { + return err + } + } + return nil + }) + return err +} diff --git a/daemon/graphdriver/overlayfs/overlayfs.go b/daemon/graphdriver/overlayfs/overlayfs.go new file mode 100644 index 0000000000..f2f478dc4a --- /dev/null +++ b/daemon/graphdriver/overlayfs/overlayfs.go @@ -0,0 +1,369 @@ +// +build linux + +package overlayfs + +import ( + "bufio" + "fmt" + "io/ioutil" + "os" + "os/exec" + "path" + "strings" + "sync" + "syscall" + + log "github.com/Sirupsen/logrus" + "github.com/docker/docker/daemon/graphdriver" + "github.com/docker/docker/pkg/archive" + "github.com/docker/libcontainer/label" +) + +// This is a small wrapper over the NaiveDiffWriter that lets us have a custom +// implementation of ApplyDiff() + +var ( + ErrApplyDiffFallback = fmt.Errorf("Fall back to normal ApplyDiff") +) + +type ApplyDiffProtoDriver interface { + graphdriver.ProtoDriver + ApplyDiff(id, parent string, diff archive.ArchiveReader) (bytes int64, err error) +} + +type naiveDiffDriverWithApply struct { + graphdriver.Driver + applyDiff ApplyDiffProtoDriver +} + +func NaiveDiffDriverWithApply(driver ApplyDiffProtoDriver) graphdriver.Driver { + return &naiveDiffDriverWithApply{ + Driver: graphdriver.NaiveDiffDriver(driver), + applyDiff: driver, + } +} + +func (d *naiveDiffDriverWithApply) ApplyDiff(id, parent string, diff archive.ArchiveReader) (int64, error) { + b, err := d.applyDiff.ApplyDiff(id, parent, diff) + if err == ErrApplyDiffFallback { + return d.Driver.ApplyDiff(id, parent, diff) + } + return b, err +} + +// This backend uses the overlayfs union filesystem for containers +// plus hard link file sharing for images. + +// Each container/image can have a "root" subdirectory which is a plain +// filesystem hierarchy, or they can use overlayfs. + +// If they use overlayfs there is a "upper" directory and a "lower-id" +// file, as well as "merged" and "work" directories. The "upper" +// directory has the upper layer of the overlay, and "lower-id" contains +// the id of the parent whose "root" directory shall be used as the lower +// layer in the overlay. The overlay itself is mounted in the "merged" +// directory, and the "work" dir is needed for overlayfs to work. + +// When a overlay layer is created there are two cases, either the +// parent has a "root" dir, then we start out with a empty "upper" +// directory overlaid on the parents root. This is typically the +// case with the init layer of a container which is based on an image. +// If there is no "root" in the parent, we inherit the lower-id from +// the parent and start by making a copy if the parents "upper" dir. +// This is typically the case for a container layer which copies +// its parent -init upper layer. + +// Additionally we also have a custom implementation of ApplyLayer +// which makes a recursive copy of the parent "root" layer using +// hardlinks to share file data, and then applies the layer on top +// of that. This means all child images share file (but not directory) +// data with the parent. 
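
// (Illustrative aside, not part of this patch.) Concretely, the layout
// described above works out to the following entries under the driver
// home directory (d.dir(id) is home/id); the home path itself varies by
// daemon configuration and is not spelled out here:
//
//   <home>/<image-id>/root           full filesystem tree for a "root" layer
//   <home>/<container-id>/lower-id   file holding the id whose "root" is the lower layer
//   <home>/<container-id>/upper      upper (writable) overlay layer
//   <home>/<container-id>/work       work dir required by overlayfs
//   <home>/<container-id>/merged     mount point of the assembled overlay
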
+ +type ActiveMount struct { + count int + path string + mounted bool +} +type Driver struct { + home string + sync.Mutex // Protects concurrent modification to active + active map[string]*ActiveMount +} + +func init() { + graphdriver.Register("overlayfs", Init) +} + +func Init(home string, options []string) (graphdriver.Driver, error) { + if err := supportsOverlayfs(); err != nil { + return nil, graphdriver.ErrNotSupported + } + + // Create the driver home dir + if err := os.MkdirAll(home, 0755); err != nil && !os.IsExist(err) { + return nil, err + } + + d := &Driver{ + home: home, + active: make(map[string]*ActiveMount), + } + + return NaiveDiffDriverWithApply(d), nil +} + +func supportsOverlayfs() error { + // We can try to modprobe overlayfs first before looking at + // proc/filesystems for when overlayfs is supported + exec.Command("modprobe", "overlayfs").Run() + + f, err := os.Open("/proc/filesystems") + if err != nil { + return err + } + defer f.Close() + + s := bufio.NewScanner(f) + for s.Scan() { + if strings.Contains(s.Text(), "overlayfs") { + return nil + } + } + return graphdriver.ErrNotSupported +} + +func (d *Driver) String() string { + return "overlayfs" +} + +func (d *Driver) Status() [][2]string { + return nil +} + +func (d *Driver) Cleanup() error { + return nil +} + +func (d *Driver) Create(id string, parent string) (retErr error) { + dir := d.dir(id) + if err := os.MkdirAll(path.Dir(dir), 0700); err != nil { + return err + } + if err := os.Mkdir(dir, 0700); err != nil { + return err + } + + defer func() { + // Clean up on failure + if retErr != nil { + os.RemoveAll(dir) + } + }() + + // Toplevel images are just a "root" dir + if parent == "" { + if err := os.Mkdir(path.Join(dir, "root"), 0755); err != nil { + return err + } + return nil + } + + parentDir := d.dir(parent) + + // Ensure parent exists + if _, err := os.Lstat(parentDir); err != nil { + return err + } + + // If parent has a root, just do a overlayfs to it + parentRoot := path.Join(parentDir, "root") + + if s, err := os.Lstat(parentRoot); err == nil { + if err := os.Mkdir(path.Join(dir, "upper"), s.Mode()); err != nil { + return err + } + if err := os.Mkdir(path.Join(dir, "work"), 0700); err != nil { + return err + } + if err := os.Mkdir(path.Join(dir, "merged"), 0700); err != nil { + return err + } + if err := ioutil.WriteFile(path.Join(dir, "lower-id"), []byte(parent), 0666); err != nil { + return err + } + return nil + } + + // Otherwise, copy the upper and the lower-id from the parent + + lowerId, err := ioutil.ReadFile(path.Join(parentDir, "lower-id")) + if err != nil { + return err + } + + if err := ioutil.WriteFile(path.Join(dir, "lower-id"), lowerId, 0666); err != nil { + return err + } + + parentUpperDir := path.Join(parentDir, "upper") + s, err := os.Lstat(parentUpperDir) + if err != nil { + return err + } + + upperDir := path.Join(dir, "upper") + if err := os.Mkdir(upperDir, s.Mode()); err != nil { + return err + } + if err := os.Mkdir(path.Join(dir, "work"), 0700); err != nil { + return err + } + if err := os.Mkdir(path.Join(dir, "merged"), 0700); err != nil { + return err + } + + return copyDir(parentUpperDir, upperDir, 0) +} + +func (d *Driver) dir(id string) string { + return path.Join(d.home, id) +} + +func (d *Driver) Remove(id string) error { + dir := d.dir(id) + if _, err := os.Stat(dir); err != nil { + return err + } + return os.RemoveAll(dir) +} + +func (d *Driver) Get(id string, mountLabel string) (string, error) { + // Protect the d.active from concurrent access + d.Lock() + defer 
d.Unlock() + + mount := d.active[id] + if mount != nil { + mount.count++ + return mount.path, nil + } else { + mount = &ActiveMount{count: 1} + } + + dir := d.dir(id) + if _, err := os.Stat(dir); err != nil { + return "", err + } + + // If id has a root, just return it + rootDir := path.Join(dir, "root") + if _, err := os.Stat(rootDir); err == nil { + mount.path = rootDir + d.active[id] = mount + return mount.path, nil + } + + lowerId, err := ioutil.ReadFile(path.Join(dir, "lower-id")) + if err != nil { + return "", err + } + lowerDir := path.Join(d.dir(string(lowerId)), "root") + upperDir := path.Join(dir, "upper") + workDir := path.Join(dir, "work") + mergedDir := path.Join(dir, "merged") + + opts := fmt.Sprintf("lowerdir=%s,upperdir=%s,workdir=%s", lowerDir, upperDir, workDir) + if err := syscall.Mount("overlayfs", mergedDir, "overlayfs", 0, label.FormatMountLabel(opts, mountLabel)); err != nil { + return "", err + } + mount.path = mergedDir + mount.mounted = true + d.active[id] = mount + + return mount.path, nil +} + +func (d *Driver) Put(id string) { + // Protect the d.active from concurrent access + d.Lock() + defer d.Unlock() + + mount := d.active[id] + if mount == nil { + log.Debugf("Put on a non-mounted device %s", id) + return + } + + mount.count-- + if mount.count > 0 { + return + } + + if mount.mounted { + if err := syscall.Unmount(mount.path, 0); err != nil { + log.Debugf("Failed to unmount %s overlayfs: %v", id, err) + } + } + + delete(d.active, id) +} + +func (d *Driver) ApplyDiff(id string, parent string, diff archive.ArchiveReader) (bytes int64, err error) { + dir := d.dir(id) + + if parent == "" { + return 0, ErrApplyDiffFallback + } + + parentRootDir := path.Join(d.dir(parent), "root") + if _, err := os.Stat(parentRootDir); err != nil { + return 0, ErrApplyDiffFallback + } + + // We now know there is a parent, and it has a "root" directory containing + // the full root filesystem. We can just hardlink it and apply the + // layer. 
This relies on two things: + // 1) ApplyDiff is only run once on a clean (no writes to upper layer) container + // 2) ApplyDiff doesn't do any in-place writes to files (would break hardlinks) + // These are all currently true and are not expected to break + + tmpRootDir, err := ioutil.TempDir(dir, "tmproot") + if err != nil { + return 0, err + } + defer func() { + if err != nil { + os.RemoveAll(tmpRootDir) + } else { + os.RemoveAll(path.Join(dir, "upper")) + os.RemoveAll(path.Join(dir, "work")) + os.RemoveAll(path.Join(dir, "merged")) + os.RemoveAll(path.Join(dir, "lower-id")) + } + }() + + if err = copyDir(parentRootDir, tmpRootDir, CopyHardlink); err != nil { + return 0, err + } + + if err := archive.ApplyLayer(tmpRootDir, diff); err != nil { + return 0, err + } + + rootDir := path.Join(dir, "root") + if err := os.Rename(tmpRootDir, rootDir); err != nil { + return 0, err + } + + changes, err := archive.ChangesDirs(rootDir, parentRootDir) + if err != nil { + return 0, err + } + + return archive.ChangesSize(rootDir, changes), nil +} + +func (d *Driver) Exists(id string) bool { + _, err := os.Stat(d.dir(id)) + return err == nil +} diff --git a/daemon/graphdriver/overlayfs/overlayfs_test.go b/daemon/graphdriver/overlayfs/overlayfs_test.go new file mode 100644 index 0000000000..7ab71d0e64 --- /dev/null +++ b/daemon/graphdriver/overlayfs/overlayfs_test.go @@ -0,0 +1,28 @@ +package overlayfs + +import ( + "github.com/docker/docker/daemon/graphdriver/graphtest" + "testing" +) + +// This avoids creating a new driver for each test if all tests are run +// Make sure to put new tests between TestOverlayfsSetup and TestOverlayfsTeardown +func TestOverlayfsSetup(t *testing.T) { + graphtest.GetDriver(t, "overlayfs") +} + +func TestOverlayfsCreateEmpty(t *testing.T) { + graphtest.DriverTestCreateEmpty(t, "overlayfs") +} + +func TestOverlayfsCreateBase(t *testing.T) { + graphtest.DriverTestCreateBase(t, "overlayfs") +} + +func TestOverlayfsCreateSnap(t *testing.T) { + graphtest.DriverTestCreateSnap(t, "overlayfs") +} + +func TestOverlayfsTeardown(t *testing.T) { + graphtest.PutDriver(t) +} From 0d97e082c3b849a6901992e653b8963e8ef10f54 Mon Sep 17 00:00:00 2001 From: Josh Hawn Date: Mon, 27 Oct 2014 17:23:50 -0700 Subject: [PATCH 161/592] Exclude `.wh..wh.*` AUFS metadata on layer export In an effort to make layer content 'stable' between import and export from two different graph drivers, we must resolve an issue where AUFS produces metadata files in its layers which other drivers explicitly ignore when importing. The issue presents itself like this: - Generate a layer using AUFS - On commit of that container, the new stored layer contains AUFS metadata files/dirs. The stored layer content has some tarsum value: '1234567' - `docker save` that image to a USB drive and `docker load` into another docker engine instance which uses another graph driver, say 'btrfs' - On load, this graph driver explicitly ignores any AUFS metadata that it encounters. The stored layer content now has some different tarsum value: 'abcdefg'. The only (apparent) useful aufs metadata to keep are the psuedo link files located at `/.wh..wh.plink/`. Thes files hold information at the RW layer about hard linked files between this layer and another layer. The other graph drivers make sure to copy up these psuedo linked files but I've tested out a few different situations and it seems that this is unnecessary (In my test, AUFS already copies up the other hard linked files to the RW layer). 
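For illustration only (this sketch is not part of the change, and the archive
package's own exclusion matching may differ in detail), the following
standalone Go program shows which top-level entry names a `.wh..wh.*` pattern
would catch and which it would leave untouched; the sample names are invented
for the example:

```
package main

import (
	"fmt"
	"path/filepath"
)

func main() {
	// Sample entry names, invented for this example, as they might appear
	// at the top level of an AUFS diff directory.
	entries := []string{
		".wh..wh.plnk",     // AUFS hard-link bookkeeping dir (metadata)
		".wh..wh.orph",     // AUFS orphan dir (metadata)
		".wh.removed-file", // ordinary whiteout marker, must be kept
		"etc/hostname",     // regular file, must be kept
	}
	for _, name := range entries {
		// filepath.Match is used here only to demonstrate what the
		// pattern covers; the archive package may match differently.
		excluded, err := filepath.Match(".wh..wh.*", name)
		if err != nil {
			panic(err)
		}
		fmt.Printf("%-18s excluded=%v\n", name, excluded)
	}
}
```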
This changeset adds explicit exclusion of the AUFS metadata files and directories (NOTE: not the whiteout files!) on commit of a container using the AUFS storage driver. Also included is a change to the archive package. It now explicitly ignores the root directory from being included in the resulting tar archive for 2 reasons: 1) it's unnecessary. 2) It's another difference between what other graph drivers produce when exporting a layer to a tar archive. Docker-DCO-1.1-Signed-off-by: Josh Hawn (github: jlhawn) --- daemon/graphdriver/aufs/aufs.go | 1 + pkg/archive/archive.go | 4 +++- 2 files changed, 4 insertions(+), 1 deletion(-) diff --git a/daemon/graphdriver/aufs/aufs.go b/daemon/graphdriver/aufs/aufs.go index 8ba097d45a..2e7a6eb2a6 100644 --- a/daemon/graphdriver/aufs/aufs.go +++ b/daemon/graphdriver/aufs/aufs.go @@ -300,6 +300,7 @@ func (a *Driver) Diff(id, parent string) (archive.Archive, error) { // AUFS doesn't need the parent layer to produce a diff. return archive.TarWithOptions(path.Join(a.rootPath(), "diff", id), &archive.TarOptions{ Compression: archive.Uncompressed, + Excludes: []string{".wh..wh.*"}, }) } diff --git a/pkg/archive/archive.go b/pkg/archive/archive.go index 9c4d881cfa..fea2c3df11 100644 --- a/pkg/archive/archive.go +++ b/pkg/archive/archive.go @@ -369,7 +369,9 @@ func TarWithOptions(srcPath string, options *TarOptions) (io.ReadCloser, error) } relFilePath, err := filepath.Rel(srcPath, filePath) - if err != nil { + if err != nil || (relFilePath == "." && f.IsDir()) { + // Error getting relative path OR we are looking + // at the root path. Skip in both situations. return nil } From 7597f27276b1c47da05ad1ae0bbcca21e080d9a7 Mon Sep 17 00:00:00 2001 From: Nathan Hsieh Date: Mon, 27 Oct 2014 13:44:42 -0700 Subject: [PATCH 162/592] changed dockerfile back button to look nicer Signed-off-by: Nathan Hsieh --- docs/sources/userguide/level1.md | 2 +- docs/sources/userguide/level2.md | 2 +- docs/theme/mkdocs/css/dockerfile_tutorial.css | 4 ++++ 3 files changed, 6 insertions(+), 2 deletions(-) diff --git a/docs/sources/userguide/level1.md b/docs/sources/userguide/level1.md index eca816250a..56048bfccf 100644 --- a/docs/sources/userguide/level1.md +++ b/docs/sources/userguide/level1.md @@ -2,7 +2,7 @@ page_title: Docker Images Test page_description: How to work with Docker images. page_keywords: documentation, docs, the docker guide, docker guide, docker, docker platform, virtualization framework, docker.io, Docker images, Docker image, image management, Docker repos, Docker repositories, docker, docker tag, docker tags, Docker Hub, collaboration -Back +Back # Dockerfile Tutorial diff --git a/docs/sources/userguide/level2.md b/docs/sources/userguide/level2.md index c4f2a2802c..412adb62fe 100644 --- a/docs/sources/userguide/level2.md +++ b/docs/sources/userguide/level2.md @@ -2,7 +2,7 @@ page_title: Docker Images Test page_description: How to work with Docker images. 
page_keywords: documentation, docs, the docker guide, docker guide, docker, docker platform, virtualization framework, docker.io, Docker images, Docker image, image management, Docker repos, Docker repositories, docker, docker tag, docker tags, Docker Hub, collaboration -Back +Back #Dockerfile Tutorial diff --git a/docs/theme/mkdocs/css/dockerfile_tutorial.css b/docs/theme/mkdocs/css/dockerfile_tutorial.css index 79d0e9cfdf..ac3f538f3e 100644 --- a/docs/theme/mkdocs/css/dockerfile_tutorial.css +++ b/docs/theme/mkdocs/css/dockerfile_tutorial.css @@ -56,4 +56,8 @@ div.level_error { width: 90px; margin-right: 0; padding: 0 0 2px 0; +} +.dockerfile.back { + display: block; + margin-top: 5px; } \ No newline at end of file From 622e1005530eada61675e533d3993d7df6c21186 Mon Sep 17 00:00:00 2001 From: Michael Crosby Date: Tue, 28 Oct 2014 22:19:01 +0000 Subject: [PATCH 163/592] Update libcontainer f60d7b9195f8dc0b5d343abbc3293d Signed-off-by: Michael Crosby --- hack/vendor.sh | 2 +- .../github.com/docker/libcontainer/Makefile | 4 +- .../github.com/docker/libcontainer/README.md | 2 +- .../docker/libcontainer/cgroups/fs/blkio.go | 20 ++ .../libcontainer/cgroups/fs/blkio_test.go | 174 +++++++++++++++++- .../cgroups/fs/stats_util_test.go | 20 ++ .../docker/libcontainer/cgroups/stats.go | 6 +- .../github.com/docker/libcontainer/config.go | 3 + .../docker/libcontainer/devices/devices.go | 5 +- .../libcontainer/integration/exec_test.go | 120 ++++++++++++ .../github.com/docker/libcontainer/ipc/ipc.go | 29 +++ .../docker/libcontainer/namespaces/init.go | 4 + .../docker/libcontainer/network/stats.go | 77 ++++---- .../docker/libcontainer/xattr/errors.go | 8 + 14 files changed, 428 insertions(+), 46 deletions(-) create mode 100644 vendor/src/github.com/docker/libcontainer/ipc/ipc.go create mode 100644 vendor/src/github.com/docker/libcontainer/xattr/errors.go diff --git a/hack/vendor.sh b/hack/vendor.sh index 2dd12c6497..a537ff18b5 100755 --- a/hack/vendor.sh +++ b/hack/vendor.sh @@ -66,7 +66,7 @@ if [ "$1" = '--go' ]; then mv tmp-tar src/code.google.com/p/go/src/pkg/archive/tar fi -clone git github.com/docker/libcontainer aab3f6d17f2f56606f07f3a6eb6b693303f75812 +clone git github.com/docker/libcontainer f60d7b9195f8dc0b5d343abbc3293da7c17bb11c # see src/github.com/docker/libcontainer/update-vendor.sh which is the "source of truth" for libcontainer deps (just like this file) rm -rf src/github.com/docker/libcontainer/vendor eval "$(grep '^clone ' src/github.com/docker/libcontainer/update-vendor.sh | grep -v 'github.com/codegangsta/cli')" diff --git a/vendor/src/github.com/docker/libcontainer/Makefile b/vendor/src/github.com/docker/libcontainer/Makefile index 0ec995fc3c..0c4dda7c9b 100644 --- a/vendor/src/github.com/docker/libcontainer/Makefile +++ b/vendor/src/github.com/docker/libcontainer/Makefile @@ -12,10 +12,10 @@ sh: GO_PACKAGES = $(shell find . 
-not \( -wholename ./vendor -prune -o -wholename ./.git -prune \) -name '*.go' -print0 | xargs -0n1 dirname | sort -u) direct-test: - go test -cover -v $(GO_PACKAGES) + go test $(TEST_TAGS) -cover -v $(GO_PACKAGES) direct-test-short: - go test -cover -test.short -v $(GO_PACKAGES) + go test $(TEST_TAGS) -cover -test.short -v $(GO_PACKAGES) direct-build: go build -v $(GO_PACKAGES) diff --git a/vendor/src/github.com/docker/libcontainer/README.md b/vendor/src/github.com/docker/libcontainer/README.md index b80d2841f8..3201df9b98 100644 --- a/vendor/src/github.com/docker/libcontainer/README.md +++ b/vendor/src/github.com/docker/libcontainer/README.md @@ -56,7 +56,7 @@ Docs released under Creative commons. First of all, please familiarise yourself with the [libcontainer Principles](PRINCIPLES.md). -If you're a *contributor* or aspiring contributor, you should read the [Contributors' Guide](CONTRIBUTORS_GUIDE.md). +If you're a *contributor* or aspiring contributor, you should read the [Contributors' Guide](CONTRIBUTING.md). If you're a *maintainer* or aspiring maintainer, you should read the [Maintainers' Guide](MAINTAINERS_GUIDE.md) and "How can I become a maintainer?" in the Contributors' Guide. diff --git a/vendor/src/github.com/docker/libcontainer/cgroups/fs/blkio.go b/vendor/src/github.com/docker/libcontainer/cgroups/fs/blkio.go index 261a97ff23..ce824d56c2 100644 --- a/vendor/src/github.com/docker/libcontainer/cgroups/fs/blkio.go +++ b/vendor/src/github.com/docker/libcontainer/cgroups/fs/blkio.go @@ -146,6 +146,26 @@ func getCFQStats(path string, stats *cgroups.Stats) error { } stats.BlkioStats.IoQueuedRecursive = blkioStats + if blkioStats, err = getBlkioStat(filepath.Join(path, "blkio.io_service_time_recursive")); err != nil { + return err + } + stats.BlkioStats.IoServiceTimeRecursive = blkioStats + + if blkioStats, err = getBlkioStat(filepath.Join(path, "blkio.io_wait_time_recursive")); err != nil { + return err + } + stats.BlkioStats.IoWaitTimeRecursive = blkioStats + + if blkioStats, err = getBlkioStat(filepath.Join(path, "blkio.io_merged_recursive")); err != nil { + return err + } + stats.BlkioStats.IoMergedRecursive = blkioStats + + if blkioStats, err = getBlkioStat(filepath.Join(path, "blkio.time_recursive")); err != nil { + return err + } + stats.BlkioStats.IoTimeRecursive = blkioStats + return nil } diff --git a/vendor/src/github.com/docker/libcontainer/cgroups/fs/blkio_test.go b/vendor/src/github.com/docker/libcontainer/cgroups/fs/blkio_test.go index 2a79d260f6..6cd38cbaba 100644 --- a/vendor/src/github.com/docker/libcontainer/cgroups/fs/blkio_test.go +++ b/vendor/src/github.com/docker/libcontainer/cgroups/fs/blkio_test.go @@ -26,7 +26,25 @@ Total 50` 8:0 Async 3 8:0 Total 5 Total 5` - throttleServiceBytes = `8:0 Read 11030528 + serviceTimeRecursiveContents = `8:0 Read 173959 +8:0 Write 0 +8:0 Sync 0 +8:0 Async 173959 +8:0 Total 17395 +Total 17395` + waitTimeRecursiveContents = `8:0 Read 15571 +8:0 Write 0 +8:0 Sync 0 +8:0 Async 15571 +8:0 Total 15571` + mergedRecursiveContents = `8:0 Read 5 +8:0 Write 10 +8:0 Sync 0 +8:0 Async 0 +8:0 Total 15 +Total 15` + timeRecursiveContents = `8:0 8` + throttleServiceBytes = `8:0 Read 11030528 8:0 Write 23 8:0 Sync 42 8:0 Async 11030528 @@ -61,6 +79,10 @@ func TestBlkioStats(t *testing.T) { "blkio.io_service_bytes_recursive": serviceBytesRecursiveContents, "blkio.io_serviced_recursive": servicedRecursiveContents, "blkio.io_queued_recursive": queuedRecursiveContents, + "blkio.io_service_time_recursive": serviceTimeRecursiveContents, + 
"blkio.io_wait_time_recursive": waitTimeRecursiveContents, + "blkio.io_merged_recursive": mergedRecursiveContents, + "blkio.time_recursive": timeRecursiveContents, "blkio.sectors_recursive": sectorsRecursiveContents, }) @@ -93,6 +115,26 @@ func TestBlkioStats(t *testing.T) { appendBlkioStatEntry(&expectedStats.IoQueuedRecursive, 8, 0, 3, "Async") appendBlkioStatEntry(&expectedStats.IoQueuedRecursive, 8, 0, 5, "Total") + appendBlkioStatEntry(&expectedStats.IoServiceTimeRecursive, 8, 0, 173959, "Read") + appendBlkioStatEntry(&expectedStats.IoServiceTimeRecursive, 8, 0, 0, "Write") + appendBlkioStatEntry(&expectedStats.IoServiceTimeRecursive, 8, 0, 0, "Sync") + appendBlkioStatEntry(&expectedStats.IoServiceTimeRecursive, 8, 0, 173959, "Async") + appendBlkioStatEntry(&expectedStats.IoServiceTimeRecursive, 8, 0, 17395, "Total") + + appendBlkioStatEntry(&expectedStats.IoWaitTimeRecursive, 8, 0, 15571, "Read") + appendBlkioStatEntry(&expectedStats.IoWaitTimeRecursive, 8, 0, 0, "Write") + appendBlkioStatEntry(&expectedStats.IoWaitTimeRecursive, 8, 0, 0, "Sync") + appendBlkioStatEntry(&expectedStats.IoWaitTimeRecursive, 8, 0, 15571, "Async") + appendBlkioStatEntry(&expectedStats.IoWaitTimeRecursive, 8, 0, 15571, "Total") + + appendBlkioStatEntry(&expectedStats.IoMergedRecursive, 8, 0, 5, "Read") + appendBlkioStatEntry(&expectedStats.IoMergedRecursive, 8, 0, 10, "Write") + appendBlkioStatEntry(&expectedStats.IoMergedRecursive, 8, 0, 0, "Sync") + appendBlkioStatEntry(&expectedStats.IoMergedRecursive, 8, 0, 0, "Async") + appendBlkioStatEntry(&expectedStats.IoMergedRecursive, 8, 0, 15, "Total") + + appendBlkioStatEntry(&expectedStats.IoTimeRecursive, 8, 0, 8, "") + expectBlkioStatsEquals(t, expectedStats, actualStats.BlkioStats) } @@ -103,6 +145,10 @@ func TestBlkioStatsNoSectorsFile(t *testing.T) { "blkio.io_service_bytes_recursive": serviceBytesRecursiveContents, "blkio.io_serviced_recursive": servicedRecursiveContents, "blkio.io_queued_recursive": queuedRecursiveContents, + "blkio.io_service_time_recursive": serviceTimeRecursiveContents, + "blkio.io_wait_time_recursive": waitTimeRecursiveContents, + "blkio.io_merged_recursive": mergedRecursiveContents, + "blkio.time_recursive": timeRecursiveContents, }) blkio := &BlkioGroup{} @@ -117,9 +163,13 @@ func TestBlkioStatsNoServiceBytesFile(t *testing.T) { helper := NewCgroupTestUtil("blkio", t) defer helper.cleanup() helper.writeFileContents(map[string]string{ - "blkio.io_serviced_recursive": servicedRecursiveContents, - "blkio.io_queued_recursive": queuedRecursiveContents, - "blkio.sectors_recursive": sectorsRecursiveContents, + "blkio.io_serviced_recursive": servicedRecursiveContents, + "blkio.io_queued_recursive": queuedRecursiveContents, + "blkio.sectors_recursive": sectorsRecursiveContents, + "blkio.io_service_time_recursive": serviceTimeRecursiveContents, + "blkio.io_wait_time_recursive": waitTimeRecursiveContents, + "blkio.io_merged_recursive": mergedRecursiveContents, + "blkio.time_recursive": timeRecursiveContents, }) blkio := &BlkioGroup{} @@ -137,6 +187,10 @@ func TestBlkioStatsNoServicedFile(t *testing.T) { "blkio.io_service_bytes_recursive": serviceBytesRecursiveContents, "blkio.io_queued_recursive": queuedRecursiveContents, "blkio.sectors_recursive": sectorsRecursiveContents, + "blkio.io_service_time_recursive": serviceTimeRecursiveContents, + "blkio.io_wait_time_recursive": waitTimeRecursiveContents, + "blkio.io_merged_recursive": mergedRecursiveContents, + "blkio.time_recursive": timeRecursiveContents, }) blkio := &BlkioGroup{} @@ -154,6 
+208,106 @@ func TestBlkioStatsNoQueuedFile(t *testing.T) { "blkio.io_service_bytes_recursive": serviceBytesRecursiveContents, "blkio.io_serviced_recursive": servicedRecursiveContents, "blkio.sectors_recursive": sectorsRecursiveContents, + "blkio.io_service_time_recursive": serviceTimeRecursiveContents, + "blkio.io_wait_time_recursive": waitTimeRecursiveContents, + "blkio.io_merged_recursive": mergedRecursiveContents, + "blkio.time_recursive": timeRecursiveContents, + }) + + blkio := &BlkioGroup{} + actualStats := *cgroups.NewStats() + err := blkio.GetStats(helper.CgroupPath, &actualStats) + if err != nil { + t.Fatalf("Failed unexpectedly: %s", err) + } +} + +func TestBlkioStatsNoServiceTimeFile(t *testing.T) { + if testing.Short() { + t.Skip("skipping test in short mode.") + } + helper := NewCgroupTestUtil("blkio", t) + defer helper.cleanup() + helper.writeFileContents(map[string]string{ + "blkio.io_service_bytes_recursive": serviceBytesRecursiveContents, + "blkio.io_serviced_recursive": servicedRecursiveContents, + "blkio.io_queued_recursive": queuedRecursiveContents, + "blkio.io_wait_time_recursive": waitTimeRecursiveContents, + "blkio.io_merged_recursive": mergedRecursiveContents, + "blkio.time_recursive": timeRecursiveContents, + "blkio.sectors_recursive": sectorsRecursiveContents, + }) + + blkio := &BlkioGroup{} + actualStats := *cgroups.NewStats() + err := blkio.GetStats(helper.CgroupPath, &actualStats) + if err != nil { + t.Fatalf("Failed unexpectedly: %s", err) + } +} + +func TestBlkioStatsNoWaitTimeFile(t *testing.T) { + if testing.Short() { + t.Skip("skipping test in short mode.") + } + helper := NewCgroupTestUtil("blkio", t) + defer helper.cleanup() + helper.writeFileContents(map[string]string{ + "blkio.io_service_bytes_recursive": serviceBytesRecursiveContents, + "blkio.io_serviced_recursive": servicedRecursiveContents, + "blkio.io_queued_recursive": queuedRecursiveContents, + "blkio.io_service_time_recursive": serviceTimeRecursiveContents, + "blkio.io_merged_recursive": mergedRecursiveContents, + "blkio.time_recursive": timeRecursiveContents, + "blkio.sectors_recursive": sectorsRecursiveContents, + }) + + blkio := &BlkioGroup{} + actualStats := *cgroups.NewStats() + err := blkio.GetStats(helper.CgroupPath, &actualStats) + if err != nil { + t.Fatalf("Failed unexpectedly: %s", err) + } +} + +func TestBlkioStatsNoMergedFile(t *testing.T) { + if testing.Short() { + t.Skip("skipping test in short mode.") + } + helper := NewCgroupTestUtil("blkio", t) + defer helper.cleanup() + helper.writeFileContents(map[string]string{ + "blkio.io_service_bytes_recursive": serviceBytesRecursiveContents, + "blkio.io_serviced_recursive": servicedRecursiveContents, + "blkio.io_queued_recursive": queuedRecursiveContents, + "blkio.io_service_time_recursive": serviceTimeRecursiveContents, + "blkio.io_wait_time_recursive": waitTimeRecursiveContents, + "blkio.time_recursive": timeRecursiveContents, + "blkio.sectors_recursive": sectorsRecursiveContents, + }) + + blkio := &BlkioGroup{} + actualStats := *cgroups.NewStats() + err := blkio.GetStats(helper.CgroupPath, &actualStats) + if err != nil { + t.Fatalf("Failed unexpectedly: %s", err) + } +} + +func TestBlkioStatsNoTimeFile(t *testing.T) { + if testing.Short() { + t.Skip("skipping test in short mode.") + } + helper := NewCgroupTestUtil("blkio", t) + defer helper.cleanup() + helper.writeFileContents(map[string]string{ + "blkio.io_service_bytes_recursive": serviceBytesRecursiveContents, + "blkio.io_serviced_recursive": servicedRecursiveContents, + 
"blkio.io_queued_recursive": queuedRecursiveContents, + "blkio.io_service_time_recursive": serviceTimeRecursiveContents, + "blkio.io_wait_time_recursive": waitTimeRecursiveContents, + "blkio.io_merged_recursive": mergedRecursiveContents, + "blkio.sectors_recursive": sectorsRecursiveContents, }) blkio := &BlkioGroup{} @@ -172,6 +326,10 @@ func TestBlkioStatsUnexpectedNumberOfFields(t *testing.T) { "blkio.io_serviced_recursive": servicedRecursiveContents, "blkio.io_queued_recursive": queuedRecursiveContents, "blkio.sectors_recursive": sectorsRecursiveContents, + "blkio.io_service_time_recursive": serviceTimeRecursiveContents, + "blkio.io_wait_time_recursive": waitTimeRecursiveContents, + "blkio.io_merged_recursive": mergedRecursiveContents, + "blkio.time_recursive": timeRecursiveContents, }) blkio := &BlkioGroup{} @@ -190,6 +348,10 @@ func TestBlkioStatsUnexpectedFieldType(t *testing.T) { "blkio.io_serviced_recursive": servicedRecursiveContents, "blkio.io_queued_recursive": queuedRecursiveContents, "blkio.sectors_recursive": sectorsRecursiveContents, + "blkio.io_service_time_recursive": serviceTimeRecursiveContents, + "blkio.io_wait_time_recursive": waitTimeRecursiveContents, + "blkio.io_merged_recursive": mergedRecursiveContents, + "blkio.time_recursive": timeRecursiveContents, }) blkio := &BlkioGroup{} @@ -208,6 +370,10 @@ func TestNonCFQBlkioStats(t *testing.T) { "blkio.io_serviced_recursive": "", "blkio.io_queued_recursive": "", "blkio.sectors_recursive": "", + "blkio.io_service_time_recursive": "", + "blkio.io_wait_time_recursive": "", + "blkio.io_merged_recursive": "", + "blkio.time_recursive": "", "blkio.throttle.io_service_bytes": throttleServiceBytes, "blkio.throttle.io_serviced": throttleServiced, }) diff --git a/vendor/src/github.com/docker/libcontainer/cgroups/fs/stats_util_test.go b/vendor/src/github.com/docker/libcontainer/cgroups/fs/stats_util_test.go index 7e7da754d0..1a9e590f59 100644 --- a/vendor/src/github.com/docker/libcontainer/cgroups/fs/stats_util_test.go +++ b/vendor/src/github.com/docker/libcontainer/cgroups/fs/stats_util_test.go @@ -41,6 +41,26 @@ func expectBlkioStatsEquals(t *testing.T, expected, actual cgroups.BlkioStats) { log.Printf("blkio SectorsRecursive do not match - %s\n", err) t.Fail() } + + if err := blkioStatEntryEquals(expected.IoServiceTimeRecursive, actual.IoServiceTimeRecursive); err != nil { + log.Printf("blkio IoServiceTimeRecursive do not match - %s\n", err) + t.Fail() + } + + if err := blkioStatEntryEquals(expected.IoWaitTimeRecursive, actual.IoWaitTimeRecursive); err != nil { + log.Printf("blkio IoWaitTimeRecursive do not match - %s\n", err) + t.Fail() + } + + if err := blkioStatEntryEquals(expected.IoMergedRecursive, actual.IoMergedRecursive); err != nil { + log.Printf("blkio IoMergedRecursive do not match - %s vs %s\n", expected.IoMergedRecursive, actual.IoMergedRecursive) + t.Fail() + } + + if err := blkioStatEntryEquals(expected.IoTimeRecursive, actual.IoTimeRecursive); err != nil { + log.Printf("blkio IoTimeRecursive do not match - %s\n", err) + t.Fail() + } } func expectThrottlingDataEquals(t *testing.T, expected, actual cgroups.ThrottlingData) { diff --git a/vendor/src/github.com/docker/libcontainer/cgroups/stats.go b/vendor/src/github.com/docker/libcontainer/cgroups/stats.go index f52251395c..857fc1dc47 100644 --- a/vendor/src/github.com/docker/libcontainer/cgroups/stats.go +++ b/vendor/src/github.com/docker/libcontainer/cgroups/stats.go @@ -52,8 +52,12 @@ type BlkioStatEntry struct { type BlkioStats struct { // number of bytes 
tranferred to and from the block device IoServiceBytesRecursive []BlkioStatEntry `json:"io_service_bytes_recursive,omitempty"` - IoServicedRecursive []BlkioStatEntry `json:"io_serviced_recusrive,omitempty"` + IoServicedRecursive []BlkioStatEntry `json:"io_serviced_recursive,omitempty"` IoQueuedRecursive []BlkioStatEntry `json:"io_queue_recursive,omitempty"` + IoServiceTimeRecursive []BlkioStatEntry `json:"io_service_time_recursive,omitempty"` + IoWaitTimeRecursive []BlkioStatEntry `json:"io_wait_time_recursive,omitempty"` + IoMergedRecursive []BlkioStatEntry `json:"io_merged_recursive,omitempty"` + IoTimeRecursive []BlkioStatEntry `json:"io_time_recursive,omitempty"` SectorsRecursive []BlkioStatEntry `json:"sectors_recursive,omitempty"` } diff --git a/vendor/src/github.com/docker/libcontainer/config.go b/vendor/src/github.com/docker/libcontainer/config.go index 1fb377dcef..57ea5c69ac 100644 --- a/vendor/src/github.com/docker/libcontainer/config.go +++ b/vendor/src/github.com/docker/libcontainer/config.go @@ -47,6 +47,9 @@ type Config struct { // Networks specifies the container's network setup to be created Networks []*Network `json:"networks,omitempty"` + // Ipc specifies the container's ipc setup to be created + IpcNsPath string `json:"ipc,omitempty"` + // Routes can be specified to create entries in the route table as the container is started Routes []*Route `json:"routes,omitempty"` diff --git a/vendor/src/github.com/docker/libcontainer/devices/devices.go b/vendor/src/github.com/docker/libcontainer/devices/devices.go index 558f7f5f9c..5bf80e8cd4 100644 --- a/vendor/src/github.com/docker/libcontainer/devices/devices.go +++ b/vendor/src/github.com/docker/libcontainer/devices/devices.go @@ -100,7 +100,8 @@ func getDeviceNodes(path string) ([]*Device, error) { out := []*Device{} for _, f := range files { - if f.IsDir() { + switch { + case f.IsDir(): switch f.Name() { case "pts", "shm", "fd": continue @@ -113,6 +114,8 @@ func getDeviceNodes(path string) ([]*Device, error) { out = append(out, sub...) continue } + case f.Name() == "console": + continue } device, err := GetDevice(filepath.Join(path, f.Name()), "rwm") diff --git a/vendor/src/github.com/docker/libcontainer/integration/exec_test.go b/vendor/src/github.com/docker/libcontainer/integration/exec_test.go index 9609918943..261d208e3e 100644 --- a/vendor/src/github.com/docker/libcontainer/integration/exec_test.go +++ b/vendor/src/github.com/docker/libcontainer/integration/exec_test.go @@ -1,6 +1,7 @@ package integration import ( + "os" "strings" "testing" ) @@ -36,3 +37,122 @@ func TestExecPS(t *testing.T) { t.Fatalf("expected output %q but received %q", expected, actual) } } + +func TestIPCPrivate(t *testing.T) { + if testing.Short() { + return + } + + rootfs, err := newRootFs() + if err != nil { + t.Fatal(err) + } + defer remove(rootfs) + + l, err := os.Readlink("/proc/1/ns/ipc") + if err != nil { + t.Fatal(err) + } + + config := newTemplateConfig(rootfs) + config.Namespaces["NEWIPC"] = true + buffers, exitCode, err := runContainer(config, "", "readlink", "/proc/self/ns/ipc") + if err != nil { + t.Fatal(err) + } + + if exitCode != 0 { + t.Fatalf("exit code not 0. 
code %d stderr %q", exitCode, buffers.Stderr) + } + + if actual := strings.Trim(buffers.Stdout.String(), "\n"); actual == l { + t.Fatalf("ipc link should be private to the conatiner but equals host %q %q", actual, l) + } +} + +func TestIPCHost(t *testing.T) { + if testing.Short() { + return + } + + rootfs, err := newRootFs() + if err != nil { + t.Fatal(err) + } + defer remove(rootfs) + + l, err := os.Readlink("/proc/1/ns/ipc") + if err != nil { + t.Fatal(err) + } + + config := newTemplateConfig(rootfs) + config.Namespaces["NEWIPC"] = false + buffers, exitCode, err := runContainer(config, "", "readlink", "/proc/self/ns/ipc") + if err != nil { + t.Fatal(err) + } + + if exitCode != 0 { + t.Fatalf("exit code not 0. code %d stderr %q", exitCode, buffers.Stderr) + } + + if actual := strings.Trim(buffers.Stdout.String(), "\n"); actual != l { + t.Fatalf("ipc link not equal to host link %q %q", actual, l) + } +} + +func TestIPCJoinPath(t *testing.T) { + if testing.Short() { + return + } + + rootfs, err := newRootFs() + if err != nil { + t.Fatal(err) + } + defer remove(rootfs) + + l, err := os.Readlink("/proc/1/ns/ipc") + if err != nil { + t.Fatal(err) + } + + config := newTemplateConfig(rootfs) + config.Namespaces["NEWIPC"] = false + config.IpcNsPath = "/proc/1/ns/ipc" + + buffers, exitCode, err := runContainer(config, "", "readlink", "/proc/self/ns/ipc") + if err != nil { + t.Fatal(err) + } + + if exitCode != 0 { + t.Fatalf("exit code not 0. code %d stderr %q", exitCode, buffers.Stderr) + } + + if actual := strings.Trim(buffers.Stdout.String(), "\n"); actual != l { + t.Fatalf("ipc link not equal to host link %q %q", actual, l) + } +} + +func TestIPCBadPath(t *testing.T) { + if testing.Short() { + return + } + + rootfs, err := newRootFs() + if err != nil { + t.Fatal(err) + } + defer remove(rootfs) + + config := newTemplateConfig(rootfs) + config.Namespaces["NEWIPC"] = false + config.IpcNsPath = "/proc/1/ns/ipcc" + + _, _, err = runContainer(config, "", "true") + if err == nil { + t.Fatal("container succeded with bad ipc path") + } +} diff --git a/vendor/src/github.com/docker/libcontainer/ipc/ipc.go b/vendor/src/github.com/docker/libcontainer/ipc/ipc.go new file mode 100644 index 0000000000..147cf5571e --- /dev/null +++ b/vendor/src/github.com/docker/libcontainer/ipc/ipc.go @@ -0,0 +1,29 @@ +package ipc + +import ( + "fmt" + "os" + "syscall" + + "github.com/docker/libcontainer/system" +) + +// Join the IPC Namespace of specified ipc path if it exists. +// If the path does not exist then you are not joining a container. 
+func Initialize(nsPath string) error { + if nsPath == "" { + return nil + } + f, err := os.OpenFile(nsPath, os.O_RDONLY, 0) + if err != nil { + return fmt.Errorf("failed get IPC namespace fd: %v", err) + } + + err = system.Setns(f.Fd(), syscall.CLONE_NEWIPC) + f.Close() + + if err != nil { + return fmt.Errorf("failed to setns current IPC namespace: %v", err) + } + return nil +} diff --git a/vendor/src/github.com/docker/libcontainer/namespaces/init.go b/vendor/src/github.com/docker/libcontainer/namespaces/init.go index 4c2b3327e5..879ac21e0d 100644 --- a/vendor/src/github.com/docker/libcontainer/namespaces/init.go +++ b/vendor/src/github.com/docker/libcontainer/namespaces/init.go @@ -11,6 +11,7 @@ import ( "github.com/docker/libcontainer" "github.com/docker/libcontainer/apparmor" "github.com/docker/libcontainer/console" + "github.com/docker/libcontainer/ipc" "github.com/docker/libcontainer/label" "github.com/docker/libcontainer/mount" "github.com/docker/libcontainer/netlink" @@ -66,6 +67,9 @@ func Init(container *libcontainer.Config, uncleanRootfs, consolePath string, syn return fmt.Errorf("setctty %s", err) } } + if err := ipc.Initialize(container.IpcNsPath); err != nil { + return fmt.Errorf("setup IPC %s", err) + } if err := setupNetwork(container, networkState); err != nil { return fmt.Errorf("setup networking %s", err) } diff --git a/vendor/src/github.com/docker/libcontainer/network/stats.go b/vendor/src/github.com/docker/libcontainer/network/stats.go index c8ece5c7b0..e2156c74da 100644 --- a/vendor/src/github.com/docker/libcontainer/network/stats.go +++ b/vendor/src/github.com/docker/libcontainer/network/stats.go @@ -2,7 +2,6 @@ package network import ( "io/ioutil" - "os" "path/filepath" "strconv" "strings" @@ -25,45 +24,51 @@ func GetStats(networkState *NetworkState) (*NetworkStats, error) { if networkState.VethHost == "" { return &NetworkStats{}, nil } - data, err := readSysfsNetworkStats(networkState.VethHost) - if err != nil { - return nil, err + + out := &NetworkStats{} + + type netStatsPair struct { + // Where to write the output. + Out *uint64 + + // The network stats file to read. + File string } // Ingress for host veth is from the container. Hence tx_bytes stat on the host veth is actually number of bytes received by the container. - return &NetworkStats{ - RxBytes: data["tx_bytes"], - RxPackets: data["tx_packets"], - RxErrors: data["tx_errors"], - RxDropped: data["tx_dropped"], - TxBytes: data["rx_bytes"], - TxPackets: data["rx_packets"], - TxErrors: data["rx_errors"], - TxDropped: data["rx_dropped"], - }, nil + netStats := []netStatsPair{ + {Out: &out.RxBytes, File: "tx_bytes"}, + {Out: &out.RxPackets, File: "tx_packets"}, + {Out: &out.RxErrors, File: "tx_errors"}, + {Out: &out.RxDropped, File: "tx_dropped"}, + + {Out: &out.TxBytes, File: "rx_bytes"}, + {Out: &out.TxPackets, File: "rx_packets"}, + {Out: &out.TxErrors, File: "rx_errors"}, + {Out: &out.TxDropped, File: "rx_dropped"}, + } + for _, netStat := range netStats { + data, err := readSysfsNetworkStats(networkState.VethHost, netStat.File) + if err != nil { + return nil, err + } + *(netStat.Out) = data + } + + return out, nil } -// Reads all the statistics available under /sys/class/net//statistics as a map with file name as key and data as integers. 
-func readSysfsNetworkStats(ethInterface string) (map[string]uint64, error) { - out := make(map[string]uint64) +// Reads the specified statistics available under /sys/class/net//statistics +func readSysfsNetworkStats(ethInterface, statsFile string) (uint64, error) { + fullPath := filepath.Join("/sys/class/net", ethInterface, "statistics", statsFile) + data, err := ioutil.ReadFile(fullPath) + if err != nil { + return 0, err + } + value, err := strconv.ParseUint(strings.TrimSpace(string(data)), 10, 64) + if err != nil { + return 0, err + } - fullPath := filepath.Join("/sys/class/net", ethInterface, "statistics/") - err := filepath.Walk(fullPath, func(path string, _ os.FileInfo, _ error) error { - // skip fullPath. - if path == fullPath { - return nil - } - base := filepath.Base(path) - data, err := ioutil.ReadFile(path) - if err != nil { - return err - } - value, err := strconv.ParseUint(strings.TrimSpace(string(data)), 10, 64) - if err != nil { - return err - } - out[base] = value - return nil - }) - return out, err + return value, err } diff --git a/vendor/src/github.com/docker/libcontainer/xattr/errors.go b/vendor/src/github.com/docker/libcontainer/xattr/errors.go new file mode 100644 index 0000000000..8cd77418cc --- /dev/null +++ b/vendor/src/github.com/docker/libcontainer/xattr/errors.go @@ -0,0 +1,8 @@ +package xattr + +import ( + "fmt" + "runtime" +) + +var ErrNotSupportedPlatform = fmt.Errorf("platform and architecture is not supported %s %s", runtime.GOOS, runtime.GOARCH) From 17e0a15386c2a2ff01417d66e37683bd9a2a79c8 Mon Sep 17 00:00:00 2001 From: Jessica Frazelle Date: Tue, 28 Oct 2014 16:25:50 -0700 Subject: [PATCH 164/592] Revert "Use code generation to set IAMSTATIC instead of -X" This reverts commit 3e10b93106dea94e5747ab32fe4ac765aa22f9bc. Conflicts: .gitignore hack/make.sh hack/make/dynbinary Docker-DCO-1.1-Signed-off-by: Jessica Frazelle (github: jfrazelle) --- .gitignore | 3 --- hack/make.sh | 12 +----------- hack/make/binary | 11 ----------- hack/make/dynbinary | 20 +------------------- 4 files changed, 2 insertions(+), 44 deletions(-) diff --git a/.gitignore b/.gitignore index 21df574df6..2a86e41caf 100644 --- a/.gitignore +++ b/.gitignore @@ -27,6 +27,3 @@ docs/AWS_S3_BUCKET docs/GIT_BRANCH docs/VERSION docs/GITCOMMIT -dockerversion/static.go -dockerversion/details.go -dockerversion/init.go diff --git a/hack/make.sh b/hack/make.sh index a841c13424..f97633de57 100755 --- a/hack/make.sh +++ b/hack/make.sh @@ -94,17 +94,6 @@ if [ -z "$DOCKER_CLIENTONLY" ]; then DOCKER_BUILDTAGS+=" daemon" fi -rm -f dockerversion/static.go dockerversion/details.go dockerversion/init.go -cat > dockerversion/details.go < dockerversion/static.go < dockerversion/static.go < dockerversion/init.go < Date: Tue, 28 Oct 2014 16:27:23 -0700 Subject: [PATCH 165/592] Revert "use code generation for GITCOMMIT/VERSION too" This reverts commit 85744a35de0e30c2dd6741b833f38ff218c8a3fc. 
Conflicts: .gitignore hack/make.sh Docker-DCO-1.1-Signed-off-by: Jessica Frazelle (github: jfrazelle) --- hack/make.sh | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/hack/make.sh b/hack/make.sh index f97633de57..d6da3057fa 100755 --- a/hack/make.sh +++ b/hack/make.sh @@ -95,7 +95,11 @@ if [ -z "$DOCKER_CLIENTONLY" ]; then fi # Use these flags when compiling the tests and final binary -LDFLAGS='-w' +LDFLAGS=' + -w + -X '$DOCKER_PKG'/dockerversion.GITCOMMIT "'$GITCOMMIT'" + -X '$DOCKER_PKG'/dockerversion.VERSION "'$VERSION'" +' LDFLAGS_STATIC='-linkmode external' EXTLDFLAGS_STATIC='-static' # ORIG_BUILDFLAGS is necessary for the cross target which cannot always build From 7041eb00be7d6224aeedf7db317667b1d121b54d Mon Sep 17 00:00:00 2001 From: Jessica Frazelle Date: Tue, 28 Oct 2014 16:29:01 -0700 Subject: [PATCH 166/592] Revert "finally, use code generation for INITSHA1 & INITPATH too" This reverts commit c7c620dd9fce0e7b437ee185c7a58341f3b8e3aa. Conflicts: .gitignore hack/make.sh hack/make/dynbinary Docker-DCO-1.1-Signed-off-by: Jessica Frazelle (github: jfrazelle) --- hack/make/dyntest-integration | 12 +++--------- hack/make/dyntest-unit | 12 +++--------- 2 files changed, 6 insertions(+), 18 deletions(-) diff --git a/hack/make/dyntest-integration b/hack/make/dyntest-integration index 5015c10969..1cc7349aba 100644 --- a/hack/make/dyntest-integration +++ b/hack/make/dyntest-integration @@ -11,14 +11,8 @@ fi ( export TEST_DOCKERINIT_PATH="$INIT" - export LDFLAGS_STATIC_DOCKER="" - cat > dockerversion/init.go < dockerversion/init.go < Date: Tue, 28 Oct 2014 17:42:03 -0700 Subject: [PATCH 167/592] Fix error on successful login. Docker-DCO-1.1-Signed-off-by: Jessica Frazelle (github: jfrazelle) --- registry/auth.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/registry/auth.go b/registry/auth.go index c9067e7acb..1b11179533 100644 --- a/registry/auth.go +++ b/registry/auth.go @@ -219,7 +219,7 @@ func Login(authConfig *AuthConfig, factory *utils.HTTPRequestFactory) (string, e return "", err } if resp.StatusCode == 200 { - status = "Login Succeeded" + return "Login Succeeded", nil } else if resp.StatusCode == 401 { return "", fmt.Errorf("Wrong login/password, please try again") } else if resp.StatusCode == 403 { @@ -247,7 +247,7 @@ func Login(authConfig *AuthConfig, factory *utils.HTTPRequestFactory) (string, e return "", err } if resp.StatusCode == 200 { - status = "Login Succeeded" + return "Login Succeeded", nil } else if resp.StatusCode == 401 { return "", fmt.Errorf("Wrong login/password, please try again") } else { From ed5e776a2235dd185423749acb9b306bb6067945 Mon Sep 17 00:00:00 2001 From: decadent Date: Wed, 29 Oct 2014 12:36:38 +0300 Subject: [PATCH 168/592] Updated the COPY directive reference: Source can't be a URL for COPY (+ formatting error fixes) Signed-off-by: Roman Dudin --- docs/sources/reference/builder.md | 15 ++++++--------- 1 file changed, 6 insertions(+), 9 deletions(-) diff --git a/docs/sources/reference/builder.md b/docs/sources/reference/builder.md index ae0771d685..c16b33c764 100644 --- a/docs/sources/reference/builder.md +++ b/docs/sources/reference/builder.md @@ -355,9 +355,8 @@ change them using `docker run --env =`. ADD ... -The `ADD` instruction copies new files,directories or remote file URLs to -the filesystem of the container from `` and add them to the at -path ``. +The `ADD` instruction copies new files, directories or remote file URLs from `` +and adds them to the filesystem of the container at the path ``. 
Multiple `` resource may be specified but if they are files or directories then they must be relative to the source directory that is @@ -448,13 +447,11 @@ The copy obeys the following rules: COPY ... -The `COPY` instruction copies new files,directories or remote file URLs to -the filesystem of the container from `` and add them to the at -path ``. +The `COPY` instruction copies new files or directories from `` +and adds them to the filesystem of the container at the path ``. -Multiple `` resource may be specified but if they are files or -directories then they must be relative to the source directory that is being -built (the context of the build). +Multiple `` resource may be specified but they must be relative +to the source directory that is being built (the context of the build). Each `` may contain wildcards and matching will be done using Go's [filepath.Match](http://golang.org/pkg/path/filepath#Match) rules. From 4da25724248d988771c2a546aa065505ceb24038 Mon Sep 17 00:00:00 2001 From: Alexandr Morozov Date: Wed, 29 Oct 2014 09:31:35 -0700 Subject: [PATCH 169/592] Fix compilation issue bringed by #8319 Signed-off-by: Alexandr Morozov --- daemon/networkdriver/ipallocator/allocator.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/daemon/networkdriver/ipallocator/allocator.go b/daemon/networkdriver/ipallocator/allocator.go index d5c644b23c..3f60d2d065 100644 --- a/daemon/networkdriver/ipallocator/allocator.go +++ b/daemon/networkdriver/ipallocator/allocator.go @@ -6,8 +6,8 @@ import ( "net" "sync" + log "github.com/Sirupsen/logrus" "github.com/docker/docker/daemon/networkdriver" - "github.com/docker/docker/pkg/log" ) // allocatedMap is thread-unsafe set of allocated IP From d98b117962a7178154e775b8b283744841a10e3b Mon Sep 17 00:00:00 2001 From: Jessica Frazelle Date: Tue, 30 Sep 2014 12:18:26 -0700 Subject: [PATCH 170/592] Add test for #8307. 
Docker-DCO-1.1-Signed-off-by: Jessica Frazelle (github: jfrazelle) --- integration-cli/docker_cli_daemon_test.go | 95 +++++++++++++++++++++++ 1 file changed, 95 insertions(+) diff --git a/integration-cli/docker_cli_daemon_test.go b/integration-cli/docker_cli_daemon_test.go index 9d238c15ee..42995def13 100644 --- a/integration-cli/docker_cli_daemon_test.go +++ b/integration-cli/docker_cli_daemon_test.go @@ -128,3 +128,98 @@ func TestDaemonStartBridgeWithoutIPAssociation(t *testing.T) { logDone("daemon - successful daemon start when bridge has no IP association") } + +func TestDaemonIptablesClean(t *testing.T) { + d := NewDaemon(t) + if err := d.StartWithBusybox(); err != nil { + t.Fatalf("Could not start daemon with busybox: %v", err) + } + defer d.Stop() + + if out, err := d.Cmd("run", "-d", "--name", "top", "-p", "80", "busybox:latest", "top"); err != nil { + t.Fatalf("Could not run top: %s, %v", out, err) + } + + // get output from iptables with container running + ipTablesSearchString := "tcp dpt:80" + ipTablesCmd := exec.Command("iptables", "-nvL") + out, _, err := runCommandWithOutput(ipTablesCmd) + if err != nil { + t.Fatalf("Could not run iptables -nvL: %s, %v", out, err) + } + + if !strings.Contains(out, ipTablesSearchString) { + t.Fatalf("iptables output should have contained %q, but was %q", ipTablesSearchString, out) + } + + if err := d.Stop(); err != nil { + t.Fatalf("Could not stop daemon: %v", err) + } + + // get output from iptables after restart + ipTablesCmd = exec.Command("iptables", "-nvL") + out, _, err = runCommandWithOutput(ipTablesCmd) + if err != nil { + t.Fatalf("Could not run iptables -nvL: %s, %v", out, err) + } + + if strings.Contains(out, ipTablesSearchString) { + t.Fatalf("iptables output should not have contained %q, but was %q", ipTablesSearchString, out) + } + + deleteAllContainers() + + logDone("run,iptables - iptables rules cleaned after daemon restart") +} + +func TestDaemonIptablesCreate(t *testing.T) { + d := NewDaemon(t) + if err := d.StartWithBusybox(); err != nil { + t.Fatalf("Could not start daemon with busybox: %v", err) + } + defer d.Stop() + + if out, err := d.Cmd("run", "-d", "--name", "top", "--restart=always", "-p", "80", "busybox:latest", "top"); err != nil { + t.Fatalf("Could not run top: %s, %v", out, err) + } + + // get output from iptables with container running + ipTablesSearchString := "tcp dpt:80" + ipTablesCmd := exec.Command("iptables", "-nvL") + out, _, err := runCommandWithOutput(ipTablesCmd) + if err != nil { + t.Fatalf("Could not run iptables -nvL: %s, %v", out, err) + } + + if !strings.Contains(out, ipTablesSearchString) { + t.Fatalf("iptables output should have contained %q, but was %q", ipTablesSearchString, out) + } + + if err := d.Restart(); err != nil { + t.Fatalf("Could not restart daemon: %v", err) + } + + // make sure the container is not running + runningOut, err := d.Cmd("inspect", "--format='{{.State.Running}}'", "top") + if err != nil { + t.Fatalf("Could not inspect on container: %s, %v", out, err) + } + if strings.TrimSpace(runningOut) != "true" { + t.Fatalf("Container should have been restarted after daemon restart. 
Status running should have been true but was: %q", strings.TrimSpace(runningOut)) + } + + // get output from iptables after restart + ipTablesCmd = exec.Command("iptables", "-nvL") + out, _, err = runCommandWithOutput(ipTablesCmd) + if err != nil { + t.Fatalf("Could not run iptables -nvL: %s, %v", out, err) + } + + if !strings.Contains(out, ipTablesSearchString) { + t.Fatalf("iptables output after restart should have contained %q, but was %q", ipTablesSearchString, out) + } + + deleteAllContainers() + + logDone("run,iptables - iptables rules for always restarted container created after daemon restart") +} From e171eda9989cb5d10e7fe14b258f239edb124541 Mon Sep 17 00:00:00 2001 From: Jessica Frazelle Date: Thu, 16 Oct 2014 16:54:55 -0700 Subject: [PATCH 171/592] fix for iptables cleanup 8307 Docker-DCO-1.1-Signed-off-by: Jessica Frazelle (github: jfrazelle) --- daemon/container.go | 4 +++- engine/job.go | 2 +- 2 files changed, 4 insertions(+), 2 deletions(-) diff --git a/daemon/container.go b/daemon/container.go index a477f19f22..a972f8b712 100644 --- a/daemon/container.go +++ b/daemon/container.go @@ -527,7 +527,9 @@ func (container *Container) ReleaseNetwork() { } eng := container.daemon.eng - eng.Job("release_interface", container.ID).Run() + job := eng.Job("release_interface", container.ID) + job.SetenvBool("overrideShutdown", true) + job.Run() container.NetworkSettings = &NetworkSettings{} } diff --git a/engine/job.go b/engine/job.go index ecd9441ff5..6c11b13446 100644 --- a/engine/job.go +++ b/engine/job.go @@ -48,7 +48,7 @@ const ( // If the job returns a failure status, an error is returned // which includes the status. func (job *Job) Run() error { - if job.Eng.IsShutdown() { + if job.Eng.IsShutdown() && !job.GetenvBool("overrideShutdown") { return fmt.Errorf("engine is shutdown") } // FIXME: this is a temporary workaround to avoid Engine.Shutdown From f9f80443638fc2d703ee6205c8ef3db8e38db9a3 Mon Sep 17 00:00:00 2001 From: Vincent Batts Date: Mon, 15 Sep 2014 14:45:53 -0400 Subject: [PATCH 172/592] archive: preserve hardlinks in Tar and Untar * integration test for preserving hardlinks Signed-off-by: Vincent Batts Signed-off-by: Vincent Batts --- integration-cli/docker_cli_commit_test.go | 52 ++++++++++++++++++++ pkg/archive/archive.go | 52 +++++++++++++++----- pkg/archive/archive_test.go | 58 +++++++++++++++++++++++ pkg/archive/changes.go | 18 ++++--- 4 files changed, 160 insertions(+), 20 deletions(-) diff --git a/integration-cli/docker_cli_commit_test.go b/integration-cli/docker_cli_commit_test.go index ddc7a2e041..7715e81bf5 100644 --- a/integration-cli/docker_cli_commit_test.go +++ b/integration-cli/docker_cli_commit_test.go @@ -101,6 +101,58 @@ func TestCommitNewFile(t *testing.T) { logDone("commit - commit file and read") } +func TestCommitHardlink(t *testing.T) { + cmd := exec.Command(dockerBinary, "run", "-t", "--name", "hardlinks", "busybox", "sh", "-c", "touch file1 && ln file1 file2 && ls -di file1 file2") + firstOuput, _, err := runCommandWithOutput(cmd) + if err != nil { + t.Fatal(err) + } + + chunks := strings.Split(strings.TrimSpace(firstOuput), " ") + inode := chunks[0] + found := false + for _, chunk := range chunks[1:] { + if chunk == inode { + found = true + break + } + } + if !found { + t.Fatalf("Failed to create hardlink in a container. 
Expected to find %q in %q", inode, chunks[1:]) + } + + cmd = exec.Command(dockerBinary, "commit", "hardlinks", "hardlinks") + imageID, _, err := runCommandWithOutput(cmd) + if err != nil { + t.Fatal(imageID, err) + } + imageID = strings.Trim(imageID, "\r\n") + + cmd = exec.Command(dockerBinary, "run", "-t", "hardlinks", "ls", "-di", "file1", "file2") + secondOuput, _, err := runCommandWithOutput(cmd) + if err != nil { + t.Fatal(err) + } + + chunks = strings.Split(strings.TrimSpace(secondOuput), " ") + inode = chunks[0] + found = false + for _, chunk := range chunks[1:] { + if chunk == inode { + found = true + break + } + } + if !found { + t.Fatalf("Failed to create hardlink in a container. Expected to find %q in %q", inode, chunks[1:]) + } + + deleteAllContainers() + deleteImages(imageID) + + logDone("commit - commit hardlinks") +} + func TestCommitTTY(t *testing.T) { cmd := exec.Command(dockerBinary, "run", "-t", "--name", "tty", "busybox", "/bin/ls") if _, err := runCommand(cmd); err != nil { diff --git a/pkg/archive/archive.go b/pkg/archive/archive.go index 9c4d881cfa..dd14b778f2 100644 --- a/pkg/archive/archive.go +++ b/pkg/archive/archive.go @@ -153,7 +153,15 @@ func (compression *Compression) Extension() string { return "" } -func addTarFile(path, name string, tw *tar.Writer, twBuf *bufio.Writer) error { +type tarAppender struct { + TarWriter *tar.Writer + Buffer *bufio.Writer + + // for hardlink mapping + SeenFiles map[uint64]string +} + +func (ta *tarAppender) addTarFile(path, name string) error { fi, err := os.Lstat(path) if err != nil { return err @@ -188,13 +196,28 @@ func addTarFile(path, name string, tw *tar.Writer, twBuf *bufio.Writer) error { } + // if it's a regular file and has more than 1 link, + // it's hardlinked, so set the type flag accordingly + if fi.Mode().IsRegular() && stat.Nlink > 1 { + // a link should have a name that it links too + // and that linked name should be first in the tar archive + ino := uint64(stat.Ino) + if oldpath, ok := ta.SeenFiles[ino]; ok { + hdr.Typeflag = tar.TypeLink + hdr.Linkname = oldpath + hdr.Size = 0 // This Must be here for the writer math to add up! + } else { + ta.SeenFiles[ino] = name + } + } + capability, _ := system.Lgetxattr(path, "security.capability") if capability != nil { hdr.Xattrs = make(map[string]string) hdr.Xattrs["security.capability"] = string(capability) } - if err := tw.WriteHeader(hdr); err != nil { + if err := ta.TarWriter.WriteHeader(hdr); err != nil { return err } @@ -204,17 +227,17 @@ func addTarFile(path, name string, tw *tar.Writer, twBuf *bufio.Writer) error { return err } - twBuf.Reset(tw) - _, err = io.Copy(twBuf, file) + ta.Buffer.Reset(ta.TarWriter) + _, err = io.Copy(ta.Buffer, file) file.Close() if err != nil { return err } - err = twBuf.Flush() + err = ta.Buffer.Flush() if err != nil { return err } - twBuf.Reset(nil) + ta.Buffer.Reset(nil) } return nil @@ -345,9 +368,15 @@ func TarWithOptions(srcPath string, options *TarOptions) (io.ReadCloser, error) return nil, err } - tw := tar.NewWriter(compressWriter) - go func() { + ta := &tarAppender{ + TarWriter: tar.NewWriter(compressWriter), + Buffer: pools.BufioWriter32KPool.Get(nil), + SeenFiles: make(map[uint64]string), + } + // this buffer is needed for the duration of this piped stream + defer pools.BufioWriter32KPool.Put(ta.Buffer) + // In general we log errors here but ignore them because // during e.g. 
a diff operation the container can continue // mutating the filesystem and we can see transient errors @@ -357,9 +386,6 @@ func TarWithOptions(srcPath string, options *TarOptions) (io.ReadCloser, error) options.Includes = []string{"."} } - twBuf := pools.BufioWriter32KPool.Get(nil) - defer pools.BufioWriter32KPool.Put(twBuf) - var renamedRelFilePath string // For when tar.Options.Name is set for _, include := range options.Includes { filepath.Walk(filepath.Join(srcPath, include), func(filePath string, f os.FileInfo, err error) error { @@ -395,7 +421,7 @@ func TarWithOptions(srcPath string, options *TarOptions) (io.ReadCloser, error) relFilePath = strings.Replace(relFilePath, renamedRelFilePath, options.Name, 1) } - if err := addTarFile(filePath, relFilePath, tw, twBuf); err != nil { + if err := ta.addTarFile(filePath, relFilePath); err != nil { log.Debugf("Can't add file %s to tar: %s", srcPath, err) } return nil @@ -403,7 +429,7 @@ func TarWithOptions(srcPath string, options *TarOptions) (io.ReadCloser, error) } // Make sure to check the error on Close. - if err := tw.Close(); err != nil { + if err := ta.TarWriter.Close(); err != nil { log.Debugf("Can't close tar writer: %s", err) } if err := compressWriter.Close(); err != nil { diff --git a/pkg/archive/archive_test.go b/pkg/archive/archive_test.go index 900fff5f01..3516aca8f0 100644 --- a/pkg/archive/archive_test.go +++ b/pkg/archive/archive_test.go @@ -249,6 +249,64 @@ func TestUntarUstarGnuConflict(t *testing.T) { } } +func TestTarWithHardLink(t *testing.T) { + origin, err := ioutil.TempDir("", "docker-test-tar-hardlink") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(origin) + if err := ioutil.WriteFile(path.Join(origin, "1"), []byte("hello world"), 0700); err != nil { + t.Fatal(err) + } + if err := os.Link(path.Join(origin, "1"), path.Join(origin, "2")); err != nil { + t.Fatal(err) + } + + var i1, i2 uint64 + if i1, err = getNlink(path.Join(origin, "1")); err != nil { + t.Fatal(err) + } + // sanity check that we can hardlink + if i1 != 2 { + t.Skipf("skipping since hardlinks don't work here; expected 2 links, got %d", i1) + } + + dest, err := ioutil.TempDir("", "docker-test-tar-hardlink-dest") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(dest) + + // we'll do this in two steps to separate failure + fh, err := Tar(origin, Uncompressed) + if err != nil { + t.Fatal(err) + } + + // ensure we can read the whole thing with no error, before writing back out + buf, err := ioutil.ReadAll(fh) + if err != nil { + t.Fatal(err) + } + + bRdr := bytes.NewReader(buf) + err = Untar(bRdr, dest, &TarOptions{Compression: Uncompressed}) + if err != nil { + t.Fatal(err) + } + + if i1, err = getInode(path.Join(dest, "1")); err != nil { + t.Fatal(err) + } + if i2, err = getInode(path.Join(dest, "2")); err != nil { + t.Fatal(err) + } + + if i1 != i2 { + t.Errorf("expected matching inodes, but got %d and %d", i1, i2) + } +} + func getNlink(path string) (uint64, error) { stat, err := os.Stat(path) if err != nil { diff --git a/pkg/archive/changes.go b/pkg/archive/changes.go index 557b5db583..3e9ab45267 100644 --- a/pkg/archive/changes.go +++ b/pkg/archive/changes.go @@ -368,11 +368,15 @@ func minor(device uint64) uint64 { // ExportChanges produces an Archive from the provided changes, relative to dir. 
func ExportChanges(dir string, changes []Change) (Archive, error) { reader, writer := io.Pipe() - tw := tar.NewWriter(writer) - go func() { - twBuf := pools.BufioWriter32KPool.Get(nil) - defer pools.BufioWriter32KPool.Put(twBuf) + ta := &tarAppender{ + TarWriter: tar.NewWriter(writer), + Buffer: pools.BufioWriter32KPool.Get(nil), + SeenFiles: make(map[uint64]string), + } + // this buffer is needed for the duration of this piped stream + defer pools.BufioWriter32KPool.Put(ta.Buffer) + // In general we log errors here but ignore them because // during e.g. a diff operation the container can continue // mutating the filesystem and we can see transient errors @@ -390,19 +394,19 @@ func ExportChanges(dir string, changes []Change) (Archive, error) { AccessTime: timestamp, ChangeTime: timestamp, } - if err := tw.WriteHeader(hdr); err != nil { + if err := ta.TarWriter.WriteHeader(hdr); err != nil { log.Debugf("Can't write whiteout header: %s", err) } } else { path := filepath.Join(dir, change.Path) - if err := addTarFile(path, change.Path[1:], tw, twBuf); err != nil { + if err := ta.addTarFile(path, change.Path[1:]); err != nil { log.Debugf("Can't add file %s to tar: %s", path, err) } } } // Make sure to check the error on Close. - if err := tw.Close(); err != nil { + if err := ta.TarWriter.Close(); err != nil { log.Debugf("Can't close layer: %s", err) } writer.Close() From f710a8d7746df5f54e2cbe7b90885e4eb75920b4 Mon Sep 17 00:00:00 2001 From: Vincent Batts Date: Tue, 28 Oct 2014 16:59:27 -0400 Subject: [PATCH 173/592] archive: example app for diffing directories By default is a demo of file differences, but can be used to create a tar of changes between an old and new path. Signed-off-by: Vincent Batts Signed-off-by: Vincent Batts --- pkg/archive/example_changes.go | 97 ++++++++++++++++++++++++++++++++++ 1 file changed, 97 insertions(+) create mode 100644 pkg/archive/example_changes.go diff --git a/pkg/archive/example_changes.go b/pkg/archive/example_changes.go new file mode 100644 index 0000000000..cedd46a408 --- /dev/null +++ b/pkg/archive/example_changes.go @@ -0,0 +1,97 @@ +// +build ignore + +// Simple tool to create an archive stream from an old and new directory +// +// By default it will stream the comparison of two temporary directories with junk files +package main + +import ( + "flag" + "fmt" + "io" + "io/ioutil" + "os" + "path" + + "github.com/Sirupsen/logrus" + "github.com/docker/docker/pkg/archive" +) + +var ( + flDebug = flag.Bool("D", false, "debugging output") + flNewDir = flag.String("newdir", "", "") + flOldDir = flag.String("olddir", "", "") + log = logrus.New() +) + +func main() { + flag.Usage = func() { + fmt.Println("Produce a tar from comparing two directory paths. 
By default a demo tar is created of around 200 files (including hardlinks)") + fmt.Printf("%s [OPTIONS]\n", os.Args[0]) + flag.PrintDefaults() + } + flag.Parse() + log.Out = os.Stderr + if (len(os.Getenv("DEBUG")) > 0) || *flDebug { + logrus.SetLevel(logrus.DebugLevel) + } + var newDir, oldDir string + + if len(*flNewDir) == 0 { + var err error + newDir, err = ioutil.TempDir("", "docker-test-newDir") + if err != nil { + log.Fatal(err) + } + defer os.RemoveAll(newDir) + if _, err := prepareUntarSourceDirectory(100, newDir, true); err != nil { + log.Fatal(err) + } + } else { + newDir = *flNewDir + } + + if len(*flOldDir) == 0 { + oldDir, err := ioutil.TempDir("", "docker-test-oldDir") + if err != nil { + log.Fatal(err) + } + defer os.RemoveAll(oldDir) + } else { + oldDir = *flOldDir + } + + changes, err := archive.ChangesDirs(newDir, oldDir) + if err != nil { + log.Fatal(err) + } + + a, err := archive.ExportChanges(newDir, changes) + if err != nil { + log.Fatal(err) + } + defer a.Close() + + i, err := io.Copy(os.Stdout, a) + if err != nil && err != io.EOF { + log.Fatal(err) + } + fmt.Fprintf(os.Stderr, "wrote archive of %d bytes", i) +} + +func prepareUntarSourceDirectory(numberOfFiles int, targetPath string, makeLinks bool) (int, error) { + fileData := []byte("fooo") + for n := 0; n < numberOfFiles; n++ { + fileName := fmt.Sprintf("file-%d", n) + if err := ioutil.WriteFile(path.Join(targetPath, fileName), fileData, 0700); err != nil { + return 0, err + } + if makeLinks { + if err := os.Link(path.Join(targetPath, fileName), path.Join(targetPath, fileName+"-link")); err != nil { + return 0, err + } + } + } + totalSize := numberOfFiles * len(fileData) + return totalSize, nil +} From f14a9ed011d9b73104631310a13eab447d53be3a Mon Sep 17 00:00:00 2001 From: Vincent Batts Date: Tue, 28 Oct 2014 17:01:10 -0400 Subject: [PATCH 174/592] archive: cleanup and more information Signed-off-by: Vincent Batts Signed-off-by: Vincent Batts --- pkg/archive/archive.go | 3 +-- pkg/archive/changes.go | 6 +++++- 2 files changed, 6 insertions(+), 3 deletions(-) diff --git a/pkg/archive/archive.go b/pkg/archive/archive.go index dd14b778f2..37b312e5b0 100644 --- a/pkg/archive/archive.go +++ b/pkg/archive/archive.go @@ -193,7 +193,6 @@ func (ta *tarAppender) addTarFile(path, name string) error { hdr.Devmajor = int64(major(uint64(stat.Rdev))) hdr.Devminor = int64(minor(uint64(stat.Rdev))) } - } // if it's a regular file and has more than 1 link, @@ -228,6 +227,7 @@ func (ta *tarAppender) addTarFile(path, name string) error { } ta.Buffer.Reset(ta.TarWriter) + defer ta.Buffer.Reset(nil) _, err = io.Copy(ta.Buffer, file) file.Close() if err != nil { @@ -237,7 +237,6 @@ func (ta *tarAppender) addTarFile(path, name string) error { if err != nil { return err } - ta.Buffer.Reset(nil) } return nil diff --git a/pkg/archive/changes.go b/pkg/archive/changes.go index 3e9ab45267..0a1f741c41 100644 --- a/pkg/archive/changes.go +++ b/pkg/archive/changes.go @@ -333,6 +333,8 @@ func ChangesDirs(newDir, oldDir string) ([]Change, error) { newRoot, err2 = collectFileInfo(newDir) errs <- err2 }() + + // block until both routines have returned for i := 0; i < 2; i++ { if err := <-errs; err != nil { return nil, err @@ -409,7 +411,9 @@ func ExportChanges(dir string, changes []Change) (Archive, error) { if err := ta.TarWriter.Close(); err != nil { log.Debugf("Can't close layer: %s", err) } - writer.Close() + if err := writer.Close(); err != nil { + log.Debugf("failed close Changes writer: %s", err) + } }() return reader, nil } From 
461d33eeb0fe1058c9fcea39124c2d1f9e141765 Mon Sep 17 00:00:00 2001 From: Fred Lifton Date: Wed, 29 Oct 2014 16:43:18 -0700 Subject: [PATCH 175/592] Adds new Docs Style Guide. Includes changes to mkdocs yml and removes style info from docs Read Me, adding a link instead. Docker-DCO-1.1-Signed-off-by: Fred Lifton (github: fredlf) Conflicts: docs/README.md Revisions to style guide based on review. Docker-DCO-1.1-Signed-off-by: Fred Lifton (github: fredlf) More Style Guide revisions based on review. Docker-DCO-1.1-Signed-off-by: Fred Lifton (github: fredlf) A few more style guide copy edits Docker-DCO-1.1-Signed-off-by: Fred Lifton (github: fredlf) --- docs/README.md | 44 +-- docs/mkdocs.yml | 1 + docs/sources/contributing/docs_style-guide.md | 276 ++++++++++++++++++ 3 files changed, 292 insertions(+), 29 deletions(-) create mode 100644 docs/sources/contributing/docs_style-guide.md diff --git a/docs/README.md b/docs/README.md index 27ed7eef11..f5ccb753da 100755 --- a/docs/README.md +++ b/docs/README.md @@ -11,9 +11,8 @@ development) branch maps to the "master" documentation. ## Contributing -- Follow the contribution guidelines ([see - `../CONTRIBUTING.md`](../CONTRIBUTING.md)). -- [Remember to sign your work!](../CONTRIBUTING.md#sign-your-work) +Be sure to follow the [contribution guidelines](../CONTRIBUTING.md)). +In particular, [remember to sign your work!](../CONTRIBUTING.md#sign-your-work) ## Getting Started @@ -41,26 +40,10 @@ to the menu definition in the `docs/mkdocs.yml` file. ## Style guide -The documentation is written with paragraphs wrapped at 80 column lines to make -it easier for terminal use. - -### Examples - -When writing examples, give the user hints by making them resemble what they see -in their shell: - -- Indent shell examples by 4 spaces so they get rendered as code. -- Start typed commands with `$ ` (dollar space), so that they are easily - differentiated from program output. -- Program output has no prefix. -- Comments begin with `# ` (hash space). -- In-container shell commands begin with `$$ ` (dollar dollar space). - -### Images - -When you need to add images, try to make them as small as possible (e.g., as -gifs). Usually images should go in the same directory as the `.md` file which -references them, or in a subdirectory if one already exists. +If you have questions about how to write for Docker's documentation (e.g., +questions about grammar, syntax, formatting, styling, language, or tone) please +see the [style guide](sources/contributing/docs_style-guide.md). If something +isn't clear in the guide, please submit a PR to help us improve it. ## Working using GitHub's file editor @@ -73,11 +56,11 @@ work!](../CONTRIBUTING.md#sign-your-work) ## Branches -**There are two branches related to editing docs**: `master` and a `docs` -branch. You should always edit the documentation on a local branch of the `master` +**There are two branches related to editing docs**: `master` and `docs`. You +should always edit the documentation on a local branch of the `master` branch, and send a PR against `master`. -That way your edits will automatically get included in later releases, and docs +That way your fixes will automatically get included in later releases, and docs maintainers can easily cherry-pick your changes into the `docs` release branch. In the rare case where your change is not forward-compatible, you may need to base your changes on the `docs` branch. @@ -95,8 +78,10 @@ found between Docker code releases. 
## Publishing Documentation -To publish a copy of the documentation you need to have Docker up and running on your -machine. You'll also need a `docs/awsconfig` file containing AWS settings to deploy to. +To publish a copy of the documentation you need to have Docker up and running on +your machine. You'll also need a `docs/awsconfig` file containing the settings +you need to access the AWS bucket you'll be deploying to. + The release script will create an s3 if needed, and will then push the files to it. [profile dowideit-docs] aws_access_key_id = IHOIUAHSIDH234rwf.... @@ -115,7 +100,8 @@ also update the root docs pages by running make AWS_S3_BUCKET=dowideit-docs BUILD_ROOT=yes docs-release -> **Note:** if you are using Boot2Docker on OSX and the above command returns an error, +> **Note:** +> if you are using Boot2Docker on OSX and the above command returns an error, > `Post http:///var/run/docker.sock/build?rm=1&t=docker-docs%3Apost-1.2.0-docs_update-2: > dial unix /var/run/docker.sock: no such file or directory', you need to set the Docker > host. Run `$(boot2docker shellinit)` to see the correct variable to set. The command diff --git a/docs/mkdocs.yml b/docs/mkdocs.yml index ca99701d8d..6e018f4afa 100755 --- a/docs/mkdocs.yml +++ b/docs/mkdocs.yml @@ -148,3 +148,4 @@ pages: - ['contributing/index.md', '**HIDDEN**'] - ['contributing/contributing.md', 'Contribute', 'Contributing'] - ['contributing/devenvironment.md', 'Contribute', 'Development environment'] +- ['contributing/docs_style-guide.md', 'Contribute', 'Documentation style guide'] diff --git a/docs/sources/contributing/docs_style-guide.md b/docs/sources/contributing/docs_style-guide.md new file mode 100644 index 0000000000..f0e84e789a --- /dev/null +++ b/docs/sources/contributing/docs_style-guide.md @@ -0,0 +1,276 @@ +page_title: Style Guide for Docker Documentation +page_description: Style guide for Docker documentation describing standards and conventions for contributors +page_keywords: style, guide, docker, documentation + +# Docker documentation: style & grammar conventions + +## Style standards + +Over time, different publishing communities have written standards for the style +and grammar they prefer in their publications. These standards are called +[style guides](http://en.wikipedia.org/wiki/Style_guide). Generally, Docker’s +documentation uses the standards described in the +[Associated Press's (AP) style guide](http://en.wikipedia.org/wiki/AP_Stylebook). +If a question about syntactical, grammatical, or lexical practice comes up, +refer to the AP guide first. If you don’t have a copy of (or online subscription +to) the AP guide, you can almost always find an answer to a specific question by +searching the web. If you can’t find an answer, please ask a +[maintainer](https://github.com/docker/docker/blob/master/docs/MAINTAINERS) and +we will find the answer. + +That said, please don't get too hung up on using correct style. We'd rather have +you submit good information that doesn't conform to the guide than no +information at all. Docker's tech writers are always happy to help you with the +prose, and we promise not to judge or use a red pen! + +> **Note:** +> The documentation is written with paragraphs wrapped at 80 column lines to +> make it easier for terminal use. You can probably set up your favorite text +> editor to do this automatically for you. + +### Prose style + +In general, try to write simple, declarative prose. We prefer short, +single-clause sentences and brief three-to-five sentence paragraphs. 
Try to +choose vocabulary that is straightforward and precise. Avoid creating new terms, +using obscure terms or, in particular, using a lot of jargon. For example, use +"use" instead of leveraging "leverage". + +That said, don’t feel like you have to write for localization or for +English-as-a-second-language (ESL) speakers specifically. Assume you are writing +for an ordinary speaker of English with a basic university education. If your +prose is simple, clear, and straightforward it will translate readily. + +One way to think about this is to assume Docker’s users are generally university +educated and read at at least a "16th" grade level (meaning they have a +university degree). You can use a [readability +tester](https://readability-score.com/) to help guide your judgement. For +example, the readability score for the phrase "Containers should be ephemeral" +is around the 13th grade level (first year at university), and so is acceptable. + +In all cases, we prefer clear, concise communication over stilted, formal +language. Don't feel like you have to write documentation that "sounds like +technical writing." + +### Metaphor and figurative language + +One exception to the "don’t write directly for ESL" rule is to avoid the use of +metaphor or other +[figurative language](http://en.wikipedia.org/wiki/Literal_and_figurative_language) to +describe things. There are too many cultural and social issues that can prevent +a reader from correctly interpreting a metaphor. + +## Specific conventions + +Below are some specific recommendations (and a few deviations) from AP style +that we use in our docs. + +### Contractions + +As long as your prose does not become too slangy or informal, it's perfectly +acceptable to use contractions in our documentation. Make sure to use +apostrophes correctly. + +### Use of dashes in a sentence. + +Dashes refers to the en dash (–) and the em dash (—). Dashes can be used to +separate parenthetical material. + +Usage Example: This is an example of a Docker client – which uses the Big Widget +to run – and does x, y, and z. + +Use dashes cautiously and consider whether commas or parentheses would work just +as well. We always emphasize short, succinct sentences. + +More info from the always handy [Grammar Girl site](http://www.quickanddirtytips.com/education/grammar/dashes-parentheses-and-commas). + +### Pronouns + +It's okay to use first and second person pronouns. Specifically, use "we" to +refer to Docker and "you" to refer to the user. For example, "We built the +`exec` command so you can resize a TTY session." + +As much as possible, avoid using gendered pronouns ("he" and "she", etc.). +Either recast the sentence so the pronoun is not needed or, less preferably, +use "they" instead. If you absolutely can't get around using a gendered pronoun, +pick one and stick to it. Which one you choose is up to you. One common +convention is to use the pronoun of the author's gender, but if you prefer to +default to "he" or "she", that's fine too. + +### Capitalization + +#### In general + +Only proper nouns should be capitalized in body text. In general, strive to be +as strict as possible in applying this rule. Avoid using capitals for emphasis +or to denote "specialness". + +The word "Docker" should always be capitalized when referring to either the +company or the technology. The only exception is when the term appears in a code +sample. 
+ +#### Starting sentences + +Because code samples should always be written exactly as they would appear +on-screen, you should avoid starting sentences with a code sample. + +#### In headings + +Headings take sentence capitalization, meaning that only the first letter is +capitalized (and words that would normally be capitalized in a sentence, e.g., +"Docker"). Do not use Title Case (i.e., capitalizing every word) for headings. Generally, we adhere to [AP style +for titles](http://www.quickanddirtytips.com/education/grammar/capitalizing-titles). + +### Periods + +We prefer one space after a period at the end of a sentence, not two. + +See [lists](#lists) below for how to punctuate list items. + +### Abbreviations and acronyms + +* Exempli gratia (e.g.) and id est (i.e.): these should always have periods and +are always followed by a comma. + +* Acronyms are pluralized by simply adding "s", e.g., PCs, OSs. + +* On first use on a given page, the complete term should be used, with the +abbreviation or acronym in parentheses. E.g., Red Hat Enterprise Linux (RHEL). +The exception is common, non-technical acronyms like AKA or ASAP. Note that +acronyms other than i.e. and e.g. are capitalized. + +* Other than "e.g." and "i.e." (as discussed above), acronyms do not take +periods, PC not P.C. + + +### Lists + +When writing lists, keep the following in mind: + +Use bullets when the items being listed are independent of each other and the +order of presentation is not important. + +Use numbers for steps that have to happen in order or if you have mentioned the +list in introductory text. For example, if you wrote "There are three config +settings available for SSL, as follows:", you would number each config setting +in the subsequent list. + +In all lists, if an item is a complete sentence, it should end with a +period. Otherwise, we prefer no terminal punctuation for list items. +Each item in a list should start with a capital. + +### Numbers + +Write out numbers in body text and titles from one to ten. From 11 on, use numerals. + +### Notes + +Use notes sparingly and only to bring things to the reader's attention that are +critical or otherwise deserving of being called out from the body text. Please +format all notes as follows: + + **Note:** + > One line of note text + > another line of note text + +### Avoid excess use of "i.e." + +Minimize your use of "i.e.". It can add an unnecessary interpretive burden on +the reader. Avoid writing "This is a thing, i.e., it is like this". Just +say what it is: "This thing is …" + +### Preferred usages + +#### Login vs. log in. + +A "login" is a noun (one word), as in "Enter your login". "Log in" is a compound +verb (two words), as in "Log in to the terminal". + +### Oxford comma + +One way in which we differ from AP style is that Docker’s docs use the [Oxford +comma](http://en.wikipedia.org/wiki/Serial_comma) in all cases. That’s our +position on this controversial topic, we won't change our mind, and that’s that! + +### Code and UI text styling + +We require `code font` styling (monospace, sans-serif) for all text that refers +to a command or other input or output from the CLI. This includes file paths +(e.g., `/etc/hosts/docker.conf`). If you enclose text in backticks (`) markdown +will style the text as code. + +Text from a CLI should be quoted verbatim, even if it contains errors or its +style contradicts this guide. You can add "(sic)" after the quote to indicate +the errors are in the quote and are not errors in our docs.
+ +Text taken from a GUI (e.g., menu text or button text) should appear in "double +quotes". The text should take the exact same capitalization, etc. as appears in +the GUI. E.g., Click "Continue" to save the settings. + +Text that refers to a keyboard command or hotkey is capitalized (e.g., Ctrl-D). + +When writing CLI examples, give the user hints by making the examples resemble +exactly what they see in their shell: + +* Indent shell examples by 4 spaces so they get rendered as code blocks. +* Start typed commands with `$ ` (dollar space), so that they are easily +differentiated from program output. +* Program output has no prefix. +* Comments begin with `# ` (hash space). +* In-container shell commands begin with `$$ ` (dollar dollar space). + +Please test all code samples to ensure that they are correct and functional so +that users can successfully cut-and-paste samples directly into the CLI. + +## Pull requests + +The pull request (PR) process is in place so that we can ensure changes made to +the docs are the best changes possible. A good PR will do some or all of the +following: + +* Explain why the change is needed +* Point out potential issues or questions +* Ask for help from experts in the company or the community +* Encourage feedback from core developers and others involved in creating the +software being documented. + +Writing a PR that is singular in focus and has clear objectives will encourage +all of the above. Done correctly, the process allows reviewers (maintainers and +community members) to validate the claims of the documentation and identify +potential problems in communication or presentation. + +### Commit messages + +In order to write clear, useful commit messages, please follow these +[recommendations](http://robots.thoughtbot.com/5-useful-tips-for-a-better-commit-message). + +## Links + +For accessibility and usability reasons, avoid using phrases such as "click +here" for link text. Recast your sentence so that the link text describes the +content of the link, as we did in the +["Commit messages" section](#commit-messages) above. + +You can use relative links (../linkeditem) to link to other pages in Docker's +documentation. + +## Graphics + +When you need to add a graphic, try to make the file-size as small as possible. +If you need help reducing file-size of a high-resolution image, feel free to +contact us for help. +Usually, graphics should go in the same directory as the .md file that +references them, or in a subdirectory for images if one already exists. + +The preferred file format for graphics is PNG, but GIF and JPG are also +acceptable. + +If you are referring to a specific part of the UI in an image, use +call-outs (circles and arrows or lines) to highlight what you’re referring to. +Line width for call-outs should not exceed five pixels. The preferred color for +call-outs is red. + +Be sure to include descriptive alt-text for the graphic. This greatly helps +users with accessibility issues. + +Lastly, be sure you have permission to use any included graphics. \ No newline at end of file From 05ff40b07a5857b41e17290e1d7cef516f122d6c Mon Sep 17 00:00:00 2001 From: shuai-z Date: Thu, 30 Oct 2014 13:31:19 +0800 Subject: [PATCH 176/592] Clear the internal state ourselves before raising error. If we need to raise an error, make sure the internal state is clean, because a successful driver.Get() may have its internal state changed (e.g. reference counting, or mounts), while callers will only do that after a successful Mount().
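To make that concrete, here is a minimal, self-contained sketch of the pattern (a hypothetical `driver` interface and `mountBase` helper, not the daemon's actual code): if an error is raised after a successful Get(), the reference has to be released right there, because the caller only calls Put() after a successful mount.

```
package main

import "fmt"

// driver is a stand-in for the graph driver used by the daemon.
type driver interface {
	Get(id string) (string, error)
	Put(id string)
}

// mountBase acquires a mount point via Get() and validates it. Get() mutates
// the driver's internal state (reference counts, mounts), so every error path
// after a successful Get() must call Put() itself; the caller never will,
// since from its point of view the mount failed.
func mountBase(d driver, id, basefs string) (string, error) {
	dir, err := d.Get(id)
	if err != nil {
		return "", err
	}
	if basefs != "" && basefs != dir {
		d.Put(id) // clear the internal state before raising the error
		return "", fmt.Errorf("driver returned inconsistent paths for %s: %q then %q", id, basefs, dir)
	}
	return dir, nil
}

// fakeDriver is a toy implementation used only to run the example.
type fakeDriver struct{ refs int }

func (f *fakeDriver) Get(id string) (string, error) { f.refs++; return "/mnt/" + id, nil }
func (f *fakeDriver) Put(id string)                 { f.refs-- }

func main() {
	d := &fakeDriver{}
	_, err := mountBase(d, "abc123", "/mnt/other")
	fmt.Println(err, "refs:", d.refs) // refs is back to 0 despite the error
}
```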
Signed-off-by: shuai-z --- daemon/daemon.go | 1 + 1 file changed, 1 insertion(+) diff --git a/daemon/daemon.go b/daemon/daemon.go index 658d578e4e..c19ebe9c65 100644 --- a/daemon/daemon.go +++ b/daemon/daemon.go @@ -970,6 +970,7 @@ func (daemon *Daemon) Mount(container *Container) error { if container.basefs == "" { container.basefs = dir } else if container.basefs != dir { + daemon.driver.Put(container.ID) return fmt.Errorf("Error: driver %s is returning inconsistent paths for container %s ('%s' then '%s')", daemon.driver, container.ID, container.basefs, dir) } From acd64278f13ef0ee565f4819951393b9c3fe89eb Mon Sep 17 00:00:00 2001 From: unclejack Date: Thu, 30 Oct 2014 14:48:30 +0200 Subject: [PATCH 177/592] pkg/reexec: move reexec code to a new package Docker-DCO-1.1-Signed-off-by: Cristian Staretu (github: unclejack) --- daemon/execdriver/lxc/init.go | 2 +- daemon/execdriver/native/exec.go | 2 +- daemon/execdriver/native/init.go | 2 +- daemon/networkdriver/portmapper/proxy.go | 2 +- docker/docker.go | 2 +- dockerinit/dockerinit.go | 2 +- integration/runtime_test.go | 2 +- pkg/reexec/MAINTAINERS | 1 + {reexec => pkg/reexec}/README.md | 0 {reexec => pkg/reexec}/reexec.go | 0 10 files changed, 8 insertions(+), 7 deletions(-) create mode 100644 pkg/reexec/MAINTAINERS rename {reexec => pkg/reexec}/README.md (100%) rename {reexec => pkg/reexec}/reexec.go (100%) diff --git a/daemon/execdriver/lxc/init.go b/daemon/execdriver/lxc/init.go index 2a91bbb5f5..680f53e1a4 100644 --- a/daemon/execdriver/lxc/init.go +++ b/daemon/execdriver/lxc/init.go @@ -13,7 +13,7 @@ import ( "strings" "syscall" - "github.com/docker/docker/reexec" + "github.com/docker/docker/pkg/reexec" "github.com/docker/libcontainer/netlink" ) diff --git a/daemon/execdriver/native/exec.go b/daemon/execdriver/native/exec.go index 0f7e5c07bd..84ad096725 100644 --- a/daemon/execdriver/native/exec.go +++ b/daemon/execdriver/native/exec.go @@ -11,7 +11,7 @@ import ( "runtime" "github.com/docker/docker/daemon/execdriver" - "github.com/docker/docker/reexec" + "github.com/docker/docker/pkg/reexec" "github.com/docker/libcontainer" "github.com/docker/libcontainer/namespaces" ) diff --git a/daemon/execdriver/native/init.go b/daemon/execdriver/native/init.go index 7021eeb67e..c1c988d934 100644 --- a/daemon/execdriver/native/init.go +++ b/daemon/execdriver/native/init.go @@ -10,7 +10,7 @@ import ( "path/filepath" "runtime" - "github.com/docker/docker/reexec" + "github.com/docker/docker/pkg/reexec" "github.com/docker/libcontainer" "github.com/docker/libcontainer/namespaces" "github.com/docker/libcontainer/syncpipe" diff --git a/daemon/networkdriver/portmapper/proxy.go b/daemon/networkdriver/portmapper/proxy.go index 341f0605e5..af20469ed8 100644 --- a/daemon/networkdriver/portmapper/proxy.go +++ b/daemon/networkdriver/portmapper/proxy.go @@ -14,7 +14,7 @@ import ( "time" "github.com/docker/docker/pkg/proxy" - "github.com/docker/docker/reexec" + "github.com/docker/docker/pkg/reexec" ) const userlandProxyCommandName = "docker-proxy" diff --git a/docker/docker.go b/docker/docker.go index 6d0979723a..16965452ae 100644 --- a/docker/docker.go +++ b/docker/docker.go @@ -13,7 +13,7 @@ import ( "github.com/docker/docker/api/client" "github.com/docker/docker/dockerversion" flag "github.com/docker/docker/pkg/mflag" - "github.com/docker/docker/reexec" + "github.com/docker/docker/pkg/reexec" "github.com/docker/docker/utils" ) diff --git a/dockerinit/dockerinit.go b/dockerinit/dockerinit.go index c5bba782b0..a6754b05a1 100644 --- a/dockerinit/dockerinit.go +++ 
b/dockerinit/dockerinit.go @@ -3,7 +3,7 @@ package main import ( _ "github.com/docker/docker/daemon/execdriver/lxc" _ "github.com/docker/docker/daemon/execdriver/native" - "github.com/docker/docker/reexec" + "github.com/docker/docker/pkg/reexec" ) func main() { diff --git a/integration/runtime_test.go b/integration/runtime_test.go index d2aac17081..01097b156e 100644 --- a/integration/runtime_test.go +++ b/integration/runtime_test.go @@ -22,7 +22,7 @@ import ( "github.com/docker/docker/image" "github.com/docker/docker/nat" "github.com/docker/docker/pkg/ioutils" - "github.com/docker/docker/reexec" + "github.com/docker/docker/pkg/reexec" "github.com/docker/docker/runconfig" "github.com/docker/docker/utils" ) diff --git a/pkg/reexec/MAINTAINERS b/pkg/reexec/MAINTAINERS new file mode 100644 index 0000000000..e48a0c7d4d --- /dev/null +++ b/pkg/reexec/MAINTAINERS @@ -0,0 +1 @@ +Michael Crosby (@crosbymichael) diff --git a/reexec/README.md b/pkg/reexec/README.md similarity index 100% rename from reexec/README.md rename to pkg/reexec/README.md diff --git a/reexec/reexec.go b/pkg/reexec/reexec.go similarity index 100% rename from reexec/reexec.go rename to pkg/reexec/reexec.go From 36ffbd7acf60d15942c0591bb4fec498f021331e Mon Sep 17 00:00:00 2001 From: Huayi Zhang Date: Wed, 29 Oct 2014 17:17:02 +0800 Subject: [PATCH 178/592] Add docs for --dns-search=. PR 6720 introduce that use `--dns-search=.` will not set `search` in `/etc/resolv.conf`. Signed-off-by: Huayi Zhang --- docs/man/docker-create.1.md | 2 +- docs/man/docker-run.1.md | 18 +++++++++--------- docs/sources/articles/networking.md | 3 ++- docs/sources/reference/commandline/cli.md | 8 ++++---- 4 files changed, 16 insertions(+), 15 deletions(-) diff --git a/docs/man/docker-create.1.md b/docs/man/docker-create.1.md index 00934347e3..92e34125a4 100644 --- a/docs/man/docker-create.1.md +++ b/docs/man/docker-create.1.md @@ -64,7 +64,7 @@ docker-create - Create a new container Add a host device to the container (e.g. --device=/dev/sdc:/dev/xvdc:rwm) **--dns-search**=[] - Set custom DNS search domains + Set custom DNS search domains (Use --dns-search=. if you don't wish to set the search domain) **--dns**=[] Set custom DNS servers diff --git a/docs/man/docker-run.1.md b/docs/man/docker-run.1.md index 32777b7f0e..8da95af6f8 100644 --- a/docs/man/docker-run.1.md +++ b/docs/man/docker-run.1.md @@ -103,7 +103,7 @@ stopping the process by pressing the keys CTRL-P CTRL-Q. Add a host device to the container (e.g. --device=/dev/sdc:/dev/xvdc:rwm) **--dns-search**=[] - Set custom DNS search domains + Set custom DNS search domains (Use --dns-search=. if you don't wish to set the search domain) **--dns**=*IP-address* Set custom DNS servers. This option can be used to override the DNS @@ -195,8 +195,8 @@ and foreground Docker containers. When set to true publish all exposed ports to the host interfaces. The default is false. If the operator uses -P (or -p) then Docker will make the exposed port accessible on the host and the ports will be available to any -client that can reach the host. When using -P, Docker will bind the exposed -ports to a random port on the host between 49153 and 65535. To find the +client that can reach the host. When using -P, Docker will bind the exposed +ports to a random port on the host between 49153 and 65535. To find the mapping between the host ports and the exposed ports, use **docker port**. **-p**, **--publish**=[] @@ -232,11 +232,11 @@ interactive shell. The default is value is false. 
**-v**, **--volume**=*volume*[:ro|:rw] - Bind mount a volume to the container. + Bind mount a volume to the container. The **-v** option can be used one or more times to add one or more mounts to a container. These mounts can then be -used in other containers using the **--volumes-from** option. +used in other containers using the **--volumes-from** option. The volume may be optionally suffixed with :ro or :rw to mount the volumes in read-only or read-write mode, respectively. By default, the volumes are mounted @@ -247,11 +247,11 @@ read-write. See examples. Once a volume is mounted in a one container it can be shared with other containers using the **--volumes-from** option when running those other containers. The volumes can be shared even if the original container with the -mount is not running. +mount is not running. -The container ID may be optionally suffixed with :ro or -:rw to mount the volumes in read-only or read-write mode, respectively. By -default, the volumes are mounted in the same mode (read write or read only) as +The container ID may be optionally suffixed with :ro or +:rw to mount the volumes in read-only or read-write mode, respectively. By +default, the volumes are mounted in the same mode (read write or read only) as the reference container. diff --git a/docs/sources/articles/networking.md b/docs/sources/articles/networking.md index 59673ecf6f..036babb006 100644 --- a/docs/sources/articles/networking.md +++ b/docs/sources/articles/networking.md @@ -14,7 +14,7 @@ Docker made the choice `172.17.42.1/16` when I started it a few minutes ago, for example — a 16-bit netmask providing 65,534 addresses for the host machine and its containers. -> **Note:** +> **Note:** > This document discusses advanced networking configuration > and options for Docker. In most cases you won't need this information. > If you're looking to get started with a simpler explanation of Docker @@ -170,6 +170,7 @@ Four different options affect container domain name services. When a container process attempts to access `host` and the search domain `example.com` is set, for instance, the DNS logic will not only look up `host` but also `host.example.com`. + Use `--dns-search=.` if you don't wish to set the search domain. Note that Docker, in the absence of either of the last two options above, will make `/etc/resolv.conf` inside of each container look like diff --git a/docs/sources/reference/commandline/cli.md b/docs/sources/reference/commandline/cli.md index e1beaeb29c..006e75d0f8 100644 --- a/docs/sources/reference/commandline/cli.md +++ b/docs/sources/reference/commandline/cli.md @@ -113,7 +113,7 @@ proxy in front of it. You can listen on port `2375` on all network interfaces with `-H tcp://0.0.0.0:2375`, or on a particular network interface using its IP address: `-H tcp://192.168.59.103:2375`. -On Systemd based systems, you can communicate with the daemon via +On Systemd based systems, you can communicate with the daemon via [systemd socket activation](http://0pointer.de/blog/projects/socket-activation.html), use `docker -d -H fd://`. Using `fd://` will work perfectly for most setups but you can also specify individual sockets: `docker -d -H fd://3`. If the @@ -999,7 +999,7 @@ used, which is observable by the process being suspended. With the cgroups freez the process is unaware, and unable to capture, that it is being suspended, and subsequently resumed. -See the +See the [cgroups freezer documentation](https://www.kernel.org/doc/Documentation/cgroups/freezer-subsystem.txt) for further details. 
@@ -1191,7 +1191,7 @@ removed before the image is removed. -d, --detach=false Detached mode: run the container in the background and print the new container ID --device=[] Add a host device to the container (e.g. --device=/dev/sdc:/dev/xvdc:rwm) --dns=[] Set custom DNS servers - --dns-search=[] Set custom DNS search domains + --dns-search=[] Set custom DNS search domains (Use --dns-search=. if you don't wish to set the search domain) -e, --env=[] Set environment variables --entrypoint="" Overwrite the default ENTRYPOINT of the image --env-file=[] Read in a line delimited file of environment variables @@ -1579,7 +1579,7 @@ them to [*Share Images via Repositories*]( The `docker unpause` command uses the cgroups freezer to un-suspend all processes in a container. -See the +See the [cgroups freezer documentation](https://www.kernel.org/doc/Documentation/cgroups/freezer-subsystem.txt) for further details. From 9542ea72188614d5b14f9e7fc31c80e6425738c4 Mon Sep 17 00:00:00 2001 From: Andy Wilson Date: Tue, 28 Oct 2014 10:06:04 -0500 Subject: [PATCH 179/592] doc: Update host integration article Update for changes in docker 1.2. Running the docker daemon with "-r=false" has been deprecated in favor of per-container restart policies. Signed-off-by: wilsaj --- docs/sources/articles/host_integration.md | 52 +++++++++++++++-------- 1 file changed, 34 insertions(+), 18 deletions(-) diff --git a/docs/sources/articles/host_integration.md b/docs/sources/articles/host_integration.md index 53fc2890e8..89fd2a1f7a 100644 --- a/docs/sources/articles/host_integration.md +++ b/docs/sources/articles/host_integration.md @@ -4,31 +4,51 @@ page_keywords: systemd, upstart, supervisor, docker, documentation, host integra # Automatically Start Containers -You can use your Docker containers with process managers like -`upstart`, `systemd` and `supervisor`. +As of Docker 1.2, +[restart policies](/reference/commandline/cli/#restart-policies) are the +built-in Docker mechanism for restarting containers when they exit. If set, +restart policies will be used when the Docker daemon starts up, as typically +happens after a system boot. Restart policies will ensure that linked containers +are started in the correct order. -## Introduction +If restart policies don't suit your needs (i.e., you have non-Docker processes +that depend on Docker containers), you can use a process manager like +[upstart](http://upstart.ubuntu.com/), +[systemd](http://freedesktop.org/wiki/Software/systemd/) or +[supervisor](http://supervisord.org/) instead. -If you want a process manager to manage your containers you will need to -run the docker daemon with the `-r=false` so that docker will not -automatically restart your containers when the host is restarted. + +## Using a Process Manager + +Docker does not set any restart policies by default, but be aware that they will +conflict with most process managers. So don't set restart policies if you are +using a process manager. + +*Note:* Prior to Docker 1.2, restarting of Docker containers had to be +explicitly disabled. Refer to the +[previous version](/v1.1/articles/host_integration/) of this article for the +details on how to do that. When you have finished setting up your image and are happy with your running container, you can then attach a process manager to manage it. 
-When you run `docker start -a` docker will automatically attach to the +When you run `docker start -a`, Docker will automatically attach to the running container, or start it if needed and forward all signals so that the process manager can detect when a container stops and correctly restart it. Here are a few sample scripts for systemd and upstart to integrate with -docker. +Docker. -## Sample Upstart Script -In this example We've already created a container to run Redis with -`--name redis_server`. To create an upstart script for our container, we -create a file named `/etc/init/redis.conf` and place the following into -it: +## Examples + +The examples below show configuration files for two popular process managers, +upstart and systemd. In these examples, we'll assume that we have already +created a container to run Redis with `--name=redis_server`. These files define +a new service that will be started after the docker daemon service has started. + + +### upstart description "Redis container" author "Me" @@ -39,12 +59,8 @@ it: /usr/bin/docker start -a redis_server end script -Next, we have to configure docker so that it's run with the option -`-r=false`. Run the following command: - $ sudo sh -c "echo 'DOCKER_OPTS=\"-r=false\"' >> /etc/default/docker" - -## Sample systemd Script +### systemd [Unit] Description=Redis container From 8a81c462722c7158e481f974f628843e7c172158 Mon Sep 17 00:00:00 2001 From: Tonis Tiigi Date: Thu, 30 Oct 2014 20:52:13 +0200 Subject: [PATCH 180/592] Move consumeSlow() under test utils MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Signed-off-by: Tõnis Tiigi (github: tonistiigi) --- integration-cli/docker_cli_run_test.go | 16 ---------------- integration-cli/utils.go | 16 ++++++++++++++++ 2 files changed, 16 insertions(+), 16 deletions(-) diff --git a/integration-cli/docker_cli_run_test.go b/integration-cli/docker_cli_run_test.go index 072c6f6b44..eb174ad974 100644 --- a/integration-cli/docker_cli_run_test.go +++ b/integration-cli/docker_cli_run_test.go @@ -2474,19 +2474,3 @@ func TestRunSlowStdoutConsumer(t *testing.T) { logDone("run - slow consumer") } - -func consumeSlow(reader io.Reader, chunkSize int, interval time.Duration) (n int, err error) { - buffer := make([]byte, chunkSize) - for { - var readBytes int - readBytes, err = reader.Read(buffer) - n += readBytes - if err != nil { - if err == io.EOF { - err = nil - } - return - } - time.Sleep(interval) - } -} diff --git a/integration-cli/utils.go b/integration-cli/utils.go index e99e45591b..3d15f66f1d 100644 --- a/integration-cli/utils.go +++ b/integration-cli/utils.go @@ -253,3 +253,19 @@ func makeRandomString(n int) string { } return string(b) } + +func consumeSlow(reader io.Reader, chunkSize int, interval time.Duration) (n int, err error) { + buffer := make([]byte, chunkSize) + for { + var readBytes int + readBytes, err = reader.Read(buffer) + n += readBytes + if err != nil { + if err == io.EOF { + err = nil + } + return + } + time.Sleep(interval) + } +} From 417e48e4a00c891e8fe5614ac6a1ef12de951f72 Mon Sep 17 00:00:00 2001 From: Tonis Tiigi Date: Thu, 30 Oct 2014 21:10:38 +0200 Subject: [PATCH 181/592] Generalize consumeSlow and add stop support MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Signed-off-by: Tõnis Tiigi (github: tonistiigi) --- integration-cli/docker_cli_run_test.go | 3 +-- integration-cli/utils.go | 25 ++++++++++++++++--------- 2 files changed, 17 insertions(+), 11 deletions(-) diff --git 
a/integration-cli/docker_cli_run_test.go b/integration-cli/docker_cli_run_test.go index eb174ad974..95cb0c86d1 100644 --- a/integration-cli/docker_cli_run_test.go +++ b/integration-cli/docker_cli_run_test.go @@ -4,7 +4,6 @@ import ( "bufio" "bytes" "fmt" - "io" "io/ioutil" "net" "os" @@ -2462,7 +2461,7 @@ func TestRunSlowStdoutConsumer(t *testing.T) { if err := c.Start(); err != nil { t.Fatal(err) } - n, err := consumeSlow(stdout, 10000, 5*time.Millisecond) + n, err := consumeWithSpeed(stdout, 10000, 5*time.Millisecond, nil) if err != nil { t.Fatal(err) } diff --git a/integration-cli/utils.go b/integration-cli/utils.go index 3d15f66f1d..05c27dc5ac 100644 --- a/integration-cli/utils.go +++ b/integration-cli/utils.go @@ -254,18 +254,25 @@ func makeRandomString(n int) string { return string(b) } -func consumeSlow(reader io.Reader, chunkSize int, interval time.Duration) (n int, err error) { +// Reads chunkSize bytes from reader after every interval. +// Returns total read bytes. +func consumeWithSpeed(reader io.Reader, chunkSize int, interval time.Duration, stop chan bool) (n int, err error) { buffer := make([]byte, chunkSize) for { - var readBytes int - readBytes, err = reader.Read(buffer) - n += readBytes - if err != nil { - if err == io.EOF { - err = nil - } + select { + case <-stop: return + default: + var readBytes int + readBytes, err = reader.Read(buffer) + n += readBytes + if err != nil { + if err == io.EOF { + err = nil + } + return + } + time.Sleep(interval) } - time.Sleep(interval) } } From c2cf97a0747976c2307e991028dc703b2b430d80 Mon Sep 17 00:00:00 2001 From: Tonis Tiigi Date: Thu, 30 Oct 2014 21:33:26 +0200 Subject: [PATCH 182/592] Fix panic on slow log consumer. MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Fixes #8832 All stdio streams need to finish writing before the connection can be closed. 
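Reduced to its essentials, the pattern is the sketch below (generic readers standing in for the container's stdout/stderr log pipes; an illustration, not the daemon code): the function must not return, and thereby let the connection be torn down, until every copier goroutine has finished writing.

```
package main

import (
	"io"
	"os"
	"strings"
	"sync"
)

// copyAll drains several streams concurrently and only returns once all of
// them are done, so whatever owns the output (e.g. an HTTP connection) is not
// closed while a stream still has data left to write.
func copyAll(streams ...io.Reader) error {
	var wg sync.WaitGroup
	errs := make(chan error, len(streams))
	for _, r := range streams {
		wg.Add(1)
		go func(r io.Reader) {
			defer wg.Done()
			_, err := io.Copy(os.Stdout, r)
			errs <- err
		}(r)
	}
	wg.Wait()   // every writer has finished
	close(errs) // now the buffered channel can be drained for errors
	for err := range errs {
		if err != nil {
			return err
		}
	}
	return nil
}

func main() {
	_ = copyAll(strings.NewReader("stdout stream\n"), strings.NewReader("stderr stream\n"))
}
```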
Signed-off-by: Tõnis Tiigi (github: tonistiigi) --- daemon/logs.go | 19 +++++++-- integration-cli/docker_cli_logs_test.go | 51 +++++++++++++++++++++++++ 2 files changed, 67 insertions(+), 3 deletions(-) diff --git a/daemon/logs.go b/daemon/logs.go index a5fac2c3d9..6c9373f737 100644 --- a/daemon/logs.go +++ b/daemon/logs.go @@ -7,6 +7,7 @@ import ( "io" "os" "strconv" + "sync" log "github.com/Sirupsen/logrus" "github.com/docker/docker/engine" @@ -112,24 +113,36 @@ func (daemon *Daemon) ContainerLogs(job *engine.Job) engine.Status { } if follow && container.IsRunning() { errors := make(chan error, 2) + wg := sync.WaitGroup{} + if stdout { + wg.Add(1) stdoutPipe := container.StdoutLogPipe() defer stdoutPipe.Close() go func() { errors <- jsonlog.WriteLog(stdoutPipe, job.Stdout, format) + wg.Done() }() } if stderr { + wg.Add(1) stderrPipe := container.StderrLogPipe() defer stderrPipe.Close() go func() { errors <- jsonlog.WriteLog(stderrPipe, job.Stderr, format) + wg.Done() }() } - err := <-errors - if err != nil { - log.Errorf("%s", err) + + wg.Wait() + close(errors) + + for err := range errors { + if err != nil { + log.Errorf("%s", err) + } } + } return engine.StatusOK } diff --git a/integration-cli/docker_cli_logs_test.go b/integration-cli/docker_cli_logs_test.go index d6d3f9320f..b86a50480d 100644 --- a/integration-cli/docker_cli_logs_test.go +++ b/integration-cli/docker_cli_logs_test.go @@ -284,3 +284,54 @@ func TestLogsFollowStopped(t *testing.T) { deleteContainer(cleanedContainerID) logDone("logs - logs follow stopped container") } + +// Regression test for #8832 +func TestLogsFollowSlowStdoutConsumer(t *testing.T) { + runCmd := exec.Command(dockerBinary, "run", "-d", "busybox", "/bin/sh", "-c", `usleep 200000;yes X | head -c 200000`) + + out, _, _, err := runCommandWithStdoutStderr(runCmd) + if err != nil { + t.Fatalf("run failed with errors: %s, %v", out, err) + } + + cleanedContainerID := stripTrailingCharacters(out) + defer deleteContainer(cleanedContainerID) + + stopSlowRead := make(chan bool) + + go func() { + exec.Command(dockerBinary, "wait", cleanedContainerID).Run() + stopSlowRead <- true + }() + + logCmd := exec.Command(dockerBinary, "logs", "-f", cleanedContainerID) + + stdout, err := logCmd.StdoutPipe() + if err != nil { + t.Fatal(err) + } + + if err := logCmd.Start(); err != nil { + t.Fatal(err) + } + + // First read slowly + bytes1, err := consumeWithSpeed(stdout, 10, 50*time.Millisecond, stopSlowRead) + if err != nil { + t.Fatal(err) + } + + // After the container has finished we can continue reading fast + bytes2, err := consumeWithSpeed(stdout, 32*1024, 0, nil) + if err != nil { + t.Fatal(err) + } + + actual := bytes1 + bytes2 + expected := 200000 + if actual != expected { + t.Fatalf("Invalid bytes read: %d, expected %d", actual, expected) + } + + logDone("logs - follow slow consumer") +} From 5e02468e76d61060f83a4d755b43f834981188f1 Mon Sep 17 00:00:00 2001 From: Vincent Batts Date: Thu, 30 Oct 2014 13:42:57 -0400 Subject: [PATCH 183/592] ./pkg/archive: clean up Stat_t assertion inspired by @tonistiigi comment (https://github.com/docker/docker/pull/8046/files#r19579960) Signed-off-by: Vincent Batts --- pkg/archive/archive.go | 17 ++++++++++------- 1 file changed, 10 insertions(+), 7 deletions(-) diff --git a/pkg/archive/archive.go b/pkg/archive/archive.go index 37b312e5b0..3857220455 100644 --- a/pkg/archive/archive.go +++ b/pkg/archive/archive.go @@ -185,8 +185,13 @@ func (ta *tarAppender) addTarFile(path, name string) error { hdr.Name = name - stat, ok := 
fi.Sys().(*syscall.Stat_t) - if ok { + var ( + nlink uint32 + inode uint64 + ) + if stat, ok := fi.Sys().(*syscall.Stat_t); ok { + nlink = uint32(stat.Nlink) + inode = uint64(stat.Ino) // Currently go does not fill in the major/minors if stat.Mode&syscall.S_IFBLK == syscall.S_IFBLK || stat.Mode&syscall.S_IFCHR == syscall.S_IFCHR { @@ -194,19 +199,17 @@ func (ta *tarAppender) addTarFile(path, name string) error { hdr.Devminor = int64(minor(uint64(stat.Rdev))) } } - // if it's a regular file and has more than 1 link, // it's hardlinked, so set the type flag accordingly - if fi.Mode().IsRegular() && stat.Nlink > 1 { + if fi.Mode().IsRegular() && nlink > 1 { // a link should have a name that it links too // and that linked name should be first in the tar archive - ino := uint64(stat.Ino) - if oldpath, ok := ta.SeenFiles[ino]; ok { + if oldpath, ok := ta.SeenFiles[inode]; ok { hdr.Typeflag = tar.TypeLink hdr.Linkname = oldpath hdr.Size = 0 // This Must be here for the writer math to add up! } else { - ta.SeenFiles[ino] = name + ta.SeenFiles[inode] = name } } From 930a756ad55ad5f4e5e6391b41673743d7254c2b Mon Sep 17 00:00:00 2001 From: Vincent Batts Date: Thu, 30 Oct 2014 17:04:56 -0400 Subject: [PATCH 184/592] mount: move the MakePrivate to pkg/mount The logic is unrelated to graphdriver. Signed-off-by: Vincent Batts --- daemon/graphdriver/aufs/aufs.go | 2 +- daemon/graphdriver/btrfs/btrfs.go | 2 +- daemon/graphdriver/devmapper/driver.go | 2 +- daemon/graphdriver/driver.go | 16 ---------------- pkg/mount/sharedsubtree_linux.go | 18 ++++++++++++++++++ 5 files changed, 21 insertions(+), 19 deletions(-) create mode 100644 pkg/mount/sharedsubtree_linux.go diff --git a/daemon/graphdriver/aufs/aufs.go b/daemon/graphdriver/aufs/aufs.go index 8ba097d45a..c05b95c4ec 100644 --- a/daemon/graphdriver/aufs/aufs.go +++ b/daemon/graphdriver/aufs/aufs.go @@ -98,7 +98,7 @@ func Init(root string, options []string) (graphdriver.Driver, error) { return nil, err } - if err := graphdriver.MakePrivate(root); err != nil { + if err := mountpk.MakePrivate(root); err != nil { return nil, err } diff --git a/daemon/graphdriver/btrfs/btrfs.go b/daemon/graphdriver/btrfs/btrfs.go index 26102aa1ef..954cf9b245 100644 --- a/daemon/graphdriver/btrfs/btrfs.go +++ b/daemon/graphdriver/btrfs/btrfs.go @@ -40,7 +40,7 @@ func Init(home string, options []string) (graphdriver.Driver, error) { return nil, err } - if err := graphdriver.MakePrivate(home); err != nil { + if err := mount.MakePrivate(home); err != nil { return nil, err } diff --git a/daemon/graphdriver/devmapper/driver.go b/daemon/graphdriver/devmapper/driver.go index 53b8da436d..9e1d88e7d4 100644 --- a/daemon/graphdriver/devmapper/driver.go +++ b/daemon/graphdriver/devmapper/driver.go @@ -34,7 +34,7 @@ func Init(home string, options []string) (graphdriver.Driver, error) { return nil, err } - if err := graphdriver.MakePrivate(home); err != nil { + if err := mount.MakePrivate(home); err != nil { return nil, err } diff --git a/daemon/graphdriver/driver.go b/daemon/graphdriver/driver.go index 91040db97a..e924aa45df 100644 --- a/daemon/graphdriver/driver.go +++ b/daemon/graphdriver/driver.go @@ -7,7 +7,6 @@ import ( "path" "github.com/docker/docker/pkg/archive" - "github.com/docker/docker/pkg/mount" ) type FsMagic uint64 @@ -139,18 +138,3 @@ func New(root string, options []string) (driver Driver, err error) { } return nil, fmt.Errorf("No supported storage backend found") } - -func MakePrivate(mountPoint string) error { - mounted, err := mount.Mounted(mountPoint) - if err != nil { - 
return err - } - - if !mounted { - if err := mount.Mount(mountPoint, mountPoint, "none", "bind,rw"); err != nil { - return err - } - } - - return mount.ForceMount("", mountPoint, "none", "private") -} diff --git a/pkg/mount/sharedsubtree_linux.go b/pkg/mount/sharedsubtree_linux.go new file mode 100644 index 0000000000..566ebbd6bf --- /dev/null +++ b/pkg/mount/sharedsubtree_linux.go @@ -0,0 +1,18 @@ +// +build linux + +package mount + +func MakePrivate(mountPoint string) error { + mounted, err := Mounted(mountPoint) + if err != nil { + return err + } + + if !mounted { + if err := Mount(mountPoint, mountPoint, "none", "bind,rw"); err != nil { + return err + } + } + + return ForceMount("", mountPoint, "none", "private") +} From 79a77a396e003d0f5827c48d1b179f87c1542311 Mon Sep 17 00:00:00 2001 From: Tonis Tiigi Date: Fri, 31 Oct 2014 01:10:35 +0200 Subject: [PATCH 185/592] Wait for hijack on docker start command MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit With current implementation there was a possibility that /start responds quicker than /attach, meaning that some output would be clipped. Fixed so the implementation matches with `docker run`. This also fixes the flaky test results for TestCreateEchoStdout. Signed-off-by: Tõnis Tiigi (github: tonistiigi) --- api/client/commands.go | 20 +++++++++++++++++++- 1 file changed, 19 insertions(+), 1 deletion(-) diff --git a/api/client/commands.go b/api/client/commands.go index da1eab27c9..7ce208d466 100644 --- a/api/client/commands.go +++ b/api/client/commands.go @@ -622,6 +622,8 @@ func (cli *DockerCli) CmdStart(args ...string) error { return nil } + hijacked := make(chan io.Closer) + if *attach || *openStdin { if cmd.NArg() > 1 { return fmt.Errorf("You cannot start and attach multiple containers at once.") @@ -658,8 +660,24 @@ func (cli *DockerCli) CmdStart(args ...string) error { v.Set("stderr", "1") cErr = promise.Go(func() error { - return cli.hijack("POST", "/containers/"+cmd.Arg(0)+"/attach?"+v.Encode(), tty, in, cli.out, cli.err, nil, nil) + return cli.hijack("POST", "/containers/"+cmd.Arg(0)+"/attach?"+v.Encode(), tty, in, cli.out, cli.err, hijacked, nil) }) + } else { + close(hijacked) + } + + // Acknowledge the hijack before starting + select { + case closer := <-hijacked: + // Make sure that the hijack gets closed when returning (results + // in closing the hijack chan and freeing server's goroutines) + if closer != nil { + defer closer.Close() + } + case err := <-cErr: + if err != nil { + return err + } } var encounteredError error From 380c8320a78dc16da65d9d13004422ac5a0cca53 Mon Sep 17 00:00:00 2001 From: unclejack Date: Sat, 16 Aug 2014 13:27:04 +0300 Subject: [PATCH 186/592] make http usage for registry explicit Docker-DCO-1.1-Signed-off-by: Cristian Staretu (github: unclejack) Conflicts: daemon/config.go daemon/daemon.go graph/pull.go graph/push.go graph/tags.go registry/registry.go registry/service.go --- daemon/config.go | 2 + daemon/daemon.go | 2 +- docs/sources/reference/commandline/cli.md | 3 +- graph/pull.go | 4 +- graph/push.go | 4 +- graph/tags.go | 24 ++++++----- registry/registry.go | 49 +++++++++++++++++++++++ registry/service.go | 2 +- 8 files changed, 74 insertions(+), 16 deletions(-) diff --git a/daemon/config.go b/daemon/config.go index 8780294ce1..bae0c8cd29 100644 --- a/daemon/config.go +++ b/daemon/config.go @@ -31,6 +31,7 @@ type Config struct { BridgeIface string BridgeIP string FixedCIDR string + InsecureRegistries []string InterContainerCommunication bool 
GraphDriver string GraphOptions []string @@ -55,6 +56,7 @@ func (config *Config) InstallFlags() { flag.StringVar(&config.BridgeIP, []string{"#bip", "-bip"}, "", "Use this CIDR notation address for the network bridge's IP, not compatible with -b") flag.StringVar(&config.BridgeIface, []string{"b", "-bridge"}, "", "Attach containers to a pre-existing network bridge\nuse 'none' to disable container networking") flag.StringVar(&config.FixedCIDR, []string{"-fixed-cidr"}, "", "IPv4 subnet for fixed IPs (ex: 10.20.0.0/16)\nthis subnet must be nested in the bridge subnet (which is defined by -b or --bip)") + opts.ListVar(&config.InsecureRegistries, []string{"-insecure-registry"}, "Make these registries use http") flag.BoolVar(&config.InterContainerCommunication, []string{"#icc", "-icc"}, true, "Enable inter-container communication") flag.StringVar(&config.GraphDriver, []string{"s", "-storage-driver"}, "", "Force the Docker runtime to use a specific storage driver") flag.StringVar(&config.ExecDriver, []string{"e", "-exec-driver"}, "native", "Force the Docker runtime to use a specific exec driver") diff --git a/daemon/daemon.go b/daemon/daemon.go index 658d578e4e..7922bf1bcf 100644 --- a/daemon/daemon.go +++ b/daemon/daemon.go @@ -832,7 +832,7 @@ func NewDaemonFromDirectory(config *Config, eng *engine.Engine) (*Daemon, error) } log.Debugf("Creating repository list") - repositories, err := graph.NewTagStore(path.Join(config.Root, "repositories-"+driver.String()), g, config.Mirrors) + repositories, err := graph.NewTagStore(path.Join(config.Root, "repositories-"+driver.String()), g, config.Mirrors, config.InsecureRegistries) if err != nil { return nil, fmt.Errorf("Couldn't create Tag store: %s", err) } diff --git a/docs/sources/reference/commandline/cli.md b/docs/sources/reference/commandline/cli.md index e1beaeb29c..128228a635 100644 --- a/docs/sources/reference/commandline/cli.md +++ b/docs/sources/reference/commandline/cli.md @@ -70,7 +70,8 @@ expect an integer, and they can only be specified once. -g, --graph="/var/lib/docker" Path to use as the root of the Docker runtime -H, --host=[] The socket(s) to bind to in daemon mode or connect to in client mode, specified using one or more tcp://host:port, unix:///path/to/socket, fd://* or fd://socketfd. 
--icc=true Enable inter-container communication - --ip=0.0.0.0 Default IP address to use when binding container ports + --insecure-registry=[] Make these registries use http + --ip=0.0.0.0 Default IP address to use when binding container ports --ip-forward=true Enable net.ipv4.ip_forward --ip-masq=true Enable IP masquerading for bridge's IP range --iptables=true Enable Docker's addition of iptables rules diff --git a/graph/pull.go b/graph/pull.go index 9345d7d489..942a234458 100644 --- a/graph/pull.go +++ b/graph/pull.go @@ -113,7 +113,9 @@ func (s *TagStore) CmdPull(job *engine.Job) engine.Status { return job.Error(err) } - endpoint, err := registry.NewEndpoint(hostname) + secure := registry.IsSecure(hostname, s.InsecureRegistries) + + endpoint, err := registry.NewEndpoint(hostname, secure) if err != nil { return job.Error(err) } diff --git a/graph/push.go b/graph/push.go index a2bd7136f9..8ffcd88be2 100644 --- a/graph/push.go +++ b/graph/push.go @@ -214,7 +214,9 @@ func (s *TagStore) CmdPush(job *engine.Job) engine.Status { return job.Error(err) } - endpoint, err := registry.NewEndpoint(hostname) + secure := registry.IsSecure(hostname, s.InsecureRegistries) + + endpoint, err := registry.NewEndpoint(hostname, secure) if err != nil { return job.Error(err) } diff --git a/graph/tags.go b/graph/tags.go index 6e4e63148a..d458633ff1 100644 --- a/graph/tags.go +++ b/graph/tags.go @@ -23,10 +23,11 @@ var ( ) type TagStore struct { - path string - graph *Graph - mirrors []string - Repositories map[string]Repository + path string + graph *Graph + mirrors []string + InsecureRegistries []string + Repositories map[string]Repository sync.Mutex // FIXME: move push/pull-related fields // to a helper type @@ -54,18 +55,19 @@ func (r Repository) Contains(u Repository) bool { return true } -func NewTagStore(path string, graph *Graph, mirrors []string) (*TagStore, error) { +func NewTagStore(path string, graph *Graph, mirrors []string, insecureRegistries []string) (*TagStore, error) { abspath, err := filepath.Abs(path) if err != nil { return nil, err } store := &TagStore{ - path: abspath, - graph: graph, - mirrors: mirrors, - Repositories: make(map[string]Repository), - pullingPool: make(map[string]chan struct{}), - pushingPool: make(map[string]chan struct{}), + path: abspath, + graph: graph, + mirrors: mirrors, + InsecureRegistries: insecureRegistries, + Repositories: make(map[string]Repository), + pullingPool: make(map[string]chan struct{}), + pushingPool: make(map[string]chan struct{}), } // Load the json file if it exists, otherwise create it. if err := store.reload(); os.IsNotExist(err) { diff --git a/registry/registry.go b/registry/registry.go index 0b3ec12bf3..8599d3684b 100644 --- a/registry/registry.go +++ b/registry/registry.go @@ -213,6 +213,55 @@ func ResolveRepositoryName(reposName string) (string, string, error) { return hostname, reposName, nil } +// this method expands the registry name as used in the prefix of a repo +// to a full url. if it already is a url, there will be no change. +func ExpandAndVerifyRegistryUrl(hostname string, secure bool) (endpoint string, err error) { + if strings.HasPrefix(hostname, "http:") || strings.HasPrefix(hostname, "https:") { + // if there is no slash after https:// (8 characters) then we have no path in the url + if strings.LastIndex(hostname, "/") < 9 { + // there is no path given. 
Expand with default path + hostname = hostname + "/v1/" + } + if _, err := pingRegistryEndpoint(hostname); err != nil { + return "", errors.New("Invalid Registry endpoint: " + err.Error()) + } + return hostname, nil + } + + // use HTTPS if secure, otherwise use HTTP + if secure { + endpoint = fmt.Sprintf("https://%s/v1/", hostname) + } else { + endpoint = fmt.Sprintf("http://%s/v1/", hostname) + } + _, err = pingRegistryEndpoint(endpoint) + if err != nil { + //TODO: triggering highland build can be done there without "failing" + err = fmt.Errorf("Invalid registry endpoint '%s': %s ", endpoint, err) + if secure { + err = fmt.Errorf("%s. If this private registry supports only HTTP, please add `--insecure-registry %s` to the daemon's arguments.", err, hostname) + } + return "", err + } + return endpoint, nil +} + +// this method verifies if the provided hostname is part of the list of +// insecure registries and returns false if HTTP should be used +func IsSecure(hostname string, insecureRegistries []string) (secure bool) { + secure = true + for _, h := range insecureRegistries { + if hostname == h { + secure = false + break + } + } + if hostname == IndexServerAddress() { + secure = true + } + return +} + func trustedLocation(req *http.Request) bool { var ( trusteds = []string{"docker.com", "docker.io"} diff --git a/registry/service.go b/registry/service.go index f7b353000e..334e7c2ed6 100644 --- a/registry/service.go +++ b/registry/service.go @@ -40,7 +40,7 @@ func (s *Service) Auth(job *engine.Job) engine.Status { job.GetenvJson("authConfig", authConfig) // TODO: this is only done here because auth and registry need to be merged into one pkg if addr := authConfig.ServerAddress; addr != "" && addr != IndexServerAddress() { - endpoint, err := NewEndpoint(addr) + endpoint, err := NewEndpoint(addr, true) if err != nil { return job.Error(err) } From f29b2e48ebfb171f58375b6e355910fc2192aceb Mon Sep 17 00:00:00 2001 From: Michael Crosby Date: Tue, 19 Aug 2014 11:54:42 -0700 Subject: [PATCH 187/592] Refactor IsSecure change Fix issue with restoring the tag store and setting static configuration from the daemon. i.e. the field on the TagStore struct must be made internal or the json.Unmarshal in restore will overwrite the insecure registries to be an empty struct. 
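The encoding/json behavior behind that requirement can be shown with a small standalone program (the struct names below are illustrative, not the real TagStore): Unmarshal only touches exported fields, so runtime configuration kept in an exported field can be clobbered when the persisted JSON is reloaded, while an unexported field survives.

```
package main

import (
	"encoding/json"
	"fmt"
)

// exportedStore keeps its runtime-only setting in an exported field, so
// encoding/json will happily overwrite it during a reload.
type exportedStore struct {
	Repositories       map[string]string
	InsecureRegistries []string
}

// unexportedStore hides the same setting from encoding/json entirely.
type unexportedStore struct {
	Repositories       map[string]string
	insecureRegistries []string
}

func main() {
	// JSON as it might already sit on disk, written before the flag existed.
	onDisk := []byte(`{"Repositories":{"busybox":"latest"},"InsecureRegistries":null}`)

	e := exportedStore{InsecureRegistries: []string{"127.0.0.1:5000"}}
	_ = json.Unmarshal(onDisk, &e)
	fmt.Println(e.InsecureRegistries) // prints []: the configured value was wiped out

	u := unexportedStore{insecureRegistries: []string{"127.0.0.1:5000"}}
	_ = json.Unmarshal(onDisk, &u)
	fmt.Println(u.insecureRegistries) // prints [127.0.0.1:5000]: untouched by Unmarshal
}
```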
Signed-off-by: Michael Crosby Conflicts: graph/pull.go graph/push.go graph/tags.go --- graph/pull.go | 2 +- graph/push.go | 2 +- graph/tags.go | 5 +++-- registry/registry.go | 44 +++++++++++++++++++------------------------- 4 files changed, 24 insertions(+), 29 deletions(-) diff --git a/graph/pull.go b/graph/pull.go index 942a234458..3cc48fa699 100644 --- a/graph/pull.go +++ b/graph/pull.go @@ -113,7 +113,7 @@ func (s *TagStore) CmdPull(job *engine.Job) engine.Status { return job.Error(err) } - secure := registry.IsSecure(hostname, s.InsecureRegistries) + secure := registry.IsSecure(hostname, s.insecureRegistries) endpoint, err := registry.NewEndpoint(hostname, secure) if err != nil { diff --git a/graph/push.go b/graph/push.go index 8ffcd88be2..4cda8914b3 100644 --- a/graph/push.go +++ b/graph/push.go @@ -214,7 +214,7 @@ func (s *TagStore) CmdPush(job *engine.Job) engine.Status { return job.Error(err) } - secure := registry.IsSecure(hostname, s.InsecureRegistries) + secure := registry.IsSecure(hostname, s.insecureRegistries) endpoint, err := registry.NewEndpoint(hostname, secure) if err != nil { diff --git a/graph/tags.go b/graph/tags.go index d458633ff1..5c3e533b2a 100644 --- a/graph/tags.go +++ b/graph/tags.go @@ -26,7 +26,7 @@ type TagStore struct { path string graph *Graph mirrors []string - InsecureRegistries []string + insecureRegistries []string Repositories map[string]Repository sync.Mutex // FIXME: move push/pull-related fields @@ -60,11 +60,12 @@ func NewTagStore(path string, graph *Graph, mirrors []string, insecureRegistries if err != nil { return nil, err } + store := &TagStore{ path: abspath, graph: graph, mirrors: mirrors, - InsecureRegistries: insecureRegistries, + insecureRegistries: insecureRegistries, Repositories: make(map[string]Repository), pullingPool: make(map[string]chan struct{}), pushingPool: make(map[string]chan struct{}), diff --git a/registry/registry.go b/registry/registry.go index 8599d3684b..788996811b 100644 --- a/registry/registry.go +++ b/registry/registry.go @@ -215,51 +215,45 @@ func ResolveRepositoryName(reposName string) (string, string, error) { // this method expands the registry name as used in the prefix of a repo // to a full url. if it already is a url, there will be no change. -func ExpandAndVerifyRegistryUrl(hostname string, secure bool) (endpoint string, err error) { - if strings.HasPrefix(hostname, "http:") || strings.HasPrefix(hostname, "https:") { - // if there is no slash after https:// (8 characters) then we have no path in the url - if strings.LastIndex(hostname, "/") < 9 { - // there is no path given. Expand with default path - hostname = hostname + "/v1/" - } - if _, err := pingRegistryEndpoint(hostname); err != nil { - return "", errors.New("Invalid Registry endpoint: " + err.Error()) - } +func ExpandAndVerifyRegistryUrl(hostname string, secure bool) (string, error) { + if hostname == IndexServerAddress() { return hostname, nil } - // use HTTPS if secure, otherwise use HTTP + endpoint := fmt.Sprintf("http://%s/v1/", hostname) + if secure { endpoint = fmt.Sprintf("https://%s/v1/", hostname) - } else { - endpoint = fmt.Sprintf("http://%s/v1/", hostname) } - _, err = pingRegistryEndpoint(endpoint) - if err != nil { + + if _, oerr := pingRegistryEndpoint(endpoint); oerr != nil { //TODO: triggering highland build can be done there without "failing" - err = fmt.Errorf("Invalid registry endpoint '%s': %s ", endpoint, err) + err := fmt.Errorf("Invalid registry endpoint '%s': %s ", endpoint, oerr) + if secure { - err = fmt.Errorf("%s. 
If this private registry supports only HTTP, please add `--insecure-registry %s` to the daemon's arguments.", err, hostname) + err = fmt.Errorf("%s. If this private registry supports only HTTP, please add `--insecure-registry %s` to the daemon's arguments.", oerr, hostname) } + return "", err } + return endpoint, nil } // this method verifies if the provided hostname is part of the list of // insecure registries and returns false if HTTP should be used -func IsSecure(hostname string, insecureRegistries []string) (secure bool) { - secure = true +func IsSecure(hostname string, insecureRegistries []string) bool { + if hostname == IndexServerAddress() { + return true + } + for _, h := range insecureRegistries { if hostname == h { - secure = false - break + return false } } - if hostname == IndexServerAddress() { - secure = true - } - return + + return true } func trustedLocation(req *http.Request) bool { From 48f7384d6365c59b4b61d527630aaf88af24f6dd Mon Sep 17 00:00:00 2001 From: Michael Crosby Date: Tue, 19 Aug 2014 12:27:23 -0700 Subject: [PATCH 188/592] Expand documentation for --insecure-registries Signed-off-by: Michael Crosby --- docs/sources/reference/commandline/cli.md | 12 +++++++++++- 1 file changed, 11 insertions(+), 1 deletion(-) diff --git a/docs/sources/reference/commandline/cli.md b/docs/sources/reference/commandline/cli.md index 128228a635..462d649203 100644 --- a/docs/sources/reference/commandline/cli.md +++ b/docs/sources/reference/commandline/cli.md @@ -71,7 +71,7 @@ expect an integer, and they can only be specified once. -H, --host=[] The socket(s) to bind to in daemon mode or connect to in client mode, specified using one or more tcp://host:port, unix:///path/to/socket, fd://* or fd://socketfd. --icc=true Enable inter-container communication --insecure-registry=[] Make these registries use http - --ip=0.0.0.0 Default IP address to use when binding container ports + --ip=0.0.0.0 Default IP address to use when binding container ports --ip-forward=true Enable net.ipv4.ip_forward --ip-masq=true Enable IP masquerading for bridge's IP range --iptables=true Enable Docker's addition of iptables rules @@ -196,6 +196,16 @@ can be disabled with --ip-masq=false. +By default docker will assume all registries are securied via TLS. Prior versions +of docker used an auto fallback if a registry did not support TLS. This introduces +the opportunity for MITM attacks so in Docker 1.2 the user must specify `--insecure-registries` +when starting the Docker daemon to state which registries are not using TLS and to communicate +with these registries via plain text. If you are running a local registry over plain text +on `127.0.0.1:5000` you will be required to specify `--insecure-registries 127.0.0.1:500` +when starting the docker daemon to be able to push and pull images to that registry. +No automatic fallback will happen after Docker 1.2 to detect if a registry is using +HTTP or HTTPS. + Docker supports softlinks for the Docker data directory (`/var/lib/docker`) and for `/var/lib/docker/tmp`. 
The `DOCKER_TMPDIR` and the data directory can be set like this: From afade4236d3f15704653132c364d6e7ccc975f8b Mon Sep 17 00:00:00 2001 From: Michael Crosby Date: Wed, 20 Aug 2014 08:31:24 -0700 Subject: [PATCH 189/592] Don't hard code true for auth job Signed-off-by: Michael Crosby Conflicts: registry/service.go --- builtins/builtins.go | 4 ++-- docker/daemon.go | 7 +++++++ registry/service.go | 22 +++++++++++----------- 3 files changed, 20 insertions(+), 13 deletions(-) diff --git a/builtins/builtins.go b/builtins/builtins.go index f952d728b2..41bb249286 100644 --- a/builtins/builtins.go +++ b/builtins/builtins.go @@ -10,7 +10,6 @@ import ( "github.com/docker/docker/engine" "github.com/docker/docker/events" "github.com/docker/docker/pkg/parsers/kernel" - "github.com/docker/docker/registry" ) func Register(eng *engine.Engine) error { @@ -26,7 +25,8 @@ func Register(eng *engine.Engine) error { if err := eng.Register("version", dockerVersion); err != nil { return err } - return registry.NewService().Install(eng) + + return nil } // remote: a RESTful api for cross-docker communication diff --git a/docker/daemon.go b/docker/daemon.go index dd0baa5fa4..f2c0310f1f 100644 --- a/docker/daemon.go +++ b/docker/daemon.go @@ -13,6 +13,7 @@ import ( "github.com/docker/docker/engine" flag "github.com/docker/docker/pkg/mflag" "github.com/docker/docker/pkg/signal" + "github.com/docker/docker/registry" ) const CanDaemon = true @@ -32,11 +33,17 @@ func mainDaemon() { } eng := engine.New() signal.Trap(eng.Shutdown) + // Load builtins if err := builtins.Register(eng); err != nil { log.Fatal(err) } + // load registry service + if err := registry.NewService(daemonCfg.InsecureRegistries).Install(eng); err != nil { + log.Fatal(err) + } + // load the daemon in the background so we can immediately start // the http api so that connections don't fail while the daemon // is booting diff --git a/registry/service.go b/registry/service.go index 334e7c2ed6..890837ca5e 100644 --- a/registry/service.go +++ b/registry/service.go @@ -13,12 +13,15 @@ import ( // 'pull': Download images from any registry (TODO) // 'push': Upload images to any registry (TODO) type Service struct { + insecureRegistries []string } // NewService returns a new instance of Service ready to be // installed no an engine. -func NewService() *Service { - return &Service{} +func NewService(insecureRegistries []string) *Service { + return &Service{ + insecureRegistries: insecureRegistries, + } } // Install installs registry capabilities to eng. @@ -32,15 +35,12 @@ func (s *Service) Install(eng *engine.Engine) error { // and returns OK if authentication was sucessful. // It can be used to verify the validity of a client's credentials. 
func (s *Service) Auth(job *engine.Job) engine.Status { - var ( - err error - authConfig = &AuthConfig{} - ) + var authConfig = new(AuthConfig) job.GetenvJson("authConfig", authConfig) - // TODO: this is only done here because auth and registry need to be merged into one pkg + if addr := authConfig.ServerAddress; addr != "" && addr != IndexServerAddress() { - endpoint, err := NewEndpoint(addr, true) + endpoint, err := NewEndpoint(addr, IsSecure(addr, s.insecureRegistries)) if err != nil { return job.Error(err) } @@ -49,11 +49,11 @@ func (s *Service) Auth(job *engine.Job) engine.Status { } authConfig.ServerAddress = endpoint.String() } - status, err := Login(authConfig, HTTPRequestFactory(nil)) - if err != nil { + + if _, err := Login(authConfig, HTTPRequestFactory(nil)); err != nil { return job.Error(err) } - job.Printf("%s\n", status) + return engine.StatusOK } From 6a1ff022b0744213ed588d9c16dbb13ce055eda6 Mon Sep 17 00:00:00 2001 From: Tibor Vass Date: Fri, 10 Oct 2014 23:22:12 -0400 Subject: [PATCH 190/592] Do not verify certificate when using --insecure-registry on an HTTPS registry Signed-off-by: Tibor Vass Conflicts: registry/registry.go registry/registry_test.go registry/service.go registry/session.go Conflicts: registry/endpoint.go registry/registry.go --- daemon/config.go | 2 +- docs/sources/reference/commandline/cli.md | 20 +-- graph/tags_unit_test.go | 2 +- registry/endpoint.go | 51 ++++++-- registry/endpoint_test.go | 2 +- registry/registry.go | 143 +++++++++------------- registry/registry_test.go | 4 +- registry/service.go | 5 +- registry/session.go | 2 +- 9 files changed, 116 insertions(+), 115 deletions(-) diff --git a/daemon/config.go b/daemon/config.go index bae0c8cd29..9e8d08e2a6 100644 --- a/daemon/config.go +++ b/daemon/config.go @@ -56,7 +56,7 @@ func (config *Config) InstallFlags() { flag.StringVar(&config.BridgeIP, []string{"#bip", "-bip"}, "", "Use this CIDR notation address for the network bridge's IP, not compatible with -b") flag.StringVar(&config.BridgeIface, []string{"b", "-bridge"}, "", "Attach containers to a pre-existing network bridge\nuse 'none' to disable container networking") flag.StringVar(&config.FixedCIDR, []string{"-fixed-cidr"}, "", "IPv4 subnet for fixed IPs (ex: 10.20.0.0/16)\nthis subnet must be nested in the bridge subnet (which is defined by -b or --bip)") - opts.ListVar(&config.InsecureRegistries, []string{"-insecure-registry"}, "Make these registries use http") + opts.ListVar(&config.InsecureRegistries, []string{"-insecure-registry"}, "Enable insecure communication with specified registries (no certificate verification for HTTPS and enable HTTP fallback)") flag.BoolVar(&config.InterContainerCommunication, []string{"#icc", "-icc"}, true, "Enable inter-container communication") flag.StringVar(&config.GraphDriver, []string{"s", "-storage-driver"}, "", "Force the Docker runtime to use a specific storage driver") flag.StringVar(&config.ExecDriver, []string{"e", "-exec-driver"}, "native", "Force the Docker runtime to use a specific exec driver") diff --git a/docs/sources/reference/commandline/cli.md b/docs/sources/reference/commandline/cli.md index 462d649203..1d4202811d 100644 --- a/docs/sources/reference/commandline/cli.md +++ b/docs/sources/reference/commandline/cli.md @@ -70,7 +70,7 @@ expect an integer, and they can only be specified once. 
-g, --graph="/var/lib/docker" Path to use as the root of the Docker runtime -H, --host=[] The socket(s) to bind to in daemon mode or connect to in client mode, specified using one or more tcp://host:port, unix:///path/to/socket, fd://* or fd://socketfd. --icc=true Enable inter-container communication - --insecure-registry=[] Make these registries use http + --insecure-registry=[] Enable insecure communication with specified registries (no certificate verification for HTTPS and enable HTTP fallback) --ip=0.0.0.0 Default IP address to use when binding container ports --ip-forward=true Enable net.ipv4.ip_forward --ip-masq=true Enable IP masquerading for bridge's IP range @@ -195,16 +195,16 @@ to other machines on the Internet. This may interfere with some network topologi can be disabled with --ip-masq=false. +By default, Docker will assume all registries are secured via TLS with certificate verification +enabled. Prior versions of Docker used an auto fallback if a registry did not support TLS +(or if the TLS connection failed). This introduced the opportunity for Man In The Middle (MITM) +attacks, so as of Docker 1.3.1, the user must now specify the `--insecure-registry` daemon flag +for each insecure registry. An insecure registry is either not using TLS (i.e. plain text HTTP), +or is using TLS with a CA certificate not known by the Docker daemon (i.e. certification +verification disabled). For example, if there is a registry listening for HTTP at 127.0.0.1:5000, +as of Docker 1.3.1 you are required to specify `--insecure-registry 127.0.0.1:5000` when starting +the Docker daemon. -By default docker will assume all registries are securied via TLS. Prior versions -of docker used an auto fallback if a registry did not support TLS. This introduces -the opportunity for MITM attacks so in Docker 1.2 the user must specify `--insecure-registries` -when starting the Docker daemon to state which registries are not using TLS and to communicate -with these registries via plain text. If you are running a local registry over plain text -on `127.0.0.1:5000` you will be required to specify `--insecure-registries 127.0.0.1:500` -when starting the docker daemon to be able to push and pull images to that registry. -No automatic fallback will happen after Docker 1.2 to detect if a registry is using -HTTP or HTTPS. Docker supports softlinks for the Docker data directory (`/var/lib/docker`) and for `/var/lib/docker/tmp`. 
The `DOCKER_TMPDIR` and the data directory can be set like this: diff --git a/graph/tags_unit_test.go b/graph/tags_unit_test.go index e4f1fb809f..da512547d5 100644 --- a/graph/tags_unit_test.go +++ b/graph/tags_unit_test.go @@ -53,7 +53,7 @@ func mkTestTagStore(root string, t *testing.T) *TagStore { if err != nil { t.Fatal(err) } - store, err := NewTagStore(path.Join(root, "tags"), graph, nil) + store, err := NewTagStore(path.Join(root, "tags"), graph, nil, nil) if err != nil { t.Fatal(err) } diff --git a/registry/endpoint.go b/registry/endpoint.go index 639c997039..88dbeafd96 100644 --- a/registry/endpoint.go +++ b/registry/endpoint.go @@ -2,7 +2,6 @@ package registry import ( "encoding/json" - "errors" "fmt" "io/ioutil" "net/http" @@ -34,27 +33,40 @@ func scanForAPIVersion(hostname string) (string, APIVersion) { return hostname, DefaultAPIVersion } -func NewEndpoint(hostname string) (*Endpoint, error) { - endpoint, err := newEndpoint(hostname) +func NewEndpoint(hostname string, secure bool) (*Endpoint, error) { + endpoint, err := newEndpoint(hostname, secure) if err != nil { return nil, err } + // Try HTTPS ping to registry endpoint.URL.Scheme = "https" if _, err := endpoint.Ping(); err != nil { - log.Debugf("Registry %s does not work (%s), falling back to http", endpoint, err) - // TODO: Check if http fallback is enabled - endpoint.URL.Scheme = "http" - if _, err = endpoint.Ping(); err != nil { - return nil, errors.New("Invalid Registry endpoint: " + err.Error()) + + //TODO: triggering highland build can be done there without "failing" + + if secure { + // If registry is secure and HTTPS failed, show user the error and tell them about `--insecure-registry` + // in case that's what they need. DO NOT accept unknown CA certificates, and DO NOT fallback to HTTP. + return nil, fmt.Errorf("Invalid registry endpoint %s: %v. If this private registry supports only HTTP or HTTPS with an unknown CA certificate, please add `--insecure-registry %s` to the daemon's arguments. In the case of HTTPS, if you have access to the registry's CA certificate, no need for the flag; simply place the CA certificate at /etc/docker/certs.d/%s/ca.crt", endpoint, err, endpoint.URL.Host, endpoint.URL.Host) } + + // If registry is insecure and HTTPS failed, fallback to HTTP. + log.Debugf("Error from registry %q marked as insecure: %v. Insecurely falling back to HTTP", endpoint, err) + endpoint.URL.Scheme = "http" + _, err2 := endpoint.Ping() + if err2 == nil { + return endpoint, nil + } + + return nil, fmt.Errorf("Invalid registry endpoint %q. HTTPS attempt: %v. 
HTTP attempt: %v", endpoint, err, err2) } return endpoint, nil } -func newEndpoint(hostname string) (*Endpoint, error) { +func newEndpoint(hostname string, secure bool) (*Endpoint, error) { var ( - endpoint Endpoint + endpoint = Endpoint{secure: secure} trimmedHostname string err error ) @@ -72,6 +84,7 @@ func newEndpoint(hostname string) (*Endpoint, error) { type Endpoint struct { URL *url.URL Version APIVersion + secure bool } // Get the formated URL for the root of this registry Endpoint @@ -95,7 +108,7 @@ func (e Endpoint) Ping() (RegistryInfo, error) { return RegistryInfo{Standalone: false}, err } - resp, _, err := doRequest(req, nil, ConnectTimeout) + resp, _, err := doRequest(req, nil, ConnectTimeout, e.secure) if err != nil { return RegistryInfo{Standalone: false}, err } @@ -134,3 +147,19 @@ func (e Endpoint) Ping() (RegistryInfo, error) { log.Debugf("RegistryInfo.Standalone: %t", info.Standalone) return info, nil } + +// IsSecure returns false if the provided hostname is part of the list of insecure registries. +// Insecure registries accept HTTP and/or accept HTTPS with certificates from unknown CAs. +func IsSecure(hostname string, insecureRegistries []string) bool { + if hostname == IndexServerAddress() { + return true + } + + for _, h := range insecureRegistries { + if hostname == h { + return false + } + } + + return true +} diff --git a/registry/endpoint_test.go b/registry/endpoint_test.go index 0ec1220d9c..def5e0d7ae 100644 --- a/registry/endpoint_test.go +++ b/registry/endpoint_test.go @@ -12,7 +12,7 @@ func TestEndpointParse(t *testing.T) { {"0.0.0.0:5000", "https://0.0.0.0:5000/v1/"}, } for _, td := range testData { - e, err := newEndpoint(td.str) + e, err := newEndpoint(td.str, true) if err != nil { t.Errorf("%q: %s", td.str, err) } diff --git a/registry/registry.go b/registry/registry.go index 788996811b..8d43637495 100644 --- a/registry/registry.go +++ b/registry/registry.go @@ -14,6 +14,7 @@ import ( "strings" "time" + log "github.com/Sirupsen/logrus" "github.com/docker/docker/utils" ) @@ -35,7 +36,7 @@ const ( ConnectTimeout ) -func newClient(jar http.CookieJar, roots *x509.CertPool, cert *tls.Certificate, timeout TimeoutType) *http.Client { +func newClient(jar http.CookieJar, roots *x509.CertPool, cert *tls.Certificate, timeout TimeoutType, secure bool) *http.Client { tlsConfig := tls.Config{ RootCAs: roots, // Avoid fallback to SSL protocols < TLS1.0 @@ -46,6 +47,10 @@ func newClient(jar http.CookieJar, roots *x509.CertPool, cert *tls.Certificate, tlsConfig.Certificates = append(tlsConfig.Certificates, *cert) } + if !secure { + tlsConfig.InsecureSkipVerify = true + } + httpTransport := &http.Transport{ DisableKeepAlives: true, Proxy: http.ProxyFromEnvironment, @@ -86,69 +91,76 @@ func newClient(jar http.CookieJar, roots *x509.CertPool, cert *tls.Certificate, } } -func doRequest(req *http.Request, jar http.CookieJar, timeout TimeoutType) (*http.Response, *http.Client, error) { - hasFile := func(files []os.FileInfo, name string) bool { - for _, f := range files { - if f.Name() == name { - return true - } - } - return false - } - - hostDir := path.Join("/etc/docker/certs.d", req.URL.Host) - fs, err := ioutil.ReadDir(hostDir) - if err != nil && !os.IsNotExist(err) { - return nil, nil, err - } - +func doRequest(req *http.Request, jar http.CookieJar, timeout TimeoutType, secure bool) (*http.Response, *http.Client, error) { var ( pool *x509.CertPool certs []*tls.Certificate ) - for _, f := range fs { - if strings.HasSuffix(f.Name(), ".crt") { - if pool == nil { - pool = 
x509.NewCertPool() + if secure && req.URL.Scheme == "https" { + hasFile := func(files []os.FileInfo, name string) bool { + for _, f := range files { + if f.Name() == name { + return true + } } - data, err := ioutil.ReadFile(path.Join(hostDir, f.Name())) - if err != nil { - return nil, nil, err - } - pool.AppendCertsFromPEM(data) + return false } - if strings.HasSuffix(f.Name(), ".cert") { - certName := f.Name() - keyName := certName[:len(certName)-5] + ".key" - if !hasFile(fs, keyName) { - return nil, nil, fmt.Errorf("Missing key %s for certificate %s", keyName, certName) - } - cert, err := tls.LoadX509KeyPair(path.Join(hostDir, certName), path.Join(hostDir, keyName)) - if err != nil { - return nil, nil, err - } - certs = append(certs, &cert) + + hostDir := path.Join("/etc/docker/certs.d", req.URL.Host) + log.Debugf("hostDir: %s", hostDir) + fs, err := ioutil.ReadDir(hostDir) + if err != nil && !os.IsNotExist(err) { + return nil, nil, err } - if strings.HasSuffix(f.Name(), ".key") { - keyName := f.Name() - certName := keyName[:len(keyName)-4] + ".cert" - if !hasFile(fs, certName) { - return nil, nil, fmt.Errorf("Missing certificate %s for key %s", certName, keyName) + + for _, f := range fs { + if strings.HasSuffix(f.Name(), ".crt") { + if pool == nil { + pool = x509.NewCertPool() + } + log.Debugf("crt: %s", hostDir+"/"+f.Name()) + data, err := ioutil.ReadFile(path.Join(hostDir, f.Name())) + if err != nil { + return nil, nil, err + } + pool.AppendCertsFromPEM(data) + } + if strings.HasSuffix(f.Name(), ".cert") { + certName := f.Name() + keyName := certName[:len(certName)-5] + ".key" + log.Debugf("cert: %s", hostDir+"/"+f.Name()) + if !hasFile(fs, keyName) { + return nil, nil, fmt.Errorf("Missing key %s for certificate %s", keyName, certName) + } + cert, err := tls.LoadX509KeyPair(path.Join(hostDir, certName), path.Join(hostDir, keyName)) + if err != nil { + return nil, nil, err + } + certs = append(certs, &cert) + } + if strings.HasSuffix(f.Name(), ".key") { + keyName := f.Name() + certName := keyName[:len(keyName)-4] + ".cert" + log.Debugf("key: %s", hostDir+"/"+f.Name()) + if !hasFile(fs, certName) { + return nil, nil, fmt.Errorf("Missing certificate %s for key %s", certName, keyName) + } } } } if len(certs) == 0 { - client := newClient(jar, pool, nil, timeout) + client := newClient(jar, pool, nil, timeout, secure) res, err := client.Do(req) if err != nil { return nil, nil, err } return res, client, nil } + for i, cert := range certs { - client := newClient(jar, pool, cert, timeout) + client := newClient(jar, pool, cert, timeout, secure) res, err := client.Do(req) // If this is the last cert, otherwise, continue to next cert if 403 or 5xx if i == len(certs)-1 || err == nil && @@ -213,49 +225,6 @@ func ResolveRepositoryName(reposName string) (string, string, error) { return hostname, reposName, nil } -// this method expands the registry name as used in the prefix of a repo -// to a full url. if it already is a url, there will be no change. -func ExpandAndVerifyRegistryUrl(hostname string, secure bool) (string, error) { - if hostname == IndexServerAddress() { - return hostname, nil - } - - endpoint := fmt.Sprintf("http://%s/v1/", hostname) - - if secure { - endpoint = fmt.Sprintf("https://%s/v1/", hostname) - } - - if _, oerr := pingRegistryEndpoint(endpoint); oerr != nil { - //TODO: triggering highland build can be done there without "failing" - err := fmt.Errorf("Invalid registry endpoint '%s': %s ", endpoint, oerr) - - if secure { - err = fmt.Errorf("%s. 
If this private registry supports only HTTP, please add `--insecure-registry %s` to the daemon's arguments.", oerr, hostname) - } - - return "", err - } - - return endpoint, nil -} - -// this method verifies if the provided hostname is part of the list of -// insecure registries and returns false if HTTP should be used -func IsSecure(hostname string, insecureRegistries []string) bool { - if hostname == IndexServerAddress() { - return true - } - - for _, h := range insecureRegistries { - if hostname == h { - return false - } - } - - return true -} - func trustedLocation(req *http.Request) bool { var ( trusteds = []string{"docker.com", "docker.io"} diff --git a/registry/registry_test.go b/registry/registry_test.go index fdf714e800..23aef6c361 100644 --- a/registry/registry_test.go +++ b/registry/registry_test.go @@ -21,7 +21,7 @@ const ( func spawnTestRegistrySession(t *testing.T) *Session { authConfig := &AuthConfig{} - endpoint, err := NewEndpoint(makeURL("/v1/")) + endpoint, err := NewEndpoint(makeURL("/v1/"), false) if err != nil { t.Fatal(err) } @@ -33,7 +33,7 @@ func spawnTestRegistrySession(t *testing.T) *Session { } func TestPingRegistryEndpoint(t *testing.T) { - ep, err := NewEndpoint(makeURL("/v1/")) + ep, err := NewEndpoint(makeURL("/v1/"), false) if err != nil { t.Fatal(err) } diff --git a/registry/service.go b/registry/service.go index 890837ca5e..32274f407d 100644 --- a/registry/service.go +++ b/registry/service.go @@ -89,7 +89,10 @@ func (s *Service) Search(job *engine.Job) engine.Status { if err != nil { return job.Error(err) } - endpoint, err := NewEndpoint(hostname) + + secure := IsSecure(hostname, s.insecureRegistries) + + endpoint, err := NewEndpoint(hostname, secure) if err != nil { return job.Error(err) } diff --git a/registry/session.go b/registry/session.go index 8dbf136205..ba6df35841 100644 --- a/registry/session.go +++ b/registry/session.go @@ -65,7 +65,7 @@ func NewSession(authConfig *AuthConfig, factory *utils.HTTPRequestFactory, endpo } func (r *Session) doRequest(req *http.Request) (*http.Response, *http.Client, error) { - return doRequest(req, r.jar, r.timeout) + return doRequest(req, r.jar, r.timeout, r.indexEndpoint.secure) } // Retrieve the history of a given image from the Registry. 
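For reference while reading the patch above, here is a rough, self-contained Go sketch of the two client-side TLS configurations it distinguishes: a verifying client that trusts a CA certificate placed under `/etc/docker/certs.d/<host>/ca.crt`, and the `--insecure-registry` style client that keeps HTTPS but skips certificate verification. This is an illustration of the idea only, not the daemon's actual wiring; the host name used in `main` is a placeholder.

```
package main

import (
	"crypto/tls"
	"crypto/x509"
	"fmt"
	"io/ioutil"
	"net/http"
	"path"
)

// secureClient trusts only the CA found under /etc/docker/certs.d/<host>/ca.crt
// and refuses SSL protocols below TLS 1.0.
func secureClient(host string) (*http.Client, error) {
	pem, err := ioutil.ReadFile(path.Join("/etc/docker/certs.d", host, "ca.crt"))
	if err != nil {
		return nil, err
	}
	pool := x509.NewCertPool()
	if !pool.AppendCertsFromPEM(pem) {
		return nil, fmt.Errorf("no usable certificates in ca.crt for %s", host)
	}
	cfg := &tls.Config{
		RootCAs:    pool,
		MinVersion: tls.VersionTLS10, // avoid fallback to SSL protocols < TLS 1.0
	}
	return &http.Client{Transport: &http.Transport{TLSClientConfig: cfg}}, nil
}

// insecureClient is roughly what --insecure-registry amounts to for an HTTPS
// endpoint: TLS is still used, but the server certificate is not verified.
func insecureClient() *http.Client {
	cfg := &tls.Config{
		InsecureSkipVerify: true,
		MinVersion:         tls.VersionTLS10,
	}
	return &http.Client{Transport: &http.Transport{TLSClientConfig: cfg}}
}

func main() {
	if _, err := secureClient("my.registry.com:5000"); err != nil {
		fmt.Println("no per-registry CA certificate:", err)
	}
	_ = insecureClient()
}
```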
From e43d9f713e96daed521034995c00375544f62827 Mon Sep 17 00:00:00 2001 From: Tibor Vass Date: Wed, 22 Oct 2014 08:30:48 -0400 Subject: [PATCH 191/592] Docs edits for dropping SSLv3 and under + release notes for 1.3.1 Signed-off-by: Tibor Vass Conflicts: docs/sources/index.md Conflicts: docs/sources/index.md --- docs/mkdocs.yml | 1 + docs/sources/index.md | 61 +--- .../reference/api/hub_registry_spec.md | 30 +- docs/sources/reference/api/registry_api.md | 29 +- docs/sources/reference/commandline/cli.md | 7 +- docs/sources/release-notes.md | 291 ++++++++++++++++++ 6 files changed, 334 insertions(+), 85 deletions(-) mode change 100755 => 100644 docs/mkdocs.yml create mode 100644 docs/sources/release-notes.md diff --git a/docs/mkdocs.yml b/docs/mkdocs.yml old mode 100755 new mode 100644 index 6e018f4afa..faa758546b --- a/docs/mkdocs.yml +++ b/docs/mkdocs.yml @@ -26,6 +26,7 @@ pages: # Introduction: - ['index.md', 'About', 'Docker'] +- ['release-notes.md', 'About', 'Release Notes'] - ['introduction/index.md', '**HIDDEN**'] - ['introduction/understanding-docker.md', 'About', 'Understanding Docker'] diff --git a/docs/sources/index.md b/docs/sources/index.md index bcec387f6b..5780eab05d 100644 --- a/docs/sources/index.md +++ b/docs/sources/index.md @@ -88,63 +88,4 @@ implementation, check out the [Docker User Guide](/userguide/). ## Release Notes -**Version 1.3.0** - -This version fixes a number of bugs and issues and adds new functions and other -improvements. The [GitHub 1.3 milestone](https://github.com/docker/docker/issues?q=milestone%3A1.3.0+) has -more detailed information. Major additions and changes include: - -*New command: `docker exec`* - -The new `docker exec` command lets you run a process in an existing, active -container. The command has APIs for both the daemon and the client. With -`docker exec`, you'll be able to do things like add or remove devices from -running containers, debug running containers, and run commands that are not -part of the container's static specification. Details in the [command line -reference](/reference/commandline/cli/#exec). - -*New command: `docker create`* - -Traditionally, the `docker run` command has been used to both create a -container and spawn a process to run it. The new `docker create` command breaks -this apart, letting you set up a container without actually starting it. This -provides more control over management of the container lifecycle, giving you the -ability to configure things like volumes or port mappings before the container -is started. For example, in a rapid-response scaling situation, you could use -`create` to prepare and stage ten containers in anticipation of heavy loads. -Details in the [command line reference](/reference/commandline/cli/#create). - -*Tech preview of new provenance features* - -This release offers a sneak peek at new image signing capabilities that are -currently under development. Soon, these capabilities will allow any image -author to sign their images to certify they have not been tampered with. For -this release, Official images are now signed by Docker, Inc. Not only does this -demonstrate the new functionality, we hope it will improve your confidence in -the security of Official images. Look for the blue ribbons denoting signed -images on the [Docker Hub](https://hub.docker.com/). -The Docker Engine has been updated to automatically verify that a given -Official Repo has a current, valid signature. 
When pulling a signed image, -you'll see a message stating `the image you are pulling has been verified`. If -no valid signature is detected, Docker Engine will fall back to pulling a -regular, unsigned image. - -*Other improvements & changes* - -* We've added a new security options flag to the `docker run` command, -`--security-opt`, that lets you set SELinux and AppArmor labels and profiles. -This means you'll no longer have to use `docker run --privileged` on kernels -that support SE Linux or AppArmor. For more information, see the -[command line reference](/reference/commandline/cli/#run). - -* A new flag, `--add-host`, has been added to `docker run` that lets you add -lines to `/etc/hosts`. This allows you to specify different name -resolution for the container than it would get via DNS. For more information, -see the [command line reference](/reference/commandline/cli/#run). - -* You can now set a `DOCKER_TLS_VERIFY` environment variable to secure -connections by default (rather than having to pass the `--tlsverify` flag on -every call). For more information, see the [https guide](/articles/https). - -* Three security issues have been addressed in this release: [CVE-2014-5280, -CVE-2014-5270, and CVE-2014-5282](https://groups.google.com/forum/#!msg/docker-announce/aQoVmQlcE0A/smPuBNYf8VwJ). +A summary of the changes in each release in the current series can now be found on the separate [Release Notes page](/release-notes/) diff --git a/docs/sources/reference/api/hub_registry_spec.md b/docs/sources/reference/api/hub_registry_spec.md index ee15277a44..853eda4aee 100644 --- a/docs/sources/reference/api/hub_registry_spec.md +++ b/docs/sources/reference/api/hub_registry_spec.md @@ -4,7 +4,9 @@ page_keywords: docker, registry, api, hub # The Docker Hub and the Registry spec -## The 3 roles +## The three roles + +There are three major components playing a role in the Docker ecosystem. ### Docker Hub @@ -21,13 +23,15 @@ The Docker Hub has different components: - Authentication service - Tokenization -The Docker Hub is authoritative for those information. +The Docker Hub is authoritative for that information. -We expect that there will be only one instance of the Docker Hub, run and +There is only one instance of the Docker Hub, run and managed by Docker Inc. ### Registry +The registry has the following characteristics: + - It stores the images and the graph for a set of repositories - It does not have user accounts data - It has no notion of user accounts or authorization @@ -37,35 +41,35 @@ managed by Docker Inc. - It doesn't have a local database - [Source Code](https://github.com/docker/docker-registry) -We expect that there will be multiple registries out there. To help to +We expect that there will be multiple registries out there. To help you grasp the context, here are some examples of registries: - **sponsor registry**: such a registry is provided by a third-party hosting infrastructure as a convenience for their customers and the - docker community as a whole. Its costs are supported by the third + Docker community as a whole. Its costs are supported by the third party, but the management and operation of the registry are - supported by dotCloud. It features read/write access, and delegates + supported by Docker, Inc. It features read/write access, and delegates authentication and authorization to the Docker Hub. - **mirror registry**: such a registry is provided by a third-party hosting infrastructure but is targeted at their customers only. 
Some mechanism (unspecified to date) ensures that public images are pulled from a sponsor registry to the mirror registry, to make sure - that the customers of the third-party provider can “docker pull” + that the customers of the third-party provider can `docker pull` those images locally. - **vendor registry**: such a registry is provided by a software - vendor, who wants to distribute docker images. It would be operated + vendor who wants to distribute docker images. It would be operated and managed by the vendor. Only users authorized by the vendor would be able to get write access. Some images would be public (accessible for anyone), others private (accessible only for authorized users). Authentication and authorization would be delegated to the Docker Hub. - The goal of vendor registries is to let someone do “docker pull - basho/riak1.3” and automatically push from the vendor registry - (instead of a sponsor registry); i.e. get all the convenience of a + The goal of vendor registries is to let someone do `docker pull + basho/riak1.3` and automatically push from the vendor registry + (instead of a sponsor registry); i.e., vendors get all the convenience of a sponsor registry, while retaining control on the asset distribution. - **private registry**: such a registry is located behind a firewall, or protected by an additional security layer (HTTP authorization, SSL client-side certificates, IP address authorization...). The - registry is operated by a private entity, outside of dotCloud's + registry is operated by a private entity, outside of Docker's control. It can optionally delegate additional authorization to the Docker Hub, but it is not mandatory. @@ -77,7 +81,7 @@ grasp the context, here are some examples of registries: > - local mount point; > - remote docker addressed through SSH. -The latter would only require two new commands in docker, e.g., +The latter would only require two new commands in Docker, e.g., `registryget` and `registryput`, wrapping access to the local filesystem (and optionally doing consistency checks). Authentication and authorization are then delegated diff --git a/docs/sources/reference/api/registry_api.md b/docs/sources/reference/api/registry_api.md index 8fe24cf6fb..0839fe209f 100644 --- a/docs/sources/reference/api/registry_api.md +++ b/docs/sources/reference/api/registry_api.md @@ -21,30 +21,30 @@ grasp the context, here are some examples of registries: - **sponsor registry**: such a registry is provided by a third-party hosting infrastructure as a convenience for their customers and the - docker community as a whole. Its costs are supported by the third + Docker community as a whole. Its costs are supported by the third party, but the management and operation of the registry are - supported by dotCloud. It features read/write access, and delegates + supported by Docker. It features read/write access, and delegates authentication and authorization to the Index. - **mirror registry**: such a registry is provided by a third-party hosting infrastructure but is targeted at their customers only. Some mechanism (unspecified to date) ensures that public images are pulled from a sponsor registry to the mirror registry, to make sure - that the customers of the third-party provider can “docker pull” + that the customers of the third-party provider can `docker pull` those images locally. - **vendor registry**: such a registry is provided by a software - vendor, who wants to distribute docker images. 
It would be operated + vendor, who wants to distribute Docker images. It would be operated and managed by the vendor. Only users authorized by the vendor would be able to get write access. Some images would be public (accessible for anyone), others private (accessible only for authorized users). Authentication and authorization would be delegated to the Index. - The goal of vendor registries is to let someone do “docker pull - basho/riak1.3” and automatically push from the vendor registry - (instead of a sponsor registry); i.e. get all the convenience of a + The goal of vendor registries is to let someone do `docker pull + basho/riak1.3` and automatically push from the vendor registry + (instead of a sponsor registry); i.e., get all the convenience of a sponsor registry, while retaining control on the asset distribution. - **private registry**: such a registry is located behind a firewall, or protected by an additional security layer (HTTP authorization, SSL client-side certificates, IP address authorization...). The - registry is operated by a private entity, outside of dotCloud's + registry is operated by a private entity, outside of Docker's control. It can optionally delegate additional authorization to the Index, but it is not mandatory. @@ -52,7 +52,7 @@ grasp the context, here are some examples of registries: > Mirror registries and private registries which do not use the Index > don't even need to run the registry code. They can be implemented by any > kind of transport implementing HTTP GET and PUT. Read-only registries -> can be powered by a simple static HTTP server. +> can be powered by a simple static HTTPS server. > **Note**: > The latter implies that while HTTP is the protocol of choice for a registry, @@ -60,13 +60,20 @@ grasp the context, here are some examples of registries: > > - HTTP with GET (and PUT for read-write registries); > - local mount point; -> - remote docker addressed through SSH. +> - remote Docker addressed through SSH. -The latter would only require two new commands in docker, e.g., +The latter would only require two new commands in Docker, e.g., `registryget` and `registryput`, wrapping access to the local filesystem (and optionally doing consistency checks). Authentication and authorization are then delegated to SSH (e.g., with public keys). +> **Note**: +> Private registry servers that expose an HTTP endpoint need to be secured with +> TLS (preferably TLSv1.2, but at least TLSv1.0). Make sure to put the CA +> certificate at /etc/docker/certs.d/my.registry.com:5000/ca.crt on the Docker +> host, so that the daemon can securely access the private registry. +> Support for SSLv3 and lower is not available due to security issues. + The default namespace for a private repository is `library`. # Endpoints diff --git a/docs/sources/reference/commandline/cli.md b/docs/sources/reference/commandline/cli.md index 1d4202811d..8a158d7334 100644 --- a/docs/sources/reference/commandline/cli.md +++ b/docs/sources/reference/commandline/cli.md @@ -112,7 +112,12 @@ direct access to the Docker daemon - and should be secured either using the [built in https encrypted socket](/articles/https/), or by putting a secure web proxy in front of it. You can listen on port `2375` on all network interfaces with `-H tcp://0.0.0.0:2375`, or on a particular network interface using its IP -address: `-H tcp://192.168.59.103:2375`. +address: `-H tcp://192.168.59.103:2375`. It is conventional to use port `2375` +for un-encrypted, and port `2376` for encrypted communication with the daemon. 
+ +> **Note** If you're using an HTTPS encrypted socket, keep in mind that only TLS1.0 +> and greater are supported. Protocols SSLv3 and under are not supported anymore +> for security reasons. On Systemd based systems, you can communicate with the daemon via [systemd socket activation](http://0pointer.de/blog/projects/socket-activation.html), use diff --git a/docs/sources/release-notes.md b/docs/sources/release-notes.md new file mode 100644 index 0000000000..6b2f0fd571 --- /dev/null +++ b/docs/sources/release-notes.md @@ -0,0 +1,291 @@ +page_title: Docker 1.x Series Release Notes page_description: Release Notes for +Docker 1.x. page_keywords: docker, documentation, about, technology, +understanding, release + +#Release Notes + +##Version 1.3.1 +(2014-10-28) + +This release fixes some bugs and addresses some security issues. + +*Security fixes* + +Patches and changes were made to address CVE-2014-5277 and CVE-2014-3566. Specifically, changes were made to: +* Prevent fallback to SSL protocols < TLS 1.0 for client, daemon and registry +* Secure HTTPS connection to registries with certificate verification and without HTTP fallback unless `--insecure-registry` is specified. + +*Runtime fixes* + +* Fixed issue where volumes would not be shared + +*Client fixes* + +* Fixed issue with `--iptables=false` not automatically setting +`--ip-masq=false` +* Fixed docker run output to non-TTY stdout + +*Builder fixes* + +* Fixed escaping `$` for environment variables +* Fixed issue with lowercase `onbuild` Dockerfile instruction + + +##Version 1.3.0 + +This version fixes a number of bugs and issues and adds new functions and other +improvements. The [GitHub 1.3milestone](https://github.com/docker/docker/issues?q=milestone%3A1.3.0+) has +more detailed information. Major additions and changes include: + +###New Features + +*New command: `docker exec`* + +The new `docker exec` command lets you run a process in an existing, active +container. The command has APIs for both the daemon and the client. With `docker +exec`, you'll be able to do things like add or remove devices from running +containers, debug running containers, and run commands that are not part of the +container's static specification. Details in the [command line reference](/reference/commandline/cli/#exec). + +*New command: `docker create`* + +Traditionally, the `docker run` command has been used to both create a container +and spawn a process to run it. The new `docker create` command breaks this +apart, letting you set up a container without actually starting it. This +provides more control over management of the container lifecycle, giving you the +ability to configure things like volumes or port mappings before the container +is started. For example, in a rapid-response scaling situation, you could use +`create` to prepare and stage ten containers in anticipation of heavy loads. +Details in the [command line reference](/reference/commandline/cli/#create). + +*Tech preview of new provenance features* + +This release offers a sneak peek at new image signing capabilities that are +currently under development. Soon, these capabilities will allow any image +author to sign their images to certify they have not been tampered with. For +this release, Official images are now signed by Docker, Inc. Not only does this +demonstrate the new functionality, we hope it will improve your confidence in +the security of Official images. Look for the blue ribbons denoting signed +images on the [Docker Hub](https://hub.docker.com/). 
The Docker Engine has been +updated to automatically verify that a given Official Repo has a current, valid +signature. When pulling a signed image, you'll see a message stating `the image +you are pulling has been verified`. If no valid signature is detected, Docker +Engine will fall back to pulling a regular, unsigned image. + +###Other improvements & changes* + +* We've added a new security options flag to the `docker run` command, +`--security-opt`, that lets you set SELinux and AppArmor labels and profiles. +This means you'll no longer have to use `docker run --privileged` on kernels +that support SE Linux or AppArmor. For more information, see the [command line +reference](/reference/commandline/cli/#run). + +* A new flag, `--add-host`, has been added to `docker run` that lets you add +lines to `/etc/hosts`. This allows you to specify different name resolution for +the container than it would get via DNS. For more information, see the [command +line reference](/reference/commandline/cli/#run). + +* You can now set a `DOCKER_TLS_VERIFY` environment variable to secure +connections by default (rather than having to pass the `--tlsverify` flag on +every call). For more information, see the [https guide](/articles/https). + +* Three security issues have been addressed in this release: [CVE-2014-5280, +CVE-2014-5270, and +CVE-2014-5282](https://groups.google.com/forum/#!msg/docker-announce/aQoVmQlcE0A/smPuBNYf8VwJ). + +##Version 1.2.0 + +This version fixes a number of bugs and issues and adds new functions and other +improvements. These include: + +###New Features + +*New restart policies* + +We added a `--restart flag` to `docker run` to specify a restart policy for your +container. Currently, there are three policies available: + +* `no` – Do not restart the container if it dies. (default) * `on-failure` – +Restart the container if it exits with a non-zero exit code. This can also +accept an optional maximum restart count (e.g. `on-failure:5`). * `always` – +Always restart the container no matter what exit code is returned. This +deprecates the `--restart` flag on the Docker daemon. + +*New flags for `docker run`: `--cap-add` and `–-cap-drop`* + +In previous releases, Docker containers could either be given complete +capabilities or they could all follow a whitelist of allowed capabilities while +dropping all others. Further, using `--privileged` would grant all capabilities +inside a container, rather than applying a whitelist. This was not recommended +for production use because it’s really unsafe; it’s as if you were directly in +the host. + +This release introduces two new flags for `docker run`, `--cap-add` and +`--cap-drop`, that give you fine-grain control over the specific capabilities +you want grant to a particular container. + +*New `-–device` flag for `docker run`* + +Previously, you could only use devices inside your containers by bind mounting +them (with `-v`) in a `--privileged` container. With this release, we introduce +the `--device flag` to `docker run` which lets you use a device without +requiring a privileged container. + +*Writable `/etc/hosts`, `/etc/hostname` and `/etc/resolv.conf`* + +You can now edit `/etc/hosts`, `/etc/hostname` and `/etc/resolve.conf` in a +running container. This is useful if you need to install BIND or other services +that might override one of those files. + +Note, however, that changes to these files are not saved when running `docker +build` and so will not be preserved in the resulting image. 
The changes will +only “stick” in a running container. + +*Docker proxy in a separate process* + +The Docker userland proxy that routes outbound traffic to your containers now +has its own separate process (one process per connection). This greatly reduces +the load on the daemon, which increases stability and efficiency. + +###Other improvements & changes + +* When using `docker rm -f`, Docker now kills the container (instead of stopping +it) before removing it . If you intend to stop the container cleanly, you can +use `docker stop`. + +* Added support for IPv6 addresses in `--dns` + +* Added search capability in private registries + +##Version 1.1.0 + +###New Features + +*`.dockerignore` support* + +You can now add a `.dockerignore` file next to your `Dockerfile` and Docker will +ignore files and directories specified in that file when sending the build +context to the daemon. Example: +https://github.com/docker/docker/blob/master/.dockerignore + +*Pause containers during commit* + +Doing a commit on a running container was not recommended because you could end +up with files in an inconsistent state (for example, if they were being written +during the commit). Containers are now paused when a commit is made to them. You +can disable this feature by doing a `docker commit --pause=false ` + +*Tailing logs* + +You can now tail the logs of a container. For example, you can get the last ten +lines of a log by using `docker logs --tail 10 `. You can also +follow the logs of a container without having to read the whole log file with +`docker logs --tail 0 -f `. + +*Allow a tar file as context for docker build* + +You can now pass a tar archive to `docker build` as context. This can be used to +automate docker builds, for example: `cat context.tar | docker build -` or +`docker run builder_image | docker build -` + +*Bind mounting your whole filesystem in a container* + +`/` is now allowed as source of `--volumes`. This means you can bind-mount your +whole system in a container if you need to. For example: `docker run -v +/:/my_host ubuntu:ro ls /my_host`. However, it is now forbidden to mount to /. + + +###Other Improvements & Changes + +* Port allocation has been improved. In the previous release, Docker could +prevent you from starting a container with previously allocated ports which +seemed to be in use when in fact they were not. This has been fixed. + +* A bug in `docker save` was introduced in the last release. The `docker save` +command could produce images with invalid metadata. The command now produces +images with correct metadata. + +* Running `docker inspect` in a container now returns which containers it is +linked to. + +* Parsing of the `docker commit` flag has improved validation, to better prevent +you from committing an image with a name such as `-m`. Image names with dashes +in them potentially conflict with command line flags. + +* The API now has Improved status codes for `start` and `stop`. Trying to start +a running container will now return a 304 error. + +* Performance has been improved overall. Starting the daemon is faster than in +previous releases. The daemon’s performance has also been improved when it is +working with large numbers of images and containers. + +* Fixed an issue with white-spaces and multi-lines in Dockerfiles. 
+ +##Version 1.1.0 + +###New Features + +*`.dockerignore` support* + +You can now add a `.dockerignore` file next to your `Dockerfile` and Docker will +ignore files and directories specified in that file when sending the build +context to the daemon. Example: +https://github.com/dotcloud/docker/blob/master/.dockerignore + +*Pause containers during commit* + +Doing a commit on a running container was not recommended because you could end +up with files in an inconsistent state (for example, if they were being written +during the commit). Containers are now paused when a commit is made to them. You +can disable this feature by doing a `docker commit --pause=false ` + +*Tailing logs* + +You can now tail the logs of a container. For example, you can get the last ten +lines of a log by using `docker logs --tail 10 `. You can also +follow the logs of a container without having to read the whole log file with +`docker logs --tail 0 -f `. + +*Allow a tar file as context for docker build* + +You can now pass a tar archive to `docker build` as context. This can be used to +automate docker builds, for example: `cat context.tar | docker build -` or +`docker run builder_image | docker build -` + +*Bind mounting your whole filesystem in a container* + +`/` is now allowed as source of `--volumes`. This means you can bind-mount your +whole system in a container if you need to. For example: `docker run -v +/:/my_host ubuntu:ro ls /my_host`. However, it is now forbidden to mount to /. + + +###Other Improvements & Changes + +* Port allocation has been improved. In the previous release, Docker could +prevent you from starting a container with previously allocated ports which +seemed to be in use when in fact they were not. This has been fixed. + +* A bug in `docker save` was introduced in the last release. The `docker save` +command could produce images with invalid metadata. The command now produces +images with correct metadata. + +* Running `docker inspect` in a container now returns which containers it is +linked to. + +* Parsing of the `docker commit` flag has improved validation, to better prevent +you from committing an image with a name such as `-m`. Image names with dashes +in them potentially conflict with command line flags. + +* The API now has Improved status codes for `start` and `stop`. Trying to start +a running container will now return a 304 error. + +* Performance has been improved overall. Starting the daemon is faster than in +previous releases. The daemon’s performance has also been improved when it is +working with large numbers of images and containers. + +* Fixed an issue with white-spaces and multi-lines in Dockerfiles. + +##Version 1.0.0 + +First production-ready release. Prior development history can be found by +searching in [GitHub](https://github.com/docker/docker). 
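The note added to the registry API document above (a read-only registry "can be powered by a simple static HTTPS server", with TLSv1.0 as the floor and SSLv3 unsupported) translates into very little code on the server side. A minimal sketch, with placeholder paths and address, might look like the following; it is not an official registry component, only an illustration of pinning the minimum TLS version:

```
package main

import (
	"crypto/tls"
	"log"
	"net/http"
)

func main() {
	srv := &http.Server{
		Addr:    ":5000",
		Handler: http.FileServer(http.Dir("/srv/registry")), // read-only registry layout
		TLSConfig: &tls.Config{
			// TLSv1.2 is preferred; TLSv1.0 is the floor, so SSLv3 clients are rejected.
			MinVersion: tls.VersionTLS10,
		},
	}
	// If server.crt is signed by a private CA, each Docker host would carry that CA at
	// /etc/docker/certs.d/my.registry.com:5000/ca.crt as described above.
	log.Fatal(srv.ListenAndServeTLS("server.crt", "server.key"))
}
```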
From 05c18a2434ab7bd68a86c87fe866bc7107ac1941 Mon Sep 17 00:00:00 2001 From: Tibor Vass Date: Tue, 28 Oct 2014 21:20:30 -0400 Subject: [PATCH 192/592] Fix login command Signed-off-by: Tibor Vass --- registry/service.go | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/registry/service.go b/registry/service.go index 32274f407d..7051d93430 100644 --- a/registry/service.go +++ b/registry/service.go @@ -50,9 +50,11 @@ func (s *Service) Auth(job *engine.Job) engine.Status { authConfig.ServerAddress = endpoint.String() } - if _, err := Login(authConfig, HTTPRequestFactory(nil)); err != nil { + status, err := Login(authConfig, HTTPRequestFactory(nil)) + if err != nil { return job.Error(err) } + job.Printf("%s\n", status) return engine.StatusOK } From 4a17e6eeddee1fe13c7aa5ad7f6a789490add0fe Mon Sep 17 00:00:00 2001 From: Tibor Vass Date: Mon, 20 Oct 2014 19:17:32 -0400 Subject: [PATCH 193/592] Bump to version v1.3.1 Signed-off-by: Tibor Vass Conflicts: VERSION --- CHANGELOG.md | 18 ++++++++++++++++++ VERSION | 2 +- 2 files changed, 19 insertions(+), 1 deletion(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 2c5218eea2..f958bbd48c 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,23 @@ # Changelog +## 1.3.1 (2014-10-28) + +#### Security +* Prevent fallback to SSL protocols < TLS 1.0 for client, daemon and registry ++ Secure HTTPS connection to registries with certificate verification and without HTTP fallback unless `--insecure-registry` is specified + +#### Runtime +- Fix issue where volumes would not be shared + +#### Client +- Fix issue with `--iptables=false` not automatically setting `--ip-masq=false` +- Fix docker run output to non-TTY stdout + +#### Builder +- Fix escaping `$` for environment variables +- Fix issue with lowercase `onbuild` Dockerfile instruction +- Restrict envrionment variable expansion to `ENV`, `ADD`, `COPY`, `WORKDIR`, `EXPOSE`, `VOLUME` and `USER` + ## 1.3.0 (2014-10-14) #### Notable features since 1.2.0 diff --git a/VERSION b/VERSION index b6bb93f7c7..3a3cd8cc8b 100644 --- a/VERSION +++ b/VERSION @@ -1 +1 @@ -1.3.0-dev +1.3.1 From b9fcdb81917f2258b295a14398054b40bb6919b8 Mon Sep 17 00:00:00 2001 From: Tibor Vass Date: Thu, 30 Oct 2014 19:45:07 -0400 Subject: [PATCH 194/592] Change version to 1.3.1-dev Signed-off-by: Tibor Vass --- VERSION | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/VERSION b/VERSION index 3a3cd8cc8b..625610ece8 100644 --- a/VERSION +++ b/VERSION @@ -1 +1 @@ -1.3.1 +1.3.1-dev From bfe21f095d491dc9ee7e664b260239cbed8f505f Mon Sep 17 00:00:00 2001 From: Sven Dowideit Date: Fri, 31 Oct 2014 11:10:17 +1000 Subject: [PATCH 195/592] Yes, the review really should have picked up this spelling mistake Docker-DCO-1.1-Signed-off-by: Sven Dowideit (github: SvenDowideit) --- docs/Dockerfile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/Dockerfile b/docs/Dockerfile index 0e5d3ae60d..d801ec2130 100644 --- a/docs/Dockerfile +++ b/docs/Dockerfile @@ -49,7 +49,7 @@ RUN VERSION=$(cat VERSION) \ && BUILD_DATE=$(date) \ && sed -i "s/\$VERSION/$VERSION/g" theme/mkdocs/base.html \ && sed -i "s/\$MAJOR_MINOR/v$MAJOR_MINOR/g" theme/mkdocs/base.html \ - && sed -i "s/\$GITCOMMIT/$GITCOMMIT/g" .heme/mkdocs/base.html \ + && sed -i "s/\$GITCOMMIT/$GITCOMMIT/g" theme/mkdocs/base.html \ && sed -i "s/\$GIT_BRANCH/$GIT_BRANCH/g" theme/mkdocs/base.html \ && sed -i "s/\$BUILD_DATE/$BUILD_DATE/g" theme/mkdocs/base.html \ && sed -i "s/\$AWS_S3_BUCKET/$AWS_S3_BUCKET/g" theme/mkdocs/base.html From 
bc62a35ffb2eeacb3ae79bc65076a01744d78a5d Mon Sep 17 00:00:00 2001 From: Scott Walls Date: Sun, 26 Oct 2014 12:42:51 -0700 Subject: [PATCH 196/592] ubuntulinux.md - old commands, typo, section movement - Removed some commands related to autocomplete/symlinks that don't seem to be required anymore on Ubuntu 14.04 - Fixed one minor typo ("see LINK _for_ details," not "see LINK details") - Moved section "Giving non-root access" to top level, rather than being under Ubuntu 13 (the section isn't specific to Ubuntu 13, and even references Ubuntu 14). Signed-off-by: Scott Walls --- docs/sources/installation/ubuntulinux.md | 31 ++++++++++++------------ 1 file changed, 16 insertions(+), 15 deletions(-) diff --git a/docs/sources/installation/ubuntulinux.md b/docs/sources/installation/ubuntulinux.md index efeeeea2e1..09b776f08d 100644 --- a/docs/sources/installation/ubuntulinux.md +++ b/docs/sources/installation/ubuntulinux.md @@ -29,8 +29,9 @@ To install the latest Ubuntu package (may not be the latest Docker release): $ sudo apt-get update $ sudo apt-get install docker.io - $ sudo ln -sf /usr/bin/docker.io /usr/local/bin/docker - $ sudo sed -i '$acomplete -F _docker docker' /etc/bash_completion.d/docker.io + +Then, to enable tab-completion of Docker commands in BASH, either restart BASH or: + $ source /etc/bash_completion.d/docker.io If you'd like to try the latest version of Docker: @@ -202,7 +203,18 @@ Type `exit` to exit **Done!**, now continue with the [User Guide](/userguide/). -### Giving non-root access +### Upgrade + +To install the latest version of Docker, use the standard +`apt-get` method: + + # update your sources list + $ sudo apt-get update + + # install the latest + $ sudo apt-get install lxc-docker + +## Giving non-root access The `docker` daemon always runs as the `root` user, and since Docker version 0.5.2, the `docker` daemon binds to a Unix socket instead of a @@ -221,7 +233,7 @@ alternative group. > **Warning**: > The `docker` group (or the group specified with the `-G` flag) is > `root`-equivalent; see [*Docker Daemon Attack Surface*]( -> /articles/security/#dockersecurity-daemon) details. +> /articles/security/#dockersecurity-daemon) for details. **Example:** @@ -238,17 +250,6 @@ alternative group. 
# If you are in Ubuntu 14.04, use docker.io instead of docker $ sudo service docker restart -### Upgrade - -To install the latest version of docker, use the standard -`apt-get` method: - - # update your sources list - $ sudo apt-get update - - # install the latest - $ sudo apt-get install lxc-docker - ## Memory and Swap Accounting If you want to enable memory and swap accounting, you must add the From d8b17d785a03246cb3a081223a0242469af7d410 Mon Sep 17 00:00:00 2001 From: Lei Jitang Date: Thu, 30 Oct 2014 09:38:44 +0800 Subject: [PATCH 197/592] Fix docker exec command help messages Signed-off-by: Lei Jitang --- api/client/commands.go | 2 +- daemon/execdriver/driver.go | 2 +- docker/flags.go | 2 +- docs/sources/reference/commandline/cli.md | 2 +- 4 files changed, 4 insertions(+), 4 deletions(-) diff --git a/api/client/commands.go b/api/client/commands.go index da1eab27c9..610e06452d 100644 --- a/api/client/commands.go +++ b/api/client/commands.go @@ -2470,7 +2470,7 @@ func (cli *DockerCli) CmdLoad(args ...string) error { } func (cli *DockerCli) CmdExec(args ...string) error { - cmd := cli.Subcmd("exec", "CONTAINER COMMAND [ARG...]", "Run a command in an existing container") + cmd := cli.Subcmd("exec", "CONTAINER COMMAND [ARG...]", "Run a command in a running container") execConfig, err := runconfig.ParseExec(cmd, args) if err != nil { diff --git a/daemon/execdriver/driver.go b/daemon/execdriver/driver.go index 22e4c4647c..bc2eb24eda 100644 --- a/daemon/execdriver/driver.go +++ b/daemon/execdriver/driver.go @@ -42,7 +42,7 @@ type TtyTerminal interface { type Driver interface { Run(c *Command, pipes *Pipes, startCallback StartCallback) (int, error) // Run executes the process and blocks until the process exits and returns the exit code - // Exec executes the process in an existing container, blocks until the process exits and returns the exit code + // Exec executes the process in a running container, blocks until the process exits and returns the exit code Exec(c *Command, processConfig *ProcessConfig, pipes *Pipes, startCallback StartCallback) (int, error) Kill(c *Command, sig int) error Pause(c *Command) error diff --git a/docker/flags.go b/docker/flags.go index 61081ec996..31dcbe2cff 100644 --- a/docker/flags.go +++ b/docker/flags.go @@ -62,7 +62,7 @@ func init() { {"create", "Create a new container"}, {"diff", "Inspect changes on a container's filesystem"}, {"events", "Get real time events from the server"}, - {"exec", "Run a command in an existing container"}, + {"exec", "Run a command in a running container"}, {"export", "Stream the contents of a container as a tar archive"}, {"history", "Show the history of an image"}, {"images", "List images"}, diff --git a/docs/sources/reference/commandline/cli.md b/docs/sources/reference/commandline/cli.md index 8a158d7334..64fe3fa885 100644 --- a/docs/sources/reference/commandline/cli.md +++ b/docs/sources/reference/commandline/cli.md @@ -637,7 +637,7 @@ You'll need two shells for this example. Usage: docker exec [OPTIONS] CONTAINER COMMAND [ARG...] 
- Run a command in an existing container + Run a command in a running container -d, --detach=false Detached mode: run command in the background -i, --interactive=false Keep STDIN open even if not attached From eaa050fdb8fed612b3cd5d204fddaedf27a20370 Mon Sep 17 00:00:00 2001 From: Huayi Zhang Date: Fri, 31 Oct 2014 09:57:54 +0800 Subject: [PATCH 198/592] Mapping change in code Signed-off-by: Huayi Zhang --- docs/sources/reference/commandline/cli.md | 2 +- runconfig/parse.go | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/docs/sources/reference/commandline/cli.md b/docs/sources/reference/commandline/cli.md index 006e75d0f8..88ac4a0c6c 100644 --- a/docs/sources/reference/commandline/cli.md +++ b/docs/sources/reference/commandline/cli.md @@ -489,7 +489,7 @@ Creates a new container. --cpuset="" CPUs in which to allow execution (0-3, 0,1) --device=[] Add a host device to the container (e.g. --device=/dev/sdc:/dev/xvdc:rwm) --dns=[] Set custom DNS servers - --dns-search=[] Set custom DNS search domains + --dns-search=[] Set custom DNS search domains (Use --dns-search=. if you don't wish to set the search domain) -e, --env=[] Set environment variables --entrypoint="" Overwrite the default ENTRYPOINT of the image --env-file=[] Read in a line delimited file of environment variables diff --git a/runconfig/parse.go b/runconfig/parse.go index 3a8cdd3350..f1258e5b7f 100644 --- a/runconfig/parse.go +++ b/runconfig/parse.go @@ -73,7 +73,7 @@ func Parse(cmd *flag.FlagSet, args []string, sysInfo *sysinfo.SysInfo) (*Config, cmd.Var(&flPublish, []string{"p", "-publish"}, fmt.Sprintf("Publish a container's port to the host\nformat: %s\n(use 'docker port' to see the actual mapping)", nat.PortSpecTemplateFormat)) cmd.Var(&flExpose, []string{"#expose", "-expose"}, "Expose a port from the container without publishing it to your host") cmd.Var(&flDns, []string{"#dns", "-dns"}, "Set custom DNS servers") - cmd.Var(&flDnsSearch, []string{"-dns-search"}, "Set custom DNS search domains") + cmd.Var(&flDnsSearch, []string{"-dns-search"}, "Set custom DNS search domains (Use --dns-search=. if you don't wish to set the search domain)") cmd.Var(&flExtraHosts, []string{"-add-host"}, "Add a custom host-to-IP mapping (host:ip)") cmd.Var(&flVolumesFrom, []string{"#volumes-from", "-volumes-from"}, "Mount volumes from the specified container(s)") cmd.Var(&flLxcOpts, []string{"#lxc-conf", "-lxc-conf"}, "(lxc exec-driver only) Add custom lxc options --lxc-conf=\"lxc.cgroup.cpuset.cpus = 0,1\"") From b16f3736edf2427761cbc1c8fc5c4892299b2232 Mon Sep 17 00:00:00 2001 From: Huu Nguyen Date: Thu, 30 Oct 2014 22:50:02 -0400 Subject: [PATCH 199/592] Fix apparent copy paste mistake The note under the RUN header refers to the CMD instruction. It should refer to the RUN instruction instead. Signed-off-by: Huu Nguyen --- docs/sources/reference/builder.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/docs/sources/reference/builder.md b/docs/sources/reference/builder.md index ae0771d685..a76e6b88f7 100644 --- a/docs/sources/reference/builder.md +++ b/docs/sources/reference/builder.md @@ -238,9 +238,9 @@ commands using a base image that does not contain `/bin/sh`. > **Note**: > Unlike the *shell* form, the *exec* form does not invoke a command shell. > This means that normal shell processing does not happen. For example, -> `CMD [ "echo", "$HOME" ]` will not do variable substitution on `$HOME`. +> `RUN [ "echo", "$HOME" ]` will not do variable substitution on `$HOME`. 
> If you want shell processing then either use the *shell* form or execute -> a shell directly, for example: `CMD [ "sh", "-c", "echo", "$HOME" ]`. +> a shell directly, for example: `RUN [ "sh", "-c", "echo", "$HOME" ]`. The cache for `RUN` instructions isn't invalidated automatically during the next build. The cache for an instruction like From 8453cf0671879aef04f96ffce52bf3fbcb75ca13 Mon Sep 17 00:00:00 2001 From: Huu Nguyen Date: Thu, 30 Oct 2014 23:58:14 -0400 Subject: [PATCH 200/592] Remove out-of-context code block Signed-off-by: Huu Nguyen --- docs/sources/articles/dockerfile_best-practices.md | 6 ------ 1 file changed, 6 deletions(-) diff --git a/docs/sources/articles/dockerfile_best-practices.md b/docs/sources/articles/dockerfile_best-practices.md index c403c2f286..3a320efb2c 100644 --- a/docs/sources/articles/dockerfile_best-practices.md +++ b/docs/sources/articles/dockerfile_best-practices.md @@ -113,12 +113,6 @@ the command string itself will be used to find a match. Once the cache is invalidated, all subsequent `Dockerfile` commands will generate new images and the cache will not be used. - bzr \ - cvs \ - git \ - mercurial \ - subversion - ## The Dockerfile instructions Below you'll find recommendations for the best way to write the From ad5b307d0fb00ff0a63b3f97e529fa67bfce50b3 Mon Sep 17 00:00:00 2001 From: Erik Hollensbe Date: Fri, 31 Oct 2014 04:20:26 +0000 Subject: [PATCH 201/592] proxy: Fix a potential panic handling error states. Docker-DCO-1.1-Signed-off-by: Erik Hollensbe (github: erikh) --- daemon/networkdriver/portmapper/proxy.go | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/daemon/networkdriver/portmapper/proxy.go b/daemon/networkdriver/portmapper/proxy.go index af20469ed8..e4a17bcd9a 100644 --- a/daemon/networkdriver/portmapper/proxy.go +++ b/daemon/networkdriver/portmapper/proxy.go @@ -130,7 +130,12 @@ func (p *proxyCommand) Start() error { r.Read(buf) if string(buf) != "0\n" { - errStr, _ := ioutil.ReadAll(r) + errStr, err := ioutil.ReadAll(r) + if err != nil { + errchan <- fmt.Errorf("Error reading exit status from userland proxy: %v", err) + return + } + errchan <- fmt.Errorf("Error starting userland proxy: %s", errStr) return } From 29965246a735acbdbd9cbb4e54f1d2d9efb120f3 Mon Sep 17 00:00:00 2001 From: Sebastiaan van Stijn Date: Fri, 31 Oct 2014 08:31:20 +0100 Subject: [PATCH 202/592] Typo in 1.3.1 change log Replaced envrionment -> environment Signed-off-by: Sebastiaan van Stijn --- CHANGELOG.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index f958bbd48c..b9d4370517 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -16,7 +16,7 @@ #### Builder - Fix escaping `$` for environment variables - Fix issue with lowercase `onbuild` Dockerfile instruction -- Restrict envrionment variable expansion to `ENV`, `ADD`, `COPY`, `WORKDIR`, `EXPOSE`, `VOLUME` and `USER` +- Restrict environment variable expansion to `ENV`, `ADD`, `COPY`, `WORKDIR`, `EXPOSE`, `VOLUME` and `USER` ## 1.3.0 (2014-10-14) From 906985123aa6b7874c3c0e21f0fb5603928ff6dd Mon Sep 17 00:00:00 2001 From: Thomas LEVEIL Date: Fri, 31 Oct 2014 13:00:59 +0000 Subject: [PATCH 203/592] Reword a sentence bringing confusion about docker links As [discovered][1] doing user support, the sentence `You've learned that a link creates a source container that can provide information about itself to a recipient container` brings confusion. 
[1]: http://stackoverflow.com/questions/26652877/how-to-avoid-redundant-container-linking-in-docker-when-propagating-ip-addresses/26654203?noredirect=1#comment41945048_26654203 Signed-off-by: Thomas LEVEIL --- docs/sources/userguide/dockerlinks.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/docs/sources/userguide/dockerlinks.md b/docs/sources/userguide/dockerlinks.md index 631f4bdea5..fa665b7266 100644 --- a/docs/sources/userguide/dockerlinks.md +++ b/docs/sources/userguide/dockerlinks.md @@ -159,8 +159,8 @@ Next, inspect your linked containers with `docker inspect`: You can see that the `web` container is now linked to the `db` container `web/db`. Which allows it to access information about the `db` container. -So what does linking the containers actually do? You've learned that a link creates a -source container that can provide information about itself to a recipient container. In +So what does linking the containers actually do? You've learned that a link allows a +source container to provide information about itself to a recipient container. In our example, the recipient, `web`, can access information about the source `db`. To do this, Docker creates a secure tunnel between the containers that doesn't need to expose any ports externally on the container; you'll note when we started the From 06bd66a1f8079c9135acc34bcb9d46c439b429f1 Mon Sep 17 00:00:00 2001 From: Vincent Batts Date: Fri, 31 Oct 2014 10:18:41 -0400 Subject: [PATCH 204/592] pkg/mount: add more sharesubtree options Signed-off-by: Vincent Batts --- pkg/mount/flags.go | 7 ++++++ pkg/mount/flags_freebsd.go | 7 ++++++ pkg/mount/flags_linux.go | 7 ++++++ pkg/mount/flags_unsupported.go | 7 ++++++ pkg/mount/sharedsubtree_linux.go | 38 +++++++++++++++++++++++++++++++- 5 files changed, 65 insertions(+), 1 deletion(-) diff --git a/pkg/mount/flags.go b/pkg/mount/flags.go index 742698e8d3..17dbd7a64c 100644 --- a/pkg/mount/flags.go +++ b/pkg/mount/flags.go @@ -37,7 +37,14 @@ func parseOptions(options string) (int, string) { "nodiratime": {false, NODIRATIME}, "bind": {false, BIND}, "rbind": {false, RBIND}, + "unbindable": {false, UNBINDABLE}, + "runbindable": {false, RUNBINDABLE}, "private": {false, PRIVATE}, + "rprivate": {false, RPRIVATE}, + "shared": {false, SHARED}, + "rshared": {false, RSHARED}, + "slave": {false, SLAVE}, + "rslave": {false, RSLAVE}, "relatime": {false, RELATIME}, "norelatime": {true, RELATIME}, "strictatime": {false, STRICTATIME}, diff --git a/pkg/mount/flags_freebsd.go b/pkg/mount/flags_freebsd.go index 4ddf4d7090..a59b58960b 100644 --- a/pkg/mount/flags_freebsd.go +++ b/pkg/mount/flags_freebsd.go @@ -19,7 +19,14 @@ const ( MANDLOCK = 0 NODEV = 0 NODIRATIME = 0 + UNBINDABLE = 0 + RUNBINDABLE = 0 PRIVATE = 0 + RPRIVATE = 0 + SHARED = 0 + RSHARED = 0 + SLAVE = 0 + RSLAVE = 0 RBIND = 0 RELATIVE = 0 RELATIME = 0 diff --git a/pkg/mount/flags_linux.go b/pkg/mount/flags_linux.go index 0bb47d8c90..9986621c8f 100644 --- a/pkg/mount/flags_linux.go +++ b/pkg/mount/flags_linux.go @@ -17,7 +17,14 @@ const ( NODIRATIME = syscall.MS_NODIRATIME BIND = syscall.MS_BIND RBIND = syscall.MS_BIND | syscall.MS_REC + UNBINDABLE = syscall.MS_UNBINDABLE + RUNBINDABLE = syscall.MS_UNBINDABLE | syscall.MS_REC PRIVATE = syscall.MS_PRIVATE + RPRIVATE = syscall.MS_PRIVATE | syscall.MS_REC + SLAVE = syscall.MS_SLAVE + RSLAVE = syscall.MS_SLAVE | syscall.MS_REC + SHARED = syscall.MS_SHARED + RSHARED = syscall.MS_SHARED | syscall.MS_REC RELATIME = syscall.MS_RELATIME STRICTATIME = syscall.MS_STRICTATIME ) diff --git 
a/pkg/mount/flags_unsupported.go b/pkg/mount/flags_unsupported.go index 5a14108880..c4f82176b8 100644 --- a/pkg/mount/flags_unsupported.go +++ b/pkg/mount/flags_unsupported.go @@ -11,7 +11,14 @@ const ( NODIRATIME = 0 NOEXEC = 0 NOSUID = 0 + UNBINDABLE = 0 + RUNBINDABLE = 0 PRIVATE = 0 + RPRIVATE = 0 + SHARED = 0 + RSHARED = 0 + SLAVE = 0 + RSLAVE = 0 RBIND = 0 RELATIME = 0 RELATIVE = 0 diff --git a/pkg/mount/sharedsubtree_linux.go b/pkg/mount/sharedsubtree_linux.go index 566ebbd6bf..cd9b86cefa 100644 --- a/pkg/mount/sharedsubtree_linux.go +++ b/pkg/mount/sharedsubtree_linux.go @@ -2,7 +2,39 @@ package mount +func MakeShared(mountPoint string) error { + return ensureMountedAs(mountPoint, "shared") +} + +func MakeRShared(mountPoint string) error { + return ensureMountedAs(mountPoint, "rshared") +} + func MakePrivate(mountPoint string) error { + return ensureMountedAs(mountPoint, "private") +} + +func MakeRPrivate(mountPoint string) error { + return ensureMountedAs(mountPoint, "rprivate") +} + +func MakeSlave(mountPoint string) error { + return ensureMountedAs(mountPoint, "slave") +} + +func MakeRSlave(mountPoint string) error { + return ensureMountedAs(mountPoint, "rslave") +} + +func MakeUnbindable(mountPoint string) error { + return ensureMountedAs(mountPoint, "unbindable") +} + +func MakeRUnbindable(mountPoint string) error { + return ensureMountedAs(mountPoint, "runbindable") +} + +func ensureMountedAs(mountPoint, options string) error { mounted, err := Mounted(mountPoint) if err != nil { return err @@ -13,6 +45,10 @@ func MakePrivate(mountPoint string) error { return err } } + mounted, err = Mounted(mountPoint) + if err != nil { + return err + } - return ForceMount("", mountPoint, "none", "private") + return ForceMount("", mountPoint, "none", options) } From f95d73dfd48ad4a3211f2f5e89e8195e2066c939 Mon Sep 17 00:00:00 2001 From: Victor Vieux Date: Fri, 31 Oct 2014 18:28:10 +0000 Subject: [PATCH 205/592] add missing 'current filter' to ps Signed-off-by: Victor Vieux --- docs/sources/reference/commandline/cli.md | 1 + 1 file changed, 1 insertion(+) diff --git a/docs/sources/reference/commandline/cli.md b/docs/sources/reference/commandline/cli.md index 8a158d7334..5aebe3ff62 100644 --- a/docs/sources/reference/commandline/cli.md +++ b/docs/sources/reference/commandline/cli.md @@ -1054,6 +1054,7 @@ than one filter, then pass multiple flags (e.g. `--filter "foo=bar" --filter "bi Current filters: * exited (int - the code of exited containers. Only useful with '--all') + * status (restarting|running|paused|exited) ##### Successfully exited containers From 5a73be8e9fbe5defc9fc71c6839bea1d888d6a95 Mon Sep 17 00:00:00 2001 From: Erik Hollensbe Date: Fri, 31 Oct 2014 18:41:46 +0000 Subject: [PATCH 206/592] pkg/proxy: Bump the maximum size of a UDP packet. 
Docker-DCO-1.1-Signed-off-by: Erik Hollensbe (github: erikh) --- pkg/proxy/udp_proxy.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pkg/proxy/udp_proxy.go b/pkg/proxy/udp_proxy.go index f9f2d111e5..a3fcf116e3 100644 --- a/pkg/proxy/udp_proxy.go +++ b/pkg/proxy/udp_proxy.go @@ -13,7 +13,7 @@ import ( const ( UDPConnTrackTimeout = 90 * time.Second - UDPBufSize = 2048 + UDPBufSize = 65507 ) // A net.Addr where the IP is split into two fields so you can use it as a key From 7a3a93871218e5bc50a368f00f043c84004e5986 Mon Sep 17 00:00:00 2001 From: Vincent Batts Date: Fri, 31 Oct 2014 13:12:31 -0400 Subject: [PATCH 207/592] pkg/mount: testing for linux sharedsubtree mounts * shared * shared/slave * unbindable * private Signed-off-by: Vincent Batts --- pkg/mount/sharedsubtree_linux_test.go | 331 ++++++++++++++++++++++++++ 1 file changed, 331 insertions(+) create mode 100644 pkg/mount/sharedsubtree_linux_test.go diff --git a/pkg/mount/sharedsubtree_linux_test.go b/pkg/mount/sharedsubtree_linux_test.go new file mode 100644 index 0000000000..145d57bbd8 --- /dev/null +++ b/pkg/mount/sharedsubtree_linux_test.go @@ -0,0 +1,331 @@ +// +build linux + +package mount + +import ( + "os" + "path" + "syscall" + "testing" +) + +// nothing is propogated in or out +func TestSubtreePrivate(t *testing.T) { + tmp := path.Join(os.TempDir(), "mount-tests") + if err := os.MkdirAll(tmp, 0777); err != nil { + t.Fatal(err) + } + defer os.RemoveAll(tmp) + + var ( + sourceDir = path.Join(tmp, "source") + targetDir = path.Join(tmp, "target") + outside1Dir = path.Join(tmp, "outside1") + outside2Dir = path.Join(tmp, "outside2") + + outside1Path = path.Join(outside1Dir, "file.txt") + outside2Path = path.Join(outside2Dir, "file.txt") + outside1CheckPath = path.Join(targetDir, "a", "file.txt") + outside2CheckPath = path.Join(sourceDir, "b", "file.txt") + ) + if err := os.MkdirAll(path.Join(sourceDir, "a"), 0777); err != nil { + t.Fatal(err) + } + if err := os.MkdirAll(path.Join(sourceDir, "b"), 0777); err != nil { + t.Fatal(err) + } + if err := os.Mkdir(targetDir, 0777); err != nil { + t.Fatal(err) + } + if err := os.Mkdir(outside1Dir, 0777); err != nil { + t.Fatal(err) + } + if err := os.Mkdir(outside2Dir, 0777); err != nil { + t.Fatal(err) + } + + if err := createFile(outside1Path); err != nil { + t.Fatal(err) + } + if err := createFile(outside2Path); err != nil { + t.Fatal(err) + } + + // mount the shared directory to a target + if err := Mount(sourceDir, targetDir, "none", "bind,rw"); err != nil { + t.Fatal(err) + } + defer func() { + if err := Unmount(targetDir); err != nil { + t.Fatal(err) + } + }() + + // next, make the target private + if err := MakePrivate(targetDir); err != nil { + t.Fatal(err) + } + defer func() { + if err := Unmount(targetDir); err != nil { + t.Fatal(err) + } + }() + + // mount in an outside path to a mounted path inside the _source_ + if err := Mount(outside1Dir, path.Join(sourceDir, "a"), "none", "bind,rw"); err != nil { + t.Fatal(err) + } + defer func() { + if err := Unmount(path.Join(sourceDir, "a")); err != nil { + t.Fatal(err) + } + }() + + // check that this file _does_not_ show in the _target_ + if _, err := os.Stat(outside1CheckPath); err != nil && !os.IsNotExist(err) { + t.Fatal(err) + } else if err == nil { + t.Fatalf("%q should not be visible, but is", outside1CheckPath) + } + + // next mount outside2Dir into the _target_ + if err := Mount(outside2Dir, path.Join(targetDir, "b"), "none", "bind,rw"); err != nil { + t.Fatal(err) + } + defer func() { + if err := 
Unmount(path.Join(targetDir, "b")); err != nil { + t.Fatal(err) + } + }() + + // check that this file _does_not_ show in the _source_ + if _, err := os.Stat(outside2CheckPath); err != nil && !os.IsNotExist(err) { + t.Fatal(err) + } else if err == nil { + t.Fatalf("%q should not be visible, but is", outside2CheckPath) + } +} + +// Testing that when a target is a shared mount, +// then child mounts propogate to the source +func TestSubtreeShared(t *testing.T) { + tmp := path.Join(os.TempDir(), "mount-tests") + if err := os.MkdirAll(tmp, 0777); err != nil { + t.Fatal(err) + } + defer os.RemoveAll(tmp) + + var ( + sourceDir = path.Join(tmp, "source") + targetDir = path.Join(tmp, "target") + outsideDir = path.Join(tmp, "outside") + + outsidePath = path.Join(outsideDir, "file.txt") + sourceCheckPath = path.Join(sourceDir, "a", "file.txt") + ) + + if err := os.MkdirAll(path.Join(sourceDir, "a"), 0777); err != nil { + t.Fatal(err) + } + if err := os.Mkdir(targetDir, 0777); err != nil { + t.Fatal(err) + } + if err := os.Mkdir(outsideDir, 0777); err != nil { + t.Fatal(err) + } + + if err := createFile(outsidePath); err != nil { + t.Fatal(err) + } + + // mount the source as shared + if err := MakeShared(sourceDir); err != nil { + t.Fatal(err) + } + defer func() { + if err := Unmount(sourceDir); err != nil { + t.Fatal(err) + } + }() + + // mount the shared directory to a target + if err := Mount(sourceDir, targetDir, "none", "bind,rw"); err != nil { + t.Fatal(err) + } + defer func() { + if err := Unmount(targetDir); err != nil { + t.Fatal(err) + } + }() + + // mount in an outside path to a mounted path inside the target + if err := Mount(outsideDir, path.Join(targetDir, "a"), "none", "bind,rw"); err != nil { + t.Fatal(err) + } + defer func() { + if err := Unmount(path.Join(targetDir, "a")); err != nil { + t.Fatal(err) + } + }() + + // NOW, check that the file from the outside directory is avaible in the source directory + if _, err := os.Stat(sourceCheckPath); err != nil { + t.Fatal(err) + } +} + +// testing that mounts to a shared source show up in the slave target, +// and that mounts into a slave target do _not_ show up in the shared source +func TestSubtreeSharedSlave(t *testing.T) { + tmp := path.Join(os.TempDir(), "mount-tests") + if err := os.MkdirAll(tmp, 0777); err != nil { + t.Fatal(err) + } + defer os.RemoveAll(tmp) + + var ( + sourceDir = path.Join(tmp, "source") + targetDir = path.Join(tmp, "target") + outside1Dir = path.Join(tmp, "outside1") + outside2Dir = path.Join(tmp, "outside2") + + outside1Path = path.Join(outside1Dir, "file.txt") + outside2Path = path.Join(outside2Dir, "file.txt") + outside1CheckPath = path.Join(targetDir, "a", "file.txt") + outside2CheckPath = path.Join(sourceDir, "b", "file.txt") + ) + if err := os.MkdirAll(path.Join(sourceDir, "a"), 0777); err != nil { + t.Fatal(err) + } + if err := os.MkdirAll(path.Join(sourceDir, "b"), 0777); err != nil { + t.Fatal(err) + } + if err := os.Mkdir(targetDir, 0777); err != nil { + t.Fatal(err) + } + if err := os.Mkdir(outside1Dir, 0777); err != nil { + t.Fatal(err) + } + if err := os.Mkdir(outside2Dir, 0777); err != nil { + t.Fatal(err) + } + + if err := createFile(outside1Path); err != nil { + t.Fatal(err) + } + if err := createFile(outside2Path); err != nil { + t.Fatal(err) + } + + // mount the source as shared + if err := MakeShared(sourceDir); err != nil { + t.Fatal(err) + } + defer func() { + if err := Unmount(sourceDir); err != nil { + t.Fatal(err) + } + }() + + // mount the shared directory to a target + if err := 
Mount(sourceDir, targetDir, "none", "bind,rw"); err != nil { + t.Fatal(err) + } + defer func() { + if err := Unmount(targetDir); err != nil { + t.Fatal(err) + } + }() + + // next, make the target slave + if err := MakeSlave(targetDir); err != nil { + t.Fatal(err) + } + defer func() { + if err := Unmount(targetDir); err != nil { + t.Fatal(err) + } + }() + + // mount in an outside path to a mounted path inside the _source_ + if err := Mount(outside1Dir, path.Join(sourceDir, "a"), "none", "bind,rw"); err != nil { + t.Fatal(err) + } + defer func() { + if err := Unmount(path.Join(sourceDir, "a")); err != nil { + t.Fatal(err) + } + }() + + // check that this file _does_ show in the _target_ + if _, err := os.Stat(outside1CheckPath); err != nil { + t.Fatal(err) + } + + // next mount outside2Dir into the _target_ + if err := Mount(outside2Dir, path.Join(targetDir, "b"), "none", "bind,rw"); err != nil { + t.Fatal(err) + } + defer func() { + if err := Unmount(path.Join(targetDir, "b")); err != nil { + t.Fatal(err) + } + }() + + // check that this file _does_not_ show in the _source_ + if _, err := os.Stat(outside2CheckPath); err != nil && !os.IsNotExist(err) { + t.Fatal(err) + } else if err == nil { + t.Fatalf("%q should not be visible, but is", outside2CheckPath) + } +} + +func TestSubtreeUnbindable(t *testing.T) { + tmp := path.Join(os.TempDir(), "mount-tests") + if err := os.MkdirAll(tmp, 0777); err != nil { + t.Fatal(err) + } + defer os.RemoveAll(tmp) + + var ( + sourceDir = path.Join(tmp, "source") + targetDir = path.Join(tmp, "target") + ) + if err := os.MkdirAll(sourceDir, 0777); err != nil { + t.Fatal(err) + } + if err := os.MkdirAll(targetDir, 0777); err != nil { + t.Fatal(err) + } + + // next, make the source unbindable + if err := MakeUnbindable(sourceDir); err != nil { + t.Fatal(err) + } + defer func() { + if err := Unmount(sourceDir); err != nil { + t.Fatal(err) + } + }() + + // then attempt to mount it to target. 
It should fail + if err := Mount(sourceDir, targetDir, "none", "bind,rw"); err != nil && err != syscall.EINVAL { + t.Fatal(err) + } else if err == nil { + t.Fatalf("%q should not have been bindable") + } + defer func() { + if err := Unmount(targetDir); err != nil { + t.Fatal(err) + } + }() +} + +func createFile(path string) error { + f, err := os.Create(path) + if err != nil { + return err + } + f.WriteString("hello world!") + return f.Close() +} From fd774a818c7d8942922b4f74eabd2a4e14094e1a Mon Sep 17 00:00:00 2001 From: Srini Brahmaroutu Date: Wed, 17 Sep 2014 01:08:30 +0000 Subject: [PATCH 208/592] adding support for port ranges on --expose Closes #1834 Signed-off-by: Srini Brahmaroutu --- daemon/networkdriver/bridge/driver.go | 20 ++++----- docs/man/docker-create.1.md | 2 +- docs/man/docker-run.1.md | 8 +--- docs/sources/reference/commandline/cli.md | 4 +- docs/sources/reference/run.md | 4 +- integration-cli/docker_cli_run_test.go | 30 ++++++++++++++ links/links.go | 43 ++++++++++++++++++-- links/links_test.go | 49 +++++++++++++++++++++++ nat/nat.go | 39 ++++++++---------- nat/nat_test.go | 4 +- runconfig/parse.go | 23 +++++++++-- 11 files changed, 171 insertions(+), 55 deletions(-) diff --git a/daemon/networkdriver/bridge/driver.go b/daemon/networkdriver/bridge/driver.go index c967aebb79..5d0040a8e7 100644 --- a/daemon/networkdriver/bridge/driver.go +++ b/daemon/networkdriver/bridge/driver.go @@ -5,7 +5,7 @@ import ( "io/ioutil" "net" "os" - "strings" + "strconv" "sync" log "github.com/Sirupsen/logrus" @@ -14,6 +14,7 @@ import ( "github.com/docker/docker/daemon/networkdriver/portallocator" "github.com/docker/docker/daemon/networkdriver/portmapper" "github.com/docker/docker/engine" + "github.com/docker/docker/nat" "github.com/docker/docker/pkg/iptables" "github.com/docker/docker/pkg/networkfs/resolvconf" "github.com/docker/docker/pkg/parsers/kernel" @@ -515,18 +516,13 @@ func LinkContainers(job *engine.Job) engine.Status { ignoreErrors = job.GetenvBool("IgnoreErrors") ports = job.GetenvList("Ports") ) - split := func(p string) (string, string) { - parts := strings.Split(p, "/") - return parts[0], parts[1] - } - - for _, p := range ports { - port, proto := split(p) + for _, value := range ports { + port := nat.Port(value) if output, err := iptables.Raw(action, "FORWARD", "-i", bridgeIface, "-o", bridgeIface, - "-p", proto, + "-p", port.Proto(), "-s", parentIP, - "--dport", port, + "--dport", strconv.Itoa(port.Int()), "-d", childIP, "-j", "ACCEPT"); !ignoreErrors && err != nil { return job.Error(err) @@ -536,9 +532,9 @@ func LinkContainers(job *engine.Job) engine.Status { if output, err := iptables.Raw(action, "FORWARD", "-i", bridgeIface, "-o", bridgeIface, - "-p", proto, + "-p", port.Proto(), "-s", childIP, - "--sport", port, + "--sport", strconv.Itoa(port.Int()), "-d", parentIP, "-j", "ACCEPT"); !ignoreErrors && err != nil { return job.Error(err) diff --git a/docs/man/docker-create.1.md b/docs/man/docker-create.1.md index 92e34125a4..bc431aa975 100644 --- a/docs/man/docker-create.1.md +++ b/docs/man/docker-create.1.md @@ -79,7 +79,7 @@ docker-create - Create a new container Read in a line delimited file of environment variables **--expose**=[] - Expose a port from the container without publishing it to your host + Expose a port or a range of ports (e.g. 
--expose=3300-3310) from the container without publishing it to your host **-h**, **--hostname**="" Container host name diff --git a/docs/man/docker-run.1.md b/docs/man/docker-run.1.md index 8da95af6f8..485965381c 100644 --- a/docs/man/docker-run.1.md +++ b/docs/man/docker-run.1.md @@ -132,12 +132,8 @@ ENTRYPOINT. **--env-file**=[] Read in a line delimited file of environment variables -**--expose**=*port* - Expose a port from the container without publishing it to your host. A -containers port can be exposed to other containers in three ways: 1) The -developer can expose the port using the EXPOSE parameter of the Dockerfile, 2) -the operator can use the **--expose** option with **docker run**, or 3) the -container can be started with the **--link**. +**--expose**=[] + Expose a port or a range of ports (e.g. --expose=3300-3310) from the container without publishing it to your host **-h**, **--hostname**=*hostname* Sets the container host name that is available inside the container. diff --git a/docs/sources/reference/commandline/cli.md b/docs/sources/reference/commandline/cli.md index dd82ae40df..ea73b16714 100644 --- a/docs/sources/reference/commandline/cli.md +++ b/docs/sources/reference/commandline/cli.md @@ -509,7 +509,7 @@ Creates a new container. -e, --env=[] Set environment variables --entrypoint="" Overwrite the default ENTRYPOINT of the image --env-file=[] Read in a line delimited file of environment variables - --expose=[] Expose a port from the container without publishing it to your host + --expose=[] Expose a port or a range of ports (e.g. --expose=3300-3310) from the container without publishing it to your host -h, --hostname="" Container host name -i, --interactive=false Keep STDIN open even if not attached --link=[] Add link to another container in the form of name:alias @@ -1211,7 +1211,7 @@ removed before the image is removed. -e, --env=[] Set environment variables --entrypoint="" Overwrite the default ENTRYPOINT of the image --env-file=[] Read in a line delimited file of environment variables - --expose=[] Expose a port from the container without publishing it to your host + --expose=[] Expose a port or a range of ports (e.g. --expose=3300-3310) from the container without publishing it to your host -h, --hostname="" Container host name -i, --interactive=false Keep STDIN open even if not attached --link=[] Add link to another container in the form of name:alias diff --git a/docs/sources/reference/run.md b/docs/sources/reference/run.md index 88e3f5d491..b17afde23f 100644 --- a/docs/sources/reference/run.md +++ b/docs/sources/reference/run.md @@ -413,7 +413,7 @@ the `EXPOSE` instruction to give a hint to the operator about what incoming ports might provide services. The following options work with or override the Dockerfile's exposed defaults: - --expose=[]: Expose a port from the container + --expose=[]: Expose a port or a range of ports from the container without publishing it to your host -P=false : Publish all exposed ports to the host interfaces -p=[] : Publish a container᾿s port to the host (format: @@ -422,7 +422,7 @@ or override the Dockerfile's exposed defaults: (use 'docker port' to see the actual mapping) --link="" : Add link to another container (name:alias) -As mentioned previously, `EXPOSE` (and `--expose`) make a port available +As mentioned previously, `EXPOSE` (and `--expose`) makes ports available **in** a container for incoming connections. 
The port number on the inside of the container (where the service listens) does not need to be the same number as the port exposed on the outside of the container diff --git a/integration-cli/docker_cli_run_test.go b/integration-cli/docker_cli_run_test.go index 95cb0c86d1..c529be7df2 100644 --- a/integration-cli/docker_cli_run_test.go +++ b/integration-cli/docker_cli_run_test.go @@ -13,11 +13,13 @@ import ( "reflect" "regexp" "sort" + "strconv" "strings" "sync" "testing" "time" + "github.com/docker/docker/nat" "github.com/docker/docker/pkg/mount" "github.com/docker/docker/pkg/networkfs/resolvconf" "github.com/kr/pty" @@ -2473,3 +2475,31 @@ func TestRunSlowStdoutConsumer(t *testing.T) { logDone("run - slow consumer") } + +func TestRunAllowPortRangeThroughExpose(t *testing.T) { + cmd := exec.Command(dockerBinary, "run", "-d", "--expose", "3000-3003", "-P", "busybox", "top") + out, _, err := runCommandWithOutput(cmd) + if err != nil { + t.Fatal(err) + } + id := strings.TrimSpace(out) + portstr, err := inspectFieldJSON(id, "NetworkSettings.Ports") + if err != nil { + t.Fatal(err) + } + var ports nat.PortMap + err = unmarshalJSON([]byte(portstr), &ports) + for port, binding := range ports { + portnum, _ := strconv.Atoi(strings.Split(string(port), "/")[0]) + if portnum < 3000 || portnum > 3003 { + t.Fatalf("Port is out of range ", portnum, binding, out) + } + if binding == nil || len(binding) != 1 || len(binding[0].HostPort) == 0 { + t.Fatal("Port is not mapped for the port "+port, out) + } + } + if err := deleteContainer(id); err != nil { + t.Fatal(err) + } + logDone("run - allow port range through --expose flag") +} diff --git a/links/links.go b/links/links.go index d2d699398e..fc4d95ab08 100644 --- a/links/links.go +++ b/links/links.go @@ -47,6 +47,20 @@ func (l *Link) Alias() string { return alias } +func nextContiguous(ports []nat.Port, value int, index int) int { + if index+1 == len(ports) { + return index + } + for i := index + 1; i < len(ports); i++ { + if ports[i].Int() > value+1 { + return i - 1 + } + + value++ + } + return len(ports) - 1 +} + func (l *Link) ToEnv() []string { env := []string{} alias := strings.Replace(strings.ToUpper(l.Alias()), "-", "_", -1) @@ -55,12 +69,35 @@ func (l *Link) ToEnv() []string { env = append(env, fmt.Sprintf("%s_PORT=%s://%s:%s", alias, p.Proto(), l.ChildIP, p.Port())) } - // Load exposed ports into the environment - for _, p := range l.Ports { + //sort the ports so that we can bulk the continuous ports together + nat.Sort(l.Ports, func(ip, jp nat.Port) bool { + // If the two ports have the same number, tcp takes priority + // Sort in desc order + return ip.Int() < jp.Int() || (ip.Int() == jp.Int() && strings.ToLower(ip.Proto()) == "tcp") + }) + + for i := 0; i < len(l.Ports); { + p := l.Ports[i] + j := nextContiguous(l.Ports, p.Int(), i) + if j > i+1 { + env = append(env, fmt.Sprintf("%s_PORT_%s_%s_START=%s://%s:%s", alias, p.Port(), strings.ToUpper(p.Proto()), p.Proto(), l.ChildIP, p.Port())) + env = append(env, fmt.Sprintf("%s_PORT_%s_%s_ADDR=%s", alias, p.Port(), strings.ToUpper(p.Proto()), l.ChildIP)) + env = append(env, fmt.Sprintf("%s_PORT_%s_%s_PROTO=%s", alias, p.Port(), strings.ToUpper(p.Proto()), p.Proto())) + env = append(env, fmt.Sprintf("%s_PORT_%s_%s_PORT_START=%s", alias, p.Port(), strings.ToUpper(p.Proto()), p.Port())) + + q := l.Ports[j] + env = append(env, fmt.Sprintf("%s_PORT_%s_%s_END=%s://%s:%s", alias, p.Port(), strings.ToUpper(q.Proto()), q.Proto(), l.ChildIP, q.Port())) + env = append(env, 
fmt.Sprintf("%s_PORT_%s_%s_PORT_END=%s", alias, p.Port(), strings.ToUpper(q.Proto()), q.Port())) + + i = j + 1 + continue + } + env = append(env, fmt.Sprintf("%s_PORT_%s_%s=%s://%s:%s", alias, p.Port(), strings.ToUpper(p.Proto()), p.Proto(), l.ChildIP, p.Port())) env = append(env, fmt.Sprintf("%s_PORT_%s_%s_ADDR=%s", alias, p.Port(), strings.ToUpper(p.Proto()), l.ChildIP)) env = append(env, fmt.Sprintf("%s_PORT_%s_%s_PORT=%s", alias, p.Port(), strings.ToUpper(p.Proto()), p.Port())) env = append(env, fmt.Sprintf("%s_PORT_%s_%s_PROTO=%s", alias, p.Port(), strings.ToUpper(p.Proto()), p.Proto())) + i++ } // Load the linked container's name into the environment @@ -125,7 +162,7 @@ func (l *Link) toggle(action string, ignoreErrors bool) error { out := make([]string, len(l.Ports)) for i, p := range l.Ports { - out[i] = fmt.Sprintf("%s/%s", p.Port(), p.Proto()) + out[i] = string(p) } job.SetenvList("Ports", out) diff --git a/links/links_test.go b/links/links_test.go index c26559e599..7ba9513ea0 100644 --- a/links/links_test.go +++ b/links/links_test.go @@ -107,3 +107,52 @@ func TestLinkEnv(t *testing.T) { t.Fatalf("Expected gordon, got %s", env["DOCKER_ENV_PASSWORD"]) } } + +func TestLinkMultipleEnv(t *testing.T) { + ports := make(nat.PortSet) + ports[nat.Port("6379/tcp")] = struct{}{} + ports[nat.Port("6380/tcp")] = struct{}{} + ports[nat.Port("6381/tcp")] = struct{}{} + + link, err := NewLink("172.0.17.3", "172.0.17.2", "/db/docker", []string{"PASSWORD=gordon"}, ports, nil) + if err != nil { + t.Fatal(err) + } + + rawEnv := link.ToEnv() + env := make(map[string]string, len(rawEnv)) + for _, e := range rawEnv { + parts := strings.Split(e, "=") + if len(parts) != 2 { + t.FailNow() + } + env[parts[0]] = parts[1] + } + if env["DOCKER_PORT"] != "tcp://172.0.17.2:6379" { + t.Fatalf("Expected 172.0.17.2:6379, got %s", env["DOCKER_PORT"]) + } + if env["DOCKER_PORT_6379_TCP_START"] != "tcp://172.0.17.2:6379" { + t.Fatalf("Expected tcp://172.0.17.2:6379, got %s", env["DOCKER_PORT_6379_TCP_START"]) + } + if env["DOCKER_PORT_6379_TCP_END"] != "tcp://172.0.17.2:6381" { + t.Fatalf("Expected tcp://172.0.17.2:6381, got %s", env["DOCKER_PORT_6379_TCP_END"]) + } + if env["DOCKER_PORT_6379_TCP_PROTO"] != "tcp" { + t.Fatalf("Expected tcp, got %s", env["DOCKER_PORT_6379_TCP_PROTO"]) + } + if env["DOCKER_PORT_6379_TCP_ADDR"] != "172.0.17.2" { + t.Fatalf("Expected 172.0.17.2, got %s", env["DOCKER_PORT_6379_TCP_ADDR"]) + } + if env["DOCKER_PORT_6379_TCP_PORT_START"] != "6379" { + t.Fatalf("Expected 6379, got %s", env["DOCKER_PORT_6379_TCP_PORT_START"]) + } + if env["DOCKER_PORT_6379_TCP_PORT_END"] != "6381" { + t.Fatalf("Expected 6381, got %s", env["DOCKER_PORT_6379_TCP_PORT_END"]) + } + if env["DOCKER_NAME"] != "/db/docker" { + t.Fatalf("Expected /db/docker, got %s", env["DOCKER_NAME"]) + } + if env["DOCKER_ENV_PASSWORD"] != "gordon" { + t.Fatalf("Expected gordon, got %s", env["DOCKER_ENV_PASSWORD"]) + } +} diff --git a/nat/nat.go b/nat/nat.go index b0177289ce..1246626b0d 100644 --- a/nat/nat.go +++ b/nat/nat.go @@ -42,44 +42,37 @@ func ParsePort(rawPort string) (int, error) { } func (p Port) Proto() string { - parts := strings.Split(string(p), "/") - if len(parts) == 1 { - return "tcp" - } - return parts[1] + proto, _ := SplitProtoPort(string(p)) + return proto } func (p Port) Port() string { - return strings.Split(string(p), "/")[0] + _, port := SplitProtoPort(string(p)) + return port } func (p Port) Int() int { - i, err := ParsePort(p.Port()) + port, err := ParsePort(p.Port()) if err != nil { panic(err) } - return 
i + return port } // Splits a port in the format of proto/port func SplitProtoPort(rawPort string) (string, string) { - var port string - var proto string - parts := strings.Split(rawPort, "/") - - if len(parts) == 0 || parts[0] == "" { // we have "" or ""/ - port = "" - proto = "" - } else { // we have # or #/ or #/... - port = parts[0] - if len(parts) > 1 && parts[1] != "" { - proto = parts[1] // we have #/... - } else { - proto = "tcp" // we have # or #/ - } + l := len(parts) + if len(rawPort) == 0 || l == 0 || len(parts[0]) == 0 { + return "", "" } - return proto, port + if l == 1 { + return "tcp", rawPort + } + if len(parts[1]) == 0 { + return "tcp", parts[0] + } + return parts[1], parts[0] } func validateProto(proto string) bool { diff --git a/nat/nat_test.go b/nat/nat_test.go index a8c2cb584e..4ae9f4ece5 100644 --- a/nat/nat_test.go +++ b/nat/nat_test.go @@ -76,13 +76,13 @@ func TestSplitProtoPort(t *testing.T) { proto, port = SplitProtoPort("") if proto != "" || port != "" { - t.Fatal("parsing an empty string yielded surprising results") + t.Fatal("parsing an empty string yielded surprising results", proto, port) } proto, port = SplitProtoPort("1234") if proto != "tcp" || port != "1234" { - t.Fatal("tcp is not the default protocol for portspec '1234'") + t.Fatal("tcp is not the default protocol for portspec '1234'", proto, port) } proto, port = SplitProtoPort("1234/") diff --git a/runconfig/parse.go b/runconfig/parse.go index f1258e5b7f..9635e9402d 100644 --- a/runconfig/parse.go +++ b/runconfig/parse.go @@ -71,7 +71,7 @@ func Parse(cmd *flag.FlagSet, args []string, sysInfo *sysinfo.SysInfo) (*Config, cmd.Var(&flEnvFile, []string{"-env-file"}, "Read in a line delimited file of environment variables") cmd.Var(&flPublish, []string{"p", "-publish"}, fmt.Sprintf("Publish a container's port to the host\nformat: %s\n(use 'docker port' to see the actual mapping)", nat.PortSpecTemplateFormat)) - cmd.Var(&flExpose, []string{"#expose", "-expose"}, "Expose a port from the container without publishing it to your host") + cmd.Var(&flExpose, []string{"#expose", "-expose"}, "Expose a port or a range of ports (e.g. --expose=3300-3310) from the container without publishing it to your host") cmd.Var(&flDns, []string{"#dns", "-dns"}, "Set custom DNS servers") cmd.Var(&flDnsSearch, []string{"-dns-search"}, "Set custom DNS search domains (Use --dns-search=. 
if you don't wish to set the search domain)") cmd.Var(&flExtraHosts, []string{"-add-host"}, "Add a custom host-to-IP mapping (host:ip)") @@ -197,9 +197,24 @@ func Parse(cmd *flag.FlagSet, args []string, sysInfo *sysinfo.SysInfo) (*Config, if strings.Contains(e, ":") { return nil, nil, cmd, fmt.Errorf("Invalid port format for --expose: %s", e) } - p := nat.NewPort(nat.SplitProtoPort(e)) - if _, exists := ports[p]; !exists { - ports[p] = struct{}{} + //support two formats for expose, original format /[] or /[] + if strings.Contains(e, "-") { + proto, port := nat.SplitProtoPort(e) + //parse the start and end port and create a sequence of ports to expose + parts := strings.Split(port, "-") + start, _ := strconv.Atoi(parts[0]) + end, _ := strconv.Atoi(parts[1]) + for i := start; i <= end; i++ { + p := nat.NewPort(proto, strconv.Itoa(i)) + if _, exists := ports[p]; !exists { + ports[p] = struct{}{} + } + } + } else { + p := nat.NewPort(nat.SplitProtoPort(e)) + if _, exists := ports[p]; !exists { + ports[p] = struct{}{} + } } } From 9d6391a9ebca6c7bd4b262ebb5a4baf9d2c222f9 Mon Sep 17 00:00:00 2001 From: Derek McGowan Date: Thu, 23 Oct 2014 15:26:52 -0700 Subject: [PATCH 209/592] Skip V2 registry and immediately fallback to V1 when mirrors are provided Since V2 registry does not yet implement mirrors, when mirrors are given automatically fallback to V1 without checking V2 first. Signed-off-by: Derek McGowan (github: dmcgowan) --- graph/pull.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/graph/pull.go b/graph/pull.go index 3cc48fa699..07d5ec1462 100644 --- a/graph/pull.go +++ b/graph/pull.go @@ -139,7 +139,7 @@ func (s *TagStore) CmdPull(job *engine.Job) engine.Status { mirrors = s.mirrors } - if isOfficial || endpoint.Version == registry.APIVersion2 { + if len(mirrors) == 0 && (isOfficial || endpoint.Version == registry.APIVersion2) { j := job.Eng.Job("trust_update_base") if err = j.Run(); err != nil { return job.Errorf("error updating trust base graph: %s", err) From f2df38050e019c0db265e7c3e38ee0de8948ca77 Mon Sep 17 00:00:00 2001 From: Malte Janduda Date: Fri, 3 Oct 2014 23:02:17 +0200 Subject: [PATCH 210/592] Adding docker-cli run param to set MAC address Signed-off-by: Malte Janduda --- daemon/container.go | 1 + docs/man/docker-run.1.md | 9 +++++ docs/sources/articles/networking.md | 14 ++++++-- .../reference/api/docker_remote_api.md | 4 +++ .../reference/api/docker_remote_api_v1.15.md | 1 + .../reference/api/docker_remote_api_v1.16.md | 1 + docs/sources/reference/commandline/cli.md | 8 +++++ docs/sources/reference/run.md | 19 ++++++---- integration-cli/docker_cli_run_test.go | 35 +++++++++++++++++++ runconfig/config.go | 2 ++ runconfig/parse.go | 2 ++ 11 files changed, 87 insertions(+), 9 deletions(-) diff --git a/daemon/container.go b/daemon/container.go index a477f19f22..6a717c2c9e 100644 --- a/daemon/container.go +++ b/daemon/container.go @@ -457,6 +457,7 @@ func (container *Container) AllocateNetwork() error { ) job := eng.Job("allocate_interface", container.ID) + job.Setenv("RequestedMac", container.Config.MacAddress) if env, err = job.Stdout.AddEnv(); err != nil { return err } diff --git a/docs/man/docker-run.1.md b/docs/man/docker-run.1.md index 485965381c..ce2ffeb434 100644 --- a/docs/man/docker-run.1.md +++ b/docs/man/docker-run.1.md @@ -29,6 +29,7 @@ docker-run - Run a command in a new container [**-m**|**--memory**[=*MEMORY*]] [**--name**[=*NAME*]] [**--net**[=*"bridge"*]] +[**--mac-address**[=*MACADDRESS*]] [**-P**|**--publish-all**[=*false*]] 
[**-p**|**--publish**[=*[]*]] [**--privileged**[=*false*]] @@ -187,6 +188,14 @@ and foreground Docker containers. 'container:': reuses another container network stack 'host': use the host network stack inside the container. Note: the host mode gives the container full access to local system services such as D-bus and is therefore considered insecure. +**--mac-address**=*macaddress* + Set the MAC address for the container's ethernet device: + --mac-address=12:34:56:78:9a:bc + +Remember that the MAC address in an ethernet network must be unique. +The IPv6 link-local address will be based on the device's MAC address +according to RFC4862. + **-P**, **--publish-all**=*true*|*false* When set to true publish all exposed ports to the host interfaces. The default is false. If the operator uses -P (or -p) then Docker will make the diff --git a/docs/sources/articles/networking.md b/docs/sources/articles/networking.md index 036babb006..49f3722216 100644 --- a/docs/sources/articles/networking.md +++ b/docs/sources/articles/networking.md @@ -104,6 +104,9 @@ Finally, several networking options can only be provided when calling * `--net=bridge|none|container:NAME_or_ID|host` — see [How Docker networks a container](#container-networking) + * `--mac-address=MACADDRESS...` — see + [How docker networks a container](#container-networking) + * `-p SPEC` or `--publish=SPEC` — see [Binding container ports](#binding-ports) @@ -537,9 +540,15 @@ The steps with which Docker configures a container are: separate and unique network interface namespace, there are no physical interfaces with which this name could collide. -4. Give the container's `eth0` a new IP address from within the +4. Set the interface's mac address according to the `--mac-address` + parameter or generate a random one. + +5. Give the container's `eth0` a new IP address from within the bridge's range of network addresses, and set its default route to - the IP address that the Docker host owns on the bridge. + the IP address that the Docker host owns on the bridge. If available + the IP address is generated from the MAC address. This prevents arp + cache invalidation problems, when a new container comes up with an + IP used in the past by another container with another MAC. With these steps complete, the container now possesses an `eth0` (virtual) network card and will find itself able to communicate with @@ -621,6 +630,7 @@ Docker do all of the configuration: $ sudo ip link set B netns $pid $ sudo ip netns exec $pid ip link set dev B name eth0 + $ sudo ip netns exec $pid ip link set eth0 address 12:34:56:78:9a:bc $ sudo ip netns exec $pid ip link set eth0 up $ sudo ip netns exec $pid ip addr add 172.17.42.99/16 dev eth0 $ sudo ip netns exec $pid ip route add default via 172.17.42.1 diff --git a/docs/sources/reference/api/docker_remote_api.md b/docs/sources/reference/api/docker_remote_api.md index 559b841cde..5b054c6bbe 100644 --- a/docs/sources/reference/api/docker_remote_api.md +++ b/docs/sources/reference/api/docker_remote_api.md @@ -52,6 +52,10 @@ You can still call an old version of the API using `info` now returns the number of CPUs available on the machine (`NCPU`) and total memory available (`MemTotal`). +`POST /containers/create` +**New!** +You can define the container's MAC address by providing a MacAddress key-value pair. 
+ ## v1.15 ### Full Documentation diff --git a/docs/sources/reference/api/docker_remote_api_v1.15.md b/docs/sources/reference/api/docker_remote_api_v1.15.md index cf8f6d3cc3..1b185db4de 100644 --- a/docs/sources/reference/api/docker_remote_api_v1.15.md +++ b/docs/sources/reference/api/docker_remote_api_v1.15.md @@ -131,6 +131,7 @@ Create a container }, "WorkingDir":"", "NetworkDisabled": false, + "MacAddress":"12:34:56:78:9a:bc", "ExposedPorts":{ "22/tcp": {} }, diff --git a/docs/sources/reference/api/docker_remote_api_v1.16.md b/docs/sources/reference/api/docker_remote_api_v1.16.md index fbb89294e2..81fdc7380a 100644 --- a/docs/sources/reference/api/docker_remote_api_v1.16.md +++ b/docs/sources/reference/api/docker_remote_api_v1.16.md @@ -131,6 +131,7 @@ Create a container }, "WorkingDir":"", "NetworkDisabled": false, + "MacAddress":"12:34:56:78:9a:bc", "ExposedPorts":{ "22/tcp": {} }, diff --git a/docs/sources/reference/commandline/cli.md b/docs/sources/reference/commandline/cli.md index a46ef6e3b6..50c0ff3cce 100644 --- a/docs/sources/reference/commandline/cli.md +++ b/docs/sources/reference/commandline/cli.md @@ -516,6 +516,7 @@ Creates a new container. --lxc-conf=[] (lxc exec-driver only) Add custom lxc options --lxc-conf="lxc.cgroup.cpuset.cpus = 0,1" -m, --memory="" Memory limit (format: , where unit = b, k, m or g) --name="" Assign a name to the container + --mac-address="" Set the container's MAC address --net="bridge" Set the Network mode for the container 'bridge': creates a new network stack for the container on the docker bridge 'none': no networking for this container @@ -867,6 +868,13 @@ straightforward manner. $ sudo docker inspect --format='{{.NetworkSettings.IPAddress}}' $INSTANCE_ID +**Get an instance's MAC Address:** + +For the most part, you can pick out any field from the JSON in a fairly +straightforward manner. + + $ sudo docker inspect --format='{{.NetworkSettings.MacAddress}}' $INSTANCE_ID + **List All Port Bindings:** One can loop over arrays and maps in the results to produce simple text diff --git a/docs/sources/reference/run.md b/docs/sources/reference/run.md index b17afde23f..c8a141de06 100644 --- a/docs/sources/reference/run.md +++ b/docs/sources/reference/run.md @@ -133,13 +133,14 @@ example, `docker run ubuntu:14.04`. ## Network settings - --dns=[] : Set custom dns servers for the container - --net="bridge" : Set the Network mode for the container - 'bridge': creates a new network stack for the container on the docker bridge - 'none': no networking for this container - 'container:': reuses another container network stack - 'host': use the host network stack inside the container - --add-host="" : Add a line to /etc/hosts (host:IP) + --dns=[] : Set custom dns servers for the container + --net="bridge" : Set the Network mode for the container + 'bridge': creates a new network stack for the container on the docker bridge + 'none': no networking for this container + 'container:': reuses another container network stack + 'host': use the host network stack inside the container + --add-host="" : Add a line to /etc/hosts (host:IP) + --mac-address="" : Sets the container's ethernet device's mac address By default, all containers have networking enabled and they can make any outgoing connections. The operator can completely disable networking @@ -150,6 +151,10 @@ networking. In cases like this, you would perform I/O through files or Your container will use the same DNS servers as the host by default, but you can override this with `--dns`. 
+By default a random mac is generated. You can set the container's mac address +explicitly by providing a mac via the `--mac-address` parameter (format: +12:34:56:78:9a:bc). + Supported networking modes are: * none - no networking in the container diff --git a/integration-cli/docker_cli_run_test.go b/integration-cli/docker_cli_run_test.go index c529be7df2..389bcda93a 100644 --- a/integration-cli/docker_cli_run_test.go +++ b/integration-cli/docker_cli_run_test.go @@ -2018,6 +2018,41 @@ func TestRunNetworkNotInitializedNoneMode(t *testing.T) { logDone("run - network must not be initialized in 'none' mode") } +func TestRunSetMacAddress(t *testing.T) { + mac := "12:34:56:78:9a:bc" + cmd := exec.Command("/bin/bash", "-c", dockerBinary+` run -i --rm --mac-address=`+mac+` busybox /bin/sh -c "ip link show eth0 | tail -1 | awk '{ print \$2 }'"`) + out, _, err := runCommandWithOutput(cmd) + if err != nil { + t.Fatal(err) + } + actualMac := strings.TrimSpace(out) + if actualMac != mac { + t.Fatalf("Set Mac Address with --mac-address failed. The container has an incorrect MAC address: %q, expected: %q", actualMac, mac) + } + + deleteAllContainers() + logDone("run - setting Mac Address with --mac-address") +} + +func TestRunInspectMacAddress(t *testing.T) { + mac := "12:34:56:78:9a:bc" + cmd := exec.Command(dockerBinary, "run", "-d", "--mac-address="+mac, "busybox", "top") + out, _, err := runCommandWithOutput(cmd) + if err != nil { + t.Fatal(err) + } + id := strings.TrimSpace(out) + inspectedMac, err := inspectField(id, "NetworkSettings.MacAddress") + if err != nil { + t.Fatal(err) + } + if inspectedMac != mac { + t.Fatalf("Inspecting Mac Address with failed. docker inspect shows incorrect MacAddress: %q, actual Mac: %q", inspectedMac, mac) + } + deleteAllContainers() + logDone("run - inspecting Mac Address") +} + func TestRunDeallocatePortOnMissingIptablesRule(t *testing.T) { cmd := exec.Command(dockerBinary, "run", "-d", "-p", "23:23", "busybox", "top") out, _, err := runCommandWithOutput(cmd) diff --git a/runconfig/config.go b/runconfig/config.go index 28e85de647..29c54a4d6d 100644 --- a/runconfig/config.go +++ b/runconfig/config.go @@ -31,6 +31,7 @@ type Config struct { WorkingDir string Entrypoint []string NetworkDisabled bool + MacAddress string OnBuild []string SecurityOpt []string } @@ -53,6 +54,7 @@ func ContainerConfigFromJob(job *engine.Job) *Config { Image: job.Getenv("Image"), WorkingDir: job.Getenv("WorkingDir"), NetworkDisabled: job.GetenvBool("NetworkDisabled"), + MacAddress: job.Getenv("MacAddress"), } job.GetenvJson("ExposedPorts", &config.ExposedPorts) job.GetenvJson("Volumes", &config.Volumes) diff --git a/runconfig/parse.go b/runconfig/parse.go index 9635e9402d..aed10eadd1 100644 --- a/runconfig/parse.go +++ b/runconfig/parse.go @@ -59,6 +59,7 @@ func Parse(cmd *flag.FlagSet, args []string, sysInfo *sysinfo.SysInfo) (*Config, flCpuShares = cmd.Int64([]string{"c", "-cpu-shares"}, 0, "CPU shares (relative weight)") flCpuset = cmd.String([]string{"-cpuset"}, "", "CPUs in which to allow execution (0-3, 0,1)") flNetMode = cmd.String([]string{"-net"}, "bridge", "Set the Network mode for the container\n'bridge': creates a new network stack for the container on the docker bridge\n'none': no networking for this container\n'container:': reuses another container network stack\n'host': use the host network stack inside the container. 
Note: the host mode gives the container full access to local system services such as D-bus and is therefore considered insecure.") + flMacAddress = cmd.String([]string{"-mac-address"}, "", "Container MAC address (ex: 92:d0:c6:0a:29:33)") flRestartPolicy = cmd.String([]string{"-restart"}, "", "Restart policy to apply when a container exits (no, on-failure[:max-retry], always)") ) @@ -269,6 +270,7 @@ func Parse(cmd *flag.FlagSet, args []string, sysInfo *sysinfo.SysInfo) (*Config, Cmd: runCmd, Image: image, Volumes: flVolumes.GetMap(), + MacAddress: *flMacAddress, Entrypoint: entrypoint, WorkingDir: *flWorkingDir, SecurityOpt: flSecurityOpt.GetAll(), From f12b7b4bc21ff6a64505909a9ba49107877d2ca4 Mon Sep 17 00:00:00 2001 From: Vladimir Bulyga Date: Sat, 1 Nov 2014 19:22:28 +0300 Subject: [PATCH 211/592] allow to use x-registry-auth header with enabled cors Signed-off-by: Vladimir Bulyga --- api/server/server.go | 2 +- integration/api_test.go | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/api/server/server.go b/api/server/server.go index d77a6c22a2..16e5085148 100644 --- a/api/server/server.go +++ b/api/server/server.go @@ -1167,7 +1167,7 @@ func optionsHandler(eng *engine.Engine, version version.Version, w http.Response } func writeCorsHeaders(w http.ResponseWriter, r *http.Request) { w.Header().Add("Access-Control-Allow-Origin", "*") - w.Header().Add("Access-Control-Allow-Headers", "Origin, X-Requested-With, Content-Type, Accept") + w.Header().Add("Access-Control-Allow-Headers", "Origin, X-Requested-With, Content-Type, Accept, X-Registry-Auth") w.Header().Add("Access-Control-Allow-Methods", "GET, POST, DELETE, PUT, OPTIONS") } diff --git a/integration/api_test.go b/integration/api_test.go index 6bb340d53b..8e45f89282 100644 --- a/integration/api_test.go +++ b/integration/api_test.go @@ -785,8 +785,8 @@ func TestGetEnabledCors(t *testing.T) { if allowOrigin != "*" { t.Errorf("Expected header Access-Control-Allow-Origin to be \"*\", %s found.", allowOrigin) } - if allowHeaders != "Origin, X-Requested-With, Content-Type, Accept" { - t.Errorf("Expected header Access-Control-Allow-Headers to be \"Origin, X-Requested-With, Content-Type, Accept\", %s found.", allowHeaders) + if allowHeaders != "Origin, X-Requested-With, Content-Type, Accept, X-Registry-Auth" { + t.Errorf("Expected header Access-Control-Allow-Headers to be \"Origin, X-Requested-With, Content-Type, Accept, X-Registry-Auth\", %s found.", allowHeaders) } if allowMethods != "GET, POST, DELETE, PUT, OPTIONS" { t.Errorf("Expected hearder Access-Control-Allow-Methods to be \"GET, POST, DELETE, PUT, OPTIONS\", %s found.", allowMethods) From e5ecfd3b178bbc399de344635606d82e147276ff Mon Sep 17 00:00:00 2001 From: "Daniel, Dao Quang Minh" Date: Sat, 1 Nov 2014 12:23:08 -0400 Subject: [PATCH 212/592] change util.CopyDirectory to archive.CopyWithTar Docker-DCO-1.1-Signed-off-by: Daniel, Dao Quang Minh (github: dqminh) --- utils/utils.go | 11 ++--------- 1 file changed, 2 insertions(+), 9 deletions(-) diff --git a/utils/utils.go b/utils/utils.go index ef587bb77c..e2254b8bab 100644 --- a/utils/utils.go +++ b/utils/utils.go @@ -22,6 +22,7 @@ import ( log "github.com/Sirupsen/logrus" "github.com/docker/docker/dockerversion" + "github.com/docker/docker/pkg/archive" "github.com/docker/docker/pkg/fileutils" "github.com/docker/docker/pkg/ioutils" ) @@ -250,14 +251,6 @@ func HashData(src io.Reader) (string, error) { return "sha256:" + hex.EncodeToString(h.Sum(nil)), nil } -// FIXME: this is deprecated by CopyWithTar in archive.go 
-func CopyDirectory(source, dest string) error { - if output, err := exec.Command("cp", "-ra", source, dest).CombinedOutput(); err != nil { - return fmt.Errorf("Error copy: %s (%s)", err, output) - } - return nil -} - type WriteFlusher struct { sync.Mutex w io.Writer @@ -381,7 +374,7 @@ func TestDirectory(templateDir string) (dir string, err error) { return } if templateDir != "" { - if err = CopyDirectory(templateDir, dir); err != nil { + if err = archive.CopyWithTar(templateDir, dir); err != nil { return } } From de9bf24750e0a9774a475eae74cbd2c241224e10 Mon Sep 17 00:00:00 2001 From: Arnaud Porterie Date: Sat, 1 Nov 2014 13:58:18 -0700 Subject: [PATCH 213/592] Clarify USER instruction documentation Reuse WORKDIR wording to specify that the USER instructions affect the following RUN, CMD, and ENTRYPOINT instructions. Signed-off-by: Arnaud Porterie --- docs/sources/reference/builder.md | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/docs/sources/reference/builder.md b/docs/sources/reference/builder.md index ae0771d685..cb096640bd 100644 --- a/docs/sources/reference/builder.md +++ b/docs/sources/reference/builder.md @@ -731,7 +731,8 @@ documentation. USER daemon The `USER` instruction sets the user name or UID to use when running the image -and for any following `RUN` directives. +and for any `RUN`, `CMD` and `ENTRYPOINT` instructions that follow it in the +`Dockerfile`. ## WORKDIR From d22d32d61fc024d2c81e75f870d802f7a2cd28fd Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?M=C3=A9di-R=C3=A9mi=20Hashim?= Date: Sat, 1 Nov 2014 21:22:43 +0000 Subject: [PATCH 214/592] Grammatical mistake in docs Change `The team can now use this image by run their own containers.` to `The team can now use this image by running their own containers.` --- docs/sources/userguide/dockerimages.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/sources/userguide/dockerimages.md b/docs/sources/userguide/dockerimages.md index f58c9a224f..51f6beb554 100644 --- a/docs/sources/userguide/dockerimages.md +++ b/docs/sources/userguide/dockerimages.md @@ -154,7 +154,7 @@ We've identified a suitable image, `training/sinatra`, and now we can download i $ sudo docker pull training/sinatra -The team can now use this image by run their own containers. +The team can now use this image by running their own containers. $ sudo docker run -t -i training/sinatra /bin/bash root@a8cb6ce02d85:/# From 795533ede700395add26746b50f3d34aa3c1a3eb Mon Sep 17 00:00:00 2001 From: Mengdi Gao Date: Sun, 2 Nov 2014 22:55:36 +0800 Subject: [PATCH 215/592] Remove extra line feed typo The removed extra line feed broke the paragraph at https://docs.docker.com/userguide/dockerizing/ --- docs/sources/userguide/dockerizing.md | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/docs/sources/userguide/dockerizing.md b/docs/sources/userguide/dockerizing.md index 9da4890bfa..2383160986 100644 --- a/docs/sources/userguide/dockerizing.md +++ b/docs/sources/userguide/dockerizing.md @@ -126,8 +126,7 @@ identifies a container so we can work with it. > on we'll see a shorter ID and some ways to name our containers to make > working with them easier. -We can use this container ID to see what's happening with our `hello -world` daemon. +We can use this container ID to see what's happening with our `hello world` daemon. Firstly let's make sure our container is running. We can do that with the `docker ps` command. 
The `docker ps` command queries From c0f0f5c9887032c606750b645001829d9f14f47c Mon Sep 17 00:00:00 2001 From: Doug Davis Date: Thu, 23 Oct 2014 14:54:35 -0700 Subject: [PATCH 216/592] Do some cleanup on .dockerignore paths While working on the fix for #8330 I noticed a few things: 1 - the split() call for the .dockerignore process will generate a blank "exclude". While this isn't causing an issue right now, I got worried that in the future some code later on might interpret "" as something bad, like "everything" or ".". So I added a check for an empty "exclude" and skipped it 2 - if someone puts "foo" in their .dockerignore then we'll skip "foo". However, if they put "./foo" then we won't due to the painfully simplistic logic of go's filepath.Match algorithm. To help things a little (and to treat ./Dockerfile just like Dockerfile) I added code to filepath.Clean() each entry in .dockerignore. It should result in the same semantic path but ensure that no matter how the user expresses the path, we'll match it. Signed-off-by: Doug Davis --- api/client/commands.go | 5 ++++ integration-cli/docker_cli_build_test.go | 32 +++++++++++++++++++++++- 2 files changed, 36 insertions(+), 1 deletion(-) diff --git a/api/client/commands.go b/api/client/commands.go index f4ced5ecff..de0076c4ac 100644 --- a/api/client/commands.go +++ b/api/client/commands.go @@ -143,6 +143,11 @@ func (cli *DockerCli) CmdBuild(args ...string) error { return fmt.Errorf("Error reading .dockerignore: '%s'", err) } for _, pattern := range strings.Split(string(ignore), "\n") { + pattern = strings.TrimSpace(pattern) + if pattern == "" { + continue + } + pattern = filepath.Clean(pattern) ok, err := filepath.Match(pattern, "Dockerfile") if err != nil { return fmt.Errorf("Bad .dockerignore pattern: '%s', error: %s", pattern, err) diff --git a/integration-cli/docker_cli_build_test.go b/integration-cli/docker_cli_build_test.go index c909b14f0d..8812cab1be 100644 --- a/integration-cli/docker_cli_build_test.go +++ b/integration-cli/docker_cli_build_test.go @@ -2325,6 +2325,29 @@ func TestBuildDockerignore(t *testing.T) { logDone("build - test .dockerignore") } +func TestBuildDockerignoreCleanPaths(t *testing.T) { + name := "testbuilddockerignorecleanpaths" + defer deleteImages(name) + dockerfile := ` + FROM busybox + ADD . /tmp/ + RUN (! ls /tmp/foo) && (! ls /tmp/foo2) && (! 
ls /tmp/dir1/foo)` + ctx, err := fakeContext(dockerfile, map[string]string{ + "foo": "foo", + "foo2": "foo2", + "dir1/foo": "foo in dir1", + ".dockerignore": "./foo\ndir1//foo\n./dir1/../foo2", + }) + if err != nil { + t.Fatal(err) + } + defer ctx.Close() + if _, err := buildImageFromContext(name, ctx, true); err != nil { + t.Fatal(err) + } + logDone("build - test .dockerignore with clean paths") +} + func TestBuildDockerignoringDockerfile(t *testing.T) { name := "testbuilddockerignoredockerfile" defer deleteImages(name) @@ -2334,13 +2357,20 @@ func TestBuildDockerignoringDockerfile(t *testing.T) { "Dockerfile": "FROM scratch", ".dockerignore": "Dockerfile\n", }) - defer ctx.Close() if err != nil { t.Fatal(err) } + defer ctx.Close() if _, err = buildImageFromContext(name, ctx, true); err == nil { t.Fatalf("Didn't get expected error from ignoring Dockerfile") } + + // now try it with ./Dockerfile + ctx.Add(".dockerignore", "./Dockerfile\n") + if _, err = buildImageFromContext(name, ctx, true); err == nil { + t.Fatalf("Didn't get expected error from ignoring ./Dockerfile") + } + logDone("build - test .dockerignore of Dockerfile") } From 971fc2253af77ae4a22a3741fff51d84cd9bc697 Mon Sep 17 00:00:00 2001 From: Malte Janduda Date: Mon, 3 Nov 2014 11:43:11 +0100 Subject: [PATCH 217/592] enhancing set-macaddress docu Signed-off-by: Malte Janduda --- docs/man/docker-run.1.md | 2 +- docs/sources/articles/networking.md | 6 +++--- docs/sources/reference/api/docker_remote_api.md | 2 +- docs/sources/reference/run.md | 8 ++++---- integration-cli/docker_cli_run_test.go | 8 ++++---- runconfig/parse.go | 2 +- 6 files changed, 14 insertions(+), 14 deletions(-) diff --git a/docs/man/docker-run.1.md b/docs/man/docker-run.1.md index ce2ffeb434..ae559819b1 100644 --- a/docs/man/docker-run.1.md +++ b/docs/man/docker-run.1.md @@ -192,7 +192,7 @@ and foreground Docker containers. Set the MAC address for the container's ethernet device: --mac-address=12:34:56:78:9a:bc -Remember that the MAC address in an ethernet network must be unique. +Remember that the MAC address in an Ethernet network must be unique. The IPv6 link-local address will be based on the device's MAC address according to RFC4862. diff --git a/docs/sources/articles/networking.md b/docs/sources/articles/networking.md index 49f3722216..6587efc522 100644 --- a/docs/sources/articles/networking.md +++ b/docs/sources/articles/networking.md @@ -105,7 +105,7 @@ Finally, several networking options can only be provided when calling [How Docker networks a container](#container-networking) * `--mac-address=MACADDRESS...` — see - [How docker networks a container](#container-networking) + [How Docker networks a container](#container-networking) * `-p SPEC` or `--publish=SPEC` — see [Binding container ports](#binding-ports) @@ -540,13 +540,13 @@ The steps with which Docker configures a container are: separate and unique network interface namespace, there are no physical interfaces with which this name could collide. -4. Set the interface's mac address according to the `--mac-address` +4. Set the interface's MAC address according to the `--mac-address` parameter or generate a random one. 5. Give the container's `eth0` a new IP address from within the bridge's range of network addresses, and set its default route to the IP address that the Docker host owns on the bridge. If available - the IP address is generated from the MAC address. This prevents arp + the IP address is generated from the MAC address. 
This prevents ARP cache invalidation problems, when a new container comes up with an IP used in the past by another container with another MAC. diff --git a/docs/sources/reference/api/docker_remote_api.md b/docs/sources/reference/api/docker_remote_api.md index 5b054c6bbe..3babab8eca 100644 --- a/docs/sources/reference/api/docker_remote_api.md +++ b/docs/sources/reference/api/docker_remote_api.md @@ -54,7 +54,7 @@ total memory available (`MemTotal`). `POST /containers/create` **New!** -You can define the container's MAC address by providing a MacAddress key-value pair. +You can set the new container's MAC address explicitly. ## v1.15 diff --git a/docs/sources/reference/run.md b/docs/sources/reference/run.md index c8a141de06..4ec867f1a6 100644 --- a/docs/sources/reference/run.md +++ b/docs/sources/reference/run.md @@ -140,7 +140,7 @@ example, `docker run ubuntu:14.04`. 'container:': reuses another container network stack 'host': use the host network stack inside the container --add-host="" : Add a line to /etc/hosts (host:IP) - --mac-address="" : Sets the container's ethernet device's mac address + --mac-address="" : Sets the container's ethernet device's MAC address By default, all containers have networking enabled and they can make any outgoing connections. The operator can completely disable networking @@ -151,9 +151,9 @@ networking. In cases like this, you would perform I/O through files or Your container will use the same DNS servers as the host by default, but you can override this with `--dns`. -By default a random mac is generated. You can set the container's mac address -explicitly by providing a mac via the `--mac-address` parameter (format: -12:34:56:78:9a:bc). +By default a random MAC is generated. You can set the container's MAC address +explicitly by providing a MAC via the `--mac-address` parameter (format: +`12:34:56:78:9a:bc`). Supported networking modes are: diff --git a/integration-cli/docker_cli_run_test.go b/integration-cli/docker_cli_run_test.go index 389bcda93a..4c3e8d0a08 100644 --- a/integration-cli/docker_cli_run_test.go +++ b/integration-cli/docker_cli_run_test.go @@ -2027,11 +2027,11 @@ func TestRunSetMacAddress(t *testing.T) { } actualMac := strings.TrimSpace(out) if actualMac != mac { - t.Fatalf("Set Mac Address with --mac-address failed. The container has an incorrect MAC address: %q, expected: %q", actualMac, mac) + t.Fatalf("Set MAC address with --mac-address failed. The container has an incorrect MAC address: %q, expected: %q", actualMac, mac) } deleteAllContainers() - logDone("run - setting Mac Address with --mac-address") + logDone("run - setting MAC address with --mac-address") } func TestRunInspectMacAddress(t *testing.T) { @@ -2047,10 +2047,10 @@ func TestRunInspectMacAddress(t *testing.T) { t.Fatal(err) } if inspectedMac != mac { - t.Fatalf("Inspecting Mac Address with failed. 
docker inspect shows incorrect MacAddress: %q, actual Mac: %q", inspectedMac, mac) + t.Fatalf("docker inspect outputs wrong MAC address: %q, should be: %q", inspectedMac, mac) } deleteAllContainers() - logDone("run - inspecting Mac Address") + logDone("run - inspecting MAC address") } func TestRunDeallocatePortOnMissingIptablesRule(t *testing.T) { diff --git a/runconfig/parse.go b/runconfig/parse.go index aed10eadd1..c62ab3fdd4 100644 --- a/runconfig/parse.go +++ b/runconfig/parse.go @@ -59,7 +59,7 @@ func Parse(cmd *flag.FlagSet, args []string, sysInfo *sysinfo.SysInfo) (*Config, flCpuShares = cmd.Int64([]string{"c", "-cpu-shares"}, 0, "CPU shares (relative weight)") flCpuset = cmd.String([]string{"-cpuset"}, "", "CPUs in which to allow execution (0-3, 0,1)") flNetMode = cmd.String([]string{"-net"}, "bridge", "Set the Network mode for the container\n'bridge': creates a new network stack for the container on the docker bridge\n'none': no networking for this container\n'container:': reuses another container network stack\n'host': use the host network stack inside the container. Note: the host mode gives the container full access to local system services such as D-bus and is therefore considered insecure.") - flMacAddress = cmd.String([]string{"-mac-address"}, "", "Container MAC address (ex: 92:d0:c6:0a:29:33)") + flMacAddress = cmd.String([]string{"-mac-address"}, "", "Container MAC address (e.g. 92:d0:c6:0a:29:33)") flRestartPolicy = cmd.String([]string{"-restart"}, "", "Restart policy to apply when a container exits (no, on-failure[:max-retry], always)") ) From 4437573c96cdc52e98f44356be5c96dff4ce5907 Mon Sep 17 00:00:00 2001 From: Peter Ericson Date: Mon, 3 Nov 2014 10:52:50 +0000 Subject: [PATCH 218/592] Fix typo in api docs: stdou -> stdout --- docs/sources/reference/api/docker_remote_api_v1.10.md | 2 +- docs/sources/reference/api/docker_remote_api_v1.11.md | 2 +- docs/sources/reference/api/docker_remote_api_v1.12.md | 2 +- docs/sources/reference/api/docker_remote_api_v1.13.md | 2 +- docs/sources/reference/api/docker_remote_api_v1.14.md | 2 +- docs/sources/reference/api/docker_remote_api_v1.15.md | 2 +- docs/sources/reference/api/docker_remote_api_v1.16.md | 2 +- docs/sources/reference/api/docker_remote_api_v1.6.md | 2 +- docs/sources/reference/api/docker_remote_api_v1.7.md | 2 +- docs/sources/reference/api/docker_remote_api_v1.8.md | 2 +- docs/sources/reference/api/docker_remote_api_v1.9.md | 2 +- 11 files changed, 11 insertions(+), 11 deletions(-) diff --git a/docs/sources/reference/api/docker_remote_api_v1.10.md b/docs/sources/reference/api/docker_remote_api_v1.10.md index 52bbe2e486..7918215257 100644 --- a/docs/sources/reference/api/docker_remote_api_v1.10.md +++ b/docs/sources/reference/api/docker_remote_api_v1.10.md @@ -519,7 +519,7 @@ Status Codes: `STREAM_TYPE` can be: - 0: stdin (will be written on stdout) -- 1: stdou +- 1: stdout - 2: stderr `SIZE1, SIZE2, SIZE3, SIZE4` are the 4 bytes of diff --git a/docs/sources/reference/api/docker_remote_api_v1.11.md b/docs/sources/reference/api/docker_remote_api_v1.11.md index 2368daf4ec..ad858c3144 100644 --- a/docs/sources/reference/api/docker_remote_api_v1.11.md +++ b/docs/sources/reference/api/docker_remote_api_v1.11.md @@ -555,7 +555,7 @@ Status Codes: `STREAM_TYPE` can be: - 0: stdin (will be written on stdout) -- 1: stdou +- 1: stdout - 2: stderr `SIZE1, SIZE2, SIZE3, SIZE4` are the 4 bytes of diff --git a/docs/sources/reference/api/docker_remote_api_v1.12.md b/docs/sources/reference/api/docker_remote_api_v1.12.md index 
0d547f279b..48e6bb5c9c 100644 --- a/docs/sources/reference/api/docker_remote_api_v1.12.md +++ b/docs/sources/reference/api/docker_remote_api_v1.12.md @@ -603,7 +603,7 @@ Status Codes: `STREAM_TYPE` can be: - 0: stdin (will be written on stdout) -- 1: stdou +- 1: stdout - 2: stderr `SIZE1, SIZE2, SIZE3, SIZE4` are the 4 bytes of diff --git a/docs/sources/reference/api/docker_remote_api_v1.13.md b/docs/sources/reference/api/docker_remote_api_v1.13.md index b752d5c01e..595a748e2b 100644 --- a/docs/sources/reference/api/docker_remote_api_v1.13.md +++ b/docs/sources/reference/api/docker_remote_api_v1.13.md @@ -596,7 +596,7 @@ Status Codes: `STREAM_TYPE` can be: - 0: stdin (will be written on stdout) -- 1: stdou +- 1: stdout - 2: stderr `SIZE1, SIZE2, SIZE3, SIZE4` are the 4 bytes of diff --git a/docs/sources/reference/api/docker_remote_api_v1.14.md b/docs/sources/reference/api/docker_remote_api_v1.14.md index 6806afae06..3830130991 100644 --- a/docs/sources/reference/api/docker_remote_api_v1.14.md +++ b/docs/sources/reference/api/docker_remote_api_v1.14.md @@ -601,7 +601,7 @@ Status Codes: `STREAM_TYPE` can be: - 0: stdin (will be written on stdout) -- 1: stdou +- 1: stdout - 2: stderr `SIZE1, SIZE2, SIZE3, SIZE4` are the 4 bytes of diff --git a/docs/sources/reference/api/docker_remote_api_v1.15.md b/docs/sources/reference/api/docker_remote_api_v1.15.md index cf8f6d3cc3..dac3d71f6b 100644 --- a/docs/sources/reference/api/docker_remote_api_v1.15.md +++ b/docs/sources/reference/api/docker_remote_api_v1.15.md @@ -635,7 +635,7 @@ Status Codes: `STREAM_TYPE` can be: - 0: stdin (will be written on stdout) -- 1: stdou +- 1: stdout - 2: stderr `SIZE1, SIZE2, SIZE3, SIZE4` are the 4 bytes of diff --git a/docs/sources/reference/api/docker_remote_api_v1.16.md b/docs/sources/reference/api/docker_remote_api_v1.16.md index fbb89294e2..6a32ac0d04 100644 --- a/docs/sources/reference/api/docker_remote_api_v1.16.md +++ b/docs/sources/reference/api/docker_remote_api_v1.16.md @@ -635,7 +635,7 @@ Status Codes: `STREAM_TYPE` can be: - 0: stdin (will be written on stdout) -- 1: stdou +- 1: stdout - 2: stderr `SIZE1, SIZE2, SIZE3, SIZE4` are the 4 bytes of diff --git a/docs/sources/reference/api/docker_remote_api_v1.6.md b/docs/sources/reference/api/docker_remote_api_v1.6.md index 39d87f38f6..3946cc69c8 100644 --- a/docs/sources/reference/api/docker_remote_api_v1.6.md +++ b/docs/sources/reference/api/docker_remote_api_v1.6.md @@ -545,7 +545,7 @@ Status Codes: `STREAM_TYPE` can be: - 0: stdin (will be written on stdout) -- 1: stdou +- 1: stdout - 2: stderr `SIZE1, SIZE2, SIZE3, SIZE4` are the 4 bytes of diff --git a/docs/sources/reference/api/docker_remote_api_v1.7.md b/docs/sources/reference/api/docker_remote_api_v1.7.md index 6e5387a80e..ff4b485ec6 100644 --- a/docs/sources/reference/api/docker_remote_api_v1.7.md +++ b/docs/sources/reference/api/docker_remote_api_v1.7.md @@ -490,7 +490,7 @@ Status Codes: `STREAM_TYPE` can be: - 0: stdin (will be written on stdout) -- 1: stdou +- 1: stdout - 2: stderr `SIZE1, SIZE2, SIZE3, SIZE4` are the 4 bytes of diff --git a/docs/sources/reference/api/docker_remote_api_v1.8.md b/docs/sources/reference/api/docker_remote_api_v1.8.md index 36c92a4aee..768465f2e6 100644 --- a/docs/sources/reference/api/docker_remote_api_v1.8.md +++ b/docs/sources/reference/api/docker_remote_api_v1.8.md @@ -538,7 +538,7 @@ Status Codes: `STREAM_TYPE` can be: - 0: stdin (will be written on stdout) -- 1: stdou +- 1: stdout - 2: stderr `SIZE1, SIZE2, SIZE3, SIZE4` are the 4 bytes of diff --git 
a/docs/sources/reference/api/docker_remote_api_v1.9.md b/docs/sources/reference/api/docker_remote_api_v1.9.md index 7cac380109..ed12bc3253 100644 --- a/docs/sources/reference/api/docker_remote_api_v1.9.md +++ b/docs/sources/reference/api/docker_remote_api_v1.9.md @@ -564,7 +564,7 @@ Status Codes: `STREAM_TYPE` can be: - 0: stdin (will be written on stdout) -- 1: stdou +- 1: stdout - 2: stderr `SIZE1, SIZE2, SIZE3, SIZE4` are the 4 bytes of From ef004ec03fe5aad33da7d53f196f16382c750d07 Mon Sep 17 00:00:00 2001 From: Ben Firshman Date: Mon, 3 Nov 2014 16:46:01 +0000 Subject: [PATCH 219/592] Fix help text being incorrect with multiple args E.g. "docker foobar run" would have printed "Command not found: foobar" and printed the help text for "run". It should instead print the root help message for docker. Signed-off-by: Ben Firshman --- api/client/cli.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/api/client/cli.go b/api/client/cli.go index 70eae6e4b4..74e645171a 100644 --- a/api/client/cli.go +++ b/api/client/cli.go @@ -75,11 +75,11 @@ func (cli *DockerCli) Cmd(args ...string) error { method, exists := cli.getMethod(args[0]) if !exists { fmt.Println("Error: Command not found:", args[0]) - return cli.CmdHelp(args[1:]...) + return cli.CmdHelp() } return method(args[1:]...) } - return cli.CmdHelp(args...) + return cli.CmdHelp() } func (cli *DockerCli) Subcmd(name, signature, description string) *flag.FlagSet { From 693b9d335cc1fe688a7fffb62e5da97a5d5a3b13 Mon Sep 17 00:00:00 2001 From: Tianon Gravi Date: Mon, 3 Nov 2014 10:12:05 -0700 Subject: [PATCH 220/592] Update "official repos" doc to mention 100 char short desc limit The Hub no longer accepts short descriptions over 100 characters. Signed-off-by: Andrew Page --- docs/sources/docker-hub/official_repos.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/sources/docker-hub/official_repos.md b/docs/sources/docker-hub/official_repos.md index 5a948c6263..4ec431238b 100644 --- a/docs/sources/docker-hub/official_repos.md +++ b/docs/sources/docker-hub/official_repos.md @@ -60,7 +60,7 @@ should also: * Be named `README-short.txt` * Reside in the repo for the “latest” tag -* Not exceed 200 characters +* Not exceed 100 characters ### A logo From 03ea2166b66632ee7cdd824d7acd87b15c9bccb2 Mon Sep 17 00:00:00 2001 From: Alexandr Morozov Date: Mon, 3 Nov 2014 10:50:16 -0800 Subject: [PATCH 221/592] Fix deadlock in ps exited filter Fixes #8909 Signed-off-by: Alexandr Morozov --- daemon/list.go | 2 +- integration-cli/docker_cli_ps_test.go | 78 +++++++++++++++++++++++++++ 2 files changed, 79 insertions(+), 1 deletion(-) diff --git a/daemon/list.go b/daemon/list.go index 347d3c20d8..29d7298fc2 100644 --- a/daemon/list.go +++ b/daemon/list.go @@ -99,7 +99,7 @@ func (daemon *Daemon) Containers(job *engine.Job) engine.Status { if len(filt_exited) > 0 && !container.Running { should_skip := true for _, code := range filt_exited { - if code == container.GetExitCode() { + if code == container.ExitCode { should_skip = false break } diff --git a/integration-cli/docker_cli_ps_test.go b/integration-cli/docker_cli_ps_test.go index 3874fa70b5..09207826bb 100644 --- a/integration-cli/docker_cli_ps_test.go +++ b/integration-cli/docker_cli_ps_test.go @@ -396,3 +396,81 @@ func TestPsListContainersFilterName(t *testing.T) { logDone("ps - test ps filter name") } + +func TestPsListContainersFilterExited(t *testing.T) { + deleteAllContainers() + defer deleteAllContainers() + runCmd := exec.Command(dockerBinary, "run", "--name", 
"zero1", "busybox", "true") + out, _, err := runCommandWithOutput(runCmd) + if err != nil { + t.Fatal(out, err) + } + firstZero, err := getIDByName("zero1") + if err != nil { + t.Fatal(err) + } + + runCmd = exec.Command(dockerBinary, "run", "--name", "zero2", "busybox", "true") + out, _, err = runCommandWithOutput(runCmd) + if err != nil { + t.Fatal(out, err) + } + secondZero, err := getIDByName("zero2") + if err != nil { + t.Fatal(err) + } + + runCmd = exec.Command(dockerBinary, "run", "--name", "nonzero1", "busybox", "false") + out, _, err = runCommandWithOutput(runCmd) + if err == nil { + t.Fatal("Should fail.", out, err) + } + firstNonZero, err := getIDByName("nonzero1") + if err != nil { + t.Fatal(err) + } + + runCmd = exec.Command(dockerBinary, "run", "--name", "nonzero2", "busybox", "false") + out, _, err = runCommandWithOutput(runCmd) + if err == nil { + t.Fatal("Should fail.", out, err) + } + secondNonZero, err := getIDByName("nonzero2") + if err != nil { + t.Fatal(err) + } + + // filter containers by exited=0 + runCmd = exec.Command(dockerBinary, "ps", "-a", "-q", "--no-trunc", "--filter=exited=0") + out, _, err = runCommandWithOutput(runCmd) + if err != nil { + t.Fatal(out, err) + } + ids := strings.Split(strings.TrimSpace(out), "\n") + if len(ids) != 2 { + t.Fatalf("Should be 2 zero exited containerst got %d", len(ids)) + } + if ids[0] != secondZero { + t.Fatalf("First in list should be %q, got %q", secondZero, ids[0]) + } + if ids[1] != firstZero { + t.Fatalf("Second in list should be %q, got %q", firstZero, ids[1]) + } + + runCmd = exec.Command(dockerBinary, "ps", "-a", "-q", "--no-trunc", "--filter=exited=1") + out, _, err = runCommandWithOutput(runCmd) + if err != nil { + t.Fatal(out, err) + } + ids = strings.Split(strings.TrimSpace(out), "\n") + if len(ids) != 2 { + t.Fatalf("Should be 2 zero exited containerst got %d", len(ids)) + } + if ids[0] != secondNonZero { + t.Fatalf("First in list should be %q, got %q", secondNonZero, ids[0]) + } + if ids[1] != firstNonZero { + t.Fatalf("Second in list should be %q, got %q", firstNonZero, ids[1]) + } + logDone("ps - test ps filter exited") +} From 0e217c4a9e123551843ceeecdefe5d66b408cdf3 Mon Sep 17 00:00:00 2001 From: Vincent Batts Date: Mon, 3 Nov 2014 14:01:50 -0500 Subject: [PATCH 222/592] pkg/mount: adding fields supported by freebsd Signed-off-by: Vincent Batts --- pkg/mount/mountinfo_freebsd.go | 2 ++ 1 file changed, 2 insertions(+) diff --git a/pkg/mount/mountinfo_freebsd.go b/pkg/mount/mountinfo_freebsd.go index a16bdb84f8..2fe91862d8 100644 --- a/pkg/mount/mountinfo_freebsd.go +++ b/pkg/mount/mountinfo_freebsd.go @@ -32,6 +32,8 @@ func parseMountTable() ([]*MountInfo, error) { for _, entry := range entries { var mountinfo MountInfo mountinfo.Mountpoint = C.GoString(&entry.f_mntonname[0]) + mountinfo.Source = C.GoString(&entry.f_mntfromname[0]) + mountinfo.Fstype = C.GoString(&entry.f_fstypename[0]) out = append(out, &mountinfo) } return out, nil From a12d89739412dc9fa9f961e0e4a310ec5a53bd74 Mon Sep 17 00:00:00 2001 From: Doug Davis Date: Mon, 3 Nov 2014 11:31:37 -0800 Subject: [PATCH 223/592] Update logrus to v0.6.0 so we can use ParseLevel() for PR #8335 Signed-off-by: Doug Davis --- hack/vendor.sh | 2 +- .../github.com/Sirupsen/logrus/.travis.yml | 6 +- .../src/github.com/Sirupsen/logrus/README.md | 16 ++-- .../src/github.com/Sirupsen/logrus/entry.go | 26 +++--- .../github.com/Sirupsen/logrus/exported.go | 4 +- .../github.com/Sirupsen/logrus/formatter.go | 12 +-- .../Sirupsen/logrus/json_formatter.go | 7 +- 
.../src/github.com/Sirupsen/logrus/logrus.go | 22 +++++ .../github.com/Sirupsen/logrus/logrus_test.go | 74 +++++++++++++++++ .../Sirupsen/logrus/text_formatter.go | 81 ++++++++++--------- 10 files changed, 181 insertions(+), 69 deletions(-) diff --git a/hack/vendor.sh b/hack/vendor.sh index a537ff18b5..85be29303e 100755 --- a/hack/vendor.sh +++ b/hack/vendor.sh @@ -53,7 +53,7 @@ clone hg code.google.com/p/gosqlite 74691fb6f837 clone git github.com/docker/libtrust d273ef2565ca -clone git github.com/Sirupsen/logrus v0.5.1 +clone git github.com/Sirupsen/logrus v0.6.0 # get Go tip's archive/tar, for xattr support and improved performance # TODO after Go 1.4 drops, bump our minimum supported version and drop this vendored dep diff --git a/vendor/src/github.com/Sirupsen/logrus/.travis.yml b/vendor/src/github.com/Sirupsen/logrus/.travis.yml index 2efbc54a17..d5a559f840 100644 --- a/vendor/src/github.com/Sirupsen/logrus/.travis.yml +++ b/vendor/src/github.com/Sirupsen/logrus/.travis.yml @@ -1,7 +1,9 @@ language: go go: - - 1.1 - 1.2 + - 1.3 - tip -before_script: +install: - go get github.com/stretchr/testify + - go get github.com/stvp/go-udp-testing + - go get github.com/tobi/airbrake-go diff --git a/vendor/src/github.com/Sirupsen/logrus/README.md b/vendor/src/github.com/Sirupsen/logrus/README.md index 6843fcc0e8..01769c723f 100644 --- a/vendor/src/github.com/Sirupsen/logrus/README.md +++ b/vendor/src/github.com/Sirupsen/logrus/README.md @@ -81,7 +81,7 @@ func init() { // Use the Airbrake hook to report errors that have Error severity or above to // an exception tracker. You can create custom hooks, see the Hooks section. - log.AddHook(logrus_airbrake.AirbrakeHook) + log.AddHook(&logrus_airbrake.AirbrakeHook{}) // Output to stderr instead of stdout, could also be a file. log.SetOutput(os.Stderr) @@ -126,7 +126,7 @@ func main() { // exported logger. See Godoc. log.Out = os.Stderr - log.WithFields(log.Fields{ + log.WithFields(logrus.Fields{ "animal": "walrus", "size": 10, }).Info("A group of walrus emerges from the ocean") @@ -214,14 +214,20 @@ func init() { } ``` -* [`github.com/Sirupsen/logrus/hooks/airbrake`](https://github.com/Sirupsen/logrus/blob/master/hooks/airbrake/airbrake.go). +* [`github.com/Sirupsen/logrus/hooks/airbrake`](https://github.com/Sirupsen/logrus/blob/master/hooks/airbrake/airbrake.go) Send errors to an exception tracking service compatible with the Airbrake API. Uses [`airbrake-go`](https://github.com/tobi/airbrake-go) behind the scenes. -* [`github.com/Sirupsen/logrus/hooks/syslog`](https://github.com/Sirupsen/logrus/blob/master/hooks/syslog/syslog.go). +* [`github.com/Sirupsen/logrus/hooks/papertrail`](https://github.com/Sirupsen/logrus/blob/master/hooks/papertrail/papertrail.go) + Send errors to the Papertrail hosted logging service via UDP. + +* [`github.com/Sirupsen/logrus/hooks/syslog`](https://github.com/Sirupsen/logrus/blob/master/hooks/syslog/syslog.go) Send errors to remote syslog server. Uses standard library `log/syslog` behind the scenes. +* [`github.com/nubo/hiprus`](https://github.com/nubo/hiprus) + Send errors to a channel in hipchat. + #### Level logging Logrus has six logging levels: Debug, Info, Warning, Error, Fatal and Panic. @@ -295,7 +301,7 @@ The built-in logging formatters are: * `logrus.TextFormatter`. Logs the event in colors if stdout is a tty, otherwise without colors. * *Note:* to force colored output when there is no TTY, set the `ForceColors` - field to `true`. To force no colored output even if there is a TTY set the + field to `true`. 
To force no colored output even if there is a TTY set the `DisableColors` field to `true` * `logrus.JSONFormatter`. Logs fields as JSON. diff --git a/vendor/src/github.com/Sirupsen/logrus/entry.go b/vendor/src/github.com/Sirupsen/logrus/entry.go index 44ff0566c9..a77c4b0ed1 100644 --- a/vendor/src/github.com/Sirupsen/logrus/entry.go +++ b/vendor/src/github.com/Sirupsen/logrus/entry.go @@ -8,7 +8,7 @@ import ( "time" ) -// An entry is the final or intermediate Logrus logging entry. It containts all +// An entry is the final or intermediate Logrus logging entry. It contains all // the fields passed with WithField{,s}. It's finally logged when Debug, Info, // Warn, Error, Fatal or Panic is called on it. These objects can be reused and // passed around as much as you wish to avoid field duplication. @@ -28,8 +28,6 @@ type Entry struct { Message string } -var baseTimestamp time.Time - func NewEntry(logger *Logger) *Entry { return &Entry{ Logger: logger, @@ -72,18 +70,22 @@ func (entry *Entry) WithFields(fields Fields) *Entry { return &Entry{Logger: entry.Logger, Data: data} } -func (entry *Entry) log(level Level, msg string) string { +func (entry *Entry) log(level Level, msg string) { entry.Time = time.Now() entry.Level = level entry.Message = msg if err := entry.Logger.Hooks.Fire(level, entry); err != nil { - fmt.Fprintf(os.Stderr, "Failed to fire hook", err) + entry.Logger.mu.Lock() + fmt.Fprintf(os.Stderr, "Failed to fire hook: %v\n", err) + entry.Logger.mu.Unlock() } reader, err := entry.Reader() if err != nil { - fmt.Fprintf(os.Stderr, "Failed to obtain reader, %v", err) + entry.Logger.mu.Lock() + fmt.Fprintf(os.Stderr, "Failed to obtain reader, %v\n", err) + entry.Logger.mu.Unlock() } entry.Logger.mu.Lock() @@ -91,10 +93,15 @@ func (entry *Entry) log(level Level, msg string) string { _, err = io.Copy(entry.Logger.Out, reader) if err != nil { - fmt.Fprintf(os.Stderr, "Failed to write to log, %v", err) + fmt.Fprintf(os.Stderr, "Failed to write to log, %v\n", err) } - return reader.String() + // To avoid Entry#log() returning a value that only would make sense for + // panic() to use in Entry#Panic(), we avoid the allocation by checking + // directly here. + if level <= PanicLevel { + panic(reader.String()) + } } func (entry *Entry) Debug(args ...interface{}) { @@ -134,8 +141,7 @@ func (entry *Entry) Fatal(args ...interface{}) { func (entry *Entry) Panic(args ...interface{}) { if entry.Logger.Level >= PanicLevel { - msg := entry.log(PanicLevel, fmt.Sprint(args...)) - panic(msg) + entry.log(PanicLevel, fmt.Sprint(args...)) } panic(fmt.Sprint(args...)) } diff --git a/vendor/src/github.com/Sirupsen/logrus/exported.go b/vendor/src/github.com/Sirupsen/logrus/exported.go index 383ce93d4d..0e2d59f19a 100644 --- a/vendor/src/github.com/Sirupsen/logrus/exported.go +++ b/vendor/src/github.com/Sirupsen/logrus/exported.go @@ -96,7 +96,7 @@ func Fatal(args ...interface{}) { std.Fatal(args...) } -// Debugf logs a message at level Debugf on the standard logger. +// Debugf logs a message at level Debug on the standard logger. func Debugf(format string, args ...interface{}) { std.Debugf(format, args...) } @@ -126,7 +126,7 @@ func Errorf(format string, args ...interface{}) { std.Errorf(format, args...) } -// Panicf logs a message at level Pancf on the standard logger. +// Panicf logs a message at level Panic on the standard logger. func Panicf(format string, args ...interface{}) { std.Panicf(format, args...) 
} diff --git a/vendor/src/github.com/Sirupsen/logrus/formatter.go b/vendor/src/github.com/Sirupsen/logrus/formatter.go index fc0ebd7a97..74c49a0e0e 100644 --- a/vendor/src/github.com/Sirupsen/logrus/formatter.go +++ b/vendor/src/github.com/Sirupsen/logrus/formatter.go @@ -1,9 +1,5 @@ package logrus -import ( - "time" -) - // The Formatter interface is used to implement a custom Formatter. It takes an // `Entry`. It exposes all the fields, including the default ones: // @@ -28,7 +24,7 @@ type Formatter interface { // // {"level": "info", "fields.level": 1, "msg": "hello", "time": "..."} // -// It's not exported because it's still using Data in an opionated way. It's to +// It's not exported because it's still using Data in an opinionated way. It's to // avoid code duplication between the two default formatters. func prefixFieldClashes(entry *Entry) { _, ok := entry.Data["time"] @@ -36,19 +32,13 @@ func prefixFieldClashes(entry *Entry) { entry.Data["fields.time"] = entry.Data["time"] } - entry.Data["time"] = entry.Time.Format(time.RFC3339) - _, ok = entry.Data["msg"] if ok { entry.Data["fields.msg"] = entry.Data["msg"] } - entry.Data["msg"] = entry.Message - _, ok = entry.Data["level"] if ok { entry.Data["fields.level"] = entry.Data["level"] } - - entry.Data["level"] = entry.Level.String() } diff --git a/vendor/src/github.com/Sirupsen/logrus/json_formatter.go b/vendor/src/github.com/Sirupsen/logrus/json_formatter.go index c0e2d18436..9d11b642d4 100644 --- a/vendor/src/github.com/Sirupsen/logrus/json_formatter.go +++ b/vendor/src/github.com/Sirupsen/logrus/json_formatter.go @@ -3,13 +3,16 @@ package logrus import ( "encoding/json" "fmt" + "time" ) -type JSONFormatter struct { -} +type JSONFormatter struct{} func (f *JSONFormatter) Format(entry *Entry) ([]byte, error) { prefixFieldClashes(entry) + entry.Data["time"] = entry.Time.Format(time.RFC3339) + entry.Data["msg"] = entry.Message + entry.Data["level"] = entry.Level.String() serialized, err := json.Marshal(entry.Data) if err != nil { diff --git a/vendor/src/github.com/Sirupsen/logrus/logrus.go b/vendor/src/github.com/Sirupsen/logrus/logrus.go index 79df39cb71..43ee12e90e 100644 --- a/vendor/src/github.com/Sirupsen/logrus/logrus.go +++ b/vendor/src/github.com/Sirupsen/logrus/logrus.go @@ -1,6 +1,7 @@ package logrus import ( + "fmt" "log" ) @@ -30,6 +31,27 @@ func (level Level) String() string { return "unknown" } +// ParseLevel takes a string level and returns the Logrus log level constant. +func ParseLevel(lvl string) (Level, error) { + switch lvl { + case "panic": + return PanicLevel, nil + case "fatal": + return FatalLevel, nil + case "error": + return ErrorLevel, nil + case "warn", "warning": + return WarnLevel, nil + case "info": + return InfoLevel, nil + case "debug": + return DebugLevel, nil + } + + var l Level + return l, fmt.Errorf("not a valid logrus Level: %q", lvl) +} + // These are the different logging levels. You can set the logging level to log // on your instance of logger, obtained with `logrus.New()`. 
const ( diff --git a/vendor/src/github.com/Sirupsen/logrus/logrus_test.go b/vendor/src/github.com/Sirupsen/logrus/logrus_test.go index 6202300366..15157d172d 100644 --- a/vendor/src/github.com/Sirupsen/logrus/logrus_test.go +++ b/vendor/src/github.com/Sirupsen/logrus/logrus_test.go @@ -3,6 +3,8 @@ package logrus import ( "bytes" "encoding/json" + "strconv" + "strings" "testing" "github.com/stretchr/testify/assert" @@ -24,6 +26,31 @@ func LogAndAssertJSON(t *testing.T, log func(*Logger), assertions func(fields Fi assertions(fields) } +func LogAndAssertText(t *testing.T, log func(*Logger), assertions func(fields map[string]string)) { + var buffer bytes.Buffer + + logger := New() + logger.Out = &buffer + logger.Formatter = &TextFormatter{ + DisableColors: true, + } + + log(logger) + + fields := make(map[string]string) + for _, kv := range strings.Split(buffer.String(), " ") { + if !strings.Contains(kv, "=") { + continue + } + kvArr := strings.Split(kv, "=") + key := strings.TrimSpace(kvArr[0]) + val, err := strconv.Unquote(kvArr[1]) + assert.NoError(t, err) + fields[key] = val + } + assertions(fields) +} + func TestPrint(t *testing.T) { LogAndAssertJSON(t, func(log *Logger) { log.Print("test") @@ -163,6 +190,20 @@ func TestUserSuppliedLevelFieldHasPrefix(t *testing.T) { }) } +func TestDefaultFieldsAreNotPrefixed(t *testing.T) { + LogAndAssertText(t, func(log *Logger) { + ll := log.WithField("herp", "derp") + ll.Info("hello") + ll.Info("bye") + }, func(fields map[string]string) { + for _, fieldName := range []string{"fields.level", "fields.time", "fields.msg"} { + if _, ok := fields[fieldName]; ok { + t.Fatalf("should not have prefixed %q: %v", fieldName, fields) + } + } + }) +} + func TestConvertLevelToString(t *testing.T) { assert.Equal(t, "debug", DebugLevel.String()) assert.Equal(t, "info", InfoLevel.String()) @@ -171,3 +212,36 @@ func TestConvertLevelToString(t *testing.T) { assert.Equal(t, "fatal", FatalLevel.String()) assert.Equal(t, "panic", PanicLevel.String()) } + +func TestParseLevel(t *testing.T) { + l, err := ParseLevel("panic") + assert.Nil(t, err) + assert.Equal(t, PanicLevel, l) + + l, err = ParseLevel("fatal") + assert.Nil(t, err) + assert.Equal(t, FatalLevel, l) + + l, err = ParseLevel("error") + assert.Nil(t, err) + assert.Equal(t, ErrorLevel, l) + + l, err = ParseLevel("warn") + assert.Nil(t, err) + assert.Equal(t, WarnLevel, l) + + l, err = ParseLevel("warning") + assert.Nil(t, err) + assert.Equal(t, WarnLevel, l) + + l, err = ParseLevel("info") + assert.Nil(t, err) + assert.Equal(t, InfoLevel, l) + + l, err = ParseLevel("debug") + assert.Nil(t, err) + assert.Equal(t, DebugLevel, l) + + l, err = ParseLevel("invalid") + assert.Equal(t, "not a valid logrus Level: \"invalid\"", err.Error()) +} diff --git a/vendor/src/github.com/Sirupsen/logrus/text_formatter.go b/vendor/src/github.com/Sirupsen/logrus/text_formatter.go index 4b93690e7d..fc0a4082a7 100644 --- a/vendor/src/github.com/Sirupsen/logrus/text_formatter.go +++ b/vendor/src/github.com/Sirupsen/logrus/text_formatter.go @@ -16,8 +16,14 @@ const ( blue = 34 ) +var ( + baseTimestamp time.Time + isTerminal bool +) + func init() { baseTimestamp = time.Now() + isTerminal = IsTerminal() } func miniTS() int { @@ -31,45 +37,27 @@ type TextFormatter struct { } func (f *TextFormatter) Format(entry *Entry) ([]byte, error) { + + var keys []string + for k := range entry.Data { + keys = append(keys, k) + } + sort.Strings(keys) + b := &bytes.Buffer{} prefixFieldClashes(entry) - if (f.ForceColors || IsTerminal()) && !f.DisableColors { - 
levelText := strings.ToUpper(entry.Data["level"].(string))[0:4] + isColored := (f.ForceColors || isTerminal) && !f.DisableColors - levelColor := blue - - if entry.Data["level"] == "warning" { - levelColor = yellow - } else if entry.Data["level"] == "error" || - entry.Data["level"] == "fatal" || - entry.Data["level"] == "panic" { - levelColor = red - } - - fmt.Fprintf(b, "\x1b[%dm%s\x1b[0m[%04d] %-44s ", levelColor, levelText, miniTS(), entry.Data["msg"]) - - keys := make([]string, 0) - for k, _ := range entry.Data { - if k != "level" && k != "time" && k != "msg" { - keys = append(keys, k) - } - } - sort.Strings(keys) - for _, k := range keys { - v := entry.Data[k] - fmt.Fprintf(b, " \x1b[%dm%s\x1b[0m=%v", levelColor, k, v) - } + if isColored { + printColored(b, entry, keys) } else { - f.AppendKeyValue(b, "time", entry.Data["time"].(string)) - f.AppendKeyValue(b, "level", entry.Data["level"].(string)) - f.AppendKeyValue(b, "msg", entry.Data["msg"].(string)) - - for key, value := range entry.Data { - if key != "time" && key != "level" && key != "msg" { - f.AppendKeyValue(b, key, value) - } + f.appendKeyValue(b, "time", entry.Time.Format(time.RFC3339)) + f.appendKeyValue(b, "level", entry.Level.String()) + f.appendKeyValue(b, "msg", entry.Message) + for _, key := range keys { + f.appendKeyValue(b, key, entry.Data[key]) } } @@ -77,10 +65,31 @@ func (f *TextFormatter) Format(entry *Entry) ([]byte, error) { return b.Bytes(), nil } -func (f *TextFormatter) AppendKeyValue(b *bytes.Buffer, key, value interface{}) { - if _, ok := value.(string); ok { +func printColored(b *bytes.Buffer, entry *Entry, keys []string) { + var levelColor int + switch entry.Level { + case WarnLevel: + levelColor = yellow + case ErrorLevel, FatalLevel, PanicLevel: + levelColor = red + default: + levelColor = blue + } + + levelText := strings.ToUpper(entry.Level.String())[0:4] + + fmt.Fprintf(b, "\x1b[%dm%s\x1b[0m[%04d] %-44s ", levelColor, levelText, miniTS(), entry.Message) + for _, k := range keys { + v := entry.Data[k] + fmt.Fprintf(b, " \x1b[%dm%s\x1b[0m=%v", levelColor, k, v) + } +} + +func (f *TextFormatter) appendKeyValue(b *bytes.Buffer, key, value interface{}) { + switch value.(type) { + case string, error: fmt.Fprintf(b, "%v=%q ", key, value) - } else { + default: fmt.Fprintf(b, "%v=%v ", key, value) } } From 1a1b0f223d5b877271d1484d6235c4a7b9e4cb32 Mon Sep 17 00:00:00 2001 From: Fred Lifton Date: Fri, 31 Oct 2014 10:55:55 -0700 Subject: [PATCH 224/592] Edits and refinements to 1.3.1 release notes. Docker-DCO-1.1-Signed-off-by: Fred Lifton (github: fredlf) --- docs/sources/release-notes.md | 23 ++++++++++++----------- 1 file changed, 12 insertions(+), 11 deletions(-) diff --git a/docs/sources/release-notes.md b/docs/sources/release-notes.md index 6b2f0fd571..b4627e88b4 100644 --- a/docs/sources/release-notes.md +++ b/docs/sources/release-notes.md @@ -1,6 +1,6 @@ -page_title: Docker 1.x Series Release Notes page_description: Release Notes for -Docker 1.x. page_keywords: docker, documentation, about, technology, -understanding, release +page_title: Docker 1.x Series Release Notes +page_description: Release Notes for Docker 1.x. +page_keywords: docker, documentation, about, technology, understanding, release #Release Notes @@ -11,25 +11,26 @@ This release fixes some bugs and addresses some security issues. *Security fixes* -Patches and changes were made to address CVE-2014-5277 and CVE-2014-3566. 
Specifically, changes were made to: +Patches and changes were made to address [CVE-2014-5277 and CVE-2014-3566](https://groups.google.com/forum/#!topic/docker-user/oYm0i3xShJU). +Specifically, changes were made to: + * Prevent fallback to SSL protocols < TLS 1.0 for client, daemon and registry -* Secure HTTPS connection to registries with certificate verification and without HTTP fallback unless `--insecure-registry` is specified. +* Secure HTTPS connection to registries with certificate verification and without HTTP fallback unless [`--insecure-registry`](/reference/commandline/cli/#run) is specified. *Runtime fixes* -* Fixed issue where volumes would not be shared +* Fixed issue where volumes would not be shared. *Client fixes* * Fixed issue with `--iptables=false` not automatically setting -`--ip-masq=false` -* Fixed docker run output to non-TTY stdout +`--ip-masq=false`. +* Fixed docker run output to non-TTY stdout. *Builder fixes* -* Fixed escaping `$` for environment variables -* Fixed issue with lowercase `onbuild` Dockerfile instruction - +* Fixed escaping `$` for environment variables. +* Fixed issue with lowercase `onbuild` instruction in a `Dockerfile`. ##Version 1.3.0 From 06f366bf0041a223ae848e6a8df0c8cc41f08001 Mon Sep 17 00:00:00 2001 From: Fred Lifton Date: Mon, 3 Nov 2014 13:51:51 -0800 Subject: [PATCH 225/592] Added missing item from Changelog Docker-DCO-1.1-Signed-off-by: Fred Lifton (github: fredlf) --- docs/sources/release-notes.md | 2 ++ 1 file changed, 2 insertions(+) diff --git a/docs/sources/release-notes.md b/docs/sources/release-notes.md index b4627e88b4..a74eea43b5 100644 --- a/docs/sources/release-notes.md +++ b/docs/sources/release-notes.md @@ -31,6 +31,8 @@ Specifically, changes were made to: * Fixed escaping `$` for environment variables. * Fixed issue with lowercase `onbuild` instruction in a `Dockerfile`. +* Restricted environment variable expansion to `ENV`, `ADD`, `COPY`, `WORKDIR`, +`EXPOSE`, `VOLUME`, and `USER` ##Version 1.3.0 From 26184de8ab1dfe812094c55c9becd8ebb60ed7be Mon Sep 17 00:00:00 2001 From: Josh Hawn Date: Mon, 27 Oct 2014 11:00:29 -0700 Subject: [PATCH 226/592] Remove `jsonData` argument from `image.StoreImage` The argument specified the json data to save to disk when registering a new image into the image graph. If it is nil, then the given image is serialized to json and that is written by default. This default behavior is sufficient if the given image was originally deserialzed from this jsonData to begin with which has always been the case. Signed-off-by: Josh Hawn (github: jlhawn) --- graph/graph.go | 6 +++--- graph/load.go | 2 +- graph/pull.go | 4 ++-- graph/service.go | 2 +- graph/tags_unit_test.go | 2 +- image/image.go | 22 ++++++++-------------- integration/graph_test.go | 14 +++++++------- 7 files changed, 23 insertions(+), 29 deletions(-) diff --git a/graph/graph.go b/graph/graph.go index 75b1825034..720f6e6963 100644 --- a/graph/graph.go +++ b/graph/graph.go @@ -132,14 +132,14 @@ func (graph *Graph) Create(layerData archive.ArchiveReader, containerID, contain img.ContainerConfig = *containerConfig } - if err := graph.Register(img, nil, layerData); err != nil { + if err := graph.Register(img, layerData); err != nil { return nil, err } return img, nil } // Register imports a pre-existing image into the graph. 
-func (graph *Graph) Register(img *image.Image, jsonData []byte, layerData archive.ArchiveReader) (err error) { +func (graph *Graph) Register(img *image.Image, layerData archive.ArchiveReader) (err error) { defer func() { // If any error occurs, remove the new dir from the driver. // Don't check for errors since the dir might not have been created. @@ -181,7 +181,7 @@ func (graph *Graph) Register(img *image.Image, jsonData []byte, layerData archiv } // Apply the diff/layer img.SetGraph(graph) - if err := image.StoreImage(img, jsonData, layerData, tmp); err != nil { + if err := image.StoreImage(img, layerData, tmp); err != nil { return err } // Commit diff --git a/graph/load.go b/graph/load.go index 05e963daaa..875741ecf7 100644 --- a/graph/load.go +++ b/graph/load.go @@ -118,7 +118,7 @@ func (s *TagStore) recursiveLoad(eng *engine.Engine, address, tmpImageDir string } } } - if err := s.graph.Register(img, imageJson, layer); err != nil { + if err := s.graph.Register(img, layer); err != nil { return err } } diff --git a/graph/pull.go b/graph/pull.go index 9345d7d489..0e85f2df5f 100644 --- a/graph/pull.go +++ b/graph/pull.go @@ -392,7 +392,7 @@ func (s *TagStore) pullImage(r *registry.Session, out io.Writer, imgID, endpoint layers_downloaded = true defer layer.Close() - err = s.graph.Register(img, imgJSON, + err = s.graph.Register(img, utils.ProgressReader(layer, imgSize, out, sf, false, utils.TruncateID(id), "Downloading")) if terr, ok := err.(net.Error); ok && terr.Timeout() && j < retries { time.Sleep(time.Duration(j) * 500 * time.Millisecond) @@ -577,7 +577,7 @@ func (s *TagStore) pullV2Tag(eng *engine.Engine, r *registry.Session, out io.Wri defer d.tmpFile.Close() d.tmpFile.Seek(0, 0) if d.tmpFile != nil { - err = s.graph.Register(d.img, d.imgJSON, + err = s.graph.Register(d.img, utils.ProgressReader(d.tmpFile, int(d.length), out, sf, false, utils.TruncateID(d.img.ID), "Extracting")) if err != nil { return false, err diff --git a/graph/service.go b/graph/service.go index 9b1509af29..6f020e8d02 100644 --- a/graph/service.go +++ b/graph/service.go @@ -74,7 +74,7 @@ func (s *TagStore) CmdSet(job *engine.Job) engine.Status { if err != nil { return job.Error(err) } - if err := s.graph.Register(img, imgJSON, layer); err != nil { + if err := s.graph.Register(img, layer); err != nil { return job.Error(err) } return engine.StatusOK diff --git a/graph/tags_unit_test.go b/graph/tags_unit_test.go index e4f1fb809f..1b87565dc7 100644 --- a/graph/tags_unit_test.go +++ b/graph/tags_unit_test.go @@ -62,7 +62,7 @@ func mkTestTagStore(root string, t *testing.T) *TagStore { t.Fatal(err) } img := &image.Image{ID: testImageID} - if err := graph.Register(img, nil, archive); err != nil { + if err := graph.Register(img, archive); err != nil { t.Fatal(err) } if err := store.Set(testImageName, "", testImageID, false); err != nil { diff --git a/image/image.go b/image/image.go index 728a188a14..47df76d9f3 100644 --- a/image/image.go +++ b/image/image.go @@ -70,7 +70,7 @@ func LoadImage(root string) (*Image, error) { return img, nil } -func StoreImage(img *Image, jsonData []byte, layerData archive.ArchiveReader, root string) error { +func StoreImage(img *Image, layerData archive.ArchiveReader, root string) error { // Store the layer var ( size int64 @@ -90,20 +90,14 @@ func StoreImage(img *Image, jsonData []byte, layerData archive.ArchiveReader, ro return err } - // If raw json is provided, then use it - if jsonData != nil { - if err := ioutil.WriteFile(jsonPath(root), jsonData, 0600); err != nil { - return err - } - 
} else { - if jsonData, err = json.Marshal(img); err != nil { - return err - } - if err := ioutil.WriteFile(jsonPath(root), jsonData, 0600); err != nil { - return err - } + f, err := os.OpenFile(jsonPath(root), os.O_CREATE|os.O_WRONLY|os.O_TRUNC, os.FileMode(0600)) + if err != nil { + return err } - return nil + + defer f.Close() + + return json.NewEncoder(f).Encode(img) } func (img *Image) SetGraph(graph Graph) { diff --git a/integration/graph_test.go b/integration/graph_test.go index 203476cbb2..56e5a90642 100644 --- a/integration/graph_test.go +++ b/integration/graph_test.go @@ -74,7 +74,7 @@ func TestInterruptedRegister(t *testing.T) { Created: time.Now(), } w.CloseWithError(errors.New("But I'm not a tarball!")) // (Nobody's perfect, darling) - graph.Register(image, nil, badArchive) + graph.Register(image, badArchive) if _, err := graph.Get(image.ID); err == nil { t.Fatal("Image should not exist after Register is interrupted") } @@ -83,7 +83,7 @@ func TestInterruptedRegister(t *testing.T) { if err != nil { t.Fatal(err) } - if err := graph.Register(image, nil, goodArchive); err != nil { + if err := graph.Register(image, goodArchive); err != nil { t.Fatal(err) } } @@ -133,7 +133,7 @@ func TestRegister(t *testing.T) { Comment: "testing", Created: time.Now(), } - err = graph.Register(image, nil, archive) + err = graph.Register(image, archive) if err != nil { t.Fatal(err) } @@ -228,7 +228,7 @@ func TestDelete(t *testing.T) { t.Fatal(err) } // Test delete twice (pull -> rm -> pull -> rm) - if err := graph.Register(img1, nil, archive); err != nil { + if err := graph.Register(img1, archive); err != nil { t.Fatal(err) } if err := graph.Delete(img1.ID); err != nil { @@ -262,9 +262,9 @@ func TestByParent(t *testing.T) { Created: time.Now(), Parent: parentImage.ID, } - _ = graph.Register(parentImage, nil, archive1) - _ = graph.Register(childImage1, nil, archive2) - _ = graph.Register(childImage2, nil, archive3) + _ = graph.Register(parentImage, archive1) + _ = graph.Register(childImage1, archive2) + _ = graph.Register(childImage2, archive3) byParent, err := graph.ByParent() if err != nil { From 471adb73a9c3a23ac7c599621531b5fada1a127f Mon Sep 17 00:00:00 2001 From: Jessica Frazelle Date: Mon, 3 Nov 2014 17:46:33 -0800 Subject: [PATCH 227/592] Remove travis CI. Docker-DCO-1.1-Signed-off-by: Jessica Frazelle (github: jfrazelle) --- .travis.yml | 39 --------------------------------------- 1 file changed, 39 deletions(-) delete mode 100644 .travis.yml diff --git a/.travis.yml b/.travis.yml deleted file mode 100644 index 174afae88a..0000000000 --- a/.travis.yml +++ /dev/null @@ -1,39 +0,0 @@ -# Note: right now we don't use go-specific features of travis. -# Later we might automate "go test" etc. (or do it inside a docker container...?) - -language: go - -go: -# This should match the version in the Dockerfile. - - 1.3.1 -# Test against older versions too, just for a little extra retrocompat. - - 1.2 - -# Let us have pretty experimental Docker-based Travis workers. -# (These spin up much faster than the VM-based ones.) -sudo: false - -# Disable the normal go build. 
-install: - - export DOCKER_BUILDTAGS='exclude_graphdriver_btrfs exclude_graphdriver_devicemapper' # btrfs and devicemapper fail to compile thanks to a couple missing headers (which we can't install thanks to "sudo: false") - - export AUTO_GOPATH=1 -# some of Docker's unit tests don't work inside Travis (yet!), so we purge those test files for now - - rm -f daemon/graphdriver/btrfs/*_test.go # fails to compile (missing header) - - rm -f daemon/graphdriver/devmapper/*_test.go # fails to compile (missing header) - - rm -f daemon/execdriver/lxc/*_test.go # fails to run (missing "lxc-start") - - rm -f daemon/graphdriver/aufs/*_test.go # fails to run ("backing file system is unsupported for this graph driver") - - rm -f daemon/graphdriver/vfs/*_test.go # fails to run (not root, which these tests assume "/var/tmp/... no owned by uid 0") - - rm -f daemon/networkdriver/bridge/*_test.go # fails to run ("Failed to initialize network driver") - - rm -f graph/*_test.go # fails to run ("mkdir /tmp/docker-test.../vfs/dir/foo/etc/postgres: permission denied") - - rm -f pkg/mount/*_test.go # fails to run ("permission denied") - -before_script: - - env | sort - -script: - - hack/make.sh validate-dco - - hack/make.sh validate-gofmt - - DOCKER_CLIENTONLY=1 ./hack/make.sh dynbinary - - ./hack/make.sh dynbinary dyntest-unit - -# vim:set sw=2 ts=2: From 4b4b88a8730b459944b0b0ab51adc7d401c87667 Mon Sep 17 00:00:00 2001 From: Jessica Frazelle Date: Mon, 3 Nov 2014 17:48:50 -0800 Subject: [PATCH 228/592] Switch travis status to drone. Docker-DCO-1.1-Signed-off-by: Jessica Frazelle (github: jfrazelle) --- README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.md b/README.md index 857cd3c70a..2d143a6d6e 100644 --- a/README.md +++ b/README.md @@ -178,7 +178,7 @@ Contributing to Docker ====================== [![GoDoc](https://godoc.org/github.com/docker/docker?status.png)](https://godoc.org/github.com/docker/docker) -[![Travis](https://travis-ci.org/docker/docker.svg?branch=master)](https://travis-ci.org/docker/docker) +[![Build Status](https://ci.dockerproject.com/github.com/docker/docker/status.svg?branch=master)](https://ci.dockerproject.com/github.com/docker/docker) Want to hack on Docker? Awesome! There are instructions to get you started [here](CONTRIBUTING.md). From 91b4ac320fd91a6b776713b3c35a51da96024b32 Mon Sep 17 00:00:00 2001 From: Vincent Batts Date: Mon, 3 Nov 2014 22:05:04 -0500 Subject: [PATCH 229/592] pkg/mount: include optional field one linux, the optional field designates the sharedsubtree information, if any. 
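For reference, a representative mountinfo line (sample values only, in the field layout the parser's comment documents); the optional field here is `master:1`, sitting between the per-mount options and the `-` separator, and it is what now lands in `MountInfo.Optional`:

```
36 35 98:0 /mnt1 /mnt2 rw,noatime master:1 - ext3 /dev/root rw,errors=continue
```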
Signed-off-by: Vincent Batts --- pkg/mount/mountinfo.go | 6 +++--- pkg/mount/mountinfo_linux.go | 13 +++++++++---- 2 files changed, 12 insertions(+), 7 deletions(-) diff --git a/pkg/mount/mountinfo.go b/pkg/mount/mountinfo.go index 78b83ced4a..ec8e8bca2a 100644 --- a/pkg/mount/mountinfo.go +++ b/pkg/mount/mountinfo.go @@ -1,7 +1,7 @@ package mount type MountInfo struct { - Id, Parent, Major, Minor int - Root, Mountpoint, Opts string - Fstype, Source, VfsOpts string + Id, Parent, Major, Minor int + Root, Mountpoint, Opts, Optional string + Fstype, Source, VfsOpts string } diff --git a/pkg/mount/mountinfo_linux.go b/pkg/mount/mountinfo_linux.go index 84bf5516b5..e6c28da535 100644 --- a/pkg/mount/mountinfo_linux.go +++ b/pkg/mount/mountinfo_linux.go @@ -23,7 +23,7 @@ const ( (9) filesystem type: name of filesystem of the form "type[.subtype]" (10) mount source: filesystem specific information or "none" (11) super options: per super block options*/ - mountinfoFormat = "%d %d %d:%d %s %s %s " + mountinfoFormat = "%d %d %d:%d %s %s %s %s" ) // Parse /proc/self/mountinfo because comparing Dev and ino does not work from bind mounts @@ -49,13 +49,14 @@ func parseInfoFile(r io.Reader) ([]*MountInfo, error) { } var ( - p = &MountInfo{} - text = s.Text() + p = &MountInfo{} + text = s.Text() + optionalFields string ) if _, err := fmt.Sscanf(text, mountinfoFormat, &p.Id, &p.Parent, &p.Major, &p.Minor, - &p.Root, &p.Mountpoint, &p.Opts); err != nil { + &p.Root, &p.Mountpoint, &p.Opts, &optionalFields); err != nil { return nil, fmt.Errorf("Scanning '%s' failed: %s", text, err) } // Safe as mountinfo encodes mountpoints with spaces as \040. @@ -65,6 +66,10 @@ func parseInfoFile(r io.Reader) ([]*MountInfo, error) { return nil, fmt.Errorf("Error found less than 3 fields post '-' in %q", text) } + if optionalFields != "-" { + p.Optional = optionalFields + } + p.Fstype = postSeparatorFields[0] p.Source = postSeparatorFields[1] p.VfsOpts = strings.Join(postSeparatorFields[2:], " ") From 95a400e6e1a3b5da68431e64f9902a3fac218360 Mon Sep 17 00:00:00 2001 From: Patrick Hemmer Date: Mon, 30 Jun 2014 18:39:58 -0400 Subject: [PATCH 230/592] Support hairpin NAT This re-applies commit b39d02b with additional iptables rules to solve the issue with containers routing back into themselves. The previous issue with this attempt was that the DNAT rule would send traffic back into the container it came from. When this happens you have 2 issues. 1) reverse path filtering. The container is going to see the traffic coming in from the outside and it's going to have a source address of itself. So reverse path filtering will kick in and drop the packet. 2) direct return mismatch. Assuming you turned reverse path filtering off, when the packet comes back in, it's goign to have a source address of itself, thus when the reply traffic is sent, it's going to have a source address of itself. But the original packet was sent to the host IP address, so the traffic will be dropped because it's coming from an address which the original traffic was not sent to (and likely with an incorrect port as well). The solution to this is to masquerade the traffic when it gets routed back into the origin container. However for this to work you need to enable hairpin mode on the bridge port, otherwise the kernel will just drop the traffic. The hairpin mode set is part of libcontainer, while the MASQ change is part of docker. This reverts commit 63c303eecdbaf4dc7967fd51b82cd447c778cecc. 
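As a sketch of the resulting rules (the container address 172.17.0.2, container port 80 and host port 8080 are made-up values), the nat table ends up with roughly:

```
# DNAT no longer excludes traffic arriving on the bridge, so a container can reach its own published port
-A DOCKER -p tcp --dport 8080 -j DNAT --to-destination 172.17.0.2:80
# new rule: rewrite the source when the DNATed traffic loops straight back into the same container
-A POSTROUTING -p tcp -s 172.17.0.2 -d 172.17.0.2 --dport 80 -j MASQUERADE
```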
Docker-DCO-1.1-Signed-off-by: Patrick Hemmer (github: phemmer) --- pkg/iptables/iptables.go | 12 +++++++++++- 1 file changed, 11 insertions(+), 1 deletion(-) diff --git a/pkg/iptables/iptables.go b/pkg/iptables/iptables.go index 53e6e1430c..b550837601 100644 --- a/pkg/iptables/iptables.go +++ b/pkg/iptables/iptables.go @@ -73,7 +73,6 @@ func (c *Chain) Forward(action Action, ip net.IP, port int, proto, dest_addr str "-p", proto, "-d", daddr, "--dport", strconv.Itoa(port), - "!", "-i", c.Bridge, "-j", "DNAT", "--to-destination", net.JoinHostPort(dest_addr, strconv.Itoa(dest_port))); err != nil { return err @@ -97,6 +96,17 @@ func (c *Chain) Forward(action Action, ip net.IP, port int, proto, dest_addr str return fmt.Errorf("Error iptables forward: %s", output) } + if output, err := Raw("-t", "nat", string(fAction), "POSTROUTING", + "-p", proto, + "-s", dest_addr, + "-d", dest_addr, + "--dport", strconv.Itoa(dest_port), + "-j", "MASQUERADE"); err != nil { + return err + } else if len(output) != 0 { + return fmt.Errorf("Error iptables forward: %s", output) + } + return nil } From 4c978322979f00408c72b50931a8cdea2d5cdefc Mon Sep 17 00:00:00 2001 From: shuai-z Date: Wed, 22 Oct 2014 15:12:03 +0800 Subject: [PATCH 231/592] fixed the way of iterating over the range of map. Fixed the following errors: 1. Request(0) causes a dead loop when the map is full and map.last == BEGIN. 2. When map.last is the only available port (or ip), Request(0) returns ErrAllPortsAllocated (or ErrNoAvailableIPs). Exception is when map.last == BEGIN. Signed-off-by: shuai-z --- daemon/networkdriver/ipallocator/allocator.go | 5 +- .../ipallocator/allocator_test.go | 59 +++++++++++++++++++ .../portallocator/portallocator.go | 4 +- .../portallocator/portallocator_test.go | 13 ++++ 4 files changed, 79 insertions(+), 2 deletions(-) diff --git a/daemon/networkdriver/ipallocator/allocator.go b/daemon/networkdriver/ipallocator/allocator.go index 3f60d2d065..a8625c0300 100644 --- a/daemon/networkdriver/ipallocator/allocator.go +++ b/daemon/networkdriver/ipallocator/allocator.go @@ -129,7 +129,10 @@ func (allocated *allocatedMap) checkIP(ip net.IP) (net.IP, error) { // return an available ip if one is currently available. If not, // return the next available ip for the nextwork func (allocated *allocatedMap) getNextIP() (net.IP, error) { - for pos := big.NewInt(0).Add(allocated.last, big.NewInt(1)); pos.Cmp(allocated.last) != 0; pos.Add(pos, big.NewInt(1)) { + pos := big.NewInt(0).Set(allocated.last) + allRange := big.NewInt(0).Sub(allocated.end, allocated.begin) + for i := big.NewInt(0); i.Cmp(allRange) <= 0; i.Add(i, big.NewInt(1)) { + pos.Add(pos, big.NewInt(1)) if pos.Cmp(allocated.end) == 1 { pos.Set(allocated.begin) } diff --git a/daemon/networkdriver/ipallocator/allocator_test.go b/daemon/networkdriver/ipallocator/allocator_test.go index c4ce40cd0a..8e0e853fac 100644 --- a/daemon/networkdriver/ipallocator/allocator_test.go +++ b/daemon/networkdriver/ipallocator/allocator_test.go @@ -432,6 +432,65 @@ func TestAllocateAllIps(t *testing.T) { } assertIPEquals(t, first, again) + + // ensure that alloc.last == alloc.begin won't result in dead loop + if _, err := RequestIP(network, nil); err != ErrNoAvailableIPs { + t.Fatal(err) + } + + // Test by making alloc.last the only free ip and ensure we get it back + // #1. 
first of the range, (alloc.last == ipToInt(first) already) + if err := ReleaseIP(network, first); err != nil { + t.Fatal(err) + } + + ret, err := RequestIP(network, nil) + if err != nil { + t.Fatal(err) + } + + assertIPEquals(t, first, ret) + + // #2. last of the range, note that current is the last one + last := net.IPv4(192, 168, 0, 254) + setLastTo(t, network, last) + + ret, err = RequestIP(network, nil) + if err != nil { + t.Fatal(err) + } + + assertIPEquals(t, last, ret) + + // #3. middle of the range + mid := net.IPv4(192, 168, 0, 7) + setLastTo(t, network, mid) + + ret, err = RequestIP(network, nil) + if err != nil { + t.Fatal(err) + } + + assertIPEquals(t, mid, ret) +} + +// make sure the pool is full when calling setLastTo. +// we don't cheat here +func setLastTo(t *testing.T, network *net.IPNet, ip net.IP) { + if err := ReleaseIP(network, ip); err != nil { + t.Fatal(err) + } + + ret, err := RequestIP(network, nil) + if err != nil { + t.Fatal(err) + } + + assertIPEquals(t, ip, ret) + + if err := ReleaseIP(network, ip); err != nil { + t.Fatal(err) + } } func TestAllocateDifferentSubnets(t *testing.T) { diff --git a/daemon/networkdriver/portallocator/portallocator.go b/daemon/networkdriver/portallocator/portallocator.go index e5dd077a9e..3414d11e7a 100644 --- a/daemon/networkdriver/portallocator/portallocator.go +++ b/daemon/networkdriver/portallocator/portallocator.go @@ -136,7 +136,9 @@ func ReleaseAll() error { } func (pm *portMap) findPort() (int, error) { - for port := pm.last + 1; port != pm.last; port++ { + port := pm.last + for i := 0; i <= EndPortRange-BeginPortRange; i++ { + port++ if port > EndPortRange { port = BeginPortRange } diff --git a/daemon/networkdriver/portallocator/portallocator_test.go b/daemon/networkdriver/portallocator/portallocator_test.go index 3fb218502c..72581f1040 100644 --- a/daemon/networkdriver/portallocator/portallocator_test.go +++ b/daemon/networkdriver/portallocator/portallocator_test.go @@ -134,6 +134,19 @@ func TestAllocateAllPorts(t *testing.T) { if newPort != port { t.Fatalf("Expected port %d got %d", port, newPort) } + + // now pm.last == newPort, release it so that it's the only free port of + // the range, and ensure we get it back + if err := ReleasePort(defaultIP, "tcp", newPort); err != nil { + t.Fatal(err) + } + port, err = RequestPort(defaultIP, "tcp", 0) + if err != nil { + t.Fatal(err) + } + if newPort != port { + t.Fatalf("Expected port %d got %d", newPort, port) + } } func BenchmarkAllocatePorts(b *testing.B) { From 4bc28f4e6bb38ec70fb98a4deea723a2d0812d98 Mon Sep 17 00:00:00 2001 From: unclejack Date: Tue, 4 Nov 2014 15:43:58 +0200 Subject: [PATCH 232/592] daemon/container: stream & decode JSON Docker-DCO-1.1-Signed-off-by: Cristian Staretu (github: unclejack) --- daemon/container.go | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/daemon/container.go b/daemon/container.go index a972f8b712..da5745650c 100644 --- a/daemon/container.go +++ b/daemon/container.go @@ -102,13 +102,17 @@ func (container *Container) FromDisk() error { return err } - data, err := ioutil.ReadFile(pth) + jsonSource, err := os.Open(pth) if err != nil { return err } + defer jsonSource.Close() + + dec := json.NewDecoder(jsonSource) + // Load container settings // udp broke compat of docker.PortMapping, but it's not used when loading a container, we can skip it - if err := json.Unmarshal(data, container); err != nil && !strings.Contains(err.Error(), "docker.PortMapping") { + if err := dec.Decode(container); err != nil && 
!strings.Contains(err.Error(), "docker.PortMapping") { return err } From 4dbbe4f51a22a9ec335fbf2f6e3981132f868312 Mon Sep 17 00:00:00 2001 From: unclejack Date: Tue, 4 Nov 2014 15:45:46 +0200 Subject: [PATCH 233/592] image: stream img JSON & Decode in LoadImage Docker-DCO-1.1-Signed-off-by: Cristian Staretu (github: unclejack) --- image/image.go | 12 ++++++++---- 1 file changed, 8 insertions(+), 4 deletions(-) diff --git a/image/image.go b/image/image.go index 47df76d9f3..dfa8e9a6e3 100644 --- a/image/image.go +++ b/image/image.go @@ -38,14 +38,18 @@ type Image struct { } func LoadImage(root string) (*Image, error) { - // Load the json data - jsonData, err := ioutil.ReadFile(jsonPath(root)) + // Open the JSON file to decode by streaming + jsonSource, err := os.Open(jsonPath(root)) if err != nil { return nil, err } - img := &Image{} + defer jsonSource.Close() - if err := json.Unmarshal(jsonData, img); err != nil { + img := &Image{} + dec := json.NewDecoder(jsonSource) + + // Decode the JSON data + if err := dec.Decode(img); err != nil { return nil, err } if err := utils.ValidateID(img.ID); err != nil { From f665be55fe832086202e54449402c1513cf4f195 Mon Sep 17 00:00:00 2001 From: unclejack Date: Tue, 4 Nov 2014 15:46:45 +0200 Subject: [PATCH 234/592] volume: stream JSON & Decode Docker-DCO-1.1-Signed-off-by: Cristian Staretu (github: unclejack) --- volumes/volume.go | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/volumes/volume.go b/volumes/volume.go index 73cbb3640d..d718b07d70 100644 --- a/volumes/volume.go +++ b/volumes/volume.go @@ -154,12 +154,15 @@ func (v *Volume) FromDisk() error { return err } - data, err := ioutil.ReadFile(pth) + jsonSource, err := os.Open(pth) if err != nil { return err } + defer jsonSource.Close() - return json.Unmarshal(data, v) + dec := json.NewDecoder(jsonSource) + + return dec.Decode(v) } func (v *Volume) jsonPath() (string, error) { From 28605bc4d55bcfc7a5562e4de3e01b1d5cd9bdcd Mon Sep 17 00:00:00 2001 From: Malte Janduda Date: Tue, 4 Nov 2014 15:19:47 +0100 Subject: [PATCH 235/592] enhancing set-macaddress docu #2 Signed-off-by: Malte Janduda --- docs/man/docker-run.1.md | 2 +- docs/sources/reference/run.md | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/docs/man/docker-run.1.md b/docs/man/docker-run.1.md index ae559819b1..ff3dac17b0 100644 --- a/docs/man/docker-run.1.md +++ b/docs/man/docker-run.1.md @@ -189,7 +189,7 @@ and foreground Docker containers. 'host': use the host network stack inside the container. Note: the host mode gives the container full access to local system services such as D-bus and is therefore considered insecure. **--mac-address**=*macaddress* - Set the MAC address for the container's ethernet device: + Set the MAC address for the container's Ethernet device: --mac-address=12:34:56:78:9a:bc Remember that the MAC address in an Ethernet network must be unique. diff --git a/docs/sources/reference/run.md b/docs/sources/reference/run.md index 4ec867f1a6..826e1c8b6c 100644 --- a/docs/sources/reference/run.md +++ b/docs/sources/reference/run.md @@ -140,7 +140,7 @@ example, `docker run ubuntu:14.04`. 'container:': reuses another container network stack 'host': use the host network stack inside the container --add-host="" : Add a line to /etc/hosts (host:IP) - --mac-address="" : Sets the container's ethernet device's MAC address + --mac-address="" : Sets the container's Ethernet device's MAC address By default, all containers have networking enabled and they can make any outgoing connections. 
The operator can completely disable networking From 762ffda95d9ddd4f75fa94971ac85c52389648c1 Mon Sep 17 00:00:00 2001 From: Bert Goethals Date: Tue, 4 Nov 2014 17:49:39 +0100 Subject: [PATCH 236/592] makes the -s --size option documentation clearer --- docs/man/docker-ps.1.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/man/docker-ps.1.md b/docs/man/docker-ps.1.md index bf22d87da5..9a9ae70c4c 100644 --- a/docs/man/docker-ps.1.md +++ b/docs/man/docker-ps.1.md @@ -46,7 +46,7 @@ the running containers. Only display numeric IDs. The default is *false*. **-s**, **--size**=*true*|*false* - Display sizes. The default is *false*. + Display total file sizes. The default is *false*. **--since**="" Show only containers created since Id or Name, include non-running ones. From f9d80712d9417feadd63cb71c0c7825c4e9f9fd4 Mon Sep 17 00:00:00 2001 From: unclejack Date: Wed, 22 Oct 2014 11:15:57 +0300 Subject: [PATCH 237/592] contrib/install.sh: remove pull of hello-world This removes the pull of the hello-world image from install.sh to address privacy concerns. Docker-DCO-1.1-Signed-off-by: Cristian Staretu (github: unclejack) --- hack/install.sh | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/hack/install.sh b/hack/install.sh index 9652e4672d..b7e97555ae 100755 --- a/hack/install.sh +++ b/hack/install.sh @@ -85,7 +85,7 @@ case "$lsb_dist" in if command_exists docker && [ -e /var/run/docker.sock ]; then ( set -x - $sh_c 'docker run --rm hello-world' + $sh_c 'docker version' ) || true fi your_user=your-user @@ -162,7 +162,7 @@ case "$lsb_dist" in if command_exists docker && [ -e /var/run/docker.sock ]; then ( set -x - $sh_c 'docker run --rm hello-world' + $sh_c 'docker version' ) || true fi your_user=your-user From a368e064a972ab75561ee50067a3168b9d8d277e Mon Sep 17 00:00:00 2001 From: Vincent Batts Date: Thu, 9 Oct 2014 13:52:30 -0400 Subject: [PATCH 238/592] registry: don't iterate through certs the golang tls.Conn does a fine job of that. 
http://golang.org/src/pkg/crypto/tls/handshake_client.go?#L334 Signed-off-by: Vincent Batts --- registry/registry.go | 29 ++++++++--------------------- 1 file changed, 8 insertions(+), 21 deletions(-) diff --git a/registry/registry.go b/registry/registry.go index 8d43637495..e1d22b0908 100644 --- a/registry/registry.go +++ b/registry/registry.go @@ -36,15 +36,12 @@ const ( ConnectTimeout ) -func newClient(jar http.CookieJar, roots *x509.CertPool, cert *tls.Certificate, timeout TimeoutType, secure bool) *http.Client { +func newClient(jar http.CookieJar, roots *x509.CertPool, certs []tls.Certificate, timeout TimeoutType, secure bool) *http.Client { tlsConfig := tls.Config{ RootCAs: roots, // Avoid fallback to SSL protocols < TLS1.0 - MinVersion: tls.VersionTLS10, - } - - if cert != nil { - tlsConfig.Certificates = append(tlsConfig.Certificates, *cert) + MinVersion: tls.VersionTLS10, + Certificates: certs, } if !secure { @@ -94,7 +91,7 @@ func newClient(jar http.CookieJar, roots *x509.CertPool, cert *tls.Certificate, func doRequest(req *http.Request, jar http.CookieJar, timeout TimeoutType, secure bool) (*http.Response, *http.Client, error) { var ( pool *x509.CertPool - certs []*tls.Certificate + certs []tls.Certificate ) if secure && req.URL.Scheme == "https" { @@ -137,7 +134,7 @@ func doRequest(req *http.Request, jar http.CookieJar, timeout TimeoutType, secur if err != nil { return nil, nil, err } - certs = append(certs, &cert) + certs = append(certs, cert) } if strings.HasSuffix(f.Name(), ".key") { keyName := f.Name() @@ -159,19 +156,9 @@ func doRequest(req *http.Request, jar http.CookieJar, timeout TimeoutType, secur return res, client, nil } - for i, cert := range certs { - client := newClient(jar, pool, cert, timeout, secure) - res, err := client.Do(req) - // If this is the last cert, otherwise, continue to next cert if 403 or 5xx - if i == len(certs)-1 || err == nil && - res.StatusCode != 403 && - res.StatusCode != 404 && - res.StatusCode < 500 { - return res, client, err - } - } - - return nil, nil, nil + client := newClient(jar, pool, certs, timeout, secure) + res, err := client.Do(req) + return res, client, err } func validateRepositoryName(repositoryName string) error { From e621f99923ae8ab74a44d1fa2416e28a6dbc6eb0 Mon Sep 17 00:00:00 2001 From: Alexandr Morozov Date: Tue, 4 Nov 2014 14:47:13 -0800 Subject: [PATCH 239/592] Add check for IP_NF_FILTER Signed-off-by: Alexandr Morozov --- contrib/check-config.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/contrib/check-config.sh b/contrib/check-config.sh index afaabbc956..26a2f0ae43 100755 --- a/contrib/check-config.sh +++ b/contrib/check-config.sh @@ -135,7 +135,7 @@ flags=( DEVPTS_MULTIPLE_INSTANCES CGROUPS CGROUP_CPUACCT CGROUP_DEVICE CGROUP_FREEZER CGROUP_SCHED MACVLAN VETH BRIDGE - NF_NAT_IPV4 IP_NF_TARGET_MASQUERADE + NF_NAT_IPV4 IP_NF_FILTER IP_NF_TARGET_MASQUERADE NETFILTER_XT_MATCH_{ADDRTYPE,CONNTRACK} NF_NAT NF_NAT_NEEDED ) From ebeccee968e5edba5043920581a5b6976aa504c0 Mon Sep 17 00:00:00 2001 From: Nathan Hsieh Date: Tue, 4 Nov 2014 15:28:38 -0800 Subject: [PATCH 240/592] Updated footer to match rest of site Signed-off-by: Nathan Hsieh --- docs/theme/mkdocs/css/main.css | 28 ++++++++++++++ docs/theme/mkdocs/footer.html | 28 +++++++++++++- .../mkdocs/img/footer/angellist-white.svg | 35 ++++++++++++++++++ .../mkdocs/img/footer/sprites-small_360.png | Bin 0 -> 20957 bytes 4 files changed, 90 insertions(+), 1 deletion(-) create mode 100644 docs/theme/mkdocs/img/footer/angellist-white.svg create mode 100644 
docs/theme/mkdocs/img/footer/sprites-small_360.png diff --git a/docs/theme/mkdocs/css/main.css b/docs/theme/mkdocs/css/main.css index 3375f797da..ed7c189a09 100644 --- a/docs/theme/mkdocs/css/main.css +++ b/docs/theme/mkdocs/css/main.css @@ -366,9 +366,20 @@ body { text-decoration: none; color: #eeeeee; } +#footer .social { + width: 100px; + float: left; +} #footer .social li a { padding-left: 28px; } +#footer .social li span { + float: left; + width: 24px; + height: 25px; + position: absolute; + margin: 1px 0px 2px -28px; +} #footer .social .blog { background: url(../img/footer/docker-blog-24.png) no-repeat; background-position: 0px -3px; @@ -393,6 +404,23 @@ body { background: url(../img/footer/slideshare-24.png) no-repeat; background-position: 0px -3px; } +#footer .social .linkedin { + background: url(../img/footer/sprites-small_360.png) no-repeat; + background-position: -168px -3px; +} +#footer .social .github { + background: url(../img/footer/sprites-small_360.png) no-repeat; + background-position: -48px -3px; +} +#footer .social .reddit { + background: url(../img/footer/sprites-small_360.png) no-repeat; + background-position: -192px -3px; +} +#footer .social .angellist { + background: url(../img/footer/angellist-white.svg) no-repeat; + background-position: 5px; + height: 20px; +} /* Social Links */ @media only screen and (-webkit-min-device-pixel-ratio: 2), only screen and (min--moz-device-pixel-ratio: 2), only screen and (-o-min-device-pixel-ratio: 2/1), only screen and (min-device-pixel-ratio: 2), only screen and (min-resolution: 192dpi), only screen and (min-resolution: 2dppx) { #footer .social .blog { diff --git a/docs/theme/mkdocs/footer.html b/docs/theme/mkdocs/footer.html index 05316b4f38..69a4e6367f 100644 --- a/docs/theme/mkdocs/footer.html +++ b/docs/theme/mkdocs/footer.html @@ -69,7 +69,33 @@
  • Google+
  • YouTube
  • -
  • Slideshare
  • + + diff --git a/docs/theme/mkdocs/img/footer/angellist-white.svg b/docs/theme/mkdocs/img/footer/angellist-white.svg new file mode 100644 index 0000000000..5c52f3a832 --- /dev/null +++ b/docs/theme/mkdocs/img/footer/angellist-white.svg @@ -0,0 +1,35 @@ + + + + + + + + diff --git a/docs/theme/mkdocs/img/footer/sprites-small_360.png b/docs/theme/mkdocs/img/footer/sprites-small_360.png new file mode 100644 index 0000000000000000000000000000000000000000..c28863e3f5c7ceb64b5a69345c757da563de85ed GIT binary patch literal 20957 zcmeI4c|4Tu+y5_HStF^W#ZK8~h8fFXWUFLNlC2nHFqWBN#-5O95m^#hQzB)}o^4tX zSt7Seh>+}L&-RQ~_uSp{`u<+e^Lw6u%EVjKW0h*0G1u=ZyF#jfg1prpJ3G04GipYL>$2$ zhv(B$SLegK;%qTaXaMl+$hzra5cQ6y=EKBND~lvQt)Uw3!i-qO7K89@vTC$id>1)5 z>7DLNh?VRz(l~skW#@K=H?%LzI8Hc)SWU1zx_@xQ`blv8N}zD3;(WKtc!RH9Qv)tZ zVNhYhr%8n!7~6hYZ0B7Ez?8F80{%QXG4@7-2!cl19@vg2ef;8j@BE1q+b5G*Xcq8W zIRO87S&IxK);05t>DR7az%ec$^$Wqv6nNnRqy~yc*U{vj1Hfk`V(Mvfc>t?=&ZtP5 zTtR@urGpm*z$Zl)4gg?uUJDKI77Sde8K|cR+VlZZQy)`50F?xCqb4;u0keaE=k!Gj z1mG+QG`y&<|3vpHjLu(kSY?rMwOZ!XP~8CY4J8N*0EE+OmKs8efz=Pu!p1t?NJapf zA_3(A8i4S)hEcq*w&9?L-lNOx;5s!;`%a;0SGq{iTg_~&$u^p{2_J+A=#zGuu6maqstSopkYfI%n?wVk+!Q(8fhI4{fPL4;^uS6pZUD}MAKXTklir;-#Nj#=nY?c*k z(so1t#ur__T^DZyOq2XKjL^Z<;ArgQ z0loQ2u1DoeY+C{Ut<3jncND1dUVB38M53`E?KsEJ_>_;+nxAPGE&IiA25A#kZKUvx z!+V@AI2$mOAwm26pW(U8dbkn-L-0Gjdt{X^BDgP}2GjCdv8r+is2%0LENkr_E6{$K z?qjg0pz1Mt&QKx#LzjUFK4zOkk9ZEL*GGuiup3@qzL22N#2*uJwja@Yos}OK0Xi7m zY9R99Qe2A63Ar|Qwxg&PhWkLHDocwskt#u|Ep=GS4uR_F*+oo@^#d*_B!3v$uBpXlhA$F0%6a76+xa)jk{Ox!Kp= z_W-{Y-&ms8scWHvrJvugUpw+KYwCdKHpF%i2~-o)BR;t_B|5o#vUnTc=Tq;edTKjT5?^U^^IhY-qTzf8))K<4)z3Nbob%(hkNh7^KeBz~o7!>Z zxHO+a_yw&xAtubuj0?nYVlXjqCINcHSTH_P`9*{cqSU`UqWpU2kxIP-Lf22U9s0xt zIxZm`_$bm)3zKSbTj@^uUBiqcMjpc!4=$T&WThTRIeTaBq{?fky>S81aW3vcQGoGL z$MmlBio36eY>AfM(r5HO9L`BkPFG3sGeq8h`v7mWASQ0~;8ffx?EP|M;fHY#zGg@n zNg8DtD5rU!aWe_cv`yP}yXKU6wsDrb;aGOZeYkqpS!F|IZQqKos(_Wis@d{qoy7vmT5z})A2WDox2KZFmh?WZE@KH52MpgJ@_lt&3q(8)?D$J z;z^%;_mDZ%{)DjTCX2*sco|XZGpoLx-h4t+{8{+Q=b5dK?`KD23&uISDjvTf9-W<; zRhvomR-9=XVH?Sx@tu{KDIQL4;gz=H_S}1!wTLGH=2_Qnvy@jodfZ*q=efZfaGL?L zq&D|GzTrjZ!6y6Y6HzTF)YZIrm&a4aecSrB@1To%C!(#(qjMlOQnyPdDDAy&nT|@_ zt$5+sEQlk-2bqm-f`Fhnu%C@;@nqhMy!(YEXsujgenUYg<{c`?Ou?Kf>p@nw`Q3J@ zc7c(^_6nIvnNGW7wrx!%j_UbS`E~i2+@WIhyCZM2A35jwD!;a|{nF4VQ)YXwyM6eaWyM2nyxveqxYs@7v!x=5b9yyUQx$%X!;p4kUdTLiErdR$E>AsC21w?CEaeQ$C{;C9%cPu9`hcSz31y!@-Ky4>g>B;YnddQbSr7AvSovG za-AT@ zx=A6iZJooV&3DG`#vJje23zl{-gj-0V}!R1GKWmiOc2NxJPSL2{+#b}?M2eXFBdDH z-@5XWGe=}d=t`tn)Rnrax}~V2gA9%DWpVh*>E*~pk6p2#7nXRhlqR*Qr>{edcDwA} zul*!lD0QFCVtr_TyX(=D3K_5r!E&J6y<5BzYHuECDfAAy&se z3uba`3!&9Gbw_Sb%APVAgzXXAc63D<-2M5H%NvoWB6Vqs@xHRW2Ty}Q61;DveYB^I=h|DdCW<9H+)JNkSkJrH zyI0Ru)?cn~-!GylJGNjZocZ|7m}QsIUGx0xPknl;O?%7EdBDco-=8U3dZ~XWd3=oN z-D?X+WAnV2TshoCzVFo1Q z&hP#pzNfeSNPLa5Zf4peLA-`b{{ZMvRi^^a`GUUK_jxsaX|;}eV7(i%f{hG~f|5^f zR<@_McSj*b6+4vP>n%GhId&a-cJi-um_ANPUE$<8+hwQTcP;LirAdV+r2%1@``MdC zldAqwD9&F}TL{wL9SBOA_%Z_bsT*m{czGa3|Ex<{tLt9_4%4tfN73_y)iI4?ZI5ZU zHzQffwmpp@#q!13-ARP9dA-g=ACJKgV`a5f^B*f0R^zvNvDX~f zrTkPy%r|qcB30A9|SA8>#ZwTBeut)Yk>eO20FEwf5~fC+H)y>b{~l=HWJx*zIxc zNGM`4d=@#18WJ7T--;;n9se3RIk5QlW6txO=noQAg}&@-tM*Ec=Pc@@U1$J+4uC4} z?{VG_C^PxD&>cQ;>qVX4ppT8%(3)J*i&g;Wp*0v};W$mt(XuV~;eJv9qksE04hA5v z(jN{Nd-EYWe*81IS2K#HMj+dqN@W(;hd$AHaKgvl&3SEeDnnXB^6IR z66J^{@>!$pFj!@QiLxpIK8%gBz-gE+SQoF3w#OXvc154`)-ytRJE9b91XK{rN}h0X z0B1B2$>-_pgeAZ|l?A@Vg_FOpH-iNDzI7owDhsHtC*(8HHQ-anxuW@CQZPvrSQ^47 zryvD^!Q|wmCHSPl5GV)?0YPLWAqsFP6b=US{dfr=n91LiTy1ROh8jnHq$69(0`^2A z9u5L|cz8&8$VlN_?LZI(1qBdT8YC?(N$w#@@WK+2o|0IC;P)Uu<7l7>C|3-gh{0j` 
z*5e|rac)Fq0fF^Izg|Da<&6K82ut|Ej!Y5ciNu4*cVy6?L^h~j8oZmU)3-~rL4nXt zXlFE*NFeJVf7IcBK5qPy|1IS&_rE44&xx+?pT$3~*V*}xsR=|)cQS$>NdK1hr;=dg zg-3%7(FB~ED+;aYP9BQjpGHr_*#4fJzeQU2{Oh%&Ju&|xTlajIeOn6OXGn=WR=Bz= z8cD>t8sTtGi0`ZH_Y)iWfo*+N@g3AfqA=L?$GAgEAWG5i8UGy*t$`$>5$jq>u!5vC z)CdBBLuBC)uq0THye>Bc`QDQvhAz$qW9#*Y7}7FuC}d-d4LvDikav&`l8F3|qHOHD zA&w0SZi{nuMiLPiXQUk(gvZ(`fqpk`==p11!PRk2I9Kvzp%F4lpg%SLQAK_>Hu(N% zjL^Uk@J>jtjp7a74eDgM|HA)f&mIX^f6%^zp zts!U|Nf{_i0VZdw0Fj5FzfFOz?ne1PLTlhqZtJ^^9Qu1j&}WjnGEujBguDSv?~E&>xy&cL*nsH7!-1SVnOa$n;*BbU;BZN zh~wK(D83JOLyZ5$u@aE(|Gh=`pVg56-lF?kHUGa{bia2k${vZeL)#!gKR4{phW~8t zzOUZDSCXIA>7SKHY5isnCs!@<4*IsJ5D+EMKiYn0{@&kYz4|!0eXA~V(qQrn1X=(4 zL%PvTDK(`o-RP#2{#KsCVu%RoAK56HH>lQ69N%w-C&`CnG(r{zlYuFLD4Qu21{hDY zlZgg~eBU9g*H38(`3~|;MCtjX(sY9ox>2~n@k981t~Q)wzm=Ttt>jZO`G5}kbwd9~ zHTqSy|0lov9?}0~REk)eNhp9&H#c!n=R@J$#6A&CqRxlHyNQbe2z7H47j-@q-c4K-K&YFWxTy1?@NVLw07Bi|#6_JCg?AGd z1rX}yCNAoHD7>4vD1cBmH*rztL*d=TMFE7mxrvK99}4d#E(##j%}rd?`A~Q_aZv!F zZf@eD&WFOgiHiaVb#oIJbv_i{OEcu779^_xNTJPI6N&Yn~AL^K)E&w>Y0RZ7P04#R_K%fl(;6DSvs67CH5dgpu z-amS4nEbC|oV7GmjXXO>6TAveo!Vcka(%vs{~5|am6PiP={ZyNA>p{T%)?sMOj!j% zNQWC&)g8iaq}{uGHO>Bxuv(qj{g)(qdYC2`7so3OHMAl9EiY2B%J_WET&7Qx@*v^> z=cM^aw$Hf2(r86a9KdW9@Fs_K zf!ToR8bcDTabA==_XEZ-c6z<7@HYmeIr>k)#Mt4Hh5fg;;`bTwF0h(Rz%stfU*v2j zimoJ|yp<}=@`n5Nz^-AB?5y6=v2!b80sN$QBxsLF&%vHYw;N}7WnUo$CrT7|8Ak8? zeDywq;sJR_7UtcD&3nXpE`Uxr^0E}jLE~dyrO|{O&|i4=e>n8hO7!D58Jtl^qpumIPSsc*R=+DzK8H=?YNS-se=Rn9V-QnUL(T zWmWoI7@Q=3TRfN1lm6{ad+U<9yp&N4)(;LA=;KZ|jg72Lt5$Oo;^!aQ8O1O&WL544UpZ< zu);2dT=mh8+cN_>W2kNE!zW**(VDmCYnD#Sr-ND{ookF*Wn88q?+XWpYA!)v^EZ+PP*a>1Y7%9(Q^%og8L}wx@@6Om55hGl76Z zuu-!CEcx+^6hhI3{ zyicr%b=a8xTXKF{@KF1fe)|#HVk#shmCUV;i;+0f()@Kiev%u13!kAFxj7{0` z#4%ac2%=79b0)3v{=;R`TKef0x1$Vad|fAR58ku)vv{h;*1=30oMM@jBu6(UJYH+MVuANI8~3}ISO6U-qpR@y^yR8 zw`|2m>fInlGvwr?TEY!xt?_G0=A@D-sT(s`w&lBvk$QD}R{6}C&gK(dhiK;BB=wa)d5mSp=<8#egLo$O+wD(0ZXrwt6L*PR4&X9nDB@by94v*Ad!g zc}B>6(qmz!H}*Y|H~x}|CMnuZ-aJHSNTgp*8qt=`6y zt|xMAS@6C(=q=CakU8(oKJN`H_*nmX|F+cxx_ubak{7BZ7>PHZ(M0$7m3FV!?1x%q zdD+ai=+IQ2NnJ4y5b@#XoPEsY$+1uzReluk(HOTJ)%C2=YCYF_4oOlnwZDF>Kc|pk zJe`Ev^YtB2?r%8twi`uKfh8@5~H-}bbk-{*~} zQ|I)P38WXBfqy&QZjr+|g`~w`kB9!XY>Dmf#QHOI+2p<)k2yG-S6%UfXR>TpNqS%r z9h+jSq1Ux9@ks`cxVBCYD<;1cta((P;Hr0*bGneL@v zg749-gJu!pd&N^<&3UYf>)Y|v>G0E?5Bq3dEnimsSucK~+Z{g54(0UBKD^|ZczX;T zVx*g$`^AMtLQ}#*u7`aU>s<1-E#$0d4Rh+fRni2jo&G`Kkjj?x_6g^2-5S`}6L({! 
zxv0izmqL~JRCNKtx+!PwG25KQ**vSl$%F@n4R&{;jT6+C%6Cb$hzgs%jC)1+RJXmz zV!E#4>Wn{f$y0o+A}iPI?TU$$%BY@TbYo%4o@uL>7ZTKhQe(7P^NrCuw6YIX&!E(` z{5-^v=x{^N%;&3ST4v*R<&v69dDtcU8W+8Y0{Ek}sz_)$$xL7Wpm@Ds9K88vQ2X(( zzE{6iPdlakpCDQBW2s=m?)aKTq z5qab&;rh<@PlekD#Pz{brm${1SZpni`2}h7oX26(MMN%Vt(w)jBF*DV`%o{Un{9k) z+)`J%N1VOV73iyCJ4YamUDynMZ-{eM1BdPH-mekcqH7lgc67NPzgoI&YF~5Vn#r*E zadZ(!;HZdD)=(sH3OS-?QyF62B-)~8p0iXJJ=+L2^SWRW?N{h!zuR@-H*2*pmmL>J)<&v++ zPcg6@d Date: Tue, 4 Nov 2014 18:53:34 -0500 Subject: [PATCH 241/592] made getDefaultNetworkMtu private Signed-off-by: Blake Geno --- daemon/config.go | 2 +- daemon/daemon.go | 3 +-- 2 files changed, 2 insertions(+), 3 deletions(-) diff --git a/daemon/config.go b/daemon/config.go index 9e8d08e2a6..ddb6040bff 100644 --- a/daemon/config.go +++ b/daemon/config.go @@ -70,7 +70,7 @@ func (config *Config) InstallFlags() { opts.MirrorListVar(&config.Mirrors, []string{"-registry-mirror"}, "Specify a preferred Docker registry mirror") } -func GetDefaultNetworkMtu() int { +func getDefaultNetworkMtu() int { if iface, err := networkdriver.GetDefaultRouteIface(); err == nil { return iface.MTU } diff --git a/daemon/daemon.go b/daemon/daemon.go index 084d779033..b0feae917b 100644 --- a/daemon/daemon.go +++ b/daemon/daemon.go @@ -721,8 +721,7 @@ func NewDaemon(config *Config, eng *engine.Engine) (*Daemon, error) { func NewDaemonFromDirectory(config *Config, eng *engine.Engine) (*Daemon, error) { // Apply configuration defaults if config.Mtu == 0 { - // FIXME: GetDefaultNetwork Mtu doesn't need to be public anymore - config.Mtu = GetDefaultNetworkMtu() + config.Mtu = getDefaultNetworkMtu() } // Check for mutually incompatible config options if config.BridgeIface != "" && config.BridgeIP != "" { From 6fdaa66652d65558d8225d58f3ac48904ebac1de Mon Sep 17 00:00:00 2001 From: Doug Davis Date: Wed, 1 Oct 2014 13:17:29 -0700 Subject: [PATCH 242/592] Add a better error message when we get an unknown http issue Closes #5184 Signed-off-by: Doug Davis --- api/client/utils.go | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/api/client/utils.go b/api/client/utils.go index 11e39729af..6fad9db10d 100644 --- a/api/client/utils.go +++ b/api/client/utils.go @@ -96,7 +96,12 @@ func (cli *DockerCli) call(method, path string, data interface{}, passAuthInfo b if strings.Contains(err.Error(), "connection refused") { return nil, -1, ErrConnectionRefused } - return nil, -1, err + + if cli.tlsConfig == nil { + return nil, -1, fmt.Errorf("%v. Are you trying to connect to a TLS-enabled daemon without TLS?", err) + } + return nil, -1, fmt.Errorf("An error occurred trying to connect: %v", err) + } if resp.StatusCode < 200 || resp.StatusCode >= 400 { From a7aa2c8ad26149e9be753bc08964f35cb09d313c Mon Sep 17 00:00:00 2001 From: Josh Hawn Date: Thu, 30 Oct 2014 13:47:31 -0700 Subject: [PATCH 243/592] Finalize TarSum Version 1 w/ refactor The current Dev version of TarSum includes hashing of extended file attributes and omits inclusion of modified time headers. I refactored the logic around the version differences to make it more clear that the difference between versions is in how tar headers are selected and ordered. TarSum Version 1 is now declared with the new Dev version continuing to track it. 
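A minimal usage sketch (the `Sum` call and the exact checksum prefix are assumed from the rest of the package, not shown in this diff): the caller picks a version up front, streams the archive through the TarSum reader, and reads the checksum at the end.

```
package main

import (
	"fmt"
	"io"
	"io/ioutil"
	"os"

	"github.com/docker/docker/pkg/tarsum"
)

func main() {
	f, err := os.Open("layer.tar") // any tar archive
	if err != nil {
		panic(err)
	}
	defer f.Close()

	// Version1 selects the v1 header ordering: xattrs included, mtime left out.
	ts, err := tarsum.NewTarSum(f, true, tarsum.Version1)
	if err != nil {
		panic(err)
	}
	// The hash is built up as the archive is read through the TarSum reader.
	if _, err := io.Copy(ioutil.Discard, ts); err != nil {
		panic(err)
	}
	fmt.Println(ts.Sum(nil)) // e.g. "tarsum.v1+sha256:..."
}
```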
Docker-DCO-1.1-Signed-off-by: Josh Hawn (github: jlhawn) --- pkg/tarsum/tarsum.go | 60 +++++------------------- pkg/tarsum/versioning.go | 86 +++++++++++++++++++++++++++++++++-- pkg/tarsum/versioning_test.go | 8 +++- 3 files changed, 100 insertions(+), 54 deletions(-) diff --git a/pkg/tarsum/tarsum.go b/pkg/tarsum/tarsum.go index 88d603c45b..34386ff39d 100644 --- a/pkg/tarsum/tarsum.go +++ b/pkg/tarsum/tarsum.go @@ -7,8 +7,6 @@ import ( "encoding/hex" "hash" "io" - "sort" - "strconv" "strings" "github.com/docker/docker/vendor/src/code.google.com/p/go/src/pkg/archive/tar" @@ -29,18 +27,20 @@ const ( // including the byte payload of the image's json metadata as well, and for // calculating the checksums for buildcache. func NewTarSum(r io.Reader, dc bool, v Version) (TarSum, error) { - if _, ok := tarSumVersions[v]; !ok { - return nil, ErrVersionNotImplemented + headerSelector, err := getTarHeaderSelector(v) + if err != nil { + return nil, err } - return &tarSum{Reader: r, DisableCompression: dc, tarSumVersion: v}, nil + return &tarSum{Reader: r, DisableCompression: dc, tarSumVersion: v, headerSelector: headerSelector}, nil } // Create a new TarSum, providing a THash to use rather than the DefaultTHash func NewTarSumHash(r io.Reader, dc bool, v Version, tHash THash) (TarSum, error) { - if _, ok := tarSumVersions[v]; !ok { - return nil, ErrVersionNotImplemented + headerSelector, err := getTarHeaderSelector(v) + if err != nil { + return nil, err } - return &tarSum{Reader: r, DisableCompression: dc, tarSumVersion: v, tHash: tHash}, nil + return &tarSum{Reader: r, DisableCompression: dc, tarSumVersion: v, headerSelector: headerSelector, tHash: tHash}, nil } // TarSum is the generic interface for calculating fixed time @@ -69,8 +69,9 @@ type tarSum struct { currentFile string finished bool first bool - DisableCompression bool // false by default. When false, the output gzip compressed. - tarSumVersion Version // this field is not exported so it can not be mutated during use + DisableCompression bool // false by default. When false, the output gzip compressed. 
+ tarSumVersion Version // this field is not exported so it can not be mutated during use + headerSelector tarHeaderSelector // handles selecting and ordering headers for files in the archive } func (ts tarSum) Hash() THash { @@ -103,49 +104,12 @@ type simpleTHash struct { func (sth simpleTHash) Name() string { return sth.n } func (sth simpleTHash) Hash() hash.Hash { return sth.h() } -func (ts tarSum) selectHeaders(h *tar.Header, v Version) (set [][2]string) { - for _, elem := range [][2]string{ - {"name", h.Name}, - {"mode", strconv.Itoa(int(h.Mode))}, - {"uid", strconv.Itoa(h.Uid)}, - {"gid", strconv.Itoa(h.Gid)}, - {"size", strconv.Itoa(int(h.Size))}, - {"mtime", strconv.Itoa(int(h.ModTime.UTC().Unix()))}, - {"typeflag", string([]byte{h.Typeflag})}, - {"linkname", h.Linkname}, - {"uname", h.Uname}, - {"gname", h.Gname}, - {"devmajor", strconv.Itoa(int(h.Devmajor))}, - {"devminor", strconv.Itoa(int(h.Devminor))}, - } { - if v >= VersionDev && elem[0] == "mtime" { - continue - } - set = append(set, elem) - } - return -} - func (ts *tarSum) encodeHeader(h *tar.Header) error { - for _, elem := range ts.selectHeaders(h, ts.Version()) { + for _, elem := range ts.headerSelector.selectHeaders(h) { if _, err := ts.h.Write([]byte(elem[0] + elem[1])); err != nil { return err } } - - // include the additional pax headers, from an ordered list - if ts.Version() >= VersionDev { - var keys []string - for k := range h.Xattrs { - keys = append(keys, k) - } - sort.Strings(keys) - for _, k := range keys { - if _, err := ts.h.Write([]byte(k + h.Xattrs[k])); err != nil { - return err - } - } - } return nil } diff --git a/pkg/tarsum/versioning.go b/pkg/tarsum/versioning.go index e1161fc5ab..3a656612ff 100644 --- a/pkg/tarsum/versioning.go +++ b/pkg/tarsum/versioning.go @@ -2,7 +2,11 @@ package tarsum import ( "errors" + "sort" + "strconv" "strings" + + "github.com/docker/docker/vendor/src/code.google.com/p/go/src/pkg/archive/tar" ) // versioning of the TarSum algorithm @@ -10,11 +14,11 @@ import ( // i.e. "tarsum+sha256:e58fcf7418d4390dec8e8fb69d88c06ec07039d651fedd3aa72af9972e7d046b" type Version int +// Prefix of "tarsum" const ( - // Prefix of "tarsum" Version0 Version = iota - // Prefix of "tarsum.dev" - // NOTE: this variable will be of an unsettled next-version of the TarSum calculation + Version1 + // NOTE: this variable will be either the latest or an unsettled next-version of the TarSum calculation VersionDev ) @@ -28,8 +32,9 @@ func GetVersions() []Version { } var tarSumVersions = map[Version]string{ - 0: "tarsum", - 1: "tarsum.dev", + Version0: "tarsum", + Version1: "tarsum.v1", + VersionDev: "tarsum.dev", } func (tsv Version) String() string { @@ -50,7 +55,78 @@ func GetVersionFromTarsum(tarsum string) (Version, error) { return -1, ErrNotVersion } +// Errors that may be returned by functions in this package var ( ErrNotVersion = errors.New("string does not include a TarSum Version") ErrVersionNotImplemented = errors.New("TarSum Version is not yet implemented") ) + +// tarHeaderSelector is the interface which different versions +// of tarsum should use for selecting and ordering tar headers +// for each item in the archive. 
+type tarHeaderSelector interface { + selectHeaders(h *tar.Header) (orderedHeaders [][2]string) +} + +type tarHeaderSelectFunc func(h *tar.Header) (orderedHeaders [][2]string) + +func (f tarHeaderSelectFunc) selectHeaders(h *tar.Header) (orderedHeaders [][2]string) { + return f(h) +} + +func v0TarHeaderSelect(h *tar.Header) (orderedHeaders [][2]string) { + return [][2]string{ + {"name", h.Name}, + {"mode", strconv.Itoa(int(h.Mode))}, + {"uid", strconv.Itoa(h.Uid)}, + {"gid", strconv.Itoa(h.Gid)}, + {"size", strconv.Itoa(int(h.Size))}, + {"mtime", strconv.Itoa(int(h.ModTime.UTC().Unix()))}, + {"typeflag", string([]byte{h.Typeflag})}, + {"linkname", h.Linkname}, + {"uname", h.Uname}, + {"gname", h.Gname}, + {"devmajor", strconv.Itoa(int(h.Devmajor))}, + {"devminor", strconv.Itoa(int(h.Devminor))}, + } +} + +func v1TarHeaderSelect(h *tar.Header) (orderedHeaders [][2]string) { + // Get extended attributes. + xAttrKeys := make([]string, len(h.Xattrs)) + for k := range h.Xattrs { + xAttrKeys = append(xAttrKeys, k) + } + sort.Strings(xAttrKeys) + + // Make the slice with enough capacity to hold the 11 basic headers + // we want from the v0 selector plus however many xattrs we have. + orderedHeaders = make([][2]string, 0, 11+len(xAttrKeys)) + + // Copy all headers from v0 excluding the 'mtime' header (the 5th element). + v0headers := v0TarHeaderSelect(h) + orderedHeaders = append(orderedHeaders, v0headers[0:5]...) + orderedHeaders = append(orderedHeaders, v0headers[6:]...) + + // Finally, append the sorted xattrs. + for _, k := range xAttrKeys { + orderedHeaders = append(orderedHeaders, [2]string{k, h.Xattrs[k]}) + } + + return +} + +var registeredHeaderSelectors = map[Version]tarHeaderSelectFunc{ + Version0: v0TarHeaderSelect, + Version1: v1TarHeaderSelect, + VersionDev: v1TarHeaderSelect, +} + +func getTarHeaderSelector(v Version) (tarHeaderSelector, error) { + headerSelector, ok := registeredHeaderSelectors[v] + if !ok { + return nil, ErrVersionNotImplemented + } + + return headerSelector, nil +} diff --git a/pkg/tarsum/versioning_test.go b/pkg/tarsum/versioning_test.go index b851c3be6f..4ddb72ec55 100644 --- a/pkg/tarsum/versioning_test.go +++ b/pkg/tarsum/versioning_test.go @@ -11,11 +11,17 @@ func TestVersion(t *testing.T) { t.Errorf("expected %q, got %q", expected, v.String()) } - expected = "tarsum.dev" + expected = "tarsum.v1" v = 1 if v.String() != expected { t.Errorf("expected %q, got %q", expected, v.String()) } + + expected = "tarsum.dev" + v = 2 + if v.String() != expected { + t.Errorf("expected %q, got %q", expected, v.String()) + } } func TestGetVersion(t *testing.T) { From c81337d5dbcc6a670d24f2faf9b181730fe547e4 Mon Sep 17 00:00:00 2001 From: George Xie Date: Wed, 5 Nov 2014 12:26:17 +0800 Subject: [PATCH 244/592] =?UTF-8?q?unify=20`=E2=80=93`(en=20dash)=20to=20`?= =?UTF-8?q?-`=20when=20specifying=20options?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Signed-off-by: Xie Shi --- docs/sources/reference/api/docker_remote_api_v1.2.md | 2 +- docs/sources/reference/api/docker_remote_api_v1.3.md | 2 +- docs/sources/reference/api/hub_registry_spec.md | 2 +- docs/sources/reference/run.md | 4 ++-- docs/sources/release-notes.md | 4 ++-- 5 files changed, 7 insertions(+), 7 deletions(-) diff --git a/docs/sources/reference/api/docker_remote_api_v1.2.md b/docs/sources/reference/api/docker_remote_api_v1.2.md index 8da486cf94..4a518aea90 100644 --- a/docs/sources/reference/api/docker_remote_api_v1.2.md +++ 
b/docs/sources/reference/api/docker_remote_api_v1.2.md @@ -979,4 +979,4 @@ To enable cross origin requests to the remote api add the flag "--api-enable-cors" when running docker in daemon mode. > docker -d -H="[tcp://192.168.1.9:2375](tcp://192.168.1.9:2375)" -> –api-enable-cors +> -api-enable-cors diff --git a/docs/sources/reference/api/docker_remote_api_v1.3.md b/docs/sources/reference/api/docker_remote_api_v1.3.md index 087262d7c8..30399ea625 100644 --- a/docs/sources/reference/api/docker_remote_api_v1.3.md +++ b/docs/sources/reference/api/docker_remote_api_v1.3.md @@ -1064,4 +1064,4 @@ stdout and stderr on the same socket. This might change in the future. To enable cross origin requests to the remote api add the flag "--api-enable-cors" when running docker in daemon mode. -> docker -d -H="192.168.1.9:2375" –api-enable-cors +> docker -d -H="192.168.1.9:2375" -api-enable-cors diff --git a/docs/sources/reference/api/hub_registry_spec.md b/docs/sources/reference/api/hub_registry_spec.md index 853eda4aee..66724cdc82 100644 --- a/docs/sources/reference/api/hub_registry_spec.md +++ b/docs/sources/reference/api/hub_registry_spec.md @@ -458,7 +458,7 @@ on a private network without having to rely on an external entity controlled by Docker Inc. In this case, the registry will be launched in a special mode -(–standalone? ne? –no-index?). In this mode, the only thing which changes is +(-standalone? ne? -no-index?). In this mode, the only thing which changes is that Registry will never contact the Docker Hub to verify a token. It will be the Registry owner responsibility to authenticate the user who pushes (or even pulls) an image using any mechanism (HTTP auth, IP based, diff --git a/docs/sources/reference/run.md b/docs/sources/reference/run.md index 826e1c8b6c..1abb7d0575 100644 --- a/docs/sources/reference/run.md +++ b/docs/sources/reference/run.md @@ -99,7 +99,7 @@ together in most interactive cases. ## Container identification -### Name (–-name) +### Name (--name) The operator can identify a container in three ways: @@ -218,7 +218,7 @@ container itself as well as `localhost` and a few other common things. The ::1 localhost ip6-localhost ip6-loopback 86.75.30.9 db-static -## Clean up (–-rm) +## Clean up (--rm) By default a container's file system persists even after the container exits. This makes debugging a lot easier (since you can inspect the diff --git a/docs/sources/release-notes.md b/docs/sources/release-notes.md index a74eea43b5..b1b3b2bfdf 100644 --- a/docs/sources/release-notes.md +++ b/docs/sources/release-notes.md @@ -114,7 +114,7 @@ accept an optional maximum restart count (e.g. `on-failure:5`). * `always` – Always restart the container no matter what exit code is returned. This deprecates the `--restart` flag on the Docker daemon. -*New flags for `docker run`: `--cap-add` and `–-cap-drop`* +*New flags for `docker run`: `--cap-add` and `--cap-drop`* In previous releases, Docker containers could either be given complete capabilities or they could all follow a whitelist of allowed capabilities while @@ -127,7 +127,7 @@ This release introduces two new flags for `docker run`, `--cap-add` and `--cap-drop`, that give you fine-grain control over the specific capabilities you want grant to a particular container. -*New `-–device` flag for `docker run`* +*New `--device` flag for `docker run`* Previously, you could only use devices inside your containers by bind mounting them (with `-v`) in a `--privileged` container. 
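In sketch form (the transaction-id handling that follows is left out), the caller now looks like this, and the same byte-oriented helper can later be pointed at a different file for pool-wide metadata:

```
// saveMetadata() after the split: encode, then hand opaque bytes to the generic writer
jsonData, err := json.Marshal(info)
if err != nil {
	return fmt.Errorf("Error encoding metadata to json: %s", err)
}
// writeMetaFile only sees bytes and a destination path; it does not know what is being saved
return devices.writeMetaFile(jsonData, devices.metadataFile(info))
```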
With this release, we introduce From 03f67aa46a07024a98dd9677a3b3e73ea5da80c3 Mon Sep 17 00:00:00 2001 From: unclejack Date: Wed, 5 Nov 2014 10:12:57 +0200 Subject: [PATCH 245/592] bump fpm to 1.3.2 Docker-DCO-1.1-Signed-off-by: Cristian Staretu (github: unclejack) --- Dockerfile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Dockerfile b/Dockerfile index 9fffa63f30..e1c6236da8 100644 --- a/Dockerfile +++ b/Dockerfile @@ -77,7 +77,7 @@ RUN cd /usr/local/go/src && bash -xc 'for platform in $DOCKER_CROSSPLATFORMS; do RUN go get code.google.com/p/go.tools/cmd/cover # TODO replace FPM with some very minimal debhelper stuff -RUN gem install --no-rdoc --no-ri fpm --version 1.0.2 +RUN gem install --no-rdoc --no-ri fpm --version 1.3.2 # Install man page generator RUN mkdir -p /go/src/github.com/cpuguy83 \ From cbb88741e40315c5e92f2be92faedc178cf32d1c Mon Sep 17 00:00:00 2001 From: Blake Geno Date: Wed, 5 Nov 2014 08:36:08 -0500 Subject: [PATCH 246/592] Removed fixme from utils_test.go Signed-off-by: Blake Geno --- integration/utils_test.go | 4 ---- 1 file changed, 4 deletions(-) diff --git a/integration/utils_test.go b/integration/utils_test.go index 1d6e3ec609..da20de586c 100644 --- a/integration/utils_test.go +++ b/integration/utils_test.go @@ -37,10 +37,6 @@ type Fataler interface { func mkDaemon(f Fataler) *daemon.Daemon { eng := newTestEngine(f, false, "") return mkDaemonFromEngine(eng, f) - // FIXME: - // [...] - // Mtu: docker.GetDefaultNetworkMtu(), - // [...] } func createNamedTestContainer(eng *engine.Engine, config *runconfig.Config, f Fataler, name string) (shortId string) { From 67fbd34d8379a1b8232aea5d126a389f64bdc59a Mon Sep 17 00:00:00 2001 From: Vivek Goyal Date: Wed, 5 Nov 2014 09:25:02 -0500 Subject: [PATCH 247/592] devmapper: Move file write and rename functionality in a separate function Currently we save device metadata and have a helper function saveMetadata() which converts data in json format as well as saves it to file. For converting data in json format, one needs to know what is being saved. Break this function down in two functions. One function only has file write capability and takes in argument about byte array of json data. Now this function does not have to know what data is being saved. It only knows about a stream of json data is being saved to a file. This allows me to reuse this function to save a different type of metadata. In this case I am planning to save NextDeviceId so that docker can use this device Id upon next restart. Otherwise docker starts from 0 which is suboptimal. 
Signed-off-by: Vivek Goyal --- daemon/graphdriver/devmapper/deviceset.go | 22 ++++++++++++++++------ 1 file changed, 16 insertions(+), 6 deletions(-) diff --git a/daemon/graphdriver/devmapper/deviceset.go b/daemon/graphdriver/devmapper/deviceset.go index fdfc089a82..7c767fff97 100644 --- a/daemon/graphdriver/devmapper/deviceset.go +++ b/daemon/graphdriver/devmapper/deviceset.go @@ -200,11 +200,8 @@ func (devices *DeviceSet) removeMetadata(info *DevInfo) error { return nil } -func (devices *DeviceSet) saveMetadata(info *DevInfo) error { - jsonData, err := json.Marshal(info) - if err != nil { - return fmt.Errorf("Error encoding metadata to json: %s", err) - } +// Given json data and file path, write it to disk +func (devices *DeviceSet) writeMetaFile(jsonData []byte, filePath string) error { tmpFile, err := ioutil.TempFile(devices.metadataDir(), ".tmp") if err != nil { return fmt.Errorf("Error creating metadata file: %s", err) @@ -223,10 +220,23 @@ func (devices *DeviceSet) saveMetadata(info *DevInfo) error { if err := tmpFile.Close(); err != nil { return fmt.Errorf("Error closing metadata file %s: %s", tmpFile.Name(), err) } - if err := os.Rename(tmpFile.Name(), devices.metadataFile(info)); err != nil { + if err := os.Rename(tmpFile.Name(), filePath); err != nil { return fmt.Errorf("Error committing metadata file %s: %s", tmpFile.Name(), err) } + return nil +} + +func (devices *DeviceSet) saveMetadata(info *DevInfo) error { + jsonData, err := json.Marshal(info) + if err != nil { + return fmt.Errorf("Error encoding metadata to json: %s", err) + } + err = devices.writeMetaFile(jsonData, devices.metadataFile(info)) + if err != nil { + return err + } + if devices.NewTransactionId != devices.TransactionId { if err = setTransactionId(devices.getPoolDevName(), devices.TransactionId, devices.NewTransactionId); err != nil { return fmt.Errorf("Error setting devmapper transition ID: %s", err) From 8e9a18039be6ade0b8db65f7f298959055d86192 Mon Sep 17 00:00:00 2001 From: Vivek Goyal Date: Wed, 5 Nov 2014 09:25:02 -0500 Subject: [PATCH 248/592] devmapper: Export nextDeviceId so that json.Marshal() can operate on it I was trying to save nextDeviceId to a file but it would not work and json.Marshal() will do nothing. Then some search showed that I need to make first letter of struct field capital, exporting this field and now json.Marshal() works. This is a preparatory patch for the next one. 
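For illustration, the pool-global file is tiny; with every other DeviceSet field tagged `json:"-"`, it only carries the next id to hand out (the value below is made up), and loadDeviceSetMetaData() reads it back on the next start:

```
$ cat /var/lib/docker/devmapper/metadata/deviceset-metadata
{"next_device_id":34}
```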
Signed-off-by: Vivek Goyal --- daemon/graphdriver/devmapper/deviceset.go | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/daemon/graphdriver/devmapper/deviceset.go b/daemon/graphdriver/devmapper/deviceset.go index 7c767fff97..c02f369d23 100644 --- a/daemon/graphdriver/devmapper/deviceset.go +++ b/daemon/graphdriver/devmapper/deviceset.go @@ -68,7 +68,7 @@ type DeviceSet struct { devicePrefix string TransactionId uint64 NewTransactionId uint64 - nextDeviceId int + NextDeviceId int // Options dataLoopbackSize int64 @@ -407,7 +407,7 @@ func (devices *DeviceSet) setupBaseImage() error { log.Debugf("Initializing base device-manager snapshot") - id := devices.nextDeviceId + id := devices.NextDeviceId // Create initial device if err := createDevice(devices.getPoolDevName(), &id); err != nil { @@ -415,7 +415,7 @@ func (devices *DeviceSet) setupBaseImage() error { } // Ids are 24bit, so wrap around - devices.nextDeviceId = (id + 1) & 0xffffff + devices.NextDeviceId = (id + 1) & 0xffffff log.Debugf("Registering base device (id %v) with FS size %v", id, devices.baseFsSize) info, err := devices.registerDevice(id, "", devices.baseFsSize) @@ -703,7 +703,7 @@ func (devices *DeviceSet) AddDevice(hash, baseHash string) error { return fmt.Errorf("device %s already exists", hash) } - deviceId := devices.nextDeviceId + deviceId := devices.NextDeviceId if err := createSnapDevice(devices.getPoolDevName(), &deviceId, baseInfo.Name(), baseInfo.DeviceId); err != nil { log.Debugf("Error creating snap device: %s", err) @@ -711,7 +711,7 @@ func (devices *DeviceSet) AddDevice(hash, baseHash string) error { } // Ids are 24bit, so wrap around - devices.nextDeviceId = (deviceId + 1) & 0xffffff + devices.NextDeviceId = (deviceId + 1) & 0xffffff if _, err := devices.registerDevice(deviceId, hash, baseInfo.Size); err != nil { deleteDevice(devices.getPoolDevName(), deviceId) From 8c9e5e5e05f8ddfcf8cd5218edb83d9fe8238d81 Mon Sep 17 00:00:00 2001 From: Vivek Goyal Date: Wed, 5 Nov 2014 09:25:02 -0500 Subject: [PATCH 249/592] devmapper: Save and restore NextDeviceId in a file The way thin-pool right now is designed, user space is supposed to keep track of what device ids have already been used. If user space tries to create a new thin/snap device and device id has already been used, thin pool retuns -EEXIST. Upon receiving -EEXIST, current docker implementation simply tries the NextDeviceId++ and keeps on doing this till it finds a free device id. This approach has two issues. - It is little suboptimal. - If device id already exists, current kenrel implementation spits out a messsage on console. [17991.140135] device-mapper: thin: Creation of new snapshot 33 of device 3 failed. Here kenrel is trying to tell user that device id 33 has already been used. And this shows up for every device id docker tries till it reaches a point where device ids are not used. So if there are thousands of container and one is trying to create a new container after fresh docker start, expect thousands of such warnings to flood console. This patch saves the NextDeviceId in a file in /var/lib/docker/devmapper/metadata/deviceset-metadata and reads it back when docker starts. This way we don't retry lots of device ids which have already been used. There might be some device ids which are free but we will get back to them once device numbers wrap around (24bit limit on device ids). This patch should cut down on number of kernel warnings. Notice that I am creating a deviceset metadata file which is a global file for this pool. 
So down the line if we need to save more data we should be able to do that. Signed-off-by: Vivek Goyal --- daemon/graphdriver/devmapper/deviceset.go | 72 +++++++++++++++++------ 1 file changed, 55 insertions(+), 17 deletions(-) diff --git a/daemon/graphdriver/devmapper/deviceset.go b/daemon/graphdriver/devmapper/deviceset.go index c02f369d23..b193480459 100644 --- a/daemon/graphdriver/devmapper/deviceset.go +++ b/daemon/graphdriver/devmapper/deviceset.go @@ -62,25 +62,25 @@ type MetaData struct { } type DeviceSet struct { - MetaData - sync.Mutex // Protects Devices map and serializes calls into libdevmapper - root string - devicePrefix string - TransactionId uint64 - NewTransactionId uint64 - NextDeviceId int + MetaData `json:"-"` + sync.Mutex `json:"-"` // Protects Devices map and serializes calls into libdevmapper + root string `json:"-"` + devicePrefix string `json:"-"` + TransactionId uint64 `json:"-"` + NewTransactionId uint64 `json:"-"` + NextDeviceId int `json:"next_device_id"` // Options - dataLoopbackSize int64 - metaDataLoopbackSize int64 - baseFsSize uint64 - filesystem string - mountOptions string - mkfsArgs []string - dataDevice string - metadataDevice string - doBlkDiscard bool - thinpBlockSize uint32 + dataLoopbackSize int64 `json:"-"` + metaDataLoopbackSize int64 `json:"-"` + baseFsSize uint64 `json:"-"` + filesystem string `json:"-"` + mountOptions string `json:"-"` + mkfsArgs []string `json:"-"` + dataDevice string `json:"-"` + metadataDevice string `json:"-"` + doBlkDiscard bool `json:"-"` + thinpBlockSize uint32 `json:"-"` } type DiskUsage struct { @@ -138,6 +138,10 @@ func (devices *DeviceSet) metadataFile(info *DevInfo) string { return path.Join(devices.metadataDir(), file) } +func (devices *DeviceSet) deviceSetMetaFile() string { + return path.Join(devices.metadataDir(), "deviceset-metadata") +} + func (devices *DeviceSet) oldMetadataFile() string { return path.Join(devices.loopbackDir(), "json") } @@ -545,6 +549,34 @@ func (devices *DeviceSet) ResizePool(size int64) error { return nil } +func (devices *DeviceSet) loadDeviceSetMetaData() error { + jsonData, err := ioutil.ReadFile(devices.deviceSetMetaFile()) + if err != nil { + return nil + } + + if err := json.Unmarshal(jsonData, devices); err != nil { + return nil + } + + return nil +} + +func (devices *DeviceSet) saveDeviceSetMetaData() error { + jsonData, err := json.Marshal(devices) + + if err != nil { + return fmt.Errorf("Error encoding metadata to json: %s", err) + } + + err = devices.writeMetaFile(jsonData, devices.deviceSetMetaFile()) + if err != nil { + return err + } + + return nil +} + func (devices *DeviceSet) initDevmapper(doInit bool) error { logInit(devices) @@ -676,6 +708,10 @@ func (devices *DeviceSet) initDevmapper(doInit bool) error { } } + // Right now this loads only NextDeviceId. If there is more metatadata + // down the line, we might have to move it earlier. 
+ devices.loadDeviceSetMetaData() + // Setup the base image if doInit { if err := devices.setupBaseImage(); err != nil { @@ -955,6 +991,8 @@ func (devices *DeviceSet) Shutdown() error { if err := devices.deactivatePool(); err != nil { log.Debugf("Shutdown deactivate pool , error: %s", err) } + + devices.saveDeviceSetMetaData() devices.Unlock() return nil From e86223e7b3ea7766164d809d2fbd463870e0614a Mon Sep 17 00:00:00 2001 From: Brian Goff Date: Tue, 21 Oct 2014 14:44:06 -0400 Subject: [PATCH 250/592] Bring API docs inline with 1.3 Signed-off-by: Brian Goff --- .../reference/api/docker_remote_api.md | 12 ++ .../reference/api/docker_remote_api_v1.15.md | 130 ++++++++++++++++-- .../reference/api/docker_remote_api_v1.16.md | 111 +++++++++++---- 3 files changed, 211 insertions(+), 42 deletions(-) diff --git a/docs/sources/reference/api/docker_remote_api.md b/docs/sources/reference/api/docker_remote_api.md index 3babab8eca..1db0f76928 100644 --- a/docs/sources/reference/api/docker_remote_api.md +++ b/docs/sources/reference/api/docker_remote_api.md @@ -56,6 +56,12 @@ total memory available (`MemTotal`). **New!** You can set the new container's MAC address explicitly. +`POST /containers/(id)/start` + +**New!** +Passing the container's `HostConfig` on start is now deprecated. You should +set this when creating the container. + ## v1.15 ### Full Documentation @@ -64,6 +70,12 @@ You can set the new container's MAC address explicitly. ### What's new +`POST /containers/create` + +**New!** +It is now possible to set a container's HostConfig when creating a container. +Previously this was only available when starting a container. + ## v1.14 ### Full Documentation diff --git a/docs/sources/reference/api/docker_remote_api_v1.15.md b/docs/sources/reference/api/docker_remote_api_v1.15.md index 513c9f3c55..f5ce896c5f 100644 --- a/docs/sources/reference/api/docker_remote_api_v1.15.md +++ b/docs/sources/reference/api/docker_remote_api_v1.15.md @@ -117,7 +117,6 @@ Create a container "AttachStdin":false, "AttachStdout":true, "AttachStderr":true, - "PortSpecs":null, "Tty":false, "OpenStdin":false, "StdinOnce":false, @@ -125,6 +124,7 @@ Create a container "Cmd":[ "date" ], + "Entrypoint": "" "Image":"base", "Volumes":{ "/tmp": {} @@ -135,7 +135,23 @@ Create a container "ExposedPorts":{ "22/tcp": {} }, - "RestartPolicy": { "Name": "always" } + "SecurityOpts": [""], + "HostConfig": { + "Binds":["/tmp:/tmp"], + "Links":["redis3:redis"], + "LxcConf":{"lxc.utsname":"docker"}, + "PortBindings":{ "22/tcp": [{ "HostPort": "11022" }] }, + "PublishAllPorts":false, + "Privileged":false, + "Dns": ["8.8.8.8"], + "DnsSearch": [""], + "VolumesFrom": ["parent", "other:ro"], + "CapAdd": ["NET_ADMIN"], + "CapDrop": ["MKNOD"], + "RestartPolicy": { "Name": "", "MaximumRetryCount": 0 }, + "NetworkMode": "bridge", + "Devices": [] + } } **Example response**: @@ -144,21 +160,78 @@ Create a container Content-Type: application/json { - "Id":"e90e34656806" + "Id":"f91ddc4b01e079c4481a8340bbbeca4dbd33d6e4a10662e499f8eacbb5bf252b" "Warnings":[] } Json Parameters: -- **RestartPolicy** – The behavior to apply when the container exits. The - value is an object with a `Name` property of either `"always"` to - always restart or `"on-failure"` to restart only when the container - exit code is non-zero. If `on-failure` is used, `MaximumRetryCount` - controls the number of times to retry before giving up. - The default is not to restart. (optional) +- **Hostname** - A string value containing the desired hostname to use for the + container. 
+- **Domainname** - A string value containing the desired domain name to use + for the container. +- **User** - A string value containg the user to use inside the container. +- **Memory** - Memory limit in bytes. +- **MemorySwap**- Total memory usage (memory + swap); set `-1` to disable swap. +- **CpuShares** - An integer value containing the CPU Shares for container + (ie. the relative weight vs othercontainers). + **CpuSet** - String value containg the cgroups Cpuset to use. +- **AttachStdin** - Boolean value, attaches to stdin. +- **AttachStdout** - Boolean value, attaches to stdout. +- **AttachStderr** - Boolean value, attaches to stderr. +- **Tty** - Boolean value, Attach standard streams to a tty, including stdin if it is not closed. +- **OpenStdin** - Boolean value, opens stdin, +- **StdinOnce** - Boolean value, close stdin after the 1 attached client disconnects. +- **Env** - A list of environment variables in the form of `VAR=value` +- **Cmd** - Command to run specified as a string or an array of strings. +- **Entrypoint** - Set the entrypoint for the container a a string or an array + of strings +- **Image** - String value containing the image name to use for the container - **Volumes** – An object mapping mountpoint paths (strings) inside the container to empty objects. -- **config** – the container's configuration +- **WorkingDir** - A string value containing the working dir for commands to + run in. +- **NetworkDisabled** - Boolean value, when true disables neworking for the + container +- **ExposedPorts** - An object mapping ports to an empty object in the form of: + `"ExposedPorts": { "/: {}" }` +- **SecurityOpts**: A list of string values to customize labels for MLS + systems, such as SELinux. +- **HostConfig** + - **Binds** – A list of volume bindings for this container. Each volume + binding is a string of the form `container_path` (to create a new + volume for the container), `host_path:container_path` (to bind-mount + a host path into the container), or `host_path:container_path:ro` + (to make the bind-mount read-only inside the container). + - **Links** - A list of links for the container. Each link entry should be of + of the form "container_name:alias". + - **LxcConf** - LXC specific configurations. These configurations will only + work when using the `lxc` execution driver. + - **PortBindings** - A map of exposed container ports and the host port they + should map to. It should be specified in the form + `{ /: [{ "HostPort": "" }] }` + Take note that `port` is specified as a string and not an integer value. + - **PublishAllPorts** - Allocates a random host port for all of a container's + exposed ports. Specified as a boolean value. + - **Privileged** - Gives the container full access to the host. Specified as + a boolean value. + - **Dns** - A list of dns servers for the container to use. + - **DnsSearch** - A list of DNS search domains + - **VolumesFrom** - A list of volumes to inherit from another container. + Specified in the form `[:]` + - **CapAdd** - A list of kernel capabilties to add to the container. + - **Capdrop** - A list of kernel capabilties to drop from the container. + - **RestartPolicy** – The behavior to apply when the container exits. The + value is an object with a `Name` property of either `"always"` to + always restart or `"on-failure"` to restart only when the container + exit code is non-zero. If `on-failure` is used, `MaximumRetryCount` + controls the number of times to retry before giving up. + The default is not to restart. 
(optional) + - **NetworkMode** - Sets the networking mode for the container. Supported + values are: `bridge`, `host`, and `container:` + - **Devices** - A list of devices to add to the container specified in the + form + `{ "PathOnHost": "/dev/deviceName", "PathInContainer": "/dev/deviceName", "CgroupPermissions": "mrw"}` Query Parameters: @@ -437,9 +510,13 @@ Start the container `id` "PublishAllPorts":false, "Privileged":false, "Dns": ["8.8.8.8"], + "DnsSearch": [""], "VolumesFrom": ["parent", "other:ro"], "CapAdd": ["NET_ADMIN"], - "CapDrop": ["MKNOD"] + "CapDrop": ["MKNOD"], + "RestartPolicy": { "Name": "", "MaximumRetryCount": 0 }, + "NetworkMode": "bridge", + "Devices": [] } **Example response**: @@ -447,13 +524,40 @@ Start the container `id` HTTP/1.1 204 No Content Json Parameters: - - **Binds** – A list of volume bindings for this container. Each volume binding is a string of the form `container_path` (to create a new volume for the container), `host_path:container_path` (to bind-mount a host path into the container), or `host_path:container_path:ro` (to make the bind-mount read-only inside the container). -- **hostConfig** – the container's host configuration (optional) +- **Links** - A list of links for the container. Each link entry should be of + of the form "container_name:alias". +- **LxcConf** - LXC specific configurations. These configurations will only + work when using the `lxc` execution driver. +- **PortBindings** - A map of exposed container ports and the host port they + should map to. It should be specified in the form + `{ /: [{ "HostPort": "" }] }` + Take note that `port` is specified as a string and not an integer value. +- **PublishAllPorts** - Allocates a random host port for all of a container's + exposed ports. Specified as a boolean value. +- **Privileged** - Gives the container full access to the host. Specified as + a boolean value. +- **Dns** - A list of dns servers for the container to use. +- **DnsSearch** - A list of DNS search domains +- **VolumesFrom** - A list of volumes to inherit from another container. + Specified in the form `[:]` +- **CapAdd** - A list of kernel capabilties to add to the container. +- **Capdrop** - A list of kernel capabilties to drop from the container. +- **RestartPolicy** – The behavior to apply when the container exits. The + value is an object with a `Name` property of either `"always"` to + always restart or `"on-failure"` to restart only when the container + exit code is non-zero. If `on-failure` is used, `MaximumRetryCount` + controls the number of times to retry before giving up. + The default is not to restart. (optional) +- **NetworkMode** - Sets the networking mode for the container. 
Supported + values are: `bridge`, `host`, and `container:` +- **Devices** - A list of devices to add to the container specified in the + form + `{ "PathOnHost": "/dev/deviceName", "PathInContainer": "/dev/deviceName", "CgroupPermissions": "mrw"}` Status Codes: diff --git a/docs/sources/reference/api/docker_remote_api_v1.16.md b/docs/sources/reference/api/docker_remote_api_v1.16.md index e7a889e539..113b97e462 100644 --- a/docs/sources/reference/api/docker_remote_api_v1.16.md +++ b/docs/sources/reference/api/docker_remote_api_v1.16.md @@ -117,7 +117,6 @@ Create a container "AttachStdin":false, "AttachStdout":true, "AttachStderr":true, - "PortSpecs":null, "Tty":false, "OpenStdin":false, "StdinOnce":false, @@ -125,6 +124,7 @@ Create a container "Cmd":[ "date" ], + "Entrypoint": "" "Image":"base", "Volumes":{ "/tmp": {} @@ -135,7 +135,23 @@ Create a container "ExposedPorts":{ "22/tcp": {} }, - "RestartPolicy": { "Name": "always" } + "SecurityOpts": [""], + "HostConfig": { + "Binds":["/tmp:/tmp"], + "Links":["redis3:redis"], + "LxcConf":{"lxc.utsname":"docker"}, + "PortBindings":{ "22/tcp": [{ "HostPort": "11022" }] }, + "PublishAllPorts":false, + "Privileged":false, + "Dns": ["8.8.8.8"], + "DnsSearch": [""], + "VolumesFrom": ["parent", "other:ro"], + "CapAdd": ["NET_ADMIN"], + "CapDrop": ["MKNOD"], + "RestartPolicy": { "Name": "", "MaximumRetryCount": 0 }, + "NetworkMode": "bridge", + "Devices": [] + } } **Example response**: @@ -150,15 +166,72 @@ Create a container Json Parameters: -- **RestartPolicy** – The behavior to apply when the container exits. The - value is an object with a `Name` property of either `"always"` to - always restart or `"on-failure"` to restart only when the container - exit code is non-zero. If `on-failure` is used, `MaximumRetryCount` - controls the number of times to retry before giving up. - The default is not to restart. (optional) +- **Hostname** - A string value containing the desired hostname to use for the + container. +- **Domainname** - A string value containing the desired domain name to use + for the container. +- **User** - A string value containg the user to use inside the container. +- **Memory** - Memory limit in bytes. +- **MemorySwap**- Total memory usage (memory + swap); set `-1` to disable swap. +- **CpuShares** - An integer value containing the CPU Shares for container + (ie. the relative weight vs othercontainers). + **CpuSet** - String value containg the cgroups Cpuset to use. +- **AttachStdin** - Boolean value, attaches to stdin. +- **AttachStdout** - Boolean value, attaches to stdout. +- **AttachStderr** - Boolean value, attaches to stderr. +- **Tty** - Boolean value, Attach standard streams to a tty, including stdin if it is not closed. +- **OpenStdin** - Boolean value, opens stdin, +- **StdinOnce** - Boolean value, close stdin after the 1 attached client disconnects. +- **Env** - A list of environment variables in the form of `VAR=value` +- **Cmd** - Command to run specified as a string or an array of strings. +- **Entrypoint** - Set the entrypoint for the container a a string or an array + of strings +- **Image** - String value containing the image name to use for the container - **Volumes** – An object mapping mountpoint paths (strings) inside the container to empty objects. -- **config** – the container's configuration +- **WorkingDir** - A string value containing the working dir for commands to + run in. 
+- **NetworkDisabled** - Boolean value, when true disables neworking for the + container +- **ExposedPorts** - An object mapping ports to an empty object in the form of: + `"ExposedPorts": { "/: {}" }` +- **SecurityOpts**: A list of string values to customize labels for MLS + systems, such as SELinux. +- **HostConfig** + - **Binds** – A list of volume bindings for this container. Each volume + binding is a string of the form `container_path` (to create a new + volume for the container), `host_path:container_path` (to bind-mount + a host path into the container), or `host_path:container_path:ro` + (to make the bind-mount read-only inside the container). + - **Links** - A list of links for the container. Each link entry should be of + of the form "container_name:alias". + - **LxcConf** - LXC specific configurations. These configurations will only + work when using the `lxc` execution driver. + - **PortBindings** - A map of exposed container ports and the host port they + should map to. It should be specified in the form + `{ /: [{ "HostPort": "" }] }` + Take note that `port` is specified as a string and not an integer value. + - **PublishAllPorts** - Allocates a random host port for all of a container's + exposed ports. Specified as a boolean value. + - **Privileged** - Gives the container full access to the host. Specified as + a boolean value. + - **Dns** - A list of dns servers for the container to use. + - **DnsSearch** - A list of DNS search domains + - **VolumesFrom** - A list of volumes to inherit from another container. + Specified in the form `[:]` + - **CapAdd** - A list of kernel capabilties to add to the container. + - **Capdrop** - A list of kernel capabilties to drop from the container. + - **RestartPolicy** – The behavior to apply when the container exits. The + value is an object with a `Name` property of either `"always"` to + always restart or `"on-failure"` to restart only when the container + exit code is non-zero. If `on-failure` is used, `MaximumRetryCount` + controls the number of times to retry before giving up. + The default is not to restart. (optional) + - **NetworkMode** - Sets the networking mode for the container. Supported + values are: `bridge`, `host`, and `container:` + - **Devices** - A list of devices to add to the container specified in the + form + `{ "PathOnHost": "/dev/deviceName", "PathInContainer": "/dev/deviceName", "CgroupPermissions": "mrw"}` Query Parameters: @@ -429,32 +502,12 @@ Start the container `id` POST /containers/(id)/start HTTP/1.1 Content-Type: application/json - { - "Binds":["/tmp:/tmp"], - "Links":["redis3:redis"], - "LxcConf":{"lxc.utsname":"docker"}, - "PortBindings":{ "22/tcp": [{ "HostPort": "11022" }] }, - "PublishAllPorts":false, - "Privileged":false, - "Dns": ["8.8.8.8"], - "VolumesFrom": ["parent", "other:ro"], - "CapAdd": ["NET_ADMIN"], - "CapDrop": ["MKNOD"] - } - **Example response**: HTTP/1.1 204 No Content Json Parameters: -- **Binds** – A list of volume bindings for this container. Each volume - binding is a string of the form `container_path` (to create a new - volume for the container), `host_path:container_path` (to bind-mount - a host path into the container), or `host_path:container_path:ro` - (to make the bind-mount read-only inside the container). 
-- **hostConfig** – the container's host configuration (optional) - Status Codes: - **204** – no error From aeeb0d59d3ed40c3b0d9cecd3c19a52f005dd140 Mon Sep 17 00:00:00 2001 From: "Daniel, Dao Quang Minh" Date: Wed, 24 Sep 2014 03:26:53 -0400 Subject: [PATCH 251/592] print everything except progress in non-terminal Instead of only checking `Progress != nil` (which is always true because the server sends `"progressDetail":{}` when it doesnt have the progress), we also check if `Progress.String() != ""`, which should be sufficient to filter out the progress data. Docker-DCO-1.1-Signed-off-by: Daniel, Dao Quang Minh (github: dqminh) --- utils/jsonmessage.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/utils/jsonmessage.go b/utils/jsonmessage.go index 3752c997f1..bdc47f0e1d 100644 --- a/utils/jsonmessage.go +++ b/utils/jsonmessage.go @@ -97,7 +97,7 @@ func (jm *JSONMessage) Display(out io.Writer, isTerminal bool) error { // [2K = erase entire current line fmt.Fprintf(out, "%c[2K\r", 27) endl = "\r" - } else if jm.Progress != nil { //disable progressbar in non-terminal + } else if jm.Progress != nil && jm.Progress.String() != "" { //disable progressbar in non-terminal return nil } if jm.Time != 0 { @@ -109,7 +109,7 @@ func (jm *JSONMessage) Display(out io.Writer, isTerminal bool) error { if jm.From != "" { fmt.Fprintf(out, "(from %s) ", jm.From) } - if jm.Progress != nil { + if jm.Progress != nil && isTerminal { fmt.Fprintf(out, "%s %s%s", jm.Status, jm.Progress.String(), endl) } else if jm.ProgressMessage != "" { //deprecated fmt.Fprintf(out, "%s %s%s", jm.Status, jm.ProgressMessage, endl) From 18d9f1978b311ff9cadce9f0237313db14502f9f Mon Sep 17 00:00:00 2001 From: Alexandr Morozov Date: Wed, 5 Nov 2014 08:26:22 -0800 Subject: [PATCH 252/592] Fix vet errors Signed-off-by: Alexandr Morozov --- docker/docker.go | 2 +- integration-cli/docker_cli_build_test.go | 8 ++++---- integration-cli/docker_cli_history_test.go | 2 +- integration-cli/docker_cli_info_test.go | 2 +- integration-cli/docker_cli_pull_test.go | 4 ++-- integration-cli/docker_cli_push_test.go | 6 +++--- integration-cli/docker_cli_rm_test.go | 2 +- integration-cli/docker_cli_search_test.go | 2 +- integration-cli/docker_cli_version_test.go | 2 +- 9 files changed, 15 insertions(+), 15 deletions(-) diff --git a/docker/docker.go b/docker/docker.go index 16965452ae..92cdd95e0f 100644 --- a/docker/docker.go +++ b/docker/docker.go @@ -109,7 +109,7 @@ func main() { if err := cli.Cmd(flag.Args()...); err != nil { if sterr, ok := err.(*utils.StatusError); ok { if sterr.Status != "" { - log.Println("%s", sterr.Status) + log.Println(sterr.Status) } os.Exit(sterr.StatusCode) } diff --git a/integration-cli/docker_cli_build_test.go b/integration-cli/docker_cli_build_test.go index 68d607521e..59d09f54d4 100644 --- a/integration-cli/docker_cli_build_test.go +++ b/integration-cli/docker_cli_build_test.go @@ -218,7 +218,7 @@ func TestBuildEnvironmentReplacementEnv(t *testing.T) { if parts[0] == "bar" { found = true if parts[1] != "foo" { - t.Fatal("Could not find replaced var for env `bar`: got %q instead of `foo`", parts[1]) + t.Fatalf("Could not find replaced var for env `bar`: got %q instead of `foo`", parts[1]) } } } @@ -1224,7 +1224,7 @@ func TestBuildCopyDisallowRemote(t *testing.T) { COPY https://index.docker.io/robots.txt /`, true) if err == nil || !strings.Contains(out, "Source can't be a URL for COPY") { - t.Fatal("Error should be about disallowed remote source, got err: %s, out: %q", err, out) + t.Fatalf("Error should 
be about disallowed remote source, got err: %s, out: %q", err, out) } logDone("build - copy - disallow copy from remote") } @@ -1374,7 +1374,7 @@ func TestBuildForceRm(t *testing.T) { buildCmd := exec.Command(dockerBinary, "build", "-t", name, "--force-rm", ".") buildCmd.Dir = ctx.Dir if out, _, err := runCommandWithOutput(buildCmd); err == nil { - t.Fatal("failed to build the image: %s, %v", out, err) + t.Fatalf("failed to build the image: %s, %v", out, err) } containerCountAfter, err := getContainerCount() @@ -3181,7 +3181,7 @@ func TestBuildEntrypointInheritance(t *testing.T) { status, _ = runCommand(exec.Command(dockerBinary, "run", "child")) if status != 5 { - t.Fatal("expected exit code 5 but received %d", status) + t.Fatalf("expected exit code 5 but received %d", status) } logDone("build - clear entrypoint") diff --git a/integration-cli/docker_cli_history_test.go b/integration-cli/docker_cli_history_test.go index c6355374dd..3ae9ffb45d 100644 --- a/integration-cli/docker_cli_history_test.go +++ b/integration-cli/docker_cli_history_test.go @@ -47,7 +47,7 @@ RUN echo "Z"`, out, exitCode, err := runCommandWithOutput(exec.Command(dockerBinary, "history", "testbuildhistory")) if err != nil || exitCode != 0 { - t.Fatal("failed to get image history: %s, %v", out, err) + t.Fatalf("failed to get image history: %s, %v", out, err) } actualValues := strings.Split(out, "\n")[1:27] diff --git a/integration-cli/docker_cli_info_test.go b/integration-cli/docker_cli_info_test.go index ac6fa5f0a2..2e8239a4b3 100644 --- a/integration-cli/docker_cli_info_test.go +++ b/integration-cli/docker_cli_info_test.go @@ -11,7 +11,7 @@ func TestInfoEnsureSucceeds(t *testing.T) { versionCmd := exec.Command(dockerBinary, "info") out, exitCode, err := runCommandWithOutput(versionCmd) if err != nil || exitCode != 0 { - t.Fatal("failed to execute docker info: %s, %v", out, err) + t.Fatalf("failed to execute docker info: %s, %v", out, err) } stringsToCheck := []string{"Containers:", "Execution Driver:", "Kernel Version:"} diff --git a/integration-cli/docker_cli_pull_test.go b/integration-cli/docker_cli_pull_test.go index 7ad6f13710..b67b1caca5 100644 --- a/integration-cli/docker_cli_pull_test.go +++ b/integration-cli/docker_cli_pull_test.go @@ -11,7 +11,7 @@ import ( func TestPullImageFromCentralRegistry(t *testing.T) { pullCmd := exec.Command(dockerBinary, "pull", "scratch") if out, _, err := runCommandWithOutput(pullCmd); err != nil { - t.Fatal("pulling the scratch image from the registry has failed: %s, %v", out, err) + t.Fatalf("pulling the scratch image from the registry has failed: %s, %v", out, err) } logDone("pull - pull scratch") } @@ -20,7 +20,7 @@ func TestPullImageFromCentralRegistry(t *testing.T) { func TestPullNonExistingImage(t *testing.T) { pullCmd := exec.Command(dockerBinary, "pull", "fooblahblah1234") if out, _, err := runCommandWithOutput(pullCmd); err == nil { - t.Fatal("expected non-zero exit status when pulling non-existing image: %s", out) + t.Fatalf("expected non-zero exit status when pulling non-existing image: %s", out) } logDone("pull - pull fooblahblah1234 (non-existing image)") } diff --git a/integration-cli/docker_cli_push_test.go b/integration-cli/docker_cli_push_test.go index 5db359bf2d..0dfd85a9d4 100644 --- a/integration-cli/docker_cli_push_test.go +++ b/integration-cli/docker_cli_push_test.go @@ -16,12 +16,12 @@ func TestPushBusyboxImage(t *testing.T) { repoName := fmt.Sprintf("%v/busybox", privateRegistryURL) tagCmd := exec.Command(dockerBinary, "tag", "busybox", repoName) if out, _, 
err := runCommandWithOutput(tagCmd); err != nil { - t.Fatal("image tagging failed: %s, %v", out, err) + t.Fatalf("image tagging failed: %s, %v", out, err) } pushCmd := exec.Command(dockerBinary, "push", repoName) if out, _, err := runCommandWithOutput(pushCmd); err != nil { - t.Fatal("pushing the image to the private registry has failed: %s, %v", out, err) + t.Fatalf("pushing the image to the private registry has failed: %s, %v", out, err) } deleteImages(repoName) @@ -35,7 +35,7 @@ func TestPushUnprefixedRepo(t *testing.T) { t.Skip() pushCmd := exec.Command(dockerBinary, "push", "busybox") if out, _, err := runCommandWithOutput(pushCmd); err == nil { - t.Fatal("pushing an unprefixed repo didn't result in a non-zero exit status: %s", out) + t.Fatalf("pushing an unprefixed repo didn't result in a non-zero exit status: %s", out) } logDone("push - push unprefixed busybox repo --> must fail") } diff --git a/integration-cli/docker_cli_rm_test.go b/integration-cli/docker_cli_rm_test.go index 22ecc363bd..bac7490d55 100644 --- a/integration-cli/docker_cli_rm_test.go +++ b/integration-cli/docker_cli_rm_test.go @@ -114,7 +114,7 @@ func TestRmInvalidContainer(t *testing.T) { if out, _, err := runCommandWithOutput(exec.Command(dockerBinary, "rm", "unknown")); err == nil { t.Fatal("Expected error on rm unknown container, got none") } else if !strings.Contains(out, "failed to remove one or more containers") { - t.Fatal("Expected output to contain 'failed to remove one or more containers', got %q", out) + t.Fatalf("Expected output to contain 'failed to remove one or more containers', got %q", out) } logDone("rm - delete unknown container") diff --git a/integration-cli/docker_cli_search_test.go b/integration-cli/docker_cli_search_test.go index 946c34dc9c..fafb5df750 100644 --- a/integration-cli/docker_cli_search_test.go +++ b/integration-cli/docker_cli_search_test.go @@ -11,7 +11,7 @@ func TestSearchOnCentralRegistry(t *testing.T) { searchCmd := exec.Command(dockerBinary, "search", "busybox") out, exitCode, err := runCommandWithOutput(searchCmd) if err != nil || exitCode != 0 { - t.Fatal("failed to search on the central registry: %s, %v", out, err) + t.Fatalf("failed to search on the central registry: %s, %v", out, err) } if !strings.Contains(out, "Busybox base image.") { diff --git a/integration-cli/docker_cli_version_test.go b/integration-cli/docker_cli_version_test.go index bb9942593d..0759ba6767 100644 --- a/integration-cli/docker_cli_version_test.go +++ b/integration-cli/docker_cli_version_test.go @@ -11,7 +11,7 @@ func TestVersionEnsureSucceeds(t *testing.T) { versionCmd := exec.Command(dockerBinary, "version") out, _, err := runCommandWithOutput(versionCmd) if err != nil { - t.Fatal("failed to execute docker version: %s, %v", out, err) + t.Fatalf("failed to execute docker version: %s, %v", out, err) } stringsToCheck := []string{ From 059e589c3427b1afa1b112acc931f59b5b063e16 Mon Sep 17 00:00:00 2001 From: Josh Hawn Date: Tue, 4 Nov 2014 15:23:21 -0800 Subject: [PATCH 253/592] Compute TarSum on storage of image layer content Now, newly created/imported layers will have the checksum of the layer diff computed and stored in the image json file. For now, it is not an error if the computed checksum does not match an existing checksum, only a warning message is logged. The eventual goal is to use the checksums in the image JSON to verify the integrity of the layer contents when doing `docker load` or `docker pull`, and error out if it does not match. 
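For reference, a minimal sketch of the checksum flow this introduces (illustrative only — the real change lives in `StoreImage` in the diff below; the `computeLayerChecksum` helper and the `layer.tar.gz` path are made up for the example): the compressed layer stream is decompressed, read through a `tarsum` reader, and the resulting sum is what gets recorded in the image JSON.

```
// Illustrative helper, not part of this patch: compute a layer checksum the
// same way StoreImage does. StoreImage consumes the stream via
// driver.ApplyDiff; here we simply drain it.
package main

import (
	"fmt"
	"io"
	"io/ioutil"
	"log"
	"os"

	"github.com/docker/docker/pkg/archive"
	"github.com/docker/docker/pkg/tarsum"
)

func computeLayerChecksum(layerData io.Reader) (string, error) {
	decompressed, err := archive.DecompressStream(layerData)
	if err != nil {
		return "", err
	}
	defer decompressed.Close()

	ts, err := tarsum.NewTarSum(decompressed, true, tarsum.VersionDev)
	if err != nil {
		return "", err
	}
	// TarSum only hashes what is read through it, so the stream must be
	// fully drained before asking for the sum.
	if _, err := io.Copy(ioutil.Discard, ts); err != nil {
		return "", err
	}
	return ts.Sum(nil), nil // e.g. "tarsum.dev+sha256:..."
}

func main() {
	f, err := os.Open("layer.tar.gz") // hypothetical layer archive
	if err != nil {
		log.Fatal(err)
	}
	defer f.Close()

	sum, err := computeLayerChecksum(f)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(sum)
}
```

The patch itself wires this same reader into `driver.ApplyDiff`, so the checksum is computed as a side effect of unpacking the layer rather than in a second pass.
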
Docker-DCO-1.1-Signed-off-by: Josh Hawn (github: jlhawn) --- graph/service.go | 1 + image/image.go | 34 ++++++++++++++++++++++++++++++---- 2 files changed, 31 insertions(+), 4 deletions(-) diff --git a/graph/service.go b/graph/service.go index 6f020e8d02..a27c9a8e38 100644 --- a/graph/service.go +++ b/graph/service.go @@ -150,6 +150,7 @@ func (s *TagStore) CmdLookup(job *engine.Job) engine.Status { out.Set("Os", image.OS) out.SetInt64("Size", image.Size) out.SetInt64("VirtualSize", image.GetParentsSize(0)+image.Size) + out.Set("Checksum", image.Checksum) if _, err = out.WriteTo(job.Stdout); err != nil { return job.Error(err) } diff --git a/image/image.go b/image/image.go index dfa8e9a6e3..e58516bcb1 100644 --- a/image/image.go +++ b/image/image.go @@ -11,6 +11,7 @@ import ( log "github.com/Sirupsen/logrus" "github.com/docker/docker/pkg/archive" + "github.com/docker/docker/pkg/tarsum" "github.com/docker/docker/runconfig" "github.com/docker/docker/utils" ) @@ -32,6 +33,7 @@ type Image struct { Config *runconfig.Config `json:"config,omitempty"` Architecture string `json:"architecture,omitempty"` OS string `json:"os,omitempty"` + Checksum string `json:"checksum"` Size int64 graph Graph @@ -74,19 +76,43 @@ func LoadImage(root string) (*Image, error) { return img, nil } +// StoreImage stores file system layer data for the given image to the +// image's registered storage driver. Image metadata is stored in a file +// at the specified root directory. This function also computes the TarSum +// of `layerData` (currently using tarsum.dev). func StoreImage(img *Image, layerData archive.ArchiveReader, root string) error { // Store the layer var ( - size int64 - err error - driver = img.graph.Driver() + size int64 + err error + driver = img.graph.Driver() + layerTarSum tarsum.TarSum ) // If layerData is not nil, unpack it into the new layer if layerData != nil { - if size, err = driver.ApplyDiff(img.ID, img.Parent, layerData); err != nil { + layerDataDecompressed, err := archive.DecompressStream(layerData) + if err != nil { return err } + + defer layerDataDecompressed.Close() + + if layerTarSum, err = tarsum.NewTarSum(layerDataDecompressed, true, tarsum.VersionDev); err != nil { + return err + } + + if size, err = driver.ApplyDiff(img.ID, img.Parent, layerTarSum); err != nil { + return err + } + + checksum := layerTarSum.Sum(nil) + + if img.Checksum != "" && img.Checksum != checksum { + log.Warn("image layer checksum mismatch: computed %q, expected %q", checksum, img.Checksum) + } + + img.Checksum = checksum } img.Size = size From ff56531de47c08157b2a37e6c6b6189a5006dba2 Mon Sep 17 00:00:00 2001 From: Vivek Goyal Date: Wed, 5 Nov 2014 14:39:54 -0500 Subject: [PATCH 254/592] devmapper: Fix gofmt related build failures My pull request failed the build due to gofmat issues. I have run gofmt on specified files and this commit fixes it. 
Signed-off-by: Vivek Goyal --- daemon/graphdriver/devmapper/deviceset.go | 34 +++++++++++------------ 1 file changed, 17 insertions(+), 17 deletions(-) diff --git a/daemon/graphdriver/devmapper/deviceset.go b/daemon/graphdriver/devmapper/deviceset.go index b193480459..e661dbb7ad 100644 --- a/daemon/graphdriver/devmapper/deviceset.go +++ b/daemon/graphdriver/devmapper/deviceset.go @@ -62,25 +62,25 @@ type MetaData struct { } type DeviceSet struct { - MetaData `json:"-"` - sync.Mutex `json:"-"` // Protects Devices map and serializes calls into libdevmapper - root string `json:"-"` - devicePrefix string `json:"-"` - TransactionId uint64 `json:"-"` - NewTransactionId uint64 `json:"-"` - NextDeviceId int `json:"next_device_id"` + MetaData `json:"-"` + sync.Mutex `json:"-"` // Protects Devices map and serializes calls into libdevmapper + root string `json:"-"` + devicePrefix string `json:"-"` + TransactionId uint64 `json:"-"` + NewTransactionId uint64 `json:"-"` + NextDeviceId int `json:"next_device_id"` // Options - dataLoopbackSize int64 `json:"-"` - metaDataLoopbackSize int64 `json:"-"` - baseFsSize uint64 `json:"-"` - filesystem string `json:"-"` - mountOptions string `json:"-"` - mkfsArgs []string `json:"-"` - dataDevice string `json:"-"` - metadataDevice string `json:"-"` - doBlkDiscard bool `json:"-"` - thinpBlockSize uint32 `json:"-"` + dataLoopbackSize int64 `json:"-"` + metaDataLoopbackSize int64 `json:"-"` + baseFsSize uint64 `json:"-"` + filesystem string `json:"-"` + mountOptions string `json:"-"` + mkfsArgs []string `json:"-"` + dataDevice string `json:"-"` + metadataDevice string `json:"-"` + doBlkDiscard bool `json:"-"` + thinpBlockSize uint32 `json:"-"` } type DiskUsage struct { From d453d8b3210310f12a66a64a57b2946ce93653b8 Mon Sep 17 00:00:00 2001 From: golubbe Date: Tue, 4 Nov 2014 10:34:15 -0800 Subject: [PATCH 255/592] Create GOVERNANCE.md Information on Governance Advisory Board and associated proposals Signed-off-by: Ben Golub --- GOVERNANCE.md | 17 +++++++++++++++++ 1 file changed, 17 insertions(+) create mode 100644 GOVERNANCE.md diff --git a/GOVERNANCE.md b/GOVERNANCE.md new file mode 100644 index 0000000000..52a8bf05d6 --- /dev/null +++ b/GOVERNANCE.md @@ -0,0 +1,17 @@ +# Docker Governance Advisory Board Meetings + +In the spirit of openness, Docker created a Governance Advisory Board, and committed to make all materials and notes from the meetings of this group public. +All output from the meetings should be considered proposals only, and are subject to the review and approval of the community and the project leadership. + +The materials from the first Docker Governance Advisory Board meeting, held on October 28, 2014, are available at +[Google Docs Folder](http://goo.gl/Alfj8r) + +These include: + +* First Meeting Notes +* DGAB Charter +* Presentation 1: Introductory Presentation, including State of The Project +* Presentation 2: Overall Contribution Structure/Docker Project Core Proposal +* Presentation 3: Long Term Roadmap/Statement of Direction + + From e2f8fbfbcc450432536e387777b1ff080c94a948 Mon Sep 17 00:00:00 2001 From: Vincent Batts Date: Wed, 5 Nov 2014 18:10:38 -0500 Subject: [PATCH 256/592] devicemapper: split out devicemapper bindings This is a first pass at splitting out devicemapper into separate, usable bindings. 
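To make the intent concrete, here is a rough sketch of how an out-of-tree consumer could use the extracted package once it lives under `pkg/devicemapper` (illustrative only; the `stdLogger` type and the `docker-thinpool` name are invented for the example, while the function and interface names come from the exported API in the diff below):

```
// Sketch of a standalone consumer of pkg/devicemapper; not part of this patch.
package main

import (
	"fmt"
	"log"

	"github.com/docker/docker/pkg/devicemapper"
)

// stdLogger satisfies devicemapper.DevmapperLogger so libdevmapper messages
// can be routed to whatever logger the caller prefers.
type stdLogger struct{}

func (stdLogger) DMLog(level int, file string, line int, dmError int, message string) {
	log.Printf("libdevmapper(%d): %s:%d (%d) %s", level, file, line, dmError, message)
}

func main() {
	// Register ourselves as the libdm log handler and set the device
	// directory, mirroring what the graph driver does.
	devicemapper.LogInit(stdLogger{})
	devicemapper.SetDevDir("/dev")

	version, err := devicemapper.GetDriverVersion()
	if err != nil {
		log.Fatalf("device-mapper driver not available: %s", err)
	}
	fmt.Println("driver version:", version)

	// "docker-thinpool" is just an example device name.
	if info, err := devicemapper.GetInfo("docker-thinpool"); err == nil && info.Exists != 0 {
		fmt.Println("pool device exists")
	}
}
```

The devmapper graph driver itself now goes through exactly these entry points, as the deviceset.go changes below show.
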
Signed-off-by: Vincent Batts --- daemon/graphdriver/devmapper/deviceset.go | 80 ++++++++++--------- daemon/graphdriver/devmapper/driver.go | 3 +- .../devicemapper}/attach_loopback.go | 4 +- .../devicemapper}/devmapper.go | 74 ++++++++--------- .../devicemapper}/devmapper_log.go | 4 +- .../devicemapper}/devmapper_wrapper.go | 2 +- .../devmapper => pkg/devicemapper}/ioctl.go | 2 +- 7 files changed, 87 insertions(+), 82 deletions(-) rename {daemon/graphdriver/devmapper => pkg/devicemapper}/attach_loopback.go (97%) rename {daemon/graphdriver/devmapper => pkg/devicemapper}/devmapper.go (87%) rename {daemon/graphdriver/devmapper => pkg/devicemapper}/devmapper_log.go (83%) rename {daemon/graphdriver/devmapper => pkg/devicemapper}/devmapper_wrapper.go (99%) rename {daemon/graphdriver/devmapper => pkg/devicemapper}/ioctl.go (98%) diff --git a/daemon/graphdriver/devmapper/deviceset.go b/daemon/graphdriver/devmapper/deviceset.go index fdfc089a82..97f7814887 100644 --- a/daemon/graphdriver/devmapper/deviceset.go +++ b/daemon/graphdriver/devmapper/deviceset.go @@ -20,6 +20,7 @@ import ( log "github.com/Sirupsen/logrus" "github.com/docker/docker/daemon/graphdriver" + "github.com/docker/docker/pkg/devicemapper" "github.com/docker/docker/pkg/parsers" "github.com/docker/docker/pkg/units" "github.com/docker/libcontainer/label" @@ -228,7 +229,7 @@ func (devices *DeviceSet) saveMetadata(info *DevInfo) error { } if devices.NewTransactionId != devices.TransactionId { - if err = setTransactionId(devices.getPoolDevName(), devices.TransactionId, devices.NewTransactionId); err != nil { + if err = devicemapper.SetTransactionId(devices.getPoolDevName(), devices.TransactionId, devices.NewTransactionId); err != nil { return fmt.Errorf("Error setting devmapper transition ID: %s", err) } devices.TransactionId = devices.NewTransactionId @@ -280,11 +281,11 @@ func (devices *DeviceSet) registerDevice(id int, hash string, size uint64) (*Dev func (devices *DeviceSet) activateDeviceIfNeeded(info *DevInfo) error { log.Debugf("activateDeviceIfNeeded(%v)", info.Hash) - if devinfo, _ := getInfo(info.Name()); devinfo != nil && devinfo.Exists != 0 { + if devinfo, _ := devicemapper.GetInfo(info.Name()); devinfo != nil && devinfo.Exists != 0 { return nil } - return activateDevice(devices.getPoolDevName(), info.Name(), info.DeviceId, info.Size) + return devicemapper.ActivateDevice(devices.getPoolDevName(), info.Name(), info.DeviceId, info.Size) } func (devices *DeviceSet) createFilesystem(info *DevInfo) error { @@ -321,7 +322,7 @@ func (devices *DeviceSet) createFilesystem(info *DevInfo) error { } func (devices *DeviceSet) initMetaData() error { - _, _, _, params, err := getStatus(devices.getPoolName()) + _, _, _, params, err := devicemapper.GetStatus(devices.getPoolName()) if err != nil { return err } @@ -400,7 +401,7 @@ func (devices *DeviceSet) setupBaseImage() error { id := devices.nextDeviceId // Create initial device - if err := createDevice(devices.getPoolDevName(), &id); err != nil { + if err := devicemapper.CreateDevice(devices.getPoolDevName(), &id); err != nil { return err } @@ -410,7 +411,7 @@ func (devices *DeviceSet) setupBaseImage() error { log.Debugf("Registering base device (id %v) with FS size %v", id, devices.baseFsSize) info, err := devices.registerDevice(id, "", devices.baseFsSize) if err != nil { - _ = deleteDevice(devices.getPoolDevName(), id) + _ = devicemapper.DeleteDevice(devices.getPoolDevName(), id) return err } @@ -447,11 +448,12 @@ func setCloseOnExec(name string) { } } -func (devices *DeviceSet) log(level 
int, file string, line int, dmError int, message string) { +func (devices *DeviceSet) DMLog(level int, file string, line int, dmError int, message string) { if level >= 7 { return // Ignore _LOG_DEBUG } + // FIXME(vbatts) push this back into ./pkg/devicemapper/ log.Debugf("libdevmapper(%d): %s:%d (%d) %s", level, file, line, dmError, message) } @@ -489,7 +491,7 @@ func (devices *DeviceSet) ResizePool(size int64) error { return fmt.Errorf("Can't shrink file") } - dataloopback := FindLoopDeviceFor(datafile) + dataloopback := devicemapper.FindLoopDeviceFor(datafile) if dataloopback == nil { return fmt.Errorf("Unable to find loopback mount for: %s", datafilename) } @@ -501,7 +503,7 @@ func (devices *DeviceSet) ResizePool(size int64) error { } defer metadatafile.Close() - metadataloopback := FindLoopDeviceFor(metadatafile) + metadataloopback := devicemapper.FindLoopDeviceFor(metadatafile) if metadataloopback == nil { return fmt.Errorf("Unable to find loopback mount for: %s", metadatafilename) } @@ -513,22 +515,22 @@ func (devices *DeviceSet) ResizePool(size int64) error { } // Reload size for loopback device - if err := LoopbackSetCapacity(dataloopback); err != nil { + if err := devicemapper.LoopbackSetCapacity(dataloopback); err != nil { return fmt.Errorf("Unable to update loopback capacity: %s", err) } // Suspend the pool - if err := suspendDevice(devices.getPoolName()); err != nil { + if err := devicemapper.SuspendDevice(devices.getPoolName()); err != nil { return fmt.Errorf("Unable to suspend pool: %s", err) } // Reload with the new block sizes - if err := reloadPool(devices.getPoolName(), dataloopback, metadataloopback, devices.thinpBlockSize); err != nil { + if err := devicemapper.ReloadPool(devices.getPoolName(), dataloopback, metadataloopback, devices.thinpBlockSize); err != nil { return fmt.Errorf("Unable to reload pool: %s", err) } // Resume the pool - if err := resumeDevice(devices.getPoolName()); err != nil { + if err := devicemapper.ResumeDevice(devices.getPoolName()); err != nil { return fmt.Errorf("Unable to resume pool: %s", err) } @@ -536,9 +538,10 @@ func (devices *DeviceSet) ResizePool(size int64) error { } func (devices *DeviceSet) initDevmapper(doInit bool) error { - logInit(devices) + // give ourselves to libdm as a log handler + devicemapper.LogInit(devices) - _, err := getDriverVersion() + _, err := devicemapper.GetDriverVersion() if err != nil { // Can't even get driver version, assume not supported return graphdriver.ErrNotSupported @@ -566,9 +569,9 @@ func (devices *DeviceSet) initDevmapper(doInit bool) error { // Check for the existence of the device -pool log.Debugf("Checking for existence of the pool '%s'", devices.getPoolName()) - info, err := getInfo(devices.getPoolName()) + info, err := devicemapper.GetInfo(devices.getPoolName()) if info == nil { - log.Debugf("Error device getInfo: %s", err) + log.Debugf("Error device devicemapper.GetInfo: %s", err) return err } @@ -610,7 +613,7 @@ func (devices *DeviceSet) initDevmapper(doInit bool) error { return err } - dataFile, err = attachLoopDevice(data) + dataFile, err = devicemapper.AttachLoopDevice(data) if err != nil { return err } @@ -641,7 +644,7 @@ func (devices *DeviceSet) initDevmapper(doInit bool) error { return err } - metadataFile, err = attachLoopDevice(metadata) + metadataFile, err = devicemapper.AttachLoopDevice(metadata) if err != nil { return err } @@ -653,7 +656,7 @@ func (devices *DeviceSet) initDevmapper(doInit bool) error { } defer metadataFile.Close() - if err := createPool(devices.getPoolName(), 
dataFile, metadataFile, devices.thinpBlockSize); err != nil { + if err := devicemapper.CreatePool(devices.getPoolName(), dataFile, metadataFile, devices.thinpBlockSize); err != nil { return err } } @@ -695,7 +698,7 @@ func (devices *DeviceSet) AddDevice(hash, baseHash string) error { deviceId := devices.nextDeviceId - if err := createSnapDevice(devices.getPoolDevName(), &deviceId, baseInfo.Name(), baseInfo.DeviceId); err != nil { + if err := devicemapper.CreateSnapDevice(devices.getPoolDevName(), &deviceId, baseInfo.Name(), baseInfo.DeviceId); err != nil { log.Debugf("Error creating snap device: %s", err) return err } @@ -704,7 +707,7 @@ func (devices *DeviceSet) AddDevice(hash, baseHash string) error { devices.nextDeviceId = (deviceId + 1) & 0xffffff if _, err := devices.registerDevice(deviceId, hash, baseInfo.Size); err != nil { - deleteDevice(devices.getPoolDevName(), deviceId) + devicemapper.DeleteDevice(devices.getPoolDevName(), deviceId) log.Debugf("Error registering device: %s", err) return err } @@ -717,13 +720,13 @@ func (devices *DeviceSet) deleteDevice(info *DevInfo) error { // on the thin pool when we remove a thinp device, so we do it // manually if err := devices.activateDeviceIfNeeded(info); err == nil { - if err := BlockDeviceDiscard(info.DevName()); err != nil { + if err := devicemapper.BlockDeviceDiscard(info.DevName()); err != nil { log.Debugf("Error discarding block on device: %s (ignoring)", err) } } } - devinfo, _ := getInfo(info.Name()) + devinfo, _ := devicemapper.GetInfo(info.Name()) if devinfo != nil && devinfo.Exists != 0 { if err := devices.removeDeviceAndWait(info.Name()); err != nil { log.Debugf("Error removing device: %s", err) @@ -731,7 +734,7 @@ func (devices *DeviceSet) deleteDevice(info *DevInfo) error { } } - if err := deleteDevice(devices.getPoolDevName(), info.DeviceId); err != nil { + if err := devicemapper.DeleteDevice(devices.getPoolDevName(), info.DeviceId); err != nil { log.Debugf("Error deleting device: %s", err) return err } @@ -772,16 +775,16 @@ func (devices *DeviceSet) deactivatePool() error { defer log.Debugf("[devmapper] deactivatePool END") devname := devices.getPoolDevName() - devinfo, err := getInfo(devname) + devinfo, err := devicemapper.GetInfo(devname) if err != nil { return err } - if d, err := getDeps(devname); err == nil { + if d, err := devicemapper.GetDeps(devname); err == nil { // Access to more Debug output - log.Debugf("[devmapper] getDeps() %s: %#v", devname, d) + log.Debugf("[devmapper] devicemapper.GetDeps() %s: %#v", devname, d) } if devinfo.Exists != 0 { - return removeDevice(devname) + return devicemapper.RemoveDevice(devname) } return nil @@ -797,7 +800,7 @@ func (devices *DeviceSet) deactivateDevice(info *DevInfo) error { log.Errorf("Warning: error waiting for device %s to close: %s", info.Hash, err) } - devinfo, err := getInfo(info.Name()) + devinfo, err := devicemapper.GetInfo(info.Name()) if err != nil { return err } @@ -816,11 +819,11 @@ func (devices *DeviceSet) removeDeviceAndWait(devname string) error { var err error for i := 0; i < 1000; i++ { - err = removeDevice(devname) + err = devicemapper.RemoveDevice(devname) if err == nil { break } - if err != ErrBusy { + if err != devicemapper.ErrBusy { return err } @@ -848,7 +851,7 @@ func (devices *DeviceSet) waitRemove(devname string) error { defer log.Debugf("[deviceset %s] waitRemove(%s) END", devices.devicePrefix, devname) i := 0 for ; i < 1000; i++ { - devinfo, err := getInfo(devname) + devinfo, err := devicemapper.GetInfo(devname) if err != nil { // If there is 
an error we assume the device doesn't exist. // The error might actually be something else, but we can't differentiate. @@ -877,7 +880,7 @@ func (devices *DeviceSet) waitRemove(devname string) error { func (devices *DeviceSet) waitClose(info *DevInfo) error { i := 0 for ; i < 1000; i++ { - devinfo, err := getInfo(info.Name()) + devinfo, err := devicemapper.GetInfo(info.Name()) if err != nil { return err } @@ -898,7 +901,6 @@ func (devices *DeviceSet) waitClose(info *DevInfo) error { } func (devices *DeviceSet) Shutdown() error { - log.Debugf("[deviceset %s] shutdown()", devices.devicePrefix) log.Debugf("[devmapper] Shutting down DeviceSet: %s", devices.root) defer log.Debugf("[deviceset %s] shutdown END", devices.devicePrefix) @@ -1065,7 +1067,7 @@ func (devices *DeviceSet) HasActivatedDevice(hash string) bool { devices.Lock() defer devices.Unlock() - devinfo, _ := getInfo(info.Name()) + devinfo, _ := devicemapper.GetInfo(info.Name()) return devinfo != nil && devinfo.Exists != 0 } @@ -1087,7 +1089,7 @@ func (devices *DeviceSet) List() []string { func (devices *DeviceSet) deviceStatus(devName string) (sizeInSectors, mappedSectors, highestMappedSector uint64, err error) { var params string - _, sizeInSectors, _, params, err = getStatus(devName) + _, sizeInSectors, _, params, err = devicemapper.GetStatus(devName) if err != nil { return } @@ -1132,7 +1134,7 @@ func (devices *DeviceSet) GetDeviceStatus(hash string) (*DevStatus, error) { func (devices *DeviceSet) poolStatus() (totalSizeInSectors, transactionId, dataUsed, dataTotal, metadataUsed, metadataTotal uint64, err error) { var params string - if _, totalSizeInSectors, _, params, err = getStatus(devices.getPoolName()); err == nil { + if _, totalSizeInSectors, _, params, err = devicemapper.GetStatus(devices.getPoolName()); err == nil { _, err = fmt.Sscanf(params, "%d %d/%d %d/%d", &transactionId, &metadataUsed, &metadataTotal, &dataUsed, &dataTotal) } return @@ -1175,7 +1177,7 @@ func (devices *DeviceSet) Status() *Status { } func NewDeviceSet(root string, doInit bool, options []string) (*DeviceSet, error) { - SetDevDir("/dev") + devicemapper.SetDevDir("/dev") devices := &DeviceSet{ root: root, diff --git a/daemon/graphdriver/devmapper/driver.go b/daemon/graphdriver/devmapper/driver.go index 9e1d88e7d4..b20f3e5450 100644 --- a/daemon/graphdriver/devmapper/driver.go +++ b/daemon/graphdriver/devmapper/driver.go @@ -10,6 +10,7 @@ import ( log "github.com/Sirupsen/logrus" "github.com/docker/docker/daemon/graphdriver" + "github.com/docker/docker/pkg/devicemapper" "github.com/docker/docker/pkg/mount" "github.com/docker/docker/pkg/units" ) @@ -63,7 +64,7 @@ func (d *Driver) Status() [][2]string { {"Metadata Space Used", fmt.Sprintf("%s", units.HumanSize(int64(s.Metadata.Used)))}, {"Metadata Space Total", fmt.Sprintf("%s", units.HumanSize(int64(s.Metadata.Total)))}, } - if vStr, err := GetLibraryVersion(); err == nil { + if vStr, err := devicemapper.GetLibraryVersion(); err == nil { status = append(status, [2]string{"Library Version", vStr}) } return status diff --git a/daemon/graphdriver/devmapper/attach_loopback.go b/pkg/devicemapper/attach_loopback.go similarity index 97% rename from daemon/graphdriver/devmapper/attach_loopback.go rename to pkg/devicemapper/attach_loopback.go index dce5b23ee8..d39cbc6cf5 100644 --- a/daemon/graphdriver/devmapper/attach_loopback.go +++ b/pkg/devicemapper/attach_loopback.go @@ -1,6 +1,6 @@ // +build linux -package devmapper +package devicemapper import ( "fmt" @@ -84,7 +84,7 @@ func openNextAvailableLoopback(index 
int, sparseFile *os.File) (loopFile *os.Fil // attachLoopDevice attaches the given sparse file to the next // available loopback device. It returns an opened *os.File. -func attachLoopDevice(sparseName string) (loop *os.File, err error) { +func AttachLoopDevice(sparseName string) (loop *os.File, err error) { // Try to retrieve the next available loopback device via syscall. // If it fails, we discard error and start loopking for a diff --git a/daemon/graphdriver/devmapper/devmapper.go b/pkg/devicemapper/devmapper.go similarity index 87% rename from daemon/graphdriver/devmapper/devmapper.go rename to pkg/devicemapper/devmapper.go index cd281f0bb3..c0b931c3fb 100644 --- a/daemon/graphdriver/devmapper/devmapper.go +++ b/pkg/devicemapper/devmapper.go @@ -1,6 +1,6 @@ // +build linux -package devmapper +package devicemapper import ( "errors" @@ -13,7 +13,7 @@ import ( ) type DevmapperLogger interface { - log(level int, file string, line int, dmError int, message string) + DMLog(level int, file string, line int, dmError int, message string) } const ( @@ -272,7 +272,8 @@ func LogInitVerbose(level int) { var dmLogger DevmapperLogger = nil -func logInit(logger DevmapperLogger) { +// initialize the logger for the device mapper library +func LogInit(logger DevmapperLogger) { dmLogger = logger LogWithErrnoInit() } @@ -295,6 +296,7 @@ func GetLibraryVersion() (string, error) { // Useful helper for cleanup func RemoveDevice(name string) error { + // TODO(vbatts) just use the other removeDevice() task := TaskCreate(DeviceRemove) if task == nil { return ErrCreateRemoveTask @@ -342,7 +344,7 @@ func BlockDeviceDiscard(path string) error { } // This is the programmatic example of "dmsetup create" -func createPool(poolName string, dataFile, metadataFile *os.File, poolBlockSize uint32) error { +func CreatePool(poolName string, dataFile, metadataFile *os.File, poolBlockSize uint32) error { task, err := createTask(DeviceCreate, poolName) if task == nil { return err @@ -364,7 +366,7 @@ func createPool(poolName string, dataFile, metadataFile *os.File, poolBlockSize } if err := task.Run(); err != nil { - return fmt.Errorf("Error running DeviceCreate (createPool) %s", err) + return fmt.Errorf("Error running DeviceCreate (CreatePool) %s", err) } UdevWait(cookie) @@ -372,7 +374,7 @@ func createPool(poolName string, dataFile, metadataFile *os.File, poolBlockSize return nil } -func reloadPool(poolName string, dataFile, metadataFile *os.File, poolBlockSize uint32) error { +func ReloadPool(poolName string, dataFile, metadataFile *os.File, poolBlockSize uint32) error { task, err := createTask(DeviceReload, poolName) if task == nil { return err @@ -406,7 +408,7 @@ func createTask(t TaskType, name string) (*Task, error) { return task, nil } -func getDeps(name string) (*Deps, error) { +func GetDeps(name string) (*Deps, error) { task, err := createTask(DeviceDeps, name) if task == nil { return nil, err @@ -417,7 +419,7 @@ func getDeps(name string) (*Deps, error) { return task.GetDeps() } -func getInfo(name string) (*Info, error) { +func GetInfo(name string) (*Info, error) { task, err := createTask(DeviceInfo, name) if task == nil { return nil, err @@ -428,7 +430,7 @@ func getInfo(name string) (*Info, error) { return task.GetInfo() } -func getDriverVersion() (string, error) { +func GetDriverVersion() (string, error) { task := TaskCreate(DeviceVersion) if task == nil { return "", fmt.Errorf("Can't create DeviceVersion task") @@ -439,24 +441,24 @@ func getDriverVersion() (string, error) { return task.GetDriverVersion() } -func 
getStatus(name string) (uint64, uint64, string, string, error) { +func GetStatus(name string) (uint64, uint64, string, string, error) { task, err := createTask(DeviceStatus, name) if task == nil { - log.Debugf("getStatus: Error createTask: %s", err) + log.Debugf("GetStatus: Error createTask: %s", err) return 0, 0, "", "", err } if err := task.Run(); err != nil { - log.Debugf("getStatus: Error Run: %s", err) + log.Debugf("GetStatus: Error Run: %s", err) return 0, 0, "", "", err } devinfo, err := task.GetInfo() if err != nil { - log.Debugf("getStatus: Error GetInfo: %s", err) + log.Debugf("GetStatus: Error GetInfo: %s", err) return 0, 0, "", "", err } if devinfo.Exists == 0 { - log.Debugf("getStatus: Non existing device %s", name) + log.Debugf("GetStatus: Non existing device %s", name) return 0, 0, "", "", fmt.Errorf("Non existing device %s", name) } @@ -464,7 +466,7 @@ func getStatus(name string) (uint64, uint64, string, string, error) { return start, length, targetType, params, nil } -func setTransactionId(poolName string, oldId uint64, newId uint64) error { +func SetTransactionId(poolName string, oldId uint64, newId uint64) error { task, err := createTask(DeviceTargetMsg, poolName) if task == nil { return err @@ -479,12 +481,12 @@ func setTransactionId(poolName string, oldId uint64, newId uint64) error { } if err := task.Run(); err != nil { - return fmt.Errorf("Error running setTransactionId %s", err) + return fmt.Errorf("Error running SetTransactionId %s", err) } return nil } -func suspendDevice(name string) error { +func SuspendDevice(name string) error { task, err := createTask(DeviceSuspend, name) if task == nil { return err @@ -495,7 +497,7 @@ func suspendDevice(name string) error { return nil } -func resumeDevice(name string) error { +func ResumeDevice(name string) error { task, err := createTask(DeviceResume, name) if task == nil { return err @@ -515,8 +517,8 @@ func resumeDevice(name string) error { return nil } -func createDevice(poolName string, deviceId *int) error { - log.Debugf("[devmapper] createDevice(poolName=%v, deviceId=%v)", poolName, *deviceId) +func CreateDevice(poolName string, deviceId *int) error { + log.Debugf("[devmapper] CreateDevice(poolName=%v, deviceId=%v)", poolName, *deviceId) for { task, err := createTask(DeviceTargetMsg, poolName) @@ -539,14 +541,14 @@ func createDevice(poolName string, deviceId *int) error { *deviceId++ continue } - return fmt.Errorf("Error running createDevice %s", err) + return fmt.Errorf("Error running CreateDevice %s", err) } break } return nil } -func deleteDevice(poolName string, deviceId int) error { +func DeleteDevice(poolName string, deviceId int) error { task, err := createTask(DeviceTargetMsg, poolName) if task == nil { return err @@ -561,14 +563,14 @@ func deleteDevice(poolName string, deviceId int) error { } if err := task.Run(); err != nil { - return fmt.Errorf("Error running deleteDevice %s", err) + return fmt.Errorf("Error running DeleteDevice %s", err) } return nil } func removeDevice(name string) error { - log.Debugf("[devmapper] removeDevice START") - defer log.Debugf("[devmapper] removeDevice END") + log.Debugf("[devmapper] RemoveDevice START") + defer log.Debugf("[devmapper] RemoveDevice END") task, err := createTask(DeviceRemove, name) if task == nil { return err @@ -578,12 +580,12 @@ func removeDevice(name string) error { if dmSawBusy { return ErrBusy } - return fmt.Errorf("Error running removeDevice %s", err) + return fmt.Errorf("Error running RemoveDevice %s", err) } return nil } -func activateDevice(poolName 
string, name string, deviceId int, size uint64) error { +func ActivateDevice(poolName string, name string, deviceId int, size uint64) error { task, err := createTask(DeviceCreate, name) if task == nil { return err @@ -603,7 +605,7 @@ func activateDevice(poolName string, name string, deviceId int, size uint64) err } if err := task.Run(); err != nil { - return fmt.Errorf("Error running DeviceCreate (activateDevice) %s", err) + return fmt.Errorf("Error running DeviceCreate (ActivateDevice) %s", err) } UdevWait(cookie) @@ -611,12 +613,12 @@ func activateDevice(poolName string, name string, deviceId int, size uint64) err return nil } -func createSnapDevice(poolName string, deviceId *int, baseName string, baseDeviceId int) error { - devinfo, _ := getInfo(baseName) +func CreateSnapDevice(poolName string, deviceId *int, baseName string, baseDeviceId int) error { + devinfo, _ := GetInfo(baseName) doSuspend := devinfo != nil && devinfo.Exists != 0 if doSuspend { - if err := suspendDevice(baseName); err != nil { + if err := SuspendDevice(baseName); err != nil { return err } } @@ -625,21 +627,21 @@ func createSnapDevice(poolName string, deviceId *int, baseName string, baseDevic task, err := createTask(DeviceTargetMsg, poolName) if task == nil { if doSuspend { - resumeDevice(baseName) + ResumeDevice(baseName) } return err } if err := task.SetSector(0); err != nil { if doSuspend { - resumeDevice(baseName) + ResumeDevice(baseName) } return fmt.Errorf("Can't set sector %s", err) } if err := task.SetMessage(fmt.Sprintf("create_snap %d %d", *deviceId, baseDeviceId)); err != nil { if doSuspend { - resumeDevice(baseName) + ResumeDevice(baseName) } return fmt.Errorf("Can't set message %s", err) } @@ -653,7 +655,7 @@ func createSnapDevice(poolName string, deviceId *int, baseName string, baseDevic } if doSuspend { - resumeDevice(baseName) + ResumeDevice(baseName) } return fmt.Errorf("Error running DeviceCreate (createSnapDevice) %s", err) } @@ -662,7 +664,7 @@ func createSnapDevice(poolName string, deviceId *int, baseName string, baseDevic } if doSuspend { - if err := resumeDevice(baseName); err != nil { + if err := ResumeDevice(baseName); err != nil { return err } } diff --git a/daemon/graphdriver/devmapper/devmapper_log.go b/pkg/devicemapper/devmapper_log.go similarity index 83% rename from daemon/graphdriver/devmapper/devmapper_log.go rename to pkg/devicemapper/devmapper_log.go index ec7809cc51..d6550bd626 100644 --- a/daemon/graphdriver/devmapper/devmapper_log.go +++ b/pkg/devicemapper/devmapper_log.go @@ -1,6 +1,6 @@ // +build linux -package devmapper +package devicemapper import "C" @@ -25,6 +25,6 @@ func DevmapperLogCallback(level C.int, file *C.char, line C.int, dm_errno_or_cla } if dmLogger != nil { - dmLogger.log(int(level), C.GoString(file), int(line), int(dm_errno_or_class), msg) + dmLogger.DMLog(int(level), C.GoString(file), int(line), int(dm_errno_or_class), msg) } } diff --git a/daemon/graphdriver/devmapper/devmapper_wrapper.go b/pkg/devicemapper/devmapper_wrapper.go similarity index 99% rename from daemon/graphdriver/devmapper/devmapper_wrapper.go rename to pkg/devicemapper/devmapper_wrapper.go index 855c95e3ba..c7e96a1617 100644 --- a/daemon/graphdriver/devmapper/devmapper_wrapper.go +++ b/pkg/devicemapper/devmapper_wrapper.go @@ -1,6 +1,6 @@ // +build linux -package devmapper +package devicemapper /* #cgo LDFLAGS: -L. 
-ldevmapper diff --git a/daemon/graphdriver/devmapper/ioctl.go b/pkg/devicemapper/ioctl.go similarity index 98% rename from daemon/graphdriver/devmapper/ioctl.go rename to pkg/devicemapper/ioctl.go index 29caab0664..f97e9d1682 100644 --- a/daemon/graphdriver/devmapper/ioctl.go +++ b/pkg/devicemapper/ioctl.go @@ -1,6 +1,6 @@ // +build linux -package devmapper +package devicemapper import ( "syscall" From 165624062e1ac105b6e45beba51cec6439918f11 Mon Sep 17 00:00:00 2001 From: Erik Hollensbe Date: Wed, 5 Nov 2014 15:12:24 -0800 Subject: [PATCH 257/592] Close stdin after execution with `docker exec -i` Docker-DCO-1.1-Signed-off-by: Erik Hollensbe (github: erikh) --- daemon/exec.go | 2 +- integration-cli/docker_cli_exec_test.go | 42 +++++++++++++++++++++++++ 2 files changed, 43 insertions(+), 1 deletion(-) diff --git a/daemon/exec.go b/daemon/exec.go index 058b712625..d813dbba1d 100644 --- a/daemon/exec.go +++ b/daemon/exec.go @@ -203,7 +203,7 @@ func (d *Daemon) ContainerExecStart(job *engine.Job) engine.Status { execConfig.StreamConfig.stdinPipe = ioutils.NopWriteCloser(ioutil.Discard) // Silently drop stdin } - attachErr := d.attach(&execConfig.StreamConfig, execConfig.OpenStdin, false, execConfig.ProcessConfig.Tty, cStdin, cStdout, cStderr) + attachErr := d.attach(&execConfig.StreamConfig, execConfig.OpenStdin, true, execConfig.ProcessConfig.Tty, cStdin, cStdout, cStderr) execErr := make(chan error) diff --git a/integration-cli/docker_cli_exec_test.go b/integration-cli/docker_cli_exec_test.go index 2a9e30e688..6626a33a8b 100644 --- a/integration-cli/docker_cli_exec_test.go +++ b/integration-cli/docker_cli_exec_test.go @@ -2,6 +2,7 @@ package main import ( "bufio" + "os" "os/exec" "strings" "testing" @@ -31,6 +32,47 @@ func TestExec(t *testing.T) { logDone("exec - basic test") } +func TestExecInteractiveStdinClose(t *testing.T) { + defer deleteAllContainers() + out, _, err := runCommandWithOutput(exec.Command(dockerBinary, "run", "-itd", "busybox", "/bin/cat")) + if err != nil { + t.Fatal(err) + } + + contId := strings.TrimSpace(out) + println(contId) + + returnchan := make(chan struct{}) + + go func() { + var err error + cmd := exec.Command(dockerBinary, "exec", "-i", contId, "/bin/ls", "/") + cmd.Stdin = os.Stdin + if err != nil { + t.Fatal(err) + } + + out, err := cmd.CombinedOutput() + if err != nil { + t.Fatal(err, out) + } + + if string(out) == "" { + t.Fatalf("Output was empty, likely blocked by standard input") + } + + returnchan <- struct{}{} + }() + + select { + case <-returnchan: + case <-time.After(10 * time.Second): + t.Fatal("timed out running docker exec") + } + + logDone("exec - interactive mode closes stdin after execution") +} + func TestExecInteractive(t *testing.T) { runCmd := exec.Command(dockerBinary, "run", "-d", "--name", "testing", "busybox", "sh", "-c", "echo test > /tmp/file && sleep 100") if out, _, _, err := runCommandWithStdoutStderr(runCmd); err != nil { From 7f5ebdcaac51a3a0cf3805397748154d3743469c Mon Sep 17 00:00:00 2001 From: Michael Crosby Date: Thu, 6 Nov 2014 01:12:41 +0000 Subject: [PATCH 258/592] Update libcontainer to fd6df76562137aa3b18e44b790c Signed-off-by: Michael Crosby --- hack/vendor.sh | 2 +- .../Sirupsen/logrus/formatter_bench_test.go | 88 +++++++++++++++ .../logrus/hooks/papertrail/README.md | 28 +++++ .../logrus/hooks/papertrail/papertrail.go | 54 +++++++++ .../hooks/papertrail/papertrail_test.go | 26 +++++ .../github.com/docker/libcontainer/.drone.yml | 9 ++ .../docker/libcontainer/.travis.yml | 36 ------ 
.../docker/libcontainer/MAINTAINERS | 1 - .../github.com/docker/libcontainer/README.md | 2 +- .../libcontainer/cgroups/fs/utils_test.go | 4 +- .../cgroups/systemd/apply_systemd.go | 23 ++-- .../docker/libcontainer/devices/devices.go | 2 +- .../libcontainer/integration/init_test.go | 8 +- .../docker/libcontainer/label/label.go | 12 ++ .../libcontainer/label/label_selinux.go | 15 ++- .../libcontainer/label/label_selinux_test.go | 43 ++++++- .../docker/libcontainer/mount/init.go | 3 +- .../docker/libcontainer/namespaces/exec.go | 93 +++++++--------- .../docker/libcontainer/namespaces/execin.go | 28 +++-- .../docker/libcontainer/namespaces/init.go | 24 +++- .../docker/libcontainer/namespaces/utils.go | 38 +++++++ .../libcontainer/netlink/netlink_linux.go | 24 ++++ .../netlink/netlink_linux_test.go | 4 +- .../docker/libcontainer/network/network.go | 8 ++ .../docker/libcontainer/network/veth.go | 3 + .../docker/libcontainer/nsinit/init.go | 9 +- .../docker/libcontainer/nsinit/utils.go | 10 +- .../docker/libcontainer/selinux/selinux.go | 25 +++++ .../libcontainer/selinux/selinux_test.go | 2 +- .../docker/libcontainer/syncpipe/sync_pipe.go | 105 ------------------ .../libcontainer/syncpipe/sync_pipe_linux.go | 20 ---- .../libcontainer/syncpipe/sync_pipe_test.go | 72 ------------ .../system/syscall_linux_amd64.go | 1 + 33 files changed, 474 insertions(+), 348 deletions(-) create mode 100644 vendor/src/github.com/Sirupsen/logrus/formatter_bench_test.go create mode 100644 vendor/src/github.com/Sirupsen/logrus/hooks/papertrail/README.md create mode 100644 vendor/src/github.com/Sirupsen/logrus/hooks/papertrail/papertrail.go create mode 100644 vendor/src/github.com/Sirupsen/logrus/hooks/papertrail/papertrail_test.go create mode 100755 vendor/src/github.com/docker/libcontainer/.drone.yml delete mode 100644 vendor/src/github.com/docker/libcontainer/.travis.yml create mode 100644 vendor/src/github.com/docker/libcontainer/namespaces/utils.go delete mode 100644 vendor/src/github.com/docker/libcontainer/syncpipe/sync_pipe.go delete mode 100644 vendor/src/github.com/docker/libcontainer/syncpipe/sync_pipe_linux.go delete mode 100644 vendor/src/github.com/docker/libcontainer/syncpipe/sync_pipe_test.go diff --git a/hack/vendor.sh b/hack/vendor.sh index 85be29303e..ae45dbe2d8 100755 --- a/hack/vendor.sh +++ b/hack/vendor.sh @@ -66,7 +66,7 @@ if [ "$1" = '--go' ]; then mv tmp-tar src/code.google.com/p/go/src/pkg/archive/tar fi -clone git github.com/docker/libcontainer f60d7b9195f8dc0b5d343abbc3293da7c17bb11c +clone git github.com/docker/libcontainer fd6df76562137aa3b18e44b790cb484fe2b6fa0b # see src/github.com/docker/libcontainer/update-vendor.sh which is the "source of truth" for libcontainer deps (just like this file) rm -rf src/github.com/docker/libcontainer/vendor eval "$(grep '^clone ' src/github.com/docker/libcontainer/update-vendor.sh | grep -v 'github.com/codegangsta/cli')" diff --git a/vendor/src/github.com/Sirupsen/logrus/formatter_bench_test.go b/vendor/src/github.com/Sirupsen/logrus/formatter_bench_test.go new file mode 100644 index 0000000000..77989da629 --- /dev/null +++ b/vendor/src/github.com/Sirupsen/logrus/formatter_bench_test.go @@ -0,0 +1,88 @@ +package logrus + +import ( + "testing" + "time" +) + +// smallFields is a small size data set for benchmarking +var smallFields = Fields{ + "foo": "bar", + "baz": "qux", + "one": "two", + "three": "four", +} + +// largeFields is a large size data set for benchmarking +var largeFields = Fields{ + "foo": "bar", + "baz": "qux", + "one": "two", + "three": 
"four", + "five": "six", + "seven": "eight", + "nine": "ten", + "eleven": "twelve", + "thirteen": "fourteen", + "fifteen": "sixteen", + "seventeen": "eighteen", + "nineteen": "twenty", + "a": "b", + "c": "d", + "e": "f", + "g": "h", + "i": "j", + "k": "l", + "m": "n", + "o": "p", + "q": "r", + "s": "t", + "u": "v", + "w": "x", + "y": "z", + "this": "will", + "make": "thirty", + "entries": "yeah", +} + +func BenchmarkSmallTextFormatter(b *testing.B) { + doBenchmark(b, &TextFormatter{DisableColors: true}, smallFields) +} + +func BenchmarkLargeTextFormatter(b *testing.B) { + doBenchmark(b, &TextFormatter{DisableColors: true}, largeFields) +} + +func BenchmarkSmallColoredTextFormatter(b *testing.B) { + doBenchmark(b, &TextFormatter{ForceColors: true}, smallFields) +} + +func BenchmarkLargeColoredTextFormatter(b *testing.B) { + doBenchmark(b, &TextFormatter{ForceColors: true}, largeFields) +} + +func BenchmarkSmallJSONFormatter(b *testing.B) { + doBenchmark(b, &JSONFormatter{}, smallFields) +} + +func BenchmarkLargeJSONFormatter(b *testing.B) { + doBenchmark(b, &JSONFormatter{}, largeFields) +} + +func doBenchmark(b *testing.B, formatter Formatter, fields Fields) { + entry := &Entry{ + Time: time.Time{}, + Level: InfoLevel, + Message: "message", + Data: fields, + } + var d []byte + var err error + for i := 0; i < b.N; i++ { + d, err = formatter.Format(entry) + if err != nil { + b.Fatal(err) + } + b.SetBytes(int64(len(d))) + } +} diff --git a/vendor/src/github.com/Sirupsen/logrus/hooks/papertrail/README.md b/vendor/src/github.com/Sirupsen/logrus/hooks/papertrail/README.md new file mode 100644 index 0000000000..ae61e9229a --- /dev/null +++ b/vendor/src/github.com/Sirupsen/logrus/hooks/papertrail/README.md @@ -0,0 +1,28 @@ +# Papertrail Hook for Logrus :walrus: + +[Papertrail](https://papertrailapp.com) provides hosted log management. Once stored in Papertrail, you can [group](http://help.papertrailapp.com/kb/how-it-works/groups/) your logs on various dimensions, [search](http://help.papertrailapp.com/kb/how-it-works/search-syntax) them, and trigger [alerts](http://help.papertrailapp.com/kb/how-it-works/alerts). + +In most deployments, you'll want to send logs to Papertrail via their [remote_syslog](http://help.papertrailapp.com/kb/configuration/configuring-centralized-logging-from-text-log-files-in-unix/) daemon, which requires no application-specific configuration. This hook is intended for relatively low-volume logging, likely in managed cloud hosting deployments where installing `remote_syslog` is not possible. + +## Usage + +You can find your Papertrail UDP port on your [Papertrail account page](https://papertrailapp.com/account/destinations). Substitute it below for `YOUR_PAPERTRAIL_UDP_PORT`. + +For `YOUR_APP_NAME`, substitute a short string that will readily identify your application or service in the logs. 
+ +```go +import ( + "log/syslog" + "github.com/Sirupsen/logrus" + "github.com/Sirupsen/logrus/hooks/papertrail" +) + +func main() { + log := logrus.New() + hook, err := logrus_papertrail.NewPapertrailHook("logs.papertrailapp.com", YOUR_PAPERTRAIL_UDP_PORT, YOUR_APP_NAME) + + if err == nil { + log.Hooks.Add(hook) + } +} +``` diff --git a/vendor/src/github.com/Sirupsen/logrus/hooks/papertrail/papertrail.go b/vendor/src/github.com/Sirupsen/logrus/hooks/papertrail/papertrail.go new file mode 100644 index 0000000000..48e2feaeb5 --- /dev/null +++ b/vendor/src/github.com/Sirupsen/logrus/hooks/papertrail/papertrail.go @@ -0,0 +1,54 @@ +package logrus_papertrail + +import ( + "fmt" + "net" + "os" + "time" + + "github.com/Sirupsen/logrus" +) + +const ( + format = "Jan 2 15:04:05" +) + +// PapertrailHook to send logs to a logging service compatible with the Papertrail API. +type PapertrailHook struct { + Host string + Port int + AppName string + UDPConn net.Conn +} + +// NewPapertrailHook creates a hook to be added to an instance of logger. +func NewPapertrailHook(host string, port int, appName string) (*PapertrailHook, error) { + conn, err := net.Dial("udp", fmt.Sprintf("%s:%d", host, port)) + return &PapertrailHook{host, port, appName, conn}, err +} + +// Fire is called when a log event is fired. +func (hook *PapertrailHook) Fire(entry *logrus.Entry) error { + date := time.Now().Format(format) + payload := fmt.Sprintf("<22> %s %s: [%s] %s", date, hook.AppName, entry.Data["level"], entry.Message) + + bytesWritten, err := hook.UDPConn.Write([]byte(payload)) + if err != nil { + fmt.Fprintf(os.Stderr, "Unable to send log line to Papertrail via UDP. Wrote %d bytes before error: %v", bytesWritten, err) + return err + } + + return nil +} + +// Levels returns the available logging levels. +func (hook *PapertrailHook) Levels() []logrus.Level { + return []logrus.Level{ + logrus.PanicLevel, + logrus.FatalLevel, + logrus.ErrorLevel, + logrus.WarnLevel, + logrus.InfoLevel, + logrus.DebugLevel, + } +} diff --git a/vendor/src/github.com/Sirupsen/logrus/hooks/papertrail/papertrail_test.go b/vendor/src/github.com/Sirupsen/logrus/hooks/papertrail/papertrail_test.go new file mode 100644 index 0000000000..96318d0030 --- /dev/null +++ b/vendor/src/github.com/Sirupsen/logrus/hooks/papertrail/papertrail_test.go @@ -0,0 +1,26 @@ +package logrus_papertrail + +import ( + "fmt" + "testing" + + "github.com/Sirupsen/logrus" + "github.com/stvp/go-udp-testing" +) + +func TestWritingToUDP(t *testing.T) { + port := 16661 + udp.SetAddr(fmt.Sprintf(":%d", port)) + + hook, err := NewPapertrailHook("localhost", port, "test") + if err != nil { + t.Errorf("Unable to connect to local UDP server.") + } + + log := logrus.New() + log.Hooks.Add(hook) + + udp.ShouldReceive(t, "foo", func() { + log.Info("foo") + }) +} diff --git a/vendor/src/github.com/docker/libcontainer/.drone.yml b/vendor/src/github.com/docker/libcontainer/.drone.yml new file mode 100755 index 0000000000..80d298f218 --- /dev/null +++ b/vendor/src/github.com/docker/libcontainer/.drone.yml @@ -0,0 +1,9 @@ +image: dockercore/libcontainer +script: +# Setup the DockerInDocker environment. + - /dind + - sed -i 's!docker/docker!docker/libcontainer!' 
/go/src/github.com/docker/docker/hack/make/.validate + - bash /go/src/github.com/docker/docker/hack/make/validate-dco + - bash /go/src/github.com/docker/docker/hack/make/validate-gofmt + - export GOPATH="$GOPATH:/go:$(pwd)/vendor" # Drone mucks with our GOPATH + - make direct-test diff --git a/vendor/src/github.com/docker/libcontainer/.travis.yml b/vendor/src/github.com/docker/libcontainer/.travis.yml deleted file mode 100644 index 3ce0e27e45..0000000000 --- a/vendor/src/github.com/docker/libcontainer/.travis.yml +++ /dev/null @@ -1,36 +0,0 @@ -language: go -go: 1.3 - -# let us have pretty experimental Docker-based Travis workers -sudo: false - -env: - - TRAVIS_GLOBAL_WTF=1 - - _GOOS=linux _GOARCH=amd64 CGO_ENABLED=1 - - _GOOS=linux _GOARCH=amd64 CGO_ENABLED=0 -# - _GOOS=linux _GOARCH=386 CGO_ENABLED=1 # TODO add this once Travis can handle it (https://github.com/travis-ci/travis-ci/issues/2207#issuecomment-49625061) - - _GOOS=linux _GOARCH=386 CGO_ENABLED=0 - - _GOOS=linux _GOARCH=arm CGO_ENABLED=0 - -install: - - go get code.google.com/p/go.tools/cmd/cover - - mkdir -pv "${GOPATH%%:*}/src/github.com/docker" && [ -d "${GOPATH%%:*}/src/github.com/docker/libcontainer" ] || ln -sv "$(readlink -f .)" "${GOPATH%%:*}/src/github.com/docker/libcontainer" - - if [ -z "$TRAVIS_GLOBAL_WTF" ]; then - gvm cross "$_GOOS" "$_GOARCH"; - export GOOS="$_GOOS" GOARCH="$_GOARCH"; - fi - - export GOPATH="$GOPATH:$(pwd)/vendor" - - if [ -z "$TRAVIS_GLOBAL_WTF" ]; then go env; fi - - go get -d -v ./... # TODO remove this if /docker/docker gets purged from our includes - - if [ "$TRAVIS_GLOBAL_WTF" ]; then - export DOCKER_PATH="${GOPATH%%:*}/src/github.com/docker/docker"; - mkdir -p "$DOCKER_PATH/hack/make"; - ( cd "$DOCKER_PATH/hack/make" && wget -c 'https://raw.githubusercontent.com/docker/docker/master/hack/make/'{.validate,validate-dco,validate-gofmt} ); - sed -i 's!docker/docker!docker/libcontainer!' 
"$DOCKER_PATH/hack/make/.validate"; - fi - -script: - - if [ "$TRAVIS_GLOBAL_WTF" ]; then bash "$DOCKER_PATH/hack/make/validate-dco"; fi - - if [ "$TRAVIS_GLOBAL_WTF" ]; then bash "$DOCKER_PATH/hack/make/validate-gofmt"; fi - - if [ -z "$TRAVIS_GLOBAL_WTF" ]; then make direct-build; fi - - if [ -z "$TRAVIS_GLOBAL_WTF" -a "$GOARCH" != 'arm' ]; then make direct-test-short; fi diff --git a/vendor/src/github.com/docker/libcontainer/MAINTAINERS b/vendor/src/github.com/docker/libcontainer/MAINTAINERS index 24011b0540..7295c6038f 100644 --- a/vendor/src/github.com/docker/libcontainer/MAINTAINERS +++ b/vendor/src/github.com/docker/libcontainer/MAINTAINERS @@ -2,5 +2,4 @@ Michael Crosby (@crosbymichael) Rohit Jnagal (@rjnagal) Victor Marmol (@vmarmol) Mrunal Patel (@mrunalp) -.travis.yml: Tianon Gravi (@tianon) update-vendor.sh: Tianon Gravi (@tianon) diff --git a/vendor/src/github.com/docker/libcontainer/README.md b/vendor/src/github.com/docker/libcontainer/README.md index 3201df9b98..37047e68c8 100644 --- a/vendor/src/github.com/docker/libcontainer/README.md +++ b/vendor/src/github.com/docker/libcontainer/README.md @@ -1,4 +1,4 @@ -## libcontainer - reference implementation for containers [![Build Status](https://travis-ci.org/docker/libcontainer.png?branch=master)](https://travis-ci.org/docker/libcontainer) +## libcontainer - reference implementation for containers [![Build Status](https://ci.dockerproject.com/github.com/docker/libcontainer/status.svg?branch=master)](https://ci.dockerproject.com/github.com/docker/libcontainer) ### Note on API changes: diff --git a/vendor/src/github.com/docker/libcontainer/cgroups/fs/utils_test.go b/vendor/src/github.com/docker/libcontainer/cgroups/fs/utils_test.go index f1afd49411..8b19a84b27 100644 --- a/vendor/src/github.com/docker/libcontainer/cgroups/fs/utils_test.go +++ b/vendor/src/github.com/docker/libcontainer/cgroups/fs/utils_test.go @@ -57,7 +57,7 @@ func TestGetCgroupParamsInt(t *testing.T) { if err != nil { t.Fatal(err) } else if value != 0 { - t.Fatalf("Expected %d to equal %f", value, 0) + t.Fatalf("Expected %d to equal %d", value, 0) } // Success with negative values lesser than min int64 @@ -70,7 +70,7 @@ func TestGetCgroupParamsInt(t *testing.T) { if err != nil { t.Fatal(err) } else if value != 0 { - t.Fatalf("Expected %d to equal %f", value, 0) + t.Fatalf("Expected %d to equal %d", value, 0) } // Not a float. 
diff --git a/vendor/src/github.com/docker/libcontainer/cgroups/systemd/apply_systemd.go b/vendor/src/github.com/docker/libcontainer/cgroups/systemd/apply_systemd.go index 1f84a9c6f2..5155b67535 100644 --- a/vendor/src/github.com/docker/libcontainer/cgroups/systemd/apply_systemd.go +++ b/vendor/src/github.com/docker/libcontainer/cgroups/systemd/apply_systemd.go @@ -43,6 +43,13 @@ var ( } ) +func newProp(name string, units interface{}) systemd.Property { + return systemd.Property{ + Name: name, + Value: dbus.MakeVariant(units), + } +} + func UseSystemd() bool { s, err := os.Stat("/run/systemd/system") if err != nil || !s.IsDir() { @@ -99,27 +106,27 @@ func Apply(c *cgroups.Cgroup, pid int) (cgroups.ActiveCgroup, error) { } properties = append(properties, - systemd.Property{"Slice", dbus.MakeVariant(slice)}, - systemd.Property{"Description", dbus.MakeVariant("docker container " + c.Name)}, - systemd.Property{"PIDs", dbus.MakeVariant([]uint32{uint32(pid)})}, + systemd.PropSlice(slice), + systemd.PropDescription("docker container "+c.Name), + newProp("PIDs", []uint32{uint32(pid)}), ) // Always enable accounting, this gets us the same behaviour as the fs implementation, // plus the kernel has some problems with joining the memory cgroup at a later time. properties = append(properties, - systemd.Property{"MemoryAccounting", dbus.MakeVariant(true)}, - systemd.Property{"CPUAccounting", dbus.MakeVariant(true)}, - systemd.Property{"BlockIOAccounting", dbus.MakeVariant(true)}) + newProp("MemoryAccounting", true), + newProp("CPUAccounting", true), + newProp("BlockIOAccounting", true)) if c.Memory != 0 { properties = append(properties, - systemd.Property{"MemoryLimit", dbus.MakeVariant(uint64(c.Memory))}) + newProp("MemoryLimit", uint64(c.Memory))) } // TODO: MemoryReservation and MemorySwap not available in systemd if c.CpuShares != 0 { properties = append(properties, - systemd.Property{"CPUShares", dbus.MakeVariant(uint64(c.CpuShares))}) + newProp("CPUShares", uint64(c.CpuShares))) } if _, err := theConn.StartTransientUnit(unitName, "replace", properties...); err != nil { diff --git a/vendor/src/github.com/docker/libcontainer/devices/devices.go b/vendor/src/github.com/docker/libcontainer/devices/devices.go index 5bf80e8cd4..8e86d95292 100644 --- a/vendor/src/github.com/docker/libcontainer/devices/devices.go +++ b/vendor/src/github.com/docker/libcontainer/devices/devices.go @@ -103,7 +103,7 @@ func getDeviceNodes(path string) ([]*Device, error) { switch { case f.IsDir(): switch f.Name() { - case "pts", "shm", "fd": + case "pts", "shm", "fd", "mqueue": continue default: sub, err := getDeviceNodes(filepath.Join(path, f.Name())) diff --git a/vendor/src/github.com/docker/libcontainer/integration/init_test.go b/vendor/src/github.com/docker/libcontainer/integration/init_test.go index a0570f3245..9954c0f8e5 100644 --- a/vendor/src/github.com/docker/libcontainer/integration/init_test.go +++ b/vendor/src/github.com/docker/libcontainer/integration/init_test.go @@ -6,7 +6,6 @@ import ( "runtime" "github.com/docker/libcontainer/namespaces" - "github.com/docker/libcontainer/syncpipe" ) // init runs the libcontainer initialization code because of the busybox style needs @@ -27,12 +26,7 @@ func init() { log.Fatal(err) } - syncPipe, err := syncpipe.NewSyncPipeFromFd(0, 3) - if err != nil { - log.Fatalf("unable to create sync pipe: %s", err) - } - - if err := namespaces.Init(container, rootfs, "", syncPipe, os.Args[3:]); err != nil { + if err := namespaces.Init(container, rootfs, "", os.NewFile(3, "pipe"), 
os.Args[3:]); err != nil { log.Fatalf("unable to initialize for container: %s", err) } os.Exit(1) diff --git a/vendor/src/github.com/docker/libcontainer/label/label.go b/vendor/src/github.com/docker/libcontainer/label/label.go index ce60296ea1..04a72aeae0 100644 --- a/vendor/src/github.com/docker/libcontainer/label/label.go +++ b/vendor/src/github.com/docker/libcontainer/label/label.go @@ -43,3 +43,15 @@ func ReserveLabel(label string) error { func UnreserveLabel(label string) error { return nil } + +// DupSecOpt takes an process label and returns security options that +// can be used to set duplicate labels on future container processes +func DupSecOpt(src string) []string { + return nil +} + +// DisableSecOpt returns a security opt that can disable labeling +// support for future container processes +func DisableSecOpt() []string { + return nil +} diff --git a/vendor/src/github.com/docker/libcontainer/label/label_selinux.go b/vendor/src/github.com/docker/libcontainer/label/label_selinux.go index 65b84797b5..0b7d437f84 100644 --- a/vendor/src/github.com/docker/libcontainer/label/label_selinux.go +++ b/vendor/src/github.com/docker/libcontainer/label/label_selinux.go @@ -17,7 +17,6 @@ func InitLabels(options []string) (string, string, error) { if !selinux.SelinuxEnabled() { return "", "", nil } - var err error processLabel, mountLabel := selinux.GetLxcContexts() if processLabel != "" { pcon := selinux.NewContext(processLabel) @@ -38,7 +37,7 @@ func InitLabels(options []string) (string, string, error) { processLabel = pcon.Get() mountLabel = mcon.Get() } - return processLabel, mountLabel, err + return processLabel, mountLabel, nil } // DEPRECATED: The GenLabels function is only to be used during the transition to the official API. @@ -130,3 +129,15 @@ func UnreserveLabel(label string) error { selinux.FreeLxcContexts(label) return nil } + +// DupSecOpt takes an process label and returns security options that +// can be used to set duplicate labels on future container processes +func DupSecOpt(src string) []string { + return selinux.DupSecOpt(src) +} + +// DisableSecOpt returns a security opt that can disable labeling +// support for future container processes +func DisableSecOpt() []string { + return selinux.DisableSecOpt() +} diff --git a/vendor/src/github.com/docker/libcontainer/label/label_selinux_test.go b/vendor/src/github.com/docker/libcontainer/label/label_selinux_test.go index c83654f6b5..8629353f24 100644 --- a/vendor/src/github.com/docker/libcontainer/label/label_selinux_test.go +++ b/vendor/src/github.com/docker/libcontainer/label/label_selinux_test.go @@ -3,6 +3,7 @@ package label import ( + "strings" "testing" "github.com/docker/libcontainer/selinux" @@ -33,7 +34,7 @@ func TestInit(t *testing.T) { t.Fatal(err) } if plabel != "user_u:user_r:user_t:s0:c1,c15" || mlabel != "user_u:object_r:svirt_sandbox_file_t:s0:c1,c15" { - t.Log("InitLabels User Failed") + t.Log("InitLabels User Match Failed") t.Log(plabel, mlabel) t.Fatal(err) } @@ -46,3 +47,43 @@ func TestInit(t *testing.T) { } } } +func TestDuplicateLabel(t *testing.T) { + secopt := DupSecOpt("system_u:system_r:svirt_lxc_net_t:s0:c1,c2") + t.Log(secopt) + for _, opt := range secopt { + con := strings.SplitN(opt, ":", 3) + if len(con) != 3 || con[0] != "label" { + t.Errorf("Invalid DupSecOpt return value") + continue + } + if con[1] == "user" { + if con[2] != "system_u" { + t.Errorf("DupSecOpt Failed user incorrect") + } + continue + } + if con[1] == "role" { + if con[2] != "system_r" { + t.Errorf("DupSecOpt Failed role 
incorrect") + } + continue + } + if con[1] == "type" { + if con[2] != "svirt_lxc_net_t" { + t.Errorf("DupSecOpt Failed type incorrect") + } + continue + } + if con[1] == "level" { + if con[2] != "s0:c1,c2" { + t.Errorf("DupSecOpt Failed level incorrect") + } + continue + } + t.Errorf("DupSecOpt Failed invalid field %q", con[1]) + } + secopt = DisableSecOpt() + if secopt[0] != "label:disable" { + t.Errorf("DisableSecOpt Failed level incorrect") + } +} diff --git a/vendor/src/github.com/docker/libcontainer/mount/init.go b/vendor/src/github.com/docker/libcontainer/mount/init.go index ea2b732737..a2c3d52026 100644 --- a/vendor/src/github.com/docker/libcontainer/mount/init.go +++ b/vendor/src/github.com/docker/libcontainer/mount/init.go @@ -97,7 +97,7 @@ func InitializeMountNamespace(rootfs, console string, sysReadonly bool, mountCon return nil } -// mountSystem sets up linux specific system mounts like sys, proc, shm, and devpts +// mountSystem sets up linux specific system mounts like mqueue, sys, proc, shm, and devpts // inside the mount namespace func mountSystem(rootfs string, sysReadonly bool, mountConfig *MountConfig) error { for _, m := range newSystemMounts(rootfs, mountConfig.MountLabel, sysReadonly) { @@ -168,6 +168,7 @@ func newSystemMounts(rootfs, mountLabel string, sysReadonly bool) []mount { {source: "proc", path: filepath.Join(rootfs, "proc"), device: "proc", flags: defaultMountFlags}, {source: "tmpfs", path: filepath.Join(rootfs, "dev"), device: "tmpfs", flags: syscall.MS_NOSUID | syscall.MS_STRICTATIME, data: label.FormatMountLabel("mode=755", mountLabel)}, {source: "shm", path: filepath.Join(rootfs, "dev", "shm"), device: "tmpfs", flags: defaultMountFlags, data: label.FormatMountLabel("mode=1777,size=65536k", mountLabel)}, + {source: "mqueue", path: filepath.Join(rootfs, "dev", "mqueue"), device: "mqueue", flags: defaultMountFlags}, {source: "devpts", path: filepath.Join(rootfs, "dev", "pts"), device: "devpts", flags: syscall.MS_NOSUID | syscall.MS_NOEXEC, data: label.FormatMountLabel("newinstance,ptmxmode=0666,mode=620,gid=5", mountLabel)}, } diff --git a/vendor/src/github.com/docker/libcontainer/namespaces/exec.go b/vendor/src/github.com/docker/libcontainer/namespaces/exec.go index 4440ccd0d5..bd3a4a3f9e 100644 --- a/vendor/src/github.com/docker/libcontainer/namespaces/exec.go +++ b/vendor/src/github.com/docker/libcontainer/namespaces/exec.go @@ -3,6 +3,7 @@ package namespaces import ( + "encoding/json" "io" "os" "os/exec" @@ -13,7 +14,6 @@ import ( "github.com/docker/libcontainer/cgroups/fs" "github.com/docker/libcontainer/cgroups/systemd" "github.com/docker/libcontainer/network" - "github.com/docker/libcontainer/syncpipe" "github.com/docker/libcontainer/system" ) @@ -22,19 +22,17 @@ import ( // Exec performs setup outside of a namespace so that a container can be // executed. Exec is a high level function for working with container namespaces. 
func Exec(container *libcontainer.Config, stdin io.Reader, stdout, stderr io.Writer, console, dataPath string, args []string, createCommand CreateCommand, startCallback func()) (int, error) { - var ( - err error - ) + var err error // create a pipe so that we can syncronize with the namespaced process and - // pass the veth name to the child - syncPipe, err := syncpipe.NewSyncPipe() + // pass the state and configuration to the child process + parent, child, err := newInitPipe() if err != nil { return -1, err } - defer syncPipe.Close() + defer parent.Close() - command := createCommand(container, console, dataPath, os.Args[0], syncPipe.Child(), args) + command := createCommand(container, console, dataPath, os.Args[0], child, args) // Note: these are only used in non-tty mode // if there is a tty for the container it will be opened within the namespace and the // fds will be duped to stdin, stdiout, and stderr @@ -43,39 +41,47 @@ func Exec(container *libcontainer.Config, stdin io.Reader, stdout, stderr io.Wri command.Stderr = stderr if err := command.Start(); err != nil { + child.Close() return -1, err } + child.Close() - // Now we passed the pipe to the child, close our side - syncPipe.CloseChild() + terminate := func(terr error) (int, error) { + // TODO: log the errors for kill and wait + command.Process.Kill() + command.Wait() + return -1, terr + } started, err := system.GetProcessStartTime(command.Process.Pid) if err != nil { - return -1, err + return terminate(err) } // Do this before syncing with child so that no children // can escape the cgroup cgroupRef, err := SetupCgroups(container, command.Process.Pid) if err != nil { - command.Process.Kill() - command.Wait() - return -1, err + return terminate(err) } defer cgroupRef.Cleanup() cgroupPaths, err := cgroupRef.Paths() if err != nil { - command.Process.Kill() - command.Wait() - return -1, err + return terminate(err) } var networkState network.NetworkState - if err := InitializeNetworking(container, command.Process.Pid, syncPipe, &networkState); err != nil { - command.Process.Kill() - command.Wait() - return -1, err + if err := InitializeNetworking(container, command.Process.Pid, &networkState); err != nil { + return terminate(err) + } + // send the state to the container's init process then shutdown writes for the parent + if err := json.NewEncoder(parent).Encode(networkState); err != nil { + return terminate(err) + } + // shutdown writes for the parent side of the pipe + if err := syscall.Shutdown(int(parent.Fd()), syscall.SHUT_WR); err != nil { + return terminate(err) } state := &libcontainer.State{ @@ -86,17 +92,18 @@ func Exec(container *libcontainer.Config, stdin io.Reader, stdout, stderr io.Wri } if err := libcontainer.SaveState(dataPath, state); err != nil { - command.Process.Kill() - command.Wait() - return -1, err + return terminate(err) } defer libcontainer.DeleteState(dataPath) - // Sync with child - if err := syncPipe.ReadFromChild(); err != nil { - command.Process.Kill() - command.Wait() - return -1, err + // wait for the child process to fully complete and receive an error message + // if one was encoutered + var ierr *initError + if err := json.NewDecoder(parent).Decode(&ierr); err != nil && err != io.EOF { + return terminate(err) + } + if ierr != nil { + return terminate(ierr) } if startCallback != nil { @@ -108,7 +115,6 @@ func Exec(container *libcontainer.Config, stdin io.Reader, stdout, stderr io.Wri return -1, err } } - return command.ProcessState.Sys().(syscall.WaitStatus).ExitStatus(), nil } @@ -129,16 +135,6 @@ 
func DefaultCreateCommand(container *libcontainer.Config, console, dataPath, ini "data_path=" + dataPath, } - /* - TODO: move user and wd into env - if user != "" { - env = append(env, "user="+user) - } - if workingDir != "" { - env = append(env, "wd="+workingDir) - } - */ - command := exec.Command(init, append([]string{"init", "--"}, args...)...) // make sure the process is executed inside the context of the rootfs command.Dir = container.RootFs @@ -173,7 +169,7 @@ func SetupCgroups(container *libcontainer.Config, nspid int) (cgroups.ActiveCgro // InitializeNetworking creates the container's network stack outside of the namespace and moves // interfaces into the container's net namespaces if necessary -func InitializeNetworking(container *libcontainer.Config, nspid int, pipe *syncpipe.SyncPipe, networkState *network.NetworkState) error { +func InitializeNetworking(container *libcontainer.Config, nspid int, networkState *network.NetworkState) error { for _, config := range container.Networks { strategy, err := network.GetStrategy(config.Type) if err != nil { @@ -183,18 +179,5 @@ func InitializeNetworking(container *libcontainer.Config, nspid int, pipe *syncp return err } } - return pipe.SendToChild(networkState) -} - -// GetNamespaceFlags parses the container's Namespaces options to set the correct -// flags on clone, unshare, and setns -func GetNamespaceFlags(namespaces map[string]bool) (flag int) { - for key, enabled := range namespaces { - if enabled { - if ns := GetNamespace(key); ns != nil { - flag |= ns.Value - } - } - } - return flag + return nil } diff --git a/vendor/src/github.com/docker/libcontainer/namespaces/execin.go b/vendor/src/github.com/docker/libcontainer/namespaces/execin.go index 53e676ac7e..7dea918735 100644 --- a/vendor/src/github.com/docker/libcontainer/namespaces/execin.go +++ b/vendor/src/github.com/docker/libcontainer/namespaces/execin.go @@ -3,6 +3,7 @@ package namespaces import ( + "encoding/json" "fmt" "io" "os" @@ -15,7 +16,6 @@ import ( "github.com/docker/libcontainer/apparmor" "github.com/docker/libcontainer/cgroups" "github.com/docker/libcontainer/label" - "github.com/docker/libcontainer/syncpipe" "github.com/docker/libcontainer/system" ) @@ -41,11 +41,11 @@ func ExecIn(container *libcontainer.Config, state *libcontainer.State, userArgs } } - pipe, err := syncpipe.NewSyncPipe() + parent, child, err := newInitPipe() if err != nil { return -1, err } - defer pipe.Close() + defer parent.Close() // Note: these are only used in non-tty mode // if there is a tty for the container it will be opened within the namespace and the @@ -53,23 +53,28 @@ func ExecIn(container *libcontainer.Config, state *libcontainer.State, userArgs cmd.Stdin = stdin cmd.Stdout = stdout cmd.Stderr = stderr - - cmd.ExtraFiles = []*os.File{pipe.Child()} + cmd.ExtraFiles = []*os.File{child} if err := cmd.Start(); err != nil { + child.Close() return -1, err } - pipe.CloseChild() + child.Close() + + terminate := func(terr error) (int, error) { + // TODO: log the errors for kill and wait + cmd.Process.Kill() + cmd.Wait() + return -1, terr + } // Enter cgroups. 
if err := EnterCgroups(state, cmd.Process.Pid); err != nil { - return -1, err + return terminate(err) } - if err := pipe.SendToChild(container); err != nil { - cmd.Process.Kill() - cmd.Wait() - return -1, err + if err := json.NewEncoder(parent).Encode(container); err != nil { + return terminate(err) } if startCallback != nil { @@ -81,7 +86,6 @@ func ExecIn(container *libcontainer.Config, state *libcontainer.State, userArgs return -1, err } } - return cmd.ProcessState.Sys().(syscall.WaitStatus).ExitStatus(), nil } diff --git a/vendor/src/github.com/docker/libcontainer/namespaces/init.go b/vendor/src/github.com/docker/libcontainer/namespaces/init.go index 879ac21e0d..482ba0f399 100644 --- a/vendor/src/github.com/docker/libcontainer/namespaces/init.go +++ b/vendor/src/github.com/docker/libcontainer/namespaces/init.go @@ -3,7 +3,9 @@ package namespaces import ( + "encoding/json" "fmt" + "io/ioutil" "os" "strings" "syscall" @@ -18,7 +20,6 @@ import ( "github.com/docker/libcontainer/network" "github.com/docker/libcontainer/security/capabilities" "github.com/docker/libcontainer/security/restrict" - "github.com/docker/libcontainer/syncpipe" "github.com/docker/libcontainer/system" "github.com/docker/libcontainer/user" "github.com/docker/libcontainer/utils" @@ -30,11 +31,22 @@ import ( // and other options required for the new container. // The caller of Init function has to ensure that the go runtime is locked to an OS thread // (using runtime.LockOSThread) else system calls like setns called within Init may not work as intended. -func Init(container *libcontainer.Config, uncleanRootfs, consolePath string, syncPipe *syncpipe.SyncPipe, args []string) (err error) { +func Init(container *libcontainer.Config, uncleanRootfs, consolePath string, pipe *os.File, args []string) (err error) { defer func() { + // if we have an error during the initialization of the container's init then send it back to the + // parent process in the form of an initError. if err != nil { - syncPipe.ReportChildError(err) + // ensure that any data sent from the parent is consumed so it doesn't + // receive ECONNRESET when the child writes to the pipe. 
+ ioutil.ReadAll(pipe) + if err := json.NewEncoder(pipe).Encode(initError{ + Message: err.Error(), + }); err != nil { + panic(err) + } } + // ensure that this pipe is always closed + pipe.Close() }() rootfs, err := utils.ResolveRootfs(uncleanRootfs) @@ -50,7 +62,7 @@ func Init(container *libcontainer.Config, uncleanRootfs, consolePath string, syn // We always read this as it is a way to sync with the parent as well var networkState *network.NetworkState - if err := syncPipe.ReadFromParent(&networkState); err != nil { + if err := json.NewDecoder(pipe).Decode(&networkState); err != nil { return err } @@ -164,11 +176,11 @@ func SetupUser(u string) error { return fmt.Errorf("setgroups %s", err) } - if err := syscall.Setgid(gid); err != nil { + if err := system.Setgid(gid); err != nil { return fmt.Errorf("setgid %s", err) } - if err := syscall.Setuid(uid); err != nil { + if err := system.Setuid(uid); err != nil { return fmt.Errorf("setuid %s", err) } diff --git a/vendor/src/github.com/docker/libcontainer/namespaces/utils.go b/vendor/src/github.com/docker/libcontainer/namespaces/utils.go new file mode 100644 index 0000000000..bf60cd8f0e --- /dev/null +++ b/vendor/src/github.com/docker/libcontainer/namespaces/utils.go @@ -0,0 +1,38 @@ +// +build linux + +package namespaces + +import ( + "os" + "syscall" +) + +type initError struct { + Message string `json:"message,omitempty"` +} + +func (i initError) Error() string { + return i.Message +} + +// New returns a newly initialized Pipe for communication between processes +func newInitPipe() (parent *os.File, child *os.File, err error) { + fds, err := syscall.Socketpair(syscall.AF_LOCAL, syscall.SOCK_STREAM|syscall.SOCK_CLOEXEC, 0) + if err != nil { + return nil, nil, err + } + return os.NewFile(uintptr(fds[1]), "parent"), os.NewFile(uintptr(fds[0]), "child"), nil +} + +// GetNamespaceFlags parses the container's Namespaces options to set the correct +// flags on clone, unshare, and setns +func GetNamespaceFlags(namespaces map[string]bool) (flag int) { + for key, enabled := range namespaces { + if enabled { + if ns := GetNamespace(key); ns != nil { + flag |= ns.Value + } + } + } + return flag +} diff --git a/vendor/src/github.com/docker/libcontainer/netlink/netlink_linux.go b/vendor/src/github.com/docker/libcontainer/netlink/netlink_linux.go index c858b1129e..93ebade5c0 100644 --- a/vendor/src/github.com/docker/libcontainer/netlink/netlink_linux.go +++ b/vendor/src/github.com/docker/libcontainer/netlink/netlink_linux.go @@ -7,6 +7,7 @@ import ( "math/rand" "net" "os" + "path/filepath" "sync/atomic" "syscall" "unsafe" @@ -1204,6 +1205,28 @@ func SetMacAddress(name, addr string) error { return nil } +func SetHairpinMode(iface *net.Interface, enabled bool) error { + sysPath := filepath.Join("/sys/class/net", iface.Name, "brport/hairpin_mode") + + sysFile, err := os.OpenFile(sysPath, os.O_WRONLY, 0) + if err != nil { + return err + } + defer sysFile.Close() + + var writeVal []byte + if enabled { + writeVal = []byte("1") + } else { + writeVal = []byte("0") + } + if _, err := sysFile.Write(writeVal); err != nil { + return err + } + + return nil +} + func ChangeName(iface *net.Interface, newName string) error { if len(newName) >= IFNAMSIZ { return fmt.Errorf("Interface name %s too long", newName) @@ -1224,5 +1247,6 @@ func ChangeName(iface *net.Interface, newName string) error { if _, _, errno := syscall.Syscall(syscall.SYS_IOCTL, uintptr(fd), syscall.SIOCSIFNAME, uintptr(unsafe.Pointer(&data[0]))); errno != 0 { return errno } + return nil } diff --git 
a/vendor/src/github.com/docker/libcontainer/netlink/netlink_linux_test.go b/vendor/src/github.com/docker/libcontainer/netlink/netlink_linux_test.go index 0320c47221..be896a14a4 100644 --- a/vendor/src/github.com/docker/libcontainer/netlink/netlink_linux_test.go +++ b/vendor/src/github.com/docker/libcontainer/netlink/netlink_linux_test.go @@ -116,7 +116,7 @@ func TestNetworkSetMacAddress(t *testing.T) { ifcBeforeSet := readLink(t, tl.name) if err := NetworkSetMacAddress(ifcBeforeSet, macaddr); err != nil { - t.Fatalf("Could not set %s MAC address on %#v interface: err", macaddr, tl, err) + t.Fatalf("Could not set %s MAC address on %#v interface: %s", macaddr, tl, err) } ifcAfterSet := readLink(t, tl.name) @@ -140,7 +140,7 @@ func TestNetworkSetMTU(t *testing.T) { ifcBeforeSet := readLink(t, tl.name) if err := NetworkSetMTU(ifcBeforeSet, mtu); err != nil { - t.Fatalf("Could not set %d MTU on %#v interface: err", mtu, tl, err) + t.Fatalf("Could not set %d MTU on %#v interface: %s", mtu, tl, err) } ifcAfterSet := readLink(t, tl.name) diff --git a/vendor/src/github.com/docker/libcontainer/network/network.go b/vendor/src/github.com/docker/libcontainer/network/network.go index 2c3499b6d6..ba8f6f74e7 100644 --- a/vendor/src/github.com/docker/libcontainer/network/network.go +++ b/vendor/src/github.com/docker/libcontainer/network/network.go @@ -95,3 +95,11 @@ func SetMtu(name string, mtu int) error { } return netlink.NetworkSetMTU(iface, mtu) } + +func SetHairpinMode(name string, enabled bool) error { + iface, err := net.InterfaceByName(name) + if err != nil { + return err + } + return netlink.SetHairpinMode(iface, enabled) +} diff --git a/vendor/src/github.com/docker/libcontainer/network/veth.go b/vendor/src/github.com/docker/libcontainer/network/veth.go index 3d7dc8729e..240da57986 100644 --- a/vendor/src/github.com/docker/libcontainer/network/veth.go +++ b/vendor/src/github.com/docker/libcontainer/network/veth.go @@ -39,6 +39,9 @@ func (v *Veth) Create(n *Network, nspid int, networkState *NetworkState) error { if err := SetMtu(name1, n.Mtu); err != nil { return err } + if err := SetHairpinMode(name1, true); err != nil { + return err + } if err := InterfaceUp(name1); err != nil { return err } diff --git a/vendor/src/github.com/docker/libcontainer/nsinit/init.go b/vendor/src/github.com/docker/libcontainer/nsinit/init.go index c091ee1099..6df9b1d894 100644 --- a/vendor/src/github.com/docker/libcontainer/nsinit/init.go +++ b/vendor/src/github.com/docker/libcontainer/nsinit/init.go @@ -8,7 +8,6 @@ import ( "github.com/codegangsta/cli" "github.com/docker/libcontainer/namespaces" - "github.com/docker/libcontainer/syncpipe" ) var ( @@ -41,12 +40,8 @@ func initAction(context *cli.Context) { log.Fatal(err) } - syncPipe, err := syncpipe.NewSyncPipeFromFd(0, uintptr(pipeFd)) - if err != nil { - log.Fatalf("unable to create sync pipe: %s", err) - } - - if err := namespaces.Init(container, rootfs, console, syncPipe, []string(context.Args())); err != nil { + pipe := os.NewFile(uintptr(pipeFd), "pipe") + if err := namespaces.Init(container, rootfs, console, pipe, []string(context.Args())); err != nil { log.Fatalf("unable to initialize for container: %s", err) } } diff --git a/vendor/src/github.com/docker/libcontainer/nsinit/utils.go b/vendor/src/github.com/docker/libcontainer/nsinit/utils.go index 7f5155942c..6a8aafbf17 100644 --- a/vendor/src/github.com/docker/libcontainer/nsinit/utils.go +++ b/vendor/src/github.com/docker/libcontainer/nsinit/utils.go @@ -8,7 +8,6 @@ import ( "github.com/codegangsta/cli" 
"github.com/docker/libcontainer" - "github.com/docker/libcontainer/syncpipe" ) // rFunc is a function registration for calling after an execin @@ -59,16 +58,13 @@ func findUserArgs() []string { // loadConfigFromFd loads a container's config from the sync pipe that is provided by // fd 3 when running a process func loadConfigFromFd() (*libcontainer.Config, error) { - syncPipe, err := syncpipe.NewSyncPipeFromFd(0, 3) - if err != nil { - return nil, err - } + pipe := os.NewFile(3, "pipe") + defer pipe.Close() var config *libcontainer.Config - if err := syncPipe.ReadFromParent(&config); err != nil { + if err := json.NewDecoder(pipe).Decode(&config); err != nil { return nil, err } - return config, nil } diff --git a/vendor/src/github.com/docker/libcontainer/selinux/selinux.go b/vendor/src/github.com/docker/libcontainer/selinux/selinux.go index e0c90ee551..e5bd820980 100644 --- a/vendor/src/github.com/docker/libcontainer/selinux/selinux.go +++ b/vendor/src/github.com/docker/libcontainer/selinux/selinux.go @@ -434,3 +434,28 @@ func Chcon(fpath string, scon string, recurse bool) error { return Setfilecon(fpath, scon) } + +// DupSecOpt takes an SELinux process label and returns security options that +// can will set the SELinux Type and Level for future container processes +func DupSecOpt(src string) []string { + if src == "" { + return nil + } + con := NewContext(src) + if con["user"] == "" || + con["role"] == "" || + con["type"] == "" || + con["level"] == "" { + return nil + } + return []string{"label:user:" + con["user"], + "label:role:" + con["role"], + "label:type:" + con["type"], + "label:level:" + con["level"]} +} + +// DisableSecOpt returns a security opt that can be used to disabling SELinux +// labeling support for future container processes +func DisableSecOpt() []string { + return []string{"label:disable"} +} diff --git a/vendor/src/github.com/docker/libcontainer/selinux/selinux_test.go b/vendor/src/github.com/docker/libcontainer/selinux/selinux_test.go index 34c3497441..228ad8361c 100644 --- a/vendor/src/github.com/docker/libcontainer/selinux/selinux_test.go +++ b/vendor/src/github.com/docker/libcontainer/selinux/selinux_test.go @@ -42,7 +42,7 @@ func TestSELinux(t *testing.T) { t.Log("getenforce ", selinux.SelinuxGetEnforce()) t.Log("getenforcemode ", selinux.SelinuxGetEnforceMode()) pid := os.Getpid() - t.Log("PID:%d MCS:%s\n", pid, selinux.IntToMcs(pid, 1023)) + t.Logf("PID:%d MCS:%s\n", pid, selinux.IntToMcs(pid, 1023)) err = selinux.Setfscreatecon("unconfined_u:unconfined_r:unconfined_t:s0") if err == nil { t.Log(selinux.Getfscreatecon()) diff --git a/vendor/src/github.com/docker/libcontainer/syncpipe/sync_pipe.go b/vendor/src/github.com/docker/libcontainer/syncpipe/sync_pipe.go deleted file mode 100644 index f73c354dbf..0000000000 --- a/vendor/src/github.com/docker/libcontainer/syncpipe/sync_pipe.go +++ /dev/null @@ -1,105 +0,0 @@ -package syncpipe - -import ( - "encoding/json" - "fmt" - "io/ioutil" - "os" - "syscall" -) - -// SyncPipe allows communication to and from the child processes -// to it's parent and allows the two independent processes to -// syncronize their state. 
-type SyncPipe struct { - parent, child *os.File -} - -func NewSyncPipeFromFd(parentFd, childFd uintptr) (*SyncPipe, error) { - s := &SyncPipe{} - - if parentFd > 0 { - s.parent = os.NewFile(parentFd, "parentPipe") - } else if childFd > 0 { - s.child = os.NewFile(childFd, "childPipe") - } else { - return nil, fmt.Errorf("no valid sync pipe fd specified") - } - - return s, nil -} - -func (s *SyncPipe) Child() *os.File { - return s.child -} - -func (s *SyncPipe) Parent() *os.File { - return s.parent -} - -func (s *SyncPipe) SendToChild(v interface{}) error { - data, err := json.Marshal(v) - if err != nil { - return err - } - - s.parent.Write(data) - - return syscall.Shutdown(int(s.parent.Fd()), syscall.SHUT_WR) -} - -func (s *SyncPipe) ReadFromChild() error { - data, err := ioutil.ReadAll(s.parent) - if err != nil { - return err - } - - if len(data) > 0 { - return fmt.Errorf("%s", data) - } - - return nil -} - -func (s *SyncPipe) ReadFromParent(v interface{}) error { - data, err := ioutil.ReadAll(s.child) - if err != nil { - return fmt.Errorf("error reading from sync pipe %s", err) - } - - if len(data) > 0 { - if err := json.Unmarshal(data, v); err != nil { - return err - } - } - - return nil -} - -func (s *SyncPipe) ReportChildError(err error) { - // ensure that any data sent from the parent is consumed so it doesn't - // receive ECONNRESET when the child writes to the pipe. - ioutil.ReadAll(s.child) - - s.child.Write([]byte(err.Error())) - s.CloseChild() -} - -func (s *SyncPipe) Close() error { - if s.parent != nil { - s.parent.Close() - } - - if s.child != nil { - s.child.Close() - } - - return nil -} - -func (s *SyncPipe) CloseChild() { - if s.child != nil { - s.child.Close() - s.child = nil - } -} diff --git a/vendor/src/github.com/docker/libcontainer/syncpipe/sync_pipe_linux.go b/vendor/src/github.com/docker/libcontainer/syncpipe/sync_pipe_linux.go deleted file mode 100644 index bea4b52f9e..0000000000 --- a/vendor/src/github.com/docker/libcontainer/syncpipe/sync_pipe_linux.go +++ /dev/null @@ -1,20 +0,0 @@ -package syncpipe - -import ( - "os" - "syscall" -) - -func NewSyncPipe() (s *SyncPipe, err error) { - s = &SyncPipe{} - - fds, err := syscall.Socketpair(syscall.AF_LOCAL, syscall.SOCK_STREAM|syscall.SOCK_CLOEXEC, 0) - if err != nil { - return nil, err - } - - s.child = os.NewFile(uintptr(fds[0]), "child syncpipe") - s.parent = os.NewFile(uintptr(fds[1]), "parent syncpipe") - - return s, nil -} diff --git a/vendor/src/github.com/docker/libcontainer/syncpipe/sync_pipe_test.go b/vendor/src/github.com/docker/libcontainer/syncpipe/sync_pipe_test.go deleted file mode 100644 index 906e6ed24d..0000000000 --- a/vendor/src/github.com/docker/libcontainer/syncpipe/sync_pipe_test.go +++ /dev/null @@ -1,72 +0,0 @@ -package syncpipe - -import ( - "fmt" - "syscall" - "testing" -) - -type testStruct struct { - Name string -} - -func TestSendErrorFromChild(t *testing.T) { - pipe, err := NewSyncPipe() - if err != nil { - t.Fatal(err) - } - defer func() { - if err := pipe.Close(); err != nil { - t.Fatal(err) - } - }() - - childfd, err := syscall.Dup(int(pipe.Child().Fd())) - if err != nil { - t.Fatal(err) - } - childPipe, _ := NewSyncPipeFromFd(0, uintptr(childfd)) - - pipe.CloseChild() - pipe.SendToChild(nil) - - expected := "something bad happened" - childPipe.ReportChildError(fmt.Errorf(expected)) - - childError := pipe.ReadFromChild() - if childError == nil { - t.Fatal("expected an error to be returned but did not receive anything") - } - - if childError.Error() != expected { - t.Fatalf("expected 
%q but received error message %q", expected, childError.Error()) - } -} - -func TestSendPayloadToChild(t *testing.T) { - pipe, err := NewSyncPipe() - if err != nil { - t.Fatal(err) - } - - defer func() { - if err := pipe.Close(); err != nil { - t.Fatal(err) - } - }() - - expected := "libcontainer" - - if err := pipe.SendToChild(testStruct{Name: expected}); err != nil { - t.Fatal(err) - } - - var s *testStruct - if err := pipe.ReadFromParent(&s); err != nil { - t.Fatal(err) - } - - if s.Name != expected { - t.Fatalf("expected name %q but received %q", expected, s.Name) - } -} diff --git a/vendor/src/github.com/docker/libcontainer/system/syscall_linux_amd64.go b/vendor/src/github.com/docker/libcontainer/system/syscall_linux_amd64.go index 0a346c3b9c..516c17e921 100644 --- a/vendor/src/github.com/docker/libcontainer/system/syscall_linux_amd64.go +++ b/vendor/src/github.com/docker/libcontainer/system/syscall_linux_amd64.go @@ -1,4 +1,5 @@ // +build linux,amd64 + package system import ( From ed556fb38f4d1cba1460650f703cc8147a7b8f32 Mon Sep 17 00:00:00 2001 From: Michael Crosby Date: Thu, 6 Nov 2014 01:14:13 +0000 Subject: [PATCH 259/592] Update docker with syncpipe changes We removed the syncpipe package and replaced it with specific calls to create a new *os.File from a specified fd passed to the process. This reduced code and an extra object to manage the container's init lifecycle. Signed-off-by: Michael Crosby --- daemon/execdriver/native/init.go | 8 +------- daemon/execdriver/native/utils.go | 10 ++-------- 2 files changed, 3 insertions(+), 15 deletions(-) diff --git a/daemon/execdriver/native/init.go b/daemon/execdriver/native/init.go index c1c988d934..754d842c3b 100644 --- a/daemon/execdriver/native/init.go +++ b/daemon/execdriver/native/init.go @@ -13,7 +13,6 @@ import ( "github.com/docker/docker/pkg/reexec" "github.com/docker/libcontainer" "github.com/docker/libcontainer/namespaces" - "github.com/docker/libcontainer/syncpipe" ) func init() { @@ -48,12 +47,7 @@ func initializer() { writeError(err) } - syncPipe, err := syncpipe.NewSyncPipeFromFd(0, uintptr(*pipe)) - if err != nil { - writeError(err) - } - - if err := namespaces.Init(container, rootfs, *console, syncPipe, flag.Args()); err != nil { + if err := namespaces.Init(container, rootfs, *console, os.NewFile(uintptr(*pipe), "child"), flag.Args()); err != nil { writeError(err) } diff --git a/daemon/execdriver/native/utils.go b/daemon/execdriver/native/utils.go index e337cf4316..88aefaf382 100644 --- a/daemon/execdriver/native/utils.go +++ b/daemon/execdriver/native/utils.go @@ -3,10 +3,10 @@ package native import ( + "encoding/json" "os" "github.com/docker/libcontainer" - "github.com/docker/libcontainer/syncpipe" ) func findUserArgs() []string { @@ -21,15 +21,9 @@ func findUserArgs() []string { // loadConfigFromFd loads a container's config from the sync pipe that is provided by // fd 3 when running a process func loadConfigFromFd() (*libcontainer.Config, error) { - syncPipe, err := syncpipe.NewSyncPipeFromFd(0, 3) - if err != nil { - return nil, err - } - var config *libcontainer.Config - if err := syncPipe.ReadFromParent(&config); err != nil { + if err := json.NewDecoder(os.NewFile(3, "child")).Decode(&config); err != nil { return nil, err } - return config, nil } From b36bf9817482e97448f11a2f3eaf15b3a795d2f7 Mon Sep 17 00:00:00 2001 From: Abin Shahab Date: Tue, 4 Nov 2014 16:40:59 +0000 Subject: [PATCH 260/592] LXC TEMPLATE WILL CREATE MOUNT Lxc driver was throwing errors for mounts where the mount point does not exist in the 
container. This adds a create=dir/file mount option to the lxc template, to alleviate this issue. Docker-DCO-1.1-Signed-off-by: Abin Shahab (github: ashahab-altiscale) --- daemon/execdriver/lxc/lxc_template.go | 26 ++++-- .../execdriver/lxc/lxc_template_unit_test.go | 88 +++++++++++++++++++ 2 files changed, 109 insertions(+), 5 deletions(-) diff --git a/daemon/execdriver/lxc/lxc_template.go b/daemon/execdriver/lxc/lxc_template.go index 2cd63dc72d..d3fd85b7ab 100644 --- a/daemon/execdriver/lxc/lxc_template.go +++ b/daemon/execdriver/lxc/lxc_template.go @@ -1,11 +1,11 @@ package lxc import ( - "strings" - "text/template" - "github.com/docker/docker/daemon/execdriver" "github.com/docker/libcontainer/label" + "os" + "strings" + "text/template" ) const LxcTemplate = ` @@ -70,10 +70,11 @@ lxc.mount.entry = devpts {{escapeFstabSpaces $ROOTFS}}/dev/pts devpts {{formatMo lxc.mount.entry = shm {{escapeFstabSpaces $ROOTFS}}/dev/shm tmpfs {{formatMountLabel "size=65536k,nosuid,nodev,noexec" ""}} 0 0 {{range $value := .Mounts}} +{{$createVal := isDirectory $value.Source}} {{if $value.Writable}} -lxc.mount.entry = {{$value.Source}} {{escapeFstabSpaces $ROOTFS}}/{{escapeFstabSpaces $value.Destination}} none rbind,rw 0 0 +lxc.mount.entry = {{$value.Source}} {{escapeFstabSpaces $ROOTFS}}/{{escapeFstabSpaces $value.Destination}} none rbind,rw,create={{$createVal}} 0 0 {{else}} -lxc.mount.entry = {{$value.Source}} {{escapeFstabSpaces $ROOTFS}}/{{escapeFstabSpaces $value.Destination}} none rbind,ro 0 0 +lxc.mount.entry = {{$value.Source}} {{escapeFstabSpaces $ROOTFS}}/{{escapeFstabSpaces $value.Destination}} none rbind,ro,create={{$createVal}} 0 0 {{end}} {{end}} @@ -117,6 +118,20 @@ func escapeFstabSpaces(field string) string { return strings.Replace(field, " ", "\\040", -1) } +func isDirectory(source string) string { + f, err := os.Stat(source) + if err != nil { + if os.IsNotExist(err) { + return "dir" + } + return "" + } + if f.IsDir() { + return "dir" + } + return "file" +} + func getMemorySwap(v *execdriver.Resources) int64 { // By default, MemorySwap is set to twice the size of RAM. // If you want to omit MemorySwap, set it to `-1'. 
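As a rough illustration only (this sketch is not part of the patch, and the template text below is a cut-down stand-in for the real LxcTemplate), the new `isDirectory` helper shown above is what drives the `create=` value in the rendered `lxc.mount.entry` lines; the mount paths and the `/rootfs` prefix here are hypothetical:

```go
// Minimal, self-contained sketch of how the create= flag is derived when
// rendering mount entries. Simplified: the real template also escapes fstab
// spaces and applies mount labels.
package main

import (
	"os"
	"text/template"
)

// isDirectory mirrors the helper added in lxc_template.go: "dir" for
// directories (or missing paths), "file" for regular files.
func isDirectory(source string) string {
	f, err := os.Stat(source)
	if err != nil {
		if os.IsNotExist(err) {
			return "dir"
		}
		return ""
	}
	if f.IsDir() {
		return "dir"
	}
	return "file"
}

func main() {
	// Hypothetical mounts, used only for illustration.
	mounts := []struct {
		Source, Destination string
		Writable            bool
	}{
		{"/tmp", "/tmp", false},
		{"/etc/resolv.conf", "/etc/resolv.conf", true},
	}

	tmpl := template.Must(template.New("lxc").Funcs(template.FuncMap{
		"isDirectory": isDirectory,
	}).Parse(`{{range .}}lxc.mount.entry = {{.Source}} /rootfs{{.Destination}} none rbind,{{if .Writable}}rw{{else}}ro{{end}},create={{isDirectory .Source}} 0 0
{{end}}`))

	// On a typical Linux host this prints something like:
	//   lxc.mount.entry = /tmp /rootfs/tmp none rbind,ro,create=dir 0 0
	//   lxc.mount.entry = /etc/resolv.conf /rootfs/etc/resolv.conf none rbind,rw,create=file 0 0
	if err := tmpl.Execute(os.Stdout, mounts); err != nil {
		panic(err)
	}
}
```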
@@ -143,6 +158,7 @@ func init() { "getMemorySwap": getMemorySwap, "escapeFstabSpaces": escapeFstabSpaces, "formatMountLabel": label.FormatMountLabel, + "isDirectory": isDirectory, } LxcTemplateCompiled, err = template.New("lxc").Funcs(funcMap).Parse(LxcTemplate) if err != nil { diff --git a/daemon/execdriver/lxc/lxc_template_unit_test.go b/daemon/execdriver/lxc/lxc_template_unit_test.go index 900700b740..e76d5e9d03 100644 --- a/daemon/execdriver/lxc/lxc_template_unit_test.go +++ b/daemon/execdriver/lxc/lxc_template_unit_test.go @@ -140,3 +140,91 @@ func TestEscapeFstabSpaces(t *testing.T) { } } } + +func TestIsDirectory(t *testing.T) { + tempDir, err := ioutil.TempDir("", "TestIsDir") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(tempDir) + + tempFile, err := ioutil.TempFile(tempDir, "TestIsDirFile") + if err != nil { + t.Fatal(err) + } + + if isDirectory(tempDir) != "dir" { + t.Logf("Could not identify %s as a directory", tempDir) + t.Fail() + } + + if isDirectory(tempFile.Name()) != "file" { + t.Logf("Could not identify %s as a file", tempFile.Name()) + t.Fail() + } +} + +func TestCustomLxcConfigMounts(t *testing.T) { + root, err := ioutil.TempDir("", "TestCustomLxcConfig") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(root) + tempDir, err := ioutil.TempDir("", "TestIsDir") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(tempDir) + + tempFile, err := ioutil.TempFile(tempDir, "TestIsDirFile") + if err != nil { + t.Fatal(err) + } + os.MkdirAll(path.Join(root, "containers", "1"), 0777) + + driver, err := NewDriver(root, "", false) + if err != nil { + t.Fatal(err) + } + processConfig := execdriver.ProcessConfig{ + Privileged: false, + } + mounts := []execdriver.Mount{ + { + Source: tempDir, + Destination: tempDir, + Writable: false, + Private: true, + }, + { + Source: tempFile.Name(), + Destination: tempFile.Name(), + Writable: true, + Private: true, + }, + } + command := &execdriver.Command{ + ID: "1", + LxcConfig: []string{ + "lxc.utsname = docker", + "lxc.cgroup.cpuset.cpus = 0,1", + }, + Network: &execdriver.Network{ + Mtu: 1500, + Interface: nil, + }, + Mounts: mounts, + ProcessConfig: processConfig, + } + + p, err := driver.generateLXCConfig(command) + if err != nil { + t.Fatal(err) + } + + grepFile(t, p, "lxc.utsname = docker") + grepFile(t, p, "lxc.cgroup.cpuset.cpus = 0,1") + + grepFile(t, p, fmt.Sprintf("lxc.mount.entry = %s %s none rbind,ro,create=%s 0 0", tempDir, "/"+tempDir, "dir")) + grepFile(t, p, fmt.Sprintf("lxc.mount.entry = %s %s none rbind,rw,create=%s 0 0", tempFile.Name(), "/"+tempFile.Name(), "file")) +} From 69a5b827dcf01a6de5949a161606058017014cdc Mon Sep 17 00:00:00 2001 From: Doug Davis Date: Wed, 5 Nov 2014 18:23:42 -0800 Subject: [PATCH 261/592] See #8379 - if the container doesn't start I added code to make sure that if no other processing sets the container.exitCode to a non-zero value when we make sure its done before we return. I also made sure that while trying to start the CMD/ENTRYPOINT, if it fails, then we set the container.exitCode to the exitStatus from the exec(). 
Closes #8379 Signed-off-by: Doug Davis --- daemon/container.go | 4 ++++ daemon/monitor.go | 3 +++ integration-cli/docker_cli_run_test.go | 30 ++++++++++++++++++++++++++ 3 files changed, 37 insertions(+) diff --git a/daemon/container.go b/daemon/container.go index 8525206e4a..905dbd5707 100644 --- a/daemon/container.go +++ b/daemon/container.go @@ -302,6 +302,10 @@ func (container *Container) Start() (err error) { defer func() { if err != nil { container.setError(err) + // if no one else has set it, make sure we don't leave it at zero + if container.ExitCode == 0 { + container.ExitCode = 128 + } container.toDisk() container.cleanup() } diff --git a/daemon/monitor.go b/daemon/monitor.go index cbb74c335b..d0d9d70a99 100644 --- a/daemon/monitor.go +++ b/daemon/monitor.go @@ -138,6 +138,7 @@ func (m *containerMonitor) Start() error { // if we receive an internal error from the initial start of a container then lets // return it instead of entering the restart loop if m.container.RestartCount == 0 { + m.container.ExitCode = exitStatus m.resetContainer(false) return err @@ -163,10 +164,12 @@ func (m *containerMonitor) Start() error { // we need to check this before reentering the loop because the waitForNextRestart could have // been terminated by a request from a user if m.shouldStop { + m.container.ExitCode = exitStatus return err } continue } + m.container.ExitCode = exitStatus m.container.LogEvent("die") m.resetContainer(true) return err diff --git a/integration-cli/docker_cli_run_test.go b/integration-cli/docker_cli_run_test.go index 4c3e8d0a08..d536c626bb 100644 --- a/integration-cli/docker_cli_run_test.go +++ b/integration-cli/docker_cli_run_test.go @@ -2538,3 +2538,33 @@ func TestRunAllowPortRangeThroughExpose(t *testing.T) { } logDone("run - allow port range through --expose flag") } + +func TestRunUnknownCommand(t *testing.T) { + defer deleteAllContainers() + runCmd := exec.Command(dockerBinary, "create", "busybox", "/bin/nada") + cID, _, _, err := runCommandWithStdoutStderr(runCmd) + if err != nil { + t.Fatalf("Failed to create container: %v, output: %q", err, cID) + } + cID = strings.TrimSpace(cID) + + runCmd = exec.Command(dockerBinary, "start", cID) + _, _, _, err = runCommandWithStdoutStderr(runCmd) + if err == nil { + t.Fatalf("Container should not have been able to start!") + } + + runCmd = exec.Command(dockerBinary, "inspect", "--format={{.State.ExitCode}}", cID) + rc, _, _, err2 := runCommandWithStdoutStderr(runCmd) + rc = strings.TrimSpace(rc) + + if err2 != nil { + t.Fatalf("Error getting status of container: %v", err2) + } + + if rc != "-1" { + t.Fatalf("ExitCode(%v) was supposed to be -1", rc) + } + + logDone("run - Unknown Command") +} From 2329354dc3e874b6e93b7b9feafa9c67172840fc Mon Sep 17 00:00:00 2001 From: Yohei Ueda Date: Fri, 7 Nov 2014 02:17:02 +0900 Subject: [PATCH 262/592] Fix the unit test not to remove /tmp Signed-off-by: Yohei Ueda --- .../operatingsystem/operatingsystem_test.go | 15 ++++++++------- 1 file changed, 8 insertions(+), 7 deletions(-) diff --git a/pkg/parsers/operatingsystem/operatingsystem_test.go b/pkg/parsers/operatingsystem/operatingsystem_test.go index d264b35f03..b7d54cbb1c 100644 --- a/pkg/parsers/operatingsystem/operatingsystem_test.go +++ b/pkg/parsers/operatingsystem/operatingsystem_test.go @@ -38,12 +38,13 @@ BUG_REPORT_URL="http://bugs.launchpad.net/ubuntu/"`) ) dir := os.TempDir() + etcOsRelease = filepath.Join(dir, "etcOsRelease") + defer func() { + os.Remove(etcOsRelease) etcOsRelease = backup - os.RemoveAll(dir) }() - etcOsRelease = 
filepath.Join(dir, "etcOsRelease") for expect, osRelease := range map[string][]byte{ "Ubuntu 14.04 LTS": ubuntuTrusty, "Gentoo/Linux": gentoo, @@ -92,13 +93,13 @@ func TestIsContainerized(t *testing.T) { ) dir := os.TempDir() - defer func() { - proc1Cgroup = backup - os.RemoveAll(dir) - }() - proc1Cgroup = filepath.Join(dir, "proc1Cgroup") + defer func() { + os.Remove(proc1Cgroup) + proc1Cgroup = backup + }() + if err := ioutil.WriteFile(proc1Cgroup, nonContainerizedProc1Cgroup, 0600); err != nil { t.Fatalf("failed to write to %s: %v", proc1Cgroup, err) } From 5bd5ef5a0accf24d4fd8f810e167a7717c9aa7ae Mon Sep 17 00:00:00 2001 From: Yohei Ueda Date: Fri, 7 Nov 2014 02:40:13 +0900 Subject: [PATCH 263/592] Make test-unit runnable without parallel Signed-off-by: Yohei Ueda --- hack/make/test-unit | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/hack/make/test-unit b/hack/make/test-unit index 5040e37d6b..0f19c54f62 100644 --- a/hack/make/test-unit +++ b/hack/make/test-unit @@ -42,7 +42,7 @@ bundle_test_unit() { ) else # aww, no "parallel" available - fall back to boring for test_dir in $TESTDIRS; do - "$(dirname "$BASH_SOURCE")/.go-compile-test-dir" "$test_dir" + . "$(dirname "$BASH_SOURCE")/.go-compile-test-dir" "$test_dir" done fi echo "$TESTDIRS" | go_run_test_dir From 2fbfa29318f66aac131bdddd9b32f28b7b7e508a Mon Sep 17 00:00:00 2001 From: Vincent Batts Date: Thu, 6 Nov 2014 14:06:52 -0500 Subject: [PATCH 264/592] devmapper: add vbatts to MAINTAINERS Signed-off-by: Vincent Batts --- daemon/graphdriver/devmapper/MAINTAINERS | 1 + 1 file changed, 1 insertion(+) diff --git a/daemon/graphdriver/devmapper/MAINTAINERS b/daemon/graphdriver/devmapper/MAINTAINERS index 9e629d5fcc..39d865305d 100644 --- a/daemon/graphdriver/devmapper/MAINTAINERS +++ b/daemon/graphdriver/devmapper/MAINTAINERS @@ -1 +1,2 @@ Alexander Larsson (@alexlarsson) +Vincent Batts (@vbatts) From 6cbe1fa726fb88a1743c3a3da5e699c9bb3ae55a Mon Sep 17 00:00:00 2001 From: Alexandr Morozov Date: Wed, 5 Nov 2014 12:24:15 -0800 Subject: [PATCH 265/592] Make /etc/hosts records consistent Fixes #8972 Signed-off-by: Alexandr Morozov --- daemon/container.go | 8 ++--- pkg/networkfs/etchosts/etchosts.go | 52 +++++++++++++++++++----------- 2 files changed, 37 insertions(+), 23 deletions(-) diff --git a/daemon/container.go b/daemon/container.go index 8525206e4a..8d80a9450c 100644 --- a/daemon/container.go +++ b/daemon/container.go @@ -420,7 +420,7 @@ func (container *Container) buildHostsFiles(IP string) error { } container.HostsPath = hostsPath - extraContent := make(map[string]string) + var extraContent []etchosts.Record children, err := container.daemon.Children(container.Name) if err != nil { @@ -429,15 +429,15 @@ func (container *Container) buildHostsFiles(IP string) error { for linkAlias, child := range children { _, alias := path.Split(linkAlias) - extraContent[alias] = child.NetworkSettings.IPAddress + extraContent = append(extraContent, etchosts.Record{Hosts: alias, IP: child.NetworkSettings.IPAddress}) } for _, extraHost := range container.hostConfig.ExtraHosts { parts := strings.Split(extraHost, ":") - extraContent[parts[0]] = parts[1] + extraContent = append(extraContent, etchosts.Record{Hosts: parts[0], IP: parts[1]}) } - return etchosts.Build(container.HostsPath, IP, container.Config.Hostname, container.Config.Domainname, &extraContent) + return etchosts.Build(container.HostsPath, IP, container.Config.Hostname, container.Config.Domainname, extraContent) } func (container *Container) buildHostnameAndHostsFiles(IP 
string) error { diff --git a/pkg/networkfs/etchosts/etchosts.go b/pkg/networkfs/etchosts/etchosts.go index 6cf29b046f..d7edef27f6 100644 --- a/pkg/networkfs/etchosts/etchosts.go +++ b/pkg/networkfs/etchosts/etchosts.go @@ -3,40 +3,54 @@ package etchosts import ( "bytes" "fmt" + "io" "io/ioutil" "regexp" ) -var defaultContent = map[string]string{ - "localhost": "127.0.0.1", - "localhost ip6-localhost ip6-loopback": "::1", - "ip6-localnet": "fe00::0", - "ip6-mcastprefix": "ff00::0", - "ip6-allnodes": "ff02::1", - "ip6-allrouters": "ff02::2", +type Record struct { + Hosts string + IP string } -func Build(path, IP, hostname, domainname string, extraContent *map[string]string) error { +func (r Record) WriteTo(w io.Writer) (int64, error) { + n, err := fmt.Fprintf(w, "%s\t%s\n", r.IP, r.Hosts) + return int64(n), err +} + +var defaultContent = []Record{ + {Hosts: "localhost", IP: "127.0.0.1"}, + {Hosts: "localhost ip6-localhost ip6-loopback", IP: "::1"}, + {Hosts: "ip6-localnet", IP: "fe00::0"}, + {Hosts: "ip6-mcastprefix", IP: "ff00::0"}, + {Hosts: "ip6-allnodes", IP: "ff02::1"}, + {Hosts: "ip6-allrouters", IP: "ff02::2"}, +} + +func Build(path, IP, hostname, domainname string, extraContent []Record) error { content := bytes.NewBuffer(nil) if IP != "" { + var mainRec Record + mainRec.IP = IP if domainname != "" { - content.WriteString(fmt.Sprintf("%s\t%s.%s %s\n", IP, hostname, domainname, hostname)) + mainRec.Hosts = fmt.Sprintf("%s.%s %s", hostname, domainname, hostname) } else { - content.WriteString(fmt.Sprintf("%s\t%s\n", IP, hostname)) + mainRec.Hosts = hostname } - } - - for hosts, ip := range defaultContent { - if _, err := content.WriteString(fmt.Sprintf("%s\t%s\n", ip, hosts)); err != nil { + if _, err := mainRec.WriteTo(content); err != nil { return err } } - if extraContent != nil { - for hosts, ip := range *extraContent { - if _, err := content.WriteString(fmt.Sprintf("%s\t%s\n", ip, hosts)); err != nil { - return err - } + for _, r := range defaultContent { + if _, err := r.WriteTo(content); err != nil { + return err + } + } + + for _, r := range extraContent { + if _, err := r.WriteTo(content); err != nil { + return err } } From 048d0c4e84aa850f28f9f12735d274a887c79509 Mon Sep 17 00:00:00 2001 From: Alexandr Morozov Date: Thu, 6 Nov 2014 11:36:09 -0800 Subject: [PATCH 266/592] Test for etchosts consistency Signed-off-by: Alexandr Morozov --- pkg/networkfs/etchosts/etchosts_test.go | 26 +++++++++++++++++++++++++ 1 file changed, 26 insertions(+) diff --git a/pkg/networkfs/etchosts/etchosts_test.go b/pkg/networkfs/etchosts/etchosts_test.go index 05a4f447f7..c033904c31 100644 --- a/pkg/networkfs/etchosts/etchosts_test.go +++ b/pkg/networkfs/etchosts/etchosts_test.go @@ -7,6 +7,32 @@ import ( "testing" ) +func TestBuildDefault(t *testing.T) { + file, err := ioutil.TempFile("", "") + if err != nil { + t.Fatal(err) + } + defer os.Remove(file.Name()) + + // check that /etc/hosts has consistent ordering + for i := 0; i <= 5; i++ { + err = Build(file.Name(), "", "", "", nil) + if err != nil { + t.Fatal(err) + } + + content, err := ioutil.ReadFile(file.Name()) + if err != nil { + t.Fatal(err) + } + expected := "127.0.0.1\tlocalhost\n::1\tlocalhost ip6-localhost ip6-loopback\nfe00::0\tip6-localnet\nff00::0\tip6-mcastprefix\nff02::1\tip6-allnodes\nff02::2\tip6-allrouters\n" + + if expected != string(content) { + t.Fatalf("Expected to find '%s' got '%s'", expected, content) + } + } +} + func TestBuildHostnameDomainname(t *testing.T) { file, err := ioutil.TempFile("", "") if err != nil { From 
ef7415258ba5195af7a890d1d52214b3a181a379 Mon Sep 17 00:00:00 2001 From: Doug Davis Date: Wed, 22 Oct 2014 06:48:02 -0700 Subject: [PATCH 267/592] Add import/pull events to the stream Closes #8160 Signed-off-by: Doug Davis --- graph/import.go | 8 ++++ graph/pull.go | 12 +++++ integration-cli/docker_cli_events_test.go | 54 +++++++++++++++++++++++ 3 files changed, 74 insertions(+) diff --git a/graph/import.go b/graph/import.go index 36d0d3fe10..a8e8e04b5b 100644 --- a/graph/import.go +++ b/graph/import.go @@ -4,6 +4,7 @@ import ( "net/http" "net/url" + log "github.com/Sirupsen/logrus" "github.com/docker/docker/engine" "github.com/docker/docker/pkg/archive" "github.com/docker/docker/utils" @@ -57,5 +58,12 @@ func (s *TagStore) CmdImport(job *engine.Job) engine.Status { } } job.Stdout.Write(sf.FormatStatus("", img.ID)) + logID := img.ID + if tag != "" { + logID += ":" + tag + } + if err = job.Eng.Job("log", "import", logID, "").Run(); err != nil { + log.Errorf("Error logging event 'import' for %s: %s", logID, err) + } return engine.StatusOK } diff --git a/graph/pull.go b/graph/pull.go index 3166a42ee5..775c318af1 100644 --- a/graph/pull.go +++ b/graph/pull.go @@ -139,6 +139,11 @@ func (s *TagStore) CmdPull(job *engine.Job) engine.Status { mirrors = s.mirrors } + logName := localName + if tag != "" { + logName += ":" + tag + } + if len(mirrors) == 0 && (isOfficial || endpoint.Version == registry.APIVersion2) { j := job.Eng.Job("trust_update_base") if err = j.Run(); err != nil { @@ -146,6 +151,9 @@ func (s *TagStore) CmdPull(job *engine.Job) engine.Status { } if err := s.pullV2Repository(job.Eng, r, job.Stdout, localName, remoteName, tag, sf, job.GetenvBool("parallel")); err == nil { + if err = job.Eng.Job("log", "pull", logName, "").Run(); err != nil { + log.Errorf("Error logging event 'pull' for %s: %s", logName, err) + } return engine.StatusOK } else if err != registry.ErrDoesNotExist { log.Errorf("Error from V2 registry: %s", err) @@ -156,6 +164,10 @@ func (s *TagStore) CmdPull(job *engine.Job) engine.Status { return job.Error(err) } + if err = job.Eng.Job("log", "pull", logName, "").Run(); err != nil { + log.Errorf("Error logging event 'pull' for %s: %s", logName, err) + } + return engine.StatusOK } diff --git a/integration-cli/docker_cli_events_test.go b/integration-cli/docker_cli_events_test.go index b7f410b175..2c4111ce55 100644 --- a/integration-cli/docker_cli_events_test.go +++ b/integration-cli/docker_cli_events_test.go @@ -215,3 +215,57 @@ func TestEventsRedirectStdout(t *testing.T) { logDone("events - redirect stdout") } + +func TestEventsImagePull(t *testing.T) { + since := time.Now().Unix() + pullCmd := exec.Command(dockerBinary, "pull", "scratch") + if out, _, err := runCommandWithOutput(pullCmd); err != nil { + t.Fatal("pulling the scratch image from has failed: %s, %v", out, err) + } + + eventsCmd := exec.Command(dockerBinary, "events", + fmt.Sprintf("--since=%d", since), + fmt.Sprintf("--until=%d", time.Now().Unix())) + out, _, _ := runCommandWithOutput(eventsCmd) + + events := strings.Split(strings.TrimSpace(out), "\n") + event := strings.TrimSpace(events[len(events)-1]) + + if !strings.HasSuffix(event, "scratch:latest: pull") { + t.Fatalf("Missing pull event - got:%q", event) + } + + logDone("events - image pull is logged") +} + +func TestEventsImageImport(t *testing.T) { + since := time.Now().Unix() + + server, err := fileServer(map[string]string{ + "/cirros.tar.gz": "/cirros.tar.gz", + }) + if err != nil { + t.Fatal(err) + } + defer server.Close() + fileURL := 
fmt.Sprintf("%s/cirros.tar.gz", server.URL) + importCmd := exec.Command(dockerBinary, "import", fileURL) + out, _, err := runCommandWithOutput(importCmd) + if err != nil { + t.Errorf("import failed with errors: %v, output: %q", err, out) + } + + eventsCmd := exec.Command(dockerBinary, "events", + fmt.Sprintf("--since=%d", since), + fmt.Sprintf("--until=%d", time.Now().Unix())) + out, _, _ = runCommandWithOutput(eventsCmd) + + events := strings.Split(strings.TrimSpace(out), "\n") + event := strings.TrimSpace(events[len(events)-1]) + + if !strings.HasSuffix(event, ": import") { + t.Fatalf("Missing pull event - got:%q", event) + } + + logDone("events - image import is logged") +} From 0f57c902450b1d4f7a676dc693689debca002e98 Mon Sep 17 00:00:00 2001 From: Vivek Goyal Date: Thu, 6 Nov 2014 15:59:25 -0500 Subject: [PATCH 268/592] docker-remove-redundant-json-tags In previous patch I had introduce json:"-" tags to be on safer side to make sure certain fields are not marshalled/unmarshalled. But struct fields starting with small letter are not exported so they will not be marshalled anyway. So remove json:"-" tags from there. Signed-off-by: Vivek Goyal --- daemon/graphdriver/devmapper/deviceset.go | 30 +++++++++++------------ 1 file changed, 15 insertions(+), 15 deletions(-) diff --git a/daemon/graphdriver/devmapper/deviceset.go b/daemon/graphdriver/devmapper/deviceset.go index e661dbb7ad..59c5ec82e7 100644 --- a/daemon/graphdriver/devmapper/deviceset.go +++ b/daemon/graphdriver/devmapper/deviceset.go @@ -64,23 +64,23 @@ type MetaData struct { type DeviceSet struct { MetaData `json:"-"` sync.Mutex `json:"-"` // Protects Devices map and serializes calls into libdevmapper - root string `json:"-"` - devicePrefix string `json:"-"` - TransactionId uint64 `json:"-"` - NewTransactionId uint64 `json:"-"` - NextDeviceId int `json:"next_device_id"` + root string + devicePrefix string + TransactionId uint64 `json:"-"` + NewTransactionId uint64 `json:"-"` + NextDeviceId int `json:"next_device_id"` // Options - dataLoopbackSize int64 `json:"-"` - metaDataLoopbackSize int64 `json:"-"` - baseFsSize uint64 `json:"-"` - filesystem string `json:"-"` - mountOptions string `json:"-"` - mkfsArgs []string `json:"-"` - dataDevice string `json:"-"` - metadataDevice string `json:"-"` - doBlkDiscard bool `json:"-"` - thinpBlockSize uint32 `json:"-"` + dataLoopbackSize int64 + metaDataLoopbackSize int64 + baseFsSize uint64 + filesystem string + mountOptions string + mkfsArgs []string + dataDevice string + metadataDevice string + doBlkDiscard bool + thinpBlockSize uint32 } type DiskUsage struct { From 318b11f62fe0f16a190e85e3cfe5d01432bf92a9 Mon Sep 17 00:00:00 2001 From: Vincent Batts Date: Thu, 6 Nov 2014 15:04:10 -0500 Subject: [PATCH 269/592] btrfs: information for the information gods Signed-off-by: Vincent Batts --- daemon/graphdriver/btrfs/btrfs.go | 5 ++++- daemon/graphdriver/btrfs/version.go | 15 +++++++++++++++ daemon/graphdriver/btrfs/version_test.go | 13 +++++++++++++ 3 files changed, 32 insertions(+), 1 deletion(-) create mode 100644 daemon/graphdriver/btrfs/version.go create mode 100644 daemon/graphdriver/btrfs/version_test.go diff --git a/daemon/graphdriver/btrfs/btrfs.go b/daemon/graphdriver/btrfs/btrfs.go index 954cf9b245..ef77ae9158 100644 --- a/daemon/graphdriver/btrfs/btrfs.go +++ b/daemon/graphdriver/btrfs/btrfs.go @@ -60,7 +60,10 @@ func (d *Driver) String() string { } func (d *Driver) Status() [][2]string { - return nil + return [][2]string{ + {"Build Version", BtrfsBuildVersion()}, + {"Library 
Version", fmt.Sprintf("%d", BtrfsLibVersion())}, + } } func (d *Driver) Cleanup() error { diff --git a/daemon/graphdriver/btrfs/version.go b/daemon/graphdriver/btrfs/version.go new file mode 100644 index 0000000000..1b2b148c07 --- /dev/null +++ b/daemon/graphdriver/btrfs/version.go @@ -0,0 +1,15 @@ +// +build linux + +package btrfs + +/* +#include +*/ +import "C" + +func BtrfsBuildVersion() string { + return string(C.BTRFS_BUILD_VERSION) +} +func BtrfsLibVersion() int { + return int(C.BTRFS_LIB_VERSION) +} diff --git a/daemon/graphdriver/btrfs/version_test.go b/daemon/graphdriver/btrfs/version_test.go new file mode 100644 index 0000000000..d96e33f3df --- /dev/null +++ b/daemon/graphdriver/btrfs/version_test.go @@ -0,0 +1,13 @@ +// +build linux + +package btrfs + +import ( + "testing" +) + +func TestBuildVersion(t *testing.T) { + if len(BtrfsBuildVersion()) == 0 { + t.Errorf("expected output from btrfs build version, but got empty string") + } +} From b83fc07d88175c32bb46368a07a9c9e277b22392 Mon Sep 17 00:00:00 2001 From: Alexandr Morozov Date: Thu, 6 Nov 2014 15:25:14 -0800 Subject: [PATCH 270/592] Not fail on updating parent links Fixes #8796 Signed-off-by: Alexandr Morozov --- daemon/container.go | 2 +- integration-cli/docker_cli_links_test.go | 20 ++++++++++++++++++++ 2 files changed, 21 insertions(+), 1 deletion(-) diff --git a/daemon/container.go b/daemon/container.go index 905dbd5707..c304225c97 100644 --- a/daemon/container.go +++ b/daemon/container.go @@ -987,7 +987,7 @@ func (container *Container) updateParentsHosts() error { c := container.daemon.Get(cid) if c != nil && !container.daemon.config.DisableNetwork && container.hostConfig.NetworkMode.IsPrivate() { if err := etchosts.Update(c.HostsPath, container.NetworkSettings.IPAddress, container.Name[1:]); err != nil { - return fmt.Errorf("Failed to update /etc/hosts in parent container: %v", err) + log.Errorf("Failed to update /etc/hosts in parent container: %v", err) } } } diff --git a/integration-cli/docker_cli_links_test.go b/integration-cli/docker_cli_links_test.go index f327a52ec7..7b19434fb5 100644 --- a/integration-cli/docker_cli_links_test.go +++ b/integration-cli/docker_cli_links_test.go @@ -157,3 +157,23 @@ func TestLinksInspectLinksStopped(t *testing.T) { logDone("link - links in stopped container inspect") } + +func TestLinksNotStartedParentNotFail(t *testing.T) { + defer deleteAllContainers() + runCmd := exec.Command(dockerBinary, "create", "--name=first", "busybox", "top") + out, _, _, err := runCommandWithStdoutStderr(runCmd) + if err != nil { + t.Fatal(out, err) + } + runCmd = exec.Command(dockerBinary, "create", "--name=second", "--link=first:first", "busybox", "top") + out, _, _, err = runCommandWithStdoutStderr(runCmd) + if err != nil { + t.Fatal(out, err) + } + runCmd = exec.Command(dockerBinary, "start", "first") + out, _, _, err = runCommandWithStdoutStderr(runCmd) + if err != nil { + t.Fatal(out, err) + } + logDone("link - container start not failing on updating stopped parent links") +} From 6dc11cc992531a2cf75666e417af7730a7f0764b Mon Sep 17 00:00:00 2001 From: Sven Dowideit Date: Thu, 6 Nov 2014 13:39:42 +1000 Subject: [PATCH 271/592] makes the -s --size option documentation clearer #8950 Signed-off-by: Sven Dowideit --- api/client/commands.go | 2 +- contrib/completion/fish/docker.fish | 2 +- contrib/completion/zsh/_docker | 2 +- docs/sources/reference/commandline/cli.md | 2 +- 4 files changed, 4 insertions(+), 4 deletions(-) diff --git a/api/client/commands.go b/api/client/commands.go index 
da29b28f3d..14b5a5d332 100644 --- a/api/client/commands.go +++ b/api/client/commands.go @@ -1523,7 +1523,7 @@ func (cli *DockerCli) CmdPs(args ...string) error { cmd = cli.Subcmd("ps", "", "List containers") quiet = cmd.Bool([]string{"q", "-quiet"}, false, "Only display numeric IDs") - size = cmd.Bool([]string{"s", "-size"}, false, "Display sizes") + size = cmd.Bool([]string{"s", "-size"}, false, "Display total file sizes") all = cmd.Bool([]string{"a", "-all"}, false, "Show all containers. Only running containers are shown by default.") noTrunc = cmd.Bool([]string{"#notrunc", "-no-trunc"}, false, "Don't truncate output") nLatest = cmd.Bool([]string{"l", "-latest"}, false, "Show only the latest created container, include non-running ones.") diff --git a/contrib/completion/fish/docker.fish b/contrib/completion/fish/docker.fish index 48b0279cee..b250dfd0b1 100644 --- a/contrib/completion/fish/docker.fish +++ b/contrib/completion/fish/docker.fish @@ -185,7 +185,7 @@ complete -c docker -A -f -n '__fish_seen_subcommand_from ps' -s l -l latest -d ' complete -c docker -A -f -n '__fish_seen_subcommand_from ps' -s n -d 'Show n last created containers, include non-running ones.' complete -c docker -A -f -n '__fish_seen_subcommand_from ps' -l no-trunc -d "Don't truncate output" complete -c docker -A -f -n '__fish_seen_subcommand_from ps' -s q -l quiet -d 'Only display numeric IDs' -complete -c docker -A -f -n '__fish_seen_subcommand_from ps' -s s -l size -d 'Display sizes' +complete -c docker -A -f -n '__fish_seen_subcommand_from ps' -s s -l size -d 'Display total file sizes' complete -c docker -A -f -n '__fish_seen_subcommand_from ps' -l since -d 'Show only containers created since Id or Name, include non-running ones.' # pull diff --git a/contrib/completion/zsh/_docker b/contrib/completion/zsh/_docker index 4c0937e10c..f867d76a78 100644 --- a/contrib/completion/zsh/_docker +++ b/contrib/completion/zsh/_docker @@ -357,7 +357,7 @@ __docker_subcommand () { '-n[Show n last created containers, include non-running one]:n:(1 5 10 25 50)' \ '--no-trunc[Do not truncate output]' \ {-q,--quiet}'[Only show numeric IDs]' \ - {-s,--size}'[Display sizes]' \ + {-s,--size}'[Display total file sizes]' \ '--since=-[Show only containers created since...]:containers:__docker_containers' ;; (tag) diff --git a/docs/sources/reference/commandline/cli.md b/docs/sources/reference/commandline/cli.md index 50c0ff3cce..b3043b3afa 100644 --- a/docs/sources/reference/commandline/cli.md +++ b/docs/sources/reference/commandline/cli.md @@ -1042,7 +1042,7 @@ for further details. -n=-1 Show n last created containers, include non-running ones. --no-trunc=false Don't truncate output -q, --quiet=false Only display numeric IDs - -s, --size=false Display sizes + -s, --size=false Display total file sizes --since="" Show only containers created since Id or Name, include non-running ones. Running `docker ps` showing 2 linked containers. 
From dad58737aea8ef4c6389cb118566fc06402fd635 Mon Sep 17 00:00:00 2001 From: Yohei Ueda Date: Fri, 7 Nov 2014 16:54:25 +0900 Subject: [PATCH 272/592] Export envvars necessary for unit-test Signed-off-by: Yohei Ueda --- hack/make/test-unit | 38 ++++++++++++++++++++------------------ 1 file changed, 20 insertions(+), 18 deletions(-) diff --git a/hack/make/test-unit b/hack/make/test-unit index 0f19c54f62..910b887a8e 100644 --- a/hack/make/test-unit +++ b/hack/make/test-unit @@ -22,29 +22,31 @@ bundle_test_unit() { if [ -z "$TESTDIRS" ]; then TESTDIRS=$(find_dirs '*_test.go') fi - - if command -v parallel &> /dev/null; then ( - # accomodate parallel to be able to access variables - export SHELL="$BASH" - export HOME="$(mktemp -d)" - mkdir -p "$HOME/.parallel" - touch "$HOME/.parallel/ignored_vars" + ( export LDFLAGS="$LDFLAGS $LDFLAGS_STATIC_DOCKER" export TESTFLAGS export HAVE_GO_TEST_COVER export DEST - # some hack to export array variables - export BUILDFLAGS_FILE="$HOME/buildflags_file" - ( IFS=$'\n'; echo "${BUILDFLAGS[*]}" ) > "$BUILDFLAGS_FILE" + if command -v parallel &> /dev/null; then + # accomodate parallel to be able to access variables + export SHELL="$BASH" + export HOME="$(mktemp -d)" + mkdir -p "$HOME/.parallel" + touch "$HOME/.parallel/ignored_vars" - echo "$TESTDIRS" | parallel --jobs "$PARALLEL_JOBS" --halt 2 --env _ "$(dirname "$BASH_SOURCE")/.go-compile-test-dir" - rm -rf "$HOME" - ) else - # aww, no "parallel" available - fall back to boring - for test_dir in $TESTDIRS; do - . "$(dirname "$BASH_SOURCE")/.go-compile-test-dir" "$test_dir" - done - fi + # some hack to export array variables + export BUILDFLAGS_FILE="$HOME/buildflags_file" + ( IFS=$'\n'; echo "${BUILDFLAGS[*]}" ) > "$BUILDFLAGS_FILE" + + echo "$TESTDIRS" | parallel --jobs "$PARALLEL_JOBS" --halt 2 --env _ "$(dirname "$BASH_SOURCE")/.go-compile-test-dir" + rm -rf "$HOME" + else + # aww, no "parallel" available - fall back to boring + for test_dir in $TESTDIRS; do + "$(dirname "$BASH_SOURCE")/.go-compile-test-dir" "$test_dir" + done + fi + ) echo "$TESTDIRS" | go_run_test_dir } } From c30ccc62e447ed570ca283feedd872eb359d457b Mon Sep 17 00:00:00 2001 From: Vincent Giersch Date: Fri, 7 Nov 2014 17:10:36 +0100 Subject: [PATCH 273/592] Fix Docker Hub tags list specification The current implementation of the Docker Hub returns a list of objects containing the tag name and the layer id. 
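For a client of this endpoint, the practical difference is that the response body is now a JSON array of objects rather than a flat name-to-id map. A rough Go sketch of decoding the new shape follows; the `Tag` struct and sample payload are illustrative only and are not part of Docker's registry client.

```
package main

import (
	"encoding/json"
	"fmt"
)

// Tag mirrors one element of the list returned by
// GET /v1/repositories/<namespace>/<repository>/tags.
type Tag struct {
	Layer string `json:"layer"`
	Name  string `json:"name"`
}

func main() {
	// Sample response body in the object-list form documented by this patch.
	body := []byte(`[
		{"layer": "9e89cc6f", "name": "latest"},
		{"layer": "b486531f", "name": "0.1.1"}
	]`)

	var tags []Tag
	if err := json.Unmarshal(body, &tags); err != nil {
		panic(err)
	}
	for _, t := range tags {
		fmt.Printf("%s -> %s\n", t.Name, t.Layer)
	}
}
```
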
Docker-DCO-1.1-Signed-off-by: Vincent Giersch --- docs/sources/reference/api/hub_registry_spec.md | 16 +++++++++++----- 1 file changed, 11 insertions(+), 5 deletions(-) diff --git a/docs/sources/reference/api/hub_registry_spec.md b/docs/sources/reference/api/hub_registry_spec.md index 66724cdc82..26d4ffca30 100644 --- a/docs/sources/reference/api/hub_registry_spec.md +++ b/docs/sources/reference/api/hub_registry_spec.md @@ -579,13 +579,19 @@ The following naming restrictions apply: ### Get all tags: -GET /v1/repositories///tags + GET /v1/repositories///tags **Return**: HTTP 200 - { "latest": - "9e89cc6f0bc3c38722009fe6857087b486531f9a779a0c17e3ed29dae8f12c4f", - “0.1.1”: - “b486531f9a779a0c17e3ed29dae8f12c4f9e89cc6f0bc3c38722009fe6857087” } + [ + { + "layer": "9e89cc6f", + "name": "latest" + }, + { + "layer": "b486531f", + "name": "0.1.1", + } + ] **4.3.2 Read the content of a tag (resolve the image id):** From 64e7a1722daa6508f38287c6b8eee72850f22a86 Mon Sep 17 00:00:00 2001 From: Vincent Batts Date: Fri, 31 Oct 2014 16:28:20 -0400 Subject: [PATCH 274/592] pkg/mount: mountinfo from specified pid Signed-off-by: Vincent Batts --- pkg/mount/mountinfo_linux.go | 13 +++++++++++++ 1 file changed, 13 insertions(+) diff --git a/pkg/mount/mountinfo_linux.go b/pkg/mount/mountinfo_linux.go index 84bf5516b5..68f4e9f1bc 100644 --- a/pkg/mount/mountinfo_linux.go +++ b/pkg/mount/mountinfo_linux.go @@ -1,3 +1,5 @@ +// +build linux + package mount import ( @@ -72,3 +74,14 @@ func parseInfoFile(r io.Reader) ([]*MountInfo, error) { } return out, nil } + +// PidMountInfo collects the mounts for a specific Pid +func PidMountInfo(pid int) ([]*MountInfo, error) { + f, err := os.Open(fmt.Sprintf("/proc/%d/mountinfo", pid)) + if err != nil { + return nil, err + } + defer f.Close() + + return parseInfoFile(f) +} From 480b5e9c62819da3c6bff47faaec16acb42a0185 Mon Sep 17 00:00:00 2001 From: Fred Lifton Date: Fri, 7 Nov 2014 13:13:01 -0800 Subject: [PATCH 275/592] Revise Docs Maintainers list Removing Sonat from docs maintainers because he no longer has time to complete all the responsibilities. Many thanks to Sonat for his help. Docker-DCO-1.1-Signed-off-by: Fred Lifton (github: fredlf) --- docs/MAINTAINERS | 1 - 1 file changed, 1 deletion(-) diff --git a/docs/MAINTAINERS b/docs/MAINTAINERS index d07b531d72..ecf56752c2 100644 --- a/docs/MAINTAINERS +++ b/docs/MAINTAINERS @@ -1,4 +1,3 @@ Fred Lifton (@fredlf) James Turnbull (@jamtur01) Sven Dowideit (@SvenDowideit) -O.S. 
Tezer (@OSTezer) From 2e482c86bc14ddd4cfa45281dcf8f9ca141c9f14 Mon Sep 17 00:00:00 2001 From: Doug Davis Date: Wed, 22 Oct 2014 11:16:42 -0700 Subject: [PATCH 276/592] Use the HTTP Last-Modified http header as the mtime value for ADD cmd when present Closes #8331 Signed-off-by: Doug Davis --- builder/internals.go | 18 ++++++- docs/sources/reference/builder.md | 6 ++- integration-cli/docker_cli_build_test.go | 60 ++++++++++++++++++++++++ 3 files changed, 81 insertions(+), 3 deletions(-) diff --git a/builder/internals.go b/builder/internals.go index d8093507d3..f6083e7918 100644 --- a/builder/internals.go +++ b/builder/internals.go @@ -9,6 +9,7 @@ import ( "fmt" "io" "io/ioutil" + "net/http" "net/url" "os" "path" @@ -254,8 +255,21 @@ func calcCopyInfo(b *Builder, cmdName string, cInfos *[]*copyInfo, origPath stri fmt.Fprintf(b.OutStream, "\n") tmpFile.Close() - // Remove the mtime of the newly created tmp file - if err := system.UtimesNano(tmpFileName, make([]syscall.Timespec, 2)); err != nil { + // Set the mtime to the Last-Modified header value if present + // Otherwise just remove atime and mtime + times := make([]syscall.Timespec, 2) + + lastMod := resp.Header.Get("Last-Modified") + if lastMod != "" { + mTime, err := http.ParseTime(lastMod) + // If we can't parse it then just let it default to 'zero' + // otherwise use the parsed time value + if err == nil { + times[1] = syscall.NsecToTimespec(mTime.UnixNano()) + } + } + + if err := system.UtimesNano(tmpFileName, times); err != nil { return err } diff --git a/docs/sources/reference/builder.md b/docs/sources/reference/builder.md index 1f18f0c63a..00d9ba3eb8 100644 --- a/docs/sources/reference/builder.md +++ b/docs/sources/reference/builder.md @@ -376,7 +376,11 @@ destination container. All new files and directories are created with a UID and GID of 0. In the case where `` is a remote file URL, the destination will -have permissions of 600. +have permissions of 600. If the remote file being retrieved has an HTTP +`Last-Modified` header, the timestamp from that header will be used +to set the `mtime` on the destination file. Then, like any other file +processed during an `ADD`, `mtime` will be included in the determination +of whether or not the file has changed and the cache should be updated. 
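A minimal, standalone sketch of the Last-Modified-to-mtime mapping described above is shown below. It only illustrates the behaviour; the builder itself goes through Docker's internal system.UtimesNano as the internals.go hunk above shows, and the header value and helper name here are examples, not Docker code.

```
package main

import (
	"fmt"
	"io/ioutil"
	"net/http"
	"os"
)

// setMTimeFromHeader applies an HTTP Last-Modified value as the file's
// mtime; when the header is missing or unparseable it leaves the file
// untouched. Illustrative only; the names are not Docker's.
func setMTimeFromHeader(path, lastMod string) error {
	t, err := http.ParseTime(lastMod)
	if err != nil {
		return nil
	}
	return os.Chtimes(path, t, t)
}

func main() {
	f, err := ioutil.TempFile("", "add-mtime-example")
	if err != nil {
		panic(err)
	}
	defer os.Remove(f.Name())
	f.Close()

	if err := setMTimeFromHeader(f.Name(), "Fri, 07 Nov 2014 16:54:25 GMT"); err != nil {
		panic(err)
	}
	if fi, err := os.Stat(f.Name()); err == nil {
		fmt.Println("mtime is now:", fi.ModTime().UTC())
	}
}
```
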
> **Note**: > If you build by passing a `Dockerfile` through STDIN (`docker diff --git a/integration-cli/docker_cli_build_test.go b/integration-cli/docker_cli_build_test.go index 68d607521e..9a194812b1 100644 --- a/integration-cli/docker_cli_build_test.go +++ b/integration-cli/docker_cli_build_test.go @@ -7,9 +7,11 @@ import ( "io/ioutil" "os" "os/exec" + "path" "path/filepath" "regexp" "strings" + "syscall" "testing" "time" @@ -2214,6 +2216,64 @@ func TestBuildADDRemoteFileWithoutCache(t *testing.T) { logDone("build - add remote file without cache") } +func TestBuildADDRemoteFileMTime(t *testing.T) { + name := "testbuildaddremotefilemtime" + defer deleteImages(name) + + server, err := fakeStorage(map[string]string{"baz": "hello"}) + if err != nil { + t.Fatal(err) + } + defer server.Close() + + ctx, err := fakeContext(fmt.Sprintf(`FROM scratch + MAINTAINER dockerio + ADD %s/baz /usr/lib/baz/quux`, server.URL), nil) + if err != nil { + t.Fatal(err) + } + defer ctx.Close() + + id1, err := buildImageFromContext(name, ctx, true) + if err != nil { + t.Fatal(err) + } + + id2, err := buildImageFromContext(name, ctx, true) + if err != nil { + t.Fatal(err) + } + if id1 != id2 { + t.Fatal("The cache should have been used but wasn't - #1") + } + + // Now set baz's times to anything else and redo the build + // This time the cache should not be used + bazPath := path.Join(server.FakeContext.Dir, "baz") + err = syscall.UtimesNano(bazPath, make([]syscall.Timespec, 2)) + if err != nil { + t.Fatalf("Error setting mtime on %q: %v", bazPath, err) + } + + id3, err := buildImageFromContext(name, ctx, true) + if err != nil { + t.Fatal(err) + } + if id1 == id3 { + t.Fatal("The cache should not have been used but was") + } + + // And for good measure do it again and make sure cache is used this time + id4, err := buildImageFromContext(name, ctx, true) + if err != nil { + t.Fatal(err) + } + if id3 != id4 { + t.Fatal("The cache should have been used but wasn't - #2") + } + logDone("build - add remote file testing mtime") +} + func TestBuildADDLocalAndRemoteFilesWithCache(t *testing.T) { name := "testbuildaddlocalandremotefilewithcache" defer deleteImages(name) From a10cca257f678e5e3c866b3c35f77877fe4789d2 Mon Sep 17 00:00:00 2001 From: Aleksa Sarai Date: Sat, 8 Nov 2014 09:57:20 +1100 Subject: [PATCH 277/592] vendor: update vendor'd libcontainer version This patch updates the vendor'd libcontainer version, so that Docker can take advantage of the updates to the `user` API. 
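The most visible addition in that API is the ExecUser resolution helpers. A hedged sketch of how a caller might use them, based only on the function signatures added in the vendored user package further down in this patch, is below; the "root:adm" spec is just an example value.

```
package main

import (
	"fmt"
	"syscall"

	"github.com/docker/libcontainer/user"
)

func main() {
	// Fields missing from the spec fall back to these defaults, the same
	// pattern SetupUser follows in the namespaces/init.go hunk below.
	defaults := user.ExecUser{
		Uid:  syscall.Getuid(),
		Gid:  syscall.Getgid(),
		Home: "/",
	}

	passwdFile, err := user.GetPasswdFile()
	if err != nil {
		fmt.Println(err)
		return
	}
	groupFile, err := user.GetGroupFile()
	if err != nil {
		fmt.Println(err)
		return
	}

	// "root:adm" is only an example; "user", "uid", "user:group" and
	// "uid:gid" forms are all accepted.
	execUser, err := user.GetExecUserFile("root:adm", &defaults, passwdFile, groupFile)
	if err != nil {
		fmt.Println("resolve failed:", err)
		return
	}
	fmt.Printf("uid=%d gid=%d sgids=%v home=%s\n",
		execUser.Uid, execUser.Gid, execUser.Sgids, execUser.Home)
}
```
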
Signed-off-by: Aleksa Sarai (github: cyphar) --- hack/vendor.sh | 2 +- .../docker/libcontainer/namespaces/init.go | 27 +- .../libcontainer/netlink/netlink_linux.go | 11 +- .../netlink/netlink_linux_test.go | 28 ++ .../route_source_address_selection.json | 209 ++++++++++++++ .../docker/libcontainer/user/lookup.go | 108 ++++++++ .../docker/libcontainer/user/lookup_unix.go | 30 ++ .../libcontainer/user/lookup_unsupported.go | 21 ++ .../docker/libcontainer/user/user.go | 200 +++++++++---- .../docker/libcontainer/user/user_test.go | 262 +++++++++++++++++- 10 files changed, 828 insertions(+), 70 deletions(-) create mode 100644 vendor/src/github.com/docker/libcontainer/sample_configs/route_source_address_selection.json create mode 100644 vendor/src/github.com/docker/libcontainer/user/lookup.go create mode 100644 vendor/src/github.com/docker/libcontainer/user/lookup_unix.go create mode 100644 vendor/src/github.com/docker/libcontainer/user/lookup_unsupported.go diff --git a/hack/vendor.sh b/hack/vendor.sh index ae45dbe2d8..4c0b09fed1 100755 --- a/hack/vendor.sh +++ b/hack/vendor.sh @@ -66,7 +66,7 @@ if [ "$1" = '--go' ]; then mv tmp-tar src/code.google.com/p/go/src/pkg/archive/tar fi -clone git github.com/docker/libcontainer fd6df76562137aa3b18e44b790cb484fe2b6fa0b +clone git github.com/docker/libcontainer 4ae31b6ceb2c2557c9f05f42da61b0b808faa5a4 # see src/github.com/docker/libcontainer/update-vendor.sh which is the "source of truth" for libcontainer deps (just like this file) rm -rf src/github.com/docker/libcontainer/vendor eval "$(grep '^clone ' src/github.com/docker/libcontainer/update-vendor.sh | grep -v 'github.com/codegangsta/cli')" diff --git a/vendor/src/github.com/docker/libcontainer/namespaces/init.go b/vendor/src/github.com/docker/libcontainer/namespaces/init.go index 482ba0f399..72af200cc6 100644 --- a/vendor/src/github.com/docker/libcontainer/namespaces/init.go +++ b/vendor/src/github.com/docker/libcontainer/namespaces/init.go @@ -167,26 +167,43 @@ func RestoreParentDeathSignal(old int) error { // SetupUser changes the groups, gid, and uid for the user inside the container func SetupUser(u string) error { - uid, gid, suppGids, home, err := user.GetUserGroupSupplementaryHome(u, syscall.Getuid(), syscall.Getgid(), "/") + // Set up defaults. 
+ defaultExecUser := user.ExecUser{ + Uid: syscall.Getuid(), + Gid: syscall.Getgid(), + Home: "/", + } + + passwdFile, err := user.GetPasswdFile() + if err != nil { + return err + } + + groupFile, err := user.GetGroupFile() + if err != nil { + return err + } + + execUser, err := user.GetExecUserFile(u, &defaultExecUser, passwdFile, groupFile) if err != nil { return fmt.Errorf("get supplementary groups %s", err) } - if err := syscall.Setgroups(suppGids); err != nil { + if err := syscall.Setgroups(execUser.Sgids); err != nil { return fmt.Errorf("setgroups %s", err) } - if err := system.Setgid(gid); err != nil { + if err := system.Setgid(execUser.Gid); err != nil { return fmt.Errorf("setgid %s", err) } - if err := system.Setuid(uid); err != nil { + if err := system.Setuid(execUser.Uid); err != nil { return fmt.Errorf("setuid %s", err) } // if we didn't get HOME already, set it based on the user's HOME if envHome := os.Getenv("HOME"); envHome == "" { - if err := os.Setenv("HOME", home); err != nil { + if err := os.Setenv("HOME", execUser.Home); err != nil { return fmt.Errorf("set HOME %s", err) } } diff --git a/vendor/src/github.com/docker/libcontainer/netlink/netlink_linux.go b/vendor/src/github.com/docker/libcontainer/netlink/netlink_linux.go index 93ebade5c0..57790421c0 100644 --- a/vendor/src/github.com/docker/libcontainer/netlink/netlink_linux.go +++ b/vendor/src/github.com/docker/libcontainer/netlink/netlink_linux.go @@ -1003,28 +1003,23 @@ func AddRoute(destination, source, gateway, device string) error { } if source != "" { - srcIP, srcNet, err := net.ParseCIDR(source) + srcIP := net.ParseIP(source) if err != nil { - return fmt.Errorf("source CIDR %s couldn't be parsed", source) + return fmt.Errorf("source IP %s couldn't be parsed", source) } srcFamily := getIpFamily(srcIP) if currentFamily != -1 && currentFamily != srcFamily { return fmt.Errorf("source and destination ip were not the same IP family") } currentFamily = srcFamily - srcLen, bits := srcNet.Mask.Size() - if srcLen == 0 && bits == 0 { - return fmt.Errorf("source CIDR %s generated a non-canonical Mask", source) - } msg.Family = uint8(srcFamily) - msg.Src_len = uint8(srcLen) var srcData []byte if srcFamily == syscall.AF_INET { srcData = srcIP.To4() } else { srcData = srcIP.To16() } - rtAttrs = append(rtAttrs, newRtAttr(syscall.RTA_SRC, srcData)) + rtAttrs = append(rtAttrs, newRtAttr(syscall.RTA_PREFSRC, srcData)) } if gateway != "" { diff --git a/vendor/src/github.com/docker/libcontainer/netlink/netlink_linux_test.go b/vendor/src/github.com/docker/libcontainer/netlink/netlink_linux_test.go index be896a14a4..4b098777cd 100644 --- a/vendor/src/github.com/docker/libcontainer/netlink/netlink_linux_test.go +++ b/vendor/src/github.com/docker/libcontainer/netlink/netlink_linux_test.go @@ -280,6 +280,34 @@ func TestAddDelNetworkIp(t *testing.T) { } } +func TestAddRouteSourceSelection(t *testing.T) { + tstIp := "127.1.1.1" + tl := testLink{name: "tstEth", linkType: "dummy"} + + addLink(t, tl.name, tl.linkType) + defer deleteLink(t, tl.name) + + ip := net.ParseIP(tstIp) + mask := net.IPv4Mask(255, 255, 255, 255) + ipNet := &net.IPNet{IP: ip, Mask: mask} + + iface, err := net.InterfaceByName(tl.name) + if err != nil { + t.Fatalf("Lost created link %#v", tl) + } + + if err := NetworkLinkAddIp(iface, ip, ipNet); err != nil { + t.Fatalf("Could not add IP address %s to interface %#v: %s", ip.String(), iface, err) + } + + upLink(t, tl.name) + defer downLink(t, tl.name) + + if err := AddRoute("127.0.0.0/8", tstIp, "", tl.name); err != nil { + 
t.Fatalf("Failed to add route with source address") + } +} + func TestCreateVethPair(t *testing.T) { if testing.Short() { return diff --git a/vendor/src/github.com/docker/libcontainer/sample_configs/route_source_address_selection.json b/vendor/src/github.com/docker/libcontainer/sample_configs/route_source_address_selection.json new file mode 100644 index 0000000000..d4baf94cde --- /dev/null +++ b/vendor/src/github.com/docker/libcontainer/sample_configs/route_source_address_selection.json @@ -0,0 +1,209 @@ +{ + "capabilities": [ + "CHOWN", + "DAC_OVERRIDE", + "FOWNER", + "MKNOD", + "NET_RAW", + "SETGID", + "SETUID", + "SETFCAP", + "SETPCAP", + "NET_BIND_SERVICE", + "SYS_CHROOT", + "KILL" + ], + "cgroups": { + "allowed_devices": [ + { + "cgroup_permissions": "m", + "major_number": -1, + "minor_number": -1, + "type": 99 + }, + { + "cgroup_permissions": "m", + "major_number": -1, + "minor_number": -1, + "type": 98 + }, + { + "cgroup_permissions": "rwm", + "major_number": 5, + "minor_number": 1, + "path": "/dev/console", + "type": 99 + }, + { + "cgroup_permissions": "rwm", + "major_number": 4, + "path": "/dev/tty0", + "type": 99 + }, + { + "cgroup_permissions": "rwm", + "major_number": 4, + "minor_number": 1, + "path": "/dev/tty1", + "type": 99 + }, + { + "cgroup_permissions": "rwm", + "major_number": 136, + "minor_number": -1, + "type": 99 + }, + { + "cgroup_permissions": "rwm", + "major_number": 5, + "minor_number": 2, + "type": 99 + }, + { + "cgroup_permissions": "rwm", + "major_number": 10, + "minor_number": 200, + "type": 99 + }, + { + "cgroup_permissions": "rwm", + "file_mode": 438, + "major_number": 1, + "minor_number": 3, + "path": "/dev/null", + "type": 99 + }, + { + "cgroup_permissions": "rwm", + "file_mode": 438, + "major_number": 1, + "minor_number": 5, + "path": "/dev/zero", + "type": 99 + }, + { + "cgroup_permissions": "rwm", + "file_mode": 438, + "major_number": 1, + "minor_number": 7, + "path": "/dev/full", + "type": 99 + }, + { + "cgroup_permissions": "rwm", + "file_mode": 438, + "major_number": 5, + "path": "/dev/tty", + "type": 99 + }, + { + "cgroup_permissions": "rwm", + "file_mode": 438, + "major_number": 1, + "minor_number": 9, + "path": "/dev/urandom", + "type": 99 + }, + { + "cgroup_permissions": "rwm", + "file_mode": 438, + "major_number": 1, + "minor_number": 8, + "path": "/dev/random", + "type": 99 + } + ], + "name": "docker-koye", + "parent": "docker" + }, + "restrict_sys": true, + "mount_config": { + "device_nodes": [ + { + "cgroup_permissions": "rwm", + "file_mode": 438, + "major_number": 1, + "minor_number": 3, + "path": "/dev/null", + "type": 99 + }, + { + "cgroup_permissions": "rwm", + "file_mode": 438, + "major_number": 1, + "minor_number": 5, + "path": "/dev/zero", + "type": 99 + }, + { + "cgroup_permissions": "rwm", + "file_mode": 438, + "major_number": 1, + "minor_number": 7, + "path": "/dev/full", + "type": 99 + }, + { + "cgroup_permissions": "rwm", + "file_mode": 438, + "major_number": 5, + "path": "/dev/tty", + "type": 99 + }, + { + "cgroup_permissions": "rwm", + "file_mode": 438, + "major_number": 1, + "minor_number": 9, + "path": "/dev/urandom", + "type": 99 + }, + { + "cgroup_permissions": "rwm", + "file_mode": 438, + "major_number": 1, + "minor_number": 8, + "path": "/dev/random", + "type": 99 + } + ] + }, + "environment": [ + "HOME=/", + "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin", + "HOSTNAME=koye", + "TERM=xterm" + ], + "hostname": "koye", + "namespaces": { + "NEWIPC": true, + "NEWNET": true, + "NEWNS": true, + "NEWPID": 
true, + "NEWUTS": true + }, + "networks": [ + { + "address": "127.0.0.1/0", + "gateway": "localhost", + "mtu": 1500, + "type": "loopback" + }, + { + "address": "172.17.0.101/16", + "bridge": "docker0", + "veth_prefix": "veth", + "mtu": 1500, + "type": "veth" + } + ], + "routes": [ + { + "destination": "0.0.0.0/0", + "source": "172.17.0.101", + "gateway": "172.17.42.1", + "interface_name": "eth0" + } + ], + "tty": true +} diff --git a/vendor/src/github.com/docker/libcontainer/user/lookup.go b/vendor/src/github.com/docker/libcontainer/user/lookup.go new file mode 100644 index 0000000000..6f8a982ff7 --- /dev/null +++ b/vendor/src/github.com/docker/libcontainer/user/lookup.go @@ -0,0 +1,108 @@ +package user + +import ( + "errors" + "fmt" + "syscall" +) + +var ( + // The current operating system does not provide the required data for user lookups. + ErrUnsupported = errors.New("user lookup: operating system does not provide passwd-formatted data") +) + +func lookupUser(filter func(u User) bool) (User, error) { + // Get operating system-specific passwd reader-closer. + passwd, err := GetPasswd() + if err != nil { + return User{}, err + } + defer passwd.Close() + + // Get the users. + users, err := ParsePasswdFilter(passwd, filter) + if err != nil { + return User{}, err + } + + // No user entries found. + if len(users) == 0 { + return User{}, fmt.Errorf("no matching entries in passwd file") + } + + // Assume the first entry is the "correct" one. + return users[0], nil +} + +// CurrentUser looks up the current user by their user id in /etc/passwd. If the +// user cannot be found (or there is no /etc/passwd file on the filesystem), +// then CurrentUser returns an error. +func CurrentUser() (User, error) { + return LookupUid(syscall.Getuid()) +} + +// LookupUser looks up a user by their username in /etc/passwd. If the user +// cannot be found (or there is no /etc/passwd file on the filesystem), then +// LookupUser returns an error. +func LookupUser(username string) (User, error) { + return lookupUser(func(u User) bool { + return u.Name == username + }) +} + +// LookupUid looks up a user by their user id in /etc/passwd. If the user cannot +// be found (or there is no /etc/passwd file on the filesystem), then LookupId +// returns an error. +func LookupUid(uid int) (User, error) { + return lookupUser(func(u User) bool { + return u.Uid == uid + }) +} + +func lookupGroup(filter func(g Group) bool) (Group, error) { + // Get operating system-specific group reader-closer. + group, err := GetGroup() + if err != nil { + return Group{}, err + } + defer group.Close() + + // Get the users. + groups, err := ParseGroupFilter(group, filter) + if err != nil { + return Group{}, err + } + + // No user entries found. + if len(groups) == 0 { + return Group{}, fmt.Errorf("no matching entries in group file") + } + + // Assume the first entry is the "correct" one. + return groups[0], nil +} + +// CurrentGroup looks up the current user's group by their primary group id's +// entry in /etc/passwd. If the group cannot be found (or there is no +// /etc/group file on the filesystem), then CurrentGroup returns an error. +func CurrentGroup() (Group, error) { + return LookupGid(syscall.Getgid()) +} + +// LookupGroup looks up a group by its name in /etc/group. If the group cannot +// be found (or there is no /etc/group file on the filesystem), then LookupGroup +// returns an error. 
+func LookupGroup(groupname string) (Group, error) { + return lookupGroup(func(g Group) bool { + return g.Name == groupname + }) +} + +// LookupGid looks up a group by its group id in /etc/group. If the group cannot +// be found (or there is no /etc/group file on the filesystem), then LookupGid +// returns an error. +func LookupGid(gid int) (Group, error) { + return lookupGroup(func(g Group) bool { + return g.Gid == gid + }) +} diff --git a/vendor/src/github.com/docker/libcontainer/user/lookup_unix.go b/vendor/src/github.com/docker/libcontainer/user/lookup_unix.go new file mode 100644 index 0000000000..409c114e26 --- /dev/null +++ b/vendor/src/github.com/docker/libcontainer/user/lookup_unix.go @@ -0,0 +1,30 @@ +// +build darwin dragonfly freebsd linux netbsd openbsd solaris + +package user + +import ( + "io" + "os" +) + +// Unix-specific path to the passwd and group formatted files. +const ( + unixPasswdFile = "/etc/passwd" + unixGroupFile = "/etc/group" +) + +func GetPasswdFile() (string, error) { + return unixPasswdFile, nil +} + +func GetPasswd() (io.ReadCloser, error) { + return os.Open(unixPasswdFile) +} + +func GetGroupFile() (string, error) { + return unixGroupFile, nil +} + +func GetGroup() (io.ReadCloser, error) { + return os.Open(unixGroupFile) +} diff --git a/vendor/src/github.com/docker/libcontainer/user/lookup_unsupported.go b/vendor/src/github.com/docker/libcontainer/user/lookup_unsupported.go new file mode 100644 index 0000000000..0f15c57d82 --- /dev/null +++ b/vendor/src/github.com/docker/libcontainer/user/lookup_unsupported.go @@ -0,0 +1,21 @@ +// +build !darwin,!dragonfly,!freebsd,!linux,!netbsd,!openbsd,!solaris + +package user + +import "io" + +func GetPasswdFile() (string, error) { + return "", ErrUnsupported +} + +func GetPasswd() (io.ReadCloser, error) { + return nil, ErrUnsupported +} + +func GetGroupFile() (string, error) { + return "", ErrUnsupported +} + +func GetGroup() (io.ReadCloser, error) { + return nil, ErrUnsupported +} diff --git a/vendor/src/github.com/docker/libcontainer/user/user.go b/vendor/src/github.com/docker/libcontainer/user/user.go index 493dd86f20..69387f2ef6 100644 --- a/vendor/src/github.com/docker/libcontainer/user/user.go +++ b/vendor/src/github.com/docker/libcontainer/user/user.go @@ -69,23 +69,36 @@ func parseLine(line string, v ...interface{}) { } } -func ParsePasswd() ([]*User, error) { - return ParsePasswdFilter(nil) -} - -func ParsePasswdFilter(filter func(*User) bool) ([]*User, error) { - f, err := os.Open("/etc/passwd") +func ParsePasswdFile(path string) ([]User, error) { + passwd, err := os.Open(path) if err != nil { return nil, err } - defer f.Close() - return parsePasswdFile(f, filter) + defer passwd.Close() + return ParsePasswd(passwd) } -func parsePasswdFile(r io.Reader, filter func(*User) bool) ([]*User, error) { +func ParsePasswd(passwd io.Reader) ([]User, error) { + return ParsePasswdFilter(passwd, nil) +} + +func ParsePasswdFileFilter(path string, filter func(User) bool) ([]User, error) { + passwd, err := os.Open(path) + if err != nil { + return nil, err + } + defer passwd.Close() + return ParsePasswdFilter(passwd, filter) +} + +func ParsePasswdFilter(r io.Reader, filter func(User) bool) ([]User, error) { + if r == nil { + return nil, fmt.Errorf("nil source for passwd-formatted data") + } + var ( s = bufio.NewScanner(r) - out = []*User{} + out = []User{} ) for s.Scan() { @@ -103,7 +116,7 @@ func parsePasswdFile(r io.Reader, filter func(*User) bool) ([]*User, error) { // Name:Pass:Uid:Gid:Gecos:Home:Shell // 
root:x:0:0:root:/root:/bin/bash // adm:x:3:4:adm:/var/adm:/bin/false - p := &User{} + p := User{} parseLine( text, &p.Name, &p.Pass, &p.Uid, &p.Gid, &p.Gecos, &p.Home, &p.Shell, @@ -117,23 +130,36 @@ func parsePasswdFile(r io.Reader, filter func(*User) bool) ([]*User, error) { return out, nil } -func ParseGroup() ([]*Group, error) { - return ParseGroupFilter(nil) -} - -func ParseGroupFilter(filter func(*Group) bool) ([]*Group, error) { - f, err := os.Open("/etc/group") +func ParseGroupFile(path string) ([]Group, error) { + group, err := os.Open(path) if err != nil { return nil, err } - defer f.Close() - return parseGroupFile(f, filter) + defer group.Close() + return ParseGroup(group) } -func parseGroupFile(r io.Reader, filter func(*Group) bool) ([]*Group, error) { +func ParseGroup(group io.Reader) ([]Group, error) { + return ParseGroupFilter(group, nil) +} + +func ParseGroupFileFilter(path string, filter func(Group) bool) ([]Group, error) { + group, err := os.Open(path) + if err != nil { + return nil, err + } + defer group.Close() + return ParseGroupFilter(group, filter) +} + +func ParseGroupFilter(r io.Reader, filter func(Group) bool) ([]Group, error) { + if r == nil { + return nil, fmt.Errorf("nil source for group-formatted data") + } + var ( s = bufio.NewScanner(r) - out = []*Group{} + out = []Group{} ) for s.Scan() { @@ -151,7 +177,7 @@ func parseGroupFile(r io.Reader, filter func(*Group) bool) ([]*Group, error) { // Name:Pass:Gid:List // root:x:0:root // adm:x:4:root,adm,daemon - p := &Group{} + p := Group{} parseLine( text, &p.Name, &p.Pass, &p.Gid, &p.List, @@ -165,94 +191,160 @@ func parseGroupFile(r io.Reader, filter func(*Group) bool) ([]*Group, error) { return out, nil } -// Given a string like "user", "1000", "user:group", "1000:1000", returns the uid, gid, list of supplementary group IDs, and home directory, if available and/or applicable. -func GetUserGroupSupplementaryHome(userSpec string, defaultUid, defaultGid int, defaultHome string) (int, int, []int, string, error) { - var ( - uid = defaultUid - gid = defaultGid - suppGids = []int{} - home = defaultHome +type ExecUser struct { + Uid, Gid int + Sgids []int + Home string +} +// GetExecUserFile is a wrapper for GetExecUser. It reads data from each of the +// given file paths and uses that data as the arguments to GetExecUser. If the +// files cannot be opened for any reason, the error is ignored and a nil +// io.Reader is passed instead. +func GetExecUserFile(userSpec string, defaults *ExecUser, passwdPath, groupPath string) (*ExecUser, error) { + passwd, err := os.Open(passwdPath) + if err != nil { + passwd = nil + } else { + defer passwd.Close() + } + + group, err := os.Open(groupPath) + if err != nil { + group = nil + } else { + defer group.Close() + } + + return GetExecUser(userSpec, defaults, passwd, group) +} + +// GetExecUser parses a user specification string (using the passwd and group +// readers as sources for /etc/passwd and /etc/group data, respectively). In +// the case of blank fields or missing data from the sources, the values in +// defaults is used. +// +// GetExecUser will return an error if a user or group literal could not be +// found in any entry in passwd and group respectively. 
+// +// Examples of valid user specifications are: +// * "" +// * "user" +// * "uid" +// * "user:group" +// * "uid:gid +// * "user:gid" +// * "uid:group" +func GetExecUser(userSpec string, defaults *ExecUser, passwd, group io.Reader) (*ExecUser, error) { + var ( userArg, groupArg string + name string ) + if defaults == nil { + defaults = new(ExecUser) + } + + // Copy over defaults. + user := &ExecUser{ + Uid: defaults.Uid, + Gid: defaults.Gid, + Sgids: defaults.Sgids, + Home: defaults.Home, + } + + // Sgids slice *cannot* be nil. + if user.Sgids == nil { + user.Sgids = []int{} + } + // allow for userArg to have either "user" syntax, or optionally "user:group" syntax parseLine(userSpec, &userArg, &groupArg) - users, err := ParsePasswdFilter(func(u *User) bool { + users, err := ParsePasswdFilter(passwd, func(u User) bool { if userArg == "" { - return u.Uid == uid + return u.Uid == user.Uid } return u.Name == userArg || strconv.Itoa(u.Uid) == userArg }) - if err != nil && !os.IsNotExist(err) { + if err != nil && passwd != nil { if userArg == "" { - userArg = strconv.Itoa(uid) + userArg = strconv.Itoa(user.Uid) } - return 0, 0, nil, "", fmt.Errorf("Unable to find user %v: %v", userArg, err) + return nil, fmt.Errorf("Unable to find user %v: %v", userArg, err) } haveUser := users != nil && len(users) > 0 if haveUser { // if we found any user entries that matched our filter, let's take the first one as "correct" - uid = users[0].Uid - gid = users[0].Gid - home = users[0].Home + name = users[0].Name + user.Uid = users[0].Uid + user.Gid = users[0].Gid + user.Home = users[0].Home } else if userArg != "" { // we asked for a user but didn't find them... let's check to see if we wanted a numeric user - uid, err = strconv.Atoi(userArg) + user.Uid, err = strconv.Atoi(userArg) if err != nil { // not numeric - we have to bail - return 0, 0, nil, "", fmt.Errorf("Unable to find user %v", userArg) + return nil, fmt.Errorf("Unable to find user %v", userArg) } - if uid < minId || uid > maxId { - return 0, 0, nil, "", ErrRange + + // Must be inside valid uid range. + if user.Uid < minId || user.Uid > maxId { + return nil, ErrRange } // if userArg couldn't be found in /etc/passwd but is numeric, just roll with it - this is legit } - if groupArg != "" || (haveUser && users[0].Name != "") { - groups, err := ParseGroupFilter(func(g *Group) bool { + if groupArg != "" || name != "" { + groups, err := ParseGroupFilter(group, func(g Group) bool { + // Explicit group format takes precedence. if groupArg != "" { return g.Name == groupArg || strconv.Itoa(g.Gid) == groupArg } + + // Check if user is a member. for _, u := range g.List { - if u == users[0].Name { + if u == name { return true } } + return false }) - if err != nil && !os.IsNotExist(err) { - return 0, 0, nil, "", fmt.Errorf("Unable to find groups for user %v: %v", users[0].Name, err) + if err != nil && group != nil { + return nil, fmt.Errorf("Unable to find groups for user %v: %v", users[0].Name, err) } haveGroup := groups != nil && len(groups) > 0 if groupArg != "" { if haveGroup { // if we found any group entries that matched our filter, let's take the first one as "correct" - gid = groups[0].Gid + user.Gid = groups[0].Gid } else { // we asked for a group but didn't find id... 
let's check to see if we wanted a numeric group - gid, err = strconv.Atoi(groupArg) + user.Gid, err = strconv.Atoi(groupArg) if err != nil { // not numeric - we have to bail - return 0, 0, nil, "", fmt.Errorf("Unable to find group %v", groupArg) + return nil, fmt.Errorf("Unable to find group %v", groupArg) } - if gid < minId || gid > maxId { - return 0, 0, nil, "", ErrRange + + // Ensure gid is inside gid range. + if user.Gid < minId || user.Gid > maxId { + return nil, ErrRange } // if groupArg couldn't be found in /etc/group but is numeric, just roll with it - this is legit } } else if haveGroup { - suppGids = make([]int, len(groups)) + // If implicit group format, fill supplementary gids. + user.Sgids = make([]int, len(groups)) for i, group := range groups { - suppGids[i] = group.Gid + user.Sgids[i] = group.Gid } } } - return uid, gid, suppGids, home, nil + return user, nil } diff --git a/vendor/src/github.com/docker/libcontainer/user/user_test.go b/vendor/src/github.com/docker/libcontainer/user/user_test.go index 136632c27e..4fe008fb39 100644 --- a/vendor/src/github.com/docker/libcontainer/user/user_test.go +++ b/vendor/src/github.com/docker/libcontainer/user/user_test.go @@ -1,6 +1,8 @@ package user import ( + "io" + "reflect" "strings" "testing" ) @@ -54,7 +56,7 @@ func TestUserParseLine(t *testing.T) { } func TestUserParsePasswd(t *testing.T) { - users, err := parsePasswdFile(strings.NewReader(` + users, err := ParsePasswdFilter(strings.NewReader(` root:x:0:0:root:/root:/bin/bash adm:x:3:4:adm:/var/adm:/bin/false this is just some garbage data @@ -74,7 +76,7 @@ this is just some garbage data } func TestUserParseGroup(t *testing.T) { - groups, err := parseGroupFile(strings.NewReader(` + groups, err := ParseGroupFilter(strings.NewReader(` root:x:0:root adm:x:4:root,adm,daemon this is just some garbage data @@ -92,3 +94,259 @@ this is just some garbage data t.Fatalf("Expected groups[1] to be 4 - adm - 3 members, got %v - %v - %v", groups[1].Gid, groups[1].Name, len(groups[1].List)) } } + +func TestValidGetExecUser(t *testing.T) { + const passwdContent = ` +root:x:0:0:root user:/root:/bin/bash +adm:x:42:43:adm:/var/adm:/bin/false +this is just some garbage data +` + const groupContent = ` +root:x:0:root +adm:x:43: +grp:x:1234:root,adm +this is just some garbage data +` + defaultExecUser := ExecUser{ + Uid: 8888, + Gid: 8888, + Sgids: []int{8888}, + Home: "/8888", + } + + tests := []struct { + ref string + expected ExecUser + }{ + { + ref: "root", + expected: ExecUser{ + Uid: 0, + Gid: 0, + Sgids: []int{0, 1234}, + Home: "/root", + }, + }, + { + ref: "adm", + expected: ExecUser{ + Uid: 42, + Gid: 43, + Sgids: []int{1234}, + Home: "/var/adm", + }, + }, + { + ref: "root:adm", + expected: ExecUser{ + Uid: 0, + Gid: 43, + Sgids: defaultExecUser.Sgids, + Home: "/root", + }, + }, + { + ref: "adm:1234", + expected: ExecUser{ + Uid: 42, + Gid: 1234, + Sgids: defaultExecUser.Sgids, + Home: "/var/adm", + }, + }, + { + ref: "42:1234", + expected: ExecUser{ + Uid: 42, + Gid: 1234, + Sgids: defaultExecUser.Sgids, + Home: "/var/adm", + }, + }, + { + ref: "1337:1234", + expected: ExecUser{ + Uid: 1337, + Gid: 1234, + Sgids: defaultExecUser.Sgids, + Home: defaultExecUser.Home, + }, + }, + { + ref: "1337", + expected: ExecUser{ + Uid: 1337, + Gid: defaultExecUser.Gid, + Sgids: defaultExecUser.Sgids, + Home: defaultExecUser.Home, + }, + }, + { + ref: "", + expected: ExecUser{ + Uid: defaultExecUser.Uid, + Gid: defaultExecUser.Gid, + Sgids: defaultExecUser.Sgids, + Home: defaultExecUser.Home, + }, + }, + } 
+ + for _, test := range tests { + passwd := strings.NewReader(passwdContent) + group := strings.NewReader(groupContent) + + execUser, err := GetExecUser(test.ref, &defaultExecUser, passwd, group) + if err != nil { + t.Logf("got unexpected error when parsing '%s': %s", test.ref, err.Error()) + t.Fail() + continue + } + + if !reflect.DeepEqual(test.expected, *execUser) { + t.Logf("got: %#v", execUser) + t.Logf("expected: %#v", test.expected) + t.Fail() + continue + } + } +} + +func TestInvalidGetExecUser(t *testing.T) { + const passwdContent = ` +root:x:0:0:root user:/root:/bin/bash +adm:x:42:43:adm:/var/adm:/bin/false +this is just some garbage data +` + const groupContent = ` +root:x:0:root +adm:x:43: +grp:x:1234:root,adm +this is just some garbage data +` + + tests := []string{ + // No such user/group. + "notuser", + "notuser:notgroup", + "root:notgroup", + "notuser:adm", + "8888:notgroup", + "notuser:8888", + + // Invalid user/group values. + "-1:0", + "0:-3", + "-5:-2", + } + + for _, test := range tests { + passwd := strings.NewReader(passwdContent) + group := strings.NewReader(groupContent) + + execUser, err := GetExecUser(test, nil, passwd, group) + if err == nil { + t.Logf("got unexpected success when parsing '%s': %#v", test, execUser) + t.Fail() + continue + } + } +} + +func TestGetExecUserNilSources(t *testing.T) { + const passwdContent = ` +root:x:0:0:root user:/root:/bin/bash +adm:x:42:43:adm:/var/adm:/bin/false +this is just some garbage data +` + const groupContent = ` +root:x:0:root +adm:x:43: +grp:x:1234:root,adm +this is just some garbage data +` + + defaultExecUser := ExecUser{ + Uid: 8888, + Gid: 8888, + Sgids: []int{8888}, + Home: "/8888", + } + + tests := []struct { + ref string + passwd, group bool + expected ExecUser + }{ + { + ref: "", + passwd: false, + group: false, + expected: ExecUser{ + Uid: 8888, + Gid: 8888, + Sgids: []int{8888}, + Home: "/8888", + }, + }, + { + ref: "root", + passwd: true, + group: false, + expected: ExecUser{ + Uid: 0, + Gid: 0, + Sgids: []int{8888}, + Home: "/root", + }, + }, + { + ref: "0", + passwd: false, + group: false, + expected: ExecUser{ + Uid: 0, + Gid: 8888, + Sgids: []int{8888}, + Home: "/8888", + }, + }, + { + ref: "0:0", + passwd: false, + group: false, + expected: ExecUser{ + Uid: 0, + Gid: 0, + Sgids: []int{8888}, + Home: "/8888", + }, + }, + } + + for _, test := range tests { + var passwd, group io.Reader + + if test.passwd { + passwd = strings.NewReader(passwdContent) + } + + if test.group { + group = strings.NewReader(groupContent) + } + + execUser, err := GetExecUser(test.ref, &defaultExecUser, passwd, group) + if err != nil { + t.Logf("got unexpected error when parsing '%s': %s", test.ref, err.Error()) + t.Fail() + continue + } + + if !reflect.DeepEqual(test.expected, *execUser) { + t.Logf("got: %#v", execUser) + t.Logf("expected: %#v", test.expected) + t.Fail() + continue + } + } +} From 3ac4aa0d6ba6cb10b9a46df40f18b81dba137840 Mon Sep 17 00:00:00 2001 From: Aleksa Sarai Date: Sat, 8 Nov 2014 09:58:39 +1100 Subject: [PATCH 278/592] *: transition to new libcontainer/user API This patch fixes the compilation errors in Docker due to changes in the libcontainer/user API. There is no functionality change due to this patch. 
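The call-site changes are mostly mechanical. As a sketch of the pattern, a simplified stand-in for the daemon's own lookup helper (the real change is in the server.go hunk below), resolving a group name or numeric gid with the file-based filter API could look like this; the "docker" group name is only an example.

```
package main

import (
	"fmt"
	"strconv"

	"github.com/docker/libcontainer/user"
)

// lookupGid resolves a group name or numeric gid via the new
// ParseGroupFileFilter API; a simplified sketch, not the daemon code.
func lookupGid(nameOrGid string) (int, error) {
	groupFile, err := user.GetGroupFile()
	if err != nil {
		return -1, err
	}
	groups, err := user.ParseGroupFileFilter(groupFile, func(g user.Group) bool {
		return g.Name == nameOrGid || strconv.Itoa(g.Gid) == nameOrGid
	})
	if err != nil {
		return -1, err
	}
	if len(groups) == 0 {
		return -1, fmt.Errorf("group %s not found", nameOrGid)
	}
	return groups[0].Gid, nil
}

func main() {
	gid, err := lookupGid("docker")
	fmt.Println(gid, err)
}
```
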
Signed-off-by: Aleksa Sarai (github: cyphar) --- api/server/server.go | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/api/server/server.go b/api/server/server.go index d77a6c22a2..ac5801e9c9 100644 --- a/api/server/server.go +++ b/api/server/server.go @@ -1370,7 +1370,11 @@ func ServeFd(addr string, handle http.Handler) error { } func lookupGidByName(nameOrGid string) (int, error) { - groups, err := user.ParseGroupFilter(func(g *user.Group) bool { + groupFile, err := user.GetGroupFile() + if err != nil { + return -1, err + } + groups, err := user.ParseGroupFileFilter(groupFile, func(g user.Group) bool { return g.Name == nameOrGid || strconv.Itoa(g.Gid) == nameOrGid }) if err != nil { From f96e04ffc7973e290653044cc86dbc1efb18276d Mon Sep 17 00:00:00 2001 From: Vishnu Kannan Date: Wed, 8 Oct 2014 17:03:57 +0000 Subject: [PATCH 279/592] This patch adds ability in docker to detect out of memory conditions in containers. Since the containers can handle the out of memory kernel kills gracefully, docker will only provide out of memory information as an additional metadata as part of container status. Docker-DCO-1.1-Signed-off-by: Vishnu Kannan (github: vishh) --- daemon/daemon.go | 6 +- daemon/execdriver/driver.go | 11 +++- daemon/execdriver/lxc/driver.go | 14 ++--- daemon/execdriver/native/driver.go | 96 +++++++++++++++++++++--------- daemon/monitor.go | 10 ++-- daemon/state.go | 29 ++++++--- daemon/state_test.go | 4 +- integration/runtime_test.go | 3 +- 8 files changed, 119 insertions(+), 54 deletions(-) diff --git a/daemon/daemon.go b/daemon/daemon.go index b0feae917b..e04caa8ffe 100644 --- a/daemon/daemon.go +++ b/daemon/daemon.go @@ -231,7 +231,7 @@ func (daemon *Daemon) register(container *Container, updateSuffixarray bool) err log.Debugf("killing old running container %s", container.ID) existingPid := container.Pid - container.SetStopped(0) + container.SetStopped(&execdriver.ExitStatus{0, false}) // We only have to handle this for lxc because the other drivers will ensure that // no processes are left when docker dies @@ -263,7 +263,7 @@ func (daemon *Daemon) register(container *Container, updateSuffixarray bool) err log.Debugf("Marking as stopped") - container.SetStopped(-127) + container.SetStopped(&execdriver.ExitStatus{-127, false}) if err := container.ToDisk(); err != nil { return err } @@ -991,7 +991,7 @@ func (daemon *Daemon) Diff(container *Container) (archive.Archive, error) { return daemon.driver.Diff(container.ID, initID) } -func (daemon *Daemon) Run(c *Container, pipes *execdriver.Pipes, startCallback execdriver.StartCallback) (int, error) { +func (daemon *Daemon) Run(c *Container, pipes *execdriver.Pipes, startCallback execdriver.StartCallback) (*execdriver.ExitStatus, error) { return daemon.execDriver.Run(c.command, pipes, startCallback) } diff --git a/daemon/execdriver/driver.go b/daemon/execdriver/driver.go index bc2eb24eda..6ed98b78ba 100644 --- a/daemon/execdriver/driver.go +++ b/daemon/execdriver/driver.go @@ -40,9 +40,18 @@ type TtyTerminal interface { Master() *os.File } +// ExitStatus provides exit reasons for a container. +type ExitStatus struct { + // The exit code with which the container exited. + ExitCode int + + // Whether the container encountered an OOM. 
+ OOMKilled bool +} + type Driver interface { Run(c *Command, pipes *Pipes, startCallback StartCallback) (int, error) // Run executes the process and blocks until the process exits and returns the exit code - // Exec executes the process in a running container, blocks until the process exits and returns the exit code + // Exec executes the process in an existing container, blocks until the process exits and returns the exit code Exec(c *Command, processConfig *ProcessConfig, pipes *Pipes, startCallback StartCallback) (int, error) Kill(c *Command, sig int) error Pause(c *Command) error diff --git a/daemon/execdriver/lxc/driver.go b/daemon/execdriver/lxc/driver.go index 7583a3e64f..3d8aca0354 100644 --- a/daemon/execdriver/lxc/driver.go +++ b/daemon/execdriver/lxc/driver.go @@ -55,7 +55,7 @@ func (d *driver) Name() string { return fmt.Sprintf("%s-%s", DriverName, version) } -func (d *driver) Run(c *execdriver.Command, pipes *execdriver.Pipes, startCallback execdriver.StartCallback) (int, error) { +func (d *driver) Run(c *execdriver.Command, pipes *execdriver.Pipes, startCallback execdriver.StartCallback) (*execdriver.ExitStatus, error) { var ( term execdriver.Terminal err error @@ -76,11 +76,11 @@ func (d *driver) Run(c *execdriver.Command, pipes *execdriver.Pipes, startCallba }) if err := d.generateEnvConfig(c); err != nil { - return -1, err + return nil, err } configPath, err := d.generateLXCConfig(c) if err != nil { - return -1, err + return nil, err } params := []string{ "lxc-start", @@ -155,11 +155,11 @@ func (d *driver) Run(c *execdriver.Command, pipes *execdriver.Pipes, startCallba c.ProcessConfig.Args = append([]string{name}, arg...) if err := nodes.CreateDeviceNodes(c.Rootfs, c.AutoCreatedDevices); err != nil { - return -1, err + return nil, err } if err := c.ProcessConfig.Start(); err != nil { - return -1, err + return nil, err } var ( @@ -183,7 +183,7 @@ func (d *driver) Run(c *execdriver.Command, pipes *execdriver.Pipes, startCallba c.ProcessConfig.Process.Kill() c.ProcessConfig.Wait() } - return -1, err + return nil, err } c.ContainerPid = pid @@ -194,7 +194,7 @@ func (d *driver) Run(c *execdriver.Command, pipes *execdriver.Pipes, startCallba <-waitLock - return getExitCode(c), waitErr + return &execdriver.ExitStatus{getExitCode(c), false}, waitErr } /// Return the exit code of the process diff --git a/daemon/execdriver/native/driver.go b/daemon/execdriver/native/driver.go index 3628d7b575..a37eccbabe 100644 --- a/daemon/execdriver/native/driver.go +++ b/daemon/execdriver/native/driver.go @@ -14,6 +14,7 @@ import ( "sync" "syscall" + log "github.com/Sirupsen/logrus" "github.com/docker/docker/daemon/execdriver" "github.com/docker/docker/pkg/term" "github.com/docker/libcontainer" @@ -60,11 +61,20 @@ func NewDriver(root, initPath string) (*driver, error) { }, nil } -func (d *driver) Run(c *execdriver.Command, pipes *execdriver.Pipes, startCallback execdriver.StartCallback) (int, error) { +func (d *driver) notifyOnOOM(config *libcontainer.Config) (<-chan struct{}, error) { + return fs.NotifyOnOOM(config.Cgroups) +} + +type execOutput struct { + exitCode int + err error +} + +func (d *driver) Run(c *execdriver.Command, pipes *execdriver.Pipes, startCallback execdriver.StartCallback) (*execdriver.ExitStatus, error) { // take the Command and populate the libcontainer.Config from it container, err := d.createContainer(c) if err != nil { - return -1, err + return nil, err } var term execdriver.Terminal @@ -75,7 +85,7 @@ func (d *driver) Run(c *execdriver.Command, pipes *execdriver.Pipes, 
startCallba term, err = execdriver.NewStdConsole(&c.ProcessConfig, pipes) } if err != nil { - return -1, err + return nil, err } c.ProcessConfig.Terminal = term @@ -92,40 +102,70 @@ func (d *driver) Run(c *execdriver.Command, pipes *execdriver.Pipes, startCallba ) if err := d.createContainerRoot(c.ID); err != nil { - return -1, err + return nil, err } defer d.cleanContainer(c.ID) if err := d.writeContainerFile(container, c.ID); err != nil { - return -1, err + return nil, err } - return namespaces.Exec(container, c.ProcessConfig.Stdin, c.ProcessConfig.Stdout, c.ProcessConfig.Stderr, c.ProcessConfig.Console, dataPath, args, func(container *libcontainer.Config, console, dataPath, init string, child *os.File, args []string) *exec.Cmd { - c.ProcessConfig.Path = d.initPath - c.ProcessConfig.Args = append([]string{ - DriverName, - "-console", console, - "-pipe", "3", - "-root", filepath.Join(d.root, c.ID), - "--", - }, args...) + execOutputChan := make(chan execOutput, 0) + waitForStart := make(chan struct{}, 0) - // set this to nil so that when we set the clone flags anything else is reset - c.ProcessConfig.SysProcAttr = &syscall.SysProcAttr{ - Cloneflags: uintptr(namespaces.GetNamespaceFlags(container.Namespaces)), + go func() { + exitCode, err := namespaces.Exec(container, c.ProcessConfig.Stdin, c.ProcessConfig.Stdout, c.ProcessConfig.Stderr, c.ProcessConfig.Console, dataPath, args, func(container *libcontainer.Config, console, dataPath, init string, child *os.File, args []string) *exec.Cmd { + c.ProcessConfig.Path = d.initPath + c.ProcessConfig.Args = append([]string{ + DriverName, + "-console", console, + "-pipe", "3", + "-root", filepath.Join(d.root, c.ID), + "--", + }, args...) + + // set this to nil so that when we set the clone flags anything else is reset + c.ProcessConfig.SysProcAttr = &syscall.SysProcAttr{ + Cloneflags: uintptr(namespaces.GetNamespaceFlags(container.Namespaces)), + } + c.ProcessConfig.ExtraFiles = []*os.File{child} + + c.ProcessConfig.Env = container.Env + c.ProcessConfig.Dir = container.RootFs + + return &c.ProcessConfig.Cmd + }, func() { + close(waitForStart) + if startCallback != nil { + c.ContainerPid = c.ProcessConfig.Process.Pid + startCallback(&c.ProcessConfig, c.ContainerPid) + } + }) + execOutputChan <- execOutput{exitCode, err} + }() + + select { + case execOutput := <-execOutputChan: + return &execdriver.ExitStatus{execOutput.exitCode, false}, execOutput.err + case <-waitForStart: + break + } + + oomKill := false + go func() { + oomKillNotification, err := d.notifyOnOOM(container) + if err == nil { + if _, ok := <-oomKillNotification; ok { + oomKill = true + } + } else { + log.Infof("WARNING: Your kernel does not support OOM notifications: %s", err) } - c.ProcessConfig.ExtraFiles = []*os.File{child} + }() + // wait for the container to exit. 
+ execOutput := <-execOutputChan - c.ProcessConfig.Env = container.Env - c.ProcessConfig.Dir = container.RootFs - - return &c.ProcessConfig.Cmd - }, func() { - if startCallback != nil { - c.ContainerPid = c.ProcessConfig.Process.Pid - startCallback(&c.ProcessConfig, c.ContainerPid) - } - }) + return &execdriver.ExitStatus{execOutput.exitCode, oomKill}, execOutput.err } func (d *driver) Kill(p *execdriver.Command, sig int) error { diff --git a/daemon/monitor.go b/daemon/monitor.go index d0d9d70a99..9ef991eb66 100644 --- a/daemon/monitor.go +++ b/daemon/monitor.go @@ -100,7 +100,7 @@ func (m *containerMonitor) Close() error { func (m *containerMonitor) Start() error { var ( err error - exitStatus int + exitStatus *execdriver.ExitStatus // this variable indicates where we in execution flow: // before Run or after afterRun bool @@ -150,9 +150,9 @@ func (m *containerMonitor) Start() error { // here container.Lock is already lost afterRun = true - m.resetMonitor(err == nil && exitStatus == 0) + m.resetMonitor(err == nil && exitStatus.ExitCode == 0) - if m.shouldRestart(exitStatus) { + if m.shouldRestart(exitStatus.ExitCode) { m.container.SetRestarting(exitStatus) m.container.LogEvent("die") m.resetContainer(true) @@ -209,7 +209,7 @@ func (m *containerMonitor) waitForNextRestart() { // shouldRestart checks the restart policy and applies the rules to determine if // the container's process should be restarted -func (m *containerMonitor) shouldRestart(exitStatus int) bool { +func (m *containerMonitor) shouldRestart(exitCode int) bool { m.mux.Lock() defer m.mux.Unlock() @@ -228,7 +228,7 @@ func (m *containerMonitor) shouldRestart(exitStatus int) bool { return false } - return exitStatus != 0 + return exitCode != 0 } return false diff --git a/daemon/state.go b/daemon/state.go index 2dd57bd94b..282f5da930 100644 --- a/daemon/state.go +++ b/daemon/state.go @@ -5,6 +5,7 @@ import ( "sync" "time" + "github.com/docker/docker/daemon/execdriver" "github.com/docker/docker/pkg/units" ) @@ -13,6 +14,7 @@ type State struct { Running bool Paused bool Restarting bool + OOMKilled bool Pid int ExitCode int Error string // contains last known error when starting the container @@ -29,12 +31,16 @@ func NewState() *State { // String returns a human-readable description of the state func (s *State) String() string { + oomInfo := "" + if s.OOMKilled { + oomInfo = "possibly due to lack of memory" + } if s.Running { if s.Paused { return fmt.Sprintf("Up %s (Paused)", units.HumanDuration(time.Now().UTC().Sub(s.StartedAt))) } if s.Restarting { - return fmt.Sprintf("Restarting (%d) %s ago", s.ExitCode, units.HumanDuration(time.Now().UTC().Sub(s.FinishedAt))) + return fmt.Sprintf("Restarting (%d) %s ago %s", s.ExitCode, units.HumanDuration(time.Now().UTC().Sub(s.FinishedAt)), oomInfo) } return fmt.Sprintf("Up %s", units.HumanDuration(time.Now().UTC().Sub(s.StartedAt))) @@ -44,7 +50,7 @@ func (s *State) String() string { return "" } - return fmt.Sprintf("Exited (%d) %s ago", s.ExitCode, units.HumanDuration(time.Now().UTC().Sub(s.FinishedAt))) + return fmt.Sprintf("Exited (%d) %s ago %s", s.ExitCode, units.HumanDuration(time.Now().UTC().Sub(s.FinishedAt)), oomInfo) } // StateString returns a single string to describe state @@ -149,25 +155,29 @@ func (s *State) setRunning(pid int) { s.waitChan = make(chan struct{}) } -func (s *State) SetStopped(exitCode int) { +func (s *State) SetStopped(exitStatus *execdriver.ExitStatus) { s.Lock() - s.setStopped(exitCode) + s.setStopped(exitStatus) s.Unlock() } -func (s *State) 
setStopped(exitCode int) { +func (s *State) setStopped(exitStatus *execdriver.ExitStatus) { s.Running = false s.Restarting = false s.Pid = 0 s.FinishedAt = time.Now().UTC() - s.ExitCode = exitCode + s.ExitCode = exitStatus.ExitCode + s.OOMKilled = false + if exitStatus.OOMKilled { + s.OOMKilled = true + } close(s.waitChan) // fire waiters for stop s.waitChan = make(chan struct{}) } // SetRestarting is when docker hanldes the auto restart of containers when they are // in the middle of a stop and being restarted again -func (s *State) SetRestarting(exitCode int) { +func (s *State) SetRestarting(exitStatus *execdriver.ExitStatus) { s.Lock() // we should consider the container running when it is restarting because of // all the checks in docker around rm/stop/etc @@ -175,7 +185,10 @@ func (s *State) SetRestarting(exitCode int) { s.Restarting = true s.Pid = 0 s.FinishedAt = time.Now().UTC() - s.ExitCode = exitCode + s.ExitCode = exitStatus.ExitCode + if exitStatus.OOMKilled { + s.OOMKilled = true + } close(s.waitChan) // fire waiters for stop s.waitChan = make(chan struct{}) s.Unlock() diff --git a/daemon/state_test.go b/daemon/state_test.go index 35524356a3..32c005cf2e 100644 --- a/daemon/state_test.go +++ b/daemon/state_test.go @@ -4,6 +4,8 @@ import ( "sync/atomic" "testing" "time" + + "github.com/docker/docker/daemon/execdriver" ) func TestStateRunStop(t *testing.T) { @@ -47,7 +49,7 @@ func TestStateRunStop(t *testing.T) { atomic.StoreInt64(&exit, int64(exitCode)) close(stopped) }() - s.SetStopped(i) + s.SetStopped(&execdriver.ExitStatus{i, false}) if s.IsRunning() { t.Fatal("State is running") } diff --git a/integration/runtime_test.go b/integration/runtime_test.go index 01097b156e..75f68d5c1b 100644 --- a/integration/runtime_test.go +++ b/integration/runtime_test.go @@ -18,6 +18,7 @@ import ( log "github.com/Sirupsen/logrus" "github.com/docker/docker/daemon" + "github.com/docker/docker/daemon/execdriver" "github.com/docker/docker/engine" "github.com/docker/docker/image" "github.com/docker/docker/nat" @@ -652,7 +653,7 @@ func TestRestore(t *testing.T) { if err := container3.Run(); err != nil { t.Fatal(err) } - container2.SetStopped(0) + container2.SetStopped(&execdriver.ExitStatus{0, false}) } func TestDefaultContainerName(t *testing.T) { From 46f29449777b9fb67a02e13fe0f12fc10c99ab4b Mon Sep 17 00:00:00 2001 From: Vishnu Kannan Date: Thu, 30 Oct 2014 23:06:54 +0000 Subject: [PATCH 280/592] Address comments. 
Docker-DCO-1.1-Signed-off-by: Vishnu Kannan (github: vishh) --- daemon/daemon.go | 2 +- daemon/execdriver/driver.go | 2 +- daemon/execdriver/lxc/driver.go | 14 ++++++------ daemon/execdriver/native/driver.go | 34 +++++++++++++----------------- daemon/monitor.go | 12 +++++------ daemon/state.go | 17 ++++----------- 6 files changed, 34 insertions(+), 47 deletions(-) diff --git a/daemon/daemon.go b/daemon/daemon.go index e04caa8ffe..88fb9fde66 100644 --- a/daemon/daemon.go +++ b/daemon/daemon.go @@ -991,7 +991,7 @@ func (daemon *Daemon) Diff(container *Container) (archive.Archive, error) { return daemon.driver.Diff(container.ID, initID) } -func (daemon *Daemon) Run(c *Container, pipes *execdriver.Pipes, startCallback execdriver.StartCallback) (*execdriver.ExitStatus, error) { +func (daemon *Daemon) Run(c *Container, pipes *execdriver.Pipes, startCallback execdriver.StartCallback) (execdriver.ExitStatus, error) { return daemon.execDriver.Run(c.command, pipes, startCallback) } diff --git a/daemon/execdriver/driver.go b/daemon/execdriver/driver.go index 6ed98b78ba..c3ec559c02 100644 --- a/daemon/execdriver/driver.go +++ b/daemon/execdriver/driver.go @@ -50,7 +50,7 @@ type ExitStatus struct { } type Driver interface { - Run(c *Command, pipes *Pipes, startCallback StartCallback) (int, error) // Run executes the process and blocks until the process exits and returns the exit code + Run(c *Command, pipes *Pipes, startCallback StartCallback) (ExitStatus, error) // Run executes the process and blocks until the process exits and returns the exit code // Exec executes the process in an existing container, blocks until the process exits and returns the exit code Exec(c *Command, processConfig *ProcessConfig, pipes *Pipes, startCallback StartCallback) (int, error) Kill(c *Command, sig int) error diff --git a/daemon/execdriver/lxc/driver.go b/daemon/execdriver/lxc/driver.go index 3d8aca0354..4628672af3 100644 --- a/daemon/execdriver/lxc/driver.go +++ b/daemon/execdriver/lxc/driver.go @@ -55,7 +55,7 @@ func (d *driver) Name() string { return fmt.Sprintf("%s-%s", DriverName, version) } -func (d *driver) Run(c *execdriver.Command, pipes *execdriver.Pipes, startCallback execdriver.StartCallback) (*execdriver.ExitStatus, error) { +func (d *driver) Run(c *execdriver.Command, pipes *execdriver.Pipes, startCallback execdriver.StartCallback) (execdriver.ExitStatus, error) { var ( term execdriver.Terminal err error @@ -76,11 +76,11 @@ func (d *driver) Run(c *execdriver.Command, pipes *execdriver.Pipes, startCallba }) if err := d.generateEnvConfig(c); err != nil { - return nil, err + return execdriver.ExitStatus{-1, false}, err } configPath, err := d.generateLXCConfig(c) if err != nil { - return nil, err + return execdriver.ExitStatus{-1, false}, err } params := []string{ "lxc-start", @@ -155,11 +155,11 @@ func (d *driver) Run(c *execdriver.Command, pipes *execdriver.Pipes, startCallba c.ProcessConfig.Args = append([]string{name}, arg...) 
if err := nodes.CreateDeviceNodes(c.Rootfs, c.AutoCreatedDevices); err != nil { - return nil, err + return execdriver.ExitStatus{-1, false}, err } if err := c.ProcessConfig.Start(); err != nil { - return nil, err + return execdriver.ExitStatus{-1, false}, err } var ( @@ -183,7 +183,7 @@ func (d *driver) Run(c *execdriver.Command, pipes *execdriver.Pipes, startCallba c.ProcessConfig.Process.Kill() c.ProcessConfig.Wait() } - return nil, err + return execdriver.ExitStatus{-1, false}, err } c.ContainerPid = pid @@ -194,7 +194,7 @@ func (d *driver) Run(c *execdriver.Command, pipes *execdriver.Pipes, startCallba <-waitLock - return &execdriver.ExitStatus{getExitCode(c), false}, waitErr + return execdriver.ExitStatus{getExitCode(c), false}, waitErr } /// Return the exit code of the process diff --git a/daemon/execdriver/native/driver.go b/daemon/execdriver/native/driver.go index a37eccbabe..01455a8101 100644 --- a/daemon/execdriver/native/driver.go +++ b/daemon/execdriver/native/driver.go @@ -70,11 +70,11 @@ type execOutput struct { err error } -func (d *driver) Run(c *execdriver.Command, pipes *execdriver.Pipes, startCallback execdriver.StartCallback) (*execdriver.ExitStatus, error) { +func (d *driver) Run(c *execdriver.Command, pipes *execdriver.Pipes, startCallback execdriver.StartCallback) (execdriver.ExitStatus, error) { // take the Command and populate the libcontainer.Config from it container, err := d.createContainer(c) if err != nil { - return nil, err + return execdriver.ExitStatus{-1, false}, err } var term execdriver.Terminal @@ -85,7 +85,7 @@ func (d *driver) Run(c *execdriver.Command, pipes *execdriver.Pipes, startCallba term, err = execdriver.NewStdConsole(&c.ProcessConfig, pipes) } if err != nil { - return nil, err + return execdriver.ExitStatus{-1, false}, err } c.ProcessConfig.Terminal = term @@ -102,16 +102,16 @@ func (d *driver) Run(c *execdriver.Command, pipes *execdriver.Pipes, startCallba ) if err := d.createContainerRoot(c.ID); err != nil { - return nil, err + return execdriver.ExitStatus{-1, false}, err } defer d.cleanContainer(c.ID) if err := d.writeContainerFile(container, c.ID); err != nil { - return nil, err + return execdriver.ExitStatus{-1, false}, err } - execOutputChan := make(chan execOutput, 0) - waitForStart := make(chan struct{}, 0) + execOutputChan := make(chan execOutput, 1) + waitForStart := make(chan struct{}) go func() { exitCode, err := namespaces.Exec(container, c.ProcessConfig.Stdin, c.ProcessConfig.Stdout, c.ProcessConfig.Stderr, c.ProcessConfig.Console, dataPath, args, func(container *libcontainer.Config, console, dataPath, init string, child *os.File, args []string) *exec.Cmd { @@ -146,26 +146,22 @@ func (d *driver) Run(c *execdriver.Command, pipes *execdriver.Pipes, startCallba select { case execOutput := <-execOutputChan: - return &execdriver.ExitStatus{execOutput.exitCode, false}, execOutput.err + return execdriver.ExitStatus{execOutput.exitCode, false}, execOutput.err case <-waitForStart: break } oomKill := false - go func() { - oomKillNotification, err := d.notifyOnOOM(container) - if err == nil { - if _, ok := <-oomKillNotification; ok { - oomKill = true - } - } else { - log.Infof("WARNING: Your kernel does not support OOM notifications: %s", err) - } - }() + oomKillNotification, err := d.notifyOnOOM(container) + if err == nil { + _, oomKill = <-oomKillNotification + } else { + log.Warnf("WARNING: Your kernel does not support OOM notifications: %s", err) + } // wait for the container to exit. 
execOutput := <-execOutputChan - return &execdriver.ExitStatus{execOutput.exitCode, oomKill}, execOutput.err + return execdriver.ExitStatus{execOutput.exitCode, oomKill}, execOutput.err } func (d *driver) Kill(p *execdriver.Command, sig int) error { diff --git a/daemon/monitor.go b/daemon/monitor.go index 9ef991eb66..12a6996330 100644 --- a/daemon/monitor.go +++ b/daemon/monitor.go @@ -100,7 +100,7 @@ func (m *containerMonitor) Close() error { func (m *containerMonitor) Start() error { var ( err error - exitStatus *execdriver.ExitStatus + exitStatus execdriver.ExitStatus // this variable indicates where we in execution flow: // before Run or after afterRun bool @@ -110,7 +110,7 @@ func (m *containerMonitor) Start() error { defer func() { if afterRun { m.container.Lock() - m.container.setStopped(exitStatus) + m.container.setStopped(&exitStatus) defer m.container.Unlock() } m.Close() @@ -138,7 +138,7 @@ func (m *containerMonitor) Start() error { // if we receive an internal error from the initial start of a container then lets // return it instead of entering the restart loop if m.container.RestartCount == 0 { - m.container.ExitCode = exitStatus + m.container.ExitCode = -1 m.resetContainer(false) return err @@ -153,7 +153,7 @@ func (m *containerMonitor) Start() error { m.resetMonitor(err == nil && exitStatus.ExitCode == 0) if m.shouldRestart(exitStatus.ExitCode) { - m.container.SetRestarting(exitStatus) + m.container.SetRestarting(&exitStatus) m.container.LogEvent("die") m.resetContainer(true) @@ -164,12 +164,12 @@ func (m *containerMonitor) Start() error { // we need to check this before reentering the loop because the waitForNextRestart could have // been terminated by a request from a user if m.shouldStop { - m.container.ExitCode = exitStatus + m.container.ExitCode = exitStatus.ExitCode return err } continue } - m.container.ExitCode = exitStatus + m.container.ExitCode = exitStatus.ExitCode m.container.LogEvent("die") m.resetContainer(true) return err diff --git a/daemon/state.go b/daemon/state.go index 282f5da930..3aba57090f 100644 --- a/daemon/state.go +++ b/daemon/state.go @@ -31,16 +31,12 @@ func NewState() *State { // String returns a human-readable description of the state func (s *State) String() string { - oomInfo := "" - if s.OOMKilled { - oomInfo = "possibly due to lack of memory" - } if s.Running { if s.Paused { return fmt.Sprintf("Up %s (Paused)", units.HumanDuration(time.Now().UTC().Sub(s.StartedAt))) } if s.Restarting { - return fmt.Sprintf("Restarting (%d) %s ago %s", s.ExitCode, units.HumanDuration(time.Now().UTC().Sub(s.FinishedAt)), oomInfo) + return fmt.Sprintf("Restarting (%d) %s ago", s.ExitCode, units.HumanDuration(time.Now().UTC().Sub(s.FinishedAt))) } return fmt.Sprintf("Up %s", units.HumanDuration(time.Now().UTC().Sub(s.StartedAt))) @@ -50,7 +46,7 @@ func (s *State) String() string { return "" } - return fmt.Sprintf("Exited (%d) %s ago %s", s.ExitCode, units.HumanDuration(time.Now().UTC().Sub(s.FinishedAt)), oomInfo) + return fmt.Sprintf("Exited (%d) %s ago", s.ExitCode, units.HumanDuration(time.Now().UTC().Sub(s.FinishedAt))) } // StateString returns a single string to describe state @@ -167,10 +163,7 @@ func (s *State) setStopped(exitStatus *execdriver.ExitStatus) { s.Pid = 0 s.FinishedAt = time.Now().UTC() s.ExitCode = exitStatus.ExitCode - s.OOMKilled = false - if exitStatus.OOMKilled { - s.OOMKilled = true - } + s.OOMKilled = exitStatus.OOMKilled close(s.waitChan) // fire waiters for stop s.waitChan = make(chan struct{}) } @@ -186,9 +179,7 @@ func (s *State) 
SetRestarting(exitStatus *execdriver.ExitStatus) { s.Pid = 0 s.FinishedAt = time.Now().UTC() s.ExitCode = exitStatus.ExitCode - if exitStatus.OOMKilled { - s.OOMKilled = true - } + s.OOMKilled = exitStatus.OOMKilled close(s.waitChan) // fire waiters for stop s.waitChan = make(chan struct{}) s.Unlock() From 6f8e42ac748e0c7597aeedf31adf81451adcda94 Mon Sep 17 00:00:00 2001 From: unclejack Date: Sat, 8 Nov 2014 16:27:25 +0200 Subject: [PATCH 281/592] execdriver/lxc: add comment to MAINTAINERS Docker-DCO-1.1-Signed-off-by: Cristian Staretu (github: unclejack) --- daemon/execdriver/lxc/MAINTAINERS | 1 + 1 file changed, 1 insertion(+) diff --git a/daemon/execdriver/lxc/MAINTAINERS b/daemon/execdriver/lxc/MAINTAINERS index e9753be645..ac8ff535ff 100644 --- a/daemon/execdriver/lxc/MAINTAINERS +++ b/daemon/execdriver/lxc/MAINTAINERS @@ -1 +1,2 @@ +# the LXC exec driver needs more maintainers and contributions Dinesh Subhraveti (@dineshs-altiscale) From ef150d023a4a2ea57dd2a8ebcb262a98c08feedf Mon Sep 17 00:00:00 2001 From: unclejack Date: Fri, 7 Nov 2014 17:20:16 +0200 Subject: [PATCH 282/592] pkg/namesgenerator: add Yeong-Sil Jang Docker-DCO-1.1-Signed-off-by: Cristian Staretu (github: unclejack) --- pkg/namesgenerator/names-generator.go | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/pkg/namesgenerator/names-generator.go b/pkg/namesgenerator/names-generator.go index 3e4e2d9f44..b641e915fc 100644 --- a/pkg/namesgenerator/names-generator.go +++ b/pkg/namesgenerator/names-generator.go @@ -76,7 +76,8 @@ var ( // http://en.wikipedia.org/wiki/John_Bardeen // http://en.wikipedia.org/wiki/Walter_Houser_Brattain // http://en.wikipedia.org/wiki/William_Shockley - right = [...]string{"albattani", "almeida", "archimedes", "ardinghelli", "babbage", "bardeen", "bartik", "bell", "blackwell", "bohr", "brattain", "brown", "carson", "colden", "cori", "curie", "darwin", "davinci", "einstein", "elion", "engelbart", "euclid", "fermat", "fermi", "feynman", "franklin", "galileo", "goldstine", "goodall", "hawking", "heisenberg", "hodgkin", "hoover", "hopper", "hypatia", "jones", "kirch", "kowalevski", "lalande", "leakey", "lovelace", "lumiere", "mayer", "mccarthy", "mcclintock", "mclean", "meitner", "mestorf", "morse", "newton", "nobel", "pare", "pasteur", "perlman", "pike", "poincare", "ptolemy", "ritchie", "rosalind", "sammet", "shockley", "sinoussi", "stallman", "tesla", "thompson", "torvalds", "turing", "wilson", "wozniak", "wright", "yalow", "yonath"} + // Yeong-Sil Jang was a Korean scientist and astronomer during the Joseon Dynasty; he invented the first metal printing press and water gauge. 
http://en.wikipedia.org/wiki/Jang_Yeong-sil + right = [...]string{"albattani", "almeida", "archimedes", "ardinghelli", "babbage", "bardeen", "bartik", "bell", "blackwell", "bohr", "brattain", "brown", "carson", "colden", "cori", "curie", "darwin", "davinci", "einstein", "elion", "engelbart", "euclid", "fermat", "fermi", "feynman", "franklin", "galileo", "goldstine", "goodall", "hawking", "heisenberg", "hodgkin", "hoover", "hopper", "hypatia", "jang", "jones", "kirch", "kowalevski", "lalande", "leakey", "lovelace", "lumiere", "mayer", "mccarthy", "mcclintock", "mclean", "meitner", "mestorf", "morse", "newton", "nobel", "pare", "pasteur", "perlman", "pike", "poincare", "ptolemy", "ritchie", "rosalind", "sammet", "shockley", "sinoussi", "stallman", "tesla", "thompson", "torvalds", "turing", "wilson", "wozniak", "wright", "yalow", "yonath"} ) func GetRandomName(retry int) string { From 6c79ee7d1e38f7b2f408105db9af7fcf9942f7ca Mon Sep 17 00:00:00 2001 From: xuzhaokui Date: Sun, 9 Nov 2014 22:12:54 +0800 Subject: [PATCH 283/592] Remove the redundant stripComment. Signed-off-by: xuzhaokui --- builder/parser/parser.go | 4 ---- 1 file changed, 4 deletions(-) diff --git a/builder/parser/parser.go b/builder/parser/parser.go index 6b0ab7ab8c..9e34b5920e 100644 --- a/builder/parser/parser.go +++ b/builder/parser/parser.go @@ -103,10 +103,6 @@ func Parse(rwc io.Reader) (*Node, error) { for scanner.Scan() { scannedLine := strings.TrimLeftFunc(scanner.Text(), unicode.IsSpace) - if stripComments(scannedLine) == "" { - continue - } - line, child, err := parseLine(scannedLine) if err != nil { return nil, err From 22afaa628f319c852c536824cdd18444ddf87665 Mon Sep 17 00:00:00 2001 From: Prasanna Gautam Date: Sun, 9 Nov 2014 10:16:47 -0500 Subject: [PATCH 284/592] Additional info for docker binary Signed-off-by: Prasanna Gautam I found that certain docker installations do not handle binding to the source directory quite right. Just writing it based on help from backjlack and tibor in IRC. --- docs/sources/contributing/devenvironment.md | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/docs/sources/contributing/devenvironment.md b/docs/sources/contributing/devenvironment.md index ee120a79c8..a5c2cc3057 100644 --- a/docs/sources/contributing/devenvironment.md +++ b/docs/sources/contributing/devenvironment.md @@ -63,7 +63,11 @@ To create the Docker binary, run this command: $ sudo make binary -This will create the Docker binary in `./bundles/-dev/binary/` +This will create the Docker binary in `./bundles/-dev/binary/`. If you do not see files in the `./bundles` directory in your host, your BINDDIR setting is not set quite right. You want to run the following command: + + $ sudo make BINDDIR=. binary + +If you are not in Linux - for example: OSX, you want to run `make cross` or `make BINDDIR=. cross`. ### Using your built Docker binary From 932cc230814ba642c493c77d05932f63c0ef3e7f Mon Sep 17 00:00:00 2001 From: Andrey Stolbovsky Date: Sun, 9 Nov 2014 23:23:45 +0300 Subject: [PATCH 285/592] Fix typo in Docker Links chapter of User Guide --- docs/sources/userguide/dockerlinks.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/sources/userguide/dockerlinks.md b/docs/sources/userguide/dockerlinks.md index fa665b7266..e2228cef00 100644 --- a/docs/sources/userguide/dockerlinks.md +++ b/docs/sources/userguide/dockerlinks.md @@ -195,7 +195,7 @@ port. Where `` is the alias name specified in the `--link` parameter is either `TCP` or `UDP`. The format of the URL will be: `://:` (e.g. 
`tcp://172.17.0.82:8080`). This URL will then be -split into the following 3 environment variables for convinience: +split into the following 3 environment variables for convenience: * `_PORT___ADDR` will contain just the IP address from the URL (e.g. `WEBDB_PORT_8080_TCP_ADDR=172.17.0.82`). * `_PORT___PORT` will contain just the port number From 543127e211ce60b53d554199fdbbd3fe7b85c1c6 Mon Sep 17 00:00:00 2001 From: Solomon Hykes Date: Sun, 9 Nov 2014 15:16:19 +0000 Subject: [PATCH 286/592] Improve "security reports" section in contribution docs. Signed-off-by: Solomon Hykes --- CONTRIBUTING.md | 16 ++++++++++++---- 1 file changed, 12 insertions(+), 4 deletions(-) diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index de6434c221..93ff8d8799 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -6,17 +6,25 @@ feels wrong or incomplete. ## Topics -* [Security Reports](#security-reports) +* [Reporting Security Issues](#reporting-security-issues) * [Design and Cleanup Proposals](#design-and-cleanup-proposals) * [Reporting Issues](#reporting-issues) * [Build Environment](#build-environment) * [Contribution Guidelines](#contribution-guidelines) * [Community Guidelines](#docker-community-guidelines) -## Security Reports +## Reporting Security Issues -Please **DO NOT** file an issue for security related issues. Please send your -reports to [security@docker.com](mailto:security@docker.com) instead. +The Docker maintainers take security very seriously. If you discover a security issue, +please bring it to their attention right away! + +Please send your report privately to [security@docker.com](mailto:security@docker.com), +please **DO NOT** file a public issue. + +Security reports are greatly appreciated and we will publicly thank you for it. We also +like to send gifts - if you're into Docker shwag make sure to let us know :) +We currently do not offer a paid security bounty program, but are not ruling it out in +the future. ## Design and Cleanup Proposals From 2d688b0a787d6f38de244ed1294b0ed91c3533ba Mon Sep 17 00:00:00 2001 From: Solomon Hykes Date: Sun, 9 Nov 2014 15:17:23 +0000 Subject: [PATCH 287/592] In contribution docs, emphasize that bug reports are appreciated! Signed-off-by: Solomon Hykes --- CONTRIBUTING.md | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index 93ff8d8799..29a3ce1404 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -48,6 +48,10 @@ When considering a cleanup task, we are looking for: ## Reporting Issues +A great way to contribute to the project is to send a detailed report when you +encounter an issue. We always appreciate a well-written, thorough bug report, +and will thank you for it! + When reporting [issues](https://github.com/docker/docker/issues) on GitHub please include your host OS (Ubuntu 12.04, Fedora 19, etc). 
Please include: From 4cf0aa711edf3692291766c1d84ec550431b79b2 Mon Sep 17 00:00:00 2001 From: Solomon Hykes Date: Sun, 9 Nov 2014 15:33:19 +0000 Subject: [PATCH 288/592] Move GOVERNANCE.md to hack/ Signed-off-by: Solomon Hykes --- GOVERNANCE.md => hack/GOVERNANCE.md | 0 1 file changed, 0 insertions(+), 0 deletions(-) rename GOVERNANCE.md => hack/GOVERNANCE.md (100%) diff --git a/GOVERNANCE.md b/hack/GOVERNANCE.md similarity index 100% rename from GOVERNANCE.md rename to hack/GOVERNANCE.md From 32e61b8f5c3f855f5e204064be1aea6a877dda43 Mon Sep 17 00:00:00 2001 From: Solomon Hykes Date: Sun, 9 Nov 2014 21:29:30 +0000 Subject: [PATCH 289/592] Move 'hack' to the less confusing 'project' We might want to break it up into smaller pieces (eg. tools in one place, documents in another) but let's worry about that later. Signed-off-by: Solomon Hykes --- hack | 1 + {hack => project}/CONTRIBUTORS.md | 0 {hack => project}/GOVERNANCE.md | 0 {hack => project}/MAINTAINERS | 0 {hack => project}/MAINTAINERS.md | 0 {hack => project}/PACKAGERS.md | 0 {hack => project}/PRINCIPLES.md | 0 {hack => project}/README.md | 0 {hack => project}/RELEASE-CHECKLIST.md | 0 {hack => project}/ROADMAP.md | 0 {hack => project}/allmaintainers.sh | 0 {hack => project}/dind | 0 {hack => project}/generate-authors.sh | 0 {hack => project}/getmaintainer.sh | 0 {hack => project}/install.sh | 0 {hack => project}/make.sh | 0 {hack => project}/make/.ensure-busybox | 0 {hack => project}/make/.ensure-scratch | 0 {hack => project}/make/.go-compile-test-dir | 0 {hack => project}/make/.validate | 0 {hack => project}/make/README.md | 0 {hack => project}/make/binary | 0 {hack => project}/make/cover | 0 {hack => project}/make/cross | 0 {hack => project}/make/dynbinary | 0 {hack => project}/make/dyntest-integration | 0 {hack => project}/make/dyntest-unit | 0 {hack => project}/make/test-integration | 0 {hack => project}/make/test-integration-cli | 0 {hack => project}/make/test-unit | 0 {hack => project}/make/tgz | 0 {hack => project}/make/ubuntu | 0 {hack => project}/make/validate-dco | 0 {hack => project}/make/validate-gofmt | 0 {hack => project}/release.sh | 0 {hack => project}/stats.sh | 0 {hack => project}/vendor.sh | 0 37 files changed, 1 insertion(+) create mode 120000 hack rename {hack => project}/CONTRIBUTORS.md (100%) rename {hack => project}/GOVERNANCE.md (100%) rename {hack => project}/MAINTAINERS (100%) rename {hack => project}/MAINTAINERS.md (100%) rename {hack => project}/PACKAGERS.md (100%) rename {hack => project}/PRINCIPLES.md (100%) rename {hack => project}/README.md (100%) rename {hack => project}/RELEASE-CHECKLIST.md (100%) rename {hack => project}/ROADMAP.md (100%) rename {hack => project}/allmaintainers.sh (100%) rename {hack => project}/dind (100%) rename {hack => project}/generate-authors.sh (100%) rename {hack => project}/getmaintainer.sh (100%) rename {hack => project}/install.sh (100%) rename {hack => project}/make.sh (100%) rename {hack => project}/make/.ensure-busybox (100%) rename {hack => project}/make/.ensure-scratch (100%) rename {hack => project}/make/.go-compile-test-dir (100%) rename {hack => project}/make/.validate (100%) rename {hack => project}/make/README.md (100%) rename {hack => project}/make/binary (100%) rename {hack => project}/make/cover (100%) rename {hack => project}/make/cross (100%) rename {hack => project}/make/dynbinary (100%) rename {hack => project}/make/dyntest-integration (100%) rename {hack => project}/make/dyntest-unit (100%) rename {hack => project}/make/test-integration (100%) rename {hack 
=> project}/make/test-integration-cli (100%) rename {hack => project}/make/test-unit (100%) rename {hack => project}/make/tgz (100%) rename {hack => project}/make/ubuntu (100%) rename {hack => project}/make/validate-dco (100%) rename {hack => project}/make/validate-gofmt (100%) rename {hack => project}/release.sh (100%) rename {hack => project}/stats.sh (100%) rename {hack => project}/vendor.sh (100%) diff --git a/hack b/hack new file mode 120000 index 0000000000..e3f094ee63 --- /dev/null +++ b/hack @@ -0,0 +1 @@ +project \ No newline at end of file diff --git a/hack/CONTRIBUTORS.md b/project/CONTRIBUTORS.md similarity index 100% rename from hack/CONTRIBUTORS.md rename to project/CONTRIBUTORS.md diff --git a/hack/GOVERNANCE.md b/project/GOVERNANCE.md similarity index 100% rename from hack/GOVERNANCE.md rename to project/GOVERNANCE.md diff --git a/hack/MAINTAINERS b/project/MAINTAINERS similarity index 100% rename from hack/MAINTAINERS rename to project/MAINTAINERS diff --git a/hack/MAINTAINERS.md b/project/MAINTAINERS.md similarity index 100% rename from hack/MAINTAINERS.md rename to project/MAINTAINERS.md diff --git a/hack/PACKAGERS.md b/project/PACKAGERS.md similarity index 100% rename from hack/PACKAGERS.md rename to project/PACKAGERS.md diff --git a/hack/PRINCIPLES.md b/project/PRINCIPLES.md similarity index 100% rename from hack/PRINCIPLES.md rename to project/PRINCIPLES.md diff --git a/hack/README.md b/project/README.md similarity index 100% rename from hack/README.md rename to project/README.md diff --git a/hack/RELEASE-CHECKLIST.md b/project/RELEASE-CHECKLIST.md similarity index 100% rename from hack/RELEASE-CHECKLIST.md rename to project/RELEASE-CHECKLIST.md diff --git a/hack/ROADMAP.md b/project/ROADMAP.md similarity index 100% rename from hack/ROADMAP.md rename to project/ROADMAP.md diff --git a/hack/allmaintainers.sh b/project/allmaintainers.sh similarity index 100% rename from hack/allmaintainers.sh rename to project/allmaintainers.sh diff --git a/hack/dind b/project/dind similarity index 100% rename from hack/dind rename to project/dind diff --git a/hack/generate-authors.sh b/project/generate-authors.sh similarity index 100% rename from hack/generate-authors.sh rename to project/generate-authors.sh diff --git a/hack/getmaintainer.sh b/project/getmaintainer.sh similarity index 100% rename from hack/getmaintainer.sh rename to project/getmaintainer.sh diff --git a/hack/install.sh b/project/install.sh similarity index 100% rename from hack/install.sh rename to project/install.sh diff --git a/hack/make.sh b/project/make.sh similarity index 100% rename from hack/make.sh rename to project/make.sh diff --git a/hack/make/.ensure-busybox b/project/make/.ensure-busybox similarity index 100% rename from hack/make/.ensure-busybox rename to project/make/.ensure-busybox diff --git a/hack/make/.ensure-scratch b/project/make/.ensure-scratch similarity index 100% rename from hack/make/.ensure-scratch rename to project/make/.ensure-scratch diff --git a/hack/make/.go-compile-test-dir b/project/make/.go-compile-test-dir similarity index 100% rename from hack/make/.go-compile-test-dir rename to project/make/.go-compile-test-dir diff --git a/hack/make/.validate b/project/make/.validate similarity index 100% rename from hack/make/.validate rename to project/make/.validate diff --git a/hack/make/README.md b/project/make/README.md similarity index 100% rename from hack/make/README.md rename to project/make/README.md diff --git a/hack/make/binary b/project/make/binary similarity index 100% rename from 
hack/make/binary rename to project/make/binary diff --git a/hack/make/cover b/project/make/cover similarity index 100% rename from hack/make/cover rename to project/make/cover diff --git a/hack/make/cross b/project/make/cross similarity index 100% rename from hack/make/cross rename to project/make/cross diff --git a/hack/make/dynbinary b/project/make/dynbinary similarity index 100% rename from hack/make/dynbinary rename to project/make/dynbinary diff --git a/hack/make/dyntest-integration b/project/make/dyntest-integration similarity index 100% rename from hack/make/dyntest-integration rename to project/make/dyntest-integration diff --git a/hack/make/dyntest-unit b/project/make/dyntest-unit similarity index 100% rename from hack/make/dyntest-unit rename to project/make/dyntest-unit diff --git a/hack/make/test-integration b/project/make/test-integration similarity index 100% rename from hack/make/test-integration rename to project/make/test-integration diff --git a/hack/make/test-integration-cli b/project/make/test-integration-cli similarity index 100% rename from hack/make/test-integration-cli rename to project/make/test-integration-cli diff --git a/hack/make/test-unit b/project/make/test-unit similarity index 100% rename from hack/make/test-unit rename to project/make/test-unit diff --git a/hack/make/tgz b/project/make/tgz similarity index 100% rename from hack/make/tgz rename to project/make/tgz diff --git a/hack/make/ubuntu b/project/make/ubuntu similarity index 100% rename from hack/make/ubuntu rename to project/make/ubuntu diff --git a/hack/make/validate-dco b/project/make/validate-dco similarity index 100% rename from hack/make/validate-dco rename to project/make/validate-dco diff --git a/hack/make/validate-gofmt b/project/make/validate-gofmt similarity index 100% rename from hack/make/validate-gofmt rename to project/make/validate-gofmt diff --git a/hack/release.sh b/project/release.sh similarity index 100% rename from hack/release.sh rename to project/release.sh diff --git a/hack/stats.sh b/project/stats.sh similarity index 100% rename from hack/stats.sh rename to project/stats.sh diff --git a/hack/vendor.sh b/project/vendor.sh similarity index 100% rename from hack/vendor.sh rename to project/vendor.sh From b95f9c10ff2bd05fc2d30b30bebb8174883c683a Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Andreas=20K=C3=B6hler?= Date: Mon, 10 Nov 2014 01:00:05 +0100 Subject: [PATCH 290/592] Fix mkdir typo in dockerfile_best-practices.md. 
--- docs/sources/articles/dockerfile_best-practices.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/sources/articles/dockerfile_best-practices.md b/docs/sources/articles/dockerfile_best-practices.md index 3a320efb2c..85095f1c03 100644 --- a/docs/sources/articles/dockerfile_best-practices.md +++ b/docs/sources/articles/dockerfile_best-practices.md @@ -281,7 +281,7 @@ things like: And instead, do something like: - RUN mdkir -p /usr/src/things \ + RUN mkdir -p /usr/src/things \ && curl -SL http://example.com/big.tar.gz \ | tar -xJC /usr/src/things \ && make -C /usr/src/things all From 3600bcb53127c7d9f67d639c1f4e61c460b1d605 Mon Sep 17 00:00:00 2001 From: "gautam, prasanna" Date: Sun, 9 Nov 2014 19:52:38 -0500 Subject: [PATCH 291/592] limiting to 80 chars Signed-off-by: Prasanna Gautam < prasannagautam@gmail.com > --- docs/sources/contributing/devenvironment.md | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/docs/sources/contributing/devenvironment.md b/docs/sources/contributing/devenvironment.md index a5c2cc3057..0f6f5f4bc1 100644 --- a/docs/sources/contributing/devenvironment.md +++ b/docs/sources/contributing/devenvironment.md @@ -63,7 +63,8 @@ To create the Docker binary, run this command: $ sudo make binary -This will create the Docker binary in `./bundles/-dev/binary/`. If you do not see files in the `./bundles` directory in your host, your BINDDIR setting is not set quite right. You want to run the following command: +This will create the Docker binary in `./bundles/-dev/binary/`. If you do not see files in the `./bundles` directory in your host, +your `BINDDIR` setting is not set quite right. You want to run the following command: $ sudo make BINDDIR=. binary From b92d7cd60db0a03b0ace62a2a6c11df9f64f4256 Mon Sep 17 00:00:00 2001 From: tobe Date: Mon, 10 Nov 2014 20:23:46 +0800 Subject: [PATCH 292/592] Remove the redundant include_search in mkdocs.yml --- docs/mkdocs.yml | 1 - 1 file changed, 1 deletion(-) diff --git a/docs/mkdocs.yml b/docs/mkdocs.yml index faa758546b..f5ea845e95 100644 --- a/docs/mkdocs.yml +++ b/docs/mkdocs.yml @@ -17,7 +17,6 @@ use_absolute_urls: true # theme: docker theme_dir: ./theme/mkdocs/ theme_center_lead: false -include_search: true copyright: Copyright © 2014, Docker, Inc. google_analytics: ['UA-6096819-11', 'docker.io'] From fb62e184412b6d2bf38975a7051738f05b1f413d Mon Sep 17 00:00:00 2001 From: Thomas Orozco Date: Thu, 23 Oct 2014 14:50:06 +0200 Subject: [PATCH 293/592] Fix: Failed Start breaks VolumesFrom Running parseVolumesFromSpec on all VolumesFrom specs before initialize any mounts endures that we don't leave container.Volumes in an inconsistent (partially initialized) if one of out mount groups is not available (e.g. the container we're trying to mount from does not exist). Keeping container.Volumes in a consistent state ensures that next time we Start() the container, it'll run prepareVolumes() again. The attached test demonstrates that when a container fails to start due to a missing container specified in VolumesFrom, it "remembers" a Volume that worked. 
Fixes: #8726 Signed-off-by: Thomas Orozco --- daemon/volumes.go | 9 ++++++-- integration-cli/docker_cli_start_test.go | 28 ++++++++++++++++++++++++ 2 files changed, 35 insertions(+), 2 deletions(-) diff --git a/daemon/volumes.go b/daemon/volumes.go index 0fd54144ed..6523dae853 100644 --- a/daemon/volumes.go +++ b/daemon/volumes.go @@ -204,15 +204,20 @@ func parseBindMountSpec(spec string) (string, string, bool, error) { func (container *Container) applyVolumesFrom() error { volumesFrom := container.hostConfig.VolumesFrom + mountGroups := make([]map[string]*Mount, 0, len(volumesFrom)) + for _, spec := range volumesFrom { - mounts, err := parseVolumesFromSpec(container.daemon, spec) + mountGroup, err := parseVolumesFromSpec(container.daemon, spec) if err != nil { return err } + mountGroups = append(mountGroups, mountGroup) + } + for _, mounts := range mountGroups { for _, mnt := range mounts { mnt.container = container - if err = mnt.initialize(); err != nil { + if err := mnt.initialize(); err != nil { return err } } diff --git a/integration-cli/docker_cli_start_test.go b/integration-cli/docker_cli_start_test.go index 72c0bfc4ef..6af5f43f54 100644 --- a/integration-cli/docker_cli_start_test.go +++ b/integration-cli/docker_cli_start_test.go @@ -109,3 +109,31 @@ func TestStartRecordError(t *testing.T) { logDone("start - set state error when start fails") } + +// gh#8726: a failed Start() breaks --volumes-from on subsequent Start()'s +func TestStartVolumesFromFailsCleanly(t *testing.T) { + defer deleteAllContainers() + + // Create the first data volume + cmd(t, "run", "-d", "--name", "data_before", "-v", "/foo", "busybox") + + // Expect this to fail because the data test after contaienr doesn't exist yet + if _, err := runCommand(exec.Command(dockerBinary, "run", "-d", "--name", "consumer", "--volumes-from", "data_before", "--volumes-from", "data_after", "busybox")); err == nil { + t.Fatal("Expected error but got none") + } + + // Create the second data volume + cmd(t, "run", "-d", "--name", "data_after", "-v", "/bar", "busybox") + + // Now, all the volumes should be there + cmd(t, "start", "consumer") + + // Check that we have the volumes we want + out, _, _ := cmd(t, "inspect", "--format='{{ len .Volumes }}'", "consumer") + n_volumes := strings.Trim(out, " \r\n'") + if n_volumes != "2" { + t.Fatalf("Missing volumes: expected 2, got %s", n_volumes) + } + + logDone("start - missing containers in --volumes-from did not affect subsequent runs") +} From 271f54aea375158b808aa50db58711e7c1d3eef5 Mon Sep 17 00:00:00 2001 From: Brian Goff Date: Mon, 10 Nov 2014 12:24:52 -0500 Subject: [PATCH 294/592] Add missing docs for #8509 Signed-off-by: Brian Goff --- docs/sources/reference/api/docker_remote_api.md | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/docs/sources/reference/api/docker_remote_api.md b/docs/sources/reference/api/docker_remote_api.md index 1db0f76928..5813091411 100644 --- a/docs/sources/reference/api/docker_remote_api.md +++ b/docs/sources/reference/api/docker_remote_api.md @@ -53,6 +53,7 @@ You can still call an old version of the API using total memory available (`MemTotal`). `POST /containers/create` + **New!** You can set the new container's MAC address explicitly. @@ -62,6 +63,11 @@ You can set the new container's MAC address explicitly. Passing the container's `HostConfig` on start is now deprecated. You should set this when creating the container. +`POST /containers/(id)/copy` + +**New!** +You can now copy data which is contained in a volume. 
+ ## v1.15 ### Full Documentation From 67ca7415e71a3e6389b883f746a1a15580deb892 Mon Sep 17 00:00:00 2001 From: Fred Lifton Date: Mon, 10 Nov 2014 11:33:16 -0800 Subject: [PATCH 295/592] Add link to Docs README. Linking to the docs readme to help would-be contributors discover the style guide and docs contribution guidelines. Docker-DCO-1.1-Signed-off-by: Fred Lifton (github: fredlf) --- README.md | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/README.md b/README.md index 2d143a6d6e..7aa7795464 100644 --- a/README.md +++ b/README.md @@ -181,10 +181,10 @@ Contributing to Docker [![Build Status](https://ci.dockerproject.com/github.com/docker/docker/status.svg?branch=master)](https://ci.dockerproject.com/github.com/docker/docker) Want to hack on Docker? Awesome! There are instructions to get you -started [here](CONTRIBUTING.md). +started [here](CONTRIBUTING.md). If you'd like to contribute to the +documentation, please take a look at this [README.md](https://github.com/docker/docker/blob/master/docs/README.md). -They are probably not perfect, please let us know if anything feels -wrong or incomplete. +These instructions are probably not perfect, please let us know if anything feels wrong or incomplete. ### Legal From dacae746b70f50dd1f3ea9d40834386b96b6c200 Mon Sep 17 00:00:00 2001 From: Brian Goff Date: Fri, 7 Nov 2014 15:21:19 -0500 Subject: [PATCH 296/592] Cleanup api server creation Current implementation is hard to reason about because of trying to mix unix/tcp server implementations, even though they are quite different. This cleans that up. Also makes it possible to create and manage a new API server easily, e.g. for adding an introspection socket to a container. Built in such a way as to allow a non-HTTP server to work as well, such as libchan. Signed-off-by: Brian Goff --- api/server/server.go | 213 +++++++++++++++++++++++++++---------------- 1 file changed, 134 insertions(+), 79 deletions(-) diff --git a/api/server/server.go b/api/server/server.go index d77a6c22a2..13affc334a 100644 --- a/api/server/server.go +++ b/api/server/server.go @@ -3,8 +3,7 @@ package server import ( "bufio" "bytes" - "crypto/tls" - "crypto/x509" + "encoding/base64" "encoding/json" "expvar" @@ -19,6 +18,9 @@ import ( "strings" "syscall" + "crypto/tls" + "crypto/x509" + "code.google.com/p/go.net/websocket" "github.com/docker/libcontainer/user" "github.com/gorilla/mux" @@ -39,6 +41,18 @@ var ( activationLock chan struct{} ) +type HttpServer struct { + srv *http.Server + l net.Listener +} + +func (s *HttpServer) Serve() error { + return s.srv.Serve(s.l) +} +func (s *HttpServer) Close() error { + return s.l.Close() +} + type HttpApiFunc func(eng *engine.Engine, version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error func hijackServer(w http.ResponseWriter) (io.ReadCloser, io.Writer, error) { @@ -1334,9 +1348,14 @@ func ServeRequest(eng *engine.Engine, apiversion version.Version, w http.Respons return nil } -// ServeFD creates an http.Server and sets it up to serve given a socket activated +// serveFd creates an http.Server and sets it up to serve given a socket activated // argument. 
-func ServeFd(addr string, handle http.Handler) error { +func serveFd(addr string, job *engine.Job) error { + r, err := createRouter(job.Eng, job.GetenvBool("Logging"), job.GetenvBool("EnableCors"), job.Getenv("Version")) + if err != nil { + return err + } + ls, e := systemd.ListenFD(addr) if e != nil { return e @@ -1354,7 +1373,7 @@ func ServeFd(addr string, handle http.Handler) error { for i := range ls { listener := ls[i] go func() { - httpSrv := http.Server{Handler: handle} + httpSrv := http.Server{Handler: r} chErrors <- httpSrv.Serve(listener) }() } @@ -1382,6 +1401,41 @@ func lookupGidByName(nameOrGid string) (int, error) { return -1, fmt.Errorf("Group %s not found", nameOrGid) } +func setupTls(cert, key, ca string, l net.Listener) (net.Listener, error) { + tlsCert, err := tls.LoadX509KeyPair(cert, key) + if err != nil { + return nil, fmt.Errorf("Couldn't load X509 key pair (%s, %s): %s. Key encrypted?", + cert, key, err) + } + tlsConfig := &tls.Config{ + NextProtos: []string{"http/1.1"}, + Certificates: []tls.Certificate{tlsCert}, + // Avoid fallback on insecure SSL protocols + MinVersion: tls.VersionTLS10, + } + + if ca != "" { + certPool := x509.NewCertPool() + file, err := ioutil.ReadFile(ca) + if err != nil { + return nil, fmt.Errorf("Couldn't read CA certificate: %s", err) + } + certPool.AppendCertsFromPEM(file) + tlsConfig.ClientAuth = tls.RequireAndVerifyClientCert + tlsConfig.ClientCAs = certPool + } + + return tls.NewListener(l, tlsConfig), nil +} + +func newListener(proto, addr string, bufferRequests bool) (net.Listener, error) { + if bufferRequests { + return listenbuffer.NewListenBuffer(proto, addr, activationLock) + } + + return net.Listen(proto, addr) +} + func changeGroup(addr string, nameOrGid string) error { gid, err := lookupGidByName(nameOrGid) if err != nil { @@ -1392,99 +1446,95 @@ func changeGroup(addr string, nameOrGid string) error { return os.Chown(addr, 0, gid) } -// ListenAndServe sets up the required http.Server and gets it listening for -// each addr passed in and does protocol specific checking. 
-func ListenAndServe(proto, addr string, job *engine.Job) error { - var l net.Listener +func setSocketGroup(addr, group string) error { + if group == "" { + return nil + } + + if err := changeGroup(addr, group); err != nil { + if group != "docker" { + return err + } + log.Debugf("Warning: could not chgrp %s to docker: %v", addr, err) + } + + return nil +} + +func setupUnixHttp(addr string, job *engine.Job) (*HttpServer, error) { r, err := createRouter(job.Eng, job.GetenvBool("Logging"), job.GetenvBool("EnableCors"), job.Getenv("Version")) if err != nil { - return err + return nil, err } - if proto == "fd" { - return ServeFd(addr, r) + if err := syscall.Unlink(addr); err != nil && !os.IsNotExist(err) { + return nil, err } + mask := syscall.Umask(0777) + defer syscall.Umask(mask) - if proto == "unix" { - if err := syscall.Unlink(addr); err != nil && !os.IsNotExist(err) { - return err - } - } - - var oldmask int - if proto == "unix" { - oldmask = syscall.Umask(0777) - } - - if job.GetenvBool("BufferRequests") { - l, err = listenbuffer.NewListenBuffer(proto, addr, activationLock) - } else { - l, err = net.Listen(proto, addr) - } - - if proto == "unix" { - syscall.Umask(oldmask) - } + l, err := newListener("unix", addr, job.GetenvBool("BufferRequests")) if err != nil { - return err + return nil, err } - if proto != "unix" && (job.GetenvBool("Tls") || job.GetenvBool("TlsVerify")) { - tlsCert := job.Getenv("TlsCert") - tlsKey := job.Getenv("TlsKey") - cert, err := tls.LoadX509KeyPair(tlsCert, tlsKey) - if err != nil { - return fmt.Errorf("Couldn't load X509 key pair (%s, %s): %s. Key encrypted?", - tlsCert, tlsKey, err) - } - tlsConfig := &tls.Config{ - NextProtos: []string{"http/1.1"}, - Certificates: []tls.Certificate{cert}, - // Avoid fallback on insecure SSL protocols - MinVersion: tls.VersionTLS10, - } + if err := setSocketGroup(addr, job.Getenv("SocketGroup")); err != nil { + return nil, err + } + + if err := os.Chmod(addr, 0660); err != nil { + return nil, err + } + + return &HttpServer{&http.Server{Addr: addr, Handler: r}, l}, nil +} + +func setupTcpHttp(addr string, job *engine.Job) (*HttpServer, error) { + if !strings.HasPrefix(addr, "127.0.0.1") && !job.GetenvBool("TlsVerify") { + log.Infof("/!\\ DON'T BIND ON ANOTHER IP ADDRESS THAN 127.0.0.1 IF YOU DON'T KNOW WHAT YOU'RE DOING /!\\") + } + + r, err := createRouter(job.Eng, job.GetenvBool("Logging"), job.GetenvBool("EnableCors"), job.Getenv("Version")) + if err != nil { + return nil, err + } + + l, err := newListener("tcp", addr, job.GetenvBool("BufferRequests")) + if err != nil { + return nil, err + } + + if job.GetenvBool("Tls") || job.GetenvBool("TlsVerify") { + var tlsCa string if job.GetenvBool("TlsVerify") { - certPool := x509.NewCertPool() - file, err := ioutil.ReadFile(job.Getenv("TlsCa")) - if err != nil { - return fmt.Errorf("Couldn't read CA certificate: %s", err) - } - certPool.AppendCertsFromPEM(file) - - tlsConfig.ClientAuth = tls.RequireAndVerifyClientCert - tlsConfig.ClientCAs = certPool + tlsCa = job.Getenv("TlsCa") + } + l, err = setupTls(job.Getenv("TlsCert"), job.Getenv("TlsKey"), tlsCa, l) + if err != nil { + return nil, err } - l = tls.NewListener(l, tlsConfig) } + return &HttpServer{&http.Server{Addr: addr, Handler: r}, l}, nil +} +// NewServer sets up the required Server and does protocol specific checking. 
+func NewServer(proto, addr string, job *engine.Job) (Server, error) { // Basic error and sanity checking switch proto { + case "fd": + return nil, serveFd(addr, job) case "tcp": - if !strings.HasPrefix(addr, "127.0.0.1") && !job.GetenvBool("TlsVerify") { - log.Infof("/!\\ DON'T BIND ON ANOTHER IP ADDRESS THAN 127.0.0.1 IF YOU DON'T KNOW WHAT YOU'RE DOING /!\\") - } + return setupTcpHttp(addr, job) case "unix": - socketGroup := job.Getenv("SocketGroup") - if socketGroup != "" { - if err := changeGroup(addr, socketGroup); err != nil { - if socketGroup == "docker" { - // if the user hasn't explicitly specified the group ownership, don't fail on errors. - log.Debugf("Warning: could not chgrp %s to docker: %s", addr, err.Error()) - } else { - return err - } - } - - } - if err := os.Chmod(addr, 0660); err != nil { - return err - } + return setupUnixHttp(addr, job) default: - return fmt.Errorf("Invalid protocol format.") + return nil, fmt.Errorf("Invalid protocol format.") } +} - httpSrv := http.Server{Addr: addr, Handler: r} - return httpSrv.Serve(l) +type Server interface { + Serve() error + Close() error } // ServeApi loops through all of the protocols sent in to docker and spawns @@ -1506,7 +1556,12 @@ func ServeApi(job *engine.Job) engine.Status { } go func() { log.Infof("Listening for HTTP on %s (%s)", protoAddrParts[0], protoAddrParts[1]) - chErrors <- ListenAndServe(protoAddrParts[0], protoAddrParts[1], job) + srv, err := NewServer(protoAddrParts[0], protoAddrParts[1], job) + if err != nil { + chErrors <- err + return + } + chErrors <- srv.Serve() }() } From cf19df4683f416e4d536e772dc1ff02334794264 Mon Sep 17 00:00:00 2001 From: "gautam, prasanna" Date: Mon, 10 Nov 2014 18:04:23 -0500 Subject: [PATCH 297/592] fixing punctuation and grammar Signed-off-by: Prasanna Gautam --- docs/sources/contributing/devenvironment.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/sources/contributing/devenvironment.md b/docs/sources/contributing/devenvironment.md index 0f6f5f4bc1..70eafbbde4 100644 --- a/docs/sources/contributing/devenvironment.md +++ b/docs/sources/contributing/devenvironment.md @@ -68,7 +68,7 @@ your `BINDDIR` setting is not set quite right. You want to run the following com $ sudo make BINDDIR=. binary -If you are not in Linux - for example: OSX, you want to run `make cross` or `make BINDDIR=. cross`. +If you are on a non-Linux platform, e.g., OSX, you'll want to run `make cross` or `make BINDDIR=. cross`. ### Using your built Docker binary From 48424df68539965fec3056f4c349383948890896 Mon Sep 17 00:00:00 2001 From: Prasanna Gautam Date: Mon, 10 Nov 2014 20:05:35 -0500 Subject: [PATCH 298/592] properly line wrapping Signed-off-by: Prasanna Gautam --- docs/sources/contributing/devenvironment.md | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) diff --git a/docs/sources/contributing/devenvironment.md b/docs/sources/contributing/devenvironment.md index 70eafbbde4..f39dec6708 100644 --- a/docs/sources/contributing/devenvironment.md +++ b/docs/sources/contributing/devenvironment.md @@ -63,12 +63,14 @@ To create the Docker binary, run this command: $ sudo make binary -This will create the Docker binary in `./bundles/-dev/binary/`. If you do not see files in the `./bundles` directory in your host, -your `BINDDIR` setting is not set quite right. You want to run the following command: +This will create the Docker binary in `./bundles/-dev/binary/`. 
If you +do not see files in the `./bundles` directory in your host, your `BINDDIR` +setting is not set quite right. You want to run the following command: $ sudo make BINDDIR=. binary -If you are on a non-Linux platform, e.g., OSX, you'll want to run `make cross` or `make BINDDIR=. cross`. +If you are on a non-Linux platform, e.g., OSX, you'll want to run `make cross` +or `make BINDDIR=. cross`. ### Using your built Docker binary From d94de133f4900509821cafb5208d91443119f809 Mon Sep 17 00:00:00 2001 From: Scott Johnston Date: Sun, 9 Nov 2014 14:27:59 -0800 Subject: [PATCH 299/592] Update ROADMAP.md based on 10/21/2014 DGAB meeting The Docker Governance Advisory Board (DGAB) met for the first time Tue 10/21/2014. Among other topics, the DGAB reviewed and refreshed the Docker Project Statement of Direction. (Sven added from the Pull Req #9055) Docker-DCO-1.1-Signed-off-by: Scott Johnston (github: j0hnst0n) Signed-off-by: Sven Dowideit --- hack/ROADMAP.md | 40 +++++++++++++++++++++------------------- 1 file changed, 21 insertions(+), 19 deletions(-) diff --git a/hack/ROADMAP.md b/hack/ROADMAP.md index d49664b7b3..bee2ea83a6 100644 --- a/hack/ROADMAP.md +++ b/hack/ROADMAP.md @@ -1,6 +1,6 @@ -# Docker: what's next? +# Docker: Statement of Direction -This document is a high-level overview of where we want to take Docker next. +This document is a high-level overview of where we want to take Docker. It is a curated selection of planned improvements which are either important, difficult, or both. For a more complete view of planned and requested improvements, see [the Github issues](https://github.com/docker/docker/issues). @@ -8,34 +8,36 @@ For a more complete view of planned and requested improvements, see [the Github To suggest changes to the roadmap, including additions, please write the change as if it were already in effect, and make a pull request. -## Container wiring and service discovery +## Orchestration -In its current version, docker doesn’t make it very easy to manipulate multiple containers as a cohesive group (ie. orchestration), and it doesn’t make it seamless for containers to connect to each other as network services (ie. wiring). +Orchestration touches on several aspects of multi-container applications.  These include provisioning hosts with the Docker daemon, organizing and maintaining multiple Docker hosts as a cluster, composing an application using multiple containers, and handling the networking between the containers across the hosts. -To achieve wiring and orchestration with docker today, you need to write glue scripts yourself, or use one several companion tools available, like Orchestra, Shipper, Deis, Pipeworks, etc. +Today, users accomplish this using a combination of glue scripts and various tools, like Shipper, Deis, Pipeworks, etc. -We want the Docker API to support orchestration and wiring natively, so that these tools can cleanly and seamlessly integrate into the Docker user experience, and remain interoperable with each other. +We want the Docker API to support all aspects of orchestration natively, so that these tools can cleanly and seamlessly integrate into the Docker user experience, and remain interoperable with each other. +## Networking -## Better integration with process supervisors +The current Docker networking model works for communication between containers all residing on the same host.  
Since Docker applications in production are made up of many containers deployed across multiple hosts (and sometimes multiple data centers), Docker’s networking model will evolve to accommodate this.  An aspect of this evolution includes providing a Networking API to enable alternative implementations. -For docker to be fully usable in production, it needs to cleanly integrate with the host machine’s process supervisor of choice. Whether it’s sysV-init, upstart, systemd, runit or supervisord, we want to make sure docker plays nice with your existing system. This will be a major focus of the 0.7 release. +## Storage +Currently, stateful Docker containers are pinned to specific hosts during their lifetime.  To support additional resiliency, capacity management, and load balancing we want to enable live stateful containers to dynamically migrate between hosts.  While the Docker Project will provide a “batteries included” implementation for a great out-of-box experience, we will also provide an API for alternative implementations. + +## Microsoft Windows + +The next Microsoft Windows Server will ship with primitives to support container-based process isolation and resource management.  The Docker Project will guide contributors and maintainers developing native Microsoft versions of the Docker Remote API client and Docker daemon to take advantage of these primitives. + +## Provenance + +When assembling Docker applications we want users to be confident that images they didn’t create themselves are safe to use and build upon.  Provenance gives users the capability to digitally verify the inputs and processes constituting an image’s origins and lifecycle events. ## Plugin API We want Docker to run everywhere, and to integrate with every devops tool. Those are ambitious goals, and the only way to reach them is with the Docker community. For the community to participate fully, we need an API which allows Docker to be deeply and easily customized. -We are working on a plugin API which will make Docker very, very customization-friendly. We believe it will facilitate the integrations listed above – and many more we didn’t even think about. +We are working on a plugin API which will make Docker very customization-friendly. We believe it will facilitate the integrations listed above – and many more we didn’t even think about. +## Multi-Architecture Support -## Broader kernel support - -Our goal is to make Docker run everywhere, but currently Docker requires Linux version 3.8 or higher with cgroups support. If you’re deploying new machines for the purpose of running Docker, this is a fairly easy requirement to meet. However, if you’re adding Docker to an existing deployment, you may not have the flexibility to update and patch the kernel. - -Expanding Docker’s kernel support is a priority. This includes running on older kernel versions, specifically focusing on versions already popular in server deployments such as those used by RHEL and the OpenVZ stack. - - -## Cross-architecture support - -Our goal is to make Docker run everywhere. However currently Docker only runs on x86_64 systems. We plan on expanding architecture support, so that Docker containers can be created and used on more architectures. +Our goal is to make Docker run everywhere. However, currently Docker only runs on x86_64 systems. We plan on expanding architecture support, so that Docker containers can be created and used on more architectures, including ARM, Joyent SmartOS, and Microsoft. 
From ca0885729df52698264dd10ba403eb8ab88d9642 Mon Sep 17 00:00:00 2001 From: Fred Lifton Date: Mon, 10 Nov 2014 17:50:12 -0800 Subject: [PATCH 300/592] Fix 80 char. error Docker-DCO-1.1-Signed-off-by: Fred Lifton (github: fredlf) --- README.md | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/README.md b/README.md index 7aa7795464..fc41e409df 100644 --- a/README.md +++ b/README.md @@ -184,7 +184,8 @@ Want to hack on Docker? Awesome! There are instructions to get you started [here](CONTRIBUTING.md). If you'd like to contribute to the documentation, please take a look at this [README.md](https://github.com/docker/docker/blob/master/docs/README.md). -These instructions are probably not perfect, please let us know if anything feels wrong or incomplete. +These instructions are probably not perfect, please let us know if anything +feels wrong or incomplete. ### Legal From 447387474a5379f941457ece191c6920191c2a35 Mon Sep 17 00:00:00 2001 From: Sven Dowideit Date: Tue, 11 Nov 2014 15:54:31 +1000 Subject: [PATCH 301/592] Add some details about the lifecycle of commands started with docker exec Signed-off-by: Sven Dowideit --- docs/man/docker-exec.1.md | 6 ++++++ docs/sources/reference/commandline/cli.md | 6 +++++- 2 files changed, 11 insertions(+), 1 deletion(-) diff --git a/docs/man/docker-exec.1.md b/docs/man/docker-exec.1.md index d5ec1265bd..38ba3de412 100644 --- a/docs/man/docker-exec.1.md +++ b/docs/man/docker-exec.1.md @@ -15,6 +15,12 @@ docker-exec - Run a command in a running container Run a process in a running container. +The command started using `docker exec` will only run while the container's primary +process (`PID 1`) is running, and will not be restarted if the container is restarted. + +If the container is paused, then the `docker exec` command will wait until the +container is unpaused, and then run. + # Options **-d**, **--detach**=*true*|*false* diff --git a/docs/sources/reference/commandline/cli.md b/docs/sources/reference/commandline/cli.md index 50c0ff3cce..e0a7b2328a 100644 --- a/docs/sources/reference/commandline/cli.md +++ b/docs/sources/reference/commandline/cli.md @@ -646,7 +646,11 @@ You'll need two shells for this example. The `docker exec` command runs a new command in a running container. -The `docker exec` command will typically be used after `docker run` or `docker start`. +The command started using `docker exec` will only run while the container's primary +process (`PID 1`) is running, and will not be restarted if the container is restarted. + +If the container is paused, then the `docker exec` command will wait until the +container is unpaused, and then run. #### Examples From 5cd53195fddd9a2843ad57c6e1ac11bec1ac37f0 Mon Sep 17 00:00:00 2001 From: Recursive Madman Date: Mon, 10 Nov 2014 18:14:12 +0100 Subject: [PATCH 302/592] Don't loose precision when parsing image size on 32 bit machines. Presumably fixes #8979. 
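A minimal sketch of the difference (the value below is purely illustrative):

```
package main

import (
	"fmt"
	"strconv"
)

func main() {
	// Atoi is shorthand for ParseInt(s, 10, 0); bitSize 0 means "size of
	// int", which is only 32 bits on 32-bit machines, so layer sizes of
	// 2^31 bytes or more cannot be represented there.
	n, err := strconv.Atoi("3000000000")
	fmt.Println(n, err)

	// ParseInt with an explicit bitSize of 64 always yields an int64,
	// independent of the platform's native int width.
	n64, err := strconv.ParseInt("3000000000", 10, 64)
	fmt.Println(n64, err)
}
```
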
Signed-off-by: Recursive Madman --- image/image.go | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/image/image.go b/image/image.go index dfa8e9a6e3..f1ce48935a 100644 --- a/image/image.go +++ b/image/image.go @@ -64,7 +64,10 @@ func LoadImage(root string) (*Image, error) { // because a layer size of 0 (zero) is valid img.Size = -1 } else { - size, err := strconv.Atoi(string(buf)) + // Using Atoi here instead would temporarily convert the size to a machine + // dependent integer type, which causes images larger than 2^31 bytes to + // display negative sizes on 32-bit machines: + size, err := strconv.ParseInt(string(buf), 10, 64) if err != nil { return nil, err } From adb07b53e083784e4f09935b8e3bdcf123db284e Mon Sep 17 00:00:00 2001 From: Abin Shahab Date: Tue, 11 Nov 2014 08:52:41 +0000 Subject: [PATCH 303/592] LINKED CONTAINER ID PASSED TO LXC This passed the --net=container:CONTINER_ID to lxc-start as --share-net Docker-DCO-1.1-Signed-off-by: Abin Shahab (github: ashahab-altiscale) --- daemon/execdriver/lxc/driver.go | 11 +++++++++-- 1 file changed, 9 insertions(+), 2 deletions(-) diff --git a/daemon/execdriver/lxc/driver.go b/daemon/execdriver/lxc/driver.go index 7583a3e64f..ec99bf5e50 100644 --- a/daemon/execdriver/lxc/driver.go +++ b/daemon/execdriver/lxc/driver.go @@ -86,10 +86,17 @@ func (d *driver) Run(c *execdriver.Command, pipes *execdriver.Pipes, startCallba "lxc-start", "-n", c.ID, "-f", configPath, - "--", - c.InitPath, + } + if c.Network.ContainerID != "" { + params = append(params, + "--share-net", c.Network.ContainerID, + ) } + params = append(params, + "--", + c.InitPath, + ) if c.Network.Interface != nil { params = append(params, "-g", c.Network.Interface.Gateway, From 5cd9b7513f05948ddc1068041f6c6d822875921e Mon Sep 17 00:00:00 2001 From: Vincent Batts Date: Tue, 11 Nov 2014 08:48:11 -0500 Subject: [PATCH 304/592] pkg/tarsum: adding more tests Ensuring case size of headers will still be accounted for. 
https://github.com/docker/docker/pull/8869#discussion_r20114401 Signed-off-by: Vincent Batts --- pkg/tarsum/tarsum_test.go | 147 ++++++++++++++++++++++++++++++++++++++ 1 file changed, 147 insertions(+) diff --git a/pkg/tarsum/tarsum_test.go b/pkg/tarsum/tarsum_test.go index 1e06cda178..60fcc97c93 100644 --- a/pkg/tarsum/tarsum_test.go +++ b/pkg/tarsum/tarsum_test.go @@ -318,6 +318,153 @@ func TestTarSums(t *testing.T) { } } +func TestIteration(t *testing.T) { + headerTests := []struct { + expectedSum string // TODO(vbatts) it would be nice to get individual sums of each + version Version + hdr *tar.Header + data []byte + }{ + { + "tarsum+sha256:e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855", + Version0, + &tar.Header{ + Name: "file.txt", + Size: 0, + Typeflag: tar.TypeReg, + Devminor: 0, + Devmajor: 0, + }, + []byte(""), + }, + { + "tarsum.dev+sha256:e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855", + VersionDev, + &tar.Header{ + Name: "file.txt", + Size: 0, + Typeflag: tar.TypeReg, + Devminor: 0, + Devmajor: 0, + }, + []byte(""), + }, + { + "tarsum.dev+sha256:e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855", + VersionDev, + &tar.Header{ + Name: "another.txt", + Uid: 1000, + Gid: 1000, + Uname: "slartibartfast", + Gname: "users", + Size: 4, + Typeflag: tar.TypeReg, + Devminor: 0, + Devmajor: 0, + }, + []byte("test"), + }, + { + "tarsum.dev+sha256:4cc2e71ac5d31833ab2be9b4f7842a14ce595ec96a37af4ed08f87bc374228cd", + VersionDev, + &tar.Header{ + Name: "xattrs.txt", + Uid: 1000, + Gid: 1000, + Uname: "slartibartfast", + Gname: "users", + Size: 4, + Typeflag: tar.TypeReg, + Xattrs: map[string]string{ + "user.key1": "value1", + "user.key2": "value2", + }, + }, + []byte("test"), + }, + { + "tarsum.dev+sha256:65f4284fa32c0d4112dd93c3637697805866415b570587e4fd266af241503760", + VersionDev, + &tar.Header{ + Name: "xattrs.txt", + Uid: 1000, + Gid: 1000, + Uname: "slartibartfast", + Gname: "users", + Size: 4, + Typeflag: tar.TypeReg, + Xattrs: map[string]string{ + "user.KEY1": "value1", // adding different case to ensure different sum + "user.key2": "value2", + }, + }, + []byte("test"), + }, + { + "tarsum+sha256:c12bb6f1303a9ddbf4576c52da74973c00d14c109bcfa76b708d5da1154a07fa", + Version0, + &tar.Header{ + Name: "xattrs.txt", + Uid: 1000, + Gid: 1000, + Uname: "slartibartfast", + Gname: "users", + Size: 4, + Typeflag: tar.TypeReg, + Xattrs: map[string]string{ + "user.NOT": "CALCULATED", + }, + }, + []byte("test"), + }, + } + for _, htest := range headerTests { + s, err := renderSumForHeader(htest.version, htest.hdr, htest.data) + if err != nil { + t.Fatal(err) + } + + if s != htest.expectedSum { + t.Errorf("expected sum: %q, got: %q", htest.expectedSum, s) + } + } + +} + +func renderSumForHeader(v Version, h *tar.Header, data []byte) (string, error) { + buf := bytes.NewBuffer(nil) + // first build our test tar + tw := tar.NewWriter(buf) + if err := tw.WriteHeader(h); err != nil { + return "", err + } + if _, err := tw.Write(data); err != nil { + return "", err + } + tw.Close() + + ts, err := NewTarSum(buf, true, v) + if err != nil { + return "", err + } + tr := tar.NewReader(ts) + for { + hdr, err := tr.Next() + if hdr == nil || err == io.EOF { + break + } + if err != nil { + return "", err + } + if _, err = io.Copy(ioutil.Discard, tr); err != nil { + return "", err + } + break // we're just reading one header ... 
+ } + return ts.Sum(nil), nil +} + func Benchmark9kTar(b *testing.B) { buf := bytes.NewBuffer([]byte{}) fh, err := os.Open("testdata/46af0962ab5afeb5ce6740d4d91652e69206fc991fd5328c1a94d364ad00e457/layer.tar") From 14692d0d417378efc3456ddb810eae5646652bda Mon Sep 17 00:00:00 2001 From: unclejack Date: Thu, 6 Nov 2014 20:01:37 +0200 Subject: [PATCH 305/592] pkg/archive: add interface for Untar Docker-DCO-1.1-Signed-off-by: Cristian Staretu (github: unclejack) --- pkg/archive/archive.go | 71 +++++++++++++++++++++++++++--------------- 1 file changed, 46 insertions(+), 25 deletions(-) diff --git a/pkg/archive/archive.go b/pkg/archive/archive.go index 2e339b3156..d8f34d9694 100644 --- a/pkg/archive/archive.go +++ b/pkg/archive/archive.go @@ -36,10 +36,17 @@ type ( NoLchown bool Name string } + + // Archiver allows the reuse of most utility functions of this package + // with a pluggable Untar function. + Archiver struct { + Untar func(io.Reader, string, *TarOptions) error + } ) var ( ErrNotImplemented = errors.New("Function not implemented") + defaultArchiver = &Archiver{Untar} ) const ( @@ -549,45 +556,47 @@ loop: return nil } -// TarUntar is a convenience function which calls Tar and Untar, with -// the output of one piped into the other. If either Tar or Untar fails, -// TarUntar aborts and returns the error. -func TarUntar(src string, dst string) error { +func (archiver *Archiver) TarUntar(src, dst string) error { log.Debugf("TarUntar(%s %s)", src, dst) archive, err := TarWithOptions(src, &TarOptions{Compression: Uncompressed}) if err != nil { return err } defer archive.Close() - return Untar(archive, dst, nil) + return archiver.Untar(archive, dst, nil) } -// UntarPath is a convenience function which looks for an archive -// at filesystem path `src`, and unpacks it at `dst`. -func UntarPath(src, dst string) error { +// TarUntar is a convenience function which calls Tar and Untar, with the output of one piped into the other. +// If either Tar or Untar fails, TarUntar aborts and returns the error. +func TarUntar(src, dst string) error { + return defaultArchiver.TarUntar(src, dst) +} + +func (archiver *Archiver) UntarPath(src, dst string) error { archive, err := os.Open(src) if err != nil { return err } defer archive.Close() - if err := Untar(archive, dst, nil); err != nil { + if err := archiver.Untar(archive, dst, nil); err != nil { return err } return nil } -// CopyWithTar creates a tar archive of filesystem path `src`, and -// unpacks it at filesystem path `dst`. -// The archive is streamed directly with fixed buffering and no -// intermediary disk IO. -// -func CopyWithTar(src, dst string) error { +// UntarPath is a convenience function which looks for an archive +// at filesystem path `src`, and unpacks it at `dst`. +func UntarPath(src, dst string) error { + return defaultArchiver.UntarPath(src, dst) +} + +func (archiver *Archiver) CopyWithTar(src, dst string) error { srcSt, err := os.Stat(src) if err != nil { return err } if !srcSt.IsDir() { - return CopyFileWithTar(src, dst) + return archiver.CopyFileWithTar(src, dst) } // Create dst, copy src's content into it log.Debugf("Creating dest directory: %s", dst) @@ -595,16 +604,18 @@ func CopyWithTar(src, dst string) error { return err } log.Debugf("Calling TarUntar(%s, %s)", src, dst) - return TarUntar(src, dst) + return archiver.TarUntar(src, dst) } -// CopyFileWithTar emulates the behavior of the 'cp' command-line -// for a single file. It copies a regular file from path `src` to -// path `dst`, and preserves all its metadata. 
-// -// If `dst` ends with a trailing slash '/', the final destination path -// will be `dst/base(src)`. -func CopyFileWithTar(src, dst string) (err error) { +// CopyWithTar creates a tar archive of filesystem path `src`, and +// unpacks it at filesystem path `dst`. +// The archive is streamed directly with fixed buffering and no +// intermediary disk IO. +func CopyWithTar(src, dst string) error { + return defaultArchiver.CopyWithTar(src, dst) +} + +func (archiver *Archiver) CopyFileWithTar(src, dst string) (err error) { log.Debugf("CopyFileWithTar(%s, %s)", src, dst) srcSt, err := os.Stat(src) if err != nil { @@ -652,7 +663,17 @@ func CopyFileWithTar(src, dst string) (err error) { err = er } }() - return Untar(r, filepath.Dir(dst), nil) + return archiver.Untar(r, filepath.Dir(dst), nil) +} + +// CopyFileWithTar emulates the behavior of the 'cp' command-line +// for a single file. It copies a regular file from path `src` to +// path `dst`, and preserves all its metadata. +// +// If `dst` ends with a trailing slash '/', the final destination path +// will be `dst/base(src)`. +func CopyFileWithTar(src, dst string) (err error) { + return defaultArchiver.CopyFileWithTar(src, dst) } // CmdStream executes a command, and returns its stdout as a stream. From 0cef21cfba5b06ce7bd5d6b68865a9df0aca95fc Mon Sep 17 00:00:00 2001 From: Doug Davis Date: Tue, 11 Nov 2014 08:30:07 -0800 Subject: [PATCH 306/592] Cleanup some integration-cli output I noticed a few things that were bugging me in the output of the integration-cli tests. - one of the tests used println to stdout so we had garage sent to the screen - some of the test, in their final log message, didn't include the name of the group/file e.g. daemon - run,iptables was just run,iptables And yes, I noticed this because I'm anal :-) but also because we should keep the output of the tests as clean as possible so its easy to spot it when things go bad. 
Signed-off-by: Doug Davis --- integration-cli/docker_cli_daemon_test.go | 4 ++-- integration-cli/docker_cli_exec_test.go | 1 - integration-cli/docker_cli_rmi_test.go | 2 +- integration-cli/docker_cli_save_load_test.go | 6 ++---- 4 files changed, 5 insertions(+), 8 deletions(-) diff --git a/integration-cli/docker_cli_daemon_test.go b/integration-cli/docker_cli_daemon_test.go index 42995def13..fa7901d82d 100644 --- a/integration-cli/docker_cli_daemon_test.go +++ b/integration-cli/docker_cli_daemon_test.go @@ -169,7 +169,7 @@ func TestDaemonIptablesClean(t *testing.T) { deleteAllContainers() - logDone("run,iptables - iptables rules cleaned after daemon restart") + logDone("daemon - run,iptables - iptables rules cleaned after daemon restart") } func TestDaemonIptablesCreate(t *testing.T) { @@ -221,5 +221,5 @@ func TestDaemonIptablesCreate(t *testing.T) { deleteAllContainers() - logDone("run,iptables - iptables rules for always restarted container created after daemon restart") + logDone("daemon - run,iptables - iptables rules for always restarted container created after daemon restart") } diff --git a/integration-cli/docker_cli_exec_test.go b/integration-cli/docker_cli_exec_test.go index 6626a33a8b..ed5778bbb2 100644 --- a/integration-cli/docker_cli_exec_test.go +++ b/integration-cli/docker_cli_exec_test.go @@ -40,7 +40,6 @@ func TestExecInteractiveStdinClose(t *testing.T) { } contId := strings.TrimSpace(out) - println(contId) returnchan := make(chan struct{}) diff --git a/integration-cli/docker_cli_rmi_test.go b/integration-cli/docker_cli_rmi_test.go index 5cb126f822..c28e771971 100644 --- a/integration-cli/docker_cli_rmi_test.go +++ b/integration-cli/docker_cli_rmi_test.go @@ -74,7 +74,7 @@ func TestRmiTag(t *testing.T) { } } - logDone("tag,rmi- tagging the same images multiple times then removing tags") + logDone("rmi - tag,rmi- tagging the same images multiple times then removing tags") } func TestRmiTagWithExistingContainers(t *testing.T) { diff --git a/integration-cli/docker_cli_save_load_test.go b/integration-cli/docker_cli_save_load_test.go index 351f249fd0..ceb73a571f 100644 --- a/integration-cli/docker_cli_save_load_test.go +++ b/integration-cli/docker_cli_save_load_test.go @@ -71,8 +71,7 @@ func TestSaveAndLoadRepoStdout(t *testing.T) { os.Remove("/tmp/foobar-save-load-test.tar") - logDone("save - save a repo using stdout") - logDone("load - load a repo using stdout") + logDone("save - save/load a repo using stdout") pty, tty, err := pty.Open() if err != nil { @@ -228,8 +227,7 @@ func TestSaveAndLoadRepoFlags(t *testing.T) { os.Remove("/tmp/foobar-save-load-test.tar") - logDone("save - save a repo using -o") - logDone("load - load a repo using -i") + logDone("save - save a repo using -o && load a repo using -i") } func TestSaveMultipleNames(t *testing.T) { From 75e3b35bf15dd01363f8b422d6b8a4a62b1054c6 Mon Sep 17 00:00:00 2001 From: Johan Euphrosine Date: Tue, 11 Nov 2014 11:01:49 -0800 Subject: [PATCH 307/592] registry: add tests for IsSecure Signed-off-by: Johan Euphrosine --- registry/registry_test.go | 20 ++++++++++++++++++++ 1 file changed, 20 insertions(+) diff --git a/registry/registry_test.go b/registry/registry_test.go index 23aef6c361..f7b5168b45 100644 --- a/registry/registry_test.go +++ b/registry/registry_test.go @@ -319,3 +319,23 @@ func TestAddRequiredHeadersToRedirectedRequests(t *testing.T) { } } } + +func TestIsSecure(t *testing.T) { + tests := []struct { + addr string + insecureRegistries []string + expected bool + }{ + {"example.com", []string{}, true}, + 
{"example.com", []string{"example.com"}, false}, + {"localhost", []string{"localhost:5000"}, true}, + {"localhost:5000", []string{"localhost:5000"}, false}, + {"localhost", []string{"example.com"}, true}, + {"127.0.0.1:5000", []string{"127.0.0.1:5000"}, false}, + } + for _, tt := range tests { + if sec := IsSecure(tt.addr, tt.insecureRegistries); sec != tt.expected { + t.Errorf("IsSecure failed for %q %v, expected %v got %v", tt.addr, tt.insecureRegistries, tt.expected, sec) + } + } +} From bc45428220e4a8d05d9a0cbc701a729f7fe2aa8d Mon Sep 17 00:00:00 2001 From: Srini Brahmaroutu Date: Tue, 11 Nov 2014 22:17:53 +0000 Subject: [PATCH 308/592] improve error message to print the tag Closes #9001 Signed-off-by: Srini Brahmaroutu --- graph/pull.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/graph/pull.go b/graph/pull.go index 775c318af1..a7394a4b92 100644 --- a/graph/pull.go +++ b/graph/pull.go @@ -177,7 +177,7 @@ func (s *TagStore) pullRepository(r *registry.Session, out io.Writer, localName, repoData, err := r.GetRepositoryData(remoteName) if err != nil { if strings.Contains(err.Error(), "HTTP code: 404") { - return fmt.Errorf("Error: image %s not found", remoteName) + return fmt.Errorf("Error: image %s:%s not found", remoteName, askedTag) } // Unexpected HTTP error return err From 64a335ceac77dfd9eacc42e9d1f6fb2505e26e45 Mon Sep 17 00:00:00 2001 From: Steven Burgess Date: Tue, 11 Nov 2014 18:01:08 -0500 Subject: [PATCH 309/592] Fix comment to match the arg name Signed-off-by: Steven Burgess --- pkg/archive/archive.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pkg/archive/archive.go b/pkg/archive/archive.go index d8f34d9694..530ea303ad 100644 --- a/pkg/archive/archive.go +++ b/pkg/archive/archive.go @@ -455,7 +455,7 @@ func TarWithOptions(srcPath string, options *TarOptions) (io.ReadCloser, error) } // Untar reads a stream of bytes from `archive`, parses it as a tar archive, -// and unpacks it into the directory at `path`. +// and unpacks it into the directory at `dest`. // The archive may be compressed with one of the following algorithms: // identity (uncompressed), gzip, bzip2, xz. // FIXME: specify behavior when target path exists vs. doesn't exist. 
From 814bc06d7bf69c7775b775179c7a3edb8d30685c Mon Sep 17 00:00:00 2001 From: Doug Davis Date: Tue, 11 Nov 2014 15:18:20 -0800 Subject: [PATCH 310/592] Add missing comma in docs Found by Michael Voznesensky Signed-off-by: Doug Davis --- docs/sources/reference/api/docker_remote_api_v1.15.md | 2 +- docs/sources/reference/api/docker_remote_api_v1.16.md | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/docs/sources/reference/api/docker_remote_api_v1.15.md b/docs/sources/reference/api/docker_remote_api_v1.15.md index f5ce896c5f..a634f7c550 100644 --- a/docs/sources/reference/api/docker_remote_api_v1.15.md +++ b/docs/sources/reference/api/docker_remote_api_v1.15.md @@ -124,7 +124,7 @@ Create a container "Cmd":[ "date" ], - "Entrypoint": "" + "Entrypoint": "", "Image":"base", "Volumes":{ "/tmp": {} diff --git a/docs/sources/reference/api/docker_remote_api_v1.16.md b/docs/sources/reference/api/docker_remote_api_v1.16.md index 113b97e462..db07a97a6e 100644 --- a/docs/sources/reference/api/docker_remote_api_v1.16.md +++ b/docs/sources/reference/api/docker_remote_api_v1.16.md @@ -124,7 +124,7 @@ Create a container "Cmd":[ "date" ], - "Entrypoint": "" + "Entrypoint": "", "Image":"base", "Volumes":{ "/tmp": {} From 4b4ad26b977bba0b52f6ea15d08750a7453304a4 Mon Sep 17 00:00:00 2001 From: Victor Vieux Date: Wed, 8 Oct 2014 18:18:42 +0000 Subject: [PATCH 311/592] handle GET redirects Signed-off-by: Victor Vieux --- api/client/cli.go | 11 +++++++---- 1 file changed, 7 insertions(+), 4 deletions(-) diff --git a/api/client/cli.go b/api/client/cli.go index 74e645171a..424ccf2fa2 100644 --- a/api/client/cli.go +++ b/api/client/cli.go @@ -138,14 +138,17 @@ func NewDockerCli(in io.ReadCloser, out, err io.Writer, key libtrust.PrivateKey, // The transport is created here for reuse during the client session tr := &http.Transport{ TLSClientConfig: tlsConfig, - Dial: func(dial_network, dial_addr string) (net.Conn, error) { - // Why 32? See issue 8035 - return net.DialTimeout(proto, addr, 32*time.Second) - }, } + + // Why 32? See issue 8035 if proto == "unix" { // no need in compressing for local communications tr.DisableCompression = true + tr.Dial = func(network, addr string) (net.Conn, error) { + return net.DialTimeout("unix", addr, 32*time.Second) + } + } else { + tr.Dial = (&net.Dialer{Timeout: 32 * time.Second}).Dial } return &DockerCli{ From 6ca144ef3d62e358327249546bf83ce1e347be5c Mon Sep 17 00:00:00 2001 From: Victor Vieux Date: Wed, 12 Nov 2014 00:37:54 +0000 Subject: [PATCH 312/592] fix unix socket Signed-off-by: Victor Vieux --- api/client/cli.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/api/client/cli.go b/api/client/cli.go index 424ccf2fa2..7a806d64bb 100644 --- a/api/client/cli.go +++ b/api/client/cli.go @@ -144,8 +144,8 @@ func NewDockerCli(in io.ReadCloser, out, err io.Writer, key libtrust.PrivateKey, if proto == "unix" { // no need in compressing for local communications tr.DisableCompression = true - tr.Dial = func(network, addr string) (net.Conn, error) { - return net.DialTimeout("unix", addr, 32*time.Second) + tr.Dial = func(dial_network, dial_addr string) (net.Conn, error) { + return net.DialTimeout(proto, addr, 32*time.Second) } } else { tr.Dial = (&net.Dialer{Timeout: 32 * time.Second}).Dial From 15c74bebc1ea2d51612b5809b4477551547a8b3d Mon Sep 17 00:00:00 2001 From: Vivek Goyal Date: Fri, 7 Nov 2014 16:53:34 -0500 Subject: [PATCH 313/592] devmapper: Take care of some review comments Took care of some review comments from crosbymichael. 
v2: - Return "err = nil" if file deviceset-metadata file does not exist. - Use json.Decoder() interface for loading deviceset metadata. v3: - Reverted back to json marshal interface in loadDeviceSetMetaData(). Signed-off-by: Vivek Goyal --- daemon/graphdriver/devmapper/deviceset.go | 32 +++++++++++------------ 1 file changed, 15 insertions(+), 17 deletions(-) diff --git a/daemon/graphdriver/devmapper/deviceset.go b/daemon/graphdriver/devmapper/deviceset.go index 59c5ec82e7..125483f6c2 100644 --- a/daemon/graphdriver/devmapper/deviceset.go +++ b/daemon/graphdriver/devmapper/deviceset.go @@ -32,6 +32,8 @@ var ( DefaultThinpBlockSize uint32 = 128 // 64K = 128 512b sectors ) +const deviceSetMetaFile string = "deviceset-metadata" + type DevInfo struct { Hash string `json:"-"` DeviceId int `json:"device_id"` @@ -139,7 +141,7 @@ func (devices *DeviceSet) metadataFile(info *DevInfo) string { } func (devices *DeviceSet) deviceSetMetaFile() string { - return path.Join(devices.metadataDir(), "deviceset-metadata") + return path.Join(devices.metadataDir(), deviceSetMetaFile) } func (devices *DeviceSet) oldMetadataFile() string { @@ -236,8 +238,7 @@ func (devices *DeviceSet) saveMetadata(info *DevInfo) error { if err != nil { return fmt.Errorf("Error encoding metadata to json: %s", err) } - err = devices.writeMetaFile(jsonData, devices.metadataFile(info)) - if err != nil { + if err := devices.writeMetaFile(jsonData, devices.metadataFile(info)); err != nil { return err } @@ -552,29 +553,24 @@ func (devices *DeviceSet) ResizePool(size int64) error { func (devices *DeviceSet) loadDeviceSetMetaData() error { jsonData, err := ioutil.ReadFile(devices.deviceSetMetaFile()) if err != nil { - return nil + // For backward compatibility return success if file does + // not exist. + if os.IsNotExist(err) { + return nil + } + return err } - if err := json.Unmarshal(jsonData, devices); err != nil { - return nil - } - - return nil + return json.Unmarshal(jsonData, devices) } func (devices *DeviceSet) saveDeviceSetMetaData() error { jsonData, err := json.Marshal(devices) - if err != nil { return fmt.Errorf("Error encoding metadata to json: %s", err) } - err = devices.writeMetaFile(jsonData, devices.deviceSetMetaFile()) - if err != nil { - return err - } - - return nil + return devices.writeMetaFile(jsonData, devices.deviceSetMetaFile()) } func (devices *DeviceSet) initDevmapper(doInit bool) error { @@ -710,7 +706,9 @@ func (devices *DeviceSet) initDevmapper(doInit bool) error { // Right now this loads only NextDeviceId. If there is more metatadata // down the line, we might have to move it earlier. - devices.loadDeviceSetMetaData() + if err = devices.loadDeviceSetMetaData(); err != nil { + return err + } // Setup the base image if doInit { From 497fc8876ede9924f61c0eee4dfadd71e5d9f537 Mon Sep 17 00:00:00 2001 From: Dan Walsh Date: Mon, 10 Nov 2014 16:14:17 -0500 Subject: [PATCH 314/592] Allow IPC namespace to be shared between containers or with the host Some workloads rely on IPC for communications with other processes. We would like to split workloads between two container but still allow them to communicate though shared IPC. This patch mimics the --net code to allow --ipc=host to not split off the IPC Namespace. ipc=container:CONTAINERID to share ipc between containers If you share IPC between containers, then you need to make sure SELinux labels match. 
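As a minimal sketch of the new flag (the container name "ipc-server" below
is only a placeholder), sharing IPC from the command line could look like:

```
# run a long-lived container whose IPC namespace will be shared
$ docker run -d --name ipc-server busybox top

# join that container's IPC namespace; ipcs lists its shared memory segments
$ docker run --rm --ipc=container:ipc-server busybox ipcs -m

# or skip IPC isolation entirely and use the host's namespace
$ docker run --rm --ipc=host busybox ipcs -m
```
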
Docker-DCO-1.1-Signed-off-by: Dan Walsh (github: rhatdan) --- daemon/container.go | 28 +++++++++ daemon/create.go | 26 +++++++++ daemon/execdriver/driver.go | 7 +++ daemon/execdriver/native/create.go | 26 +++++++++ docs/man/docker-run.1.md | 81 ++++++++++++++++++++++++-- docs/sources/reference/run.md | 17 ++++++ integration-cli/docker_cli_run_test.go | 70 ++++++++++++++++++++++ runconfig/hostconfig.go | 40 +++++++++++++ runconfig/parse.go | 7 +++ 9 files changed, 298 insertions(+), 4 deletions(-) diff --git a/daemon/container.go b/daemon/container.go index 2ac8316137..bf93787ebf 100644 --- a/daemon/container.go +++ b/daemon/container.go @@ -233,6 +233,18 @@ func populateCommand(c *Container, env []string) error { return fmt.Errorf("invalid network mode: %s", c.hostConfig.NetworkMode) } + ipc := &execdriver.Ipc{} + + if c.hostConfig.IpcMode.IsContainer() { + ic, err := c.getIpcContainer() + if err != nil { + return err + } + ipc.ContainerID = ic.ID + } else { + ipc.HostIpc = c.hostConfig.IpcMode.IsHost() + } + // Build lists of devices allowed and created within the container. userSpecifiedDevices := make([]*devices.Device, len(c.hostConfig.Devices)) for i, deviceMapping := range c.hostConfig.Devices { @@ -274,6 +286,7 @@ func populateCommand(c *Container, env []string) error { InitPath: "/.dockerinit", WorkingDir: c.Config.WorkingDir, Network: en, + Ipc: ipc, Resources: resources, AllowedDevices: allowedDevices, AutoCreatedDevices: autoCreatedDevices, @@ -1250,10 +1263,25 @@ func (container *Container) GetMountLabel() string { return container.MountLabel } +func (container *Container) getIpcContainer() (*Container, error) { + containerID := container.hostConfig.IpcMode.Container() + c := container.daemon.Get(containerID) + if c == nil { + return nil, fmt.Errorf("no such container to join IPC: %s", containerID) + } + if !c.IsRunning() { + return nil, fmt.Errorf("cannot join IPC of a non running container: %s", containerID) + } + return c, nil +} + func (container *Container) getNetworkedContainer() (*Container, error) { parts := strings.SplitN(string(container.hostConfig.NetworkMode), ":", 2) switch parts[0] { case "container": + if len(parts) != 2 { + return nil, fmt.Errorf("no container specified to join network") + } nc := container.daemon.Get(parts[1]) if nc == nil { return nil, fmt.Errorf("no such container to join network: %s", parts[1]) diff --git a/daemon/create.go b/daemon/create.go index e72b0ef206..3a71a8ac7e 100644 --- a/daemon/create.go +++ b/daemon/create.go @@ -1,10 +1,13 @@ package daemon import ( + "fmt" + "github.com/docker/docker/engine" "github.com/docker/docker/graph" "github.com/docker/docker/pkg/parsers" "github.com/docker/docker/runconfig" + "github.com/docker/libcontainer/label" ) func (daemon *Daemon) ContainerCreate(job *engine.Job) engine.Status { @@ -80,6 +83,12 @@ func (daemon *Daemon) Create(config *runconfig.Config, hostConfig *runconfig.Hos if warnings, err = daemon.mergeAndVerifyConfig(config, img); err != nil { return nil, nil, err } + if hostConfig != nil && config.SecurityOpt == nil { + config.SecurityOpt, err = daemon.GenerateSecurityOpt(hostConfig.IpcMode) + if err != nil { + return nil, nil, err + } + } if container, err = daemon.newContainer(name, config, img); err != nil { return nil, nil, err } @@ -99,3 +108,20 @@ func (daemon *Daemon) Create(config *runconfig.Config, hostConfig *runconfig.Hos } return container, warnings, nil } +func (daemon *Daemon) GenerateSecurityOpt(ipcMode runconfig.IpcMode) ([]string, error) { + if ipcMode.IsHost() { + 
return label.DisableSecOpt(), nil + } + if ipcContainer := ipcMode.Container(); ipcContainer != "" { + c := daemon.Get(ipcContainer) + if c == nil { + return nil, fmt.Errorf("no such container to join IPC: %s", ipcContainer) + } + if !c.IsRunning() { + return nil, fmt.Errorf("cannot join IPC of a non running container: %s", ipcContainer) + } + + return label.DupSecOpt(c.ProcessLabel), nil + } + return nil, nil +} diff --git a/daemon/execdriver/driver.go b/daemon/execdriver/driver.go index bc2eb24eda..b2febe5761 100644 --- a/daemon/execdriver/driver.go +++ b/daemon/execdriver/driver.go @@ -62,6 +62,12 @@ type Network struct { HostNetworking bool `json:"host_networking"` } +// IPC settings of the container +type Ipc struct { + ContainerID string `json:"container_id"` // id of the container to join ipc. + HostIpc bool `json:"host_ipc"` +} + type NetworkInterface struct { Gateway string `json:"gateway"` IPAddress string `json:"ip"` @@ -106,6 +112,7 @@ type Command struct { WorkingDir string `json:"working_dir"` ConfigPath string `json:"config_path"` // this should be able to be removed when the lxc template is moved into the driver Network *Network `json:"network"` + Ipc *Ipc `json:"ipc"` Resources *Resources `json:"resources"` Mounts []Mount `json:"mounts"` AllowedDevices []*devices.Device `json:"allowed_devices"` diff --git a/daemon/execdriver/native/create.go b/daemon/execdriver/native/create.go index 492247e492..de103eca8a 100644 --- a/daemon/execdriver/native/create.go +++ b/daemon/execdriver/native/create.go @@ -36,6 +36,10 @@ func (d *driver) createContainer(c *execdriver.Command) (*libcontainer.Config, e container.MountConfig.NoPivotRoot = os.Getenv("DOCKER_RAMDISK") != "" container.RestrictSys = true + if err := d.createIpc(container, c); err != nil { + return nil, err + } + if err := d.createNetwork(container, c); err != nil { return nil, err } @@ -124,6 +128,28 @@ func (d *driver) createNetwork(container *libcontainer.Config, c *execdriver.Com return nil } +func (d *driver) createIpc(container *libcontainer.Config, c *execdriver.Command) error { + if c.Ipc.HostIpc { + container.Namespaces["NEWIPC"] = false + return nil + } + + if c.Ipc.ContainerID != "" { + d.Lock() + active := d.activeContainers[c.Ipc.ContainerID] + d.Unlock() + + if active == nil || active.cmd.Process == nil { + return fmt.Errorf("%s is not a valid running container to join", c.Ipc.ContainerID) + } + cmd := active.cmd + + container.IpcNsPath = filepath.Join("/proc", fmt.Sprint(cmd.Process.Pid), "ns", "ipc") + } + + return nil +} + func (d *driver) setPrivileged(container *libcontainer.Config) (err error) { container.Capabilities = capabilities.GetAllCapabilities() container.Cgroups.AllowAllDevices = true diff --git a/docs/man/docker-run.1.md b/docs/man/docker-run.1.md index ff3dac17b0..0aa4cad3fe 100644 --- a/docs/man/docker-run.1.md +++ b/docs/man/docker-run.1.md @@ -23,6 +23,7 @@ docker-run - Run a command in a new container [**--expose**[=*[]*]] [**-h**|**--hostname**[=*HOSTNAME*]] [**-i**|**--interactive**[=*false*]] +[**--ipc**[=*[]*]] [**--security-opt**[=*[]*]] [**--link**[=*[]*]] [**--lxc-conf**[=*[]*]] @@ -142,6 +143,12 @@ ENTRYPOINT. **-i**, **--interactive**=*true*|*false* When set to true, keep stdin open even if not attached. The default is false. +**--ipc**=[] + Set the IPC mode for the container + **container**:<*name*|*id*>: reuses another container's IPC stack + **host**: use the host's IPC stack inside the container. 
+ Note: the host mode gives the container full access to local IPC and is therefore considered insecure. + **--security-opt**=*secdriver*:*name*:*value* "label:user:USER" : Set the label user for the container "label:role:ROLE" : Set the label role for the container @@ -183,10 +190,11 @@ and foreground Docker containers. **--net**="bridge" Set the Network mode for the container - 'bridge': creates a new network stack for the container on the docker bridge - 'none': no networking for this container - 'container:': reuses another container network stack - 'host': use the host network stack inside the container. Note: the host mode gives the container full access to local system services such as D-bus and is therefore considered insecure. + **bridge**: creates a new network stack for the container on the docker bridge + **none**: no networking for this container + **container**:<*name*|*id*>: reuses another container's network stack + **host**: use the host network stack inside the container. + Note: the host mode gives the container full access to local system services such as D-bus and is therefore considered insecure. **--mac-address**=*macaddress* Set the MAC address for the container's Ethernet device: @@ -310,6 +318,71 @@ you’d like to connect instead, as in: # docker run -a stdin -a stdout -i -t fedora /bin/bash +## Sharing IPC between containers + +Using shm_server.c available here: http://www.cs.cf.ac.uk/Dave/C/node27.html + +Testing `--ipc=host` mode: + +Host shows a shared memory segment with 7 pids attached, happens to be from httpd: + +``` + $ sudo ipcs -m + + ------ Shared Memory Segments -------- + key shmid owner perms bytes nattch status + 0x01128e25 0 root 600 1000 7 +``` + +Now run a regular container, and it correctly does NOT see the shared memory segment from the host: + +``` + $ sudo docker run -it shm ipcs -m + + ------ Shared Memory Segments -------- + key shmid owner perms bytes nattch status +``` + +Run a container with the new `--ipc=host` option, and it now sees the shared memory segment from the host httpd: + + ``` + $ sudo docker run -it --ipc=host shm ipcs -m + + ------ Shared Memory Segments -------- + key shmid owner perms bytes nattch status + 0x01128e25 0 root 600 1000 7 +``` +Testing `--ipc=container:CONTAINERID` mode: + +Start a container with a program to create a shared memory segment: +``` + sudo docker run -it shm bash + $ sudo shm/shm_server & + $ sudo ipcs -m + + ------ Shared Memory Segments -------- + key shmid owner perms bytes nattch status + 0x0000162e 0 root 666 27 1 +``` +Create a 2nd container correctly shows no shared memory segment from 1st container: +``` + $ sudo docker run shm ipcs -m + + ------ Shared Memory Segments -------- + key shmid owner perms bytes nattch status +``` + +Create a 3rd container using the new --ipc=container:CONTAINERID option, now it shows the shared memory segment from the first: + +``` + $ sudo docker run -it --ipc=container:ed735b2264ac shm ipcs -m + $ sudo ipcs -m + + ------ Shared Memory Segments -------- + key shmid owner perms bytes nattch status + 0x0000162e 0 root 666 27 1 +``` + ## Linking Containers The link feature allows multiple containers to communicate with each other. For diff --git a/docs/sources/reference/run.md b/docs/sources/reference/run.md index 1abb7d0575..31029e2a11 100644 --- a/docs/sources/reference/run.md +++ b/docs/sources/reference/run.md @@ -50,6 +50,7 @@ following options. 
- [Container Identification](#container-identification) - [Name (--name)](#name-name) - [PID Equivalent](#pid-equivalent) + - [IPC Settings](#ipc-settings) - [Network Settings](#network-settings) - [Clean Up (--rm)](#clean-up-rm) - [Runtime Constraints on CPU and Memory](#runtime-constraints-on-cpu-and-memory) @@ -131,6 +132,22 @@ While not strictly a means of identifying a container, you can specify a version image you'd like to run the container with by adding `image[:tag]` to the command. For example, `docker run ubuntu:14.04`. +## IPC Settings + --ipc="" : Set the IPC mode for the container, + 'container:': reuses another container's IPC namespace + 'host': use the host's IPC namespace inside the container +By default, all containers have the IPC namespace enabled + +IPC (POSIX/SysV IPC) namespace provides separation of named shared memory segments, semaphores and message queues. + +Shared memory segments are used to accelerate inter-process communication at +memory speed, rather than through pipes or through the network stack. Shared +memory is commonly used by databases and custom-built (typically C/OpenMPI, +C++/using boost libraries) high performance applications for scientific +computing and financial services industries. If these types of applications +are broken into multiple containers, you might need to share the IPC mechanisms +of the containers. + ## Network settings --dns=[] : Set custom dns servers for the container diff --git a/integration-cli/docker_cli_run_test.go b/integration-cli/docker_cli_run_test.go index d536c626bb..54949730a1 100644 --- a/integration-cli/docker_cli_run_test.go +++ b/integration-cli/docker_cli_run_test.go @@ -2568,3 +2568,73 @@ func TestRunUnknownCommand(t *testing.T) { logDone("run - Unknown Command") } + +func TestRunModeIpcHost(t *testing.T) { + hostIpc, err := os.Readlink("/proc/1/ns/ipc") + if err != nil { + t.Fatal(err) + } + + cmd := exec.Command(dockerBinary, "run", "--ipc=host", "busybox", "readlink", "/proc/self/ns/ipc") + out2, _, err := runCommandWithOutput(cmd) + if err != nil { + t.Fatal(err, out2) + } + + out2 = strings.Trim(out2, "\n") + if hostIpc != out2 { + t.Fatalf("IPC different with --ipc=host %s != %s\n", hostIpc, out2) + } + + cmd = exec.Command(dockerBinary, "run", "busybox", "readlink", "/proc/self/ns/ipc") + out2, _, err = runCommandWithOutput(cmd) + if err != nil { + t.Fatal(err, out2) + } + + out2 = strings.Trim(out2, "\n") + if hostIpc == out2 { + t.Fatalf("IPC should be different without --ipc=host %s != %s\n", hostIpc, out2) + } + deleteAllContainers() + + logDone("run - hostname and several network modes") +} + +func TestRunModeIpcContainer(t *testing.T) { + cmd := exec.Command(dockerBinary, "run", "-d", "busybox", "top") + out, _, err := runCommandWithOutput(cmd) + if err != nil { + t.Fatal(err, out) + } + id := strings.TrimSpace(out) + state, err := inspectField(id, "State.Running") + if err != nil { + t.Fatal(err) + } + if state != "true" { + t.Fatal("Container state is 'not running'") + } + pid1, err := inspectField(id, "State.Pid") + if err != nil { + t.Fatal(err) + } + + parentContainerIpc, err := os.Readlink(fmt.Sprintf("/proc/%s/ns/ipc", pid1)) + if err != nil { + t.Fatal(err) + } + cmd = exec.Command(dockerBinary, "run", fmt.Sprintf("--ipc=container:%s", id), "busybox", "readlink", "/proc/self/ns/ipc") + out2, _, err := runCommandWithOutput(cmd) + if err != nil { + t.Fatal(err, out2) + } + + out2 = strings.Trim(out2, "\n") + if parentContainerIpc != out2 { + t.Fatalf("IPC different with --ipc=container:%s 
%s != %s\n", id, parentContainerIpc, out2) + } + deleteAllContainers() + + logDone("run - hostname and several network modes") +} diff --git a/runconfig/hostconfig.go b/runconfig/hostconfig.go index 5c49522038..01388ad727 100644 --- a/runconfig/hostconfig.go +++ b/runconfig/hostconfig.go @@ -28,6 +28,44 @@ func (n NetworkMode) IsNone() bool { return n == "none" } +type IpcMode string + +// IsPrivate indicates whether container use it's private ipc stack +func (n IpcMode) IsPrivate() bool { + return !(n.IsHost() || n.IsContainer()) +} + +func (n IpcMode) IsHost() bool { + return n == "host" +} + +func (n IpcMode) IsContainer() bool { + parts := strings.SplitN(string(n), ":", 2) + return len(parts) > 1 && parts[0] == "container" +} + +func (n IpcMode) Valid() bool { + parts := strings.Split(string(n), ":") + switch mode := parts[0]; mode { + case "", "host": + case "container": + if len(parts) != 2 || parts[1] == "" { + return false + } + default: + return false + } + return true +} + +func (n IpcMode) Container() string { + parts := strings.SplitN(string(n), ":", 2) + if len(parts) > 1 { + return parts[1] + } + return "" +} + type DeviceMapping struct { PathOnHost string PathInContainer string @@ -53,6 +91,7 @@ type HostConfig struct { VolumesFrom []string Devices []DeviceMapping NetworkMode NetworkMode + IpcMode IpcMode CapAdd []string CapDrop []string RestartPolicy RestartPolicy @@ -84,6 +123,7 @@ func ContainerHostConfigFromJob(job *engine.Job) *HostConfig { Privileged: job.GetenvBool("Privileged"), PublishAllPorts: job.GetenvBool("PublishAllPorts"), NetworkMode: NetworkMode(job.Getenv("NetworkMode")), + IpcMode: IpcMode(job.Getenv("IpcMode")), } job.GetenvJson("LxcConf", &hostConfig.LxcConf) diff --git a/runconfig/parse.go b/runconfig/parse.go index c62ab3fdd4..dfc84c1892 100644 --- a/runconfig/parse.go +++ b/runconfig/parse.go @@ -60,6 +60,7 @@ func Parse(cmd *flag.FlagSet, args []string, sysInfo *sysinfo.SysInfo) (*Config, flCpuset = cmd.String([]string{"-cpuset"}, "", "CPUs in which to allow execution (0-3, 0,1)") flNetMode = cmd.String([]string{"-net"}, "bridge", "Set the Network mode for the container\n'bridge': creates a new network stack for the container on the docker bridge\n'none': no networking for this container\n'container:': reuses another container network stack\n'host': use the host network stack inside the container. Note: the host mode gives the container full access to local system services such as D-bus and is therefore considered insecure.") flMacAddress = cmd.String([]string{"-mac-address"}, "", "Container MAC address (e.g. 92:d0:c6:0a:29:33)") + flIpcMode = cmd.String([]string{"-ipc"}, "", "Default is to create a private IPC namespace (POSIX SysV IPC) for the container\n'container:': reuses another container shared memory, semaphores and message queues\n'host': use the host shared memory,semaphores and message queues inside the container. Note: the host mode gives the container full access to local shared memory and is therefore considered insecure.") flRestartPolicy = cmd.String([]string{"-restart"}, "", "Restart policy to apply when a container exits (no, on-failure[:max-retry], always)") ) @@ -241,6 +242,11 @@ func Parse(cmd *flag.FlagSet, args []string, sysInfo *sysinfo.SysInfo) (*Config, // parse the '-e' and '--env' after, to allow override envVariables = append(envVariables, flEnv.GetAll()...) 
+ ipcMode := IpcMode(*flIpcMode) + if !ipcMode.Valid() { + return nil, nil, cmd, fmt.Errorf("--ipc: invalid IPC mode: %v", err) + } + netMode, err := parseNetMode(*flNetMode) if err != nil { return nil, nil, cmd, fmt.Errorf("--net: invalid net mode: %v", err) @@ -289,6 +295,7 @@ func Parse(cmd *flag.FlagSet, args []string, sysInfo *sysinfo.SysInfo) (*Config, ExtraHosts: flExtraHosts.GetAll(), VolumesFrom: flVolumesFrom.GetAll(), NetworkMode: netMode, + IpcMode: ipcMode, Devices: deviceMappings, CapAdd: flCapAdd.GetAll(), CapDrop: flCapDrop.GetAll(), From 28ee373e19bbfdf47c747f6fd9385a8a75f0a483 Mon Sep 17 00:00:00 2001 From: Johan Euphrosine Date: Fri, 31 Oct 2014 13:00:49 -0700 Subject: [PATCH 315/592] registry: default --insecure-registry to localhost and 127.0.0.1 Signed-off-by: Johan Euphrosine --- registry/endpoint.go | 12 +++++++++++- registry/registry_test.go | 21 +++++++++++++++++++++ 2 files changed, 32 insertions(+), 1 deletion(-) diff --git a/registry/endpoint.go b/registry/endpoint.go index 88dbeafd96..cb96cb4fc2 100644 --- a/registry/endpoint.go +++ b/registry/endpoint.go @@ -4,6 +4,7 @@ import ( "encoding/json" "fmt" "io/ioutil" + "net" "net/http" "net/url" "strings" @@ -154,7 +155,16 @@ func IsSecure(hostname string, insecureRegistries []string) bool { if hostname == IndexServerAddress() { return true } - + if len(insecureRegistries) == 0 { + host, _, err := net.SplitHostPort(hostname) + if err != nil { + host = hostname + } + if host == "127.0.0.1" || host == "localhost" { + return false + } + return true + } for _, h := range insecureRegistries { if hostname == h { return false diff --git a/registry/registry_test.go b/registry/registry_test.go index f7b5168b45..7191acea30 100644 --- a/registry/registry_test.go +++ b/registry/registry_test.go @@ -339,3 +339,24 @@ func TestIsSecure(t *testing.T) { } } } + +func TestIsSecure(t *testing.T) { + tests := []struct { + addr string + insecureRegistries []string + expected bool + }{ + {"localhost", []string{}, false}, + {"localhost:5000", []string{}, false}, + {"127.0.0.1", []string{}, false}, + {"localhost", []string{"example.com"}, true}, + {"127.0.0.1", []string{"example.com"}, true}, + {"example.com", []string{}, true}, + {"example.com", []string{"example.com"}, false}, + } + for _, tt := range tests { + if sec := IsSecure(tt.addr, tt.insecureRegistries); sec != tt.expected { + t.Errorf("IsSecure failed for %q %v, expected %v got %v", tt.addr, tt.insecureRegistries, tt.expected, sec) + } + } +} From 11380a109e53bc5f388b6212c12794609c0241eb Mon Sep 17 00:00:00 2001 From: Erik Hollensbe Date: Wed, 12 Nov 2014 09:08:45 -0800 Subject: [PATCH 316/592] registry: always treat 127.0.0.1 as insecure for all cases anytime anywhere Docker-DCO-1.1-Signed-off-by: Erik Hollensbe (github: erikh) --- registry/endpoint.go | 20 +++++++++++++------- registry/registry_test.go | 24 ++++++------------------ 2 files changed, 19 insertions(+), 25 deletions(-) diff --git a/registry/endpoint.go b/registry/endpoint.go index cb96cb4fc2..0d0749d7a2 100644 --- a/registry/endpoint.go +++ b/registry/endpoint.go @@ -152,19 +152,25 @@ func (e Endpoint) Ping() (RegistryInfo, error) { // IsSecure returns false if the provided hostname is part of the list of insecure registries. // Insecure registries accept HTTP and/or accept HTTPS with certificates from unknown CAs. 
func IsSecure(hostname string, insecureRegistries []string) bool { + if hostname == IndexServerAddress() { return true } + + host, _, err := net.SplitHostPort(hostname) + + if err != nil { + host = hostname + } + + if host == "127.0.0.1" || host == "localhost" { + return false + } + if len(insecureRegistries) == 0 { - host, _, err := net.SplitHostPort(hostname) - if err != nil { - host = hostname - } - if host == "127.0.0.1" || host == "localhost" { - return false - } return true } + for _, h := range insecureRegistries { if hostname == h { return false diff --git a/registry/registry_test.go b/registry/registry_test.go index 7191acea30..032c9fbf03 100644 --- a/registry/registry_test.go +++ b/registry/registry_test.go @@ -328,31 +328,19 @@ func TestIsSecure(t *testing.T) { }{ {"example.com", []string{}, true}, {"example.com", []string{"example.com"}, false}, - {"localhost", []string{"localhost:5000"}, true}, + {"localhost", []string{"localhost:5000"}, false}, {"localhost:5000", []string{"localhost:5000"}, false}, - {"localhost", []string{"example.com"}, true}, + {"localhost", []string{"example.com"}, false}, {"127.0.0.1:5000", []string{"127.0.0.1:5000"}, false}, - } - for _, tt := range tests { - if sec := IsSecure(tt.addr, tt.insecureRegistries); sec != tt.expected { - t.Errorf("IsSecure failed for %q %v, expected %v got %v", tt.addr, tt.insecureRegistries, tt.expected, sec) - } - } -} - -func TestIsSecure(t *testing.T) { - tests := []struct { - addr string - insecureRegistries []string - expected bool - }{ {"localhost", []string{}, false}, {"localhost:5000", []string{}, false}, {"127.0.0.1", []string{}, false}, - {"localhost", []string{"example.com"}, true}, - {"127.0.0.1", []string{"example.com"}, true}, + {"localhost", []string{"example.com"}, false}, + {"127.0.0.1", []string{"example.com"}, false}, {"example.com", []string{}, true}, {"example.com", []string{"example.com"}, false}, + {"127.0.0.1", []string{"example.com"}, false}, + {"127.0.0.1:5000", []string{"example.com"}, false}, } for _, tt := range tests { if sec := IsSecure(tt.addr, tt.insecureRegistries); sec != tt.expected { From 06bf79552d222ef6c4547ae8b0f3cb474a031c76 Mon Sep 17 00:00:00 2001 From: Vishnu Kannan Date: Wed, 12 Nov 2014 22:55:32 +0000 Subject: [PATCH 317/592] Adding Vish as a maintainer for daemon code. Docker-DCO-1.1-Signed-off-by: Vishnu Kannan (github: vishh) --- daemon/MAINTAINERS | 1 + 1 file changed, 1 insertion(+) diff --git a/daemon/MAINTAINERS b/daemon/MAINTAINERS index 434aad9d57..9360465f2d 100644 --- a/daemon/MAINTAINERS +++ b/daemon/MAINTAINERS @@ -3,4 +3,5 @@ Victor Vieux (@vieux) Michael Crosby (@crosbymichael) Cristian Staretu (@unclejack) Tibor Vass (@tiborvass) +Vishnu Kannan (@vishh) volumes.go: Brian Goff (@cpuguy83) From 460036cdefb3e452fd5e8d77724d0df55be31da5 Mon Sep 17 00:00:00 2001 From: Erik Hollensbe Date: Wed, 12 Nov 2014 02:31:49 -0800 Subject: [PATCH 318/592] Update CONTRIBUTING to reflect new PR-docs process. Docker-DCO-1.1-Signed-off-by: Erik Hollensbe (github: erikh) --- CONTRIBUTING.md | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index b6a05de26a..de6434c221 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -23,10 +23,11 @@ reports to [security@docker.com](mailto:security@docker.com) instead. 
When considering a design proposal, we are looking for: * A description of the problem this design proposal solves -* An issue -- not a pull request -- that describes what you will take action on +* A pull request, not an issue, that modifies the documentation describing + the feature you are proposing, adding new documentation if necessary. * Please prefix your issue with `Proposal:` in the title -* Please review [the existing Proposals](https://github.com/docker/docker/issues?direction=asc&labels=Proposal&page=1&sort=created&state=open) - before reporting a new issue. You can always pair with someone if you both +* Please review [the existing Proposals](https://github.com/docker/docker/pulls?q=is%3Aopen+is%3Apr+label%3AProposal) + before reporting a new one. You can always pair with someone if you both have the same idea. When considering a cleanup task, we are looking for: From ad6467f9e17205fa76a3b916efe51ba5c1b37506 Mon Sep 17 00:00:00 2001 From: Mike Snitzer Date: Thu, 30 Oct 2014 20:26:39 -0400 Subject: [PATCH 319/592] devmapper: use proper DM_UDEV_DISABLE_*_FLAG when creating the thin-pool Otherwise udev can unecessarily execute various rules (and issue scanning IO, etc) against the thin-pool -- which can never be a top-level device. Docker-DCO-1.1-Signed-off-by: Mike Snitzer (github: snitm) --- pkg/devicemapper/devmapper.go | 3 ++- pkg/devicemapper/devmapper_wrapper.go | 6 ++++++ 2 files changed, 8 insertions(+), 1 deletion(-) diff --git a/pkg/devicemapper/devmapper.go b/pkg/devicemapper/devmapper.go index c0b931c3fb..3de42ba9e8 100644 --- a/pkg/devicemapper/devmapper.go +++ b/pkg/devicemapper/devmapper.go @@ -361,7 +361,8 @@ func CreatePool(poolName string, dataFile, metadataFile *os.File, poolBlockSize } var cookie uint = 0 - if err := task.SetCookie(&cookie, 0); err != nil { + var flags uint16 = DmUdevDisableSubsystemRulesFlag | DmUdevDisableDiskRulesFlag | DmUdevDisableOtherRulesFlag + if err := task.SetCookie(&cookie, flags); err != nil { return fmt.Errorf("Can't set cookie %s", err) } diff --git a/pkg/devicemapper/devmapper_wrapper.go b/pkg/devicemapper/devmapper_wrapper.go index c7e96a1617..499405a10d 100644 --- a/pkg/devicemapper/devmapper_wrapper.go +++ b/pkg/devicemapper/devmapper_wrapper.go @@ -82,6 +82,12 @@ const ( LoNameSize = C.LO_NAME_SIZE ) +const ( + DmUdevDisableSubsystemRulesFlag = C.DM_UDEV_DISABLE_SUBSYSTEM_RULES_FLAG + DmUdevDisableDiskRulesFlag = C.DM_UDEV_DISABLE_DISK_RULES_FLAG + DmUdevDisableOtherRulesFlag = C.DM_UDEV_DISABLE_OTHER_RULES_FLAG +) + var ( DmGetLibraryVersion = dmGetLibraryVersionFct DmGetNextTarget = dmGetNextTargetFct From 2b10749cdd0939e4b9e6e18e160984129d733663 Mon Sep 17 00:00:00 2001 From: Mike Snitzer Date: Fri, 24 Oct 2014 19:25:24 -0400 Subject: [PATCH 320/592] devmapper: Add option for specifying an lvm2 created thin-pool device Ideally lvm2 would be used to create/manage the thin-pool volume that is then handed to docker to exclusively create/manage the thin and thin snapshot volumes needed for it's containers. Managing the thin-pool outside of docker makes for the most feature-rich method of having docker utilize device mapper thin provisioning as the backing storage for docker's containers. lvm2-based thin-pool management feature highlights include: automatic or interactive thin-pool resize support, dynamically change thin-pool features, automatic thinp metadata checking when lvm2 activates the thin-pool, etc. 
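One possible way to prepare such a thin-pool with lvm2 — a minimal sketch only,
where the physical volume, volume group and pool names are illustrative and not
part of this change:

```
pvcreate /dev/xvdf
vgcreate docker /dev/xvdf
lvcreate --type thin-pool -L 100G -n thinpool docker
# the pool is then visible as /dev/mapper/docker-thinpool and can be handed
# to the daemon with: --storage-opt dm.thinpooldev=/dev/mapper/docker-thinpool
```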
Docker will not activate/deactivate the specified thin-pool device but it will exclusively manage/create thin and thin snapshot volumes in it. Docker will not take ownership of the specified thin-pool device unless it has 0 data blocks used and a transaction id of 0. This should help guard against using a thin-pool that is already in use. Also fix typos in setupBaseImage() relative to the thin volume type of the base image. Docker-DCO-1.1-Signed-off-by: Mike Snitzer (github: snitm) --- daemon/graphdriver/devmapper/README.md | 19 ++++++++++++ daemon/graphdriver/devmapper/deviceset.go | 37 ++++++++++++++++++----- 2 files changed, 49 insertions(+), 7 deletions(-) diff --git a/daemon/graphdriver/devmapper/README.md b/daemon/graphdriver/devmapper/README.md index c42620247b..3b69cef84f 100644 --- a/daemon/graphdriver/devmapper/README.md +++ b/daemon/graphdriver/devmapper/README.md @@ -100,6 +100,25 @@ Here is the list of supported options: ``docker -d --storage-opt dm.mountopt=nodiscard`` + * `dm.thinpooldev` + + Specifies a custom blockdevice to use for the thin pool. + + If using a block device for device mapper storage, ideally lvm2 + would be used to create/manage the thin-pool volume that is then + handed to docker to exclusively create/manage the thin and thin + snapshot volumes needed for it's containers. Managing the thin-pool + outside of docker makes for the most feature-rich method of having + docker utilize device mapper thin provisioning as the backing + storage for docker's containers. lvm2-based thin-pool management + feature highlights include: automatic or interactive thin-pool + resize support, dynamically change thin-pool features, automatic + thinp metadata checking when lvm2 activates the thin-pool, etc. + + Example use: + + ``docker -d --storage-opt dm.thinpooldev=/dev/mapper/thin-pool`` + * `dm.datadev` Specifies a custom blockdevice to use for data for the thin pool. 
diff --git a/daemon/graphdriver/devmapper/deviceset.go b/daemon/graphdriver/devmapper/deviceset.go index 79d9dc6d06..0e94ca7a03 100644 --- a/daemon/graphdriver/devmapper/deviceset.go +++ b/daemon/graphdriver/devmapper/deviceset.go @@ -84,6 +84,7 @@ type DeviceSet struct { metadataDevice string doBlkDiscard bool thinpBlockSize uint32 + thinPoolDevice string } type DiskUsage struct { @@ -150,7 +151,11 @@ func (devices *DeviceSet) oldMetadataFile() string { } func (devices *DeviceSet) getPoolName() string { - return devices.devicePrefix + "-pool" + if devices.thinPoolDevice == "" { + return devices.devicePrefix + "-pool" + } else { + return devices.thinPoolDevice + } } func (devices *DeviceSet) getPoolDevName() string { @@ -411,7 +416,21 @@ func (devices *DeviceSet) setupBaseImage() error { } } - log.Debugf("Initializing base device-manager snapshot") + if devices.thinPoolDevice != "" && oldInfo == nil { + if _, transactionId, dataUsed, _, _, _, err := devices.poolStatus(); err != nil { + return err + } else { + if dataUsed != 0 { + return fmt.Errorf("Unable to take ownership of thin-pool (%s) that already has used data blocks", + devices.thinPoolDevice) + } else if transactionId != 0 { + return fmt.Errorf("Unable to take ownership of thin-pool (%s) with non-zero transaction Id", + devices.thinPoolDevice) + } + } + } + + log.Debugf("Initializing base device-mapper thin volume") id := devices.NextDeviceId @@ -430,7 +449,7 @@ func (devices *DeviceSet) setupBaseImage() error { return err } - log.Debugf("Creating filesystem on base device-manager snapshot") + log.Debugf("Creating filesystem on base device-mapper thin volume") if err = devices.activateDeviceIfNeeded(info); err != nil { return err @@ -605,7 +624,7 @@ func (devices *DeviceSet) initDevmapper(doInit bool) error { devices.devicePrefix = fmt.Sprintf("docker-%d:%d-%d", major(sysSt.Dev), minor(sysSt.Dev), sysSt.Ino) log.Debugf("Generated prefix: %s", devices.devicePrefix) - // Check for the existence of the device -pool + // Check for the existence of the thin-pool device log.Debugf("Checking for existence of the pool '%s'", devices.getPoolName()) info, err := devicemapper.GetInfo(devices.getPoolName()) if info == nil { @@ -624,7 +643,7 @@ func (devices *DeviceSet) initDevmapper(doInit bool) error { createdLoopback := false // If the pool doesn't exist, create it - if info.Exists == 0 { + if info.Exists == 0 && devices.thinPoolDevice == "" { log.Debugf("Pool doesn't exist. 
Creating it.") var ( @@ -988,8 +1007,10 @@ func (devices *DeviceSet) Shutdown() error { } devices.Lock() - if err := devices.deactivatePool(); err != nil { - log.Debugf("Shutdown deactivate pool , error: %s", err) + if devices.thinPoolDevice == "" { + if err := devices.deactivatePool(); err != nil { + log.Debugf("Shutdown deactivate pool , error: %s", err) + } } devices.saveDeviceSetMetaData() @@ -1275,6 +1296,8 @@ func NewDeviceSet(root string, doInit bool, options []string) (*DeviceSet, error devices.metadataDevice = val case "dm.datadev": devices.dataDevice = val + case "dm.thinpooldev": + devices.thinPoolDevice = strings.TrimPrefix(val, "/dev/mapper/") case "dm.blkdiscard": foundBlkDiscard = true devices.doBlkDiscard, err = strconv.ParseBool(val) From 4455f517605f1fd7279bbe9547915f15c037997d Mon Sep 17 00:00:00 2001 From: Tibor Vass Date: Tue, 11 Nov 2014 17:37:44 -0500 Subject: [PATCH 321/592] registry: refactor registry.IsSecure calls into registry.NewEndpoint Signed-off-by: Tibor Vass --- graph/pull.go | 4 +--- graph/push.go | 4 +--- registry/endpoint.go | 16 +++++++++------- registry/endpoint_test.go | 2 +- registry/registry_test.go | 4 ++-- registry/service.go | 6 ++---- 6 files changed, 16 insertions(+), 20 deletions(-) diff --git a/graph/pull.go b/graph/pull.go index a7394a4b92..716a27c909 100644 --- a/graph/pull.go +++ b/graph/pull.go @@ -113,9 +113,7 @@ func (s *TagStore) CmdPull(job *engine.Job) engine.Status { return job.Error(err) } - secure := registry.IsSecure(hostname, s.insecureRegistries) - - endpoint, err := registry.NewEndpoint(hostname, secure) + endpoint, err := registry.NewEndpoint(hostname, s.insecureRegistries) if err != nil { return job.Error(err) } diff --git a/graph/push.go b/graph/push.go index 4cda8914b3..29fc4a066d 100644 --- a/graph/push.go +++ b/graph/push.go @@ -214,9 +214,7 @@ func (s *TagStore) CmdPush(job *engine.Job) engine.Status { return job.Error(err) } - secure := registry.IsSecure(hostname, s.insecureRegistries) - - endpoint, err := registry.NewEndpoint(hostname, secure) + endpoint, err := registry.NewEndpoint(hostname, s.insecureRegistries) if err != nil { return job.Error(err) } diff --git a/registry/endpoint.go b/registry/endpoint.go index 0d0749d7a2..390eec2e6a 100644 --- a/registry/endpoint.go +++ b/registry/endpoint.go @@ -34,12 +34,15 @@ func scanForAPIVersion(hostname string) (string, APIVersion) { return hostname, DefaultAPIVersion } -func NewEndpoint(hostname string, secure bool) (*Endpoint, error) { - endpoint, err := newEndpoint(hostname, secure) +func NewEndpoint(hostname string, insecureRegistries []string) (*Endpoint, error) { + endpoint, err := newEndpoint(hostname) if err != nil { return nil, err } + secure := isSecure(endpoint.URL.Host, insecureRegistries) + endpoint.secure = secure + // Try HTTPS ping to registry endpoint.URL.Scheme = "https" if _, err := endpoint.Ping(); err != nil { @@ -65,9 +68,9 @@ func NewEndpoint(hostname string, secure bool) (*Endpoint, error) { return endpoint, nil } -func newEndpoint(hostname string, secure bool) (*Endpoint, error) { +func newEndpoint(hostname string) (*Endpoint, error) { var ( - endpoint = Endpoint{secure: secure} + endpoint = Endpoint{secure: true} trimmedHostname string err error ) @@ -149,10 +152,9 @@ func (e Endpoint) Ping() (RegistryInfo, error) { return info, nil } -// IsSecure returns false if the provided hostname is part of the list of insecure registries. +// isSecure returns false if the provided hostname is part of the list of insecure registries. 
// Insecure registries accept HTTP and/or accept HTTPS with certificates from unknown CAs. -func IsSecure(hostname string, insecureRegistries []string) bool { - +func isSecure(hostname string, insecureRegistries []string) bool { if hostname == IndexServerAddress() { return true } diff --git a/registry/endpoint_test.go b/registry/endpoint_test.go index def5e0d7ae..0ec1220d9c 100644 --- a/registry/endpoint_test.go +++ b/registry/endpoint_test.go @@ -12,7 +12,7 @@ func TestEndpointParse(t *testing.T) { {"0.0.0.0:5000", "https://0.0.0.0:5000/v1/"}, } for _, td := range testData { - e, err := newEndpoint(td.str, true) + e, err := newEndpoint(td.str) if err != nil { t.Errorf("%q: %s", td.str, err) } diff --git a/registry/registry_test.go b/registry/registry_test.go index 032c9fbf03..8bc6a35166 100644 --- a/registry/registry_test.go +++ b/registry/registry_test.go @@ -343,8 +343,8 @@ func TestIsSecure(t *testing.T) { {"127.0.0.1:5000", []string{"example.com"}, false}, } for _, tt := range tests { - if sec := IsSecure(tt.addr, tt.insecureRegistries); sec != tt.expected { - t.Errorf("IsSecure failed for %q %v, expected %v got %v", tt.addr, tt.insecureRegistries, tt.expected, sec) + if sec := isSecure(tt.addr, tt.insecureRegistries); sec != tt.expected { + t.Errorf("isSecure failed for %q %v, expected %v got %v", tt.addr, tt.insecureRegistries, tt.expected, sec) } } } diff --git a/registry/service.go b/registry/service.go index 7051d93430..53e8278b04 100644 --- a/registry/service.go +++ b/registry/service.go @@ -40,7 +40,7 @@ func (s *Service) Auth(job *engine.Job) engine.Status { job.GetenvJson("authConfig", authConfig) if addr := authConfig.ServerAddress; addr != "" && addr != IndexServerAddress() { - endpoint, err := NewEndpoint(addr, IsSecure(addr, s.insecureRegistries)) + endpoint, err := NewEndpoint(addr, s.insecureRegistries) if err != nil { return job.Error(err) } @@ -92,9 +92,7 @@ func (s *Service) Search(job *engine.Job) engine.Status { return job.Error(err) } - secure := IsSecure(hostname, s.insecureRegistries) - - endpoint, err := NewEndpoint(hostname, secure) + endpoint, err := NewEndpoint(hostname, s.insecureRegistries) if err != nil { return job.Error(err) } From 78e859f3c35d1f31e7d6f3ded9a414dc0fbb8eaa Mon Sep 17 00:00:00 2001 From: Tibor Vass Date: Tue, 11 Nov 2014 20:08:59 -0500 Subject: [PATCH 322/592] Put mock registry address in insecureRegistries for unit tests Signed-off-by: Tibor Vass --- registry/registry_mock_test.go | 10 ++++++++-- registry/registry_test.go | 4 ++-- 2 files changed, 10 insertions(+), 4 deletions(-) diff --git a/registry/registry_mock_test.go b/registry/registry_mock_test.go index 02884c6224..1c710e21e9 100644 --- a/registry/registry_mock_test.go +++ b/registry/registry_mock_test.go @@ -19,8 +19,9 @@ import ( ) var ( - testHTTPServer *httptest.Server - testLayers = map[string]map[string]string{ + testHTTPServer *httptest.Server + insecureRegistries []string + testLayers = map[string]map[string]string{ "77dbf71da1d00e3fbddc480176eac8994025630c6590d11cfc8fe1209c2a1d20": { "json": `{"id":"77dbf71da1d00e3fbddc480176eac8994025630c6590d11cfc8fe1209c2a1d20", "comment":"test base image","created":"2013-03-23T12:53:11.10432-07:00", @@ -100,6 +101,11 @@ func init() { r.HandleFunc("/v2/version", handlerGetPing).Methods("GET") testHTTPServer = httptest.NewServer(handlerAccessLog(r)) + URL, err := url.Parse(testHTTPServer.URL) + if err != nil { + panic(err) + } + insecureRegistries = []string{URL.Host} } func handlerAccessLog(handler http.Handler) http.Handler { diff 
--git a/registry/registry_test.go b/registry/registry_test.go index 8bc6a35166..37dedc2acc 100644 --- a/registry/registry_test.go +++ b/registry/registry_test.go @@ -21,7 +21,7 @@ const ( func spawnTestRegistrySession(t *testing.T) *Session { authConfig := &AuthConfig{} - endpoint, err := NewEndpoint(makeURL("/v1/"), false) + endpoint, err := NewEndpoint(makeURL("/v1/"), insecureRegistries) if err != nil { t.Fatal(err) } @@ -33,7 +33,7 @@ func spawnTestRegistrySession(t *testing.T) *Session { } func TestPingRegistryEndpoint(t *testing.T) { - ep, err := NewEndpoint(makeURL("/v1/"), false) + ep, err := NewEndpoint(makeURL("/v1/"), insecureRegistries) if err != nil { t.Fatal(err) } From 5fbfec333a90b86add1dcf6bd8d5fcb728d34cdf Mon Sep 17 00:00:00 2001 From: Victor Vieux Date: Thu, 13 Nov 2014 03:21:18 +0000 Subject: [PATCH 323/592] update timeout Signed-off-by: Victor Vieux --- api/client/cli.go | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/api/client/cli.go b/api/client/cli.go index 7a806d64bb..3455962bd2 100644 --- a/api/client/cli.go +++ b/api/client/cli.go @@ -141,14 +141,15 @@ func NewDockerCli(in io.ReadCloser, out, err io.Writer, key libtrust.PrivateKey, } // Why 32? See issue 8035 + timeout := 32 * time.Second if proto == "unix" { // no need in compressing for local communications tr.DisableCompression = true tr.Dial = func(dial_network, dial_addr string) (net.Conn, error) { - return net.DialTimeout(proto, addr, 32*time.Second) + return net.DialTimeout(proto, addr, timeout) } } else { - tr.Dial = (&net.Dialer{Timeout: 32 * time.Second}).Dial + tr.Dial = (&net.Dialer{Timeout: timeout}).Dial } return &DockerCli{ From 3c3968692d3024fde90637dbdfe56f08de100cdd Mon Sep 17 00:00:00 2001 From: Lei Jitang Date: Thu, 13 Nov 2014 16:21:34 +0800 Subject: [PATCH 324/592] Cleanup:change latest to graph.DEFAULTTAG Signed-off-by: Lei Jitang --- api/client/commands.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/api/client/commands.go b/api/client/commands.go index da29b28f3d..ad94a8d996 100644 --- a/api/client/commands.go +++ b/api/client/commands.go @@ -1235,7 +1235,7 @@ func (cli *DockerCli) CmdPull(args ...string) error { ) taglessRemote, tag := parsers.ParseRepositoryTag(remote) if tag == "" && !*allTags { - newRemote = taglessRemote + ":latest" + newRemote = taglessRemote + ":" + graph.DEFAULTTAG } if tag != "" && *allTags { return fmt.Errorf("tag can't be used with --all-tags/-a") @@ -2034,7 +2034,7 @@ func (cli *DockerCli) pullImageCustomOut(image string, out io.Writer) error { repos, tag := parsers.ParseRepositoryTag(image) // pull only the image tagged 'latest' if no tag was specified if tag == "" { - tag = "latest" + tag = graph.DEFAULTTAG } v.Set("fromImage", repos) v.Set("tag", tag) From e71f241c4b8006f097e4c63f7b3ea28d4591ddee Mon Sep 17 00:00:00 2001 From: Michal Minar Date: Thu, 13 Nov 2014 10:40:45 +0100 Subject: [PATCH 325/592] Corrected description of --sig-proxy Signal proxy does work only in non-TTY mode (--tty=false). Man pages and commands should not lie about it. 
Signed-off-by: Michal Minar --- api/client/commands.go | 4 ++-- contrib/completion/fish/docker.fish | 4 ++-- docs/man/docker-attach.1.md | 2 +- docs/man/docker-run.1.md | 2 +- docs/sources/reference/commandline/cli.md | 4 ++-- docs/sources/reference/run.md | 2 +- docs/sources/userguide/usingdocker.md | 2 +- 7 files changed, 10 insertions(+), 10 deletions(-) diff --git a/api/client/commands.go b/api/client/commands.go index da29b28f3d..b8d89ba1f7 100644 --- a/api/client/commands.go +++ b/api/client/commands.go @@ -1867,7 +1867,7 @@ func (cli *DockerCli) CmdAttach(args ...string) error { var ( cmd = cli.Subcmd("attach", "CONTAINER", "Attach to a running container") noStdin = cmd.Bool([]string{"#nostdin", "-no-stdin"}, false, "Do not attach STDIN") - proxy = cmd.Bool([]string{"#sig-proxy", "-sig-proxy"}, true, "Proxy all received signals to the process (even in non-TTY mode). SIGCHLD, SIGKILL, and SIGSTOP are not proxied.") + proxy = cmd.Bool([]string{"#sig-proxy", "-sig-proxy"}, true, "Proxy all received signals to the process (non-TTY mode only). SIGCHLD, SIGKILL, and SIGSTOP are not proxied.") ) if err := cmd.Parse(args); err != nil { @@ -2191,7 +2191,7 @@ func (cli *DockerCli) CmdRun(args ...string) error { var ( flAutoRemove = cmd.Bool([]string{"#rm", "-rm"}, false, "Automatically remove the container when it exits (incompatible with -d)") flDetach = cmd.Bool([]string{"d", "-detach"}, false, "Detached mode: run the container in the background and print the new container ID") - flSigProxy = cmd.Bool([]string{"#sig-proxy", "-sig-proxy"}, true, "Proxy received signals to the process (even in non-TTY mode). SIGCHLD, SIGSTOP, and SIGKILL are not proxied.") + flSigProxy = cmd.Bool([]string{"#sig-proxy", "-sig-proxy"}, true, "Proxy received signals to the process (non-TTY mode only). 
SIGCHLD, SIGSTOP, and SIGKILL are not proxied.") flName = cmd.String([]string{"#name", "-name"}, "", "Assign a name to the container") flAttach *opts.ListOpts diff --git a/contrib/completion/fish/docker.fish b/contrib/completion/fish/docker.fish index 48b0279cee..dd6fb2152a 100644 --- a/contrib/completion/fish/docker.fish +++ b/contrib/completion/fish/docker.fish @@ -67,7 +67,7 @@ complete -c docker -f -n '__fish_docker_no_subcommand' -s v -l version -d 'Print # attach complete -c docker -f -n '__fish_docker_no_subcommand' -a attach -d 'Attach to a running container' complete -c docker -A -f -n '__fish_seen_subcommand_from attach' -l no-stdin -d 'Do not attach stdin' -complete -c docker -A -f -n '__fish_seen_subcommand_from attach' -l sig-proxy -d 'Proxify all received signal to the process (even in non-tty mode)' +complete -c docker -A -f -n '__fish_seen_subcommand_from attach' -l sig-proxy -d 'Proxify all received signal to the process (non-TTY mode only)' complete -c docker -A -f -n '__fish_seen_subcommand_from attach' -a '(__fish_print_docker_containers running)' -d "Container" # build @@ -237,7 +237,7 @@ complete -c docker -A -f -n '__fish_seen_subcommand_from run' -l name -d 'Assign complete -c docker -A -f -n '__fish_seen_subcommand_from run' -s p -l publish -d "Publish a container's port to the host (format: ip:hostPort:containerPort | ip::containerPort | hostPort:containerPort) (use 'docker port' to see the actual mapping)" complete -c docker -A -f -n '__fish_seen_subcommand_from run' -l privileged -d 'Give extended privileges to this container' complete -c docker -A -f -n '__fish_seen_subcommand_from run' -l rm -d 'Automatically remove the container when it exits (incompatible with -d)' -complete -c docker -A -f -n '__fish_seen_subcommand_from run' -l sig-proxy -d 'Proxify all received signal to the process (even in non-tty mode)' +complete -c docker -A -f -n '__fish_seen_subcommand_from run' -l sig-proxy -d 'Proxify all received signal to the process (non-TTY mode only)' complete -c docker -A -f -n '__fish_seen_subcommand_from run' -s t -l tty -d 'Allocate a pseudo-tty' complete -c docker -A -f -n '__fish_seen_subcommand_from run' -s u -l user -d 'Username or UID' complete -c docker -A -f -n '__fish_seen_subcommand_from run' -s v -l volume -d 'Bind mount a volume (e.g. from the host: -v /host:/container, from docker: -v /container)' diff --git a/docs/man/docker-attach.1.md b/docs/man/docker-attach.1.md index 7deda6c75e..78fdac60a8 100644 --- a/docs/man/docker-attach.1.md +++ b/docs/man/docker-attach.1.md @@ -25,7 +25,7 @@ the client. Do not attach STDIN. The default is *false*. **--sig-proxy**=*true*|*false* - Proxy all received signals to the process (even in non-TTY mode). SIGCHLD, SIGKILL, and SIGSTOP are not proxied. The default is *true*. + Proxy all received signals to the process (non-TTY mode only). SIGCHLD, SIGKILL, and SIGSTOP are not proxied. The default is *true*. # EXAMPLES diff --git a/docs/man/docker-run.1.md b/docs/man/docker-run.1.md index ff3dac17b0..6564d25b74 100644 --- a/docs/man/docker-run.1.md +++ b/docs/man/docker-run.1.md @@ -225,7 +225,7 @@ outside of a container on the host. Automatically remove the container when it exits (incompatible with -d). The default is *false*. **--sig-proxy**=*true*|*false* - Proxy received signals to the process (even in non-TTY mode). SIGCHLD, SIGSTOP, and SIGKILL are not proxied. The default is *true*. + Proxy received signals to the process (non-TTY mode only). SIGCHLD, SIGSTOP, and SIGKILL are not proxied. 
The default is *true*. **-t**, **--tty**=*true*|*false* When set to true Docker can allocate a pseudo-tty and attach to the standard diff --git a/docs/sources/reference/commandline/cli.md b/docs/sources/reference/commandline/cli.md index e0a7b2328a..f3ffc3f90f 100644 --- a/docs/sources/reference/commandline/cli.md +++ b/docs/sources/reference/commandline/cli.md @@ -227,7 +227,7 @@ Docker supports softlinks for the Docker data directory Attach to a running container --no-stdin=false Do not attach STDIN - --sig-proxy=true Proxy all received signals to the process (even in non-TTY mode). SIGCHLD, SIGKILL, and SIGSTOP are not proxied. + --sig-proxy=true Proxy all received signals to the process (non-TTY mode only). SIGCHLD, SIGKILL, and SIGSTOP are not proxied. The `attach` command lets you view or interact with any running container's primary process (`pid 1`). @@ -1243,7 +1243,7 @@ removed before the image is removed. --privileged=false Give extended privileges to this container --restart="" Restart policy to apply when a container exits (no, on-failure[:max-retry], always) --rm=false Automatically remove the container when it exits (incompatible with -d) - --sig-proxy=true Proxy received signals to the process (even in non-TTY mode). SIGCHLD, SIGSTOP, and SIGKILL are not proxied. + --sig-proxy=true Proxy received signals to the process (non-TTY mode only). SIGCHLD, SIGSTOP, and SIGKILL are not proxied. -t, --tty=false Allocate a pseudo-TTY -u, --user="" Username or UID -v, --volume=[] Bind mount a volume (e.g., from the host: -v /host:/container, from Docker: -v /container) diff --git a/docs/sources/reference/run.md b/docs/sources/reference/run.md index 1abb7d0575..b15c4dd309 100644 --- a/docs/sources/reference/run.md +++ b/docs/sources/reference/run.md @@ -82,7 +82,7 @@ and pass along signals. 
All of that is configurable: -a=[] : Attach to `STDIN`, `STDOUT` and/or `STDERR` -t=false : Allocate a pseudo-tty - --sig-proxy=true: Proxify all received signal to the process (even in non-tty mode) + --sig-proxy=true: Proxify all received signal to the process (non-TTY mode only) -i=false : Keep STDIN open even if not attached If you do not specify `-a` then Docker will [attach all standard diff --git a/docs/sources/userguide/usingdocker.md b/docs/sources/userguide/usingdocker.md index e6564d588c..865f446bd0 100644 --- a/docs/sources/userguide/usingdocker.md +++ b/docs/sources/userguide/usingdocker.md @@ -85,7 +85,7 @@ This will display the help text and all available flags: Attach to a running container --no-stdin=false: Do not attach stdin - --sig-proxy=true: Proxify all received signal to the process (even in non-tty mode) + --sig-proxy=true: Proxify all received signal to the process (non-TTY mode only) > **Note:** > You can see a full list of Docker's commands From fbe10c83d81843412fd3485a8d6bb75849de97d4 Mon Sep 17 00:00:00 2001 From: Tibor Vass Date: Thu, 13 Nov 2014 06:56:36 -0800 Subject: [PATCH 326/592] registry: parse INDEXSERVERADDRESS into a URL for easier check in isSecure Signed-off-by: Tibor Vass --- registry/auth.go | 10 ++++++++++ registry/endpoint.go | 14 ++++++-------- registry/endpoint_test.go | 2 +- registry/registry_test.go | 1 + 4 files changed, 18 insertions(+), 9 deletions(-) diff --git a/registry/auth.go b/registry/auth.go index 1b11179533..a22d0b881f 100644 --- a/registry/auth.go +++ b/registry/auth.go @@ -7,6 +7,7 @@ import ( "fmt" "io/ioutil" "net/http" + "net/url" "os" "path" "strings" @@ -27,8 +28,17 @@ const ( var ( ErrConfigFileMissing = errors.New("The Auth config file is missing") + IndexServerURL *url.URL ) +func init() { + url, err := url.Parse(INDEXSERVER) + if err != nil { + panic(err) + } + IndexServerURL = url +} + type AuthConfig struct { Username string `json:"username,omitempty"` Password string `json:"password,omitempty"` diff --git a/registry/endpoint.go b/registry/endpoint.go index 390eec2e6a..bd23c30299 100644 --- a/registry/endpoint.go +++ b/registry/endpoint.go @@ -35,21 +35,18 @@ func scanForAPIVersion(hostname string) (string, APIVersion) { } func NewEndpoint(hostname string, insecureRegistries []string) (*Endpoint, error) { - endpoint, err := newEndpoint(hostname) + endpoint, err := newEndpoint(hostname, insecureRegistries) if err != nil { return nil, err } - secure := isSecure(endpoint.URL.Host, insecureRegistries) - endpoint.secure = secure - // Try HTTPS ping to registry endpoint.URL.Scheme = "https" if _, err := endpoint.Ping(); err != nil { //TODO: triggering highland build can be done there without "failing" - if secure { + if endpoint.secure { // If registry is secure and HTTPS failed, show user the error and tell them about `--insecure-registry` // in case that's what they need. DO NOT accept unknown CA certificates, and DO NOT fallback to HTTP. return nil, fmt.Errorf("Invalid registry endpoint %s: %v. If this private registry supports only HTTP or HTTPS with an unknown CA certificate, please add `--insecure-registry %s` to the daemon's arguments. 
In the case of HTTPS, if you have access to the registry's CA certificate, no need for the flag; simply place the CA certificate at /etc/docker/certs.d/%s/ca.crt", endpoint, err, endpoint.URL.Host, endpoint.URL.Host) @@ -68,9 +65,9 @@ func NewEndpoint(hostname string, insecureRegistries []string) (*Endpoint, error return endpoint, nil } -func newEndpoint(hostname string) (*Endpoint, error) { +func newEndpoint(hostname string, insecureRegistries []string) (*Endpoint, error) { var ( - endpoint = Endpoint{secure: true} + endpoint = Endpoint{} trimmedHostname string err error ) @@ -82,6 +79,7 @@ func newEndpoint(hostname string) (*Endpoint, error) { if err != nil { return nil, err } + endpoint.secure = isSecure(endpoint.URL.Host, insecureRegistries) return &endpoint, nil } @@ -155,7 +153,7 @@ func (e Endpoint) Ping() (RegistryInfo, error) { // isSecure returns false if the provided hostname is part of the list of insecure registries. // Insecure registries accept HTTP and/or accept HTTPS with certificates from unknown CAs. func isSecure(hostname string, insecureRegistries []string) bool { - if hostname == IndexServerAddress() { + if hostname == IndexServerURL.Host { return true } diff --git a/registry/endpoint_test.go b/registry/endpoint_test.go index 0ec1220d9c..54105ec174 100644 --- a/registry/endpoint_test.go +++ b/registry/endpoint_test.go @@ -12,7 +12,7 @@ func TestEndpointParse(t *testing.T) { {"0.0.0.0:5000", "https://0.0.0.0:5000/v1/"}, } for _, td := range testData { - e, err := newEndpoint(td.str) + e, err := newEndpoint(td.str, insecureRegistries) if err != nil { t.Errorf("%q: %s", td.str, err) } diff --git a/registry/registry_test.go b/registry/registry_test.go index 37dedc2acc..3e0950efe0 100644 --- a/registry/registry_test.go +++ b/registry/registry_test.go @@ -326,6 +326,7 @@ func TestIsSecure(t *testing.T) { insecureRegistries []string expected bool }{ + {IndexServerURL.Host, nil, true}, {"example.com", []string{}, true}, {"example.com", []string{"example.com"}, false}, {"localhost", []string{"localhost:5000"}, false}, From a01f1e707eb682ec60d489a4171d2c82de79ee57 Mon Sep 17 00:00:00 2001 From: Sami Wagiaalla Date: Wed, 12 Nov 2014 16:55:34 -0500 Subject: [PATCH 327/592] Remove reference to 'ifaceName' from configureBridge comment. The argument ifaceName was removed in a much earlier commit. Signed-off-by: Sami Wagiaalla --- daemon/networkdriver/bridge/driver.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/daemon/networkdriver/bridge/driver.go b/daemon/networkdriver/bridge/driver.go index 5d0040a8e7..663a362e42 100644 --- a/daemon/networkdriver/bridge/driver.go +++ b/daemon/networkdriver/bridge/driver.go @@ -253,9 +253,9 @@ func setupIPTables(addr net.Addr, icc, ipmasq bool) error { return nil } -// configureBridge attempts to create and configure a network bridge interface named `ifaceName` on the host +// configureBridge attempts to create and configure a network bridge interface named `bridgeIface` on the host // If bridgeIP is empty, it will try to find a non-conflicting IP from the Docker-specified private ranges -// If the bridge `ifaceName` already exists, it will only perform the IP address association with the existing +// If the bridge `bridgeIface` already exists, it will only perform the IP address association with the existing // bridge (fixes issue #8444) // If an address which doesn't conflict with existing interfaces can't be found, an error is returned. 
func configureBridge(bridgeIP string) error { From 6d97339ca23ada27812572016ad4ff9ccffa8b09 Mon Sep 17 00:00:00 2001 From: Tonis Tiigi Date: Thu, 13 Nov 2014 19:57:28 +0200 Subject: [PATCH 328/592] Fix AUFS silent mount errors on many layers MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Fixes #1171 Fixes #6465 Data passed to mount(2) is clipped to PAGE_SIZE if its bigger. Previous implementation checked if error was returned and then started to append layers one by one. But if the PAGE_SIZE clipping appeared in between the paths, in the permission sections or in xino definition the call would not error and remaining layers would just be skipped(or some other unknown situation). This also optimizes system calls as it tries to mount as much as possible with the first mount. Signed-off-by: Tõnis Tiigi (github: tonistiigi) --- daemon/graphdriver/aufs/aufs.go | 65 +++++++++++++++------------- daemon/graphdriver/aufs/aufs_test.go | 32 ++++++++++++-- 2 files changed, 64 insertions(+), 33 deletions(-) diff --git a/daemon/graphdriver/aufs/aufs.go b/daemon/graphdriver/aufs/aufs.go index 4da42cd233..da3c720d16 100644 --- a/daemon/graphdriver/aufs/aufs.go +++ b/daemon/graphdriver/aufs/aufs.go @@ -412,39 +412,44 @@ func (a *Driver) aufsMount(ro []string, rw, target, mountLabel string) (err erro } }() - if err = a.tryMount(ro, rw, target, mountLabel); err != nil { - if err = a.mountRw(rw, target, mountLabel); err != nil { - return - } + // Mount options are clipped to page size(4096 bytes). If there are more + // layers then these are remounted individually using append. - for _, layer := range ro { - data := label.FormatMountLabel(fmt.Sprintf("append:%s=ro+wh", layer), mountLabel) - if err = mount("none", target, "aufs", MsRemount, data); err != nil { - return + b := make([]byte, syscall.Getpagesize()-len(mountLabel)-50) // room for xino & mountLabel + bp := copy(b, fmt.Sprintf("br:%s=rw", rw)) + + firstMount := true + i := 0 + + for { + for ; i < len(ro); i++ { + layer := fmt.Sprintf(":%s=ro+wh", ro[i]) + + if firstMount { + if bp+len(layer) > len(b) { + break + } + bp += copy(b[bp:], layer) + } else { + data := label.FormatMountLabel(fmt.Sprintf("append%s", layer), mountLabel) + if err = mount("none", target, "aufs", MsRemount, data); err != nil { + return + } } } + + if firstMount { + data := label.FormatMountLabel(fmt.Sprintf("%s,xino=/dev/shm/aufs.xino", string(b[:bp])), mountLabel) + if err = mount("none", target, "aufs", 0, data); err != nil { + return + } + firstMount = false + } + + if i == len(ro) { + break + } } + return } - -// Try to mount using the aufs fast path, if this fails then -// append ro layers. 
-func (a *Driver) tryMount(ro []string, rw, target, mountLabel string) (err error) { - var ( - rwBranch = fmt.Sprintf("%s=rw", rw) - roBranches = fmt.Sprintf("%s=ro+wh:", strings.Join(ro, "=ro+wh:")) - data = label.FormatMountLabel(fmt.Sprintf("br:%v:%v,xino=/dev/shm/aufs.xino", rwBranch, roBranches), mountLabel) - ) - return mount("none", target, "aufs", 0, data) -} - -func (a *Driver) mountRw(rw, target, mountLabel string) error { - data := label.FormatMountLabel(fmt.Sprintf("br:%s,xino=/dev/shm/aufs.xino", rw), mountLabel) - return mount("none", target, "aufs", 0, data) -} - -func rollbackMount(target string, err error) { - if err != nil { - Unmount(target) - } -} diff --git a/daemon/graphdriver/aufs/aufs_test.go b/daemon/graphdriver/aufs/aufs_test.go index cc5b3a2030..971d448af8 100644 --- a/daemon/graphdriver/aufs/aufs_test.go +++ b/daemon/graphdriver/aufs/aufs_test.go @@ -635,9 +635,13 @@ func hash(c string) string { return hex.EncodeToString(h.Sum(nil)) } -func TestMountMoreThan42Layers(t *testing.T) { - d := newDriver(t) - defer os.RemoveAll(tmp) +func testMountMoreThan42Layers(t *testing.T, mountPath string) { + if err := os.MkdirAll(mountPath, 0755); err != nil { + t.Fatal(err) + } + + d := testInit(mountPath, t).(*Driver) + defer os.RemoveAll(mountPath) defer d.Cleanup() var last string var expected int @@ -695,3 +699,25 @@ func TestMountMoreThan42Layers(t *testing.T) { t.Fatalf("Expected %d got %d", expected, len(files)) } } + +func TestMountMoreThan42Layers(t *testing.T) { + testMountMoreThan42Layers(t, tmp) +} + +func TestMountMoreThan42LayersMatchingPathLength(t *testing.T) { + tmp := "aufs-tests" + for { + // This finds a mount path so that when combined into aufs mount options + // 4096 byte boundary would be in between the paths or in permission + // section. For '/tmp' it will use '/tmp/aufs-tests00000000/aufs' + mountPath := path.Join(os.TempDir(), tmp, "aufs") + pathLength := 77 + len(mountPath) + + if mod := 4095 % pathLength; mod == 0 || mod > pathLength-2 { + t.Logf("Using path: %s", mountPath) + testMountMoreThan42Layers(t, mountPath) + return + } + tmp += "0" + } +} From e49567ba729001c31fe71e4b715eed8f50d7ded9 Mon Sep 17 00:00:00 2001 From: Mike Snitzer Date: Thu, 13 Nov 2014 13:37:47 -0500 Subject: [PATCH 329/592] devmapper: disable discards by default if dm.thinpooldev was specified User may still enable discards by setting dm.blkdiscard=true Docker-DCO-1.1-Signed-off-by: Mike Snitzer (github: snitm) --- daemon/graphdriver/devmapper/deviceset.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/daemon/graphdriver/devmapper/deviceset.go b/daemon/graphdriver/devmapper/deviceset.go index 0e94ca7a03..f28dc982bd 100644 --- a/daemon/graphdriver/devmapper/deviceset.go +++ b/daemon/graphdriver/devmapper/deviceset.go @@ -1317,7 +1317,7 @@ func NewDeviceSet(root string, doInit bool, options []string) (*DeviceSet, error } // By default, don't do blk discard hack on raw devices, its rarely useful and is expensive - if !foundBlkDiscard && devices.dataDevice != "" { + if !foundBlkDiscard && (devices.dataDevice != "" || devices.thinPoolDevice != "") { devices.doBlkDiscard = false } From 912b0f0f73346bf93c4feb32c84c62c18ee62dbc Mon Sep 17 00:00:00 2001 From: Doug Davis Date: Thu, 13 Nov 2014 11:06:50 -0800 Subject: [PATCH 330/592] Allow a few more DOCKER_* env vars to pass thru Makefile I was trying to just build the Docker client but DOCKER_CLIENTONLY wasn't getting passed thru from the shell to the container building docker. 
So, this PR passes this var (via the -e option) on the docker run command so we pick it up from the devs shell when running "make ...". While in there I pulled all of the "-e" options into a new Makefile variable so its easy to see just the list of env vars we pass along. Signed-off-by: Doug Davis --- Makefile | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/Makefile b/Makefile index 2d1c79ac2e..b3baca8e03 100644 --- a/Makefile +++ b/Makefile @@ -12,7 +12,10 @@ DOCKER_IMAGE := docker$(if $(GIT_BRANCH),:$(GIT_BRANCH)) DOCKER_DOCS_IMAGE := docker-docs$(if $(GIT_BRANCH),:$(GIT_BRANCH)) DOCKER_MOUNT := $(if $(BINDDIR),-v "$(CURDIR)/$(BINDDIR):/go/src/github.com/docker/docker/$(BINDDIR)") -DOCKER_RUN_DOCKER := docker run --rm -it --privileged -e TIMEOUT -e BUILDFLAGS -e TESTFLAGS -e TESTDIRS -e DOCKER_GRAPHDRIVER -e DOCKER_EXECDRIVER $(DOCKER_MOUNT) "$(DOCKER_IMAGE)" +DOCKER_ENVS := -e TIMEOUT -e BUILDFLAGS -e TESTFLAGS \ + -e TESTDIRS -e DOCKER_GRAPHDRIVER -e DOCKER_EXECDRIVER \ + -e DOCKER_CLIENTONLY +DOCKER_RUN_DOCKER := docker run --rm -it --privileged $(DOCKER_ENVS) $(DOCKER_MOUNT) "$(DOCKER_IMAGE)" # to allow `make DOCSDIR=docs docs-shell` DOCKER_RUN_DOCS := docker run --rm -it $(if $(DOCSDIR),-v $(CURDIR)/$(DOCSDIR):/$(DOCSDIR)) -e AWS_S3_BUCKET From 25154682a5cd57aa4fc3ef88baeee3ce1f204060 Mon Sep 17 00:00:00 2001 From: Vincent Batts Date: Thu, 6 Nov 2014 15:56:13 -0500 Subject: [PATCH 331/592] btrfs: build tag to enable showing version info be default it is on, with build tags to disable the version info Signed-off-by: Vincent Batts --- daemon/graphdriver/btrfs/btrfs.go | 10 +++++++--- daemon/graphdriver/btrfs/version.go | 11 ++++++++++- daemon/graphdriver/btrfs/version_none.go | 13 +++++++++++++ hack/PACKAGERS.md | 6 ++++++ 4 files changed, 36 insertions(+), 4 deletions(-) create mode 100644 daemon/graphdriver/btrfs/version_none.go diff --git a/daemon/graphdriver/btrfs/btrfs.go b/daemon/graphdriver/btrfs/btrfs.go index ef77ae9158..a3964b963c 100644 --- a/daemon/graphdriver/btrfs/btrfs.go +++ b/daemon/graphdriver/btrfs/btrfs.go @@ -60,10 +60,14 @@ func (d *Driver) String() string { } func (d *Driver) Status() [][2]string { - return [][2]string{ - {"Build Version", BtrfsBuildVersion()}, - {"Library Version", fmt.Sprintf("%d", BtrfsLibVersion())}, + status := [][2]string{} + if bv := BtrfsBuildVersion(); bv != "-" { + status = append(status, [2]string{"Build Version", bv}) } + if lv := BtrfsLibVersion(); lv != -1 { + status = append(status, [2]string{"Library Version", fmt.Sprintf("%d", lv)}) + } + return status } func (d *Driver) Cleanup() error { diff --git a/daemon/graphdriver/btrfs/version.go b/daemon/graphdriver/btrfs/version.go index 1b2b148c07..89ed85749d 100644 --- a/daemon/graphdriver/btrfs/version.go +++ b/daemon/graphdriver/btrfs/version.go @@ -1,9 +1,18 @@ -// +build linux +// +build linux,!btrfs_noversion package btrfs /* #include + +// because around version 3.16, they did not define lib version yet +int my_btrfs_lib_version() { +#ifdef BTRFS_LIB_VERSION + return BTRFS_LIB_VERSION; +#else + return -1; +#endif +} */ import "C" diff --git a/daemon/graphdriver/btrfs/version_none.go b/daemon/graphdriver/btrfs/version_none.go new file mode 100644 index 0000000000..69a4e51cf8 --- /dev/null +++ b/daemon/graphdriver/btrfs/version_none.go @@ -0,0 +1,13 @@ +// +build linux,btrfs_noversion + +package btrfs + +// TODO(vbatts) remove this work-around once supported linux distros are on +// btrfs utililties of >= 3.16.1 + +func BtrfsBuildVersion() string { + return 
"-" +} +func BtrfsLibVersion() int { + return -1 +} diff --git a/hack/PACKAGERS.md b/hack/PACKAGERS.md index 265f7d676b..65bf60cd03 100644 --- a/hack/PACKAGERS.md +++ b/hack/PACKAGERS.md @@ -162,6 +162,12 @@ SELinux, you will need to use the `selinux` build tag: export DOCKER_BUILDTAGS='selinux' ``` +If your version of btrfs-progs is < 3.16.1 (also called btrfs-tools), then you +will need the following tag to not check for btrfs version headers: +```bash +export DOCKER_BUILDTAGS='btrfs_noversion' +``` + There are build tags for disabling graphdrivers as well. By default, support for all graphdrivers are built in. From d7c37b5a28de6e7c0a9270815c092a45d8d7fef7 Mon Sep 17 00:00:00 2001 From: Vincent Batts Date: Thu, 13 Nov 2014 16:25:10 -0500 Subject: [PATCH 332/592] Dockerfile: buildtags for old btrfs Since the build uses ubuntu 14.04, which has an old btrfs, include the buildtags needed for this old version to not break the build. Signed-off-by: Vincent Batts --- Dockerfile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Dockerfile b/Dockerfile index e1c6236da8..43e15b31d4 100644 --- a/Dockerfile +++ b/Dockerfile @@ -103,7 +103,7 @@ RUN useradd --create-home --gid docker unprivilegeduser VOLUME /var/lib/docker WORKDIR /go/src/github.com/docker/docker -ENV DOCKER_BUILDTAGS apparmor selinux +ENV DOCKER_BUILDTAGS apparmor selinux btrfs_noversion # Wrap all commands in the "docker-in-docker" script to allow nested containers ENTRYPOINT ["hack/dind"] From cbf2879349bd6cc3b43f89733a9b81f55bd0c495 Mon Sep 17 00:00:00 2001 From: pixelistik Date: Thu, 13 Nov 2014 23:26:49 +0100 Subject: [PATCH 333/592] Fix typo "infortmation" in docs for `docker run` --- docs/sources/reference/commandline/cli.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/sources/reference/commandline/cli.md b/docs/sources/reference/commandline/cli.md index e0a7b2328a..df3269d395 100644 --- a/docs/sources/reference/commandline/cli.md +++ b/docs/sources/reference/commandline/cli.md @@ -1257,7 +1257,7 @@ specified image, and then `starts` it using the specified command. That is, previous changes intact using `docker start`. See `docker ps -a` to view a list of all containers. -There is detailed infortmation about `docker run` in the [Docker run reference]( +There is detailed information about `docker run` in the [Docker run reference]( /reference/run/). 
The `docker run` command can be used in combination with `docker commit` to From 4f5be9da86ffed40b810bb1840c19526a9156ab3 Mon Sep 17 00:00:00 2001 From: Victor Vieux Date: Thu, 13 Nov 2014 01:32:38 +0000 Subject: [PATCH 334/592] Do not display empty lines in docker info if the key doesn't exists Signed-off-by: Victor Vieux --- api/client/commands.go | 60 ++++++++++++++++++++++++++++-------------- 1 file changed, 40 insertions(+), 20 deletions(-) diff --git a/api/client/commands.go b/api/client/commands.go index da29b28f3d..1e4bc6f8b8 100644 --- a/api/client/commands.go +++ b/api/client/commands.go @@ -473,20 +473,33 @@ func (cli *DockerCli) CmdInfo(args ...string) error { } out.Close() - fmt.Fprintf(cli.out, "Containers: %d\n", remoteInfo.GetInt("Containers")) - fmt.Fprintf(cli.out, "Images: %d\n", remoteInfo.GetInt("Images")) - fmt.Fprintf(cli.out, "Storage Driver: %s\n", remoteInfo.Get("Driver")) - var driverStatus [][2]string - if err := remoteInfo.GetJson("DriverStatus", &driverStatus); err != nil { - return err + if remoteInfo.Exists("Containers") { + fmt.Fprintf(cli.out, "Containers: %d\n", remoteInfo.GetInt("Containers")) } - for _, pair := range driverStatus { - fmt.Fprintf(cli.out, " %s: %s\n", pair[0], pair[1]) + if remoteInfo.Exists("Images") { + fmt.Fprintf(cli.out, "Images: %d\n", remoteInfo.GetInt("Images")) + } + if remoteInfo.Exists("Driver") { + fmt.Fprintf(cli.out, "Storage Driver: %s\n", remoteInfo.Get("Driver")) + } + if remoteInfo.Exists("DriverStatus") { + var driverStatus [][2]string + if err := remoteInfo.GetJson("DriverStatus", &driverStatus); err != nil { + return err + } + for _, pair := range driverStatus { + fmt.Fprintf(cli.out, " %s: %s\n", pair[0], pair[1]) + } + } + if remoteInfo.Exists("ExecutionDriver") { + fmt.Fprintf(cli.out, "Execution Driver: %s\n", remoteInfo.Get("ExecutionDriver")) + } + if remoteInfo.Exists("KernelVersion") { + fmt.Fprintf(cli.out, "Kernel Version: %s\n", remoteInfo.Get("KernelVersion")) + } + if remoteInfo.Exists("OperatingSystem") { + fmt.Fprintf(cli.out, "Operating System: %s\n", remoteInfo.Get("OperatingSystem")) } - fmt.Fprintf(cli.out, "Execution Driver: %s\n", remoteInfo.Get("ExecutionDriver")) - fmt.Fprintf(cli.out, "Kernel Version: %s\n", remoteInfo.Get("KernelVersion")) - fmt.Fprintf(cli.out, "Operating System: %s\n", remoteInfo.Get("OperatingSystem")) - if remoteInfo.Exists("NCPU") { fmt.Fprintf(cli.out, "CPUs: %d\n", remoteInfo.GetInt("NCPU")) } @@ -495,12 +508,19 @@ func (cli *DockerCli) CmdInfo(args ...string) error { } if remoteInfo.GetBool("Debug") || os.Getenv("DEBUG") != "" { - fmt.Fprintf(cli.out, "Debug mode (server): %v\n", remoteInfo.GetBool("Debug")) + if remoteInfo.Exists("Debug") { + fmt.Fprintf(cli.out, "Debug mode (server): %v\n", remoteInfo.GetBool("Debug")) + } fmt.Fprintf(cli.out, "Debug mode (client): %v\n", os.Getenv("DEBUG") != "") - fmt.Fprintf(cli.out, "Fds: %d\n", remoteInfo.GetInt("NFd")) - fmt.Fprintf(cli.out, "Goroutines: %d\n", remoteInfo.GetInt("NGoroutines")) - fmt.Fprintf(cli.out, "EventsListeners: %d\n", remoteInfo.GetInt("NEventsListener")) - + if remoteInfo.Exists("NFd") { + fmt.Fprintf(cli.out, "Fds: %d\n", remoteInfo.GetInt("NFd")) + } + if remoteInfo.Exists("NGoroutines") { + fmt.Fprintf(cli.out, "Goroutines: %d\n", remoteInfo.GetInt("NGoroutines")) + } + if remoteInfo.Exists("NEventsListener") { + fmt.Fprintf(cli.out, "EventsListeners: %d\n", remoteInfo.GetInt("NEventsListener")) + } if initSha1 := remoteInfo.Get("InitSha1"); initSha1 != "" { fmt.Fprintf(cli.out, "Init SHA1: %s\n", 
initSha1) } @@ -517,13 +537,13 @@ func (cli *DockerCli) CmdInfo(args ...string) error { fmt.Fprintf(cli.out, "Registry: %v\n", remoteInfo.GetList("IndexServerAddress")) } } - if !remoteInfo.GetBool("MemoryLimit") { + if remoteInfo.Exists("MemoryLimit") && !remoteInfo.GetBool("MemoryLimit") { fmt.Fprintf(cli.err, "WARNING: No memory limit support\n") } - if !remoteInfo.GetBool("SwapLimit") { + if remoteInfo.Exists("SwapLimit") && !remoteInfo.GetBool("SwapLimit") { fmt.Fprintf(cli.err, "WARNING: No swap limit support\n") } - if !remoteInfo.GetBool("IPv4Forwarding") { + if remoteInfo.Exists("IPv4Forwarding") && !remoteInfo.GetBool("IPv4Forwarding") { fmt.Fprintf(cli.err, "WARNING: IPv4 forwarding is disabled.\n") } return nil From c9a76622086cf6019b3ec71ae53fc71c0396a1ef Mon Sep 17 00:00:00 2001 From: Vincent Batts Date: Thu, 13 Nov 2014 17:20:24 -0500 Subject: [PATCH 335/592] pkg/devicemapper: cleanup removeDevice differences Fixes failure on RemoveDevice when host is AUFS, and running devicemapper test docker-in-docker https://gist.github.com/tonistiigi/59559cbfb3f2df26b29c Signed-off-by: Vincent Batts --- pkg/devicemapper/devmapper.go | 44 ++++++++++++++--------------------- 1 file changed, 18 insertions(+), 26 deletions(-) diff --git a/pkg/devicemapper/devmapper.go b/pkg/devicemapper/devmapper.go index c0b931c3fb..87f5a1f71c 100644 --- a/pkg/devicemapper/devmapper.go +++ b/pkg/devicemapper/devmapper.go @@ -62,7 +62,7 @@ var ( ErrSetDevDir = errors.New("dm_set_dev_dir failed") ErrGetLibraryVersion = errors.New("dm_get_library_version failed") ErrCreateRemoveTask = errors.New("Can't create task of type DeviceRemove") - ErrRunRemoveDevice = errors.New("running removeDevice failed") + ErrRunRemoveDevice = errors.New("running RemoveDevice failed") ErrInvalidAddNode = errors.New("Invalide AddNoce type") ErrGetLoopbackBackingFile = errors.New("Unable to get loopback backing file") ErrLoopbackSetCapacity = errors.New("Unable set loopback capacity") @@ -296,18 +296,27 @@ func GetLibraryVersion() (string, error) { // Useful helper for cleanup func RemoveDevice(name string) error { - // TODO(vbatts) just use the other removeDevice() - task := TaskCreate(DeviceRemove) + log.Debugf("[devmapper] RemoveDevice START") + defer log.Debugf("[devmapper] RemoveDevice END") + task, err := createTask(DeviceRemove, name) if task == nil { - return ErrCreateRemoveTask - } - if err := task.SetName(name); err != nil { - log.Debugf("Can't set task name %s", name) return err } - if err := task.Run(); err != nil { - return ErrRunRemoveDevice + + var cookie uint = 0 + if err := task.SetCookie(&cookie, 0); err != nil { + return fmt.Errorf("Can not set cookie: %s", err) } + + if err = task.Run(); err != nil { + if dmSawBusy { + return ErrBusy + } + return fmt.Errorf("Error running RemoveDevice %s", err) + } + + UdevWait(cookie) + return nil } @@ -568,23 +577,6 @@ func DeleteDevice(poolName string, deviceId int) error { return nil } -func removeDevice(name string) error { - log.Debugf("[devmapper] RemoveDevice START") - defer log.Debugf("[devmapper] RemoveDevice END") - task, err := createTask(DeviceRemove, name) - if task == nil { - return err - } - dmSawBusy = false - if err = task.Run(); err != nil { - if dmSawBusy { - return ErrBusy - } - return fmt.Errorf("Error running RemoveDevice %s", err) - } - return nil -} - func ActivateDevice(poolName string, name string, deviceId int, size uint64) error { task, err := createTask(DeviceCreate, name) if task == nil { From b4336803f35591c8e4d94b023c9d5e9525795520 Mon Sep 17 
00:00:00 2001 From: =?UTF-8?q?Hans=20R=C3=B8dtang?= Date: Fri, 14 Nov 2014 06:16:41 +0100 Subject: [PATCH 336/592] Updated cover tool import path. MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Signed-off-by: Hans Rødtang --- Dockerfile | 2 +- builder/parser/testfiles/docker/Dockerfile | 2 +- builder/parser/testfiles/docker/result | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/Dockerfile b/Dockerfile index e1c6236da8..c0ee7c0e2f 100644 --- a/Dockerfile +++ b/Dockerfile @@ -74,7 +74,7 @@ ENV GOARM 5 RUN cd /usr/local/go/src && bash -xc 'for platform in $DOCKER_CROSSPLATFORMS; do GOOS=${platform%/*} GOARCH=${platform##*/} ./make.bash --no-clean 2>&1; done' # Grab Go's cover tool for dead-simple code coverage testing -RUN go get code.google.com/p/go.tools/cmd/cover +RUN go get golang.org/x/tools/cmd/cover # TODO replace FPM with some very minimal debhelper stuff RUN gem install --no-rdoc --no-ri fpm --version 1.3.2 diff --git a/builder/parser/testfiles/docker/Dockerfile b/builder/parser/testfiles/docker/Dockerfile index 1c173126ae..de6ebca8f7 100644 --- a/builder/parser/testfiles/docker/Dockerfile +++ b/builder/parser/testfiles/docker/Dockerfile @@ -75,7 +75,7 @@ ENV GOARM 5 RUN cd /usr/local/go/src && bash -xc 'for platform in $DOCKER_CROSSPLATFORMS; do GOOS=${platform%/*} GOARCH=${platform##*/} ./make.bash --no-clean 2>&1; done' # Grab Go's cover tool for dead-simple code coverage testing -RUN go get code.google.com/p/go.tools/cmd/cover +RUN go get golang.org/x/tools/cmd/cover # TODO replace FPM with some very minimal debhelper stuff RUN gem install --no-rdoc --no-ri fpm --version 1.0.2 diff --git a/builder/parser/testfiles/docker/result b/builder/parser/testfiles/docker/result index 3ab006ec40..80f219ecb4 100644 --- a/builder/parser/testfiles/docker/result +++ b/builder/parser/testfiles/docker/result @@ -10,7 +10,7 @@ (env "DOCKER_CROSSPLATFORMS" "linux/386 linux/arm darwin/amd64 darwin/386 freebsd/amd64 freebsd/386 freebsd/arm") (env "GOARM" "5") (run "cd /usr/local/go/src && bash -xc 'for platform in $DOCKER_CROSSPLATFORMS; do GOOS=${platform%/*} GOARCH=${platform##*/} ./make.bash --no-clean 2>&1; done'") -(run "go get code.google.com/p/go.tools/cmd/cover") +(run "go get golang.org/x/tools/cmd/cover") (run "gem install --no-rdoc --no-ri fpm --version 1.0.2") (run "git clone -b buildroot-2014.02 https://github.com/jpetazzo/docker-busybox.git /docker-busybox") (run "/bin/echo -e '[default]\\naccess_key=$AWS_ACCESS_KEY\\nsecret_key=$AWS_SECRET_KEY' > /.s3cfg") From b37fdc5dd1db196209ebb860c88a37d67bb2cf98 Mon Sep 17 00:00:00 2001 From: Anthony Baire Date: Tue, 11 Nov 2014 10:18:22 +0100 Subject: [PATCH 337/592] fix missing layers when exporting a full repository Therer is a bug in the 'skip' decision when exporting a repository (`docker save repo`) Only the layers of the first image are included in the archive (the layers of the next images are missing) Signed-off-by: Anthony Baire --- graph/export.go | 36 ++++-------- integration-cli/docker_cli_save_load_test.go | 62 ++++++++++++++++++++ 2 files changed, 73 insertions(+), 25 deletions(-) diff --git a/graph/export.go b/graph/export.go index 75314076ed..7a8054010e 100644 --- a/graph/export.go +++ b/graph/export.go @@ -30,24 +30,21 @@ func (s *TagStore) CmdImageExport(job *engine.Job) engine.Status { defer os.RemoveAll(tempdir) rootRepoMap := map[string]Repository{} + addKey := func(name string, tag string, id string) { + log.Debugf("add key [%s:%s]", name, tag) + if repo, ok := 
rootRepoMap[name]; !ok { + rootRepoMap[name] = Repository{tag: id} + } else { + repo[tag] = id + } + } for _, name := range job.Args { log.Debugf("Serializing %s", name) rootRepo := s.Repositories[name] if rootRepo != nil { // this is a base repo name, like 'busybox' - for _, id := range rootRepo { - if _, ok := rootRepoMap[name]; !ok { - rootRepoMap[name] = rootRepo - } else { - log.Debugf("Duplicate key [%s]", name) - if rootRepoMap[name].Contains(rootRepo) { - log.Debugf("skipping, because it is present [%s:%q]", name, rootRepo) - continue - } - log.Debugf("updating [%s]: [%q] with [%q]", name, rootRepoMap[name], rootRepo) - rootRepoMap[name].Update(rootRepo) - } - + for tag, id := range rootRepo { + addKey(name, tag, id) if err := s.exportImage(job.Eng, id, tempdir); err != nil { return job.Error(err) } @@ -65,18 +62,7 @@ func (s *TagStore) CmdImageExport(job *engine.Job) engine.Status { // check this length, because a lookup of a truncated has will not have a tag // and will not need to be added to this map if len(repoTag) > 0 { - if _, ok := rootRepoMap[repoName]; !ok { - rootRepoMap[repoName] = Repository{repoTag: img.ID} - } else { - log.Debugf("Duplicate key [%s]", repoName) - newRepo := Repository{repoTag: img.ID} - if rootRepoMap[repoName].Contains(newRepo) { - log.Debugf("skipping, because it is present [%s:%q]", repoName, newRepo) - continue - } - log.Debugf("updating [%s]: [%q] with [%q]", repoName, rootRepoMap[repoName], newRepo) - rootRepoMap[repoName].Update(newRepo) - } + addKey(repoName, repoTag, img.ID) } if err := s.exportImage(job.Eng, img.ID, tempdir); err != nil { return job.Error(err) diff --git a/integration-cli/docker_cli_save_load_test.go b/integration-cli/docker_cli_save_load_test.go index ceb73a571f..73df63dc55 100644 --- a/integration-cli/docker_cli_save_load_test.go +++ b/integration-cli/docker_cli_save_load_test.go @@ -8,6 +8,8 @@ import ( "os/exec" "path/filepath" "reflect" + "sort" + "strings" "testing" "github.com/docker/docker/vendor/src/github.com/kr/pty" @@ -257,6 +259,66 @@ func TestSaveMultipleNames(t *testing.T) { logDone("save - save by multiple names") } +func TestSaveRepoWithMultipleImages(t *testing.T) { + + makeImage := func(from string, tag string) string { + runCmd := exec.Command(dockerBinary, "run", "-d", from, "true") + var ( + out string + err error + ) + if out, _, err = runCommandWithOutput(runCmd); err != nil { + t.Fatalf("failed to create a container: %v %v", out, err) + } + cleanedContainerID := stripTrailingCharacters(out) + + commitCmd := exec.Command(dockerBinary, "commit", cleanedContainerID, tag) + if out, _, err = runCommandWithOutput(commitCmd); err != nil { + t.Fatalf("failed to commit container: %v %v", out, err) + } + imageID := stripTrailingCharacters(out) + + deleteContainer(cleanedContainerID) + return imageID + } + + repoName := "foobar-save-multi-images-test" + tagFoo := repoName + ":foo" + tagBar := repoName + ":bar" + + idFoo := makeImage("busybox:latest", tagFoo) + idBar := makeImage("busybox:latest", tagBar) + + deleteImages(repoName) + + // create the archive + saveCmdFinal := fmt.Sprintf("%v save %v | tar t | grep 'VERSION' |cut -d / -f1", dockerBinary, repoName) + saveCmd := exec.Command("bash", "-c", saveCmdFinal) + out, _, err := runCommandWithOutput(saveCmd) + if err != nil { + t.Fatalf("failed to save multiple images: %s, %v", out, err) + } + actual := strings.Split(stripTrailingCharacters(out), "\n") + + // make the list of expected layers + historyCmdFinal := fmt.Sprintf("%v history -q --no-trunc %v", 
dockerBinary, "busybox:latest") + historyCmd := exec.Command("bash", "-c", historyCmdFinal) + out, _, err = runCommandWithOutput(historyCmd) + if err != nil { + t.Fatalf("failed to get history: %s, %v", out, err) + } + + expected := append(strings.Split(stripTrailingCharacters(out), "\n"), idFoo, idBar) + + sort.Strings(actual) + sort.Strings(expected) + if !reflect.DeepEqual(expected, actual) { + t.Fatalf("archive does not contain the right layers: got %v, expected %v", actual, expected) + } + + logDone("save - save repository with multiple images") +} + // Issue #6722 #5892 ensure directories are included in changes func TestSaveDirectoryPermissions(t *testing.T) { layerEntries := []string{"opt/", "opt/a/", "opt/a/b/", "opt/a/b/c"} From 3ec623ee2fc503a6236a7b0edde5dd6abb967e49 Mon Sep 17 00:00:00 2001 From: Vincent Batts Date: Fri, 14 Nov 2014 09:20:37 -0500 Subject: [PATCH 338/592] pkg/devicemapper: missed MAINTAINERS on split

Signed-off-by: Vincent Batts --- daemon/graphdriver/devmapper/MAINTAINERS | 2 +- pkg/devicemapper/MAINTAINERS | 1 + 2 files changed, 2 insertions(+), 1 deletion(-) create mode 100644 pkg/devicemapper/MAINTAINERS

diff --git a/daemon/graphdriver/devmapper/MAINTAINERS b/daemon/graphdriver/devmapper/MAINTAINERS index 39d865305d..9382fc3a42 100644 --- a/daemon/graphdriver/devmapper/MAINTAINERS +++ b/daemon/graphdriver/devmapper/MAINTAINERS @@ -1,2 +1,2 @@ Alexander Larsson (@alexlarsson) -Vincent Batts (@vbatts) +Vincent Batts (@vbatts) diff --git a/pkg/devicemapper/MAINTAINERS b/pkg/devicemapper/MAINTAINERS new file mode 100644 index 0000000000..4428dec019 --- /dev/null +++ b/pkg/devicemapper/MAINTAINERS @@ -0,0 +1 @@ +Vincent Batts (@vbatts) From 7107898d5cf0f86dc1c6dab29e9dbdad3edc9411 Mon Sep 17 00:00:00 2001 From: Brian Goff Date: Tue, 11 Nov 2014 11:17:33 -0500 Subject: [PATCH 339/592] Initialize volumes when container is created

Fixes #8942

Current behavior is that volumes aren't initialized until start. Volumes still need to be initialized on start, since VolumesFrom and Binds can be passed in as part of HostConfig on start; however, anything that has already been initialized will just be skipped, as is the current behavior.

Signed-off-by: Brian Goff --- daemon/create.go | 7 +++++++ .../reference/api/docker_remote_api.md | 3 +++ integration-cli/docker_cli_create_test.go | 19 +++++++++++++++++++ 3 files changed, 29 insertions(+)

diff --git a/daemon/create.go b/daemon/create.go index e72b0ef206..527a90c661 100644 --- a/daemon/create.go +++ b/daemon/create.go @@ -94,6 +94,13 @@ func (daemon *Daemon) Create(config *runconfig.Config, hostConfig *runconfig.Hos return nil, nil, err } } + if err := container.Mount(); err != nil { + return nil, nil, err + } + defer container.Unmount() + if err := container.prepareVolumes(); err != nil { + return nil, nil, err + } if err := container.ToDisk(); err != nil { return nil, nil, err } diff --git a/docs/sources/reference/api/docker_remote_api.md b/docs/sources/reference/api/docker_remote_api.md index 5813091411..d735589d40 100644 --- a/docs/sources/reference/api/docker_remote_api.md +++ b/docs/sources/reference/api/docker_remote_api.md @@ -57,6 +57,9 @@ total memory available (`MemTotal`). **New!** You can set the new container's MAC address explicitly. +**New!** +Volumes are now initialized when the container is created. 
+ `POST /containers/(id)/start` **New!** diff --git a/integration-cli/docker_cli_create_test.go b/integration-cli/docker_cli_create_test.go index d85fde1930..498065b64d 100644 --- a/integration-cli/docker_cli_create_test.go +++ b/integration-cli/docker_cli_create_test.go @@ -2,6 +2,7 @@ package main import ( "encoding/json" + "os" "os/exec" "testing" "time" @@ -125,3 +126,21 @@ func TestCreateEchoStdout(t *testing.T) { logDone("create - echo test123") } + +func TestCreateVolumesCreated(t *testing.T) { + name := "test_create_volume" + cmd(t, "create", "--name", name, "-v", "/foo", "busybox") + dir, err := inspectFieldMap(name, "Volumes", "/foo") + if err != nil { + t.Fatalf("Error getting volume host path: %q", err) + } + + if _, err := os.Stat(dir); err != nil && os.IsNotExist(err) { + t.Fatalf("Volume was not created") + } + if err != nil { + t.Fatalf("Error statting volume host path: %q", err) + } + + logDone("create - volumes are created") +} From 454f56e37eefd072e350739a6c5a06743ff913ef Mon Sep 17 00:00:00 2001 From: Victor Vieux Date: Fri, 14 Nov 2014 19:31:52 +0000 Subject: [PATCH 340/592] use _, _ string Signed-off-by: Victor Vieux --- api/client/cli.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/api/client/cli.go b/api/client/cli.go index 3455962bd2..a477d0b3a9 100644 --- a/api/client/cli.go +++ b/api/client/cli.go @@ -145,7 +145,7 @@ func NewDockerCli(in io.ReadCloser, out, err io.Writer, key libtrust.PrivateKey, if proto == "unix" { // no need in compressing for local communications tr.DisableCompression = true - tr.Dial = func(dial_network, dial_addr string) (net.Conn, error) { + tr.Dial = func(_, _ string) (net.Conn, error) { return net.DialTimeout(proto, addr, timeout) } } else { From cef27e1d6c0bd302e1c58e9478a0fba99fd3a2d0 Mon Sep 17 00:00:00 2001 From: Vincent Batts Date: Fri, 14 Nov 2014 14:18:35 -0500 Subject: [PATCH 341/592] pkg/devicemapper: defer udev wait during removal Signed-off-by: Vincent Batts --- pkg/devicemapper/devmapper.go | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/pkg/devicemapper/devmapper.go b/pkg/devicemapper/devmapper.go index 87f5a1f71c..4043da6b45 100644 --- a/pkg/devicemapper/devmapper.go +++ b/pkg/devicemapper/devmapper.go @@ -307,7 +307,9 @@ func RemoveDevice(name string) error { if err := task.SetCookie(&cookie, 0); err != nil { return fmt.Errorf("Can not set cookie: %s", err) } + defer UdevWait(cookie) + dmSawBusy = false // reset before the task is run if err = task.Run(); err != nil { if dmSawBusy { return ErrBusy @@ -315,8 +317,6 @@ func RemoveDevice(name string) error { return fmt.Errorf("Error running RemoveDevice %s", err) } - UdevWait(cookie) - return nil } @@ -543,7 +543,7 @@ func CreateDevice(poolName string, deviceId *int) error { return fmt.Errorf("Can't set message %s", err) } - dmSawExist = false + dmSawExist = false // reset before the task is run if err := task.Run(); err != nil { if dmSawExist { // Already exists, try next id @@ -638,7 +638,7 @@ func CreateSnapDevice(poolName string, deviceId *int, baseName string, baseDevic return fmt.Errorf("Can't set message %s", err) } - dmSawExist = false + dmSawExist = false // reset before the task is run if err := task.Run(); err != nil { if dmSawExist { // Already exists, try next id From 6aba75db4e7b0151aeb48f450bb43e659ce0ec82 Mon Sep 17 00:00:00 2001 From: Tibor Vass Date: Tue, 11 Nov 2014 16:31:15 -0500 Subject: [PATCH 342/592] Add the possibility of specifying a subnet for --insecure-registry Signed-off-by: Tibor Vass --- 
daemon/config.go | 10 +++- docs/sources/reference/commandline/cli.md | 2 +- registry/endpoint.go | 60 +++++++++++++++++------ registry/registry_mock_test.go | 26 ++++++++++ registry/registry_test.go | 19 ++++--- 5 files changed, 95 insertions(+), 22 deletions(-) diff --git a/daemon/config.go b/daemon/config.go index ddb6040bff..0876ce0802 100644 --- a/daemon/config.go +++ b/daemon/config.go @@ -56,7 +56,7 @@ func (config *Config) InstallFlags() { flag.StringVar(&config.BridgeIP, []string{"#bip", "-bip"}, "", "Use this CIDR notation address for the network bridge's IP, not compatible with -b") flag.StringVar(&config.BridgeIface, []string{"b", "-bridge"}, "", "Attach containers to a pre-existing network bridge\nuse 'none' to disable container networking") flag.StringVar(&config.FixedCIDR, []string{"-fixed-cidr"}, "", "IPv4 subnet for fixed IPs (ex: 10.20.0.0/16)\nthis subnet must be nested in the bridge subnet (which is defined by -b or --bip)") - opts.ListVar(&config.InsecureRegistries, []string{"-insecure-registry"}, "Enable insecure communication with specified registries (no certificate verification for HTTPS and enable HTTP fallback)") + opts.ListVar(&config.InsecureRegistries, []string{"-insecure-registry"}, "Enable insecure communication with specified registries (no certificate verification for HTTPS and enable HTTP fallback) (e.g., localhost:5000 or 10.20.0.0/16)") flag.BoolVar(&config.InterContainerCommunication, []string{"#icc", "-icc"}, true, "Enable inter-container communication") flag.StringVar(&config.GraphDriver, []string{"s", "-storage-driver"}, "", "Force the Docker runtime to use a specific storage driver") flag.StringVar(&config.ExecDriver, []string{"e", "-exec-driver"}, "native", "Force the Docker runtime to use a specific exec driver") @@ -68,6 +68,14 @@ func (config *Config) InstallFlags() { opts.IPListVar(&config.Dns, []string{"#dns", "-dns"}, "Force Docker to use specific DNS servers") opts.DnsSearchListVar(&config.DnsSearch, []string{"-dns-search"}, "Force Docker to use specific DNS search domains") opts.MirrorListVar(&config.Mirrors, []string{"-registry-mirror"}, "Specify a preferred Docker registry mirror") + + // Localhost is by default considered as an insecure registry + // This is a stop-gap for people who are running a private registry on localhost (especially on Boot2docker). + // + // TODO: should we deprecate this once it is easier for people to set up a TLS registry or change + // daemon flags on boot2docker? + // If so, do not forget to check the TODO in TestIsSecure + config.InsecureRegistries = append(config.InsecureRegistries, "127.0.0.0/8") } func getDefaultNetworkMtu() int { diff --git a/docs/sources/reference/commandline/cli.md b/docs/sources/reference/commandline/cli.md index e0a7b2328a..b9575c3aca 100644 --- a/docs/sources/reference/commandline/cli.md +++ b/docs/sources/reference/commandline/cli.md @@ -70,7 +70,7 @@ expect an integer, and they can only be specified once. -g, --graph="/var/lib/docker" Path to use as the root of the Docker runtime -H, --host=[] The socket(s) to bind to in daemon mode or connect to in client mode, specified using one or more tcp://host:port, unix:///path/to/socket, fd://* or fd://socketfd. 
--icc=true Enable inter-container communication - --insecure-registry=[] Enable insecure communication with specified registries (no certificate verification for HTTPS and enable HTTP fallback) + --insecure-registry=[] Enable insecure communication with specified registries (no certificate verification for HTTPS and enable HTTP fallback) (ex: localhost:5000 or 10.20.0.0/16) --ip=0.0.0.0 Default IP address to use when binding container ports --ip-forward=true Enable net.ipv4.ip_forward --ip-masq=true Enable IP masquerading for bridge's IP range diff --git a/registry/endpoint.go b/registry/endpoint.go index bd23c30299..c485a13d8f 100644 --- a/registry/endpoint.go +++ b/registry/endpoint.go @@ -12,6 +12,9 @@ import ( log "github.com/Sirupsen/logrus" ) +// for mocking in unit tests +var lookupIP = net.LookupIP + // scans string for api version in the URL path. returns the trimmed hostname, if version found, string and API version. func scanForAPIVersion(hostname string) (string, APIVersion) { var ( @@ -79,7 +82,10 @@ func newEndpoint(hostname string, insecureRegistries []string) (*Endpoint, error if err != nil { return nil, err } - endpoint.secure = isSecure(endpoint.URL.Host, insecureRegistries) + endpoint.secure, err = isSecure(endpoint.URL.Host, insecureRegistries) + if err != nil { + return nil, err + } return &endpoint, nil } @@ -152,30 +158,56 @@ func (e Endpoint) Ping() (RegistryInfo, error) { // isSecure returns false if the provided hostname is part of the list of insecure registries. // Insecure registries accept HTTP and/or accept HTTPS with certificates from unknown CAs. -func isSecure(hostname string, insecureRegistries []string) bool { +// +// The list of insecure registries can contain an element with CIDR notation to specify a whole subnet. +// If the subnet contains one of the IPs of the registry specified by hostname, the latter is considered +// insecure. +// +// hostname should be a URL.Host (`host:port` or `host`) +func isSecure(hostname string, insecureRegistries []string) (bool, error) { if hostname == IndexServerURL.Host { - return true + return true, nil } host, _, err := net.SplitHostPort(hostname) - if err != nil { + // assume hostname is of the form `host` without the port and go on. 
host = hostname } - - if host == "127.0.0.1" || host == "localhost" { - return false + addrs, err := lookupIP(host) + if err != nil { + ip := net.ParseIP(host) + if ip == nil { + // if resolving `host` fails, error out, since host is to be net.Dial-ed anyway + return true, fmt.Errorf("issecure: could not resolve %q: %v", host, err) + } + addrs = []net.IP{ip} + } + if len(addrs) == 0 { + return true, fmt.Errorf("issecure: could not resolve %q", host) } - if len(insecureRegistries) == 0 { - return true - } + for _, addr := range addrs { + for _, r := range insecureRegistries { + // hostname matches insecure registry + if hostname == r { + return false, nil + } - for _, h := range insecureRegistries { - if hostname == h { - return false + // now assume a CIDR was passed to --insecure-registry + _, ipnet, err := net.ParseCIDR(r) + if err != nil { + // if could not parse it as a CIDR, even after removing + // assume it's not a CIDR and go on with the next candidate + continue + } + + // check if the addr falls in the subnet + if ipnet.Contains(addr) { + return false, nil + } } } - return true + return true, nil } diff --git a/registry/registry_mock_test.go b/registry/registry_mock_test.go index 1c710e21e9..887d2ef6f2 100644 --- a/registry/registry_mock_test.go +++ b/registry/registry_mock_test.go @@ -2,9 +2,11 @@ package registry import ( "encoding/json" + "errors" "fmt" "io" "io/ioutil" + "net" "net/http" "net/http/httptest" "net/url" @@ -80,6 +82,11 @@ var ( "latest": "42d718c941f5c532ac049bf0b0ab53f0062f09a03afd4aa4a02c098e46032b9d", }, } + mockHosts = map[string][]net.IP{ + "": {net.ParseIP("0.0.0.0")}, + "localhost": {net.ParseIP("127.0.0.1"), net.ParseIP("::1")}, + "example.com": {net.ParseIP("42.42.42.42")}, + } ) func init() { @@ -106,6 +113,25 @@ func init() { panic(err) } insecureRegistries = []string{URL.Host} + + // override net.LookupIP + lookupIP = func(host string) ([]net.IP, error) { + if host == "127.0.0.1" { + // I believe in future Go versions this will fail, so let's fix it later + return net.LookupIP(host) + } + for h, addrs := range mockHosts { + if host == h { + return addrs, nil + } + for _, addr := range addrs { + if addr.String() == host { + return []net.IP{addr}, nil + } + } + } + return nil, errors.New("lookup: no such host") + } } func handlerAccessLog(handler http.Handler) http.Handler { diff --git a/registry/registry_test.go b/registry/registry_test.go index 3e0950efe0..d24a5f5751 100644 --- a/registry/registry_test.go +++ b/registry/registry_test.go @@ -333,19 +333,26 @@ func TestIsSecure(t *testing.T) { {"localhost:5000", []string{"localhost:5000"}, false}, {"localhost", []string{"example.com"}, false}, {"127.0.0.1:5000", []string{"127.0.0.1:5000"}, false}, - {"localhost", []string{}, false}, - {"localhost:5000", []string{}, false}, - {"127.0.0.1", []string{}, false}, + {"localhost", nil, false}, + {"localhost:5000", nil, false}, + {"127.0.0.1", nil, false}, {"localhost", []string{"example.com"}, false}, {"127.0.0.1", []string{"example.com"}, false}, - {"example.com", []string{}, true}, + {"example.com", nil, true}, {"example.com", []string{"example.com"}, false}, {"127.0.0.1", []string{"example.com"}, false}, {"127.0.0.1:5000", []string{"example.com"}, false}, + {"example.com:5000", []string{"42.42.0.0/16"}, false}, + {"example.com", []string{"42.42.0.0/16"}, false}, + {"example.com:5000", []string{"42.42.42.42/8"}, false}, + {"127.0.0.1:5000", []string{"127.0.0.0/8"}, false}, + {"42.42.42.42:5000", []string{"42.1.1.1/8"}, false}, } for _, tt := range tests { - 
if sec := isSecure(tt.addr, tt.insecureRegistries); sec != tt.expected { - t.Errorf("isSecure failed for %q %v, expected %v got %v", tt.addr, tt.insecureRegistries, tt.expected, sec) + // TODO: remove this once we remove localhost insecure by default + insecureRegistries := append(tt.insecureRegistries, "127.0.0.0/8") + if sec, err := isSecure(tt.addr, insecureRegistries); err != nil || sec != tt.expected { + t.Fatalf("isSecure failed for %q %v, expected %v got %v. Error: %v", tt.addr, insecureRegistries, tt.expected, sec, err) } } } From 5937663a08d9e7ddc9347c4fc33a506d3d596ccd Mon Sep 17 00:00:00 2001 From: Tibor Vass Date: Fri, 14 Nov 2014 11:04:47 -0800 Subject: [PATCH 343/592] Rewrite documentation for insecure registries Signed-off-by: Tibor Vass --- docs/sources/reference/commandline/cli.md | 46 ++++++++++++++++------- 1 file changed, 33 insertions(+), 13 deletions(-) diff --git a/docs/sources/reference/commandline/cli.md b/docs/sources/reference/commandline/cli.md index b9575c3aca..bf34b7d923 100644 --- a/docs/sources/reference/commandline/cli.md +++ b/docs/sources/reference/commandline/cli.md @@ -70,7 +70,7 @@ expect an integer, and they can only be specified once. -g, --graph="/var/lib/docker" Path to use as the root of the Docker runtime -H, --host=[] The socket(s) to bind to in daemon mode or connect to in client mode, specified using one or more tcp://host:port, unix:///path/to/socket, fd://* or fd://socketfd. --icc=true Enable inter-container communication - --insecure-registry=[] Enable insecure communication with specified registries (no certificate verification for HTTPS and enable HTTP fallback) (ex: localhost:5000 or 10.20.0.0/16) + --insecure-registry=[] Enable insecure communication with specified registries (disables certificate verification for HTTPS and enables HTTP fallback) (e.g., localhost:5000 or 10.20.0.0/16) --ip=0.0.0.0 Default IP address to use when binding container ports --ip-forward=true Enable net.ipv4.ip_forward --ip-masq=true Enable IP masquerading for bridge's IP range @@ -193,24 +193,44 @@ To set the DNS server for all Docker containers, use To set the DNS search domain for all Docker containers, use `docker -d --dns-search example.com`. +### Insecure registries + +Docker considers a private registry either secure or insecure. +In the rest of this section, *registry* is used for *private registry*, and `myregistry:5000` +is a placeholder example for a private registry. + +A secure registry uses TLS and a copy of its CA certificate is placed on the Docker host at +`/etc/docker/certs.d/myregistry:5000/ca.crt`. +An insecure registry is either not using TLS (i.e., listening on plain text HTTP), or is using +TLS with a CA certificate not known by the Docker daemon. The latter can happen when the +certificate was not found under `/etc/docker/certs.d/myregistry:5000/`, or if the certificate +verification failed (i.e., wrong CA). + +By default, Docker assumes all, but local (see local registries below), registries are secure. +Communicating with an insecure registry is not possible if Docker assumes that registry is secure. +In order to communicate with an insecure registry, the Docker daemon requires `--insecure-registry` +in one of the following two forms: + +* `--insecure-registry myregistry:5000` tells the Docker daemon that myregistry:5000 should be considered insecure. 
+* `--insecure-registry 10.1.0.0/16` tells the Docker daemon that all registries whose domain resolves to an IP address that is part +of the subnet described by the CIDR syntax should be considered insecure. + +The flag can be used multiple times to allow multiple registries to be marked as insecure. + +If an insecure registry is not marked as insecure, `docker pull`, `docker push`, and `docker search` +will result in an error message prompting the user to either secure the registry or pass the `--insecure-registry` +flag to the Docker daemon as described above. + +Local registries, whose IP address falls in the 127.0.0.0/8 range, are automatically marked as insecure +as of Docker 1.3.2. It is not recommended to rely on this, as it may change in the future. + + ### Miscellaneous options IP masquerading uses address translation to allow containers without a public IP to talk to other machines on the Internet. This may interfere with some network topologies and can be disabled with --ip-masq=false. - -By default, Docker will assume all registries are secured via TLS with certificate verification -enabled. Prior versions of Docker used an auto fallback if a registry did not support TLS -(or if the TLS connection failed). This introduced the opportunity for Man In The Middle (MITM) -attacks, so as of Docker 1.3.1, the user must now specify the `--insecure-registry` daemon flag -for each insecure registry. An insecure registry is either not using TLS (i.e. plain text HTTP), -or is using TLS with a CA certificate not known by the Docker daemon (i.e. certification -verification disabled). For example, if there is a registry listening for HTTP at 127.0.0.1:5000, -as of Docker 1.3.1 you are required to specify `--insecure-registry 127.0.0.1:5000` when starting -the Docker daemon. - - Docker supports softlinks for the Docker data directory (`/var/lib/docker`) and for `/var/lib/docker/tmp`. 
The `DOCKER_TMPDIR` and the data directory can be set like this: From 975b6e598d44408c865993bbb650cc7117133ffc Mon Sep 17 00:00:00 2001 From: John Gossman Date: Thu, 23 Oct 2014 16:44:57 -0700 Subject: [PATCH 344/592] Refactor pkg/term package for Windows tty support Signed-off-by: John Gossman --- pkg/term/console_windows.go | 87 ++++++++++++++++++++++++++++++++++++ pkg/term/term.go | 2 + pkg/term/term_windows.go | 89 +++++++++++++++++++++++++++++++++++++ 3 files changed, 178 insertions(+) create mode 100644 pkg/term/console_windows.go create mode 100644 pkg/term/term_windows.go diff --git a/pkg/term/console_windows.go b/pkg/term/console_windows.go new file mode 100644 index 0000000000..6335b2b837 --- /dev/null +++ b/pkg/term/console_windows.go @@ -0,0 +1,87 @@ +// +build windows + +package term + +import ( + "syscall" + "unsafe" +) + +const ( + // Consts for Get/SetConsoleMode function + // see http://msdn.microsoft.com/en-us/library/windows/desktop/ms683167(v=vs.85).aspx + ENABLE_ECHO_INPUT = 0x0004 + ENABLE_INSERT_MODE = 0x0020 + ENABLE_LINE_INPUT = 0x0002 + ENABLE_MOUSE_INPUT = 0x0010 + ENABLE_PROCESSED_INPUT = 0x0001 + ENABLE_QUICK_EDIT_MODE = 0x0040 + ENABLE_WINDOW_INPUT = 0x0008 + // If parameter is a screen buffer handle, additional values + ENABLE_PROCESSED_OUTPUT = 0x0001 + ENABLE_WRAP_AT_EOL_OUTPUT = 0x0002 +) + +var kernel32DLL = syscall.NewLazyDLL("kernel32.dll") + +var ( + setConsoleModeProc = kernel32DLL.NewProc("SetConsoleMode") + getConsoleScreenBufferInfoProc = kernel32DLL.NewProc("GetConsoleScreenBufferInfo") +) + +func GetConsoleMode(fileDesc uintptr) (uint32, error) { + var mode uint32 + err := syscall.GetConsoleMode(syscall.Handle(fileDesc), &mode) + return mode, err +} + +func SetConsoleMode(fileDesc uintptr, mode uint32) error { + r, _, err := setConsoleModeProc.Call(fileDesc, uintptr(mode), 0) + if r == 0 { + if err != nil { + return err + } + return syscall.EINVAL + } + return nil +} + +// types for calling GetConsoleScreenBufferInfo +// see http://msdn.microsoft.com/en-us/library/windows/desktop/ms682093(v=vs.85).aspx +type ( + SHORT int16 + + SMALL_RECT struct { + Left SHORT + Top SHORT + Right SHORT + Bottom SHORT + } + + COORD struct { + X SHORT + Y SHORT + } + + WORD uint16 + + CONSOLE_SCREEN_BUFFER_INFO struct { + dwSize COORD + dwCursorPosition COORD + wAttributes WORD + srWindow SMALL_RECT + dwMaximumWindowSize COORD + } +) + +func GetConsoleScreenBufferInfo(fileDesc uintptr) (*CONSOLE_SCREEN_BUFFER_INFO, error) { + var info CONSOLE_SCREEN_BUFFER_INFO + r, _, err := getConsoleScreenBufferInfoProc.Call(uintptr(fileDesc), uintptr(unsafe.Pointer(&info)), 0) + if r == 0 { + if err != nil { + return nil, err + } + return nil, syscall.EINVAL + } + return &info, nil +} diff --git a/pkg/term/term.go b/pkg/term/term.go index ea94b44ade..553747a7a0 100644 --- a/pkg/term/term.go +++ b/pkg/term/term.go @@ -1,3 +1,5 @@ +// +build !windows + package term import ( diff --git a/pkg/term/term_windows.go b/pkg/term/term_windows.go new file mode 100644 index 0000000000..d372e86a88 --- /dev/null +++ b/pkg/term/term_windows.go @@ -0,0 +1,89 @@ +// +build windows + +package term + +type State struct { + mode uint32 +} + +type Winsize struct { + Height uint16 + Width uint16 + x uint16 + y uint16 +} + +func GetWinsize(fd uintptr) (*Winsize, error) { + ws := &Winsize{} + var info *CONSOLE_SCREEN_BUFFER_INFO + info, err := GetConsoleScreenBufferInfo(fd) + if err != nil { + return nil, err + } + ws.Height = uint16(info.srWindow.Right - info.srWindow.Left + 1) + ws.Width = 
uint16(info.srWindow.Bottom - info.srWindow.Top + 1) + + ws.x = 0 // todo azlinux -- this is the pixel size of the Window, and not currently used by any caller + ws.y = 0 + + return ws, nil +} + +func SetWinsize(fd uintptr, ws *Winsize) error { + return nil +} + +// IsTerminal returns true if the given file descriptor is a terminal. +func IsTerminal(fd uintptr) bool { + _, e := GetConsoleMode(fd) + return e == nil +} + +// Restore restores the terminal connected to the given file descriptor to a +// previous state. +func RestoreTerminal(fd uintptr, state *State) error { + return SetConsoleMode(fd, state.mode) +} + +func SaveState(fd uintptr) (*State, error) { + mode, e := GetConsoleMode(fd) + if e != nil { + return nil, e + } + return &State{mode}, nil +} + +// see http://msdn.microsoft.com/en-us/library/windows/desktop/ms683462(v=vs.85).aspx for these flag settings +func DisableEcho(fd uintptr, state *State) error { + state.mode &^= (ENABLE_ECHO_INPUT) + state.mode |= (ENABLE_PROCESSED_INPUT | ENABLE_LINE_INPUT) + return SetConsoleMode(fd, state.mode) +} + +func SetRawTerminal(fd uintptr) (*State, error) { + oldState, err := MakeRaw(fd) + if err != nil { + return nil, err + } + // TODO (azlinux): implement handling interrupt and restore state of terminal + return oldState, err +} + +// MakeRaw puts the terminal connected to the given file descriptor into raw +// mode and returns the previous state of the terminal so that it can be +// restored. +func MakeRaw(fd uintptr) (*State, error) { + var state *State + state, err := SaveState(fd) + if err != nil { + return nil, err + } + + // see http://msdn.microsoft.com/en-us/library/windows/desktop/ms683462(v=vs.85).aspx for these flag settings + state.mode &^= (ENABLE_ECHO_INPUT | ENABLE_PROCESSED_INPUT | ENABLE_LINE_INPUT) + err = SetConsoleMode(fd, state.mode) + if err != nil { + return nil, err + } + return state, nil +} From 91a86670aac52d916c81a818aff3dfcf445da83e Mon Sep 17 00:00:00 2001 From: Ahmet Alp Balkan Date: Thu, 13 Nov 2014 10:40:22 -0800 Subject: [PATCH 345/592] Extract client signals to pkg/signal SIGCHLD and SIGWINCH used in api/client (cli code) are not available on Windows. Extracting into separate files with build tags. Signed-off-by: Ahmet Alp Balkan --- api/client/commands.go | 5 ++--- api/client/utils.go | 4 ++-- pkg/signal/signal_unix.go | 12 ++++++++++++ pkg/signal/signal_windows.go | 12 ++++++++++++ 4 files changed, 28 insertions(+), 5 deletions(-) create mode 100644 pkg/signal/signal_unix.go create mode 100644 pkg/signal/signal_windows.go diff --git a/api/client/commands.go b/api/client/commands.go index d45c076037..3802bf4867 100644 --- a/api/client/commands.go +++ b/api/client/commands.go @@ -18,7 +18,6 @@ import ( "runtime" "strconv" "strings" - "syscall" "text/tabwriter" "text/template" "time" @@ -608,7 +607,7 @@ func (cli *DockerCli) forwardAllSignals(cid string) chan os.Signal { signal.CatchAll(sigc) go func() { for s := range sigc { - if s == syscall.SIGCHLD { + if s == signal.SIGCHLD { continue } var sig string @@ -619,7 +618,7 @@ func (cli *DockerCli) forwardAllSignals(cid string) chan os.Signal { } } if sig == "" { - log.Errorf("Unsupported signal: %d. Discarding.", s) + log.Errorf("Unsupported signal: %v. 
Discarding.", s) } if _, _, err := readBody(cli.call("POST", fmt.Sprintf("/containers/%s/kill?signal=%s", cid, sig), nil, false)); err != nil { log.Debugf("Error sending signal: %s", err) diff --git a/api/client/utils.go b/api/client/utils.go index e71afd608b..f094635714 100644 --- a/api/client/utils.go +++ b/api/client/utils.go @@ -14,12 +14,12 @@ import ( gosignal "os/signal" "strconv" "strings" - "syscall" log "github.com/Sirupsen/logrus" "github.com/docker/docker/api" "github.com/docker/docker/dockerversion" "github.com/docker/docker/engine" + "github.com/docker/docker/pkg/signal" "github.com/docker/docker/pkg/stdcopy" "github.com/docker/docker/pkg/term" "github.com/docker/docker/registry" @@ -238,7 +238,7 @@ func (cli *DockerCli) monitorTtySize(id string, isExec bool) error { cli.resizeTty(id, isExec) sigchan := make(chan os.Signal, 1) - gosignal.Notify(sigchan, syscall.SIGWINCH) + gosignal.Notify(sigchan, signal.SIGWINCH) go func() { for _ = range sigchan { cli.resizeTty(id, isExec) diff --git a/pkg/signal/signal_unix.go b/pkg/signal/signal_unix.go new file mode 100644 index 0000000000..613e30e57c --- /dev/null +++ b/pkg/signal/signal_unix.go @@ -0,0 +1,12 @@ +// +build !windows + +package signal + +import ( + "syscall" +) + +// Signals used in api/client (no windows equivalent, use +// invalid signals so they don't get handled) +const SIGCHLD = syscall.SIGCHLD +const SIGWINCH = syscall.SIGWINCH diff --git a/pkg/signal/signal_windows.go b/pkg/signal/signal_windows.go new file mode 100644 index 0000000000..9f00b99994 --- /dev/null +++ b/pkg/signal/signal_windows.go @@ -0,0 +1,12 @@ +// +build windows + +package signal + +import ( + "syscall" +) + +// Signals used in api/client (no windows equivalent, use +// invalid signals so they don't get handled) +const SIGCHLD = syscall.Signal(0xff) +const SIGWINCH = syscall.Signal(0xff) From 376ae7780bf04d89ad1532f1c2a752535fa7ac7d Mon Sep 17 00:00:00 2001 From: Ahmet Alp Balkan Date: Thu, 13 Nov 2014 10:48:19 -0800 Subject: [PATCH 346/592] Consolidate tmpdir implementations, include Windows Signed-off-by: Ahmet Alp Balkan --- utils/tmpdir.go | 12 ++++++++---- utils/tmpdir_unix.go | 18 ------------------ 2 files changed, 8 insertions(+), 22 deletions(-) delete mode 100644 utils/tmpdir_unix.go diff --git a/utils/tmpdir.go b/utils/tmpdir.go index 921a8f697c..e200f340db 100644 --- a/utils/tmpdir.go +++ b/utils/tmpdir.go @@ -1,12 +1,16 @@ -// +build !darwin,!dragonfly,!freebsd,!linux,!netbsd,!openbsd - package utils import ( "os" + "path/filepath" ) // TempDir returns the default directory to use for temporary files. -func TempDir(rootdir string) (string error) { - return os.TempDir(), nil +func TempDir(rootDir string) (string, error) { + var tmpDir string + if tmpDir = os.Getenv("DOCKER_TMPDIR"); tmpDir == "" { + tmpDir = filepath.Join(rootDir, "tmp") + } + err := os.MkdirAll(tmpDir, 0700) + return tmpDir, err } diff --git a/utils/tmpdir_unix.go b/utils/tmpdir_unix.go deleted file mode 100644 index 30d7c3a192..0000000000 --- a/utils/tmpdir_unix.go +++ /dev/null @@ -1,18 +0,0 @@ -// +build darwin dragonfly freebsd linux netbsd openbsd - -package utils - -import ( - "os" - "path/filepath" -) - -// TempDir returns the default directory to use for temporary files. 
-func TempDir(rootDir string) (string, error) { - var tmpDir string - if tmpDir = os.Getenv("DOCKER_TMPDIR"); tmpDir == "" { - tmpDir = filepath.Join(rootDir, "tmp") - } - err := os.MkdirAll(tmpDir, 0700) - return tmpDir, err -} From b64c9b521ab4e4082ed874a23a493f4a266304d5 Mon Sep 17 00:00:00 2001 From: Ahmet Alp Balkan Date: Thu, 13 Nov 2014 10:50:57 -0800 Subject: [PATCH 347/592] Extract TreeSize to daemon build TreeSize uses syscall.Stat_t which is not available on Windows. It's called only on daemon path, therefore extracting it to daemon with build tag 'daemon' Signed-off-by: Ahmet Alp Balkan --- daemon/graphdriver/fsdiff.go | 2 ++ utils/utils.go | 31 ---------------------------- utils/utils_daemon.go | 39 ++++++++++++++++++++++++++++++++++++ 3 files changed, 41 insertions(+), 31 deletions(-) create mode 100644 utils/utils_daemon.go diff --git a/daemon/graphdriver/fsdiff.go b/daemon/graphdriver/fsdiff.go index 269379bddf..3569cf910e 100644 --- a/daemon/graphdriver/fsdiff.go +++ b/daemon/graphdriver/fsdiff.go @@ -1,3 +1,5 @@ +// +build daemon + package graphdriver import ( diff --git a/utils/utils.go b/utils/utils.go index e2254b8bab..84d01f6c9d 100644 --- a/utils/utils.go +++ b/utils/utils.go @@ -18,7 +18,6 @@ import ( "strconv" "strings" "sync" - "syscall" log "github.com/Sirupsen/logrus" "github.com/docker/docker/dockerversion" @@ -453,36 +452,6 @@ func ReadSymlinkedDirectory(path string) (string, error) { return realPath, nil } -// TreeSize walks a directory tree and returns its total size in bytes. -func TreeSize(dir string) (size int64, err error) { - data := make(map[uint64]struct{}) - err = filepath.Walk(dir, func(d string, fileInfo os.FileInfo, e error) error { - // Ignore directory sizes - if fileInfo == nil { - return nil - } - - s := fileInfo.Size() - if fileInfo.IsDir() || s == 0 { - return nil - } - - // Check inode to handle hard links correctly - inode := fileInfo.Sys().(*syscall.Stat_t).Ino - // inode is not a uint64 on all platforms. Cast it to avoid issues. - if _, exists := data[uint64(inode)]; exists { - return nil - } - // inode is not a uint64 on all platforms. Cast it to avoid issues. - data[uint64(inode)] = struct{}{} - - size += s - - return nil - }) - return -} - // ValidateContextDirectory checks if all the contents of the directory // can be read and returns an error if some files can't be read // symlinks which point to non-existing files don't trigger an error diff --git a/utils/utils_daemon.go b/utils/utils_daemon.go new file mode 100644 index 0000000000..098e227367 --- /dev/null +++ b/utils/utils_daemon.go @@ -0,0 +1,39 @@ +// +build daemon + +package utils + +import ( + "os" + "path/filepath" + "syscall" +) + +// TreeSize walks a directory tree and returns its total size in bytes. +func TreeSize(dir string) (size int64, err error) { + data := make(map[uint64]struct{}) + err = filepath.Walk(dir, func(d string, fileInfo os.FileInfo, e error) error { + // Ignore directory sizes + if fileInfo == nil { + return nil + } + + s := fileInfo.Size() + if fileInfo.IsDir() || s == 0 { + return nil + } + + // Check inode to handle hard links correctly + inode := fileInfo.Sys().(*syscall.Stat_t).Ino + // inode is not a uint64 on all platforms. Cast it to avoid issues. + if _, exists := data[uint64(inode)]; exists { + return nil + } + // inode is not a uint64 on all platforms. Cast it to avoid issues. 
+ data[uint64(inode)] = struct{}{} + + size += s + + return nil + }) + return +} From 3d2fae353f6ddc819d3a3c4db80887a40ac6f5f0 Mon Sep 17 00:00:00 2001 From: Ahmet Alp Balkan Date: Thu, 13 Nov 2014 12:00:04 -0800 Subject: [PATCH 348/592] Extract mknod, umask, lstat to pkg/system Some parts of pkg/archive is called on both client/daemon code. To get it compiling on Windows, these funcs are extracted into files with build tags. Signed-off-by: Ahmet Alp Balkan --- pkg/archive/archive.go | 2 +- pkg/archive/changes.go | 12 +++++++++++- pkg/archive/diff.go | 7 ------- pkg/system/lstat.go | 16 ++++++++++++++++ pkg/system/lstat_windows.go | 12 ++++++++++++ pkg/system/mknod.go | 18 ++++++++++++++++++ pkg/system/mknod_windows.go | 12 ++++++++++++ pkg/system/umask.go | 11 +++++++++++ pkg/system/umask_windows.go | 8 ++++++++ 9 files changed, 89 insertions(+), 9 deletions(-) create mode 100644 pkg/system/lstat.go create mode 100644 pkg/system/lstat_windows.go create mode 100644 pkg/system/mknod.go create mode 100644 pkg/system/mknod_windows.go create mode 100644 pkg/system/umask.go create mode 100644 pkg/system/umask_windows.go diff --git a/pkg/archive/archive.go b/pkg/archive/archive.go index 530ea303ad..85d23190d0 100644 --- a/pkg/archive/archive.go +++ b/pkg/archive/archive.go @@ -291,7 +291,7 @@ func createTarFile(path, extractDir string, hdr *tar.Header, reader io.Reader, L mode |= syscall.S_IFIFO } - if err := syscall.Mknod(path, mode, int(mkdev(hdr.Devmajor, hdr.Devminor))); err != nil { + if err := syscall.Mknod(path, mode, int(system.Mkdev(hdr.Devmajor, hdr.Devminor))); err != nil { return err } diff --git a/pkg/archive/changes.go b/pkg/archive/changes.go index 0a1f741c41..720d549758 100644 --- a/pkg/archive/changes.go +++ b/pkg/archive/changes.go @@ -269,6 +269,14 @@ func newRootFileInfo() *FileInfo { return root } +func lstat(path string) (*stat, error) { + s, err := system.Lstat(path) + if err != nil { + return nil, err + } + return fromStatT(s), nil +} + func collectFileInfo(sourceDir string) (*FileInfo, error) { root := newRootFileInfo() @@ -299,9 +307,11 @@ func collectFileInfo(sourceDir string) (*FileInfo, error) { parent: parent, } - if err := syscall.Lstat(path, &info.stat); err != nil { + s, err := lstat(path) + if err != nil { return err } + info.stat = s info.capability, _ = system.Lgetxattr(path, "security.capability") diff --git a/pkg/archive/diff.go b/pkg/archive/diff.go index 215f62ec0a..c208336ab3 100644 --- a/pkg/archive/diff.go +++ b/pkg/archive/diff.go @@ -14,13 +14,6 @@ import ( "github.com/docker/docker/pkg/pools" ) -// Linux device nodes are a bit weird due to backwards compat with 16 bit device nodes. -// They are, from low to high: the lower 8 bits of the minor, then 12 bits of the major, -// then the top 12 bits of the minor -func mkdev(major int64, minor int64) uint32 { - return uint32(((minor & 0xfff00) << 12) | ((major & 0xfff) << 8) | (minor & 0xff)) -} - // ApplyLayer parses a diff in the standard layer format from `layer`, and // applies it to the directory `dest`. 
func ApplyLayer(dest string, layer ArchiveReader) error { diff --git a/pkg/system/lstat.go b/pkg/system/lstat.go new file mode 100644 index 0000000000..d7e06b3efb --- /dev/null +++ b/pkg/system/lstat.go @@ -0,0 +1,16 @@ +// +build !windows + +package system + +import ( + "syscall" +) + +func Lstat(path string) (*syscall.Stat_t, error) { + s := &syscall.Stat_t{} + err := syscall.Lstat(path, s) + if err != nil { + return nil, err + } + return s, nil +} diff --git a/pkg/system/lstat_windows.go b/pkg/system/lstat_windows.go new file mode 100644 index 0000000000..f4c7e6d0e9 --- /dev/null +++ b/pkg/system/lstat_windows.go @@ -0,0 +1,12 @@ +// +build windows + +package system + +import ( + "syscall" +) + +func Lstat(path string) (*syscall.Win32FileAttributeData, error) { + // should not be called on cli code path + return nil, ErrNotSupportedPlatform +} diff --git a/pkg/system/mknod.go b/pkg/system/mknod.go new file mode 100644 index 0000000000..06f9c6afbb --- /dev/null +++ b/pkg/system/mknod.go @@ -0,0 +1,18 @@ +// +build !windows + +package system + +import ( + "syscall" +) + +func Mknod(path string, mode uint32, dev int) error { + return syscall.Mknod(path, mode, dev) +} + +// Linux device nodes are a bit weird due to backwards compat with 16 bit device nodes. +// They are, from low to high: the lower 8 bits of the minor, then 12 bits of the major, +// then the top 12 bits of the minor +func Mkdev(major int64, minor int64) uint32 { + return uint32(((minor & 0xfff00) << 12) | ((major & 0xfff) << 8) | (minor & 0xff)) +} diff --git a/pkg/system/mknod_windows.go b/pkg/system/mknod_windows.go new file mode 100644 index 0000000000..b4020c11b6 --- /dev/null +++ b/pkg/system/mknod_windows.go @@ -0,0 +1,12 @@ +// +build windows + +package system + +func Mknod(path string, mode uint32, dev int) error { + // should not be called on cli code path + return ErrNotSupportedPlatform +} + +func Mkdev(major int64, minor int64) uint32 { + panic("Mkdev not implemented on windows, should not be called on cli code") +} diff --git a/pkg/system/umask.go b/pkg/system/umask.go new file mode 100644 index 0000000000..fddbecd390 --- /dev/null +++ b/pkg/system/umask.go @@ -0,0 +1,11 @@ +// +build !windows + +package system + +import ( + "syscall" +) + +func Umask(newmask int) (oldmask int, err error) { + return syscall.Umask(newmask), nil +} diff --git a/pkg/system/umask_windows.go b/pkg/system/umask_windows.go new file mode 100644 index 0000000000..3be563f89e --- /dev/null +++ b/pkg/system/umask_windows.go @@ -0,0 +1,8 @@ +// +build windows + +package system + +func Umask(newmask int) (oldmask int, err error) { + // should not be called on cli code path + return 0, ErrNotSupportedPlatform +} From 2180aa4f6f2ad4d8f284d63ee29e93547263976e Mon Sep 17 00:00:00 2001 From: Ahmet Alp Balkan Date: Thu, 13 Nov 2014 12:36:05 -0800 Subject: [PATCH 349/592] Refactor pkg/archive with a platform-independent stat struct pkg/archive contains code both invoked from cli (cross platform) and daemon (linux only) and Unix-specific dependencies break compilation on Windows. We extracted those stat-related funcs into platform specific implementations at pkg/system and added unit tests. 
Signed-off-by: Ahmet Alp Balkan --- pkg/archive/archive.go | 19 ++++----------- pkg/archive/archive_unix.go | 39 +++++++++++++++++++++++++++++++ pkg/archive/archive_windows.go | 12 ++++++++++ pkg/archive/changes.go | 38 +++++++++--------------------- pkg/archive/diff.go | 11 ++++++--- pkg/system/lstat.go | 4 ++-- pkg/system/lstat_test.go | 25 ++++++++++++++++++++ pkg/system/lstat_windows.go | 6 +---- pkg/system/stat.go | 42 ++++++++++++++++++++++++++++++++++ pkg/system/stat_linux.go | 13 ++++++----- pkg/system/stat_test.go | 34 +++++++++++++++++++++++++++ pkg/system/stat_unsupported.go | 19 ++++++++------- pkg/system/stat_windows.go | 12 ++++++++++ 13 files changed, 209 insertions(+), 65 deletions(-) create mode 100644 pkg/archive/archive_unix.go create mode 100644 pkg/archive/archive_windows.go create mode 100644 pkg/system/lstat_test.go create mode 100644 pkg/system/stat.go create mode 100644 pkg/system/stat_test.go create mode 100644 pkg/system/stat_windows.go diff --git a/pkg/archive/archive.go b/pkg/archive/archive.go index 85d23190d0..5a81223dbd 100644 --- a/pkg/archive/archive.go +++ b/pkg/archive/archive.go @@ -192,20 +192,11 @@ func (ta *tarAppender) addTarFile(path, name string) error { hdr.Name = name - var ( - nlink uint32 - inode uint64 - ) - if stat, ok := fi.Sys().(*syscall.Stat_t); ok { - nlink = uint32(stat.Nlink) - inode = uint64(stat.Ino) - // Currently go does not fill in the major/minors - if stat.Mode&syscall.S_IFBLK == syscall.S_IFBLK || - stat.Mode&syscall.S_IFCHR == syscall.S_IFCHR { - hdr.Devmajor = int64(major(uint64(stat.Rdev))) - hdr.Devminor = int64(minor(uint64(stat.Rdev))) - } + nlink, inode, err := setHeaderForSpecialDevice(hdr, ta, name, fi.Sys()) + if err != nil { + return err } + // if it's a regular file and has more than 1 link, // it's hardlinked, so set the type flag accordingly if fi.Mode().IsRegular() && nlink > 1 { @@ -291,7 +282,7 @@ func createTarFile(path, extractDir string, hdr *tar.Header, reader io.Reader, L mode |= syscall.S_IFIFO } - if err := syscall.Mknod(path, mode, int(system.Mkdev(hdr.Devmajor, hdr.Devminor))); err != nil { + if err := system.Mknod(path, mode, int(system.Mkdev(hdr.Devmajor, hdr.Devminor))); err != nil { return err } diff --git a/pkg/archive/archive_unix.go b/pkg/archive/archive_unix.go new file mode 100644 index 0000000000..c0e8aee93c --- /dev/null +++ b/pkg/archive/archive_unix.go @@ -0,0 +1,39 @@ +// +build !windows + +package archive + +import ( + "errors" + "syscall" + + "github.com/docker/docker/vendor/src/code.google.com/p/go/src/pkg/archive/tar" +) + +func setHeaderForSpecialDevice(hdr *tar.Header, ta *tarAppender, name string, stat interface{}) (nlink uint32, inode uint64, err error) { + s, ok := stat.(*syscall.Stat_t) + + if !ok { + err = errors.New("cannot convert stat value to syscall.Stat_t") + return + } + + nlink = uint32(s.Nlink) + inode = uint64(s.Ino) + + // Currently go does not fil in the major/minors + if s.Mode&syscall.S_IFBLK == syscall.S_IFBLK || + s.Mode&syscall.S_IFCHR == syscall.S_IFCHR { + hdr.Devmajor = int64(major(uint64(s.Rdev))) + hdr.Devminor = int64(minor(uint64(s.Rdev))) + } + + return +} + +func major(device uint64) uint64 { + return (device >> 8) & 0xfff +} + +func minor(device uint64) uint64 { + return (device & 0xff) | ((device >> 12) & 0xfff00) +} diff --git a/pkg/archive/archive_windows.go b/pkg/archive/archive_windows.go new file mode 100644 index 0000000000..3cc2493f6f --- /dev/null +++ b/pkg/archive/archive_windows.go @@ -0,0 +1,12 @@ +// +build windows + +package archive + 
+import ( + "github.com/docker/docker/vendor/src/code.google.com/p/go/src/pkg/archive/tar" +) + +func setHeaderForSpecialDevice(hdr *tar.Header, ta *tarAppender, name string, stat interface{}) (nlink uint32, inode uint64, err error) { + // do nothing. no notion of Rdev, Inode, Nlink in stat on Windows + return +} diff --git a/pkg/archive/changes.go b/pkg/archive/changes.go index 720d549758..85217f6e08 100644 --- a/pkg/archive/changes.go +++ b/pkg/archive/changes.go @@ -135,7 +135,7 @@ func Changes(layers []string, rw string) ([]Change, error) { type FileInfo struct { parent *FileInfo name string - stat syscall.Stat_t + stat *system.Stat children map[string]*FileInfo capability []byte added bool @@ -168,7 +168,7 @@ func (info *FileInfo) path() string { } func (info *FileInfo) isDir() bool { - return info.parent == nil || info.stat.Mode&syscall.S_IFDIR == syscall.S_IFDIR + return info.parent == nil || info.stat.Mode()&syscall.S_IFDIR == syscall.S_IFDIR } func (info *FileInfo) addChanges(oldInfo *FileInfo, changes *[]Change) { @@ -199,21 +199,21 @@ func (info *FileInfo) addChanges(oldInfo *FileInfo, changes *[]Change) { oldChild, _ := oldChildren[name] if oldChild != nil { // change? - oldStat := &oldChild.stat - newStat := &newChild.stat + oldStat := oldChild.stat + newStat := newChild.stat // Note: We can't compare inode or ctime or blocksize here, because these change // when copying a file into a container. However, that is not generally a problem // because any content change will change mtime, and any status change should // be visible when actually comparing the stat fields. The only time this // breaks down is if some code intentionally hides a change by setting // back mtime - if oldStat.Mode != newStat.Mode || - oldStat.Uid != newStat.Uid || - oldStat.Gid != newStat.Gid || - oldStat.Rdev != newStat.Rdev || + if oldStat.Mode() != newStat.Mode() || + oldStat.Uid() != newStat.Uid() || + oldStat.Gid() != newStat.Gid() || + oldStat.Rdev() != newStat.Rdev() || // Don't look at size for dirs, its not a good measure of change - (oldStat.Size != newStat.Size && oldStat.Mode&syscall.S_IFDIR != syscall.S_IFDIR) || - !sameFsTimeSpec(system.GetLastModification(oldStat), system.GetLastModification(newStat)) || + (oldStat.Size() != newStat.Size() && oldStat.Mode()&syscall.S_IFDIR != syscall.S_IFDIR) || + !sameFsTimeSpec(oldStat.Mtim(), newStat.Mtim()) || bytes.Compare(oldChild.capability, newChild.capability) != 0 { change := Change{ Path: newChild.path(), @@ -269,14 +269,6 @@ func newRootFileInfo() *FileInfo { return root } -func lstat(path string) (*stat, error) { - s, err := system.Lstat(path) - if err != nil { - return nil, err - } - return fromStatT(s), nil -} - func collectFileInfo(sourceDir string) (*FileInfo, error) { root := newRootFileInfo() @@ -307,7 +299,7 @@ func collectFileInfo(sourceDir string) (*FileInfo, error) { parent: parent, } - s, err := lstat(path) + s, err := system.Lstat(path) if err != nil { return err } @@ -369,14 +361,6 @@ func ChangesSize(newDir string, changes []Change) int64 { return size } -func major(device uint64) uint64 { - return (device >> 8) & 0xfff -} - -func minor(device uint64) uint64 { - return (device & 0xff) | ((device >> 12) & 0xfff00) -} - // ExportChanges produces an Archive from the provided changes, relative to dir. 
func ExportChanges(dir string, changes []Change) (Archive, error) { reader, writer := io.Pipe() diff --git a/pkg/archive/diff.go b/pkg/archive/diff.go index c208336ab3..eabb7c48ff 100644 --- a/pkg/archive/diff.go +++ b/pkg/archive/diff.go @@ -12,16 +12,21 @@ import ( "github.com/docker/docker/vendor/src/code.google.com/p/go/src/pkg/archive/tar" "github.com/docker/docker/pkg/pools" + "github.com/docker/docker/pkg/system" ) // ApplyLayer parses a diff in the standard layer format from `layer`, and // applies it to the directory `dest`. func ApplyLayer(dest string, layer ArchiveReader) error { // We need to be able to set any perms - oldmask := syscall.Umask(0) - defer syscall.Umask(oldmask) + oldmask, err := system.Umask(0) + if err != nil { + return err + } - layer, err := DecompressStream(layer) + defer system.Umask(oldmask) // ignore err, ErrNotSupportedPlatform + + layer, err = DecompressStream(layer) if err != nil { return err } diff --git a/pkg/system/lstat.go b/pkg/system/lstat.go index d7e06b3efb..9ef82d5523 100644 --- a/pkg/system/lstat.go +++ b/pkg/system/lstat.go @@ -6,11 +6,11 @@ import ( "syscall" ) -func Lstat(path string) (*syscall.Stat_t, error) { +func Lstat(path string) (*Stat, error) { s := &syscall.Stat_t{} err := syscall.Lstat(path, s) if err != nil { return nil, err } - return s, nil + return fromStatT(s) } diff --git a/pkg/system/lstat_test.go b/pkg/system/lstat_test.go new file mode 100644 index 0000000000..7e271efea5 --- /dev/null +++ b/pkg/system/lstat_test.go @@ -0,0 +1,25 @@ +package system + +import ( + "testing" +) + +func TestLstat(t *testing.T) { + file, invalid, _ := prepareFiles(t) + + statFile, err := Lstat(file) + if err != nil { + t.Fatal(err) + } + if statFile == nil { + t.Fatal("returned empty stat for existing file") + } + + statInvalid, err := Lstat(invalid) + if err == nil { + t.Fatal("did not return error for non-existing file") + } + if statInvalid != nil { + t.Fatal("returned non-nil stat for non-existing file") + } +} diff --git a/pkg/system/lstat_windows.go b/pkg/system/lstat_windows.go index f4c7e6d0e9..213a7c7ade 100644 --- a/pkg/system/lstat_windows.go +++ b/pkg/system/lstat_windows.go @@ -2,11 +2,7 @@ package system -import ( - "syscall" -) - -func Lstat(path string) (*syscall.Win32FileAttributeData, error) { +func Lstat(path string) (*Stat, error) { // should not be called on cli code path return nil, ErrNotSupportedPlatform } diff --git a/pkg/system/stat.go b/pkg/system/stat.go new file mode 100644 index 0000000000..5d47494d21 --- /dev/null +++ b/pkg/system/stat.go @@ -0,0 +1,42 @@ +package system + +import ( + "syscall" +) + +type Stat struct { + mode uint32 + uid uint32 + gid uint32 + rdev uint64 + size int64 + mtim syscall.Timespec +} + +func (s Stat) Mode() uint32 { + return s.mode +} + +func (s Stat) Uid() uint32 { + return s.uid +} + +func (s Stat) Gid() uint32 { + return s.gid +} + +func (s Stat) Rdev() uint64 { + return s.rdev +} + +func (s Stat) Size() int64 { + return s.size +} + +func (s Stat) Mtim() syscall.Timespec { + return s.mtim +} + +func (s Stat) GetLastModification() syscall.Timespec { + return s.Mtim() +} diff --git a/pkg/system/stat_linux.go b/pkg/system/stat_linux.go index e702200360..47cebef5cf 100644 --- a/pkg/system/stat_linux.go +++ b/pkg/system/stat_linux.go @@ -4,10 +4,11 @@ import ( "syscall" ) -func GetLastAccess(stat *syscall.Stat_t) syscall.Timespec { - return stat.Atim -} - -func GetLastModification(stat *syscall.Stat_t) syscall.Timespec { - return stat.Mtim +func fromStatT(s *syscall.Stat_t) (*Stat, 
error) { + return &Stat{size: s.Size, + mode: s.Mode, + uid: s.Uid, + gid: s.Gid, + rdev: s.Rdev, + mtim: s.Mtim}, nil } diff --git a/pkg/system/stat_test.go b/pkg/system/stat_test.go new file mode 100644 index 0000000000..0dcb239ece --- /dev/null +++ b/pkg/system/stat_test.go @@ -0,0 +1,34 @@ +package system + +import ( + "syscall" + "testing" +) + +func TestFromStatT(t *testing.T) { + file, _, _ := prepareFiles(t) + + stat := &syscall.Stat_t{} + err := syscall.Lstat(file, stat) + + s, err := fromStatT(stat) + if err != nil { + t.Fatal(err) + } + + if stat.Mode != s.Mode() { + t.Fatal("got invalid mode") + } + if stat.Uid != s.Uid() { + t.Fatal("got invalid uid") + } + if stat.Gid != s.Gid() { + t.Fatal("got invalid gid") + } + if stat.Rdev != s.Rdev() { + t.Fatal("got invalid rdev") + } + if stat.Mtim != s.Mtim() { + t.Fatal("got invalid mtim") + } +} diff --git a/pkg/system/stat_unsupported.go b/pkg/system/stat_unsupported.go index 4686a4c346..c4d53e6cd6 100644 --- a/pkg/system/stat_unsupported.go +++ b/pkg/system/stat_unsupported.go @@ -1,13 +1,16 @@ -// +build !linux +// +build !linux,!windows package system -import "syscall" +import ( + "syscall" +) -func GetLastAccess(stat *syscall.Stat_t) syscall.Timespec { - return stat.Atimespec -} - -func GetLastModification(stat *syscall.Stat_t) syscall.Timespec { - return stat.Mtimespec +func fromStatT(s *syscall.Stat_t) (*Stat, error) { + return &Stat{size: s.Size, + mode: uint32(s.Mode), + uid: s.Uid, + gid: s.Gid, + rdev: uint64(s.Rdev), + mtim: s.Mtimespec}, nil } diff --git a/pkg/system/stat_windows.go b/pkg/system/stat_windows.go new file mode 100644 index 0000000000..584e8940cc --- /dev/null +++ b/pkg/system/stat_windows.go @@ -0,0 +1,12 @@ +// +build windows + +package system + +import ( + "errors" + "syscall" +) + +func fromStatT(s *syscall.Win32FileAttributeData) (*Stat, error) { + return nil, errors.New("fromStatT should not be called on windows path") +} From e45b0f92711ff190cff4b61b2ea80cdd53203a16 Mon Sep 17 00:00:00 2001 From: John Gossman Date: Thu, 30 Oct 2014 09:35:49 -0700 Subject: [PATCH 350/592] Remove unused sysinfo parameter to runconfig.Parse Removing dead code. 
Signed-off-by: John Gossman --- api/client/commands.go | 4 ++-- builder/dispatchers.go | 2 +- integration/runtime_test.go | 14 +++++++------- integration/server_test.go | 10 +++++----- integration/utils_test.go | 7 +++---- runconfig/config_test.go | 2 +- runconfig/parse.go | 13 +------------ runconfig/parse_test.go | 17 ++++++++--------- 8 files changed, 28 insertions(+), 41 deletions(-) diff --git a/api/client/commands.go b/api/client/commands.go index 3802bf4867..60487265ae 100644 --- a/api/client/commands.go +++ b/api/client/commands.go @@ -2183,7 +2183,7 @@ func (cli *DockerCli) CmdCreate(args ...string) error { flName = cmd.String([]string{"-name"}, "", "Assign a name to the container") ) - config, hostConfig, cmd, err := runconfig.Parse(cmd, args, nil) + config, hostConfig, cmd, err := runconfig.Parse(cmd, args) if err != nil { return err } @@ -2219,7 +2219,7 @@ func (cli *DockerCli) CmdRun(args ...string) error { ErrConflictDetachAutoRemove = fmt.Errorf("Conflicting options: --rm and -d") ) - config, hostConfig, cmd, err := runconfig.Parse(cmd, args, nil) + config, hostConfig, cmd, err := runconfig.Parse(cmd, args) if err != nil { return err } diff --git a/builder/dispatchers.go b/builder/dispatchers.go index f2fdd35955..d1f2890ada 100644 --- a/builder/dispatchers.go +++ b/builder/dispatchers.go @@ -183,7 +183,7 @@ func run(b *Builder, args []string, attributes map[string]bool, original string) runCmd.SetOutput(ioutil.Discard) runCmd.Usage = nil - config, _, _, err := runconfig.Parse(runCmd, append([]string{b.image}, args...), nil) + config, _, _, err := runconfig.Parse(runCmd, append([]string{b.image}, args...)) if err != nil { return err } diff --git a/integration/runtime_test.go b/integration/runtime_test.go index 75f68d5c1b..d173af1f7f 100644 --- a/integration/runtime_test.go +++ b/integration/runtime_test.go @@ -661,7 +661,7 @@ func TestDefaultContainerName(t *testing.T) { daemon := mkDaemonFromEngine(eng, t) defer nuke(daemon) - config, _, _, err := parseRun([]string{unitTestImageID, "echo test"}, nil) + config, _, _, err := parseRun([]string{unitTestImageID, "echo test"}) if err != nil { t.Fatal(err) } @@ -685,7 +685,7 @@ func TestRandomContainerName(t *testing.T) { daemon := mkDaemonFromEngine(eng, t) defer nuke(daemon) - config, _, _, err := parseRun([]string{GetTestImage(daemon).ID, "echo test"}, nil) + config, _, _, err := parseRun([]string{GetTestImage(daemon).ID, "echo test"}) if err != nil { t.Fatal(err) } @@ -716,7 +716,7 @@ func TestContainerNameValidation(t *testing.T) { {"abc-123_AAA.1", true}, {"\000asdf", false}, } { - config, _, _, err := parseRun([]string{unitTestImageID, "echo test"}, nil) + config, _, _, err := parseRun([]string{unitTestImageID, "echo test"}) if err != nil { if !test.Valid { continue @@ -757,7 +757,7 @@ func TestLinkChildContainer(t *testing.T) { daemon := mkDaemonFromEngine(eng, t) defer nuke(daemon) - config, _, _, err := parseRun([]string{unitTestImageID, "echo test"}, nil) + config, _, _, err := parseRun([]string{unitTestImageID, "echo test"}) if err != nil { t.Fatal(err) } @@ -773,7 +773,7 @@ func TestLinkChildContainer(t *testing.T) { t.Fatalf("Expect webapp id to match container id: %s != %s", webapp.ID, container.ID) } - config, _, _, err = parseRun([]string{GetTestImage(daemon).ID, "echo test"}, nil) + config, _, _, err = parseRun([]string{GetTestImage(daemon).ID, "echo test"}) if err != nil { t.Fatal(err) } @@ -799,7 +799,7 @@ func TestGetAllChildren(t *testing.T) { daemon := mkDaemonFromEngine(eng, t) defer nuke(daemon) - config, 
_, _, err := parseRun([]string{unitTestImageID, "echo test"}, nil) + config, _, _, err := parseRun([]string{unitTestImageID, "echo test"}) if err != nil { t.Fatal(err) } @@ -815,7 +815,7 @@ func TestGetAllChildren(t *testing.T) { t.Fatalf("Expect webapp id to match container id: %s != %s", webapp.ID, container.ID) } - config, _, _, err = parseRun([]string{unitTestImageID, "echo test"}, nil) + config, _, _, err = parseRun([]string{unitTestImageID, "echo test"}) if err != nil { t.Fatal(err) } diff --git a/integration/server_test.go b/integration/server_test.go index a90399957d..1af7bbe22f 100644 --- a/integration/server_test.go +++ b/integration/server_test.go @@ -12,7 +12,7 @@ func TestCreateNumberHostname(t *testing.T) { eng := NewTestEngine(t) defer mkDaemonFromEngine(eng, t).Nuke() - config, _, _, err := parseRun([]string{"-h", "web.0", unitTestImageID, "echo test"}, nil) + config, _, _, err := parseRun([]string{"-h", "web.0", unitTestImageID, "echo test"}) if err != nil { t.Fatal(err) } @@ -24,7 +24,7 @@ func TestCommit(t *testing.T) { eng := NewTestEngine(t) defer mkDaemonFromEngine(eng, t).Nuke() - config, _, _, err := parseRun([]string{unitTestImageID, "/bin/cat"}, nil) + config, _, _, err := parseRun([]string{unitTestImageID, "/bin/cat"}) if err != nil { t.Fatal(err) } @@ -48,7 +48,7 @@ func TestMergeConfigOnCommit(t *testing.T) { container1, _, _ := mkContainer(runtime, []string{"-e", "FOO=bar", unitTestImageID, "echo test > /tmp/foo"}, t) defer runtime.Destroy(container1) - config, _, _, err := parseRun([]string{container1.ID, "cat /tmp/foo"}, nil) + config, _, _, err := parseRun([]string{container1.ID, "cat /tmp/foo"}) if err != nil { t.Error(err) } @@ -102,7 +102,7 @@ func TestRestartKillWait(t *testing.T) { runtime := mkDaemonFromEngine(eng, t) defer runtime.Nuke() - config, hostConfig, _, err := parseRun([]string{"-i", unitTestImageID, "/bin/cat"}, nil) + config, hostConfig, _, err := parseRun([]string{"-i", unitTestImageID, "/bin/cat"}) if err != nil { t.Fatal(err) } @@ -163,7 +163,7 @@ func TestCreateStartRestartStopStartKillRm(t *testing.T) { eng := NewTestEngine(t) defer mkDaemonFromEngine(eng, t).Nuke() - config, hostConfig, _, err := parseRun([]string{"-i", unitTestImageID, "/bin/cat"}, nil) + config, hostConfig, _, err := parseRun([]string{"-i", unitTestImageID, "/bin/cat"}) if err != nil { t.Fatal(err) } diff --git a/integration/utils_test.go b/integration/utils_test.go index da20de586c..deb6a337a6 100644 --- a/integration/utils_test.go +++ b/integration/utils_test.go @@ -19,7 +19,6 @@ import ( "github.com/docker/docker/daemon" "github.com/docker/docker/engine" flag "github.com/docker/docker/pkg/mflag" - "github.com/docker/docker/pkg/sysinfo" "github.com/docker/docker/runconfig" "github.com/docker/docker/utils" ) @@ -250,7 +249,7 @@ func readFile(src string, t *testing.T) (content string) { // The caller is responsible for destroying the container. // Call t.Fatal() at the first error. 
func mkContainer(r *daemon.Daemon, args []string, t *testing.T) (*daemon.Container, *runconfig.HostConfig, error) { - config, hc, _, err := parseRun(args, nil) + config, hc, _, err := parseRun(args) defer func() { if err != nil && t != nil { t.Fatal(err) @@ -351,9 +350,9 @@ func getImages(eng *engine.Engine, t *testing.T, all bool, filter string) *engin } -func parseRun(args []string, sysInfo *sysinfo.SysInfo) (*runconfig.Config, *runconfig.HostConfig, *flag.FlagSet, error) { +func parseRun(args []string) (*runconfig.Config, *runconfig.HostConfig, *flag.FlagSet, error) { cmd := flag.NewFlagSet("run", flag.ContinueOnError) cmd.SetOutput(ioutil.Discard) cmd.Usage = nil - return runconfig.Parse(cmd, args, sysInfo) + return runconfig.Parse(cmd, args) } diff --git a/runconfig/config_test.go b/runconfig/config_test.go index d94ec4ec55..f856c87f54 100644 --- a/runconfig/config_test.go +++ b/runconfig/config_test.go @@ -9,7 +9,7 @@ import ( ) func parse(t *testing.T, args string) (*Config, *HostConfig, error) { - config, hostConfig, _, err := parseRun(strings.Split(args+" ubuntu bash", " "), nil) + config, hostConfig, _, err := parseRun(strings.Split(args+" ubuntu bash", " ")) return config, hostConfig, err } diff --git a/runconfig/parse.go b/runconfig/parse.go index dfc84c1892..2bd8cf969e 100644 --- a/runconfig/parse.go +++ b/runconfig/parse.go @@ -10,7 +10,6 @@ import ( "github.com/docker/docker/opts" flag "github.com/docker/docker/pkg/mflag" "github.com/docker/docker/pkg/parsers" - "github.com/docker/docker/pkg/sysinfo" "github.com/docker/docker/pkg/units" "github.com/docker/docker/utils" ) @@ -24,7 +23,7 @@ var ( ErrConflictHostNetworkAndLinks = fmt.Errorf("Conflicting options: --net=host can't be used with links. This would result in undefined behavior.") ) -func Parse(cmd *flag.FlagSet, args []string, sysInfo *sysinfo.SysInfo) (*Config, *HostConfig, *flag.FlagSet, error) { +func Parse(cmd *flag.FlagSet, args []string) (*Config, *HostConfig, *flag.FlagSet, error) { var ( // FIXME: use utils.ListOpts for attach and volumes? flAttach = opts.NewListOpts(opts.ValidateAttach) @@ -88,11 +87,6 @@ func Parse(cmd *flag.FlagSet, args []string, sysInfo *sysinfo.SysInfo) (*Config, return nil, nil, cmd, err } - // Check if the kernel supports memory limit cgroup. - if sysInfo != nil && *flMemoryString != "" && !sysInfo.MemoryLimit { - *flMemoryString = "" - } - // Validate input params if *flWorkingDir != "" && !path.IsAbs(*flWorkingDir) { return nil, nil, cmd, ErrInvalidWorkingDirectory @@ -302,11 +296,6 @@ func Parse(cmd *flag.FlagSet, args []string, sysInfo *sysinfo.SysInfo) (*Config, RestartPolicy: restartPolicy, } - if sysInfo != nil && flMemory > 0 && !sysInfo.SwapLimit { - //fmt.Fprintf(stdout, "WARNING: Your kernel does not support swap limit capabilities. 
Limitation discarded.\n") - config.MemorySwap = -1 - } - // When allocating stdin in attached mode, close stdin at client disconnect if config.OpenStdin && config.AttachStdin { config.StdinOnce = true diff --git a/runconfig/parse_test.go b/runconfig/parse_test.go index e807180d4c..cd90dc3a94 100644 --- a/runconfig/parse_test.go +++ b/runconfig/parse_test.go @@ -6,14 +6,13 @@ import ( flag "github.com/docker/docker/pkg/mflag" "github.com/docker/docker/pkg/parsers" - "github.com/docker/docker/pkg/sysinfo" ) -func parseRun(args []string, sysInfo *sysinfo.SysInfo) (*Config, *HostConfig, *flag.FlagSet, error) { +func parseRun(args []string) (*Config, *HostConfig, *flag.FlagSet, error) { cmd := flag.NewFlagSet("run", flag.ContinueOnError) cmd.SetOutput(ioutil.Discard) cmd.Usage = nil - return Parse(cmd, args, sysInfo) + return Parse(cmd, args) } func TestParseLxcConfOpt(t *testing.T) { @@ -34,27 +33,27 @@ func TestParseLxcConfOpt(t *testing.T) { } func TestNetHostname(t *testing.T) { - if _, _, _, err := parseRun([]string{"-h=name", "img", "cmd"}, nil); err != nil { + if _, _, _, err := parseRun([]string{"-h=name", "img", "cmd"}); err != nil { t.Fatalf("Unexpected error: %s", err) } - if _, _, _, err := parseRun([]string{"--net=host", "img", "cmd"}, nil); err != nil { + if _, _, _, err := parseRun([]string{"--net=host", "img", "cmd"}); err != nil { t.Fatalf("Unexpected error: %s", err) } - if _, _, _, err := parseRun([]string{"-h=name", "--net=bridge", "img", "cmd"}, nil); err != nil { + if _, _, _, err := parseRun([]string{"-h=name", "--net=bridge", "img", "cmd"}); err != nil { t.Fatalf("Unexpected error: %s", err) } - if _, _, _, err := parseRun([]string{"-h=name", "--net=none", "img", "cmd"}, nil); err != nil { + if _, _, _, err := parseRun([]string{"-h=name", "--net=none", "img", "cmd"}); err != nil { t.Fatalf("Unexpected error: %s", err) } - if _, _, _, err := parseRun([]string{"-h=name", "--net=host", "img", "cmd"}, nil); err != ErrConflictNetworkHostname { + if _, _, _, err := parseRun([]string{"-h=name", "--net=host", "img", "cmd"}); err != ErrConflictNetworkHostname { t.Fatalf("Expected error ErrConflictNetworkHostname, got: %s", err) } - if _, _, _, err := parseRun([]string{"-h=name", "--net=container:other", "img", "cmd"}, nil); err != ErrConflictNetworkHostname { + if _, _, _, err := parseRun([]string{"-h=name", "--net=container:other", "img", "cmd"}); err != ErrConflictNetworkHostname { t.Fatalf("Expected error ErrConflictNetworkHostname, got: %s", err) } } From d4dbb708320e59efa91b077303c87f9e9513cd91 Mon Sep 17 00:00:00 2001 From: Ahmet Alp Balkan Date: Tue, 21 Oct 2014 16:15:26 -0700 Subject: [PATCH 351/592] Use USERPROFILE path on Windows as home directory Signed-off-by: Ahmet Alp Balkan --- docker/flags.go | 10 +++++++++- 1 file changed, 9 insertions(+), 1 deletion(-) diff --git a/docker/flags.go b/docker/flags.go index 31dcbe2cff..1828c61d5e 100644 --- a/docker/flags.go +++ b/docker/flags.go @@ -4,6 +4,7 @@ import ( "fmt" "os" "path/filepath" + "runtime" "github.com/docker/docker/opts" flag "github.com/docker/docker/pkg/mflag" @@ -16,10 +17,17 @@ var ( func init() { if dockerCertPath == "" { - dockerCertPath = filepath.Join(os.Getenv("HOME"), ".docker") + dockerCertPath = filepath.Join(getHomeDir(), ".docker") } } +func getHomeDir() string { + if runtime.GOOS == "windows" { + return os.Getenv("USERPROFILE") + } + return os.Getenv("HOME") +} + var ( flVersion = flag.Bool([]string{"v", "-version"}, false, "Print version information and quit") flDaemon = flag.Bool([]string{"d", 
"-daemon"}, false, "Enable daemon mode") From 5a38680bd2283c87848d2e7f62a14f0261291c7c Mon Sep 17 00:00:00 2001 From: Ahmet Alp Balkan Date: Wed, 29 Oct 2014 15:46:45 -0700 Subject: [PATCH 352/592] Fix input volume path check on Windows used path package instead of path/filepath so that --volumes and --device parameters to always validate paths as unix paths instead of OS-dependent path convention Signed-off-by: Ahmet Alp Balkan --- opts/opts.go | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/opts/opts.go b/opts/opts.go index 4ca7ec58ce..d3202969b4 100644 --- a/opts/opts.go +++ b/opts/opts.go @@ -5,7 +5,7 @@ import ( "net" "net/url" "os" - "path/filepath" + "path" "regexp" "strings" @@ -151,13 +151,13 @@ func ValidatePath(val string) (string, error) { splited := strings.SplitN(val, ":", 2) if len(splited) == 1 { containerPath = splited[0] - val = filepath.Clean(splited[0]) + val = path.Clean(splited[0]) } else { containerPath = splited[1] - val = fmt.Sprintf("%s:%s", splited[0], filepath.Clean(splited[1])) + val = fmt.Sprintf("%s:%s", splited[0], path.Clean(splited[1])) } - if !filepath.IsAbs(containerPath) { + if !path.IsAbs(containerPath) { return val, fmt.Errorf("%s is not an absolute path", containerPath) } return val, nil From b7703a992e14255a5972bf157d0c2cfc0da3d53f Mon Sep 17 00:00:00 2001 From: Ahmet Alp Balkan Date: Mon, 27 Oct 2014 22:05:57 -0700 Subject: [PATCH 353/592] Add windows/(386,amd64) to cross platforms list Edited make scripts to append .exe to windows binary Signed-off-by: Ahmet Alp Balkan --- .gitignore | 1 + Dockerfile | 3 ++- project/make/binary | 12 ++++++++---- 3 files changed, 11 insertions(+), 5 deletions(-) diff --git a/.gitignore b/.gitignore index 2a86e41caf..68d2da95bc 100644 --- a/.gitignore +++ b/.gitignore @@ -4,6 +4,7 @@ .vagrant* bin docker/docker +*.exe .*.swp a.out *.orig diff --git a/Dockerfile b/Dockerfile index 65d9a0d171..344551d35d 100644 --- a/Dockerfile +++ b/Dockerfile @@ -68,7 +68,8 @@ RUN cd /usr/local/go/src && ./make.bash --no-clean 2>&1 ENV DOCKER_CROSSPLATFORMS \ linux/386 linux/arm \ darwin/amd64 darwin/386 \ - freebsd/amd64 freebsd/386 freebsd/arm + freebsd/amd64 freebsd/386 freebsd/arm \ + windows/amd64 windows/386 # (set an explicit GOARM of 5 for maximum compatibility) ENV GOARM 5 RUN cd /usr/local/go/src && bash -xc 'for platform in $DOCKER_CROSSPLATFORMS; do GOOS=${platform%/*} GOARCH=${platform##*/} ./make.bash --no-clean 2>&1; done' diff --git a/project/make/binary b/project/make/binary index b97069a856..962bebc68d 100755 --- a/project/make/binary +++ b/project/make/binary @@ -2,16 +2,20 @@ set -e DEST=$1 +BINARY_NAME="docker-$VERSION" +if [ "$(go env GOOS)" = 'windows' ]; then + BINARY_NAME+='.exe' +fi go build \ - -o "$DEST/docker-$VERSION" \ + -o "$DEST/$BINARY_NAME" \ "${BUILDFLAGS[@]}" \ -ldflags " $LDFLAGS $LDFLAGS_STATIC_DOCKER " \ ./docker -echo "Created binary: $DEST/docker-$VERSION" -ln -sf "docker-$VERSION" "$DEST/docker" +echo "Created binary: $DEST/$BINARY_NAME" +ln -sf "$BINARY_NAME" "$DEST/docker" -hash_files "$DEST/docker-$VERSION" +hash_files "$DEST/$BINARY_NAME" From edc6df256d21eb1d1aa36b241dcc6d4b83d58d75 Mon Sep 17 00:00:00 2001 From: Vivek Goyal Date: Fri, 14 Nov 2014 18:15:56 -0500 Subject: [PATCH 354/592] devmapper: Call UdevWait() even in failure path Currently we set up a cookie and upon failure not call UdevWait(). This does not cleanup the cookie and associated semaphore and system will soon max out on total number of semaphores. 
To avoid this, call UdevWait() even in failure path which in turn will cleanup associated semaphore. Signed-off-by: Vivek Goyal Signed-off-by: Vincent Batts --- pkg/devicemapper/devmapper.go | 10 ++++------ 1 file changed, 4 insertions(+), 6 deletions(-) diff --git a/pkg/devicemapper/devmapper.go b/pkg/devicemapper/devmapper.go index 4043da6b45..16c0ac1c8c 100644 --- a/pkg/devicemapper/devmapper.go +++ b/pkg/devicemapper/devmapper.go @@ -373,13 +373,12 @@ func CreatePool(poolName string, dataFile, metadataFile *os.File, poolBlockSize if err := task.SetCookie(&cookie, 0); err != nil { return fmt.Errorf("Can't set cookie %s", err) } + defer UdevWait(cookie) if err := task.Run(); err != nil { return fmt.Errorf("Error running DeviceCreate (CreatePool) %s", err) } - UdevWait(cookie) - return nil } @@ -516,13 +515,12 @@ func ResumeDevice(name string) error { if err := task.SetCookie(&cookie, 0); err != nil { return fmt.Errorf("Can't set cookie %s", err) } + defer UdevWait(cookie) if err := task.Run(); err != nil { return fmt.Errorf("Error running DeviceResume %s", err) } - UdevWait(cookie) - return nil } @@ -596,12 +594,12 @@ func ActivateDevice(poolName string, name string, deviceId int, size uint64) err return fmt.Errorf("Can't set cookie %s", err) } + defer UdevWait(cookie) + if err := task.Run(); err != nil { return fmt.Errorf("Error running DeviceCreate (ActivateDevice) %s", err) } - UdevWait(cookie) - return nil } From 2facc0467336a80f48c765dbdbd803055a431aa9 Mon Sep 17 00:00:00 2001 From: Doug Davis Date: Wed, 1 Oct 2014 06:07:24 -0700 Subject: [PATCH 355/592] Add --log-level support Next steps, in another PR, would be: - make all logging go through the logrus stuff - I'd like to see if we can remove the env var stuff (like DEBUG) but we'll see Closes #5198 Signed-off-by: Doug Davis --- contrib/docker-device-tool/device_tool.go | 1 + daemon/daemon.go | 1 - docker/docker.go | 20 ++++++-- docker/flags.go | 1 + docker/log.go | 8 +-- docs/man/docker.1.md | 3 ++ docs/sources/reference/commandline/cli.md | 2 + integration-cli/docker_cli_daemon_test.go | 61 +++++++++++++++++++++++ integration-cli/docker_cli_run_test.go | 2 +- integration-cli/docker_utils.go | 24 +++++++-- 10 files changed, 108 insertions(+), 15 deletions(-) diff --git a/contrib/docker-device-tool/device_tool.go b/contrib/docker-device-tool/device_tool.go index 23d19f0237..8ab53de8da 100644 --- a/contrib/docker-device-tool/device_tool.go +++ b/contrib/docker-device-tool/device_tool.go @@ -60,6 +60,7 @@ func main() { if *flDebug { os.Setenv("DEBUG", "1") + log.SetLevel("debug") } if flag.NArg() < 1 { diff --git a/daemon/daemon.go b/daemon/daemon.go index 88fb9fde66..145a466486 100644 --- a/daemon/daemon.go +++ b/daemon/daemon.go @@ -719,7 +719,6 @@ func NewDaemon(config *Config, eng *engine.Engine) (*Daemon, error) { } func NewDaemonFromDirectory(config *Config, eng *engine.Engine) (*Daemon, error) { - // Apply configuration defaults if config.Mtu == 0 { config.Mtu = getDefaultNetworkMtu() } diff --git a/docker/docker.go b/docker/docker.go index 92cdd95e0f..bb61d51725 100644 --- a/docker/docker.go +++ b/docker/docker.go @@ -5,10 +5,10 @@ import ( "crypto/x509" "fmt" "io/ioutil" - "log" // see gh#8745, client needs to use go log pkg "os" "strings" + log "github.com/Sirupsen/logrus" "github.com/docker/docker/api" "github.com/docker/docker/api/client" "github.com/docker/docker/dockerversion" @@ -36,11 +36,23 @@ func main() { showVersion() return } - if *flDebug { - os.Setenv("DEBUG", "1") + + if *flLogLevel != "" { + lvl, err := 
log.ParseLevel(*flLogLevel) + if err != nil { + log.Fatalf("Unable to parse logging level: %s", *flLogLevel) + } + initLogging(lvl) + } else { + initLogging(log.InfoLevel) } - initLogging(*flDebug) + // -D, --debug, -l/--log-level=debug processing + // When/if -D is removed this block can be deleted + if *flDebug { + os.Setenv("DEBUG", "1") + initLogging(log.DebugLevel) + } if len(flHosts) == 0 { defaultHost := os.Getenv("DOCKER_HOST") diff --git a/docker/flags.go b/docker/flags.go index 31dcbe2cff..78d6b18993 100644 --- a/docker/flags.go +++ b/docker/flags.go @@ -25,6 +25,7 @@ var ( flDaemon = flag.Bool([]string{"d", "-daemon"}, false, "Enable daemon mode") flDebug = flag.Bool([]string{"D", "-debug"}, false, "Enable debug mode") flSocketGroup = flag.String([]string{"G", "-group"}, "docker", "Group to assign the unix socket specified by -H when running in daemon mode\nuse '' (the empty string) to disable setting of a group") + flLogLevel = flag.String([]string{"l", "-log-level"}, "info", "Set the logging level") flEnableCors = flag.Bool([]string{"#api-enable-cors", "-api-enable-cors"}, false, "Enable CORS headers in the remote API") flTls = flag.Bool([]string{"-tls"}, false, "Use TLS; implied by tls-verify flags") flTlsVerify = flag.Bool([]string{"-tlsverify"}, dockerTlsVerify, "Use TLS and verify the remote (daemon: verify client, client: verify daemon)") diff --git a/docker/log.go b/docker/log.go index a245aed1fb..cdbbd4408f 100644 --- a/docker/log.go +++ b/docker/log.go @@ -6,11 +6,7 @@ import ( log "github.com/Sirupsen/logrus" ) -func initLogging(debug bool) { +func initLogging(lvl log.Level) { log.SetOutput(os.Stderr) - if debug { - log.SetLevel(log.DebugLevel) - } else { - log.SetLevel(log.InfoLevel) - } + log.SetLevel(lvl) } diff --git a/docs/man/docker.1.md b/docs/man/docker.1.md index 26f5c2133a..f3ff68bc9f 100644 --- a/docs/man/docker.1.md +++ b/docs/man/docker.1.md @@ -65,6 +65,9 @@ unix://[/path/to/socket] to use. **--iptables**=*true*|*false* Disable Docker's addition of iptables rules. Default is true. +**-l**, **--log-level**="*debug*|*info*|*error*|*fatal*"" + Set the logging level. Default is `info`. + **--mtu**=VALUE Set the containers network mtu. Default is `1500`. diff --git a/docs/sources/reference/commandline/cli.md b/docs/sources/reference/commandline/cli.md index 2da65a2bd1..7526954b12 100644 --- a/docs/sources/reference/commandline/cli.md +++ b/docs/sources/reference/commandline/cli.md @@ -75,6 +75,8 @@ expect an integer, and they can only be specified once. 
--ip-forward=true Enable net.ipv4.ip_forward --ip-masq=true Enable IP masquerading for bridge's IP range --iptables=true Enable Docker's addition of iptables rules + -l, --log-level="info" Set the logging level + --mtu=0 Set the containers network MTU if no value is provided: default to the default route MTU or 1500 if no default route is available -p, --pidfile="/var/run/docker.pid" Path to use for daemon PID file diff --git a/integration-cli/docker_cli_daemon_test.go b/integration-cli/docker_cli_daemon_test.go index fa7901d82d..31bfac3f67 100644 --- a/integration-cli/docker_cli_daemon_test.go +++ b/integration-cli/docker_cli_daemon_test.go @@ -2,6 +2,7 @@ package main import ( "encoding/json" + "io/ioutil" "os" "os/exec" "strings" @@ -223,3 +224,63 @@ func TestDaemonIptablesCreate(t *testing.T) { logDone("daemon - run,iptables - iptables rules for always restarted container created after daemon restart") } + +func TestDaemonLoggingLevel(t *testing.T) { + d := NewDaemon(t) + + if err := d.Start("--log-level=bogus"); err == nil { + t.Fatal("Daemon should not have been able to start") + } + + d = NewDaemon(t) + if err := d.Start("--log-level=debug"); err != nil { + t.Fatal(err) + } + d.Stop() + content, _ := ioutil.ReadFile(d.logFile.Name()) + if !strings.Contains(string(content), `level="debug"`) { + t.Fatalf(`Missing level="debug" in log file:\n%s`, string(content)) + } + + d = NewDaemon(t) + if err := d.Start("--log-level=fatal"); err != nil { + t.Fatal(err) + } + d.Stop() + content, _ = ioutil.ReadFile(d.logFile.Name()) + if strings.Contains(string(content), `level="debug"`) { + t.Fatalf(`Should not have level="debug" in log file:\n%s`, string(content)) + } + + d = NewDaemon(t) + if err := d.Start("-D"); err != nil { + t.Fatal(err) + } + d.Stop() + content, _ = ioutil.ReadFile(d.logFile.Name()) + if !strings.Contains(string(content), `level="debug"`) { + t.Fatalf(`Missing level="debug" in log file using -D:\n%s`, string(content)) + } + + d = NewDaemon(t) + if err := d.Start("--debug"); err != nil { + t.Fatal(err) + } + d.Stop() + content, _ = ioutil.ReadFile(d.logFile.Name()) + if !strings.Contains(string(content), `level="debug"`) { + t.Fatalf(`Missing level="debug" in log file using --debug:\n%s`, string(content)) + } + + d = NewDaemon(t) + if err := d.Start("--debug", "--log-level=fatal"); err != nil { + t.Fatal(err) + } + d.Stop() + content, _ = ioutil.ReadFile(d.logFile.Name()) + if !strings.Contains(string(content), `level="debug"`) { + t.Fatalf(`Missing level="debug" in log file when using both --debug and --log-level=fatal:\n%s`, string(content)) + } + + logDone("daemon - Logging Level") +} diff --git a/integration-cli/docker_cli_run_test.go b/integration-cli/docker_cli_run_test.go index 54949730a1..ce85f7741b 100644 --- a/integration-cli/docker_cli_run_test.go +++ b/integration-cli/docker_cli_run_test.go @@ -1804,7 +1804,7 @@ func TestRunWithBadDevice(t *testing.T) { if err == nil { t.Fatal("Run should fail with bad device") } - expected := `"/etc": not a device node` + expected := `\"/etc\": not a device node` if !strings.Contains(out, expected) { t.Fatalf("Output should contain %q, actual out: %q", expected, out) } diff --git a/integration-cli/docker_utils.go b/integration-cli/docker_utils.go index b9660d20b6..61a616ceb2 100644 --- a/integration-cli/docker_utils.go +++ b/integration-cli/docker_utils.go @@ -41,7 +41,7 @@ func NewDaemon(t *testing.T) *Daemon { t.Fatal("Please set the DEST environment variable") } - dir := filepath.Join(dest, fmt.Sprintf("daemon%d", 
time.Now().Unix())) + dir := filepath.Join(dest, fmt.Sprintf("daemon%d", time.Now().UnixNano()%100000000)) daemonFolder, err := filepath.Abs(dir) if err != nil { t.Fatalf("Could not make %q an absolute path: %v", dir, err) @@ -69,10 +69,23 @@ func (d *Daemon) Start(arg ...string) error { args := []string{ "--host", d.sock(), - "--daemon", "--debug", + "--daemon", "--graph", fmt.Sprintf("%s/graph", d.folder), "--pidfile", fmt.Sprintf("%s/docker.pid", d.folder), } + + // If we don't explicitly set the log-level or debug flag(-D) then + // turn on debug mode + foundIt := false + for _, a := range arg { + if strings.Contains(a, "--log-level") || strings.Contains(a, "-D") { + foundIt = true + } + } + if !foundIt { + args = append(args, "--debug") + } + if d.storageDriver != "" { args = append(args, "--storage-driver", d.storageDriver) } @@ -83,7 +96,7 @@ func (d *Daemon) Start(arg ...string) error { args = append(args, arg...) d.cmd = exec.Command(dockerBinary, args...) - d.logFile, err = os.OpenFile(filepath.Join(d.folder, "docker.log"), os.O_WRONLY|os.O_CREATE|os.O_APPEND, 0600) + d.logFile, err = os.OpenFile(filepath.Join(d.folder, "docker.log"), os.O_RDWR|os.O_CREATE|os.O_APPEND, 0600) if err != nil { d.t.Fatalf("Could not create %s/docker.log: %v", d.folder, err) } @@ -107,8 +120,13 @@ func (d *Daemon) Start(arg ...string) error { tick := time.Tick(500 * time.Millisecond) // make sure daemon is ready to receive requests + startTime := time.Now().Unix() for { d.t.Log("waiting for daemon to start") + if time.Now().Unix()-startTime > 5 { + // After 5 seconds, give up + return errors.New("Daemon exited and never started") + } select { case <-time.After(2 * time.Second): return errors.New("timeout: daemon does not respond") From bdaa76e8cf3baeeaa645d91aed168028301f537f Mon Sep 17 00:00:00 2001 From: Daehyeok Mun Date: Mon, 17 Nov 2014 04:23:22 +0900 Subject: [PATCH 356/592] Fix misspelling Fix misspelling from independant to independent Signed-off-by: Daehyeok Mun --- docs/sources/contributing/docs_style-guide.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/sources/contributing/docs_style-guide.md b/docs/sources/contributing/docs_style-guide.md index f0e84e789a..2da7728dbf 100644 --- a/docs/sources/contributing/docs_style-guide.md +++ b/docs/sources/contributing/docs_style-guide.md @@ -147,7 +147,7 @@ periods, PC not P.C. When writing lists, keep the following in mind: -Use bullets when the items being listed are independant of each other and the +Use bullets when the items being listed are independent of each other and the order of presentation is not important. 
Use numbers for steps that have to happen in order or if you have mentioned the From ab7a6544b09c0b061e35aa7df766ef7adc1fd2fe Mon Sep 17 00:00:00 2001 From: Arnaud Porterie Date: Sat, 15 Nov 2014 14:53:11 -0800 Subject: [PATCH 357/592] Update AUTHORS file Signed-off-by: Arnaud Porterie --- .mailmap | 30 ++++- AUTHORS | 239 +++++++++++++++++++++++++++--------- project/generate-authors.sh | 2 +- 3 files changed, 210 insertions(+), 61 deletions(-) diff --git a/.mailmap b/.mailmap index 47860de4c3..826fae0ead 100644 --- a/.mailmap +++ b/.mailmap @@ -1,8 +1,10 @@ -# Generate AUTHORS: hack/generate-authors.sh +# Generate AUTHORS: project/generate-authors.sh # Tip for finding duplicates (besides scanning the output of AUTHORS for name # duplicates that aren't also email duplicates): scan the output of: # git log --format='%aE - %aN' | sort -uf +# +# For explanation on this file format: man git-shortlog @@ -29,6 +31,7 @@ Andy Smith + Walter Stanish @@ -54,6 +57,7 @@ Jean-Baptiste Dalido + @@ -63,10 +67,13 @@ Jean-Baptiste Dalido + Sven Dowideit Sven Dowideit Sven Dowideit Sven Dowideit <¨SvenDowideit@home.org.au¨> +Sven Dowideit +Sven Dowideit unclejack Alexandr Morozov @@ -97,3 +104,24 @@ Matthew Heon Francisco Carriedo + + + + +Brian Goff + + + +Hollie Teal + + + +Jessica Frazelle Jessie Frazelle + + + + + +Thomas LEVEIL Thomas LÉVEIL + + diff --git a/AUTHORS b/AUTHORS index 43904e9e34..3d3fe3c7bc 100644 --- a/AUTHORS +++ b/AUTHORS @@ -1,69 +1,87 @@ # This file lists all individuals having contributed content to the repository. -# For how it is generated, see `hack/generate-authors.sh`. +# For how it is generated, see `project/generate-authors.sh`. Aanand Prasad Aaron Feng Aaron Huslage Abel Muiño +Abhinav Ajgaonkar +Abin Shahab Adam Miller Adam Singer Aditya Adrian Mouat Adrien Folie +Ahmet Alp Balkan AJ Bowen -Al Tobey alambike +Alan Thompson +Albert Callarisa Albert Zhang Aleksa Sarai -Alex Gaynor -Alex Warhawk Alexander Larsson Alexander Shopov -Alexandr Morozov +Alexandr Morozov Alexey Kotlyarov Alexey Shamrin +Alex Gaynor Alexis THOMAS +Alex Warhawk almoehi +Al Tobey +Álvaro Lázaro amangoel +Amit Bakshi AnandkumarPatel -Andre Dublin <81dublin@gmail.com> +Anand Patil Andrea Luzzardi -Andrea Turli +Andreas Köhler Andreas Savvides Andreas Tiefenthaler +Andrea Turli +Andre Dublin <81dublin@gmail.com> Andrew Duckworth Andrew France Andrew Macgregor Andrew Munsell +Andrews Medina Andrew Weiss Andrew Williams -Andrews Medina +Andrey Petrov +Andrey Stolbovsky Andy Chambers andy diller Andy Goldstein Andy Kipp Andy Rothfusz Andy Smith +Andy Wilson Anthony Bishopric Anton Löfgren Anton Nikitin Antony Messerli apocas -Arnaud Porterie +ArikaChen +Arnaud Porterie +Arthur Gautier Asbjørn Enge +averagehuman +Avi Miller Barnaby Gray Barry Allard Bartłomiej Piotrowski bdevloed Ben Firshman +Benjamin Atkin +Benoit Chesneau Ben Sargent Ben Toews Ben Wiklund -Benjamin Atkin -Benoit Chesneau Bernerd Schaefer +Bert Goethals Bhiraj Butala bin liu +Blake Geno Bouke Haarsma Boyd Hemphill Brandon Liu @@ -80,10 +98,13 @@ Brian Shumate Brice Jaglin Briehan Lombaard Bruno Bigras +Bruno Binet Bruno Renié Bryan Bess Bryan Matsuo Bryan Murphy +Burke Libbey +Byung Kang Caleb Spare Calen Pennington Cameron Boehmer @@ -95,56 +116,68 @@ Charlie Lewis Chewey Chia-liang Kao Chris Alfonso +Chris Armstrong +chrismckinnel Chris Snow Chris St. Pierre -chrismckinnel Christian Berendt ChristoperBiscardi -Christophe Troestler Christopher Currie Christopher Rigor +Christophe Troestler Ciro S. 
Costa Clayton Coleman Colin Dunklau Colin Rice Colin Walters Cory Forsyth -cpuguy83 cressie176 Cruceru Calin-Cristian Daan van Berkel +Daehyeok.Mun Dafydd Crosby Dan Buch +Dan Cotora +Dan Griffin Dan Hirsch -Dan Keder -Dan McPherson -Dan Stine -Dan Walsh -Dan Williams +Daniel, Dao Quang Minh Daniel Exner +Daniel Farrell Daniel Garcia Daniel Gasienica +Daniel Menet Daniel Mizyrycki Daniel Norberg Daniel Nordberg Daniel Robinson Daniel Von Fange Daniel YC Lin -Daniel, Dao Quang Minh +Dan Keder +Dan McPherson Danny Berger Danny Yates +Dan Stine +Dan Walsh +Dan Williams Darren Coxall Darren Shepherd David Anderson David Calavera David Corking +Davide Ceretti David Gageot +David Gebler David Mcanulty +David Pelaez David Röthlisberger David Sissitka +Dawn Chen +decadent Deni Bertovic Derek +Derek McGowan Deric Crago +Deshi Xiao Dinesh Subhraveti Djibril Koné dkumor @@ -154,11 +187,13 @@ Dominik Honnef Don Spaulding Doug Davis doug tangren -Dr Nic Williams +dragon788 Dražen Lučanin +Dr Nic Williams Dustin Sallings Edmund Wagner Eiichi Tsukata +Eike Herzbach Eivind Uggedal Elias Probst Emil Hernvall @@ -166,17 +201,19 @@ Emily Rose Eric Hanchrow Eric Lee Eric Myhre -Eric Windisch +Eric Paris Eric Windisch Erik Hollensbe Erik Inge Bolsø +Erik Kristensen Erno Hopearuoho +Eugene Yakubovich eugenkrizo +evanderkoogh Evan Hazlett Evan Krall Evan Phoenix Evan Wies -evanderkoogh Eystein Måløy Stenberg ezbercih Fabio Falci @@ -186,49 +223,60 @@ Faiz Khan Fareed Dudhia Felix Rabe Fernando +Filipe Brandenburger Flavio Castelli FLGMwt Francisco Carriedo Francisco Souza Frank Macreery -Fred Lifton +Frank Rosquin Frederick F. Kautz IV Frederik Loeffert +Fred Lifton Freek Kalter Gabe Rosenhouse Gabor Nagy Gabriel Monroy Galen Sampson Gareth Rushgrove +gautam, prasanna Geoffrey Bachelet +George Xie Gereon Frey German DZ Gert van Valkenhoef Giuseppe Mazzotta Gleb Fotengauer-Malinovskiy +Gleb M Borisov Glyn Normington Goffert van Gool +golubbe Graydon Hoare Greg Thornton grunny Guilherme Salgado +Guillaume Dufour Guillaume J. 
Charmes Gurjeet Singh Guruprasad +Hans Rødtang Harald Albers Harley Laue Hector Castro Henning Sprang Hobofan -Hollie Teal -Hollie Teal -hollietealok +Hollie Teal +Huayi Zhang +Hugo Duncan Hunter Blanks +Hu Tao +Huu Nguyen hyeongkyu.lee Ian Babrou Ian Bull Ian Main Ian Truslove +Igor Dolzhikov ILYA Khlopotov inglesp Isaac Dupree @@ -236,8 +284,8 @@ Isabel Jimenez Isao Jonas Ivan Fraixedes Jack Danger Canty -Jake Moshenko jakedt +Jake Moshenko James Allen James Carr James DeFelice @@ -245,6 +293,7 @@ James Harrison Fisher James Kyle James Mills James Turnbull +Jan Keromnes Jan Pazdziora Jan Toebes Jaroslaw Zabiello @@ -256,31 +305,35 @@ Jason McVetta Jason Plum Jean-Baptiste Barth Jean-Baptiste Dalido +Jean-Paul Calderone Jeff Lindsay -Jeff Welch Jeffrey Bolle +Jeff Welch Jeremy Grosser +Jérôme Petazzoni Jesse Dubay +Jessica Frazelle Jezeniel Zapanta Jilles Oldenbeuving Jim Alateras -Jim Perrin Jimmy Cuadra +Jim Perrin Jiří Župka Joe Beda +Joe Ferguson +Joel Handwell Joe Shaw Joe Van Dyk -Joel Handwell Joffrey F Johan Euphrosine -Johan Rydberg Johannes 'fish' Ziemke +Johan Rydberg John Costa John Feminella John Gardiner Myers +John Gossman John OBrien III John Warwick -Jon Wedaman Jonas Pfenniger Jonathan Boulle Jonathan Camp @@ -288,22 +341,25 @@ Jonathan McCrohan Jonathan Mueller Jonathan Pares Jonathan Rudenberg +Jon Wedaman Joost Cassee Jordan Arentsen Jordan Sissel Joseph Anthony Pasquale Holsten Joseph Hager -Josh Josh Hawn +Josh Josh Poimboeuf +Josiah Kiehl JP +Julian Taylor Julien Barbier Julien Bordellier Julien Dubois Justin Force Justin Plock Justin Simonelis -Jérôme Petazzoni +Jyrki Puttonen Karan Lyons Karl Grzeszczak Kato Kazuyoshi @@ -311,57 +367,68 @@ Kawsar Saiyeed Keli Hu Ken Cochrane Ken ICHIKAWA -Kevin "qwazerty" Houdebert Kevin Clark Kevin J. Lynagh Kevin Menard +Kevin "qwazerty" Houdebert Kevin Wallace Keyvan Fatehi kies -Kim BKC Carlbacker kim0 +Kim BKC Carlbacker Kimbro Staken Kiran Gangadharan knappe Kohei Tsuruta +Konrad Kleine Konstantin Pelykh +krrg Kyle Conroy kyu Lachlan Coote +Lajos Papp +Lakshan Perera lalyos Lance Chen Lars R. Damerow Laurie Voss leeplay +Lei Jitang Len Weincier +Leszek Kowalski Levi Gross Lewis Peckover Liang-Chi Hsieh +limsy Lokesh Mandvekar Louis Opter lukaspustina lukemarsden +Madhu Venugopal Mahesh Tiyyagura +Malte Janduda Manfred Zabarauskas Manuel Meurer Manuel Woelker Marc Abramowitz Marc Kuo -Marc Tamsky Marco Hennings +Marc Tamsky Marcus Farkas -Marcus Ramberg marcuslinke +Marcus Ramberg Marek Goldmann Marius Voila Mark Allen Mark McGranaghan Marko Mikulicic +Marko Tibold Markus Fix Martijn van Oosterhout Martin Redmond Mason Malone Mateusz Sulima +Mathias Monnerville Mathieu Le Marec - Pasquet Matt Apperson Matt Bachmann @@ -372,17 +439,24 @@ Matthias Klumpp Matthias Kühnle mattymo mattyw -Max Shytikov -Maxim Treskin Maxime Petazzoni +Maxim Treskin +Max Shytikov +Médi-Rémi Hashim meejah +Mengdi Gao +Mert Yazıcıoğlu Michael Brown Michael Crosby Michael Gorsuch +Michael Hudson-Doyle Michael Neale -Michael Prokop -Michael Stapelberg Michaël Pailloncy +Michael Prokop +Michael Scharf +Michael Stapelberg +Michael Thies +Michal Jemala Michiel@unhosted Miguel Angel Fernández Mike Chelen @@ -395,32 +469,40 @@ Mohit Soni Morgante Pell Morten Siebuhr Mrunal Patel +mschurenko +Mustafa Akın Nan Monnand Deng Naoki Orii Nate Jones +Nathan Hsieh Nathan Kleyn Nathan LeClaire Nelson Chen Niall O'Higgins +Nicholas E. 
Rabenau Nick Payne Nick Stenning Nick Stinemates +Nicolas De loof Nicolas Dudebout +Nicolas Goy Nicolas Kaiser NikolaMandic noducks Nolan Darilek -O.S. Tezer +nzwsch OddBloke odk- Oguz Bilgic +Oh Jinkyun Ole Reifschneider Olivier Gambier +O.S. Tezer pandrew Pascal Borreli +Pascal Hartig Patrick Hemmer pattichen -Paul Paul Annesley Paul Bowsher Paul Hammond @@ -428,25 +510,39 @@ Paul Jimenez Paul Lietar Paul Morie Paul Nasrat +Paul Paul Weaver +Pavlos Ratis Peter Bourgon Peter Braden +Peter Ericson +Peter Salvatore Peter Waller -Phil -Phil Spitler +Phil Estes +Philipp Weissensteiner Phillip Alexander +Phil Spitler +Phil Piergiuliano Bossi Pierre-Alain RIVIERE +Pierre Piotr Bogdan +pixelistik +Prasanna Gautam +Przemek Hejman pysqz +Qiang Huang Quentin Brossard r0n22 Rafal Jeczalik +Rafe Colton Rajat Pandit Rajdeep Dua Ralph Bean Ramkumar Ramachandra Ramon van Alteren +Recursive Madman +Remi Rampin Renato Riccieri Santos Zannon rgstephens Rhys Hiltner @@ -455,6 +551,7 @@ Richo Healey Rick Bradley Rick van de Loo Robert Bachmann +Robert Bittle Robert Obryk Roberto G. Hashioka Robin Speekenbrink @@ -470,25 +567,30 @@ Rovanion Luckey Rudolph Gottesheim Ryan Anderson Ryan Aslett +Ryan Detzel Ryan Fowler Ryan O'Donnell Ryan Seto Ryan Thomas -s-ko Sam Alba Sam Bailey Sam J Sharpe Sam Reis Sam Rijs Samuel Andaya +Samuel PHAN satoru Satoshi Amemiya Scott Bessler Scott Collier +Scott Johnston +Scott Walls Sean Cronin Sean P. Kane Sebastiaan van Stijn -Sebastiaan van Stijn +Sébastien Luttringer +Sébastien +Sébastien Stormacq Senthil Kumar Selvaraj SeongJae Park Shane Canon @@ -496,28 +598,30 @@ shaunol Shawn Landden Shawn Siefkas Shih-Yuan Lee +shuai-z Silas Sewell Simon Taranto Sindhu S Sjoerd Langkemper +s-ko Solomon Hykes Song Gao Soulou soulshake Sridatta Thatipamala Sridhar Ratnakumar +Srini Brahmaroutu Steeve Morin Stefan Praszalowicz Stephen Crosby Steven Burgess +Steven Merrill sudosurootdev -Sven Dowideit +Sven Dowideit Sylvain Bellemare -Sébastien -Sébastien Luttringer -Sébastien Stormacq tang0th Tatsuki Sugiura +Ted M. Young Tehmasp Chaudhri Thatcher Peskens Thermionix @@ -526,25 +630,32 @@ Thomas Bikeev Thomas Frössman Thomas Hansen Thomas LEVEIL +Thomas Orozco Thomas Schroeter Tianon Gravi Tibor Vass Tim Bosse -Tim Ruffles -Tim Ruffles -Tim Terhorst +Tim Hockin Timothy Hobbs +Tim Ruffles +Tim Smith +Tim Terhorst tjmehta +tjwebb123 +tobe Tobias Bieniek Tobias Gesellchen Tobias Schmidt Tobias Schwab Todd Lunter +Tomasz Lipinski Tom Fotherby Tom Hulihan Tom Maaswinkel Tommaso Visconti +Tonis Tiigi Tony Daws +Torstein Husebø tpng Travis Cline Trent Ogren @@ -560,33 +671,43 @@ Victor Vieux Viktor Vojnovski Vincent Batts Vincent Bernat +Vincent Bernat +Vincent Giersch Vincent Mayers Vincent Woo Vinod Kulkarni +Vishal Doshi Vishnu Kannan Vitor Monteiro Vivek Agarwal +Vivek Dasgupta +Vivek Goyal Vladimir Bulyga Vladimir Kirillov Vladimir Rutsky +Vojtech Vitek (V-Teq) waitingkuo Walter Leibbrandt Walter Stanish +Ward Vandewege WarheadsSE Wes Morgan Will Dietz -Will Rouesnel -Will Weaver William Delanoue William Henry William Riancho William Thurston +Will Rouesnel +Will Weaver wyc Xiuming Chen +xuzhaokui Yang Bai Yasunori Mahata +Yohei Ueda Yurii Rashkovskii Zac Dover +Zach Borboa Zain Memon Zaiste! 
Zane DeGraffenried @@ -594,4 +715,4 @@ Zilin Du zimbatm Zoltan Tombol zqh -Álvaro Lázaro +尹吉峰 diff --git a/project/generate-authors.sh b/project/generate-authors.sh index 83f61df373..0994662767 100755 --- a/project/generate-authors.sh +++ b/project/generate-authors.sh @@ -8,7 +8,7 @@ cd "$(dirname "$(readlink -f "$BASH_SOURCE")")/.." { cat <<-'EOH' # This file lists all individuals having contributed content to the repository. - # For how it is generated, see `hack/generate-authors.sh`. + # For how it is generated, see `project/generate-authors.sh`. EOH echo git log --format='%aN <%aE>' | sort -uf From d5098fde9a64fdec2098c7a0c6bd777f88f7d4c9 Mon Sep 17 00:00:00 2001 From: Daehyeok Mun Date: Mon, 17 Nov 2014 23:27:03 +0900 Subject: [PATCH 358/592] Remove obsolete comments In the below comment line, already codes for log exists so following comment is obsolete. // TODO log Signed-off-by: Daehyeok Mun --- trust/trusts.go | 1 - 1 file changed, 1 deletion(-) diff --git a/trust/trusts.go b/trust/trusts.go index 33354bd3ce..f5e317e9e3 100644 --- a/trust/trusts.go +++ b/trust/trusts.go @@ -182,7 +182,6 @@ func (t *TrustStore) fetch() { go func() { err := t.reload() if err != nil { - // TODO log log.Infof("Reload of trust graph failed: %s", err) } }() From 91fb9b2f5a07232cecfcde38a98c3ec6410239b7 Mon Sep 17 00:00:00 2001 From: Srini Brahmaroutu Date: Fri, 24 Oct 2014 04:27:09 +0000 Subject: [PATCH 359/592] Fix the documentation and integration test for container resize Closes #8728 Signed-off-by: Srini Brahmaroutu --- docs/sources/http-routingtable.md | 2 +- .../reference/api/docker_remote_api_v1.16.md | 8 +++---- .../reference/api/docker_remote_api_v1.9.md | 22 ------------------- integration-cli/docker_api_resize_test.go | 2 +- 4 files changed, 6 insertions(+), 28 deletions(-) diff --git a/docs/sources/http-routingtable.md b/docs/sources/http-routingtable.md index 4de7bcd3fa..ff66c7a198 100644 --- a/docs/sources/http-routingtable.md +++ b/docs/sources/http-routingtable.md @@ -44,7 +44,7 @@ [`POST /containers/(id)/wait`](../reference/api/docker_remote_api_v1.9/#post--containers-(id)-wait) ** [`POST /containers/create`](../reference/api/docker_remote_api_v1.9/#post--containers-create) ** [`GET /containers/json`](../reference/api/docker_remote_api_v1.9/#get--containers-json) ** - [`GET /containers/resize`](../reference/api/docker_remote_api_v1.9/#get--containers-resize) ** + [`POST /containers/(id)/resize`](../reference/api/docker_remote_api_v1.9/#get--containers-resize) **   **/events** [`GET /events`](../reference/api/docker_remote_api_v1.9/#get--events) ** diff --git a/docs/sources/reference/api/docker_remote_api_v1.16.md b/docs/sources/reference/api/docker_remote_api_v1.16.md index db07a97a6e..d8ce9469a6 100644 --- a/docs/sources/reference/api/docker_remote_api_v1.16.md +++ b/docs/sources/reference/api/docker_remote_api_v1.16.md @@ -471,13 +471,13 @@ Status Codes: ### Resize a container TTY -`GET /containers/(id)/resize?h=&w=` +`POST /containers/(id)/resize?h=&w=` -Resize the TTY of container `id` +Resize the TTY for container with `id`. The container must be restarted for the resize to take effect. 
**Example request**: - GET /containers/4fa6e0f0c678/resize?h=40&w=80 HTTP/1.1 + POST /containers/4fa6e0f0c678/resize?h=40&w=80 HTTP/1.1 **Example response**: @@ -489,7 +489,7 @@ Status Codes: - **200** – no error - **404** – No such container -- **500** – bad file descriptor +- **500** – Cannot resize container ### Start a container diff --git a/docs/sources/reference/api/docker_remote_api_v1.9.md b/docs/sources/reference/api/docker_remote_api_v1.9.md index ed12bc3253..e069a09e20 100644 --- a/docs/sources/reference/api/docker_remote_api_v1.9.md +++ b/docs/sources/reference/api/docker_remote_api_v1.9.md @@ -364,28 +364,6 @@ Status Codes: - **404** – no such container - **500** – server error -### Resize a container TTY - -`GET /containers/(id)/resize?h=&w=` - -Resize the TTY of container `id` - -**Example request**: - - GET /containers/4fa6e0f0c678/resize?h=40&w=80 HTTP/1.1 - -**Example response**: - - HTTP/1.1 200 OK - Content-Length: 0 - Content-Type: text/plain; charset=utf-8 - -Status Codes: - -- **200** – no error -- **404** – No such container -- **500** – bad file descriptor - ### Start a container `POST /containers/(id)/start` diff --git a/integration-cli/docker_api_resize_test.go b/integration-cli/docker_api_resize_test.go index 3595999a71..355bfd9977 100644 --- a/integration-cli/docker_api_resize_test.go +++ b/integration-cli/docker_api_resize_test.go @@ -7,7 +7,7 @@ import ( ) func TestResizeApiResponse(t *testing.T) { - runCmd := exec.Command(dockerBinary, "run", "-d", "busybox", "true") + runCmd := exec.Command(dockerBinary, "run", "-d", "busybox", "top") out, _, err := runCommandWithOutput(runCmd) if err != nil { t.Fatalf(out, err) From 9a85f60c75f2017b14ed5e7f2bae5dc4961cb74c Mon Sep 17 00:00:00 2001 From: Victor Vieux Date: Mon, 17 Nov 2014 19:23:41 +0000 Subject: [PATCH 360/592] add ID and Hostname in docker info Signed-off-by: Victor Vieux --- api/client/commands.go | 6 ++++++ api/common.go | 25 +++++++++++++++++++++++++ daemon/config.go | 1 + daemon/daemon.go | 8 ++++++++ daemon/info.go | 4 ++++ docker/daemon.go | 2 ++ 6 files changed, 46 insertions(+) diff --git a/api/client/commands.go b/api/client/commands.go index 60487265ae..4f6f71d6d0 100644 --- a/api/client/commands.go +++ b/api/client/commands.go @@ -505,6 +505,12 @@ func (cli *DockerCli) CmdInfo(args ...string) error { if remoteInfo.Exists("MemTotal") { fmt.Fprintf(cli.out, "Total Memory: %s\n", units.BytesSize(float64(remoteInfo.GetInt64("MemTotal")))) } + if remoteInfo.Exists("Hostname") { + fmt.Fprintf(cli.out, "Hostname: %s\n", remoteInfo.Get("Hostname")) + } + if remoteInfo.Exists("ID") { + fmt.Fprintf(cli.out, "ID: %s\n", remoteInfo.Get("ID")) + } if remoteInfo.GetBool("Debug") || os.Getenv("DEBUG") != "" { if remoteInfo.Exists("Debug") { diff --git a/api/common.go b/api/common.go index b151552412..52e67caa13 100644 --- a/api/common.go +++ b/api/common.go @@ -3,12 +3,15 @@ package api import ( "fmt" "mime" + "os" + "path" "strings" log "github.com/Sirupsen/logrus" "github.com/docker/docker/engine" "github.com/docker/docker/pkg/parsers" "github.com/docker/docker/pkg/version" + "github.com/docker/docker/vendor/src/github.com/docker/libtrust" ) const ( @@ -47,3 +50,25 @@ func MatchesContentType(contentType, expectedType string) bool { } return err == nil && mimetype == expectedType } + +// LoadOrCreateTrustKey attempts to load the libtrust key at the given path, +// otherwise generates a new one +func LoadOrCreateTrustKey(trustKeyPath string) (libtrust.PrivateKey, error) { + err := 
os.MkdirAll(path.Dir(trustKeyPath), 0700) + if err != nil { + return nil, err + } + trustKey, err := libtrust.LoadKeyFile(trustKeyPath) + if err == libtrust.ErrKeyFileDoesNotExist { + trustKey, err = libtrust.GenerateECP256PrivateKey() + if err != nil { + return nil, fmt.Errorf("Error generating key: %s", err) + } + if err := libtrust.SaveKey(trustKeyPath, trustKey); err != nil { + return nil, fmt.Errorf("Error saving key file: %s", err) + } + } else if err != nil { + log.Fatalf("Error loading key file: %s", err) + } + return trustKey, nil +} diff --git a/daemon/config.go b/daemon/config.go index 0876ce0802..cbdd95da00 100644 --- a/daemon/config.go +++ b/daemon/config.go @@ -40,6 +40,7 @@ type Config struct { DisableNetwork bool EnableSelinuxSupport bool Context map[string][]string + TrustKeyPath string } // InstallFlags adds command-line options to the top-level flag parser for diff --git a/daemon/daemon.go b/daemon/daemon.go index 145a466486..84628be729 100644 --- a/daemon/daemon.go +++ b/daemon/daemon.go @@ -15,6 +15,7 @@ import ( "github.com/docker/libcontainer/label" log "github.com/Sirupsen/logrus" + "github.com/docker/docker/api" "github.com/docker/docker/daemon/execdriver" "github.com/docker/docker/daemon/execdriver/execdrivers" "github.com/docker/docker/daemon/execdriver/lxc" @@ -83,6 +84,7 @@ func (c *contStore) List() []*Container { } type Daemon struct { + ID string repository string sysInitPath string containers *contStore @@ -893,7 +895,13 @@ func NewDaemonFromDirectory(config *Config, eng *engine.Engine) (*Daemon, error) return nil, err } + trustKey, err := api.LoadOrCreateTrustKey(config.TrustKeyPath) + if err != nil { + return nil, err + } + daemon := &Daemon{ + ID: trustKey.PublicKey().KeyID(), repository: daemonRepo, containers: &contStore{s: make(map[string]*Container)}, execCommands: newExecStore(), diff --git a/daemon/info.go b/daemon/info.go index 78a22c9443..c05c2a569d 100644 --- a/daemon/info.go +++ b/daemon/info.go @@ -56,6 +56,7 @@ func (daemon *Daemon) CmdInfo(job *engine.Job) engine.Status { return job.Error(err) } v := &engine.Env{} + v.Set("ID", daemon.ID) v.SetInt("Containers", len(daemon.List())) v.SetInt("Images", imgcount) v.Set("Driver", daemon.GraphDriver().String()) @@ -75,6 +76,9 @@ func (daemon *Daemon) CmdInfo(job *engine.Job) engine.Status { v.Set("InitPath", initPath) v.SetInt("NCPU", runtime.NumCPU()) v.SetInt64("MemTotal", meminfo.MemTotal) + if hostname, err := os.Hostname(); err == nil { + v.Set("Hostname", hostname) + } if _, err := v.WriteTo(job.Stdout); err != nil { return job.Error(err) } diff --git a/docker/daemon.go b/docker/daemon.go index dbf1f05617..3128f7ee55 100644 --- a/docker/daemon.go +++ b/docker/daemon.go @@ -34,6 +34,8 @@ func mainDaemon() { eng := engine.New() signal.Trap(eng.Shutdown) + daemonCfg.TrustKeyPath = *flTrustKey + // Load builtins if err := builtins.Register(eng); err != nil { log.Fatal(err) From 8545155c41b1ccc22056733539660b1afa6790ef Mon Sep 17 00:00:00 2001 From: Victor Vieux Date: Mon, 17 Nov 2014 19:26:39 +0000 Subject: [PATCH 361/592] add docs Signed-off-by: Victor Vieux --- docs/sources/reference/api/docker_remote_api.md | 5 +++-- docs/sources/reference/api/docker_remote_api_v1.16.md | 2 ++ docs/sources/reference/commandline/cli.md | 2 ++ 3 files changed, 7 insertions(+), 2 deletions(-) diff --git a/docs/sources/reference/api/docker_remote_api.md b/docs/sources/reference/api/docker_remote_api.md index 5813091411..046e953b37 100644 --- a/docs/sources/reference/api/docker_remote_api.md +++ 
b/docs/sources/reference/api/docker_remote_api.md @@ -49,8 +49,9 @@ You can still call an old version of the API using `GET /info` **New!** -`info` now returns the number of CPUs available on the machine (`NCPU`) and -total memory available (`MemTotal`). +`info` now returns the number of CPUs available on the machine (`NCPU`), +total memory available (`MemTotal`), the short hostname (`Hostname`). and +the ID (`ID`). `POST /containers/create` diff --git a/docs/sources/reference/api/docker_remote_api_v1.16.md b/docs/sources/reference/api/docker_remote_api_v1.16.md index db07a97a6e..5e78e02ffb 100644 --- a/docs/sources/reference/api/docker_remote_api_v1.16.md +++ b/docs/sources/reference/api/docker_remote_api_v1.16.md @@ -1220,6 +1220,8 @@ Display system-wide information "KernelVersion":"3.12.0-1-amd64" "NCPU":1, "MemTotal":2099236864, + "Hostname":"prod-server-42", + "ID":"7TRN:IPZB:QYBB:VPBQ:UMPP:KARE:6ZNR:XE6T:7EWV:PKF4:ZOJD:TPYS" "Debug":false, "NFd": 11, "NGoroutines":21, diff --git a/docs/sources/reference/commandline/cli.md b/docs/sources/reference/commandline/cli.md index 7526954b12..24271a2c6e 100644 --- a/docs/sources/reference/commandline/cli.md +++ b/docs/sources/reference/commandline/cli.md @@ -856,6 +856,8 @@ For example: Kernel Version: 3.13.0-24-generic Operating System: Ubuntu 14.04 LTS CPUs: 1 + Hostname: prod-server-42 + ID: 7TRN:IPZB:QYBB:VPBQ:UMPP:KARE:6ZNR:XE6T:7EWV:PKF4:ZOJD:TPYS Total Memory: 2 GiB Debug mode (server): false Debug mode (client): true From a3068a109cc2c472966d5a080f81ca344232f26c Mon Sep 17 00:00:00 2001 From: Daehyeok Mun Date: Tue, 18 Nov 2014 05:03:00 +0900 Subject: [PATCH 362/592] fix link to PACKAGERS.md fix link from /hack/PACKAGERS.md to /project/PACKAGERS.md Signed-off-by: Daehyeok Mun --- docs/sources/contributing/contributing.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/sources/contributing/contributing.md b/docs/sources/contributing/contributing.md index 7d65a0479c..850b01ce12 100644 --- a/docs/sources/contributing/contributing.md +++ b/docs/sources/contributing/contributing.md @@ -21,4 +21,4 @@ https://github.com/docker/docker/blob/master/docs/Dockerfile) specifies the tools and versions used to build the Documentation. Further interesting details can be found in the [Packaging hints]( -https://github.com/docker/docker/blob/master/hack/PACKAGERS.md). +https://github.com/docker/docker/blob/master/project/PACKAGERS.md). 
From 975fa5487ca531374fed421b52de1adf133c9810 Mon Sep 17 00:00:00 2001 From: Michael Crosby Date: Mon, 17 Nov 2014 12:16:37 -0800 Subject: [PATCH 363/592] Update libcontainer to 28cb5f9dfd6f3352c610a4f1502 Signed-off-by: Michael Crosby --- project/vendor.sh | 2 +- .../github.com/docker/libcontainer/Dockerfile | 4 +- .../docker/libcontainer/api_temp.go | 19 +- .../docker/libcontainer/cgroups/cgroups.go | 5 - .../libcontainer/cgroups/cgutil/cgutil.go | 264 ------------------ .../cgroups/cgutil/sample_cgroup.json | 10 - .../libcontainer/cgroups/fs/apply_raw.go | 82 ++---- .../docker/libcontainer/cgroups/stats.go | 2 +- .../cgroups/systemd/apply_nosystemd.go | 6 +- .../cgroups/systemd/apply_systemd.go | 80 +----- .../docker/libcontainer/cgroups/utils.go | 13 +- .../docker/libcontainer/namespaces/exec.go | 16 +- .../docker/libcontainer/namespaces/execin.go | 2 +- .../docker/libcontainer/namespaces/init.go | 2 +- .../libcontainer/netlink/netlink_linux.go | 2 +- .../libcontainer/system/syscall_linux_arm.go | 2 +- 16 files changed, 67 insertions(+), 444 deletions(-) delete mode 100644 vendor/src/github.com/docker/libcontainer/cgroups/cgutil/cgutil.go delete mode 100644 vendor/src/github.com/docker/libcontainer/cgroups/cgutil/sample_cgroup.json diff --git a/project/vendor.sh b/project/vendor.sh index 4c0b09fed1..2a474db5d8 100755 --- a/project/vendor.sh +++ b/project/vendor.sh @@ -66,7 +66,7 @@ if [ "$1" = '--go' ]; then mv tmp-tar src/code.google.com/p/go/src/pkg/archive/tar fi -clone git github.com/docker/libcontainer 4ae31b6ceb2c2557c9f05f42da61b0b808faa5a4 +clone git github.com/docker/libcontainer 28cb5f9dfd6f3352c610a4f1502b5df4f69389ea # see src/github.com/docker/libcontainer/update-vendor.sh which is the "source of truth" for libcontainer deps (just like this file) rm -rf src/github.com/docker/libcontainer/vendor eval "$(grep '^clone ' src/github.com/docker/libcontainer/update-vendor.sh | grep -v 'github.com/codegangsta/cli')" diff --git a/vendor/src/github.com/docker/libcontainer/Dockerfile b/vendor/src/github.com/docker/libcontainer/Dockerfile index 96d8f35255..614e5979bf 100644 --- a/vendor/src/github.com/docker/libcontainer/Dockerfile +++ b/vendor/src/github.com/docker/libcontainer/Dockerfile @@ -1,7 +1,7 @@ FROM crosbymichael/golang RUN apt-get update && apt-get install -y gcc make -RUN go get code.google.com/p/go.tools/cmd/cover +RUN go get golang.org/x/tools/cmd/cover ENV GOPATH $GOPATH:/go/src/github.com/docker/libcontainer/vendor RUN go get github.com/docker/docker/pkg/term @@ -10,7 +10,7 @@ RUN go get github.com/docker/docker/pkg/term RUN mkdir /busybox && \ curl -sSL 'https://github.com/jpetazzo/docker-busybox/raw/buildroot-2014.02/rootfs.tar' | tar -xC /busybox -RUN curl -sSL https://raw.githubusercontent.com/docker/docker/master/hack/dind -o /dind && \ +RUN curl -sSL https://raw.githubusercontent.com/docker/docker/master/project/dind -o /dind && \ chmod +x /dind COPY . /go/src/github.com/docker/libcontainer diff --git a/vendor/src/github.com/docker/libcontainer/api_temp.go b/vendor/src/github.com/docker/libcontainer/api_temp.go index 9b2c520774..5c682ee344 100644 --- a/vendor/src/github.com/docker/libcontainer/api_temp.go +++ b/vendor/src/github.com/docker/libcontainer/api_temp.go @@ -5,30 +5,17 @@ package libcontainer import ( "github.com/docker/libcontainer/cgroups/fs" - "github.com/docker/libcontainer/cgroups/systemd" "github.com/docker/libcontainer/network" ) // TODO(vmarmol): Complete Stats() in final libcontainer API and move users to that. 
// DEPRECATED: The below portions are only to be used during the transition to the official API. // Returns all available stats for the given container. -func GetStats(container *Config, state *State) (*ContainerStats, error) { - var ( - err error - stats = &ContainerStats{} - ) - - if systemd.UseSystemd() { - stats.CgroupStats, err = systemd.GetStats(container.Cgroups) - } else { - stats.CgroupStats, err = fs.GetStats(container.Cgroups) - } - - if err != nil { +func GetStats(container *Config, state *State) (stats *ContainerStats, err error) { + stats = &ContainerStats{} + if stats.CgroupStats, err = fs.GetStats(state.CgroupPaths); err != nil { return stats, err } - stats.NetworkStats, err = network.GetStats(&state.NetworkState) - return stats, err } diff --git a/vendor/src/github.com/docker/libcontainer/cgroups/cgroups.go b/vendor/src/github.com/docker/libcontainer/cgroups/cgroups.go index 567e9a6c16..fe3600597b 100644 --- a/vendor/src/github.com/docker/libcontainer/cgroups/cgroups.go +++ b/vendor/src/github.com/docker/libcontainer/cgroups/cgroups.go @@ -53,8 +53,3 @@ type Cgroup struct { Freezer FreezerState `json:"freezer,omitempty"` // set the freeze value for the process Slice string `json:"slice,omitempty"` // Parent slice to use for systemd } - -type ActiveCgroup interface { - Cleanup() error - Paths() (map[string]string, error) -} diff --git a/vendor/src/github.com/docker/libcontainer/cgroups/cgutil/cgutil.go b/vendor/src/github.com/docker/libcontainer/cgroups/cgutil/cgutil.go deleted file mode 100644 index d1a66117f1..0000000000 --- a/vendor/src/github.com/docker/libcontainer/cgroups/cgutil/cgutil.go +++ /dev/null @@ -1,264 +0,0 @@ -package main - -import ( - "encoding/json" - "fmt" - "log" - "os" - "syscall" - "time" - - "github.com/codegangsta/cli" - "github.com/docker/libcontainer/cgroups" - "github.com/docker/libcontainer/cgroups/fs" - "github.com/docker/libcontainer/cgroups/systemd" -) - -var createCommand = cli.Command{ - Name: "create", - Usage: "Create a cgroup container using the supplied configuration and initial process.", - Flags: []cli.Flag{ - cli.StringFlag{Name: "config, c", Value: "cgroup.json", Usage: "path to container configuration (cgroups.Cgroup object)"}, - cli.IntFlag{Name: "pid, p", Value: 0, Usage: "pid of the initial process in the container"}, - }, - Action: createAction, -} - -var destroyCommand = cli.Command{ - Name: "destroy", - Usage: "Destroy an existing cgroup container.", - Flags: []cli.Flag{ - cli.StringFlag{Name: "name, n", Value: "", Usage: "container name"}, - cli.StringFlag{Name: "parent, p", Value: "", Usage: "container parent"}, - }, - Action: destroyAction, -} - -var statsCommand = cli.Command{ - Name: "stats", - Usage: "Get stats for cgroup", - Flags: []cli.Flag{ - cli.StringFlag{Name: "name, n", Value: "", Usage: "container name"}, - cli.StringFlag{Name: "parent, p", Value: "", Usage: "container parent"}, - }, - Action: statsAction, -} - -var pauseCommand = cli.Command{ - Name: "pause", - Usage: "Pause cgroup", - Flags: []cli.Flag{ - cli.StringFlag{Name: "name, n", Value: "", Usage: "container name"}, - cli.StringFlag{Name: "parent, p", Value: "", Usage: "container parent"}, - }, - Action: pauseAction, -} - -var resumeCommand = cli.Command{ - Name: "resume", - Usage: "Resume a paused cgroup", - Flags: []cli.Flag{ - cli.StringFlag{Name: "name, n", Value: "", Usage: "container name"}, - cli.StringFlag{Name: "parent, p", Value: "", Usage: "container parent"}, - }, - Action: resumeAction, -} - -var psCommand = cli.Command{ - Name: "ps", - 
Usage: "Get list of pids for a cgroup", - Flags: []cli.Flag{ - cli.StringFlag{Name: "name, n", Value: "", Usage: "container name"}, - cli.StringFlag{Name: "parent, p", Value: "", Usage: "container parent"}, - }, - Action: psAction, -} - -func getConfigFromFile(c *cli.Context) (*cgroups.Cgroup, error) { - f, err := os.Open(c.String("config")) - if err != nil { - return nil, err - } - defer f.Close() - - var config *cgroups.Cgroup - if err := json.NewDecoder(f).Decode(&config); err != nil { - log.Fatal(err) - } - return config, nil -} - -func openLog(name string) error { - f, err := os.OpenFile(name, os.O_CREATE|os.O_RDWR|os.O_APPEND, 0755) - if err != nil { - return err - } - - log.SetOutput(f) - return nil -} - -func getConfig(context *cli.Context) (*cgroups.Cgroup, error) { - name := context.String("name") - if name == "" { - log.Fatal(fmt.Errorf("Missing container name")) - } - parent := context.String("parent") - return &cgroups.Cgroup{ - Name: name, - Parent: parent, - }, nil -} - -func killAll(config *cgroups.Cgroup) { - // We could use freezer here to prevent process spawning while we are trying - // to kill everything. But going with more portable solution of retrying for - // now. - pids := getPids(config) - retry := 10 - for len(pids) != 0 || retry > 0 { - killPids(pids) - time.Sleep(100 * time.Millisecond) - retry-- - pids = getPids(config) - } - if len(pids) != 0 { - log.Fatal(fmt.Errorf("Could not kill existing processes in the container.")) - } -} - -func getPids(config *cgroups.Cgroup) []int { - pids, err := fs.GetPids(config) - if err != nil { - log.Fatal(err) - } - return pids -} - -func killPids(pids []int) { - for _, pid := range pids { - // pids might go away on their own. Ignore errors. - syscall.Kill(pid, syscall.SIGKILL) - } -} - -func setFreezerState(context *cli.Context, state cgroups.FreezerState) { - config, err := getConfig(context) - if err != nil { - log.Fatal(err) - } - - if systemd.UseSystemd() { - err = systemd.Freeze(config, state) - } else { - err = fs.Freeze(config, state) - } - if err != nil { - log.Fatal(err) - } -} - -func createAction(context *cli.Context) { - config, err := getConfigFromFile(context) - if err != nil { - log.Fatal(err) - } - pid := context.Int("pid") - if pid <= 0 { - log.Fatal(fmt.Errorf("Invalid pid : %d", pid)) - } - if systemd.UseSystemd() { - _, err := systemd.Apply(config, pid) - if err != nil { - log.Fatal(err) - } - } else { - _, err := fs.Apply(config, pid) - if err != nil { - log.Fatal(err) - } - } -} - -func destroyAction(context *cli.Context) { - config, err := getConfig(context) - if err != nil { - log.Fatal(err) - } - - killAll(config) - // Systemd will clean up cgroup state for empty container. 
- if !systemd.UseSystemd() { - err := fs.Cleanup(config) - if err != nil { - log.Fatal(err) - } - } -} - -func statsAction(context *cli.Context) { - config, err := getConfig(context) - if err != nil { - log.Fatal(err) - } - stats, err := fs.GetStats(config) - if err != nil { - log.Fatal(err) - } - - out, err := json.MarshalIndent(stats, "", "\t") - if err != nil { - log.Fatal(err) - } - fmt.Printf("Usage stats for '%s':\n %v\n", config.Name, string(out)) -} - -func pauseAction(context *cli.Context) { - setFreezerState(context, cgroups.Frozen) -} - -func resumeAction(context *cli.Context) { - setFreezerState(context, cgroups.Thawed) -} - -func psAction(context *cli.Context) { - config, err := getConfig(context) - if err != nil { - log.Fatal(err) - } - - pids, err := fs.GetPids(config) - if err != nil { - log.Fatal(err) - } - - fmt.Printf("Pids in '%s':\n", config.Name) - fmt.Println(pids) -} - -func main() { - logPath := os.Getenv("log") - if logPath != "" { - if err := openLog(logPath); err != nil { - log.Fatal(err) - } - } - - app := cli.NewApp() - app.Name = "cgutil" - app.Usage = "Test utility for libcontainer cgroups package" - app.Version = "0.1" - - app.Commands = []cli.Command{ - createCommand, - destroyCommand, - statsCommand, - pauseCommand, - resumeCommand, - psCommand, - } - - if err := app.Run(os.Args); err != nil { - log.Fatal(err) - } -} diff --git a/vendor/src/github.com/docker/libcontainer/cgroups/cgutil/sample_cgroup.json b/vendor/src/github.com/docker/libcontainer/cgroups/cgutil/sample_cgroup.json deleted file mode 100644 index 2d29784941..0000000000 --- a/vendor/src/github.com/docker/libcontainer/cgroups/cgutil/sample_cgroup.json +++ /dev/null @@ -1,10 +0,0 @@ -{ - "name": "luke", - "parent": "darth", - "allow_all_devices": true, - "memory": 1073741824, - "memory_swap": -1, - "cpu_shares": 2048, - "cpu_quota": 500000, - "cpu_period": 250000 -} diff --git a/vendor/src/github.com/docker/libcontainer/cgroups/fs/apply_raw.go b/vendor/src/github.com/docker/libcontainer/cgroups/fs/apply_raw.go index 599ab57272..6f85793dd2 100644 --- a/vendor/src/github.com/docker/libcontainer/cgroups/fs/apply_raw.go +++ b/vendor/src/github.com/docker/libcontainer/cgroups/fs/apply_raw.go @@ -57,20 +57,35 @@ type data struct { pid int } -func Apply(c *cgroups.Cgroup, pid int) (cgroups.ActiveCgroup, error) { +func Apply(c *cgroups.Cgroup, pid int) (map[string]string, error) { d, err := getCgroupData(c, pid) if err != nil { return nil, err } - for _, sys := range subsystems { + paths := make(map[string]string) + defer func() { + if err != nil { + cgroups.RemovePaths(paths) + } + }() + for name, sys := range subsystems { if err := sys.Set(d); err != nil { - d.Cleanup() return nil, err } + // FIXME: Apply should, ideally, be reentrant or be broken up into a separate + // create and join phase so that the cgroup hierarchy for a container can be + // created then join consists of writing the process pids to cgroup.procs + p, err := d.path(name) + if err != nil { + if cgroups.IsNotFound(err) { + continue + } + return nil, err + } + paths[name] = p } - - return d, nil + return paths, nil } // Symmetrical public function to update device based cgroups. 
Also available @@ -86,33 +101,13 @@ func ApplyDevices(c *cgroups.Cgroup, pid int) error { return devices.Set(d) } -func Cleanup(c *cgroups.Cgroup) error { - d, err := getCgroupData(c, 0) - if err != nil { - return fmt.Errorf("Could not get Cgroup data %s", err) - } - return d.Cleanup() -} - -func GetStats(c *cgroups.Cgroup) (*cgroups.Stats, error) { +func GetStats(systemPaths map[string]string) (*cgroups.Stats, error) { stats := cgroups.NewStats() - - d, err := getCgroupData(c, 0) - if err != nil { - return nil, fmt.Errorf("getting CgroupData %s", err) - } - - for sysname, sys := range subsystems { - path, err := d.path(sysname) - if err != nil { - // Don't fail if a cgroup hierarchy was not found, just skip this subsystem - if cgroups.IsNotFound(err) { - continue - } - - return nil, err + for name, path := range systemPaths { + sys, ok := subsystems[name] + if !ok { + continue } - if err := sys.GetStats(path, stats); err != nil { return nil, err } @@ -176,26 +171,6 @@ func (raw *data) parent(subsystem string) (string, error) { return filepath.Join(raw.root, subsystem, initPath), nil } -func (raw *data) Paths() (map[string]string, error) { - paths := make(map[string]string) - - for sysname := range subsystems { - path, err := raw.path(sysname) - if err != nil { - // Don't fail if a cgroup hierarchy was not found, just skip this subsystem - if cgroups.IsNotFound(err) { - continue - } - - return nil, err - } - - paths[sysname] = path - } - - return paths, nil -} - func (raw *data) path(subsystem string) (string, error) { // If the cgroup name/path is absolute do not look relative to the cgroup of the init process. if filepath.IsAbs(raw.cgroup) { @@ -234,13 +209,6 @@ func (raw *data) join(subsystem string) (string, error) { return path, nil } -func (raw *data) Cleanup() error { - for _, sys := range subsystems { - sys.Remove(raw) - } - return nil -} - func writeFile(dir, file, data string) error { return ioutil.WriteFile(filepath.Join(dir, file), []byte(data), 0700) } diff --git a/vendor/src/github.com/docker/libcontainer/cgroups/stats.go b/vendor/src/github.com/docker/libcontainer/cgroups/stats.go index 857fc1dc47..dc5dbb3c21 100644 --- a/vendor/src/github.com/docker/libcontainer/cgroups/stats.go +++ b/vendor/src/github.com/docker/libcontainer/cgroups/stats.go @@ -27,7 +27,7 @@ type CpuUsage struct { type CpuStats struct { CpuUsage CpuUsage `json:"cpu_usage,omitempty"` - ThrottlingData ThrottlingData `json:"throlling_data,omitempty"` + ThrottlingData ThrottlingData `json:"throttling_data,omitempty"` } type MemoryStats struct { diff --git a/vendor/src/github.com/docker/libcontainer/cgroups/systemd/apply_nosystemd.go b/vendor/src/github.com/docker/libcontainer/cgroups/systemd/apply_nosystemd.go index 42a09e3feb..4b9a2f5b74 100644 --- a/vendor/src/github.com/docker/libcontainer/cgroups/systemd/apply_nosystemd.go +++ b/vendor/src/github.com/docker/libcontainer/cgroups/systemd/apply_nosystemd.go @@ -12,7 +12,7 @@ func UseSystemd() bool { return false } -func Apply(c *cgroups.Cgroup, pid int) (cgroups.ActiveCgroup, error) { +func Apply(c *cgroups.Cgroup, pid int) (map[string]string, error) { return nil, fmt.Errorf("Systemd not supported") } @@ -27,7 +27,3 @@ func ApplyDevices(c *cgroups.Cgroup, pid int) error { func Freeze(c *cgroups.Cgroup, state cgroups.FreezerState) error { return fmt.Errorf("Systemd not supported") } - -func GetStats(c *cgroups.Cgroup) (*cgroups.Stats, error) { - return nil, fmt.Errorf("Systemd not supported") -} diff --git 
a/vendor/src/github.com/docker/libcontainer/cgroups/systemd/apply_systemd.go b/vendor/src/github.com/docker/libcontainer/cgroups/systemd/apply_systemd.go index 5155b67535..94f3465ffd 100644 --- a/vendor/src/github.com/docker/libcontainer/cgroups/systemd/apply_systemd.go +++ b/vendor/src/github.com/docker/libcontainer/cgroups/systemd/apply_systemd.go @@ -31,16 +31,6 @@ var ( connLock sync.Mutex theConn *systemd.Conn hasStartTransientUnit bool - subsystems = map[string]subsystem{ - "devices": &fs.DevicesGroup{}, - "memory": &fs.MemoryGroup{}, - "cpu": &fs.CpuGroup{}, - "cpuset": &fs.CpusetGroup{}, - "cpuacct": &fs.CpuacctGroup{}, - "blkio": &fs.BlkioGroup{}, - "perf_event": &fs.PerfEventGroup{}, - "freezer": &fs.FreezerGroup{}, - } ) func newProp(name string, units interface{}) systemd.Property { @@ -91,7 +81,7 @@ func getIfaceForUnit(unitName string) string { return "Unit" } -func Apply(c *cgroups.Cgroup, pid int) (cgroups.ActiveCgroup, error) { +func Apply(c *cgroups.Cgroup, pid int) (map[string]string, error) { var ( unitName = getUnitName(c) slice = "system.slice" @@ -159,45 +149,32 @@ func Apply(c *cgroups.Cgroup, pid int) (cgroups.ActiveCgroup, error) { } } - return res, nil -} - -func writeFile(dir, file, data string) error { - return ioutil.WriteFile(filepath.Join(dir, file), []byte(data), 0700) -} - -func (c *systemdCgroup) Paths() (map[string]string, error) { paths := make(map[string]string) - - for sysname := range subsystems { - subsystemPath, err := getSubsystemPath(c.cgroup, sysname) + for _, sysname := range []string{ + "devices", + "memory", + "cpu", + "cpuset", + "cpuacct", + "blkio", + "perf_event", + "freezer", + } { + subsystemPath, err := getSubsystemPath(res.cgroup, sysname) if err != nil { // Don't fail if a cgroup hierarchy was not found, just skip this subsystem if cgroups.IsNotFound(err) { continue } - return nil, err } - paths[sysname] = subsystemPath } - return paths, nil } -func (c *systemdCgroup) Cleanup() error { - // systemd cleans up, we don't need to do much - paths, err := c.Paths() - if err != nil { - return err - } - - for _, path := range paths { - os.RemoveAll(path) - } - - return nil +func writeFile(dir, file, data string) error { + return ioutil.WriteFile(filepath.Join(dir, file), []byte(data), 0700) } func joinFreezer(c *cgroups.Cgroup, pid int) error { @@ -267,35 +244,6 @@ func getUnitName(c *cgroups.Cgroup) string { return fmt.Sprintf("%s-%s.scope", c.Parent, c.Name) } -/* - * This would be nicer to get from the systemd API when accounting - * is enabled, but sadly there is no way to do that yet. - * The lack of this functionality in the API & the approach taken - * is guided by - * http://www.freedesktop.org/wiki/Software/systemd/ControlGroupInterface/#readingaccountinginformation. 
- */ -func GetStats(c *cgroups.Cgroup) (*cgroups.Stats, error) { - stats := cgroups.NewStats() - - for sysname, sys := range subsystems { - subsystemPath, err := getSubsystemPath(c, sysname) - if err != nil { - // Don't fail if a cgroup hierarchy was not found, just skip this subsystem - if cgroups.IsNotFound(err) { - continue - } - - return nil, err - } - - if err := sys.GetStats(subsystemPath, stats); err != nil { - return nil, err - } - } - - return stats, nil -} - // Atm we can't use the systemd device support because of two missing things: // * Support for wildcards to allow mknod on any device // * Support for wildcards to allow /dev/pts support diff --git a/vendor/src/github.com/docker/libcontainer/cgroups/utils.go b/vendor/src/github.com/docker/libcontainer/cgroups/utils.go index 77a3c0d7c5..224a20b9b2 100644 --- a/vendor/src/github.com/docker/libcontainer/cgroups/utils.go +++ b/vendor/src/github.com/docker/libcontainer/cgroups/utils.go @@ -189,6 +189,17 @@ func EnterPid(cgroupPaths map[string]string, pid int) error { } } } - return nil } + +// RemovePaths iterates over the provided paths removing them. +// If an error is encountered the removal proceeds and the first error is +// returned to ensure a partial removal is not possible. +func RemovePaths(paths map[string]string) (err error) { + for _, path := range paths { + if rerr := os.RemoveAll(path); err == nil { + err = rerr + } + } + return err +} diff --git a/vendor/src/github.com/docker/libcontainer/namespaces/exec.go b/vendor/src/github.com/docker/libcontainer/namespaces/exec.go index bd3a4a3f9e..b7873edd0e 100644 --- a/vendor/src/github.com/docker/libcontainer/namespaces/exec.go +++ b/vendor/src/github.com/docker/libcontainer/namespaces/exec.go @@ -60,16 +60,11 @@ func Exec(container *libcontainer.Config, stdin io.Reader, stdout, stderr io.Wri // Do this before syncing with child so that no children // can escape the cgroup - cgroupRef, err := SetupCgroups(container, command.Process.Pid) - if err != nil { - return terminate(err) - } - defer cgroupRef.Cleanup() - - cgroupPaths, err := cgroupRef.Paths() + cgroupPaths, err := SetupCgroups(container, command.Process.Pid) if err != nil { return terminate(err) } + defer cgroups.RemovePaths(cgroupPaths) var networkState network.NetworkState if err := InitializeNetworking(container, command.Process.Pid, &networkState); err != nil { @@ -153,18 +148,15 @@ func DefaultCreateCommand(container *libcontainer.Config, console, dataPath, ini // SetupCgroups applies the cgroup restrictions to the process running in the container based // on the container's configuration -func SetupCgroups(container *libcontainer.Config, nspid int) (cgroups.ActiveCgroup, error) { +func SetupCgroups(container *libcontainer.Config, nspid int) (map[string]string, error) { if container.Cgroups != nil { c := container.Cgroups - if systemd.UseSystemd() { return systemd.Apply(c, nspid) } - return fs.Apply(c, nspid) } - - return nil, nil + return map[string]string{}, nil } // InitializeNetworking creates the container's network stack outside of the namespace and moves diff --git a/vendor/src/github.com/docker/libcontainer/namespaces/execin.go b/vendor/src/github.com/docker/libcontainer/namespaces/execin.go index 7dea918735..430dc72fe6 100644 --- a/vendor/src/github.com/docker/libcontainer/namespaces/execin.go +++ b/vendor/src/github.com/docker/libcontainer/namespaces/execin.go @@ -111,7 +111,7 @@ func FinalizeSetns(container *libcontainer.Config, args []string) error { } } - if err := system.Execv(args[0], args[0:], 
container.Env); err != nil { + if err := system.Execv(args[0], args[0:], os.Environ()); err != nil { return err } diff --git a/vendor/src/github.com/docker/libcontainer/namespaces/init.go b/vendor/src/github.com/docker/libcontainer/namespaces/init.go index 72af200cc6..2fa2780e7f 100644 --- a/vendor/src/github.com/docker/libcontainer/namespaces/init.go +++ b/vendor/src/github.com/docker/libcontainer/namespaces/init.go @@ -100,7 +100,7 @@ func Init(container *libcontainer.Config, uncleanRootfs, consolePath string, pip if container.Hostname != "" { if err := syscall.Sethostname([]byte(container.Hostname)); err != nil { - return fmt.Errorf("sethostname %s", err) + return fmt.Errorf("unable to sethostname %q: %s", container.Hostname, err) } } diff --git a/vendor/src/github.com/docker/libcontainer/netlink/netlink_linux.go b/vendor/src/github.com/docker/libcontainer/netlink/netlink_linux.go index 57790421c0..851d959cd0 100644 --- a/vendor/src/github.com/docker/libcontainer/netlink/netlink_linux.go +++ b/vendor/src/github.com/docker/libcontainer/netlink/netlink_linux.go @@ -1004,7 +1004,7 @@ func AddRoute(destination, source, gateway, device string) error { if source != "" { srcIP := net.ParseIP(source) - if err != nil { + if srcIP == nil { return fmt.Errorf("source IP %s couldn't be parsed", source) } srcFamily := getIpFamily(srcIP) diff --git a/vendor/src/github.com/docker/libcontainer/system/syscall_linux_arm.go b/vendor/src/github.com/docker/libcontainer/system/syscall_linux_arm.go index faf1799577..7d8cda9d00 100644 --- a/vendor/src/github.com/docker/libcontainer/system/syscall_linux_arm.go +++ b/vendor/src/github.com/docker/libcontainer/system/syscall_linux_arm.go @@ -7,7 +7,7 @@ import ( // Setuid sets the uid of the calling thread to the specified uid. func Setuid(uid int) (err error) { - _, _, e1 := syscall.RawSyscall(syscall.SYS_SETUID, uintptr(uid), 0, 0) + _, _, e1 := syscall.RawSyscall(syscall.SYS_SETUID32, uintptr(uid), 0, 0) if e1 != 0 { err = e1 } From 83d7db3e505f2d080ac78ca7777937c812588003 Mon Sep 17 00:00:00 2001 From: Vincent Batts Date: Mon, 17 Nov 2014 16:17:06 -0500 Subject: [PATCH 364/592] pkg/mount: testing mountinfo fields Signed-off-by: Vincent Batts --- pkg/mount/mountinfo_linux_test.go | 29 +++++++++++++++++++++++++++++ 1 file changed, 29 insertions(+) diff --git a/pkg/mount/mountinfo_linux_test.go b/pkg/mount/mountinfo_linux_test.go index 3c214476df..e92b7e2c74 100644 --- a/pkg/mount/mountinfo_linux_test.go +++ b/pkg/mount/mountinfo_linux_test.go @@ -446,3 +446,32 @@ func TestParseGentooMountinfo(t *testing.T) { t.Fatal(err) } } + +func TestParseFedoraMountinfoFields(t *testing.T) { + r := bytes.NewBuffer([]byte(fedoraMountinfo)) + infos, err := parseInfoFile(r) + if err != nil { + t.Fatal(err) + } + expectedLength := 58 + if len(infos) != expectedLength { + t.Fatalf("Expected %d entries, got %d", expectedLength, len(infos)) + } + mi := MountInfo{ + Id: 15, + Parent: 35, + Major: 0, + Minor: 3, + Root: "/", + Mountpoint: "/proc", + Opts: "rw,nosuid,nodev,noexec,relatime", + Optional: "shared:5", + Fstype: "proc", + Source: "proc", + VfsOpts: "rw", + } + + if *infos[0] != mi { + t.Fatalf("expected %#v, got %#v", mi, infos[0]) + } +} From a0605107d713f7f8a25277b06369134d17781077 Mon Sep 17 00:00:00 2001 From: Jessica Frazelle Date: Mon, 17 Nov 2014 12:49:29 -0800 Subject: [PATCH 365/592] Add test for bug (9056) where rmi -f fails with "no such id". 
Docker-DCO-1.1-Signed-off-by: Jessica Frazelle (github: jfrazelle) --- integration-cli/docker_cli_rmi_test.go | 20 ++++++++++++++++++++ 1 file changed, 20 insertions(+) diff --git a/integration-cli/docker_cli_rmi_test.go b/integration-cli/docker_cli_rmi_test.go index c28e771971..98cadfe853 100644 --- a/integration-cli/docker_cli_rmi_test.go +++ b/integration-cli/docker_cli_rmi_test.go @@ -99,3 +99,23 @@ func TestRmiTagWithExistingContainers(t *testing.T) { logDone("rmi - delete tag with existing containers") } + +func TestRmiForceWithExistingContainers(t *testing.T) { + image := "busybox-clone" + if out, _, err := runCommandWithOutput(exec.Command(dockerBinary, "build", "--no-cache", "-t", image, "/docker-busybox")); err != nil { + t.Fatalf("Could not build %s: %s, %v", image, out, err) + } + + if out, _, err := runCommandWithOutput(exec.Command(dockerBinary, "run", "--name", "test-force-rmi", image, "/bin/true")); err != nil { + t.Fatalf("Could not run container: %s, %v", out, err) + } + + out, _, err := runCommandWithOutput(exec.Command(dockerBinary, "rmi", "-f", image)) + if err != nil { + t.Fatalf("Could not remove image %s: %s, %v", image, out, err) + } + + deleteAllContainers() + + logDone("rmi - force delete with existing containers") +} From ac40e7cbb3cf01568b3763abe04af814d89a6f36 Mon Sep 17 00:00:00 2001 From: Jessica Frazelle Date: Mon, 17 Nov 2014 13:16:33 -0800 Subject: [PATCH 366/592] Fix for rmi -f when error "no such id". (9056) Docker-DCO-1.1-Signed-off-by: Jessica Frazelle (github: jfrazelle) --- daemon/image_delete.go | 3 +++ 1 file changed, 3 insertions(+) diff --git a/daemon/image_delete.go b/daemon/image_delete.go index 332db7b4c0..b0b0c3a023 100644 --- a/daemon/image_delete.go +++ b/daemon/image_delete.go @@ -133,6 +133,9 @@ func (daemon *Daemon) canDeleteImage(imgID string, force bool) error { for _, container := range daemon.List() { parent, err := daemon.Repositories().LookupImage(container.Image) if err != nil { + if daemon.Graph().IsNotExist(err) { + return nil + } return err } From b273c447e0f0b124d5f81ff60033a4894e2acec8 Mon Sep 17 00:00:00 2001 From: Satnam Singh Date: Sat, 15 Nov 2014 22:40:48 -0800 Subject: [PATCH 367/592] Consistently use sudo docker Signed-off-by: Satnam Singh --- docs/sources/articles/registry_mirror.md | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/docs/sources/articles/registry_mirror.md b/docs/sources/articles/registry_mirror.md index 6cb2b958c3..5d5378e234 100644 --- a/docs/sources/articles/registry_mirror.md +++ b/docs/sources/articles/registry_mirror.md @@ -29,11 +29,11 @@ There are two steps to set up and use a local registry mirror. You will need to pass the `--registry-mirror` option to your Docker daemon on startup: - docker --registry-mirror=http:// -d + sudo docker --registry-mirror=http:// -d For example, if your mirror is serving on `http://10.0.0.2:5000`, you would run: - docker --registry-mirror=http://10.0.0.2:5000 -d + sudo docker --registry-mirror=http://10.0.0.2:5000 -d **NOTE:** Depending on your local host setup, you may be able to add the @@ -47,7 +47,7 @@ You will need to start a local registry mirror service. The functionality. 
For example, to run a local registry mirror that serves on port `5000` and mirrors the content at `registry-1.docker.io`: - docker run -p 5000:5000 \ + sudo docker run -p 5000:5000 \ -e STANDALONE=false \ -e MIRROR_SOURCE=https://registry-1.docker.io \ -e MIRROR_SOURCE_INDEX=https://index.docker.io registry @@ -57,7 +57,7 @@ port `5000` and mirrors the content at `registry-1.docker.io`: With your mirror running, pull an image that you haven't pulled before (using `time` to time it): - $ time docker pull node:latest + $ time sudo docker pull node:latest Pulling repository node [...] @@ -71,7 +71,7 @@ Now, remove the image from your local machine: Finally, re-pull the image: - $ time docker pull node:latest + $ time sudo docker pull node:latest Pulling repository node [...] From e6fd57b90bcdd3b0105fa5c7d17a64332f829ded Mon Sep 17 00:00:00 2001 From: Marianna Date: Mon, 17 Nov 2014 19:25:06 -0800 Subject: [PATCH 368/592] Fixed a bug - no panic anymore when logining in without TTY Fixes #8956 Signed-off-by: Marianna --- api/client/commands.go | 5 +++- integration-cli/docker_cli_login_test.go | 35 ++++++++++++++++++++++++ 2 files changed, 39 insertions(+), 1 deletion(-) create mode 100644 integration-cli/docker_cli_login_test.go diff --git a/api/client/commands.go b/api/client/commands.go index 6b9c4d4d8f..dfc07835c9 100644 --- a/api/client/commands.go +++ b/api/client/commands.go @@ -289,7 +289,10 @@ func (cli *DockerCli) CmdLogin(args ...string) error { // the password or email from the config file, so prompt them if username != authconfig.Username { if password == "" { - oldState, _ := term.SaveState(cli.inFd) + oldState, err := term.SaveState(cli.inFd) + if err != nil { + return err + } fmt.Fprintf(cli.out, "Password: ") term.DisableEcho(cli.inFd, oldState) diff --git a/integration-cli/docker_cli_login_test.go b/integration-cli/docker_cli_login_test.go new file mode 100644 index 0000000000..cf134e4c9b --- /dev/null +++ b/integration-cli/docker_cli_login_test.go @@ -0,0 +1,35 @@ +package main + +import ( + "bytes" + "io" + "os" + "os/exec" + "testing" +) + +func TestLoginWithoutTTY(t *testing.T) { + cmd := exec.Command(dockerBinary, "login") + // setup STDOUT and STDERR so that we see any output and errors in our console + cmd.Stdout = os.Stdout + cmd.Stderr = os.Stderr + + // create a buffer with text then a new line as a return + buf := bytes.NewBuffer([]byte("buffer test string \n")) + + // use a pipe for stdin and manually copy the data so that + // the process does not get the TTY + in, err := cmd.StdinPipe() + if err != nil { + t.Fatal(err) + } + // copy the bytes into the commands stdin along with a new line + go io.Copy(in, buf) + + // run the command and block until it's done + if err := cmd.Run(); err == nil { + t.Fatal("Expected non nil err when loginning in & TTY not available") + } + + logDone("login - login without TTY") +} From b4346c4810c4801e8885485b2bf0311f1b6d00ec Mon Sep 17 00:00:00 2001 From: Doug Davis Date: Sun, 16 Nov 2014 05:13:14 -0800 Subject: [PATCH 369/592] Be clearer in docs about COPY/ADD dirs COPY/ADD just copies the contents of dirs, not dirs themselves. This PR tries to clear that up in the docs. 
Closes #8775 Signed-off-by: Doug Davis --- docs/sources/reference/builder.md | 12 ++++++++---- 1 file changed, 8 insertions(+), 4 deletions(-) diff --git a/docs/sources/reference/builder.md b/docs/sources/reference/builder.md index 19cc16ad0f..198e81cf8c 100644 --- a/docs/sources/reference/builder.md +++ b/docs/sources/reference/builder.md @@ -420,8 +420,10 @@ The copy obeys the following rules: appropriate filename can be discovered in this case (`http://example.com` will not work). -- If `` is a directory, the entire directory is copied, including - filesystem metadata. +- If `` is a directory, the entire contents of the directory are copied, + including filesystem metadata. +> **Note**: +> The directory itself is not copied, just its contents. - If `` is a *local* tar archive in a recognized compression format (identity, gzip, bzip2 or xz) then it is unpacked as a directory. Resources @@ -480,8 +482,10 @@ The copy obeys the following rules: `docker build` is to send the context directory (and subdirectories) to the docker daemon. -- If `` is a directory, the entire directory is copied, including - filesystem metadata. +- If `` is a directory, the entire contents of the directory are copied, + including filesystem metadata. +> **Note**: +> The directory itself is not copied, just its contents. - If `` is any other kind of file, it is copied individually along with its metadata. In this case, if `` ends with a trailing slash `/`, it From 7fe8d0aeeb373a0fef92758819d8054fbd744ea5 Mon Sep 17 00:00:00 2001 From: Jessica Frazelle Date: Tue, 18 Nov 2014 10:49:01 -0800 Subject: [PATCH 370/592] Fix steam where it should be stream. Docker-DCO-1.1-Signed-off-by: Jessica Frazelle (github: jfrazelle) --- api/client/commands.go | 8 ++++---- api/client/utils.go | 4 ++-- 2 files changed, 6 insertions(+), 6 deletions(-) diff --git a/api/client/commands.go b/api/client/commands.go index 6b9c4d4d8f..63b52ada60 100644 --- a/api/client/commands.go +++ b/api/client/commands.go @@ -916,13 +916,13 @@ func (cli *DockerCli) CmdPort(args ...string) error { return nil } - steam, _, err := cli.call("GET", "/containers/"+cmd.Arg(0)+"/json", nil, false) + stream, _, err := cli.call("GET", "/containers/"+cmd.Arg(0)+"/json", nil, false) if err != nil { return err } env := engine.Env{} - if err := env.Decode(steam); err != nil { + if err := env.Decode(stream); err != nil { return err } ports := nat.PortMap{} @@ -1856,13 +1856,13 @@ func (cli *DockerCli) CmdLogs(args ...string) error { } name := cmd.Arg(0) - steam, _, err := cli.call("GET", "/containers/"+name+"/json", nil, false) + stream, _, err := cli.call("GET", "/containers/"+name+"/json", nil, false) if err != nil { return err } env := engine.Env{} - if err := env.Decode(steam); err != nil { + if err := env.Decode(stream); err != nil { return err } diff --git a/api/client/utils.go b/api/client/utils.go index f094635714..3799ce6735 100644 --- a/api/client/utils.go +++ b/api/client/utils.go @@ -216,7 +216,7 @@ func waitForExit(cli *DockerCli, containerId string) (int, error) { // getExitCode perform an inspect on the container. It returns // the running state and the exit code. func getExitCode(cli *DockerCli, containerId string) (bool, int, error) { - steam, _, err := cli.call("GET", "/containers/"+containerId+"/json", nil, false) + stream, _, err := cli.call("GET", "/containers/"+containerId+"/json", nil, false) if err != nil { // If we can't connect, then the daemon probably died. 
if err != ErrConnectionRefused { @@ -226,7 +226,7 @@ func getExitCode(cli *DockerCli, containerId string) (bool, int, error) { } var result engine.Env - if err := result.Decode(steam); err != nil { + if err := result.Decode(stream); err != nil { return false, -1, err } From 54a6e6d1229adb1169809493af89600c5ccc70ae Mon Sep 17 00:00:00 2001 From: Tianon Gravi Date: Tue, 18 Nov 2014 12:20:49 -0700 Subject: [PATCH 371/592] Add CONFIG_OVERLAYFS_FS to check-config.sh Also, added some slight adjustment to the AUFS_FS output/note to make it more clear what it applies to. Example output: ```console $ ./contrib/check-config.sh info: reading kernel config from /proc/config.gz ... Generally Necessary: - cgroup hierarchy: properly mounted [/sys/fs/cgroup] - CONFIG_NAMESPACES: enabled - CONFIG_NET_NS: enabled - CONFIG_PID_NS: enabled - CONFIG_IPC_NS: enabled - CONFIG_UTS_NS: enabled - CONFIG_DEVPTS_MULTIPLE_INSTANCES: enabled - CONFIG_CGROUPS: enabled - CONFIG_CGROUP_CPUACCT: enabled - CONFIG_CGROUP_DEVICE: enabled - CONFIG_CGROUP_FREEZER: enabled - CONFIG_CGROUP_SCHED: enabled - CONFIG_MACVLAN: enabled - CONFIG_VETH: enabled - CONFIG_BRIDGE: enabled - CONFIG_NF_NAT_IPV4: enabled - CONFIG_IP_NF_FILTER: enabled - CONFIG_IP_NF_TARGET_MASQUERADE: enabled - CONFIG_NETFILTER_XT_MATCH_ADDRTYPE: enabled - CONFIG_NETFILTER_XT_MATCH_CONNTRACK: enabled - CONFIG_NF_NAT: enabled - CONFIG_NF_NAT_NEEDED: enabled Optional Features: - CONFIG_MEMCG_SWAP: enabled - CONFIG_RESOURCE_COUNTERS: enabled - CONFIG_CGROUP_PERF: missing - Storage Drivers: - "aufs": - CONFIG_AUFS_FS: missing (note that some kernels include AUFS patches but not the AUFS_FS flag) - CONFIG_EXT4_FS_POSIX_ACL: enabled - CONFIG_EXT4_FS_SECURITY: enabled - "btrfs": - CONFIG_BTRFS_FS: enabled - "devicemapper": - CONFIG_BLK_DEV_DM: enabled - CONFIG_DM_THIN_PROVISIONING: enabled - CONFIG_EXT4_FS: enabled - CONFIG_EXT4_FS_POSIX_ACL: enabled - CONFIG_EXT4_FS_SECURITY: enabled - "overlayfs": - CONFIG_OVERLAYFS_FS: missing ``` Signed-off-by: Andrew Page --- contrib/check-config.sh | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/contrib/check-config.sh b/contrib/check-config.sh index 26a2f0ae43..54b1359a22 100755 --- a/contrib/check-config.sh +++ b/contrib/check-config.sh @@ -153,16 +153,20 @@ check_flags "${flags[@]}" echo '- Storage Drivers:' { echo '- "'$(wrap_color 'aufs' blue)'":' - check_flags AUFS_FS EXT4_FS_POSIX_ACL EXT4_FS_SECURITY | sed 's/^/ /' + check_flags AUFS_FS | sed 's/^/ /' if ! is_set AUFS_FS && grep -q aufs /proc/filesystems; then echo " $(wrap_color '(note that some kernels include AUFS patches but not the AUFS_FS flag)' bold black)" fi + check_flags EXT4_FS_POSIX_ACL EXT4_FS_SECURITY | sed 's/^/ /' echo '- "'$(wrap_color 'btrfs' blue)'":' check_flags BTRFS_FS | sed 's/^/ /' echo '- "'$(wrap_color 'devicemapper' blue)'":' check_flags BLK_DEV_DM DM_THIN_PROVISIONING EXT4_FS EXT4_FS_POSIX_ACL EXT4_FS_SECURITY | sed 's/^/ /' + + echo '- "'$(wrap_color 'overlayfs' blue)'":' + check_flags OVERLAYFS_FS | sed 's/^/ /' } | sed 's/^/ /' echo From f9cdad32721808b6beeab544de26cca9ff68e1f0 Mon Sep 17 00:00:00 2001 From: "Andrew C. Bodine" Date: Tue, 18 Nov 2014 11:32:23 -0800 Subject: [PATCH 372/592] Cleanup: fixes start response status code typo in examples Signed-off-by: Andrew C. 
Bodine --- docs/sources/reference/api/docker_remote_api_v1.10.md | 4 ++-- docs/sources/reference/api/docker_remote_api_v1.11.md | 2 +- docs/sources/reference/api/docker_remote_api_v1.12.md | 2 +- docs/sources/reference/api/docker_remote_api_v1.13.md | 2 +- docs/sources/reference/api/docker_remote_api_v1.3.md | 2 +- docs/sources/reference/api/docker_remote_api_v1.4.md | 2 +- docs/sources/reference/api/docker_remote_api_v1.5.md | 2 +- docs/sources/reference/api/docker_remote_api_v1.6.md | 2 +- docs/sources/reference/api/docker_remote_api_v1.7.md | 2 +- docs/sources/reference/api/docker_remote_api_v1.8.md | 2 +- docs/sources/reference/api/docker_remote_api_v1.9.md | 2 +- 11 files changed, 12 insertions(+), 12 deletions(-) diff --git a/docs/sources/reference/api/docker_remote_api_v1.10.md b/docs/sources/reference/api/docker_remote_api_v1.10.md index 7918215257..eb3f5cc1e5 100644 --- a/docs/sources/reference/api/docker_remote_api_v1.10.md +++ b/docs/sources/reference/api/docker_remote_api_v1.10.md @@ -363,14 +363,14 @@ Start the container `id` "LxcConf":[{"Key":"lxc.utsname","Value":"docker"}], "PortBindings":{ "22/tcp": [{ "HostPort": "11022" }] }, "PublishAllPorts":false, - "Privileged":false + "Privileged":false, "Dns": ["8.8.8.8"], "VolumesFrom": ["parent", "other:ro"] } **Example response**: - HTTP/1.1 204 No Conten + HTTP/1.1 204 No Content Content-Type: text/plain Json Parameters: diff --git a/docs/sources/reference/api/docker_remote_api_v1.11.md b/docs/sources/reference/api/docker_remote_api_v1.11.md index ad858c3144..838d199ea7 100644 --- a/docs/sources/reference/api/docker_remote_api_v1.11.md +++ b/docs/sources/reference/api/docker_remote_api_v1.11.md @@ -406,7 +406,7 @@ Start the container `id` **Example response**: - HTTP/1.1 204 No Conten + HTTP/1.1 204 No Content Content-Type: text/plain Json Parameters: diff --git a/docs/sources/reference/api/docker_remote_api_v1.12.md b/docs/sources/reference/api/docker_remote_api_v1.12.md index 48e6bb5c9c..9c6351f98e 100644 --- a/docs/sources/reference/api/docker_remote_api_v1.12.md +++ b/docs/sources/reference/api/docker_remote_api_v1.12.md @@ -416,7 +416,7 @@ Start the container `id` **Example response**: - HTTP/1.1 204 No Conten + HTTP/1.1 204 No Content Content-Type: text/plain Json Parameters: diff --git a/docs/sources/reference/api/docker_remote_api_v1.13.md b/docs/sources/reference/api/docker_remote_api_v1.13.md index 595a748e2b..41dbb285c5 100644 --- a/docs/sources/reference/api/docker_remote_api_v1.13.md +++ b/docs/sources/reference/api/docker_remote_api_v1.13.md @@ -407,7 +407,7 @@ Start the container `id` **Example response**: - HTTP/1.1 204 No Conten + HTTP/1.1 204 No Content Content-Type: text/plain Json Parameters: diff --git a/docs/sources/reference/api/docker_remote_api_v1.3.md b/docs/sources/reference/api/docker_remote_api_v1.3.md index 30399ea625..7ae7462bf9 100644 --- a/docs/sources/reference/api/docker_remote_api_v1.3.md +++ b/docs/sources/reference/api/docker_remote_api_v1.3.md @@ -325,7 +325,7 @@ Start the container `id` **Example response**: - HTTP/1.1 204 No Conten + HTTP/1.1 204 No Content Content-Type: text/plain Json Parameters: diff --git a/docs/sources/reference/api/docker_remote_api_v1.4.md b/docs/sources/reference/api/docker_remote_api_v1.4.md index bfd739f8df..5c0a015cc1 100644 --- a/docs/sources/reference/api/docker_remote_api_v1.4.md +++ b/docs/sources/reference/api/docker_remote_api_v1.4.md @@ -341,7 +341,7 @@ Start the container `id` **Example response**: - HTTP/1.1 204 No Conten + HTTP/1.1 204 No Content 
Content-Type: text/plain Json Parameters: diff --git a/docs/sources/reference/api/docker_remote_api_v1.5.md b/docs/sources/reference/api/docker_remote_api_v1.5.md index 1c6b15df70..56245c303d 100644 --- a/docs/sources/reference/api/docker_remote_api_v1.5.md +++ b/docs/sources/reference/api/docker_remote_api_v1.5.md @@ -338,7 +338,7 @@ Start the container `id` **Example response**: - HTTP/1.1 204 No Conten + HTTP/1.1 204 No Content Content-Type: text/plain Json Parameters: diff --git a/docs/sources/reference/api/docker_remote_api_v1.6.md b/docs/sources/reference/api/docker_remote_api_v1.6.md index 3946cc69c8..9055b24712 100644 --- a/docs/sources/reference/api/docker_remote_api_v1.6.md +++ b/docs/sources/reference/api/docker_remote_api_v1.6.md @@ -393,7 +393,7 @@ Start the container `id` **Example response**: - HTTP/1.1 204 No Conten + HTTP/1.1 204 No Content Content-Type: text/plain Json Parameters: diff --git a/docs/sources/reference/api/docker_remote_api_v1.7.md b/docs/sources/reference/api/docker_remote_api_v1.7.md index ff4b485ec6..2f07b2b698 100644 --- a/docs/sources/reference/api/docker_remote_api_v1.7.md +++ b/docs/sources/reference/api/docker_remote_api_v1.7.md @@ -348,7 +348,7 @@ Start the container `id` **Example response**: - HTTP/1.1 204 No Conten + HTTP/1.1 204 No Content Content-Type: text/plain Json Parameters: diff --git a/docs/sources/reference/api/docker_remote_api_v1.8.md b/docs/sources/reference/api/docker_remote_api_v1.8.md index 768465f2e6..faaa71397e 100644 --- a/docs/sources/reference/api/docker_remote_api_v1.8.md +++ b/docs/sources/reference/api/docker_remote_api_v1.8.md @@ -385,7 +385,7 @@ Start the container `id` **Example response**: - HTTP/1.1 204 No Conten + HTTP/1.1 204 No Content Content-Type: text/plain Json Parameters: diff --git a/docs/sources/reference/api/docker_remote_api_v1.9.md b/docs/sources/reference/api/docker_remote_api_v1.9.md index ed12bc3253..aced3eff9d 100644 --- a/docs/sources/reference/api/docker_remote_api_v1.9.md +++ b/docs/sources/reference/api/docker_remote_api_v1.9.md @@ -407,7 +407,7 @@ Start the container `id` **Example response**: - HTTP/1.1 204 No Conten + HTTP/1.1 204 No Content Content-Type: text/plain Json Parameters: From 33e0de15d77ef57b5c4615c6bd535775d54d8c9b Mon Sep 17 00:00:00 2001 From: Tianon Gravi Date: Tue, 18 Nov 2014 15:13:35 -0700 Subject: [PATCH 373/592] Allow for custom debootstrap wrappers like qemu-debootstrap in contrib/mkimage/debootstrap Signed-off-by: Andrew Page --- contrib/mkimage/debootstrap | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/contrib/mkimage/debootstrap b/contrib/mkimage/debootstrap index fcda497839..65f154aa95 100755 --- a/contrib/mkimage/debootstrap +++ b/contrib/mkimage/debootstrap @@ -15,9 +15,12 @@ done suite="$1" shift +# allow for DEBOOTSTRAP=qemu-debootstrap ./mkimage.sh ... 
+: ${DEBOOTSTRAP:=debootstrap} + ( set -x - debootstrap "${before[@]}" "$suite" "$rootfsDir" "$@" + $DEBOOTSTRAP "${before[@]}" "$suite" "$rootfsDir" "$@" ) # now for some Docker-specific tweaks From a0fb8eca30fd97aaa592268b4b6e8ac7737b78ac Mon Sep 17 00:00:00 2001 From: Sven Dowideit Date: Tue, 18 Nov 2014 17:42:25 -0800 Subject: [PATCH 374/592] tlsverify flag has no dash Signed-off-by: Sven Dowideit --- docker/flags.go | 2 +- docs/sources/reference/commandline/cli.md | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/docker/flags.go b/docker/flags.go index 4a6b361f97..80fd9fc17c 100644 --- a/docker/flags.go +++ b/docker/flags.go @@ -35,7 +35,7 @@ var ( flSocketGroup = flag.String([]string{"G", "-group"}, "docker", "Group to assign the unix socket specified by -H when running in daemon mode\nuse '' (the empty string) to disable setting of a group") flLogLevel = flag.String([]string{"l", "-log-level"}, "info", "Set the logging level") flEnableCors = flag.Bool([]string{"#api-enable-cors", "-api-enable-cors"}, false, "Enable CORS headers in the remote API") - flTls = flag.Bool([]string{"-tls"}, false, "Use TLS; implied by tls-verify flags") + flTls = flag.Bool([]string{"-tls"}, false, "Use TLS; implied by --tlsverify=true") flTlsVerify = flag.Bool([]string{"-tlsverify"}, dockerTlsVerify, "Use TLS and verify the remote (daemon: verify client, client: verify daemon)") // these are initialized in init() below since their default values depend on dockerCertPath which isn't fully initialized until init() runs diff --git a/docs/sources/reference/commandline/cli.md b/docs/sources/reference/commandline/cli.md index ab80f2ff51..358786cb29 100644 --- a/docs/sources/reference/commandline/cli.md +++ b/docs/sources/reference/commandline/cli.md @@ -84,7 +84,7 @@ expect an integer, and they can only be specified once. -s, --storage-driver="" Force the Docker runtime to use a specific storage driver --selinux-enabled=false Enable selinux support. SELinux does not presently support the BTRFS storage driver --storage-opt=[] Set storage driver options - --tls=false Use TLS; implied by tls-verify flags + --tls=false Use TLS; implied by --tlsverify=true --tlscacert="/home/sven/.docker/ca.pem" Trust only remotes providing a certificate signed by the CA given here --tlscert="/home/sven/.docker/cert.pem" Path to TLS certificate file --tlskey="/home/sven/.docker/key.pem" Path to TLS key file From 57b6993c2c99dd89a3cbe012935a82966d88aa92 Mon Sep 17 00:00:00 2001 From: Sven Dowideit Date: Mon, 17 Nov 2014 16:41:54 -0800 Subject: [PATCH 375/592] If an image has more than one repo name or tag, it'll be listed more than once Signed-off-by: Sven Dowideit --- docs/sources/reference/commandline/cli.md | 29 +++++++++++++---------- 1 file changed, 17 insertions(+), 12 deletions(-) diff --git a/docs/sources/reference/commandline/cli.md b/docs/sources/reference/commandline/cli.md index ab80f2ff51..a9dd124bd4 100644 --- a/docs/sources/reference/commandline/cli.md +++ b/docs/sources/reference/commandline/cli.md @@ -11,7 +11,7 @@ or execute `docker help`: Usage: docker [OPTIONS] COMMAND [arg...] -H, --host=[]: The socket(s) to bind to in daemon mode, specified using one or more tcp://host:port, unix:///path/to/socket, fd://* or fd://socketfd. - A self-sufficient runtime for linux containers. + A self-sufficient runtime for Linux containers. ... @@ -111,7 +111,7 @@ requiring either `root` permission, or `docker` group membership. 
If you need to access the Docker daemon remotely, you need to enable the `tcp` Socket. Beware that the default setup provides un-encrypted and un-authenticated direct access to the Docker daemon - and should be secured either using the -[built in https encrypted socket](/articles/https/), or by putting a secure web +[built in HTTPS encrypted socket](/articles/https/), or by putting a secure web proxy in front of it. You can listen on port `2375` on all network interfaces with `-H tcp://0.0.0.0:2375`, or on a particular network interface using its IP address: `-H tcp://192.168.59.103:2375`. It is conventional to use port `2375` @@ -738,19 +738,24 @@ decrease disk usage, and speed up `docker build` by allowing each step to be cached. These intermediate layers are not shown by default. +An image will be listed more than once if it has multiple repository names +or tags. This single image (identifiable by its matching `IMAGE ID`) +uses up the `VIRTUAL SIZE` listed only once. + #### Listing the most recently created images $ sudo docker images | head - REPOSITORY TAG IMAGE ID CREATED VIRTUAL SIZE - 77af4d6b9913 19 hours ago 1.089 GB - committest latest b6fa739cedf5 19 hours ago 1.089 GB - 78a85c484f71 19 hours ago 1.089 GB - docker latest 30557a29d5ab 20 hours ago 1.089 GB - 0124422dd9f9 20 hours ago 1.089 GB - 18ad6fad3402 22 hours ago 1.082 GB - f9f1e26352f0 23 hours ago 1.089 GB - tryout latest 2629d1fa0b81 23 hours ago 131.5 MB - 5ed6274db6ce 24 hours ago 1.089 GB + REPOSITORY TAG IMAGE ID CREATED VIRTUAL SIZE + 77af4d6b9913 19 hours ago 1.089 GB + committ latest b6fa739cedf5 19 hours ago 1.089 GB + 78a85c484f71 19 hours ago 1.089 GB + docker latest 30557a29d5ab 20 hours ago 1.089 GB + 5ed6274db6ce 24 hours ago 1.089 GB + postgres 9 746b819f315e 4 days ago 213.4 MB + postgres 9.3 746b819f315e 4 days ago 213.4 MB + postgres 9.3.5 746b819f315e 4 days ago 213.4 MB + postgres latest 746b819f315e 4 days ago 213.4 MB + #### Listing the full length image IDs From f4acfeebda431239c98e07ed8c0d55422e165d4e Mon Sep 17 00:00:00 2001 From: Chun Chen Date: Wed, 19 Nov 2014 10:13:10 +0800 Subject: [PATCH 376/592] Clean config path of bind mount volume Signed-off-by: Chun Chen --- volumes/repository.go | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/volumes/repository.go b/volumes/repository.go index d6612e7a34..225148b60e 100644 --- a/volumes/repository.go +++ b/volumes/repository.go @@ -166,9 +166,6 @@ func (r *Repository) Delete(path string) error { return fmt.Errorf("Volume %s does not exist", path) } - if volume.IsBindMount { - return fmt.Errorf("Volume %s is a bind-mount and cannot be removed", volume.Path) - } containers := volume.Containers() if len(containers) > 0 { return fmt.Errorf("Volume %s is being used and cannot be removed: used by containers %s", volume.Path, containers) @@ -178,6 +175,10 @@ func (r *Repository) Delete(path string) error { return err } + if volume.IsBindMount { + return nil + } + if err := r.driver.Remove(volume.ID); err != nil { if !os.IsNotExist(err) { return err From 3287ca1e45f74a2eac214070ccb937c7c7030a06 Mon Sep 17 00:00:00 2001 From: Vincent Batts Date: Tue, 18 Nov 2014 15:07:48 -0500 Subject: [PATCH 377/592] overlayfs: more helpful output when not supported based on https://github.com/docker/docker/pull/7619#discussion_r20385086 Signed-off-by: Vincent Batts --- daemon/graphdriver/overlayfs/overlayfs.go | 1 + 1 file changed, 1 insertion(+) diff --git a/daemon/graphdriver/overlayfs/overlayfs.go b/daemon/graphdriver/overlayfs/overlayfs.go 
index f2f478dc4a..a9ce75a375 100644 --- a/daemon/graphdriver/overlayfs/overlayfs.go +++ b/daemon/graphdriver/overlayfs/overlayfs.go @@ -129,6 +129,7 @@ func supportsOverlayfs() error { return nil } } + log.Error("'overlayfs' not found as a supported filesystem on this host. Please ensure kernel is new enough and has overlayfs support loaded.") return graphdriver.ErrNotSupported } From 1cd12efb5d0065990334de4f97fa85b317f97f11 Mon Sep 17 00:00:00 2001 From: Michal Minar Date: Wed, 19 Nov 2014 09:08:39 +0100 Subject: [PATCH 378/592] Updated sig-proxy text also in zsh completion script Signed-off-by: Michal Minar --- contrib/completion/zsh/_docker | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/contrib/completion/zsh/_docker b/contrib/completion/zsh/_docker index 4c0937e10c..8a157b0b6f 100644 --- a/contrib/completion/zsh/_docker +++ b/contrib/completion/zsh/_docker @@ -190,7 +190,7 @@ __docker_subcommand () { (attach) _arguments \ '--no-stdin[Do not attach stdin]' \ - '--sig-proxy[Proxify all received signal]' \ + '--sig-proxy[Proxy all received signals to the process (non-TTY mode only)]' \ ':containers:__docker_runningcontainers' ;; (build) @@ -396,7 +396,7 @@ __docker_subcommand () { '--restart=-[Restart policy]:restart policy:(no on-failure always)' \ '--rm[Remove intermediate containers when it exits]' \ '*--security-opt=-[Security options]:security option: ' \ - '--sig-proxy[Proxify all received signal]' \ + '--sig-proxy[Proxy all received signals to the process (non-TTY mode only)]' \ {-t,--tty}'[Allocate a pseudo-tty]' \ {-u,--user=-}'[Username or UID]:user:_users' \ '*-v[Bind mount a volume]:volume: '\ From 6a74f071afb4a69a1360ff1e84945745e578c349 Mon Sep 17 00:00:00 2001 From: Vincent Batts Date: Wed, 19 Nov 2014 15:46:03 -0500 Subject: [PATCH 379/592] pkg/tarsum: actually init the TarSum struct closes #9241 Signed-off-by: Vincent Batts --- pkg/tarsum/tarsum.go | 16 ++++------------ 1 file changed, 4 insertions(+), 12 deletions(-) diff --git a/pkg/tarsum/tarsum.go b/pkg/tarsum/tarsum.go index 34386ff39d..ba09d4a121 100644 --- a/pkg/tarsum/tarsum.go +++ b/pkg/tarsum/tarsum.go @@ -27,11 +27,7 @@ const ( // including the byte payload of the image's json metadata as well, and for // calculating the checksums for buildcache. 
func NewTarSum(r io.Reader, dc bool, v Version) (TarSum, error) { - headerSelector, err := getTarHeaderSelector(v) - if err != nil { - return nil, err - } - return &tarSum{Reader: r, DisableCompression: dc, tarSumVersion: v, headerSelector: headerSelector}, nil + return NewTarSumHash(r, dc, v, DefaultTHash) } // Create a new TarSum, providing a THash to use rather than the DefaultTHash @@ -40,7 +36,9 @@ func NewTarSumHash(r io.Reader, dc bool, v Version, tHash THash) (TarSum, error) if err != nil { return nil, err } - return &tarSum{Reader: r, DisableCompression: dc, tarSumVersion: v, headerSelector: headerSelector, tHash: tHash}, nil + ts := &tarSum{Reader: r, DisableCompression: dc, tarSumVersion: v, headerSelector: headerSelector, tHash: tHash} + err = ts.initTarSum() + return ts, err } // TarSum is the generic interface for calculating fixed time @@ -134,12 +132,6 @@ func (ts *tarSum) initTarSum() error { } func (ts *tarSum) Read(buf []byte) (int, error) { - if ts.writer == nil { - if err := ts.initTarSum(); err != nil { - return 0, err - } - } - if ts.finished { return ts.bufWriter.Read(buf) } From e257863a9a2bff19c66355230483a8b6fa9de209 Mon Sep 17 00:00:00 2001 From: Josh Hawn Date: Wed, 19 Nov 2014 12:15:20 -0800 Subject: [PATCH 380/592] Add unit test for tarSum.Sum() with no data Docker-DCO-1.1-Signed-off-by: Josh Hawn (github: jlhawn) --- pkg/tarsum/tarsum_test.go | 11 +++++++++++ 1 file changed, 11 insertions(+) diff --git a/pkg/tarsum/tarsum_test.go b/pkg/tarsum/tarsum_test.go index 1e06cda178..c5dca6ad7a 100644 --- a/pkg/tarsum/tarsum_test.go +++ b/pkg/tarsum/tarsum_test.go @@ -230,6 +230,17 @@ func TestEmptyTar(t *testing.T) { if resultSum != expectedSum { t.Fatalf("expected [%s] but got [%s]", expectedSum, resultSum) } + + // Test without ever actually writing anything. + if ts, err = NewTarSum(bytes.NewReader([]byte{}), true, Version0); err != nil { + t.Fatal(err) + } + + resultSum = ts.Sum(nil) + + if resultSum != expectedSum { + t.Fatalf("expected [%s] but got [%s]", expectedSum, resultSum) + } } var ( From da3d3b97ebe1e6fe1254367521c725ca12a5e61d Mon Sep 17 00:00:00 2001 From: Tonis Tiigi Date: Mon, 17 Nov 2014 18:05:49 +0200 Subject: [PATCH 381/592] Make sure integration-cli test clean up MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Common patterns: - Multiple images were built with same name but only one cleanup. - Containers were deleted after images. - Images not removed after retagging. 
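The cleanup-ordering pattern listed above follows from Go's defer semantics: deferred calls run last-in, first-out, so a test must register the image cleanup before the container cleanup if it wants containers removed first. A minimal stand-alone sketch of that ordering, with print statements standing in for the integration-cli helpers and a hypothetical image name:

```go
package main

import "fmt"

// Stand-ins for the integration-cli helpers; they only print so the
// defer ordering is visible when run.
func deleteImages(names ...string) { fmt.Println("removing images:", names) }
func deleteAllContainers()         { fmt.Println("removing all containers") }

func main() {
	name := "testbuildexample" // hypothetical image name
	// Deferred calls run in LIFO order: deleteAllContainers is deferred
	// last, so it runs first, removing containers before the images
	// they were created from are deleted.
	defer deleteImages(name)
	defer deleteAllContainers()

	fmt.Println("test body runs here")
	// Output:
	// test body runs here
	// removing all containers
	// removing images: [testbuildexample]
}
```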
Signed-off-by: Tõnis Tiigi (github: tonistiigi) --- integration-cli/docker_cli_build_test.go | 109 +++++++++++++------ integration-cli/docker_cli_commit_test.go | 3 + integration-cli/docker_cli_events_test.go | 22 +++- integration-cli/docker_cli_import_test.go | 4 +- integration-cli/docker_cli_rm_test.go | 1 + integration-cli/docker_cli_run_test.go | 25 ++++- integration-cli/docker_cli_save_load_test.go | 3 + 7 files changed, 124 insertions(+), 43 deletions(-) diff --git a/integration-cli/docker_cli_build_test.go b/integration-cli/docker_cli_build_test.go index de60a8017f..4e6fe63ae1 100644 --- a/integration-cli/docker_cli_build_test.go +++ b/integration-cli/docker_cli_build_test.go @@ -39,6 +39,7 @@ func TestBuildShCmdJSONEntrypoint(t *testing.T) { exec.Command( dockerBinary, "run", + "--rm", name)) if err != nil { @@ -263,6 +264,8 @@ func TestBuildHandleEscapes(t *testing.T) { t.Fatal("Could not find volume bar set from env foo in volumes table") } + deleteImages(name) + _, err = buildImage(name, ` FROM scratch @@ -287,6 +290,8 @@ func TestBuildHandleEscapes(t *testing.T) { t.Fatal("Could not find volume ${FOO} set from env foo in volumes table") } + deleteImages(name) + // this test in particular provides *7* backslashes and expects 6 to come back. // Like above, the first escape is swallowed and the rest are treated as // literals, this one is just less obvious because of all the character noise. @@ -355,8 +360,8 @@ func TestBuildOnBuildLowercase(t *testing.T) { func TestBuildEnvEscapes(t *testing.T) { name := "testbuildenvescapes" - defer deleteAllContainers() defer deleteImages(name) + defer deleteAllContainers() _, err := buildImage(name, ` FROM busybox @@ -380,8 +385,8 @@ func TestBuildEnvEscapes(t *testing.T) { func TestBuildEnvOverwrite(t *testing.T) { name := "testbuildenvoverwrite" - defer deleteAllContainers() defer deleteImages(name) + defer deleteAllContainers() _, err := buildImage(name, ` @@ -410,7 +415,10 @@ func TestBuildEnvOverwrite(t *testing.T) { func TestBuildOnBuildForbiddenMaintainerInSourceImage(t *testing.T) { name := "testbuildonbuildforbiddenmaintainerinsourceimage" + defer deleteImages("onbuild") defer deleteImages(name) + defer deleteAllContainers() + createCmd := exec.Command(dockerBinary, "create", "busybox", "true") out, _, _, err := runCommandWithStdoutStderr(createCmd) if err != nil { @@ -441,7 +449,10 @@ func TestBuildOnBuildForbiddenMaintainerInSourceImage(t *testing.T) { func TestBuildOnBuildForbiddenFromInSourceImage(t *testing.T) { name := "testbuildonbuildforbiddenfrominsourceimage" + defer deleteImages("onbuild") defer deleteImages(name) + defer deleteAllContainers() + createCmd := exec.Command(dockerBinary, "create", "busybox", "true") out, _, _, err := runCommandWithStdoutStderr(createCmd) if err != nil { @@ -472,7 +483,10 @@ func TestBuildOnBuildForbiddenFromInSourceImage(t *testing.T) { func TestBuildOnBuildForbiddenChainedInSourceImage(t *testing.T) { name := "testbuildonbuildforbiddenchainedinsourceimage" + defer deleteImages("onbuild") defer deleteImages(name) + defer deleteAllContainers() + createCmd := exec.Command(dockerBinary, "create", "busybox", "true") out, _, _, err := runCommandWithStdoutStderr(createCmd) if err != nil { @@ -505,9 +519,9 @@ func TestBuildOnBuildCmdEntrypointJSON(t *testing.T) { name1 := "onbuildcmd" name2 := "onbuildgenerated" - defer deleteAllContainers() defer deleteImages(name2) defer deleteImages(name1) + defer deleteAllContainers() _, err := buildImage(name1, ` FROM busybox @@ -542,9 +556,9 @@ func 
TestBuildOnBuildEntrypointJSON(t *testing.T) { name1 := "onbuildcmd" name2 := "onbuildgenerated" - defer deleteAllContainers() defer deleteImages(name2) defer deleteImages(name1) + defer deleteAllContainers() _, err := buildImage(name1, ` FROM busybox @@ -590,6 +604,10 @@ func TestBuildCacheADD(t *testing.T) { true); err != nil { t.Fatal(err) } + if err != nil { + t.Fatal(err) + } + deleteImages(name) _, out, err := buildImageWithOut(name, fmt.Sprintf(`FROM scratch ADD %s/index.html /`, server.URL), @@ -1314,9 +1332,12 @@ func TestBuildWithInaccessibleFilesInContext(t *testing.T) { t.Fatal(err) } defer ctx.Close() - if err := os.Symlink(filepath.Join(ctx.Dir, "g"), "../../../../../../../../../../../../../../../../../../../azA"); err != nil { + + target := "../../../../../../../../../../../../../../../../../../../azA" + if err := os.Symlink(filepath.Join(ctx.Dir, "g"), target); err != nil { t.Fatal(err) } + defer os.Remove(target) // This is used to ensure we don't follow links when checking if everything in the context is accessible // This test doesn't require that we run commands as an unprivileged user if _, err := buildImageFromContext(name, ctx, true); err != nil { @@ -1668,6 +1689,7 @@ func TestBuildContextCleanup(t *testing.T) { func TestBuildContextCleanupFailedBuild(t *testing.T) { name := "testbuildcontextcleanup" defer deleteImages(name) + defer deleteAllContainers() entries, err := ioutil.ReadDir("/var/lib/docker/tmp") if err != nil { t.Fatalf("failed to list contents of tmp dir: %s", err) @@ -1919,7 +1941,8 @@ func TestBuildWithCache(t *testing.T) { func TestBuildWithoutCache(t *testing.T) { name := "testbuildwithoutcache" - defer deleteImages(name) + name2 := "testbuildwithoutcache2" + defer deleteImages(name, name2) id1, err := buildImage(name, `FROM scratch MAINTAINER dockerio @@ -1929,7 +1952,8 @@ func TestBuildWithoutCache(t *testing.T) { if err != nil { t.Fatal(err) } - id2, err := buildImage(name, + + id2, err := buildImage(name2, `FROM scratch MAINTAINER dockerio EXPOSE 5432 @@ -1946,7 +1970,8 @@ func TestBuildWithoutCache(t *testing.T) { func TestBuildADDLocalFileWithCache(t *testing.T) { name := "testbuildaddlocalfilewithcache" - defer deleteImages(name) + name2 := "testbuildaddlocalfilewithcache2" + defer deleteImages(name, name2) dockerfile := ` FROM busybox MAINTAINER dockerio @@ -1963,7 +1988,7 @@ func TestBuildADDLocalFileWithCache(t *testing.T) { if err != nil { t.Fatal(err) } - id2, err := buildImageFromContext(name, ctx, true) + id2, err := buildImageFromContext(name2, ctx, true) if err != nil { t.Fatal(err) } @@ -1975,7 +2000,8 @@ func TestBuildADDLocalFileWithCache(t *testing.T) { func TestBuildADDMultipleLocalFileWithCache(t *testing.T) { name := "testbuildaddmultiplelocalfilewithcache" - defer deleteImages(name) + name2 := "testbuildaddmultiplelocalfilewithcache2" + defer deleteImages(name, name2) dockerfile := ` FROM busybox MAINTAINER dockerio @@ -1992,7 +2018,7 @@ func TestBuildADDMultipleLocalFileWithCache(t *testing.T) { if err != nil { t.Fatal(err) } - id2, err := buildImageFromContext(name, ctx, true) + id2, err := buildImageFromContext(name2, ctx, true) if err != nil { t.Fatal(err) } @@ -2004,7 +2030,8 @@ func TestBuildADDMultipleLocalFileWithCache(t *testing.T) { func TestBuildADDLocalFileWithoutCache(t *testing.T) { name := "testbuildaddlocalfilewithoutcache" - defer deleteImages(name) + name2 := "testbuildaddlocalfilewithoutcache2" + defer deleteImages(name, name2) dockerfile := ` FROM busybox MAINTAINER dockerio @@ -2021,7 +2048,7 @@ func 
TestBuildADDLocalFileWithoutCache(t *testing.T) { if err != nil { t.Fatal(err) } - id2, err := buildImageFromContext(name, ctx, false) + id2, err := buildImageFromContext(name2, ctx, false) if err != nil { t.Fatal(err) } @@ -2033,7 +2060,8 @@ func TestBuildADDLocalFileWithoutCache(t *testing.T) { func TestBuildCopyDirButNotFile(t *testing.T) { name := "testbuildcopydirbutnotfile" - defer deleteImages(name) + name2 := "testbuildcopydirbutnotfile2" + defer deleteImages(name, name2) dockerfile := ` FROM scratch COPY dir /tmp/` @@ -2052,7 +2080,7 @@ func TestBuildCopyDirButNotFile(t *testing.T) { if err := ctx.Add("dir_file", "hello2"); err != nil { t.Fatal(err) } - id2, err := buildImageFromContext(name, ctx, true) + id2, err := buildImageFromContext(name2, ctx, true) if err != nil { t.Fatal(err) } @@ -2064,7 +2092,11 @@ func TestBuildCopyDirButNotFile(t *testing.T) { func TestBuildADDCurrentDirWithCache(t *testing.T) { name := "testbuildaddcurrentdirwithcache" - defer deleteImages(name) + name2 := name + "2" + name3 := name + "3" + name4 := name + "4" + name5 := name + "5" + defer deleteImages(name, name2, name3, name4, name5) dockerfile := ` FROM scratch MAINTAINER dockerio @@ -2084,7 +2116,7 @@ func TestBuildADDCurrentDirWithCache(t *testing.T) { if err := ctx.Add("bar", "hello2"); err != nil { t.Fatal(err) } - id2, err := buildImageFromContext(name, ctx, true) + id2, err := buildImageFromContext(name2, ctx, true) if err != nil { t.Fatal(err) } @@ -2095,7 +2127,7 @@ func TestBuildADDCurrentDirWithCache(t *testing.T) { if err := ctx.Add("foo", "hello1"); err != nil { t.Fatal(err) } - id3, err := buildImageFromContext(name, ctx, true) + id3, err := buildImageFromContext(name3, ctx, true) if err != nil { t.Fatal(err) } @@ -2107,14 +2139,14 @@ func TestBuildADDCurrentDirWithCache(t *testing.T) { if err := ctx.Add("foo", "hello1"); err != nil { t.Fatal(err) } - id4, err := buildImageFromContext(name, ctx, true) + id4, err := buildImageFromContext(name4, ctx, true) if err != nil { t.Fatal(err) } if id3 == id4 { t.Fatal("The cache should have been invalided but hasn't.") } - id5, err := buildImageFromContext(name, ctx, true) + id5, err := buildImageFromContext(name5, ctx, true) if err != nil { t.Fatal(err) } @@ -2126,7 +2158,8 @@ func TestBuildADDCurrentDirWithCache(t *testing.T) { func TestBuildADDCurrentDirWithoutCache(t *testing.T) { name := "testbuildaddcurrentdirwithoutcache" - defer deleteImages(name) + name2 := "testbuildaddcurrentdirwithoutcache2" + defer deleteImages(name, name2) dockerfile := ` FROM scratch MAINTAINER dockerio @@ -2142,7 +2175,7 @@ func TestBuildADDCurrentDirWithoutCache(t *testing.T) { if err != nil { t.Fatal(err) } - id2, err := buildImageFromContext(name, ctx, false) + id2, err := buildImageFromContext(name2, ctx, false) if err != nil { t.Fatal(err) } @@ -2186,7 +2219,8 @@ func TestBuildADDRemoteFileWithCache(t *testing.T) { func TestBuildADDRemoteFileWithoutCache(t *testing.T) { name := "testbuildaddremotefilewithoutcache" - defer deleteImages(name) + name2 := "testbuildaddremotefilewithoutcache2" + defer deleteImages(name, name2) server, err := fakeStorage(map[string]string{ "baz": "hello", }) @@ -2202,7 +2236,7 @@ func TestBuildADDRemoteFileWithoutCache(t *testing.T) { if err != nil { t.Fatal(err) } - id2, err := buildImage(name, + id2, err := buildImage(name2, fmt.Sprintf(`FROM scratch MAINTAINER dockerio ADD %s/baz /usr/lib/baz/quux`, server.URL), @@ -2218,7 +2252,11 @@ func TestBuildADDRemoteFileWithoutCache(t *testing.T) { func TestBuildADDRemoteFileMTime(t 
*testing.T) { name := "testbuildaddremotefilemtime" - defer deleteImages(name) + name2 := name + "2" + name3 := name + "3" + name4 := name + "4" + + defer deleteImages(name, name2, name3, name4) server, err := fakeStorage(map[string]string{"baz": "hello"}) if err != nil { @@ -2239,7 +2277,7 @@ func TestBuildADDRemoteFileMTime(t *testing.T) { t.Fatal(err) } - id2, err := buildImageFromContext(name, ctx, true) + id2, err := buildImageFromContext(name2, ctx, true) if err != nil { t.Fatal(err) } @@ -2255,7 +2293,7 @@ func TestBuildADDRemoteFileMTime(t *testing.T) { t.Fatalf("Error setting mtime on %q: %v", bazPath, err) } - id3, err := buildImageFromContext(name, ctx, true) + id3, err := buildImageFromContext(name3, ctx, true) if err != nil { t.Fatal(err) } @@ -2264,7 +2302,7 @@ func TestBuildADDRemoteFileMTime(t *testing.T) { } // And for good measure do it again and make sure cache is used this time - id4, err := buildImageFromContext(name, ctx, true) + id4, err := buildImageFromContext(name4, ctx, true) if err != nil { t.Fatal(err) } @@ -2353,7 +2391,7 @@ func TestBuildNoContext(t *testing.T) { t.Fatalf("build failed to complete: %v %v", out, err) } - if out, _, err := cmd(t, "run", "nocontext"); out != "ok\n" || err != nil { + if out, _, err := cmd(t, "run", "--rm", "nocontext"); out != "ok\n" || err != nil { t.Fatalf("run produced invalid output: %q, expected %q", out, "ok") } @@ -2364,7 +2402,8 @@ func TestBuildNoContext(t *testing.T) { // TODO: TestCaching func TestBuildADDLocalAndRemoteFilesWithoutCache(t *testing.T) { name := "testbuildaddlocalandremotefilewithoutcache" - defer deleteImages(name) + name2 := "testbuildaddlocalandremotefilewithoutcache2" + defer deleteImages(name, name2) server, err := fakeStorage(map[string]string{ "baz": "hello", }) @@ -2387,7 +2426,7 @@ func TestBuildADDLocalAndRemoteFilesWithoutCache(t *testing.T) { if err != nil { t.Fatal(err) } - id2, err := buildImageFromContext(name, ctx, false) + id2, err := buildImageFromContext(name2, ctx, false) if err != nil { t.Fatal(err) } @@ -2552,6 +2591,7 @@ func TestBuildInheritance(t *testing.T) { func TestBuildFails(t *testing.T) { name := "testbuildfails" defer deleteImages(name) + defer deleteAllContainers() _, err := buildImage(name, `FROM busybox RUN sh -c "exit 23"`, @@ -3217,6 +3257,7 @@ func TestBuildIgnoreInvalidInstruction(t *testing.T) { func TestBuildEntrypointInheritance(t *testing.T) { defer deleteImages("parent", "child") + defer deleteAllContainers() if _, err := buildImage("parent", ` FROM busybox @@ -3255,6 +3296,7 @@ func TestBuildEntrypointInheritanceInspect(t *testing.T) { ) defer deleteImages(name, name2) + defer deleteAllContainers() if _, err := buildImage(name, "FROM busybox\nENTRYPOINT /foo/bar", true); err != nil { t.Fatal(err) @@ -3298,7 +3340,7 @@ func TestBuildRunShEntrypoint(t *testing.T) { t.Fatal(err) } - out, _, err := runCommandWithOutput(exec.Command(dockerBinary, "run", name)) + out, _, err := runCommandWithOutput(exec.Command(dockerBinary, "run", "--rm", name)) if err != nil { t.Fatal(err, out) @@ -3345,12 +3387,13 @@ func TestBuildVerifySingleQuoteFails(t *testing.T) { // it should barf on it. 
name := "testbuildsinglequotefails" defer deleteImages(name) + defer deleteAllContainers() _, err := buildImage(name, `FROM busybox CMD [ '/bin/sh', '-c', 'echo hi' ]`, true) - _, _, err = runCommandWithOutput(exec.Command(dockerBinary, "run", name)) + _, _, err = runCommandWithOutput(exec.Command(dockerBinary, "run", "--rm", name)) if err == nil { t.Fatal("The image was not supposed to be able to run") diff --git a/integration-cli/docker_cli_commit_test.go b/integration-cli/docker_cli_commit_test.go index 7715e81bf5..f41361ece4 100644 --- a/integration-cli/docker_cli_commit_test.go +++ b/integration-cli/docker_cli_commit_test.go @@ -154,6 +154,9 @@ func TestCommitHardlink(t *testing.T) { } func TestCommitTTY(t *testing.T) { + defer deleteImages("ttytest") + defer deleteAllContainers() + cmd := exec.Command(dockerBinary, "run", "-t", "--name", "tty", "busybox", "/bin/ls") if _, err := runCommand(cmd); err != nil { t.Fatal(err) diff --git a/integration-cli/docker_cli_events_test.go b/integration-cli/docker_cli_events_test.go index 2c4111ce55..92ff241e73 100644 --- a/integration-cli/docker_cli_events_test.go +++ b/integration-cli/docker_cli_events_test.go @@ -38,11 +38,15 @@ func TestEventsUntag(t *testing.T) { } func TestEventsPause(t *testing.T) { + name := "testeventpause" out, _, _ := cmd(t, "images", "-q") image := strings.Split(out, "\n")[0] - cmd(t, "run", "-d", "--name", "testeventpause", image, "sleep", "2") - cmd(t, "pause", "testeventpause") - cmd(t, "unpause", "testeventpause") + cmd(t, "run", "-d", "--name", name, image, "sleep", "2") + cmd(t, "pause", name) + cmd(t, "unpause", name) + + defer deleteAllContainers() + eventsCmd := exec.Command(dockerBinary, "events", "--since=0", fmt.Sprintf("--until=%d", time.Now().Unix())) out, _, _ = runCommandWithOutput(eventsCmd) events := strings.Split(out, "\n") @@ -60,10 +64,17 @@ func TestEventsPause(t *testing.T) { t.Fatalf("event should be pause, not %#v", unpauseEvent) } + waitCmd := exec.Command(dockerBinary, "wait", name) + if waitOut, _, err := runCommandWithOutput(waitCmd); err != nil { + t.Fatalf("error thrown while waiting for container: %s, %v", waitOut, err) + } + logDone("events - pause/unpause is logged") } func TestEventsContainerFailStartDie(t *testing.T) { + defer deleteAllContainers() + out, _, _ := cmd(t, "images", "-q") image := strings.Split(out, "\n")[0] eventsCmd := exec.Command(dockerBinary, "run", "-d", "--name", "testeventdie", image, "blerg") @@ -93,6 +104,7 @@ func TestEventsContainerFailStartDie(t *testing.T) { } func TestEventsLimit(t *testing.T) { + defer deleteAllContainers() for i := 0; i < 30; i++ { cmd(t, "run", "busybox", "echo", strconv.Itoa(i)) } @@ -241,6 +253,8 @@ func TestEventsImagePull(t *testing.T) { func TestEventsImageImport(t *testing.T) { since := time.Now().Unix() + defer deleteImages("cirros") + server, err := fileServer(map[string]string{ "/cirros.tar.gz": "/cirros.tar.gz", }) @@ -249,7 +263,7 @@ func TestEventsImageImport(t *testing.T) { } defer server.Close() fileURL := fmt.Sprintf("%s/cirros.tar.gz", server.URL) - importCmd := exec.Command(dockerBinary, "import", fileURL) + importCmd := exec.Command(dockerBinary, "import", fileURL, "cirros") out, _, err := runCommandWithOutput(importCmd) if err != nil { t.Errorf("import failed with errors: %v, output: %q", err, out) diff --git a/integration-cli/docker_cli_import_test.go b/integration-cli/docker_cli_import_test.go index ea001fd456..94aadc5831 100644 --- a/integration-cli/docker_cli_import_test.go +++ 
b/integration-cli/docker_cli_import_test.go @@ -16,7 +16,7 @@ func TestImportDisplay(t *testing.T) { } defer server.Close() fileURL := fmt.Sprintf("%s/cirros.tar.gz", server.URL) - importCmd := exec.Command(dockerBinary, "import", fileURL) + importCmd := exec.Command(dockerBinary, "import", fileURL, "cirros") out, _, err := runCommandWithOutput(importCmd) if err != nil { t.Errorf("import failed with errors: %v, output: %q", err, out) @@ -26,5 +26,7 @@ func TestImportDisplay(t *testing.T) { t.Fatalf("display is messed up: %d '\\n' instead of 2", n) } + deleteImages("cirros") + logDone("import - cirros was imported and display is fine") } diff --git a/integration-cli/docker_cli_rm_test.go b/integration-cli/docker_cli_rm_test.go index bac7490d55..6681840ecd 100644 --- a/integration-cli/docker_cli_rm_test.go +++ b/integration-cli/docker_cli_rm_test.go @@ -106,6 +106,7 @@ func TestRmContainerOrphaning(t *testing.T) { } deleteAllContainers() + deleteImages(img1) logDone("rm - container orphaning") } diff --git a/integration-cli/docker_cli_run_test.go b/integration-cli/docker_cli_run_test.go index ce85f7741b..ca44aa3902 100644 --- a/integration-cli/docker_cli_run_test.go +++ b/integration-cli/docker_cli_run_test.go @@ -563,7 +563,8 @@ func TestRunCreateVolumeWithSymlink(t *testing.T) { // Tests that a volume path that has a symlink exists in a container mounting it with `--volumes-from`. func TestRunVolumesFromSymlinkPath(t *testing.T) { - buildCmd := exec.Command(dockerBinary, "build", "-t", "docker-test-volumesfromsymlinkpath", "-") + name := "docker-test-volumesfromsymlinkpath" + buildCmd := exec.Command(dockerBinary, "build", "-t", name, "-") buildCmd.Stdin = strings.NewReader(`FROM busybox RUN mkdir /baz && ln -s /baz /foo VOLUME ["/foo/bar"]`) @@ -573,7 +574,7 @@ func TestRunVolumesFromSymlinkPath(t *testing.T) { t.Fatalf("could not build 'docker-test-volumesfromsymlinkpath': %v", err) } - cmd := exec.Command(dockerBinary, "run", "--name", "test-volumesfromsymlinkpath", "docker-test-volumesfromsymlinkpath") + cmd := exec.Command(dockerBinary, "run", "--name", "test-volumesfromsymlinkpath", name) exitCode, err := runCommand(cmd) if err != nil || exitCode != 0 { t.Fatalf("[run] (volume) err: %v, exitcode: %d", err, exitCode) @@ -585,8 +586,8 @@ func TestRunVolumesFromSymlinkPath(t *testing.T) { t.Fatalf("[run] err: %v, exitcode: %d", err, exitCode) } - deleteImages("docker-test-volumesfromsymlinkpath") deleteAllContainers() + deleteImages(name) logDone("run - volumes-from symlink path") } @@ -892,6 +893,7 @@ func TestRunUnPrivilegedCanMknod(t *testing.T) { } func TestRunCapDropInvalid(t *testing.T) { + defer deleteAllContainers() cmd := exec.Command(dockerBinary, "run", "--cap-drop=CHPASS", "busybox", "ls") out, _, err := runCommandWithOutput(cmd) if err == nil { @@ -962,6 +964,8 @@ func TestRunCapDropALLAddMknodCannotMknod(t *testing.T) { } func TestRunCapAddInvalid(t *testing.T) { + defer deleteAllContainers() + cmd := exec.Command(dockerBinary, "run", "--cap-add=CHPASS", "busybox", "ls") out, _, err := runCommandWithOutput(cmd) if err == nil { @@ -1718,6 +1722,8 @@ func TestRunExitOnStdinClose(t *testing.T) { // Test for #2267 func TestRunWriteHostsFileAndNotCommit(t *testing.T) { + defer deleteAllContainers() + name := "writehosts" cmd := exec.Command(dockerBinary, "run", "--name", name, "busybox", "sh", "-c", "echo test2267 >> /etc/hosts && cat /etc/hosts") out, _, err := runCommandWithOutput(cmd) @@ -1745,6 +1751,8 @@ func TestRunWriteHostsFileAndNotCommit(t *testing.T) { // Test for 
#2267 func TestRunWriteHostnameFileAndNotCommit(t *testing.T) { + defer deleteAllContainers() + name := "writehostname" cmd := exec.Command(dockerBinary, "run", "--name", name, "busybox", "sh", "-c", "echo test2267 >> /etc/hostname && cat /etc/hostname") out, _, err := runCommandWithOutput(cmd) @@ -1772,6 +1780,8 @@ func TestRunWriteHostnameFileAndNotCommit(t *testing.T) { // Test for #2267 func TestRunWriteResolvFileAndNotCommit(t *testing.T) { + defer deleteAllContainers() + name := "writeresolv" cmd := exec.Command(dockerBinary, "run", "--name", name, "busybox", "sh", "-c", "echo test2267 >> /etc/resolv.conf && cat /etc/resolv.conf") out, _, err := runCommandWithOutput(cmd) @@ -1798,6 +1808,8 @@ func TestRunWriteResolvFileAndNotCommit(t *testing.T) { } func TestRunWithBadDevice(t *testing.T) { + defer deleteAllContainers() + name := "baddevice" cmd := exec.Command(dockerBinary, "run", "--name", name, "--device", "/etc", "busybox", "true") out, _, err := runCommandWithOutput(cmd) @@ -1812,6 +1824,8 @@ func TestRunWithBadDevice(t *testing.T) { } func TestRunEntrypoint(t *testing.T) { + defer deleteAllContainers() + name := "entrypoint" cmd := exec.Command(dockerBinary, "run", "--name", name, "--entrypoint", "/bin/echo", "busybox", "-n", "foobar") out, _, err := runCommandWithOutput(cmd) @@ -1826,6 +1840,8 @@ func TestRunEntrypoint(t *testing.T) { } func TestRunBindMounts(t *testing.T) { + defer deleteAllContainers() + tmpDir, err := ioutil.TempDir("", "docker-test-container") if err != nil { t.Fatal(err) @@ -2435,8 +2451,6 @@ func TestRunNoOutputFromPullInStdout(t *testing.T) { } func TestRunVolumesCleanPaths(t *testing.T) { - defer deleteAllContainers() - if _, err := buildImage("run_volumes_clean_paths", `FROM busybox VOLUME /foo/`, @@ -2444,6 +2458,7 @@ func TestRunVolumesCleanPaths(t *testing.T) { t.Fatal(err) } defer deleteImages("run_volumes_clean_paths") + defer deleteAllContainers() cmd := exec.Command(dockerBinary, "run", "-v", "/foo", "-v", "/bar/", "--name", "dark_helmet", "run_volumes_clean_paths") if out, _, err := runCommandWithOutput(cmd); err != nil { diff --git a/integration-cli/docker_cli_save_load_test.go b/integration-cli/docker_cli_save_load_test.go index ceb73a571f..f561a1a05d 100644 --- a/integration-cli/docker_cli_save_load_test.go +++ b/integration-cli/docker_cli_save_load_test.go @@ -239,12 +239,15 @@ func TestSaveMultipleNames(t *testing.T) { if out, _, err := runCommandWithOutput(tagCmd); err != nil { t.Fatalf("failed to tag repo: %s, %v", out, err) } + defer deleteImages(repoName + "-one") + // Make two images tagCmdFinal = fmt.Sprintf("%v tag scratch:latest %v-two:latest", dockerBinary, repoName) tagCmd = exec.Command("bash", "-c", tagCmdFinal) if out, _, err := runCommandWithOutput(tagCmd); err != nil { t.Fatalf("failed to tag repo: %s, %v", out, err) } + defer deleteImages(repoName + "-two") saveCmdFinal := fmt.Sprintf("%v save %v-one %v-two:latest | tar xO repositories | grep -q -E '(-one|-two)'", dockerBinary, repoName, repoName) saveCmd := exec.Command("bash", "-c", saveCmdFinal) From ce86d5ae6826b0ec3dcf3188f8a6bd37b0afd3b2 Mon Sep 17 00:00:00 2001 From: Arnaud Porterie Date: Tue, 18 Nov 2014 14:44:05 -0800 Subject: [PATCH 382/592] Adapt project/make.sh for Windows builds Fixes: - link -H windows is not compatible with -linkmode external - under Cygwin go does not play well with cygdrive type paths Signed-off-by: Arnaud Porterie --- project/make.sh | 6 +++++- project/make/binary | 17 ++++++++++++----- 2 files changed, 17 insertions(+), 6 deletions(-) 
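Before the diff itself, the two fixes reduce to a pair of small guards (sketched here in isolation; variable names match the scripts):

```
# -H windows cannot be combined with -linkmode external, so drop the flag there
if [ "$(go env GOOS)" = 'windows' ]; then
	LDFLAGS_STATIC=''
fi

# go build -o does not cope with /cygdrive/... paths, so hand it a converted DEST
if [[ "$(uname -s)" == CYGWIN* ]]; then
	DEST=$(cygpath -mw "$DEST")
fi
```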
diff --git a/project/make.sh b/project/make.sh index d6da3057fa..0bcb1e1a6c 100755 --- a/project/make.sh +++ b/project/make.sh @@ -101,6 +101,10 @@ LDFLAGS=' -X '$DOCKER_PKG'/dockerversion.VERSION "'$VERSION'" ' LDFLAGS_STATIC='-linkmode external' +# Cgo -H windows is incompatible with -linkmode external. +if [ "$(go env GOOS)" == 'windows' ]; then + LDFLAGS_STATIC='' +fi EXTLDFLAGS_STATIC='-static' # ORIG_BUILDFLAGS is necessary for the cross target which cannot always build # with options like -race. @@ -215,7 +219,7 @@ bundle() { bundle=$(basename $bundlescript) echo "---> Making bundle: $bundle (in bundles/$VERSION/$bundle)" mkdir -p bundles/$VERSION/$bundle - source $bundlescript $(pwd)/bundles/$VERSION/$bundle + source "$bundlescript" "$(pwd)/bundles/$VERSION/$bundle" } main() { diff --git a/project/make/binary b/project/make/binary index 962bebc68d..6b988b1708 100755 --- a/project/make/binary +++ b/project/make/binary @@ -3,19 +3,26 @@ set -e DEST=$1 BINARY_NAME="docker-$VERSION" +BINARY_EXTENSION= if [ "$(go env GOOS)" = 'windows' ]; then - BINARY_NAME+='.exe' + BINARY_EXTENSION='.exe' +fi +BINARY_FULLNAME="$BINARY_NAME$BINARY_EXTENSION" + +# Cygdrive paths don't play well with go build -o. +if [[ "$(uname -s)" == CYGWIN* ]]; then + DEST=$(cygpath -mw $DEST) fi go build \ - -o "$DEST/$BINARY_NAME" \ + -o "$DEST/$BINARY_FULLNAME" \ "${BUILDFLAGS[@]}" \ -ldflags " $LDFLAGS $LDFLAGS_STATIC_DOCKER " \ ./docker -echo "Created binary: $DEST/$BINARY_NAME" -ln -sf "$BINARY_NAME" "$DEST/docker" +echo "Created binary: $DEST/$BINARY_FULLNAME" +ln -sf "$BINARY_FULLNAME" "$DEST/docker$BINARY_EXTENSION" -hash_files "$DEST/$BINARY_NAME" +hash_files "$DEST/$BINARY_FULLNAME" From bbb245defe670f4fd9b9159b154e0ba5aa93ec87 Mon Sep 17 00:00:00 2001 From: Tonis Tiigi Date: Mon, 17 Nov 2014 18:06:48 +0200 Subject: [PATCH 383/592] Fix deleteImages() helper for multiple names MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Pass every image name as separate argument. Signed-off-by: Tõnis Tiigi (github: tonistiigi) --- integration-cli/docker_utils.go | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/integration-cli/docker_utils.go b/integration-cli/docker_utils.go index 61a616ceb2..58752bd04e 100644 --- a/integration-cli/docker_utils.go +++ b/integration-cli/docker_utils.go @@ -322,7 +322,10 @@ func deleteAllContainers() error { } func deleteImages(images ...string) error { - rmiCmd := exec.Command(dockerBinary, "rmi", strings.Join(images, " ")) + args := make([]string, 1, 2) + args[0] = "rmi" + args = append(args, images...) + rmiCmd := exec.Command(dockerBinary, args...) 
exitCode, err := runCommand(rmiCmd) // set error manually if not set if exitCode != 0 && err == nil { From 6705477673be7c303369778f6f288ee600ce3893 Mon Sep 17 00:00:00 2001 From: Tonis Tiigi Date: Wed, 19 Nov 2014 23:24:16 +0200 Subject: [PATCH 384/592] Fix misuses of format based logging functions MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Signed-off-by: Tõnis Tiigi (github: tonistiigi) --- daemon/graphdriver/graphtest/graphtest.go | 2 +- image/image.go | 2 +- integration-cli/docker_cli_events_test.go | 2 +- integration-cli/docker_cli_nat_test.go | 2 +- 4 files changed, 4 insertions(+), 4 deletions(-) diff --git a/daemon/graphdriver/graphtest/graphtest.go b/daemon/graphdriver/graphtest/graphtest.go index 6407e1205d..16c7163130 100644 --- a/daemon/graphdriver/graphtest/graphtest.go +++ b/daemon/graphdriver/graphtest/graphtest.go @@ -33,7 +33,7 @@ func newDriver(t *testing.T, name string) *Driver { d, err := graphdriver.GetDriver(name, root, nil) if err != nil { if err == graphdriver.ErrNotSupported || err == graphdriver.ErrPrerequisites { - t.Skip("Driver %s not supported", name) + t.Skipf("Driver %s not supported", name) } t.Fatal(err) } diff --git a/image/image.go b/image/image.go index f16385bbfc..8cd9aa3755 100644 --- a/image/image.go +++ b/image/image.go @@ -112,7 +112,7 @@ func StoreImage(img *Image, layerData archive.ArchiveReader, root string) error checksum := layerTarSum.Sum(nil) if img.Checksum != "" && img.Checksum != checksum { - log.Warn("image layer checksum mismatch: computed %q, expected %q", checksum, img.Checksum) + log.Warnf("image layer checksum mismatch: computed %q, expected %q", checksum, img.Checksum) } img.Checksum = checksum diff --git a/integration-cli/docker_cli_events_test.go b/integration-cli/docker_cli_events_test.go index 2c4111ce55..f2dc4e45a0 100644 --- a/integration-cli/docker_cli_events_test.go +++ b/integration-cli/docker_cli_events_test.go @@ -220,7 +220,7 @@ func TestEventsImagePull(t *testing.T) { since := time.Now().Unix() pullCmd := exec.Command(dockerBinary, "pull", "scratch") if out, _, err := runCommandWithOutput(pullCmd); err != nil { - t.Fatal("pulling the scratch image from has failed: %s, %v", out, err) + t.Fatalf("pulling the scratch image from has failed: %s, %v", out, err) } eventsCmd := exec.Command(dockerBinary, "events", diff --git a/integration-cli/docker_cli_nat_test.go b/integration-cli/docker_cli_nat_test.go index 01ebb73c74..7e3b595a80 100644 --- a/integration-cli/docker_cli_nat_test.go +++ b/integration-cli/docker_cli_nat_test.go @@ -11,7 +11,7 @@ import ( func TestNetworkNat(t *testing.T) { iface, err := net.InterfaceByName("eth0") if err != nil { - t.Skip("Test not running with `make test`. Interface eth0 not found: %s", err) + t.Skipf("Test not running with `make test`. Interface eth0 not found: %s", err) } ifaceAddrs, err := iface.Addrs() From cb57c388480d03770378e6e1842c2c1c6a46d8fd Mon Sep 17 00:00:00 2001 From: Vincent Batts Date: Tue, 18 Nov 2014 23:22:32 -0500 Subject: [PATCH 385/592] overlayfs: add --storage-driver doc Signed-off-by: Vincent Batts --- docs/sources/reference/commandline/cli.md | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/docs/sources/reference/commandline/cli.md b/docs/sources/reference/commandline/cli.md index ab80f2ff51..ce7acc7e40 100644 --- a/docs/sources/reference/commandline/cli.md +++ b/docs/sources/reference/commandline/cli.md @@ -156,7 +156,7 @@ string is equivalent to setting the `--tlsverify` flag. 
The following are equiva ### Daemon storage-driver option The Docker daemon has support for three different image layer storage drivers: `aufs`, -`devicemapper`, and `btrfs`. +`devicemapper`, `btrfs` and `overlayfs`. The `aufs` driver is the oldest, but is based on a Linux kernel patch-set that is unlikely to be merged into the main kernel. These are also known to cause some @@ -175,6 +175,9 @@ To tell the Docker daemon to use `devicemapper`, use The `btrfs` driver is very fast for `docker build` - but like `devicemapper` does not share executable memory between devices. Use `docker -d -s btrfs -g /mnt/btrfs_partition`. +The `overlayfs` is a very fast union filesystem. It is now merged in the main +Linux kernel as of [3.18.0](https://lkml.org/lkml/2014/10/26/137). +Call `docker -d -s overlayfs` to use it. ### Docker exec-driver option From 71209f75791fdc1a2124682f50cd00a413ddb143 Mon Sep 17 00:00:00 2001 From: Michael Crosby Date: Tue, 18 Nov 2014 15:10:07 -0800 Subject: [PATCH 386/592] Add test for --net container: This adds an integration test for checking that the network namespace fds are the same when a container joins another container's network namespace. Signed-off-by: Michael Crosby --- integration-cli/docker_cli_run_test.go | 34 ++++++++++++++++++++++++++ 1 file changed, 34 insertions(+) diff --git a/integration-cli/docker_cli_run_test.go b/integration-cli/docker_cli_run_test.go index ce85f7741b..6d0f34f921 100644 --- a/integration-cli/docker_cli_run_test.go +++ b/integration-cli/docker_cli_run_test.go @@ -2638,3 +2638,37 @@ func TestRunModeIpcContainer(t *testing.T) { logDone("run - hostname and several network modes") } + +func TestContainerNetworkMode(t *testing.T) { + cmd := exec.Command(dockerBinary, "run", "-d", "busybox", "top") + out, _, err := runCommandWithOutput(cmd) + if err != nil { + t.Fatal(err, out) + } + id := strings.TrimSpace(out) + if err := waitRun(id); err != nil { + t.Fatal(err) + } + pid1, err := inspectField(id, "State.Pid") + if err != nil { + t.Fatal(err) + } + + parentContainerNet, err := os.Readlink(fmt.Sprintf("/proc/%s/ns/net", pid1)) + if err != nil { + t.Fatal(err) + } + cmd = exec.Command(dockerBinary, "run", fmt.Sprintf("--net=container:%s", id), "busybox", "readlink", "/proc/self/ns/net") + out2, _, err := runCommandWithOutput(cmd) + if err != nil { + t.Fatal(err, out2) + } + + out2 = strings.Trim(out2, "\n") + if parentContainerNet != out2 { + t.Fatalf("NET different with --net=container:%s %s != %s\n", id, parentContainerNet, out2) + } + deleteAllContainers() + + logDone("run - container shared network namespace") +} From 4deac03c65edf34affd66abd3ef8fb88287d2f5a Mon Sep 17 00:00:00 2001 From: Oh Jinkyun Date: Mon, 3 Nov 2014 20:11:29 +0900 Subject: [PATCH 387/592] Fix for #8777 Now filter name is trimmed and lowercased before evaluation for case insensitive and whitespace trimemd check. Signed-off-by: Oh Jinkyun --- api/client/commands.go | 10 ++++ integration-cli/docker_cli_images_test.go | 59 +++++++++++++++++++++++ pkg/parsers/filters/parse.go | 4 +- 3 files changed, 72 insertions(+), 1 deletion(-) diff --git a/api/client/commands.go b/api/client/commands.go index a96089b8e6..a7e3acd510 100644 --- a/api/client/commands.go +++ b/api/client/commands.go @@ -47,6 +47,10 @@ const ( tarHeaderSize = 512 ) +var ( + acceptedImageFilterTags = map[string]struct{}{"dangling": {}} +) + func (cli *DockerCli) CmdHelp(args ...string) error { if len(args) > 1 { method, exists := cli.getMethod(args[:2]...) 
@@ -1336,6 +1340,12 @@ func (cli *DockerCli) CmdImages(args ...string) error { } } + for name := range imageFilterArgs { + if _, ok := acceptedImageFilterTags[name]; !ok { + return fmt.Errorf("Invalid filter '%s'", name) + } + } + matchName := cmd.Arg(0) // FIXME: --viz and --tree are deprecated. Remove them in a future version. if *flViz || *flTree { diff --git a/integration-cli/docker_cli_images_test.go b/integration-cli/docker_cli_images_test.go index ad06cb2eb8..a91f1c0e22 100644 --- a/integration-cli/docker_cli_images_test.go +++ b/integration-cli/docker_cli_images_test.go @@ -1,7 +1,10 @@ package main import ( + "fmt" "os/exec" + "reflect" + "sort" "strings" "testing" "time" @@ -63,3 +66,59 @@ func TestImagesOrderedByCreationDate(t *testing.T) { logDone("images - ordering by creation date") } + +func TestImagesErrorWithInvalidFilterNameTest(t *testing.T) { + imagesCmd := exec.Command(dockerBinary, "images", "-f", "FOO=123") + out, _, err := runCommandWithOutput(imagesCmd) + if !strings.Contains(out, "Invalid filter") { + t.Fatalf("error should occur when listing images with invalid filter name FOO, %s, %v", out, err) + } + + logDone("images - invalid filter name check working") +} + +func TestImagesFilterWhiteSpaceTrimmingAndLowerCasingWorking(t *testing.T) { + imageName := "images_filter_test" + defer deleteAllContainers() + defer deleteImages(imageName) + buildImage(imageName, + `FROM scratch + RUN touch /test/foo + RUN touch /test/bar + RUN touch /test/baz`, true) + + filters := []string{ + "dangling=true", + "Dangling=true", + " dangling=true", + "dangling=true ", + "dangling = true", + } + + imageListings := make([][]string, 5, 5) + for idx, filter := range filters { + cmd := exec.Command(dockerBinary, "images", "-f", filter) + out, _, err := runCommandWithOutput(cmd) + if err != nil { + t.Fatal(err) + } + listing := strings.Split(out, "\n") + sort.Strings(listing) + imageListings[idx] = listing + } + + for idx, listing := range imageListings { + if idx < 4 && !reflect.DeepEqual(listing, imageListings[idx+1]) { + for idx, errListing := range imageListings { + fmt.Printf("out %d", idx) + for _, image := range errListing { + fmt.Print(image) + } + fmt.Print("") + } + t.Fatalf("All output must be the same") + } + } + + logDone("images - white space trimming and lower casing") +} diff --git a/pkg/parsers/filters/parse.go b/pkg/parsers/filters/parse.go index 403959223c..8b045a3098 100644 --- a/pkg/parsers/filters/parse.go +++ b/pkg/parsers/filters/parse.go @@ -29,7 +29,9 @@ func ParseFlag(arg string, prev Args) (Args, error) { } f := strings.SplitN(arg, "=", 2) - filters[f[0]] = append(filters[f[0]], f[1]) + name := strings.ToLower(strings.TrimSpace(f[0])) + value := strings.TrimSpace(f[1]) + filters[name] = append(filters[name], value) return filters, nil } From 72c55e82156843c73ab1405b565e63d947b66c10 Mon Sep 17 00:00:00 2001 From: Alexandr Morozov Date: Thu, 20 Nov 2014 09:02:21 -0800 Subject: [PATCH 388/592] Increase timeout for userland proxy starting Fixes #8883 Signed-off-by: Alexandr Morozov --- daemon/networkdriver/portmapper/proxy.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/daemon/networkdriver/portmapper/proxy.go b/daemon/networkdriver/portmapper/proxy.go index e4a17bcd9a..5d0aa0be0d 100644 --- a/daemon/networkdriver/portmapper/proxy.go +++ b/daemon/networkdriver/portmapper/proxy.go @@ -145,7 +145,7 @@ func (p *proxyCommand) Start() error { select { case err := <-errchan: return err - case <-time.After(1 * time.Second): + case <-time.After(16 * 
time.Second): return fmt.Errorf("Timed out proxy starting the userland proxy") } } From 769b79866aa645d4deeeb0a44120cde7b046f0d1 Mon Sep 17 00:00:00 2001 From: unclejack Date: Thu, 20 Nov 2014 19:33:15 +0200 Subject: [PATCH 389/592] pkg/system: fix cleanup in tests Docker-DCO-1.1-Signed-off-by: Cristian Staretu (github: unclejack) --- pkg/system/lstat_test.go | 4 +++- pkg/system/stat_test.go | 4 +++- pkg/system/utimes_test.go | 7 ++++--- 3 files changed, 10 insertions(+), 5 deletions(-) diff --git a/pkg/system/lstat_test.go b/pkg/system/lstat_test.go index 7e271efea5..9bab4d7b0c 100644 --- a/pkg/system/lstat_test.go +++ b/pkg/system/lstat_test.go @@ -1,11 +1,13 @@ package system import ( + "os" "testing" ) func TestLstat(t *testing.T) { - file, invalid, _ := prepareFiles(t) + file, invalid, _, dir := prepareFiles(t) + defer os.RemoveAll(dir) statFile, err := Lstat(file) if err != nil { diff --git a/pkg/system/stat_test.go b/pkg/system/stat_test.go index 0dcb239ece..abcc8ea7a6 100644 --- a/pkg/system/stat_test.go +++ b/pkg/system/stat_test.go @@ -1,12 +1,14 @@ package system import ( + "os" "syscall" "testing" ) func TestFromStatT(t *testing.T) { - file, _, _ := prepareFiles(t) + file, _, _, dir := prepareFiles(t) + defer os.RemoveAll(dir) stat := &syscall.Stat_t{} err := syscall.Lstat(file, stat) diff --git a/pkg/system/utimes_test.go b/pkg/system/utimes_test.go index 38e4020cb5..1dea47cc15 100644 --- a/pkg/system/utimes_test.go +++ b/pkg/system/utimes_test.go @@ -8,7 +8,7 @@ import ( "testing" ) -func prepareFiles(t *testing.T) (string, string, string) { +func prepareFiles(t *testing.T) (string, string, string, string) { dir, err := ioutil.TempDir("", "docker-system-test") if err != nil { t.Fatal(err) @@ -26,11 +26,12 @@ func prepareFiles(t *testing.T) (string, string, string) { t.Fatal(err) } - return file, invalid, symlink + return file, invalid, symlink, dir } func TestLUtimesNano(t *testing.T) { - file, invalid, symlink := prepareFiles(t) + file, invalid, symlink, dir := prepareFiles(t) + defer os.RemoveAll(dir) before, err := os.Stat(file) if err != nil { From 32ba6ab83c7e47d627a2b971e7f6ca9b56e1be85 Mon Sep 17 00:00:00 2001 From: unclejack Date: Thu, 20 Nov 2014 19:34:35 +0200 Subject: [PATCH 390/592] pkg/archive: fix TempArchive cleanup w/ one read This fixes the removal of TempArchives which can read with only one read. Such archives weren't getting removed because EOF wasn't being triggered. 
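The gist of the fix, sketched outside the diff (type and field names as in pkg/archive below): count the bytes served and clean up either on error or once the pre-computed size has been read, because a caller that drains the file in a single Read is never handed io.EOF on that call.

```
func (archive *TempArchive) Read(data []byte) (int, error) {
	n, err := archive.File.Read(data)
	archive.read += int64(n)
	// Remove the backing file on error/EOF *or* once every byte has been
	// served; a single Read that consumes the whole file returns a nil
	// error, so the size check is what triggers cleanup in that case.
	if err != nil || archive.read == archive.Size {
		archive.File.Close()
		os.Remove(archive.File.Name())
	}
	return n, err
}
```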
Docker-DCO-1.1-Signed-off-by: Cristian Staretu (github: unclejack) --- pkg/archive/archive.go | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/pkg/archive/archive.go b/pkg/archive/archive.go index 5a81223dbd..995668104d 100644 --- a/pkg/archive/archive.go +++ b/pkg/archive/archive.go @@ -742,17 +742,20 @@ func NewTempArchive(src Archive, dir string) (*TempArchive, error) { return nil, err } size := st.Size() - return &TempArchive{f, size}, nil + return &TempArchive{f, size, 0}, nil } type TempArchive struct { *os.File Size int64 // Pre-computed from Stat().Size() as a convenience + read int64 } func (archive *TempArchive) Read(data []byte) (int, error) { n, err := archive.File.Read(data) - if err != nil { + archive.read += int64(n) + if err != nil || archive.read == archive.Size { + archive.File.Close() os.Remove(archive.File.Name()) } return n, err From 4508bd94b0efd07a0ef48cd090786615e6b8cbb7 Mon Sep 17 00:00:00 2001 From: unclejack Date: Thu, 20 Nov 2014 19:36:54 +0200 Subject: [PATCH 391/592] pkg/symlink: fix cleanup for tests Docker-DCO-1.1-Signed-off-by: Cristian Staretu (github: unclejack) --- pkg/symlink/fs_test.go | 1 + 1 file changed, 1 insertion(+) diff --git a/pkg/symlink/fs_test.go b/pkg/symlink/fs_test.go index d85fd6da74..cc0d82d1a3 100644 --- a/pkg/symlink/fs_test.go +++ b/pkg/symlink/fs_test.go @@ -46,6 +46,7 @@ func TestFollowSymLinkUnderLinkedDir(t *testing.T) { if err != nil { t.Fatal(err) } + defer os.RemoveAll(dir) os.Mkdir(filepath.Join(dir, "realdir"), 0700) os.Symlink("realdir", filepath.Join(dir, "linkdir")) From 98307c8faefca5c4347288af18aee4dacbf8802c Mon Sep 17 00:00:00 2001 From: unclejack Date: Thu, 20 Nov 2014 19:37:46 +0200 Subject: [PATCH 392/592] integ-cli: fix cleanup in test which mounts tmpfs Docker-DCO-1.1-Signed-off-by: Cristian Staretu (github: unclejack) --- integration-cli/docker_cli_run_test.go | 1 + 1 file changed, 1 insertion(+) diff --git a/integration-cli/docker_cli_run_test.go b/integration-cli/docker_cli_run_test.go index ca44aa3902..911861e8ac 100644 --- a/integration-cli/docker_cli_run_test.go +++ b/integration-cli/docker_cli_run_test.go @@ -1257,6 +1257,7 @@ func TestRunWithVolumesIsRecursive(t *testing.T) { if err := mount.Mount("tmpfs", tmpfsDir, "tmpfs", ""); err != nil { t.Fatalf("failed to create a tmpfs mount at %s - %s", tmpfsDir, err) } + defer mount.Unmount(tmpfsDir) f, err := ioutil.TempFile(tmpfsDir, "touch-me") if err != nil { From db7fded17fd984fc3c854d1e34bd8d656c3b3692 Mon Sep 17 00:00:00 2001 From: unclejack Date: Thu, 20 Nov 2014 19:38:41 +0200 Subject: [PATCH 393/592] integ-cli: fix cleanup in build tests Docker-DCO-1.1-Signed-off-by: Cristian Staretu (github: unclejack) --- integration-cli/docker_cli_build_test.go | 43 ++++++++++++++++++++++++ 1 file changed, 43 insertions(+) diff --git a/integration-cli/docker_cli_build_test.go b/integration-cli/docker_cli_build_test.go index 4e6fe63ae1..32b568b8c9 100644 --- a/integration-cli/docker_cli_build_test.go +++ b/integration-cli/docker_cli_build_test.go @@ -179,6 +179,7 @@ func TestBuildEnvironmentReplacementAddCopy(t *testing.T) { if err != nil { t.Fatal(err) } + defer ctx.Close() if _, err := buildImageFromContext(name, ctx, true); err != nil { t.Fatal(err) @@ -632,6 +633,8 @@ func TestBuildSixtySteps(t *testing.T) { if err != nil { t.Fatal(err) } + defer ctx.Close() + if _, err := buildImageFromContext(name, ctx, true); err != nil { t.Fatal(err) } @@ -656,6 +659,8 @@ RUN [ $(ls -l /exists | awk '{print $3":"$4}') = 'dockerio:dockerio' ]`, if err 
!= nil { t.Fatal(err) } + defer ctx.Close() + if _, err := buildImageFromContext(name, ctx, true); err != nil { t.Fatal(err) } @@ -674,6 +679,8 @@ ADD test_file .`, if err != nil { t.Fatal(err) } + defer ctx.Close() + done := make(chan struct{}) go func() { if _, err := buildImageFromContext(name, ctx, true); err != nil { @@ -708,6 +715,8 @@ RUN [ $(ls -l /exists/exists_file | awk '{print $3":"$4}') = 'dockerio:dockerio' if err != nil { t.Fatal(err) } + defer ctx.Close() + if _, err := buildImageFromContext(name, ctx, true); err != nil { t.Fatal(err) } @@ -947,6 +956,8 @@ RUN [ $(ls -l /exists | awk '{print $3":"$4}') = 'dockerio:dockerio' ]`, if err != nil { t.Fatal(err) } + defer ctx.Close() + if _, err := buildImageFromContext(name, ctx, true); err != nil { t.Fatal(err) } @@ -971,6 +982,8 @@ RUN [ $(ls -l /exists | awk '{print $3":"$4}') = 'dockerio:dockerio' ]`, if err != nil { t.Fatal(err) } + defer ctx.Close() + if _, err := buildImageFromContext(name, ctx, true); err != nil { t.Fatal(err) } @@ -996,6 +1009,8 @@ RUN [ $(ls -l /exists/test_file | awk '{print $3":"$4}') = 'root:root' ]`, if err != nil { t.Fatal(err) } + defer ctx.Close() + if _, err := buildImageFromContext(name, ctx, true); err != nil { t.Fatal(err) } @@ -1022,6 +1037,8 @@ RUN [ $(ls -l /exists | awk '{print $3":"$4}') = 'dockerio:dockerio' ]`, if err != nil { t.Fatal(err) } + defer ctx.Close() + if _, err := buildImageFromContext(name, ctx, true); err != nil { t.Fatal(err) } @@ -1040,6 +1057,8 @@ ADD . /`, if err != nil { t.Fatal(err) } + defer ctx.Close() + if _, err := buildImageFromContext(name, ctx, true); err != nil { t.Fatal(err) } @@ -1064,6 +1083,8 @@ RUN [ $(ls -l /exists | awk '{print $3":"$4}') = 'dockerio:dockerio' ]`, if err != nil { t.Fatal(err) } + defer ctx.Close() + if _, err := buildImageFromContext(name, ctx, true); err != nil { t.Fatal(err) } @@ -1082,6 +1103,8 @@ COPY test_file .`, if err != nil { t.Fatal(err) } + defer ctx.Close() + done := make(chan struct{}) go func() { if _, err := buildImageFromContext(name, ctx, true); err != nil { @@ -1116,6 +1139,8 @@ RUN [ $(ls -l /exists/exists_file | awk '{print $3":"$4}') = 'dockerio:dockerio' if err != nil { t.Fatal(err) } + defer ctx.Close() + if _, err := buildImageFromContext(name, ctx, true); err != nil { t.Fatal(err) } @@ -1140,6 +1165,8 @@ RUN [ $(ls -l /exists | awk '{print $3":"$4}') = 'dockerio:dockerio' ]`, if err != nil { t.Fatal(err) } + defer ctx.Close() + if _, err := buildImageFromContext(name, ctx, true); err != nil { t.Fatal(err) } @@ -1163,6 +1190,8 @@ RUN [ $(ls -l /exists | awk '{print $3":"$4}') = 'dockerio:dockerio' ]`, if err != nil { t.Fatal(err) } + defer ctx.Close() + if _, err := buildImageFromContext(name, ctx, true); err != nil { t.Fatal(err) } @@ -1188,6 +1217,8 @@ RUN [ $(ls -l /exists/test_file | awk '{print $3":"$4}') = 'root:root' ]`, if err != nil { t.Fatal(err) } + defer ctx.Close() + if _, err := buildImageFromContext(name, ctx, true); err != nil { t.Fatal(err) } @@ -1214,6 +1245,8 @@ RUN [ $(ls -l /exists | awk '{print $3":"$4}') = 'dockerio:dockerio' ]`, if err != nil { t.Fatal(err) } + defer ctx.Close() + if _, err := buildImageFromContext(name, ctx, true); err != nil { t.Fatal(err) } @@ -1231,6 +1264,8 @@ COPY . 
/`, if err != nil { t.Fatal(err) } + defer ctx.Close() + if _, err := buildImageFromContext(name, ctx, true); err != nil { t.Fatal(err) } @@ -1858,6 +1893,7 @@ func TestBuildOnBuildLimitedInheritence(t *testing.T) { if err != nil { t.Fatal(err) } + defer ctx.Close() out1, _, err := dockerCmdInDir(t, ctx.Dir, "build", "-t", name1, ".") if err != nil { @@ -1874,6 +1910,7 @@ func TestBuildOnBuildLimitedInheritence(t *testing.T) { if err != nil { t.Fatal(err) } + defer ctx.Close() out2, _, err = dockerCmdInDir(t, ctx.Dir, "build", "-t", name2, ".") if err != nil { @@ -1890,6 +1927,7 @@ func TestBuildOnBuildLimitedInheritence(t *testing.T) { if err != nil { t.Fatal(err) } + defer ctx.Close() out3, _, err = dockerCmdInDir(t, ctx.Dir, "build", "-t", name3, ".") if err != nil { @@ -2984,6 +3022,8 @@ RUN [ "$(cat $TO)" = "hello" ] if err != nil { t.Fatal(err) } + defer ctx.Close() + _, err = buildImageFromContext(name, ctx, true) if err != nil { t.Fatal(err) @@ -3006,6 +3046,8 @@ RUN [ "$(cat /testfile)" = 'test!' ]` if err != nil { t.Fatal(err) } + defer ctx.Close() + _, err = buildImageFromContext(name, ctx, true) if err != nil { t.Fatal(err) @@ -3060,6 +3102,7 @@ RUN cat /existing-directory-trailing-slash/test/foo | grep Hi` } return &FakeContext{Dir: tmpDir} }() + defer ctx.Close() if _, err := buildImageFromContext(name, ctx, true); err != nil { t.Fatalf("build failed to complete for TestBuildAddTar: %v", err) From 6e92dfdfd843aec909572a405337efb25beb6f58 Mon Sep 17 00:00:00 2001 From: Victor Vieux Date: Mon, 17 Nov 2014 19:39:55 +0000 Subject: [PATCH 394/592] Update libtrust version Signed-off-by: Victor Vieux --- api/common.go | 2 +- integration/utils_test.go | 2 ++ project/vendor.sh | 2 +- .../src/github.com/docker/libtrust/ec_key.go | 11 +-------- .../src/github.com/docker/libtrust/filter.go | 24 ++++++++++++------- .../github.com/docker/libtrust/filter_test.go | 6 +++-- .../docker/libtrust/key_files_test.go | 4 ++-- .../src/github.com/docker/libtrust/rsa_key.go | 11 +-------- .../libtrust/trustgraph/statement_test.go | 4 ++-- vendor/src/github.com/docker/libtrust/util.go | 16 +++++++++++++ .../github.com/docker/libtrust/util_test.go | 23 ++++++++++++++++++ 11 files changed, 68 insertions(+), 37 deletions(-) create mode 100644 vendor/src/github.com/docker/libtrust/util_test.go diff --git a/api/common.go b/api/common.go index 52e67caa13..3a46a8a523 100644 --- a/api/common.go +++ b/api/common.go @@ -68,7 +68,7 @@ func LoadOrCreateTrustKey(trustKeyPath string) (libtrust.PrivateKey, error) { return nil, fmt.Errorf("Error saving key file: %s", err) } } else if err != nil { - log.Fatalf("Error loading key file: %s", err) + return nil, fmt.Errorf("Error loading key file: %s", err) } return trustKey, nil } diff --git a/integration/utils_test.go b/integration/utils_test.go index deb6a337a6..0c78a76170 100644 --- a/integration/utils_test.go +++ b/integration/utils_test.go @@ -9,6 +9,7 @@ import ( "net/http/httptest" "os" "path" + "path/filepath" "strings" "testing" "time" @@ -187,6 +188,7 @@ func newTestEngine(t Fataler, autorestart bool, root string) *engine.Engine { // Either InterContainerCommunication or EnableIptables must be set, // otherwise NewDaemon will fail because of conflicting settings. 
InterContainerCommunication: true, + TrustKeyPath: filepath.Join(root, "key.json"), } d, err := daemon.NewDaemon(cfg, eng) if err != nil { diff --git a/project/vendor.sh b/project/vendor.sh index 4c0b09fed1..1911583cab 100755 --- a/project/vendor.sh +++ b/project/vendor.sh @@ -51,7 +51,7 @@ clone hg code.google.com/p/go.net 84a4013f96e0 clone hg code.google.com/p/gosqlite 74691fb6f837 -clone git github.com/docker/libtrust d273ef2565ca +clone git github.com/docker/libtrust 230dfd18c232 clone git github.com/Sirupsen/logrus v0.6.0 diff --git a/vendor/src/github.com/docker/libtrust/ec_key.go b/vendor/src/github.com/docker/libtrust/ec_key.go index c7ac6844cf..f642acbcfa 100644 --- a/vendor/src/github.com/docker/libtrust/ec_key.go +++ b/vendor/src/github.com/docker/libtrust/ec_key.go @@ -55,16 +55,7 @@ func (k *ecPublicKey) CurveName() string { // KeyID returns a distinct identifier which is unique to this Public Key. func (k *ecPublicKey) KeyID() string { - // Generate and return a libtrust fingerprint of the EC public key. - // For an EC key this should be: - // SHA256("EC"+curveName+bytes(X)+bytes(Y)) - // Then truncated to 240 bits and encoded into 12 base32 groups like so: - // ABCD:EFGH:IJKL:MNOP:QRST:UVWX:YZ23:4567:ABCD:EFGH:IJKL:MNOP - hasher := crypto.SHA256.New() - hasher.Write([]byte(k.KeyType() + k.CurveName())) - hasher.Write(k.X.Bytes()) - hasher.Write(k.Y.Bytes()) - return keyIDEncode(hasher.Sum(nil)[:30]) + return keyIDFromCryptoKey(k) } func (k *ecPublicKey) String() string { diff --git a/vendor/src/github.com/docker/libtrust/filter.go b/vendor/src/github.com/docker/libtrust/filter.go index 945852afc8..5b2b4fca6f 100644 --- a/vendor/src/github.com/docker/libtrust/filter.go +++ b/vendor/src/github.com/docker/libtrust/filter.go @@ -11,9 +11,21 @@ func FilterByHosts(keys []PublicKey, host string, includeEmpty bool) ([]PublicKe filtered := make([]PublicKey, 0, len(keys)) for _, pubKey := range keys { - hosts, ok := pubKey.GetExtendedField("hosts").([]interface{}) + var hosts []string + switch v := pubKey.GetExtendedField("hosts").(type) { + case []string: + hosts = v + case []interface{}: + for _, value := range v { + h, ok := value.(string) + if !ok { + continue + } + hosts = append(hosts, h) + } + } - if !ok || (ok && len(hosts) == 0) { + if len(hosts) == 0 { if includeEmpty { filtered = append(filtered, pubKey) } @@ -21,12 +33,7 @@ func FilterByHosts(keys []PublicKey, host string, includeEmpty bool) ([]PublicKe } // Check if any hosts match pattern - for _, hostVal := range hosts { - hostPattern, ok := hostVal.(string) - if !ok { - continue - } - + for _, hostPattern := range hosts { match, err := filepath.Match(hostPattern, host) if err != nil { return nil, err @@ -37,7 +44,6 @@ func FilterByHosts(keys []PublicKey, host string, includeEmpty bool) ([]PublicKe continue } } - } return filtered, nil diff --git a/vendor/src/github.com/docker/libtrust/filter_test.go b/vendor/src/github.com/docker/libtrust/filter_test.go index b24e3322e6..997e554c04 100644 --- a/vendor/src/github.com/docker/libtrust/filter_test.go +++ b/vendor/src/github.com/docker/libtrust/filter_test.go @@ -27,6 +27,8 @@ func TestFilter(t *testing.T) { t.Fatal(err) } + // we use both []interface{} and []string here because jwt uses + // []interface{} format, while PEM uses []string switch { case i == 0: // Don't add entries for this key, key 0. 
@@ -36,10 +38,10 @@ func TestFilter(t *testing.T) { key.AddExtendedField("hosts", []interface{}{"*.even.example.com"}) case i == 7: // Should catch only the last key, and make it match any hostname. - key.AddExtendedField("hosts", []interface{}{"*"}) + key.AddExtendedField("hosts", []string{"*"}) default: // should catch keys 1, 3, 5. - key.AddExtendedField("hosts", []interface{}{"*.example.com"}) + key.AddExtendedField("hosts", []string{"*.example.com"}) } keys = append(keys, key) diff --git a/vendor/src/github.com/docker/libtrust/key_files_test.go b/vendor/src/github.com/docker/libtrust/key_files_test.go index 66c71dd43f..57e691f2ed 100644 --- a/vendor/src/github.com/docker/libtrust/key_files_test.go +++ b/vendor/src/github.com/docker/libtrust/key_files_test.go @@ -138,7 +138,7 @@ func testTrustedHostKeysFile(t *testing.T, trustedHostKeysFilename string) { } for addr, hostKey := range trustedHostKeysMapping { - t.Logf("Host Address: %s\n", addr) + t.Logf("Host Address: %d\n", addr) t.Logf("Host Key: %s\n\n", hostKey) } @@ -160,7 +160,7 @@ func testTrustedHostKeysFile(t *testing.T, trustedHostKeysFilename string) { } for addr, hostKey := range trustedHostKeysMapping { - t.Logf("Host Address: %s\n", addr) + t.Logf("Host Address: %d\n", addr) t.Logf("Host Key: %s\n\n", hostKey) } diff --git a/vendor/src/github.com/docker/libtrust/rsa_key.go b/vendor/src/github.com/docker/libtrust/rsa_key.go index 45463039d2..ecb15b56f3 100644 --- a/vendor/src/github.com/docker/libtrust/rsa_key.go +++ b/vendor/src/github.com/docker/libtrust/rsa_key.go @@ -34,16 +34,7 @@ func (k *rsaPublicKey) KeyType() string { // KeyID returns a distinct identifier which is unique to this Public Key. func (k *rsaPublicKey) KeyID() string { - // Generate and return a 'libtrust' fingerprint of the RSA public key. 
- // For an RSA key this should be: - // SHA256("RSA"+bytes(N)+bytes(E)) - // Then truncated to 240 bits and encoded into 12 base32 groups like so: - // ABCD:EFGH:IJKL:MNOP:QRST:UVWX:YZ23:4567:ABCD:EFGH:IJKL:MNOP - hasher := crypto.SHA256.New() - hasher.Write([]byte(k.KeyType())) - hasher.Write(k.N.Bytes()) - hasher.Write(serializeRSAPublicExponentParam(k.E)) - return keyIDEncode(hasher.Sum(nil)[:30]) + return keyIDFromCryptoKey(k) } func (k *rsaPublicKey) String() string { diff --git a/vendor/src/github.com/docker/libtrust/trustgraph/statement_test.go b/vendor/src/github.com/docker/libtrust/trustgraph/statement_test.go index d9c3c1a1ea..e509468659 100644 --- a/vendor/src/github.com/docker/libtrust/trustgraph/statement_test.go +++ b/vendor/src/github.com/docker/libtrust/trustgraph/statement_test.go @@ -201,7 +201,7 @@ func TestCollapseGrants(t *testing.T) { collapsedGrants, expiration, err := CollapseStatements(statements, false) if len(collapsedGrants) != 12 { - t.Fatalf("Unexpected number of grants\n\tExpected: %d\n\tActual: %s", 12, len(collapsedGrants)) + t.Fatalf("Unexpected number of grants\n\tExpected: %d\n\tActual: %d", 12, len(collapsedGrants)) } if expiration.After(time.Now().Add(time.Hour*5)) || expiration.Before(time.Now()) { t.Fatalf("Unexpected expiration time: %s", expiration.String()) @@ -261,7 +261,7 @@ func TestCollapseGrants(t *testing.T) { collapsedGrants, expiration, err = CollapseStatements(statements, false) if len(collapsedGrants) != 12 { - t.Fatalf("Unexpected number of grants\n\tExpected: %d\n\tActual: %s", 12, len(collapsedGrants)) + t.Fatalf("Unexpected number of grants\n\tExpected: %d\n\tActual: %d", 12, len(collapsedGrants)) } if expiration.After(time.Now().Add(time.Hour*5)) || expiration.Before(time.Now()) { t.Fatalf("Unexpected expiration time: %s", expiration.String()) diff --git a/vendor/src/github.com/docker/libtrust/util.go b/vendor/src/github.com/docker/libtrust/util.go index 3b2fac95b1..4d5a6200a8 100644 --- a/vendor/src/github.com/docker/libtrust/util.go +++ b/vendor/src/github.com/docker/libtrust/util.go @@ -2,6 +2,7 @@ package libtrust import ( "bytes" + "crypto" "crypto/elliptic" "crypto/x509" "encoding/base32" @@ -52,6 +53,21 @@ func keyIDEncode(b []byte) string { return buf.String() } +func keyIDFromCryptoKey(pubKey PublicKey) string { + // Generate and return a 'libtrust' fingerprint of the public key. 
+ // For an RSA key this should be: + // SHA256(DER encoded ASN1) + // Then truncated to 240 bits and encoded into 12 base32 groups like so: + // ABCD:EFGH:IJKL:MNOP:QRST:UVWX:YZ23:4567:ABCD:EFGH:IJKL:MNOP + derBytes, err := x509.MarshalPKIXPublicKey(pubKey.CryptoPublicKey()) + if err != nil { + return "" + } + hasher := crypto.SHA256.New() + hasher.Write(derBytes) + return keyIDEncode(hasher.Sum(nil)[:30]) +} + func stringFromMap(m map[string]interface{}, key string) (string, error) { val, ok := m[key] if !ok { diff --git a/vendor/src/github.com/docker/libtrust/util_test.go b/vendor/src/github.com/docker/libtrust/util_test.go new file mode 100644 index 0000000000..ee54f5b8cc --- /dev/null +++ b/vendor/src/github.com/docker/libtrust/util_test.go @@ -0,0 +1,23 @@ +package libtrust + +import ( + "encoding/pem" + "reflect" + "testing" +) + +func TestAddPEMHeadersToKey(t *testing.T) { + pk := &rsaPublicKey{nil, map[string]interface{}{}} + blk := &pem.Block{Headers: map[string]string{"hosts": "localhost,127.0.0.1"}} + addPEMHeadersToKey(blk, pk) + + val := pk.GetExtendedField("hosts") + hosts, ok := val.([]string) + if !ok { + t.Fatalf("hosts type(%v), expected []string", reflect.TypeOf(val)) + } + expected := []string{"localhost", "127.0.0.1"} + if !reflect.DeepEqual(hosts, expected) { + t.Errorf("hosts(%v), expected %v", hosts, expected) + } +} From 227f4bbdb3a1e9ff0011d1ebaed39b3cb19d9e75 Mon Sep 17 00:00:00 2001 From: Victor Vieux Date: Mon, 17 Nov 2014 21:54:11 +0000 Subject: [PATCH 395/592] Hostname -> Name Signed-off-by: Victor Vieux --- api/client/commands.go | 4 ++-- daemon/info.go | 2 +- docs/sources/reference/api/docker_remote_api.md | 3 +-- docs/sources/reference/api/docker_remote_api_v1.16.md | 2 +- docs/sources/reference/commandline/cli.md | 2 +- 5 files changed, 6 insertions(+), 7 deletions(-) diff --git a/api/client/commands.go b/api/client/commands.go index 4f6f71d6d0..c930885bb9 100644 --- a/api/client/commands.go +++ b/api/client/commands.go @@ -505,8 +505,8 @@ func (cli *DockerCli) CmdInfo(args ...string) error { if remoteInfo.Exists("MemTotal") { fmt.Fprintf(cli.out, "Total Memory: %s\n", units.BytesSize(float64(remoteInfo.GetInt64("MemTotal")))) } - if remoteInfo.Exists("Hostname") { - fmt.Fprintf(cli.out, "Hostname: %s\n", remoteInfo.Get("Hostname")) + if remoteInfo.Exists("Name") { + fmt.Fprintf(cli.out, "Name: %s\n", remoteInfo.Get("Name")) } if remoteInfo.Exists("ID") { fmt.Fprintf(cli.out, "ID: %s\n", remoteInfo.Get("ID")) diff --git a/daemon/info.go b/daemon/info.go index c05c2a569d..bb7f450698 100644 --- a/daemon/info.go +++ b/daemon/info.go @@ -77,7 +77,7 @@ func (daemon *Daemon) CmdInfo(job *engine.Job) engine.Status { v.SetInt("NCPU", runtime.NumCPU()) v.SetInt64("MemTotal", meminfo.MemTotal) if hostname, err := os.Hostname(); err == nil { - v.Set("Hostname", hostname) + v.Set("Name", hostname) } if _, err := v.WriteTo(job.Stdout); err != nil { return job.Error(err) diff --git a/docs/sources/reference/api/docker_remote_api.md b/docs/sources/reference/api/docker_remote_api.md index 046e953b37..898cb571ea 100644 --- a/docs/sources/reference/api/docker_remote_api.md +++ b/docs/sources/reference/api/docker_remote_api.md @@ -50,8 +50,7 @@ You can still call an old version of the API using **New!** `info` now returns the number of CPUs available on the machine (`NCPU`), -total memory available (`MemTotal`), the short hostname (`Hostname`). and -the ID (`ID`). +total memory available (`MemTotal`), a name (`Name`), and the ID (`ID`). 
`POST /containers/create` diff --git a/docs/sources/reference/api/docker_remote_api_v1.16.md b/docs/sources/reference/api/docker_remote_api_v1.16.md index 5e78e02ffb..03c28820d4 100644 --- a/docs/sources/reference/api/docker_remote_api_v1.16.md +++ b/docs/sources/reference/api/docker_remote_api_v1.16.md @@ -1220,7 +1220,7 @@ Display system-wide information "KernelVersion":"3.12.0-1-amd64" "NCPU":1, "MemTotal":2099236864, - "Hostname":"prod-server-42", + "Name":"prod-server-42", "ID":"7TRN:IPZB:QYBB:VPBQ:UMPP:KARE:6ZNR:XE6T:7EWV:PKF4:ZOJD:TPYS" "Debug":false, "NFd": 11, diff --git a/docs/sources/reference/commandline/cli.md b/docs/sources/reference/commandline/cli.md index 24271a2c6e..2cc47b8bba 100644 --- a/docs/sources/reference/commandline/cli.md +++ b/docs/sources/reference/commandline/cli.md @@ -856,7 +856,7 @@ For example: Kernel Version: 3.13.0-24-generic Operating System: Ubuntu 14.04 LTS CPUs: 1 - Hostname: prod-server-42 + Name: prod-server-42 ID: 7TRN:IPZB:QYBB:VPBQ:UMPP:KARE:6ZNR:XE6T:7EWV:PKF4:ZOJD:TPYS Total Memory: 2 GiB Debug mode (server): false From 8ef36dcfe75752a5705813e2d9fa9359a8162b18 Mon Sep 17 00:00:00 2001 From: Victor Vieux Date: Mon, 17 Nov 2014 22:06:03 +0000 Subject: [PATCH 396/592] update docs Signed-off-by: Victor Vieux --- docs/sources/reference/api/docker_remote_api.md | 2 +- docs/sources/reference/api/docker_remote_api_v1.16.md | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/docs/sources/reference/api/docker_remote_api.md b/docs/sources/reference/api/docker_remote_api.md index 898cb571ea..353f04b501 100644 --- a/docs/sources/reference/api/docker_remote_api.md +++ b/docs/sources/reference/api/docker_remote_api.md @@ -50,7 +50,7 @@ You can still call an old version of the API using **New!** `info` now returns the number of CPUs available on the machine (`NCPU`), -total memory available (`MemTotal`), a name (`Name`), and the ID (`ID`). +total memory available (`MemTotal`), a user-friendly name describing the running Docker daemon (`Name`), and a unique ID identifying the daemon (`ID`). `POST /containers/create` diff --git a/docs/sources/reference/api/docker_remote_api_v1.16.md b/docs/sources/reference/api/docker_remote_api_v1.16.md index 03c28820d4..9ae057d3ba 100644 --- a/docs/sources/reference/api/docker_remote_api_v1.16.md +++ b/docs/sources/reference/api/docker_remote_api_v1.16.md @@ -1221,7 +1221,7 @@ Display system-wide information "NCPU":1, "MemTotal":2099236864, "Name":"prod-server-42", - "ID":"7TRN:IPZB:QYBB:VPBQ:UMPP:KARE:6ZNR:XE6T:7EWV:PKF4:ZOJD:TPYS" + "ID":"7TRN:IPZB:QYBB:VPBQ:UMPP:KARE:6ZNR:XE6T:7EWV:PKF4:ZOJD:TPYS", "Debug":false, "NFd": 11, "NGoroutines":21, From 1314e1586f8cd6201c16161eb960a743c727946b Mon Sep 17 00:00:00 2001 From: Doug Davis Date: Thu, 25 Sep 2014 19:28:24 -0700 Subject: [PATCH 397/592] Add support for ENV of the form: ENV name=value ... 
still supports the old form: ENV name value Also, fixed an issue with the parser where it would ignore lines at the end of the Dockerfile that ended with \ Closes #2333 Signed-off-by: Doug Davis --- builder/dispatchers.go | 38 +++-- builder/parser/line_parsers.go | 139 +++++++++++++++++- builder/parser/parser.go | 6 + .../Dockerfile | 2 +- builder/parser/testfiles/env/Dockerfile | 15 ++ builder/parser/testfiles/env/result | 10 ++ docs/sources/reference/builder.md | 25 ++++ integration-cli/docker_cli_build_test.go | 40 +++++ 8 files changed, 256 insertions(+), 19 deletions(-) rename builder/parser/testfiles-negative/{env_equals_env => env_no_value}/Dockerfile (50%) create mode 100644 builder/parser/testfiles/env/Dockerfile create mode 100644 builder/parser/testfiles/env/result diff --git a/builder/dispatchers.go b/builder/dispatchers.go index d1f2890ada..99be480f73 100644 --- a/builder/dispatchers.go +++ b/builder/dispatchers.go @@ -31,21 +31,39 @@ func nullDispatch(b *Builder, args []string, attributes map[string]bool, origina // in the dockerfile available from the next statement on via ${foo}. // func env(b *Builder, args []string, attributes map[string]bool, original string) error { - if len(args) != 2 { - return fmt.Errorf("ENV accepts two arguments") + if len(args) == 0 { + return fmt.Errorf("ENV is missing arguments") } - fullEnv := fmt.Sprintf("%s=%s", args[0], args[1]) + if len(args)%2 != 0 { + // should never get here, but just in case + return fmt.Errorf("Bad input to ENV, too many args") + } - for i, envVar := range b.Config.Env { - envParts := strings.SplitN(envVar, "=", 2) - if args[0] == envParts[0] { - b.Config.Env[i] = fullEnv - return b.commit("", b.Config.Cmd, fmt.Sprintf("ENV %s", fullEnv)) + commitStr := "ENV" + + for j := 0; j < len(args); j++ { + // name ==> args[j] + // value ==> args[j+1] + newVar := args[j] + "=" + args[j+1] + "" + commitStr += " " + newVar + + gotOne := false + for i, envVar := range b.Config.Env { + envParts := strings.SplitN(envVar, "=", 2) + if envParts[0] == args[j] { + b.Config.Env[i] = newVar + gotOne = true + break + } } + if !gotOne { + b.Config.Env = append(b.Config.Env, newVar) + } + j++ } - b.Config.Env = append(b.Config.Env, fullEnv) - return b.commit("", b.Config.Cmd, fmt.Sprintf("ENV %s", fullEnv)) + + return b.commit("", b.Config.Cmd, commitStr) } // MAINTAINER some text diff --git a/builder/parser/line_parsers.go b/builder/parser/line_parsers.go index 358e2f73a0..abde85d292 100644 --- a/builder/parser/line_parsers.go +++ b/builder/parser/line_parsers.go @@ -12,6 +12,7 @@ import ( "fmt" "strconv" "strings" + "unicode" ) var ( @@ -41,17 +42,139 @@ func parseSubCommand(rest string) (*Node, map[string]bool, error) { // parse environment like statements. Note that this does *not* handle // variable interpolation, which will be handled in the evaluator. func parseEnv(rest string) (*Node, map[string]bool, error) { - node := &Node{} - rootnode := node - strs := TOKEN_WHITESPACE.Split(rest, 2) + // This is kind of tricky because we need to support the old + // variant: ENV name value + // as well as the new one: ENV name=value ... + // The trigger to know which one is being used will be whether we hit + // a space or = first. 
space ==> old, "=" ==> new - if len(strs) < 2 { - return nil, nil, fmt.Errorf("ENV must have two arguments") + const ( + inSpaces = iota // looking for start of a word + inWord + inQuote + ) + + words := []string{} + phase := inSpaces + word := "" + quote := '\000' + blankOK := false + var ch rune + + for pos := 0; pos <= len(rest); pos++ { + if pos != len(rest) { + ch = rune(rest[pos]) + } + + if phase == inSpaces { // Looking for start of word + if pos == len(rest) { // end of input + break + } + if unicode.IsSpace(ch) { // skip spaces + continue + } + phase = inWord // found it, fall thru + } + if (phase == inWord || phase == inQuote) && (pos == len(rest)) { + if blankOK || len(word) > 0 { + words = append(words, word) + } + break + } + if phase == inWord { + if unicode.IsSpace(ch) { + phase = inSpaces + if blankOK || len(word) > 0 { + words = append(words, word) + + // Look for = and if no there assume + // we're doing the old stuff and + // just read the rest of the line + if !strings.Contains(word, "=") { + word = strings.TrimSpace(rest[pos:]) + words = append(words, word) + break + } + } + word = "" + blankOK = false + continue + } + if ch == '\'' || ch == '"' { + quote = ch + blankOK = true + phase = inQuote + continue + } + if ch == '\\' { + if pos+1 == len(rest) { + continue // just skip \ at end + } + pos++ + ch = rune(rest[pos]) + } + word += string(ch) + continue + } + if phase == inQuote { + if ch == quote { + phase = inWord + continue + } + if ch == '\\' { + if pos+1 == len(rest) { + phase = inWord + continue // just skip \ at end + } + pos++ + ch = rune(rest[pos]) + } + word += string(ch) + } } - node.Value = strs[0] - node.Next = &Node{} - node.Next.Value = strs[1] + if len(words) == 0 { + return nil, nil, fmt.Errorf("ENV must have some arguments") + } + + // Old format (ENV name value) + var rootnode *Node + + if !strings.Contains(words[0], "=") { + node := &Node{} + rootnode = node + strs := TOKEN_WHITESPACE.Split(rest, 2) + + if len(strs) < 2 { + return nil, nil, fmt.Errorf("ENV must have two arguments") + } + + node.Value = strs[0] + node.Next = &Node{} + node.Next.Value = strs[1] + } else { + var prevNode *Node + for i, word := range words { + if !strings.Contains(word, "=") { + return nil, nil, fmt.Errorf("Syntax error - can't find = in %q. 
Must be of the form: name=value", word) + } + parts := strings.SplitN(word, "=", 2) + + name := &Node{} + value := &Node{} + + name.Next = value + name.Value = parts[0] + value.Value = parts[1] + + if i == 0 { + rootnode = name + } else { + prevNode.Next = name + } + prevNode = value + } + } return rootnode, nil, nil } diff --git a/builder/parser/parser.go b/builder/parser/parser.go index 9e34b5920e..ad42a1586e 100644 --- a/builder/parser/parser.go +++ b/builder/parser/parser.go @@ -125,6 +125,12 @@ func Parse(rwc io.Reader) (*Node, error) { break } } + if child == nil && line != "" { + line, child, err = parseLine(line) + if err != nil { + return nil, err + } + } } if child != nil { diff --git a/builder/parser/testfiles-negative/env_equals_env/Dockerfile b/builder/parser/testfiles-negative/env_no_value/Dockerfile similarity index 50% rename from builder/parser/testfiles-negative/env_equals_env/Dockerfile rename to builder/parser/testfiles-negative/env_no_value/Dockerfile index 08675148ae..1d65578794 100644 --- a/builder/parser/testfiles-negative/env_equals_env/Dockerfile +++ b/builder/parser/testfiles-negative/env_no_value/Dockerfile @@ -1,3 +1,3 @@ FROM busybox -ENV PATH=PATH +ENV PATH diff --git a/builder/parser/testfiles/env/Dockerfile b/builder/parser/testfiles/env/Dockerfile new file mode 100644 index 0000000000..bb78503cce --- /dev/null +++ b/builder/parser/testfiles/env/Dockerfile @@ -0,0 +1,15 @@ +FROM ubuntu +ENV name value +ENV name=value +ENV name=value name2=value2 +ENV name="value value1" +ENV name=value\ value2 +ENV name="value'quote space'value2" +ENV name='value"double quote"value2' +ENV name=value\ value2 name2=value2\ value3 +ENV name=value \ + name1=value1 \ + name2="value2a \ + value2b" \ + name3="value3a\n\"value3b\"" \ + name4="value4a\\nvalue4b" \ diff --git a/builder/parser/testfiles/env/result b/builder/parser/testfiles/env/result new file mode 100644 index 0000000000..a473d0fa39 --- /dev/null +++ b/builder/parser/testfiles/env/result @@ -0,0 +1,10 @@ +(from "ubuntu") +(env "name" "value") +(env "name" "value") +(env "name" "value" "name2" "value2") +(env "name" "value value1") +(env "name" "value value2") +(env "name" "value'quote space'value2") +(env "name" "value\"double quote\"value2") +(env "name" "value value2" "name2" "value2 value3") +(env "name" "value" "name1" "value1" "name2" "value2a value2b" "name3" "value3an\"value3b\"" "name4" "value4a\\nvalue4b") diff --git a/docs/sources/reference/builder.md b/docs/sources/reference/builder.md index 19cc16ad0f..14961eeec0 100644 --- a/docs/sources/reference/builder.md +++ b/docs/sources/reference/builder.md @@ -337,11 +337,36 @@ expose ports to the host, at runtime, ## ENV ENV + ENV = ... The `ENV` instruction sets the environment variable `` to the value ``. This value will be passed to all future `RUN` instructions. This is functionally equivalent to prefixing the command with `=` +The `ENV` instruction has two forms. The first form, `ENV `, +will set a single variable to a value. The entire string after the first +space will be treated as the `` - including characters such as +spaces and quotes. + +The second form, `ENV = ...`, allows for multiple variables to +be set at one time. Notice that the second form uses the equals sign (=) +in the syntax, while the first form does not. Like command line parsing, +quotes and backslashes can be used to include spaces within values. 
+ +For example: + + ENV myName="John Doe" myDog=Rex\ The\ Dog \ + myCat=fluffy + +and + + ENV myName John Doe + ENV myDog Rex The Dog + ENV myCat fluffy + +will yield the same net results in the final container, but the first form +does it all in one layer. + The environment variables set using `ENV` will persist when a container is run from the resulting image. You can view the values using `docker inspect`, and change them using `docker run --env =`. diff --git a/integration-cli/docker_cli_build_test.go b/integration-cli/docker_cli_build_test.go index de60a8017f..1979ee908f 100644 --- a/integration-cli/docker_cli_build_test.go +++ b/integration-cli/docker_cli_build_test.go @@ -2951,6 +2951,46 @@ RUN [ "$(cat $TO)" = "hello" ] logDone("build - environment variables usage") } +func TestBuildEnvUsage2(t *testing.T) { + name := "testbuildenvusage2" + defer deleteImages(name) + dockerfile := `FROM busybox +ENV abc=def +RUN [ "$abc" = "def" ] +ENV def="hello world" +RUN [ "$def" = "hello world" ] +ENV def=hello\ world +RUN [ "$def" = "hello world" ] +ENV v1=abc v2="hi there" +RUN [ "$v1" = "abc" ] +RUN [ "$v2" = "hi there" ] +ENV v3='boogie nights' v4="with'quotes too" +RUN [ "$v3" = "boogie nights" ] +RUN [ "$v4" = "with'quotes too" ] +ENV abc=zzz FROM=hello/docker/world +ENV abc=zzz TO=/docker/world/hello +ADD $FROM $TO +RUN [ "$(cat $TO)" = "hello" ] +ENV abc "zzz" +RUN [ $abc = \"zzz\" ] +ENV abc 'yyy' +RUN [ $abc = \'yyy\' ] +ENV abc= +RUN [ "$abc" = "" ] +` + ctx, err := fakeContext(dockerfile, map[string]string{ + "hello/docker/world": "hello", + }) + if err != nil { + t.Fatal(err) + } + _, err = buildImageFromContext(name, ctx, true) + if err != nil { + t.Fatal(err) + } + logDone("build - environment variables usage2") +} + func TestBuildAddScript(t *testing.T) { name := "testbuildaddscript" defer deleteImages(name) From bce9ed0e4c56b85c8a4a5ba2b1af45035deec9dd Mon Sep 17 00:00:00 2001 From: Dan Walsh Date: Thu, 20 Nov 2014 12:56:54 -0500 Subject: [PATCH 398/592] Allow developers to build docker with debuginfo included If you execute DEBUG=-g hack/make.sh dynbinary Docker will be build with the debug info making it easier to use cgdb or lightide to debug. 
Docker-DCO-1.1-Signed-off-by: Dan Walsh (github: rhatdan) --- project/make.sh | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/project/make.sh b/project/make.sh index d6da3057fa..36e5e161bf 100755 --- a/project/make.sh +++ b/project/make.sh @@ -96,10 +96,14 @@ fi # Use these flags when compiling the tests and final binary LDFLAGS=' - -w -X '$DOCKER_PKG'/dockerversion.GITCOMMIT "'$GITCOMMIT'" -X '$DOCKER_PKG'/dockerversion.VERSION "'$VERSION'" ' + +if [ -z "$DEBUG" ]; then + LDFLAGS="-w $LDFLAGS" +fi + LDFLAGS_STATIC='-linkmode external' EXTLDFLAGS_STATIC='-static' # ORIG_BUILDFLAGS is necessary for the cross target which cannot always build From 2bceaae42399ce33e8c724d1ac435eca6759637b Mon Sep 17 00:00:00 2001 From: "Daniel, Dao Quang Minh" Date: Fri, 14 Nov 2014 12:37:04 -0500 Subject: [PATCH 399/592] test case for preserving env in exec session Docker-DCO-1.1-Signed-off-by: Daniel, Dao Quang Minh (github: dqminh) --- integration-cli/docker_cli_exec_test.go | 27 +++++++++++++++++++++++++ 1 file changed, 27 insertions(+) diff --git a/integration-cli/docker_cli_exec_test.go b/integration-cli/docker_cli_exec_test.go index ed5778bbb2..438271744a 100644 --- a/integration-cli/docker_cli_exec_test.go +++ b/integration-cli/docker_cli_exec_test.go @@ -186,3 +186,30 @@ func TestExecAfterDaemonRestart(t *testing.T) { logDone("exec - exec running container after daemon restart") } + +// Regresssion test for #9155, #9044 +func TestExecEnv(t *testing.T) { + defer deleteAllContainers() + + runCmd := exec.Command(dockerBinary, "run", + "-e", "LALA=value1", + "-e", "LALA=value2", + "-d", "--name", "testing", "busybox", "top") + if out, _, _, err := runCommandWithStdoutStderr(runCmd); err != nil { + t.Fatal(out, err) + } + + execCmd := exec.Command(dockerBinary, "exec", "testing", "env") + out, _, err := runCommandWithOutput(execCmd) + if err != nil { + t.Fatal(out, err) + } + + if strings.Contains(out, "LALA=value1") || + !strings.Contains(out, "LALA=value2") || + !strings.Contains(out, "HOME=/root") { + t.Errorf("exec env(%q), expect %q, %q", out, "LALA=value2", "HOME=/root") + } + + logDone("exec - exec inherits correct env") +} From 2fe36baa0a39840e64f1dc585af41b5ee0ed6df5 Mon Sep 17 00:00:00 2001 From: Victor Vieux Date: Thu, 20 Nov 2014 18:36:05 +0000 Subject: [PATCH 400/592] add daemon labels Signed-off-by: Victor Vieux --- api/client/commands.go | 7 +++++++ daemon/config.go | 2 ++ daemon/info.go | 1 + docs/man/docker.1.md | 3 +++ docs/sources/reference/api/docker_remote_api.md | 3 ++- docs/sources/reference/api/docker_remote_api_v1.16.md | 3 ++- docs/sources/reference/commandline/cli.md | 8 ++++++-- opts/opts.go | 11 +++++++++++ 8 files changed, 34 insertions(+), 4 deletions(-) diff --git a/api/client/commands.go b/api/client/commands.go index d0a0792399..8884878cc8 100644 --- a/api/client/commands.go +++ b/api/client/commands.go @@ -554,6 +554,13 @@ func (cli *DockerCli) CmdInfo(args ...string) error { if remoteInfo.Exists("IPv4Forwarding") && !remoteInfo.GetBool("IPv4Forwarding") { fmt.Fprintf(cli.err, "WARNING: IPv4 forwarding is disabled.\n") } + if remoteInfo.Exists("Labels") { + fmt.Fprintln(cli.out, "Labels:") + for _, attribute := range remoteInfo.GetList("Labels") { + fmt.Fprintf(cli.out, " %s\n", attribute) + } + } + return nil } diff --git a/daemon/config.go b/daemon/config.go index cbdd95da00..beb3b25a5a 100644 --- a/daemon/config.go +++ b/daemon/config.go @@ -41,6 +41,7 @@ type Config struct { EnableSelinuxSupport bool Context map[string][]string TrustKeyPath 
string + Labels []string } // InstallFlags adds command-line options to the top-level flag parser for @@ -69,6 +70,7 @@ func (config *Config) InstallFlags() { opts.IPListVar(&config.Dns, []string{"#dns", "-dns"}, "Force Docker to use specific DNS servers") opts.DnsSearchListVar(&config.DnsSearch, []string{"-dns-search"}, "Force Docker to use specific DNS search domains") opts.MirrorListVar(&config.Mirrors, []string{"-registry-mirror"}, "Specify a preferred Docker registry mirror") + opts.LabelListVar(&config.Labels, []string{"-label"}, "Set key=values labels to the daemon (displayed in `docker info`)") // Localhost is by default considered as an insecure registry // This is a stop-gap for people who are running a private registry on localhost (especially on Boot2docker). diff --git a/daemon/info.go b/daemon/info.go index bb7f450698..2807adab38 100644 --- a/daemon/info.go +++ b/daemon/info.go @@ -79,6 +79,7 @@ func (daemon *Daemon) CmdInfo(job *engine.Job) engine.Status { if hostname, err := os.Hostname(); err == nil { v.Set("Name", hostname) } + v.SetList("Labels", daemon.Config().Labels) if _, err := v.WriteTo(job.Stdout); err != nil { return job.Error(err) } diff --git a/docs/man/docker.1.md b/docs/man/docker.1.md index f3ff68bc9f..e5a1bc24d7 100644 --- a/docs/man/docker.1.md +++ b/docs/man/docker.1.md @@ -68,6 +68,9 @@ unix://[/path/to/socket] to use. **-l**, **--log-level**="*debug*|*info*|*error*|*fatal*"" Set the logging level. Default is `info`. +**--label**="[]" + Set key=values labels to the daemon (displayed in `docker info`) + **--mtu**=VALUE Set the containers network mtu. Default is `1500`. diff --git a/docs/sources/reference/api/docker_remote_api.md b/docs/sources/reference/api/docker_remote_api.md index 353f04b501..d61b25bf0b 100644 --- a/docs/sources/reference/api/docker_remote_api.md +++ b/docs/sources/reference/api/docker_remote_api.md @@ -50,7 +50,8 @@ You can still call an old version of the API using **New!** `info` now returns the number of CPUs available on the machine (`NCPU`), -total memory available (`MemTotal`), a user-friendly name describing the running Docker daemon (`Name`), and a unique ID identifying the daemon (`ID`). +total memory available (`MemTotal`), a user-friendly name describing the running Docker daemon (`Name`), a unique ID identifying the daemon (`ID`), and +a list of daemon labels (`Labels`). `POST /containers/create` diff --git a/docs/sources/reference/api/docker_remote_api_v1.16.md b/docs/sources/reference/api/docker_remote_api_v1.16.md index e643d1a5c7..dc2cc56267 100644 --- a/docs/sources/reference/api/docker_remote_api_v1.16.md +++ b/docs/sources/reference/api/docker_remote_api_v1.16.md @@ -1230,7 +1230,8 @@ Display system-wide information "IndexServerAddress":["https://index.docker.io/v1/"], "MemoryLimit":true, "SwapLimit":false, - "IPv4Forwarding":true + "IPv4Forwarding":true, + "Labels":["storage=ssd"] } Status Codes: diff --git a/docs/sources/reference/commandline/cli.md b/docs/sources/reference/commandline/cli.md index 7772059411..7b1b6187b0 100644 --- a/docs/sources/reference/commandline/cli.md +++ b/docs/sources/reference/commandline/cli.md @@ -76,7 +76,7 @@ expect an integer, and they can only be specified once. 
--ip-masq=true Enable IP masquerading for bridge's IP range --iptables=true Enable Docker's addition of iptables rules -l, --log-level="info" Set the logging level - + --label=[] Set key=values labels to the daemon (displayed in `docker info`) --mtu=0 Set the containers network MTU if no value is provided: default to the default route MTU or 1500 if no default route is available -p, --pidfile="/var/run/docker.pid" Path to use for daemon PID file @@ -851,7 +851,9 @@ For example: $ sudo docker -D info Containers: 14 Images: 52 - Storage Driver: btrfs + Storage Driver: aufs + Root Dir: /var/lib/docker/aufs + Dirs: 545 Execution Driver: native-0.2 Kernel Version: 3.13.0-24-generic Operating System: Ubuntu 14.04 LTS @@ -867,6 +869,8 @@ For example: Init Path: /usr/bin/docker Username: svendowideit Registry: [https://index.docker.io/v1/] + Labels: + storage=ssd The global `-D` option tells all `docker` commands to output debug information. diff --git a/opts/opts.go b/opts/opts.go index d3202969b4..f15064ac69 100644 --- a/opts/opts.go +++ b/opts/opts.go @@ -43,6 +43,10 @@ func MirrorListVar(values *[]string, names []string, usage string) { flag.Var(newListOptsRef(values, ValidateMirror), names, usage) } +func LabelListVar(values *[]string, names []string, usage string) { + flag.Var(newListOptsRef(values, ValidateLabel), names, usage) +} + // ListOpts type type ListOpts struct { values *[]string @@ -227,3 +231,10 @@ func ValidateMirror(val string) (string, error) { return fmt.Sprintf("%s://%s/v1/", uri.Scheme, uri.Host), nil } + +func ValidateLabel(val string) (string, error) { + if strings.Count(val, "=") != 1 { + return "", fmt.Errorf("bad attribute format: %s", val) + } + return val, nil +} From 7ff3b81054b028a4399c86340489fe3937049abe Mon Sep 17 00:00:00 2001 From: Victor Vieux Date: Thu, 20 Nov 2014 19:46:48 +0000 Subject: [PATCH 401/592] events filtering Signed-off-by: Victor Vieux --- api/client/commands.go | 26 +++++- api/server/server.go | 1 + .../reference/api/docker_remote_api_v1.16.md | 1 + docs/sources/reference/commandline/cli.md | 80 ++++++++++++++++--- events/events.go | 32 ++++++-- integration-cli/docker_cli_events_test.go | 63 ++++++++++++++- 6 files changed, 179 insertions(+), 24 deletions(-) diff --git a/api/client/commands.go b/api/client/commands.go index d0a0792399..93d0bccc5c 100644 --- a/api/client/commands.go +++ b/api/client/commands.go @@ -1760,6 +1760,10 @@ func (cli *DockerCli) CmdEvents(args ...string) error { cmd := cli.Subcmd("events", "", "Get real time events from the server") since := cmd.String([]string{"#since", "-since"}, "", "Show all events created since timestamp") until := cmd.String([]string{"-until"}, "", "Stream events until this timestamp") + + flFilter := opts.NewListOpts(nil) + cmd.Var(&flFilter, []string{"f", "-filter"}, "Provide filter values (i.e. 'event=stop')") + if err := cmd.Parse(args); err != nil { return nil } @@ -1769,9 +1773,20 @@ func (cli *DockerCli) CmdEvents(args ...string) error { return nil } var ( - v = url.Values{} - loc = time.FixedZone(time.Now().Zone()) + v = url.Values{} + loc = time.FixedZone(time.Now().Zone()) + eventFilterArgs = filters.Args{} ) + + // Consolidate all filter flags, and sanity check them early. + // They'll get process in the daemon/server. 
+ for _, f := range flFilter.GetAll() { + var err error + eventFilterArgs, err = filters.ParseFlag(f, eventFilterArgs) + if err != nil { + return err + } + } var setTime = func(key, value string) { format := timeutils.RFC3339NanoFixed if len(value) < len(format) { @@ -1789,6 +1804,13 @@ func (cli *DockerCli) CmdEvents(args ...string) error { if *until != "" { setTime("until", *until) } + if len(eventFilterArgs) > 0 { + filterJson, err := filters.ToParam(eventFilterArgs) + if err != nil { + return err + } + v.Set("filters", filterJson) + } if err := cli.stream("GET", "/events?"+v.Encode(), nil, cli.out, nil); err != nil { return err } diff --git a/api/server/server.go b/api/server/server.go index d9b73e6798..488a94b483 100644 --- a/api/server/server.go +++ b/api/server/server.go @@ -315,6 +315,7 @@ func getEvents(eng *engine.Engine, version version.Version, w http.ResponseWrite streamJSON(job, w, true) job.Setenv("since", r.Form.Get("since")) job.Setenv("until", r.Form.Get("until")) + job.Setenv("filters", r.Form.Get("filters")) return job.Run() } diff --git a/docs/sources/reference/api/docker_remote_api_v1.16.md b/docs/sources/reference/api/docker_remote_api_v1.16.md index e643d1a5c7..6d1d47b9c4 100644 --- a/docs/sources/reference/api/docker_remote_api_v1.16.md +++ b/docs/sources/reference/api/docker_remote_api_v1.16.md @@ -1386,6 +1386,7 @@ Query Parameters: - **since** – timestamp used for polling - **until** – timestamp used for polling +- **filters** – a json encoded value of the filters (a map[string][]string) to process on the event list. Status Codes: diff --git a/docs/sources/reference/commandline/cli.md b/docs/sources/reference/commandline/cli.md index 7772059411..41931983ed 100644 --- a/docs/sources/reference/commandline/cli.md +++ b/docs/sources/reference/commandline/cli.md @@ -610,7 +610,10 @@ For example: Usage: docker events [OPTIONS] Get real time events from the server - + -f, --filter=[] Provide filter values. Valid filters: + event= - event to filter + image= - image to filter + container= - container to filter --since="" Show all events created since timestamp --until="" Stream events until this timestamp @@ -622,6 +625,24 @@ and Docker images will report: untag, delete +#### Filtering + +The filtering flag (`-f` or `--filter`) format is of "key=value". If you would like to use +multiple filters, pass multiple flags (e.g., `--filter "foo=bar" --filter "bif=baz"`) + +Using the same filter multiple times will be handled as a `OR`; for example +`--filter container=588a23dac085 --filter container=a8f7720b8c22` will display events for +container 588a23dac085 `OR` container a8f7720b8c22 + +Using multiple filters will be handled as a `AND`; for example +`--filter container=588a23dac085 --filter event=start` will display events for container +container 588a23dac085 `AND` only when the event type is `start` + +Current filters: + * event + * image + * container + #### Examples You'll need two shells for this example. @@ -630,31 +651,64 @@ You'll need two shells for this example. $ sudo docker events -**Shell 2: Start and Stop a Container:** +**Shell 2: Start and Stop containers:** $ sudo docker start 4386fb97867d $ sudo docker stop 4386fb97867d + $ sudo docker stop 7805c1d35632 **Shell 1: (Again .. 
now showing events):** - 2014-05-10T17:42:14.999999999Z07:00 4386fb97867d: (from 12de384bfb10) start - 2014-05-10T17:42:14.999999999Z07:00 4386fb97867d: (from 12de384bfb10) die - 2014-05-10T17:42:14.999999999Z07:00 4386fb97867d: (from 12de384bfb10) stop + 2014-05-10T17:42:14.999999999Z07:00 4386fb97867d: (from ubuntu-1:14.04) start + 2014-05-10T17:42:14.999999999Z07:00 4386fb97867d: (from ubuntu-1:14.04) die + 2014-05-10T17:42:14.999999999Z07:00 4386fb97867d: (from ubuntu-1:14.04) stop + 2014-05-10T17:42:14.999999999Z07:00 7805c1d35632: (from redis:2.8) die + 2014-05-10T17:42:14.999999999Z07:00 7805c1d35632: (from redis:2.8) stop **Show events in the past from a specified time:** $ sudo docker events --since 1378216169 - 2014-03-10T17:42:14.999999999Z07:00 4386fb97867d: (from 12de384bfb10) die - 2014-03-10T17:42:14.999999999Z07:00 4386fb97867d: (from 12de384bfb10) stop + 2014-03-10T17:42:14.999999999Z07:00 4386fb97867d: (from ubuntu-1:14.04) die + 2014-05-10T17:42:14.999999999Z07:00 4386fb97867d: (from ubuntu-1:14.04) stop + 2014-05-10T17:42:14.999999999Z07:00 7805c1d35632: (from redis:2.8) die + 2014-03-10T17:42:14.999999999Z07:00 7805c1d35632: (from redis:2.8) stop $ sudo docker events --since '2013-09-03' - 2014-09-03T17:42:14.999999999Z07:00 4386fb97867d: (from 12de384bfb10) start - 2014-09-03T17:42:14.999999999Z07:00 4386fb97867d: (from 12de384bfb10) die - 2014-09-03T17:42:14.999999999Z07:00 4386fb97867d: (from 12de384bfb10) stop + 2014-09-03T17:42:14.999999999Z07:00 4386fb97867d: (from ubuntu-1:14.04) start + 2014-09-03T17:42:14.999999999Z07:00 4386fb97867d: (from ubuntu-1:14.04) die + 2014-05-10T17:42:14.999999999Z07:00 4386fb97867d: (from ubuntu-1:14.04) stop + 2014-05-10T17:42:14.999999999Z07:00 7805c1d35632: (from redis:2.8) die + 2014-09-03T17:42:14.999999999Z07:00 7805c1d35632: (from redis:2.8) stop $ sudo docker events --since '2013-09-03 15:49:29 +0200 CEST' - 2014-09-03T15:49:29.999999999Z07:00 4386fb97867d: (from 12de384bfb10) die - 2014-09-03T15:49:29.999999999Z07:00 4386fb97867d: (from 12de384bfb10) stop + 2014-09-03T15:49:29.999999999Z07:00 4386fb97867d: (from ubuntu-1:14.04) die + 2014-05-10T17:42:14.999999999Z07:00 4386fb97867d: (from ubuntu-1:14.04) stop + 2014-05-10T17:42:14.999999999Z07:00 7805c1d35632: (from redis:2.8) die + 2014-09-03T15:49:29.999999999Z07:00 7805c1d35632: (from redis:2.8) stop + +**Filter events:** + + $ sudo docker events --filter 'event=stop' + 2014-05-10T17:42:14.999999999Z07:00 4386fb97867d: (from ubuntu-1:14.04) stop + 2014-09-03T17:42:14.999999999Z07:00 7805c1d35632: (from redis:2.8) stop + + $ sudo docker events --filter 'image=ubuntu-1:14.04' + 2014-05-10T17:42:14.999999999Z07:00 4386fb97867d: (from ubuntu-1:14.04) start + 2014-05-10T17:42:14.999999999Z07:00 4386fb97867d: (from ubuntu-1:14.04) die + 2014-05-10T17:42:14.999999999Z07:00 4386fb97867d: (from ubuntu-1:14.04) stop + + $ sudo docker events --filter 'container=7805c1d35632' + 2014-05-10T17:42:14.999999999Z07:00 7805c1d35632: (from redis:2.8) die + 2014-09-03T15:49:29.999999999Z07:00 7805c1d35632: (from redis:2.8) stop + + $ sudo docker events --filter 'container=7805c1d35632' --filter 'container=4386fb97867d' + 2014-09-03T15:49:29.999999999Z07:00 4386fb97867d: (from ubuntu-1:14.04) die + 2014-05-10T17:42:14.999999999Z07:00 4386fb97867d: (from ubuntu-1:14.04) stop + 2014-05-10T17:42:14.999999999Z07:00 7805c1d35632: (from redis:2.8) die + 2014-09-03T15:49:29.999999999Z07:00 7805c1d35632: (from redis:2.8) stop + + $ sudo docker events --filter 'container=7805c1d35632' --filter 
'event=stop' + 2014-09-03T15:49:29.999999999Z07:00 7805c1d35632: (from redis:2.8) stop ## exec @@ -768,7 +822,7 @@ by default. #### Filtering -The filtering flag (`-f` or `--filter`) format is of "key=value". If there are more +The filtering flag (`-f` or `--filter`) format is of "key=value". If there is more than one filter, then pass multiple flags (e.g., `--filter "foo=bar" --filter "bif=baz"`) Current filters: diff --git a/events/events.go b/events/events.go index 57a82cada0..0951f7099d 100644 --- a/events/events.go +++ b/events/events.go @@ -6,6 +6,7 @@ import ( "time" "github.com/docker/docker/engine" + "github.com/docker/docker/pkg/parsers/filters" "github.com/docker/docker/utils" ) @@ -48,6 +49,11 @@ func (e *Events) Get(job *engine.Job) engine.Status { timeout = time.NewTimer(time.Unix(until, 0).Sub(time.Now())) ) + eventFilters, err := filters.FromParam(job.Getenv("filters")) + if err != nil { + return job.Error(err) + } + // If no until, disable timeout if until == 0 { timeout.Stop() @@ -61,7 +67,7 @@ func (e *Events) Get(job *engine.Job) engine.Status { // Resend every event in the [since, until] time interval. if since != 0 { - if err := e.writeCurrent(job, since, until); err != nil { + if err := e.writeCurrent(job, since, until, eventFilters); err != nil { return job.Error(err) } } @@ -72,7 +78,7 @@ func (e *Events) Get(job *engine.Job) engine.Status { if !ok { return engine.StatusOK } - if err := writeEvent(job, event); err != nil { + if err := writeEvent(job, event, eventFilters); err != nil { return job.Error(err) } case <-timeout.C: @@ -97,7 +103,23 @@ func (e *Events) SubscribersCount(job *engine.Job) engine.Status { return engine.StatusOK } -func writeEvent(job *engine.Job, event *utils.JSONMessage) error { +func writeEvent(job *engine.Job, event *utils.JSONMessage, eventFilters filters.Args) error { + isFiltered := func(field string, filter []string) bool { + if len(filter) == 0 { + return false + } + for _, v := range filter { + if v == field { + return false + } + } + return true + } + + if isFiltered(event.Status, eventFilters["event"]) || isFiltered(event.From, eventFilters["image"]) || isFiltered(event.ID, eventFilters["container"]) { + return nil + } + // When sending an event JSON serialization errors are ignored, but all // other errors lead to the eviction of the listener. 
if b, err := json.Marshal(event); err == nil { @@ -108,11 +130,11 @@ func writeEvent(job *engine.Job, event *utils.JSONMessage) error { return nil } -func (e *Events) writeCurrent(job *engine.Job, since, until int64) error { +func (e *Events) writeCurrent(job *engine.Job, since, until int64, eventFilters filters.Args) error { e.mu.RLock() for _, event := range e.events { if event.Time >= since && (event.Time <= until || until == 0) { - if err := writeEvent(job, event); err != nil { + if err := writeEvent(job, event, eventFilters); err != nil { e.mu.RUnlock() return err } diff --git a/integration-cli/docker_cli_events_test.go b/integration-cli/docker_cli_events_test.go index 5c197b92fb..0733e78ed0 100644 --- a/integration-cli/docker_cli_events_test.go +++ b/integration-cli/docker_cli_events_test.go @@ -61,7 +61,7 @@ func TestEventsPause(t *testing.T) { t.Fatalf("event should be pause, not %#v", pauseEvent) } if unpauseEvent[len(unpauseEvent)-1] != "unpause" { - t.Fatalf("event should be pause, not %#v", unpauseEvent) + t.Fatalf("event should be unpause, not %#v", unpauseEvent) } waitCmd := exec.Command(dockerBinary, "wait", name) @@ -138,13 +138,13 @@ func TestEventsContainerEvents(t *testing.T) { t.Fatalf("event should be create, not %#v", createEvent) } if startEvent[len(startEvent)-1] != "start" { - t.Fatalf("event should be pause, not %#v", startEvent) + t.Fatalf("event should be start, not %#v", startEvent) } if dieEvent[len(dieEvent)-1] != "die" { - t.Fatalf("event should be pause, not %#v", dieEvent) + t.Fatalf("event should be die, not %#v", dieEvent) } if destroyEvent[len(destroyEvent)-1] != "destroy" { - t.Fatalf("event should be pause, not %#v", destroyEvent) + t.Fatalf("event should be destroy, not %#v", destroyEvent) } logDone("events - container create, start, die, destroy is logged") @@ -283,3 +283,58 @@ func TestEventsImageImport(t *testing.T) { logDone("events - image import is logged") } + +func TestEventsFilters(t *testing.T) { + now := time.Now().Unix() + cmd(t, "run", "--rm", "busybox", "true") + cmd(t, "run", "--rm", "busybox", "true") + eventsCmd := exec.Command(dockerBinary, "events", fmt.Sprintf("--since=%d", now), fmt.Sprintf("--until=%d", time.Now().Unix()), "--filter", "event=die") + out, exitCode, err := runCommandWithOutput(eventsCmd) + if exitCode != 0 || err != nil { + t.Fatalf("Failed to get events with exit code %d: %s", exitCode, err) + } + events := strings.Split(out, "\n") + events = events[:len(events)-1] + if len(events) != 2 { + fmt.Printf("%v\n", events) + t.Fatalf("Unexpected event") + } + dieEvent := strings.Fields(events[len(events)-1]) + if dieEvent[len(dieEvent)-1] != "die" { + t.Fatalf("event should be die, not %#v", dieEvent) + } + + dieEvent = strings.Fields(events[len(events)-2]) + if dieEvent[len(dieEvent)-1] != "die" { + t.Fatalf("event should be die, not %#v", dieEvent) + } + + eventsCmd = exec.Command(dockerBinary, "events", "--since=0", fmt.Sprintf("--until=%d", time.Now().Unix()), "--filter", "event=die", "--filter", "event=start") + out, exitCode, err = runCommandWithOutput(eventsCmd) + if exitCode != 0 || err != nil { + t.Fatalf("Failed to get events with exit code %d: %s", exitCode, err) + } + events = strings.Split(out, "\n") + events = events[:len(events)-1] + if len(events) != 4 { + t.Fatalf("Unexpected event") + } + startEvent := strings.Fields(events[len(events)-4]) + if startEvent[len(startEvent)-1] != "start" { + t.Fatalf("event should be start, not %#v", startEvent) + } + dieEvent = strings.Fields(events[len(events)-3]) + if 
dieEvent[len(dieEvent)-1] != "die" { + t.Fatalf("event should be die, not %#v", dieEvent) + } + startEvent = strings.Fields(events[len(events)-2]) + if startEvent[len(startEvent)-1] != "start" { + t.Fatalf("event should be start, not %#v", startEvent) + } + dieEvent = strings.Fields(events[len(events)-1]) + if dieEvent[len(dieEvent)-1] != "die" { + t.Fatalf("event should be die, not %#v", dieEvent) + } + + logDone("events - filters") +} From 284cbda9ceb368d4bb67c0c75739984622570d92 Mon Sep 17 00:00:00 2001 From: Victor Vieux Date: Thu, 20 Nov 2014 19:54:03 +0000 Subject: [PATCH 402/592] docs nits Signed-off-by: Victor Vieux --- docs/sources/reference/commandline/cli.md | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/docs/sources/reference/commandline/cli.md b/docs/sources/reference/commandline/cli.md index 41931983ed..f46c733854 100644 --- a/docs/sources/reference/commandline/cli.md +++ b/docs/sources/reference/commandline/cli.md @@ -630,13 +630,13 @@ and Docker images will report: The filtering flag (`-f` or `--filter`) format is of "key=value". If you would like to use multiple filters, pass multiple flags (e.g., `--filter "foo=bar" --filter "bif=baz"`) -Using the same filter multiple times will be handled as a `OR`; for example +Using the same filter multiple times will be handled as a *OR*; for example `--filter container=588a23dac085 --filter container=a8f7720b8c22` will display events for -container 588a23dac085 `OR` container a8f7720b8c22 +container 588a23dac085 *OR* container a8f7720b8c22 -Using multiple filters will be handled as a `AND`; for example +Using multiple filters will be handled as a *AND*; for example `--filter container=588a23dac085 --filter event=start` will display events for container -container 588a23dac085 `AND` only when the event type is `start` +container 588a23dac085 *AND* the event type is *start* Current filters: * event From f42176434aa874afb7d633064f2babcf9d5124ab Mon Sep 17 00:00:00 2001 From: Michael Crosby Date: Thu, 20 Nov 2014 14:19:24 -0800 Subject: [PATCH 403/592] Update libcontainer to 84c1636580a356db88b079d118b Signed-off-by: Michael Crosby --- project/vendor.sh | 2 +- .../docker/libcontainer/cgroups/fs/cpuset.go | 22 +++++++++---------- .../cgroups/systemd/apply_systemd.go | 10 ++++----- .../docker/libcontainer/network/veth.go | 3 --- 4 files changed, 15 insertions(+), 22 deletions(-) diff --git a/project/vendor.sh b/project/vendor.sh index 8763f06dac..cc44277e01 100755 --- a/project/vendor.sh +++ b/project/vendor.sh @@ -66,7 +66,7 @@ if [ "$1" = '--go' ]; then mv tmp-tar src/code.google.com/p/go/src/pkg/archive/tar fi -clone git github.com/docker/libcontainer 28cb5f9dfd6f3352c610a4f1502b5df4f69389ea +clone git github.com/docker/libcontainer 84c1636580a356db88b079d118b94abe6a1a0acd # see src/github.com/docker/libcontainer/update-vendor.sh which is the "source of truth" for libcontainer deps (just like this file) rm -rf src/github.com/docker/libcontainer/vendor eval "$(grep '^clone ' src/github.com/docker/libcontainer/update-vendor.sh | grep -v 'github.com/codegangsta/cli')" diff --git a/vendor/src/github.com/docker/libcontainer/cgroups/fs/cpuset.go b/vendor/src/github.com/docker/libcontainer/cgroups/fs/cpuset.go index 8847739464..54d2ed5725 100644 --- a/vendor/src/github.com/docker/libcontainer/cgroups/fs/cpuset.go +++ b/vendor/src/github.com/docker/libcontainer/cgroups/fs/cpuset.go @@ -14,17 +14,11 @@ type CpusetGroup struct { } func (s *CpusetGroup) Set(d *data) error { - // we don't want to join this cgroup unless 
it is specified - if d.c.CpusetCpus != "" { - dir, err := d.path("cpuset") - if err != nil { - return err - } - - return s.SetDir(dir, d.c.CpusetCpus, d.pid) + dir, err := d.path("cpuset") + if err != nil { + return err } - - return nil + return s.SetDir(dir, d.c.CpusetCpus, d.pid) } func (s *CpusetGroup) Remove(d *data) error { @@ -46,8 +40,12 @@ func (s *CpusetGroup) SetDir(dir, value string, pid int) error { return err } - if err := writeFile(dir, "cpuset.cpus", value); err != nil { - return err + // If we don't use --cpuset, the default cpuset.cpus is set in + // s.ensureParent, otherwise, use the value we set + if value != "" { + if err := writeFile(dir, "cpuset.cpus", value); err != nil { + return err + } } return nil diff --git a/vendor/src/github.com/docker/libcontainer/cgroups/systemd/apply_systemd.go b/vendor/src/github.com/docker/libcontainer/cgroups/systemd/apply_systemd.go index 94f3465ffd..3d89811433 100644 --- a/vendor/src/github.com/docker/libcontainer/cgroups/systemd/apply_systemd.go +++ b/vendor/src/github.com/docker/libcontainer/cgroups/systemd/apply_systemd.go @@ -137,16 +137,14 @@ func Apply(c *cgroups.Cgroup, pid int) (map[string]string, error) { } - // we need to manually join the freezer cgroup in systemd because it does not currently support it - // via the dbus api + // we need to manually join the freezer and cpuset cgroup in systemd + // because it does not currently support it via the dbus api. if err := joinFreezer(c, pid); err != nil { return nil, err } - if c.CpusetCpus != "" { - if err := joinCpuset(c, pid); err != nil { - return nil, err - } + if err := joinCpuset(c, pid); err != nil { + return nil, err } paths := make(map[string]string) diff --git a/vendor/src/github.com/docker/libcontainer/network/veth.go b/vendor/src/github.com/docker/libcontainer/network/veth.go index 240da57986..3d7dc8729e 100644 --- a/vendor/src/github.com/docker/libcontainer/network/veth.go +++ b/vendor/src/github.com/docker/libcontainer/network/veth.go @@ -39,9 +39,6 @@ func (v *Veth) Create(n *Network, nspid int, networkState *NetworkState) error { if err := SetMtu(name1, n.Mtu); err != nil { return err } - if err := SetHairpinMode(name1, true); err != nil { - return err - } if err := InterfaceUp(name1); err != nil { return err } From 8d3b13bd7a14aa41eb3d3f3aab1b974af1db4a50 Mon Sep 17 00:00:00 2001 From: Sven Dowideit Date: Tue, 18 Nov 2014 11:42:54 -0800 Subject: [PATCH 404/592] Add an example that shows starting apache in the foreground Signed-off-by: Sven Dowideit --- docs/sources/reference/builder.md | 11 +++++++++++ 1 file changed, 11 insertions(+) diff --git a/docs/sources/reference/builder.md b/docs/sources/reference/builder.md index aac21b3272..121018cff5 100644 --- a/docs/sources/reference/builder.md +++ b/docs/sources/reference/builder.md @@ -591,6 +591,17 @@ To examine the result further, you can use `docker exec`: And you can gracefully request `top` to shut down using `docker stop test`. 
+The following `Dockerfile` shows using the `ENTRYPOINT` to run Apache in the +foreground (i.e., as `PID 1`): + +``` +FROM debian:stable +RUN apt-get update && apt-get install -y --force-yes apache2 +EXPOSE 80 443 +VOLUME ["/var/www", "/var/log/apache2", "/etc/apache2"] +ENTRYPOINT ["/usr/sbin/apache2ctl", "-D", "FOREGROUND"] +``` + If you need to write a starter script for a single executable, you can ensure that the final executable receives the Unix signals by using `exec` and `gosu` (see [the Dockerfile best practices](/articles/dockerfile_best-practices/#entrypoint) From 56c37536315d4c63c35b766e3335034e488e2189 Mon Sep 17 00:00:00 2001 From: Michael Crosby Date: Thu, 20 Nov 2014 14:22:22 -0800 Subject: [PATCH 405/592] Revert "Support hairpin NAT" This reverts commit 95a400e6e1a3b5da68431e64f9902a3fac218360. Signed-off-by: Michael Crosby --- pkg/iptables/iptables.go | 12 +----------- 1 file changed, 1 insertion(+), 11 deletions(-) diff --git a/pkg/iptables/iptables.go b/pkg/iptables/iptables.go index b550837601..53e6e1430c 100644 --- a/pkg/iptables/iptables.go +++ b/pkg/iptables/iptables.go @@ -73,6 +73,7 @@ func (c *Chain) Forward(action Action, ip net.IP, port int, proto, dest_addr str "-p", proto, "-d", daddr, "--dport", strconv.Itoa(port), + "!", "-i", c.Bridge, "-j", "DNAT", "--to-destination", net.JoinHostPort(dest_addr, strconv.Itoa(dest_port))); err != nil { return err @@ -96,17 +97,6 @@ func (c *Chain) Forward(action Action, ip net.IP, port int, proto, dest_addr str return fmt.Errorf("Error iptables forward: %s", output) } - if output, err := Raw("-t", "nat", string(fAction), "POSTROUTING", - "-p", proto, - "-s", dest_addr, - "-d", dest_addr, - "--dport", strconv.Itoa(dest_port), - "-j", "MASQUERADE"); err != nil { - return err - } else if len(output) != 0 { - return fmt.Errorf("Error iptables forward: %s", output) - } - return nil } From 7a7890950d59abf7bc4f826c605289e1d7586390 Mon Sep 17 00:00:00 2001 From: Lei Jitang Date: Tue, 4 Nov 2014 11:46:53 +0800 Subject: [PATCH 406/592] Fix create container output messages. 
Signed-off-by: Lei Jitang Signed-off-by: Jessica Frazelle --- api/client/commands.go | 19 +++++++++++-------- 1 file changed, 11 insertions(+), 8 deletions(-) diff --git a/api/client/commands.go b/api/client/commands.go index d0a0792399..39352c8b89 100644 --- a/api/client/commands.go +++ b/api/client/commands.go @@ -2054,16 +2054,15 @@ func (cli *DockerCli) CmdTag(args ...string) error { } func (cli *DockerCli) pullImage(image string) error { - return cli.pullImageCustomOut(image, cli.out) -} - -func (cli *DockerCli) pullImageCustomOut(image string, out io.Writer) error { - v := url.Values{} repos, tag := parsers.ParseRepositoryTag(image) - // pull only the image tagged 'latest' if no tag was specified if tag == "" { tag = graph.DEFAULTTAG } + return cli.pullImageCustomOut(repos, tag, cli.out) +} + +func (cli *DockerCli) pullImageCustomOut(repos string, tag string, out io.Writer) error { + v := url.Values{} v.Set("fromImage", repos) v.Set("tag", tag) @@ -2151,10 +2150,14 @@ func (cli *DockerCli) createContainer(config *runconfig.Config, hostConfig *runc stream, statusCode, err := cli.call("POST", "/containers/create?"+containerValues.Encode(), mergedConfig, false) //if image not found try to pull it if statusCode == 404 { - fmt.Fprintf(cli.err, "Unable to find image '%s' locally\n", config.Image) + repos, tag := parsers.ParseRepositoryTag(config.Image) + if tag == "" { + tag = graph.DEFAULTTAG + } + fmt.Fprintf(cli.err, "Unable to find image '%s:%s' locally\n", repos, tag) // we don't want to write to stdout anything apart from container.ID - if err = cli.pullImageCustomOut(config.Image, cli.err); err != nil { + if err = cli.pullImageCustomOut(repos, tag, cli.err); err != nil { return nil, err } // Retry From e527be1f14eda5a3d9077517a0398d85c4d7fac6 Mon Sep 17 00:00:00 2001 From: Jessica Frazelle Date: Thu, 20 Nov 2014 15:09:09 -0800 Subject: [PATCH 407/592] Fix tag output where image is not found. 
Docker-DCO-1.1-Signed-off-by: Jessica Frazelle (github: jfrazelle) --- api/client/commands.go | 17 +++++++++-------- 1 file changed, 9 insertions(+), 8 deletions(-) diff --git a/api/client/commands.go b/api/client/commands.go index 39352c8b89..8cdeb454d0 100644 --- a/api/client/commands.go +++ b/api/client/commands.go @@ -2054,15 +2054,16 @@ func (cli *DockerCli) CmdTag(args ...string) error { } func (cli *DockerCli) pullImage(image string) error { + return cli.pullImageCustomOut(image, cli.out) +} + +func (cli *DockerCli) pullImageCustomOut(image string, out io.Writer) error { + v := url.Values{} repos, tag := parsers.ParseRepositoryTag(image) + // pull only the image tagged 'latest' if no tag was specified if tag == "" { tag = graph.DEFAULTTAG } - return cli.pullImageCustomOut(repos, tag, cli.out) -} - -func (cli *DockerCli) pullImageCustomOut(repos string, tag string, out io.Writer) error { - v := url.Values{} v.Set("fromImage", repos) v.Set("tag", tag) @@ -2150,14 +2151,14 @@ func (cli *DockerCli) createContainer(config *runconfig.Config, hostConfig *runc stream, statusCode, err := cli.call("POST", "/containers/create?"+containerValues.Encode(), mergedConfig, false) //if image not found try to pull it if statusCode == 404 { - repos, tag := parsers.ParseRepositoryTag(config.Image) + repo, tag := parsers.ParseRepositoryTag(config.Image) if tag == "" { tag = graph.DEFAULTTAG } - fmt.Fprintf(cli.err, "Unable to find image '%s:%s' locally\n", repos, tag) + fmt.Fprintf(cli.err, "Unable to find image '%s:%s' locally\n", repo, tag) // we don't want to write to stdout anything apart from container.ID - if err = cli.pullImageCustomOut(repos, tag, cli.err); err != nil { + if err = cli.pullImageCustomOut(config.Image, cli.err); err != nil { return nil, err } // Retry From ae9bd580af55992974fcb94f73f72cc3b2257fec Mon Sep 17 00:00:00 2001 From: Doug Davis Date: Thu, 20 Nov 2014 07:29:04 -0800 Subject: [PATCH 408/592] Make --tlsverify enable tls regardless of value specified I also needed to add a mflag.IsSet() function that allows you to check to see if a certain flag was actually specified on the cmd line. Per #9221 - also tweaked the docs to fix a typo. 
Closes #9221 Signed-off-by: Doug Davis --- docker/docker.go | 7 ++++++- docker/flags.go | 2 +- docs/sources/reference/commandline/cli.md | 2 +- integration-cli/docker_cli_run_test.go | 25 +++++++++++++++++++++++ pkg/mflag/flag.go | 10 +++++++++ 5 files changed, 43 insertions(+), 3 deletions(-) diff --git a/docker/docker.go b/docker/docker.go index bb61d51725..3137f5c99f 100644 --- a/docker/docker.go +++ b/docker/docker.go @@ -83,9 +83,14 @@ func main() { ) tlsConfig.InsecureSkipVerify = true + // Regardless of whether the user sets it to true or false, if they + // specify --tlsverify at all then we need to turn on tls + if flag.IsSet("-tlsverify") { + *flTls = true + } + // If we should verify the server, we need to load a trusted ca if *flTlsVerify { - *flTls = true certPool := x509.NewCertPool() file, err := ioutil.ReadFile(*flCa) if err != nil { diff --git a/docker/flags.go b/docker/flags.go index 80fd9fc17c..6601b4fe8a 100644 --- a/docker/flags.go +++ b/docker/flags.go @@ -35,7 +35,7 @@ var ( flSocketGroup = flag.String([]string{"G", "-group"}, "docker", "Group to assign the unix socket specified by -H when running in daemon mode\nuse '' (the empty string) to disable setting of a group") flLogLevel = flag.String([]string{"l", "-log-level"}, "info", "Set the logging level") flEnableCors = flag.Bool([]string{"#api-enable-cors", "-api-enable-cors"}, false, "Enable CORS headers in the remote API") - flTls = flag.Bool([]string{"-tls"}, false, "Use TLS; implied by --tlsverify=true") + flTls = flag.Bool([]string{"-tls"}, false, "Use TLS; implied by --tlsverify flag") flTlsVerify = flag.Bool([]string{"-tlsverify"}, dockerTlsVerify, "Use TLS and verify the remote (daemon: verify client, client: verify daemon)") // these are initialized in init() below since their default values depend on dockerCertPath which isn't fully initialized until init() runs diff --git a/docs/sources/reference/commandline/cli.md b/docs/sources/reference/commandline/cli.md index ca7b7b7836..ff13d6222c 100644 --- a/docs/sources/reference/commandline/cli.md +++ b/docs/sources/reference/commandline/cli.md @@ -84,7 +84,7 @@ expect an integer, and they can only be specified once. -s, --storage-driver="" Force the Docker runtime to use a specific storage driver --selinux-enabled=false Enable selinux support. 
SELinux does not presently support the BTRFS storage driver --storage-opt=[] Set storage driver options - --tls=false Use TLS; implied by --tlsverify=true + --tls=false Use TLS; implied by --tlsverify flag --tlscacert="/home/sven/.docker/ca.pem" Trust only remotes providing a certificate signed by the CA given here --tlscert="/home/sven/.docker/cert.pem" Path to TLS certificate file --tlskey="/home/sven/.docker/key.pem" Path to TLS key file diff --git a/integration-cli/docker_cli_run_test.go b/integration-cli/docker_cli_run_test.go index 9292994283..2d150426c6 100644 --- a/integration-cli/docker_cli_run_test.go +++ b/integration-cli/docker_cli_run_test.go @@ -2687,3 +2687,28 @@ func TestContainerNetworkMode(t *testing.T) { logDone("run - container shared network namespace") } + +func TestRunTLSverify(t *testing.T) { + cmd := exec.Command(dockerBinary, "ps") + out, ec, err := runCommandWithOutput(cmd) + if err != nil || ec != 0 { + t.Fatalf("Should have worked: %v:\n%v", err, out) + } + + // Regardless of whether we specify true or false we need to + // test to make sure tls is turned on if --tlsverify is specified at all + + cmd = exec.Command(dockerBinary, "--tlsverify=false", "ps") + out, ec, err = runCommandWithOutput(cmd) + if err == nil || ec == 0 || !strings.Contains(out, "trying to connect") { + t.Fatalf("Should have failed: \nec:%v\nout:%v\nerr:%v", ec, out, err) + } + + cmd = exec.Command(dockerBinary, "--tlsverify=true", "ps") + out, ec, err = runCommandWithOutput(cmd) + if err == nil || ec == 0 || !strings.Contains(out, "cert") { + t.Fatalf("Should have failed: \nec:%v\nout:%v\nerr:%v", ec, out, err) + } + + logDone("run - verify tls is set for --tlsverify") +} diff --git a/pkg/mflag/flag.go b/pkg/mflag/flag.go index b40f911769..c9061c2d73 100644 --- a/pkg/mflag/flag.go +++ b/pkg/mflag/flag.go @@ -394,12 +394,22 @@ func (f *FlagSet) Lookup(name string) *Flag { return f.formal[name] } +// Indicates whether the specified flag was specified at all on the cmd line +func (f *FlagSet) IsSet(name string) bool { + return f.actual[name] != nil +} + // Lookup returns the Flag structure of the named command-line flag, // returning nil if none exists. func Lookup(name string) *Flag { return CommandLine.formal[name] } +// Indicates whether the specified flag was specified at all on the cmd line +func IsSet(name string) bool { + return CommandLine.IsSet(name) +} + // Set sets the value of the named flag. func (f *FlagSet) Set(name, value string) error { flag, ok := f.formal[name] From 7b20c1fd1877dc91abfbf6fd57e1f16e3b44c1e0 Mon Sep 17 00:00:00 2001 From: Jeff Anderson Date: Thu, 20 Nov 2014 16:37:48 -0800 Subject: [PATCH 409/592] refer to the registry instead of the hub Signed-off-by: Jeff Anderson --- docs/sources/userguide/dockerimages.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/docs/sources/userguide/dockerimages.md b/docs/sources/userguide/dockerimages.md index 51f6beb554..157c578d97 100644 --- a/docs/sources/userguide/dockerimages.md +++ b/docs/sources/userguide/dockerimages.md @@ -13,14 +13,14 @@ image and the `training/webapp` image. We've also discovered that Docker stores downloaded images on the Docker host. If an image isn't already present on the host then it'll be downloaded from a registry: by default the -[Docker Hub](https://hub.docker.com) public registry. +[Docker Hub Registry](https://registry.hub.docker.com) public registry. 
In this section we're going to explore Docker images a bit more including: * Managing and working with images locally on your Docker host; * Creating basic images; -* Uploading images to [Docker Hub](https://hub.docker.com). +* Uploading images to [Docker Hub Registry](https://registry.hub.docker.com). ## Listing images on the host From 6cc75574b3b01fa4dfeeef585e52dbcf8da28586 Mon Sep 17 00:00:00 2001 From: Jessica Frazelle Date: Thu, 20 Nov 2014 16:07:55 -0800 Subject: [PATCH 410/592] Typed errors for iptables chain raw command output. YAYYYYYY. Docker-DCO-1.1-Signed-off-by: Jessica Frazelle (github: jfrazelle) --- pkg/iptables/iptables.go | 19 ++++++++++++++----- 1 file changed, 14 insertions(+), 5 deletions(-) diff --git a/pkg/iptables/iptables.go b/pkg/iptables/iptables.go index 53e6e1430c..b783347fa3 100644 --- a/pkg/iptables/iptables.go +++ b/pkg/iptables/iptables.go @@ -20,9 +20,9 @@ const ( ) var ( - ErrIptablesNotFound = errors.New("Iptables not found") nat = []string{"-t", "nat"} supportsXlock = false + ErrIptablesNotFound = errors.New("Iptables not found") ) type Chain struct { @@ -30,6 +30,15 @@ type Chain struct { Bridge string } +type ChainError struct { + Chain string + Output []byte +} + +func (e *ChainError) Error() string { + return fmt.Sprintf("Error iptables %s: %s", e.Chain, string(e.Output)) +} + func init() { supportsXlock = exec.Command("iptables", "--wait", "-L", "-n").Run() == nil } @@ -78,7 +87,7 @@ func (c *Chain) Forward(action Action, ip net.IP, port int, proto, dest_addr str "--to-destination", net.JoinHostPort(dest_addr, strconv.Itoa(dest_port))); err != nil { return err } else if len(output) != 0 { - return fmt.Errorf("Error iptables forward: %s", output) + return &ChainError{Chain: "FORWARD", Output: output} } fAction := action @@ -94,7 +103,7 @@ func (c *Chain) Forward(action Action, ip net.IP, port int, proto, dest_addr str "-j", "ACCEPT"); err != nil { return err } else if len(output) != 0 { - return fmt.Errorf("Error iptables forward: %s", output) + return &ChainError{Chain: "FORWARD", Output: output} } return nil @@ -108,7 +117,7 @@ func (c *Chain) Prerouting(action Action, args ...string) error { if output, err := Raw(append(a, "-j", c.Name)...); err != nil { return err } else if len(output) != 0 { - return fmt.Errorf("Error iptables prerouting: %s", output) + return &ChainError{Chain: "PREROUTING", Output: output} } return nil } @@ -121,7 +130,7 @@ func (c *Chain) Output(action Action, args ...string) error { if output, err := Raw(append(a, "-j", c.Name)...); err != nil { return err } else if len(output) != 0 { - return fmt.Errorf("Error iptables output: %s", output) + return &ChainError{Chain: "OUTPUT", Output: output} } return nil } From f6c7194539720473aae814d3d1445eab2a78d568 Mon Sep 17 00:00:00 2001 From: Jessica Frazelle Date: Thu, 20 Nov 2014 16:20:29 -0800 Subject: [PATCH 411/592] Apply same typed iptables errors to network driver. 
Docker-DCO-1.1-Signed-off-by: Jessica Frazelle (github: jfrazelle) --- daemon/networkdriver/bridge/driver.go | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/daemon/networkdriver/bridge/driver.go b/daemon/networkdriver/bridge/driver.go index 5d0040a8e7..04d88a4315 100644 --- a/daemon/networkdriver/bridge/driver.go +++ b/daemon/networkdriver/bridge/driver.go @@ -195,7 +195,7 @@ func setupIPTables(addr net.Addr, icc, ipmasq bool) error { if output, err := iptables.Raw(append([]string{"-I"}, natArgs...)...); err != nil { return fmt.Errorf("Unable to enable network bridge NAT: %s", err) } else if len(output) != 0 { - return fmt.Errorf("Error iptables postrouting: %s", output) + return &iptables.ChainError{Chain: "POSTROUTING", Output: output} } } } @@ -236,7 +236,7 @@ func setupIPTables(addr net.Addr, icc, ipmasq bool) error { if output, err := iptables.Raw(append([]string{"-I"}, outgoingArgs...)...); err != nil { return fmt.Errorf("Unable to allow outgoing packets: %s", err) } else if len(output) != 0 { - return fmt.Errorf("Error iptables allow outgoing: %s", output) + return &iptables.ChainError{Chain: "FORWARD outgoing", Output: output} } } @@ -247,7 +247,7 @@ func setupIPTables(addr net.Addr, icc, ipmasq bool) error { if output, err := iptables.Raw(append([]string{"-I"}, existingArgs...)...); err != nil { return fmt.Errorf("Unable to allow incoming packets: %s", err) } else if len(output) != 0 { - return fmt.Errorf("Error iptables allow incoming: %s", output) + return &iptables.ChainError{Chain: "FORWARD incoming", Output: output} } } return nil From 2d2b3535cae11d02573d12297d34e9a98a3e984a Mon Sep 17 00:00:00 2001 From: Joel Friedly Date: Fri, 21 Nov 2014 03:14:01 +0000 Subject: [PATCH 412/592] Fix typos in the user guide main page Signed-off-by: Joel Friedly --- docs/sources/userguide/index.md | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/docs/sources/userguide/index.md b/docs/sources/userguide/index.md index 08d6be0731..64bd3d16f3 100644 --- a/docs/sources/userguide/index.md +++ b/docs/sources/userguide/index.md @@ -10,7 +10,7 @@ using Docker and integrating it into your environment. We’ll teach you how to use Docker to: -* Dockerizing your applications. +* Dockerize your applications. * Run your own containers. * Build Docker images. * Share your Docker images with others. @@ -25,7 +25,7 @@ the Docker life cycle: Docker Hub is the central hub for Docker. It hosts public Docker images and provides services to help you build and manage your Docker -environment. To learn more; +environment. To learn more: Go to [Using Docker Hub](/userguide/dockerhub). @@ -34,7 +34,7 @@ Go to [Using Docker Hub](/userguide/dockerhub). *How do I run applications inside containers?* Docker offers a *container-based* virtualization platform to power your -applications. To learn how to Dockerize applications and run them. +applications. To learn how to Dockerize applications and run them: Go to [Dockerizing Applications](/userguide/dockerizing). @@ -55,7 +55,7 @@ Go to [Working With Containers](/userguide/usingdocker). Once you've learnt how to use Docker it's time to take the next step and learn how to build your own application images with Docker. -Go to [Working with Docker Images](/userguide/dockerimages) +Go to [Working with Docker Images](/userguide/dockerimages). 
## Linking Containers Together From 244af451e9bdff5c87bca84e4c15193fc9eebc64 Mon Sep 17 00:00:00 2001 From: Yohei Ueda Date: Fri, 21 Nov 2014 22:12:03 +0900 Subject: [PATCH 413/592] Use termios via CGO Signed-off-by: Yohei Ueda --- pkg/term/term.go | 10 ++++---- pkg/term/term_cgo.go | 47 +++++++++++++++++++++++++++++++++++++ pkg/term/term_nocgo.go | 18 ++++++++++++++ pkg/term/termios_darwin.go | 2 ++ pkg/term/termios_freebsd.go | 2 ++ pkg/term/termios_linux.go | 2 ++ 6 files changed, 75 insertions(+), 6 deletions(-) create mode 100644 pkg/term/term_cgo.go create mode 100644 pkg/term/term_nocgo.go diff --git a/pkg/term/term.go b/pkg/term/term.go index 553747a7a0..8d807d8d44 100644 --- a/pkg/term/term.go +++ b/pkg/term/term.go @@ -47,8 +47,7 @@ func SetWinsize(fd uintptr, ws *Winsize) error { // IsTerminal returns true if the given file descriptor is a terminal. func IsTerminal(fd uintptr) bool { var termios Termios - _, _, err := syscall.Syscall(syscall.SYS_IOCTL, fd, uintptr(getTermios), uintptr(unsafe.Pointer(&termios))) - return err == 0 + return tcget(fd, &termios) == 0 } // Restore restores the terminal connected to the given file descriptor to a @@ -57,8 +56,7 @@ func RestoreTerminal(fd uintptr, state *State) error { if state == nil { return ErrInvalidState } - _, _, err := syscall.Syscall(syscall.SYS_IOCTL, fd, uintptr(setTermios), uintptr(unsafe.Pointer(&state.termios))) - if err != 0 { + if err := tcset(fd, &state.termios); err != 0 { return err } return nil @@ -66,7 +64,7 @@ func RestoreTerminal(fd uintptr, state *State) error { func SaveState(fd uintptr) (*State, error) { var oldState State - if _, _, err := syscall.Syscall(syscall.SYS_IOCTL, fd, getTermios, uintptr(unsafe.Pointer(&oldState.termios))); err != 0 { + if err := tcget(fd, &oldState.termios); err != 0 { return nil, err } @@ -77,7 +75,7 @@ func DisableEcho(fd uintptr, state *State) error { newState := state.termios newState.Lflag &^= syscall.ECHO - if _, _, err := syscall.Syscall(syscall.SYS_IOCTL, fd, setTermios, uintptr(unsafe.Pointer(&newState))); err != 0 { + if err := tcset(fd, &newState); err != 0 { return err } handleInterrupt(fd, state) diff --git a/pkg/term/term_cgo.go b/pkg/term/term_cgo.go new file mode 100644 index 0000000000..ddf080cf93 --- /dev/null +++ b/pkg/term/term_cgo.go @@ -0,0 +1,47 @@ +// +build !windows,cgo + +package term + +import ( + "syscall" + "unsafe" +) + +// #include +import "C" + +type Termios syscall.Termios + +// MakeRaw put the terminal connected to the given file descriptor into raw +// mode and returns the previous state of the terminal so that it can be +// restored. 
+func MakeRaw(fd uintptr) (*State, error) { + var oldState State + if err := tcget(fd, &oldState.termios); err != 0 { + return nil, err + } + + newState := oldState.termios + + C.cfmakeraw((*C.struct_termios)(unsafe.Pointer(&newState))) + if err := tcset(fd, &newState); err != 0 { + return nil, err + } + return &oldState, nil +} + +func tcget(fd uintptr, p *Termios) syscall.Errno { + ret, err := C.tcgetattr(C.int(fd), (*C.struct_termios)(unsafe.Pointer(p))) + if ret != 0 { + return err.(syscall.Errno) + } + return 0 +} + +func tcset(fd uintptr, p *Termios) syscall.Errno { + ret, err := C.tcsetattr(C.int(fd), C.TCSANOW, (*C.struct_termios)(unsafe.Pointer(p))) + if ret != 0 { + return err.(syscall.Errno) + } + return 0 +} diff --git a/pkg/term/term_nocgo.go b/pkg/term/term_nocgo.go new file mode 100644 index 0000000000..c211c3992d --- /dev/null +++ b/pkg/term/term_nocgo.go @@ -0,0 +1,18 @@ +// +build !windows,!cgo + +package term + +import ( + "syscall" + "unsafe" +) + +func tcget(fd uintptr, p *Termios) syscall.Errno { + _, _, err := syscall.Syscall(syscall.SYS_IOCTL, fd, uintptr(getTermios), uintptr(unsafe.Pointer(p))) + return err +} + +func tcset(fd uintptr, p *Termios) syscall.Errno { + _, _, err := syscall.Syscall(syscall.SYS_IOCTL, fd, setTermios, uintptr(unsafe.Pointer(p))) + return err +} diff --git a/pkg/term/termios_darwin.go b/pkg/term/termios_darwin.go index 11cd70d10b..2640e8b935 100644 --- a/pkg/term/termios_darwin.go +++ b/pkg/term/termios_darwin.go @@ -1,3 +1,5 @@ +// +build !cgo + package term import ( diff --git a/pkg/term/termios_freebsd.go b/pkg/term/termios_freebsd.go index ed3659572c..969beda239 100644 --- a/pkg/term/termios_freebsd.go +++ b/pkg/term/termios_freebsd.go @@ -1,3 +1,5 @@ +// +build !cgo + package term import ( diff --git a/pkg/term/termios_linux.go b/pkg/term/termios_linux.go index 4a717c84a7..024187ff06 100644 --- a/pkg/term/termios_linux.go +++ b/pkg/term/termios_linux.go @@ -1,3 +1,5 @@ +// +build !cgo + package term import ( From 4180579313e84ea7e3d85214521a815e95459a90 Mon Sep 17 00:00:00 2001 From: unclejack Date: Thu, 20 Nov 2014 19:39:08 +0200 Subject: [PATCH 414/592] graphdriver/aufs: fix tmp cleanup in tests Docker-DCO-1.1-Signed-off-by: Cristian Staretu (github: unclejack) --- daemon/graphdriver/aufs/aufs_test.go | 34 ++++++++++++++++------------ 1 file changed, 19 insertions(+), 15 deletions(-) diff --git a/daemon/graphdriver/aufs/aufs_test.go b/daemon/graphdriver/aufs/aufs_test.go index 971d448af8..e1ed64985f 100644 --- a/daemon/graphdriver/aufs/aufs_test.go +++ b/daemon/graphdriver/aufs/aufs_test.go @@ -4,16 +4,18 @@ import ( "crypto/sha256" "encoding/hex" "fmt" - "github.com/docker/docker/daemon/graphdriver" - "github.com/docker/docker/pkg/archive" "io/ioutil" "os" "path" "testing" + + "github.com/docker/docker/daemon/graphdriver" + "github.com/docker/docker/pkg/archive" ) var ( - tmp = path.Join(os.TempDir(), "aufs-tests", "aufs") + tmpOuter = path.Join(os.TempDir(), "aufs-tests") + tmp = path.Join(tmpOuter, "aufs") ) func testInit(dir string, t *testing.T) graphdriver.Driver { @@ -640,8 +642,8 @@ func testMountMoreThan42Layers(t *testing.T, mountPath string) { t.Fatal(err) } - d := testInit(mountPath, t).(*Driver) defer os.RemoveAll(mountPath) + d := testInit(mountPath, t).(*Driver) defer d.Cleanup() var last string var expected int @@ -662,24 +664,24 @@ func testMountMoreThan42Layers(t *testing.T, mountPath string) { if err := d.Create(current, parent); err != nil { t.Logf("Current layer %d", i) - t.Fatal(err) + t.Error(err) } point, err 
:= d.Get(current, "") if err != nil { t.Logf("Current layer %d", i) - t.Fatal(err) + t.Error(err) } f, err := os.Create(path.Join(point, current)) if err != nil { t.Logf("Current layer %d", i) - t.Fatal(err) + t.Error(err) } f.Close() if i%10 == 0 { if err := os.Remove(path.Join(point, parent)); err != nil { t.Logf("Current layer %d", i) - t.Fatal(err) + t.Error(err) } expected-- } @@ -689,28 +691,30 @@ func testMountMoreThan42Layers(t *testing.T, mountPath string) { // Perform the actual mount for the top most image point, err := d.Get(last, "") if err != nil { - t.Fatal(err) + t.Error(err) } files, err := ioutil.ReadDir(point) if err != nil { - t.Fatal(err) + t.Error(err) } if len(files) != expected { - t.Fatalf("Expected %d got %d", expected, len(files)) + t.Errorf("Expected %d got %d", expected, len(files)) } } func TestMountMoreThan42Layers(t *testing.T) { + os.RemoveAll(tmpOuter) testMountMoreThan42Layers(t, tmp) } func TestMountMoreThan42LayersMatchingPathLength(t *testing.T) { - tmp := "aufs-tests" + defer os.RemoveAll(tmpOuter) + zeroes := "0" for { // This finds a mount path so that when combined into aufs mount options // 4096 byte boundary would be in between the paths or in permission - // section. For '/tmp' it will use '/tmp/aufs-tests00000000/aufs' - mountPath := path.Join(os.TempDir(), tmp, "aufs") + // section. For '/tmp' it will use '/tmp/aufs-tests/00000000/aufs' + mountPath := path.Join(tmpOuter, zeroes, "aufs") pathLength := 77 + len(mountPath) if mod := 4095 % pathLength; mod == 0 || mod > pathLength-2 { @@ -718,6 +722,6 @@ func TestMountMoreThan42LayersMatchingPathLength(t *testing.T) { testMountMoreThan42Layers(t, mountPath) return } - tmp += "0" + zeroes += "0" } } From 054e57a622e6a065c343806e7334920d17a03c5b Mon Sep 17 00:00:00 2001 From: unclejack Date: Fri, 21 Nov 2014 19:51:32 +0200 Subject: [PATCH 415/592] build: add pull flag to force image pulling Signed-off-by: Cristian Staretu --- api/client/commands.go | 4 ++++ api/server/server.go | 3 +++ builder/dispatchers.go | 6 ++++++ builder/evaluator.go | 1 + builder/job.go | 2 ++ docs/sources/reference/api/docker_remote_api_v1.16.md | 1 + docs/sources/reference/commandline/cli.md | 1 + 7 files changed, 18 insertions(+) diff --git a/api/client/commands.go b/api/client/commands.go index d0a0792399..f0e8d834a6 100644 --- a/api/client/commands.go +++ b/api/client/commands.go @@ -77,6 +77,7 @@ func (cli *DockerCli) CmdBuild(args ...string) error { noCache := cmd.Bool([]string{"#no-cache", "-no-cache"}, false, "Do not use cache when building the image") rm := cmd.Bool([]string{"#rm", "-rm"}, true, "Remove intermediate containers after a successful build") forceRm := cmd.Bool([]string{"-force-rm"}, false, "Always remove intermediate containers, even after unsuccessful builds") + pull := cmd.Bool([]string{"-pull"}, false, "Always attempt to pull a newer version of the image") if err := cmd.Parse(args); err != nil { return nil } @@ -213,6 +214,9 @@ func (cli *DockerCli) CmdBuild(args ...string) error { v.Set("forcerm", "1") } + if *pull { + v.Set("pull", "1") + } cli.LoadConfigFile() headers := http.Header(make(map[string][]string)) diff --git a/api/server/server.go b/api/server/server.go index d9b73e6798..b3cf0603bb 100644 --- a/api/server/server.go +++ b/api/server/server.go @@ -1016,6 +1016,9 @@ func postBuild(eng *engine.Engine, version version.Version, w http.ResponseWrite } else { job.Setenv("rm", r.FormValue("rm")) } + if r.FormValue("pull") == "1" && version.GreaterThanOrEqualTo("1.16") { + job.Setenv("pull", 
"1") + } job.Stdin.Add(r.Body) job.Setenv("remote", r.FormValue("remote")) job.Setenv("t", r.FormValue("t")) diff --git a/builder/dispatchers.go b/builder/dispatchers.go index 99be480f73..db7476c5ed 100644 --- a/builder/dispatchers.go +++ b/builder/dispatchers.go @@ -115,6 +115,12 @@ func from(b *Builder, args []string, attributes map[string]bool, original string name := args[0] image, err := b.Daemon.Repositories().LookupImage(name) + if b.Pull { + image, err = b.pullImage(name) + if err != nil { + return err + } + } if err != nil { if b.Daemon.Graph().IsNotExist(err) { image, err = b.pullImage(name) diff --git a/builder/evaluator.go b/builder/evaluator.go index 645038bb1d..3d9ebb162c 100644 --- a/builder/evaluator.go +++ b/builder/evaluator.go @@ -90,6 +90,7 @@ type Builder struct { // controls how images and containers are handled between steps. Remove bool ForceRemove bool + Pull bool AuthConfig *registry.AuthConfig AuthConfigFile *registry.ConfigFile diff --git a/builder/job.go b/builder/job.go index c86ccb0e3c..1d10e8eb34 100644 --- a/builder/job.go +++ b/builder/job.go @@ -35,6 +35,7 @@ func (b *BuilderJob) CmdBuild(job *engine.Job) engine.Status { noCache = job.GetenvBool("nocache") rm = job.GetenvBool("rm") forceRm = job.GetenvBool("forcerm") + pull = job.GetenvBool("pull") authConfig = ®istry.AuthConfig{} configFile = ®istry.ConfigFile{} tag string @@ -111,6 +112,7 @@ func (b *BuilderJob) CmdBuild(job *engine.Job) engine.Status { UtilizeCache: !noCache, Remove: rm, ForceRemove: forceRm, + Pull: pull, OutOld: job.Stdout, StreamFormatter: sf, AuthConfig: authConfig, diff --git a/docs/sources/reference/api/docker_remote_api_v1.16.md b/docs/sources/reference/api/docker_remote_api_v1.16.md index e643d1a5c7..a1d3fa5dfe 100644 --- a/docs/sources/reference/api/docker_remote_api_v1.16.md +++ b/docs/sources/reference/api/docker_remote_api_v1.16.md @@ -1156,6 +1156,7 @@ Query Parameters: the resulting image in case of success - **q** – suppress verbose build output - **nocache** – do not use the cache when building the image +- **pull** - attempt to pull the image even if an older image exists locally - **rm** - remove intermediate containers after a successful build (default behavior) - **forcerm - always remove intermediate containers (includes rm) diff --git a/docs/sources/reference/commandline/cli.md b/docs/sources/reference/commandline/cli.md index ff13d6222c..504fc0fbac 100644 --- a/docs/sources/reference/commandline/cli.md +++ b/docs/sources/reference/commandline/cli.md @@ -312,6 +312,7 @@ To kill the container, use `docker kill`. 
--force-rm=false Always remove intermediate containers, even after unsuccessful builds --no-cache=false Do not use cache when building the image + --pull=false Always attempt to pull a newer version of the image -q, --quiet=false Suppress the verbose output generated by the containers --rm=true Remove intermediate containers after a successful build -t, --tag="" Repository name (and optionally a tag) to be applied to the resulting image in case of success From 62a7d75512d939a86cbc58986278548df3302902 Mon Sep 17 00:00:00 2001 From: Victor Vieux Date: Fri, 21 Nov 2014 19:15:22 +0000 Subject: [PATCH 416/592] key=values -> key=value Signed-off-by: Victor Vieux --- daemon/config.go | 2 +- docs/man/docker.1.md | 2 +- docs/sources/reference/commandline/cli.md | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/daemon/config.go b/daemon/config.go index beb3b25a5a..785fd4d290 100644 --- a/daemon/config.go +++ b/daemon/config.go @@ -70,7 +70,7 @@ func (config *Config) InstallFlags() { opts.IPListVar(&config.Dns, []string{"#dns", "-dns"}, "Force Docker to use specific DNS servers") opts.DnsSearchListVar(&config.DnsSearch, []string{"-dns-search"}, "Force Docker to use specific DNS search domains") opts.MirrorListVar(&config.Mirrors, []string{"-registry-mirror"}, "Specify a preferred Docker registry mirror") - opts.LabelListVar(&config.Labels, []string{"-label"}, "Set key=values labels to the daemon (displayed in `docker info`)") + opts.LabelListVar(&config.Labels, []string{"-label"}, "Set key=value labels to the daemon (displayed in `docker info`)") // Localhost is by default considered as an insecure registry // This is a stop-gap for people who are running a private registry on localhost (especially on Boot2docker). diff --git a/docs/man/docker.1.md b/docs/man/docker.1.md index e5a1bc24d7..c8d28b2c23 100644 --- a/docs/man/docker.1.md +++ b/docs/man/docker.1.md @@ -69,7 +69,7 @@ unix://[/path/to/socket] to use. Set the logging level. Default is `info`. **--label**="[]" - Set key=values labels to the daemon (displayed in `docker info`) + Set key=value labels to the daemon (displayed in `docker info`) **--mtu**=VALUE Set the containers network mtu. Default is `1500`. diff --git a/docs/sources/reference/commandline/cli.md b/docs/sources/reference/commandline/cli.md index 7b1b6187b0..7938ea5843 100644 --- a/docs/sources/reference/commandline/cli.md +++ b/docs/sources/reference/commandline/cli.md @@ -76,7 +76,7 @@ expect an integer, and they can only be specified once. --ip-masq=true Enable IP masquerading for bridge's IP range --iptables=true Enable Docker's addition of iptables rules -l, --log-level="info" Set the logging level - --label=[] Set key=values labels to the daemon (displayed in `docker info`) + --label=[] Set key=value labels to the daemon (displayed in `docker info`) --mtu=0 Set the containers network MTU if no value is provided: default to the default route MTU or 1500 if no default route is available -p, --pidfile="/var/run/docker.pid" Path to use for daemon PID file From 82f33d86a7c4d41d3c880757d35c81c847a9ab69 Mon Sep 17 00:00:00 2001 From: Tianon Gravi Date: Thu, 13 Nov 2014 16:04:13 -0700 Subject: [PATCH 417/592] Add some minor reorganization to the Makefile preamble The gist here is a reemphasizing of the explicitly "user mutable" bits by putting them first (and hopefully improving readability a little bit in the process). 
Signed-off-by: Andrew Page --- Makefile | 30 +++++++++++++++++++++++------- 1 file changed, 23 insertions(+), 7 deletions(-) diff --git a/Makefile b/Makefile index b3baca8e03..6f76fa4d29 100644 --- a/Makefile +++ b/Makefile @@ -1,23 +1,39 @@ .PHONY: all binary build cross default docs docs-build docs-shell shell test test-unit test-integration test-integration-cli validate +# env vars passed through directly to Docker's build scripts +# to allow things like `make DOCKER_CLIENTONLY=1 binary` easily +# `docs/sources/contributing/devenvironment.md ` and `project/PACKAGERS.md` have some limited documentation of some of these +DOCKER_ENVS := \ + -e BUILDFLAGS \ + -e DOCKER_CLIENTONLY \ + -e DOCKER_EXECDRIVER \ + -e DOCKER_GRAPHDRIVER \ + -e TESTDIRS \ + -e TESTFLAGS \ + -e TIMEOUT +# note: we _cannot_ add "-e DOCKER_BUILDTAGS" here because even if it's unset in the shell, that would shadow the "ENV DOCKER_BUILDTAGS" set in our Dockerfile, which is very important for our official builds + # to allow `make BINDDIR=. shell` or `make BINDDIR= test` # (default to no bind mount if DOCKER_HOST is set) BINDDIR := $(if $(DOCKER_HOST),,bundles) +DOCKER_MOUNT := $(if $(BINDDIR),-v "$(CURDIR)/$(BINDDIR):/go/src/github.com/docker/docker/$(BINDDIR)") + +# to allow `make DOCSDIR=docs docs-shell` (to create a bind mount in docs) +DOCS_MOUNT := $(if $(DOCSDIR),-v $(CURDIR)/$(DOCSDIR):/$(DOCSDIR)) + # to allow `make DOCSPORT=9000 docs` DOCSPORT := 8000 GIT_BRANCH := $(shell git rev-parse --abbrev-ref HEAD 2>/dev/null) -GITCOMMIT := $(shell git rev-parse --short HEAD 2>/dev/null) DOCKER_IMAGE := docker$(if $(GIT_BRANCH),:$(GIT_BRANCH)) DOCKER_DOCS_IMAGE := docker-docs$(if $(GIT_BRANCH),:$(GIT_BRANCH)) -DOCKER_MOUNT := $(if $(BINDDIR),-v "$(CURDIR)/$(BINDDIR):/go/src/github.com/docker/docker/$(BINDDIR)") -DOCKER_ENVS := -e TIMEOUT -e BUILDFLAGS -e TESTFLAGS \ - -e TESTDIRS -e DOCKER_GRAPHDRIVER -e DOCKER_EXECDRIVER \ - -e DOCKER_CLIENTONLY DOCKER_RUN_DOCKER := docker run --rm -it --privileged $(DOCKER_ENVS) $(DOCKER_MOUNT) "$(DOCKER_IMAGE)" -# to allow `make DOCSDIR=docs docs-shell` -DOCKER_RUN_DOCS := docker run --rm -it $(if $(DOCSDIR),-v $(CURDIR)/$(DOCSDIR):/$(DOCSDIR)) -e AWS_S3_BUCKET + +DOCKER_RUN_DOCS := docker run --rm -it $(DOCS_MOUNT) -e AWS_S3_BUCKET + +# for some docs workarounds (see below in "docs-build" target) +GITCOMMIT := $(shell git rev-parse --short HEAD 2>/dev/null) default: binary From 20218f39718673d3ae5822aeecfd08ea0c6e8126 Mon Sep 17 00:00:00 2001 From: Martin Honermeyer Date: Sat, 15 Nov 2014 21:49:47 +0100 Subject: [PATCH 418/592] Fix link to MAINTAINERS.md in CONTRIBUTING.md Signed-off-by: Martin Honermeyer --- CONTRIBUTING.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index 29a3ce1404..77af00e40c 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -172,7 +172,7 @@ component affected. For example, if a change affects `docs/` and `registry/`, it needs an absolute majority from the maintainers of `docs/` AND, separately, an absolute majority of the maintainers of `registry/`. 
-For more details see [MAINTAINERS.md](hack/MAINTAINERS.md) +For more details see [MAINTAINERS.md](project/MAINTAINERS.md) ### Sign your work From f8509e7940d73ecc0071faf15a865acb1f8dad52 Mon Sep 17 00:00:00 2001 From: Michael Crosby Date: Thu, 13 Nov 2014 19:33:41 -0800 Subject: [PATCH 419/592] Mknod more loopbacks for devmapper Signed-off-by: Michael Crosby --- .../graphdriver/devmapper/devmapper_test.go | 3 ++ daemon/graphdriver/graphtest/graphtest.go | 41 +++++++++++++++++++ 2 files changed, 44 insertions(+) diff --git a/daemon/graphdriver/devmapper/devmapper_test.go b/daemon/graphdriver/devmapper/devmapper_test.go index b6e26bc1d7..6cb7572384 100644 --- a/daemon/graphdriver/devmapper/devmapper_test.go +++ b/daemon/graphdriver/devmapper/devmapper_test.go @@ -13,6 +13,9 @@ func init() { DefaultDataLoopbackSize = 300 * 1024 * 1024 DefaultMetaDataLoopbackSize = 200 * 1024 * 1024 DefaultBaseFsSize = 300 * 1024 * 1024 + if err := graphtest.InitLoopbacks(); err != nil { + panic(err) + } } // This avoids creating a new driver for each test if all tests are run diff --git a/daemon/graphdriver/graphtest/graphtest.go b/daemon/graphdriver/graphtest/graphtest.go index 16c7163130..67f15c594d 100644 --- a/daemon/graphdriver/graphtest/graphtest.go +++ b/daemon/graphdriver/graphtest/graphtest.go @@ -1,6 +1,7 @@ package graphtest import ( + "fmt" "io/ioutil" "os" "path" @@ -20,6 +21,46 @@ type Driver struct { refCount int } +// InitLoopbacks ensures that the loopback devices are properly created within +// the system running the device mapper tests. +func InitLoopbacks() error { + stat_t, err := getBaseLoopStats() + if err != nil { + return err + } + // create atleast 8 loopback files, ya, that is a good number + for i := 0; i < 8; i++ { + loopPath := fmt.Sprintf("/dev/loop%d", i) + // only create new loopback files if they don't exist + if _, err := os.Stat(loopPath); err != nil { + if mkerr := syscall.Mknod(loopPath, + uint32(stat_t.Mode|syscall.S_IFBLK), int((7<<8)|(i&0xff)|((i&0xfff00)<<12))); mkerr != nil { + return mkerr + } + os.Chown(loopPath, int(stat_t.Uid), int(stat_t.Gid)) + } + } + return nil +} + +// getBaseLoopStats inspects /dev/loop0 to collect uid,gid, and mode for the +// loop0 device on the system. 
If it does not exist we assume 0,0,0660 for the +// stat data +func getBaseLoopStats() (*syscall.Stat_t, error) { + loop0, err := os.Stat("/dev/loop0") + if err != nil { + if os.IsNotExist(err) { + return &syscall.Stat_t{ + Uid: 0, + Gid: 0, + Mode: 0660, + }, nil + } + return nil, err + } + return loop0.Sys().(*syscall.Stat_t), nil +} + func newDriver(t *testing.T, name string) *Driver { root, err := ioutil.TempDir("/var/tmp", "docker-graphtest-") if err != nil { From acdf766069f8e8c65fd3de3ce4f8efc15f421abd Mon Sep 17 00:00:00 2001 From: Vincent Batts Date: Fri, 14 Nov 2014 09:33:13 -0500 Subject: [PATCH 420/592] pkg/devicemapper: clarify TaskCreate and createTask * Rename and expose createTask() to TaskCreateNamed() * add comments Signed-off-by: Vincent Batts --- pkg/devicemapper/devmapper.go | 55 ++++++++++++++++++----------------- 1 file changed, 29 insertions(+), 26 deletions(-) diff --git a/pkg/devicemapper/devmapper.go b/pkg/devicemapper/devmapper.go index 16c0ac1c8c..e5c99ae677 100644 --- a/pkg/devicemapper/devmapper.go +++ b/pkg/devicemapper/devmapper.go @@ -63,7 +63,7 @@ var ( ErrGetLibraryVersion = errors.New("dm_get_library_version failed") ErrCreateRemoveTask = errors.New("Can't create task of type DeviceRemove") ErrRunRemoveDevice = errors.New("running RemoveDevice failed") - ErrInvalidAddNode = errors.New("Invalide AddNoce type") + ErrInvalidAddNode = errors.New("Invalid AddNode type") ErrGetLoopbackBackingFile = errors.New("Unable to get loopback backing file") ErrLoopbackSetCapacity = errors.New("Unable set loopback capacity") ErrBusy = errors.New("Device is Busy") @@ -104,6 +104,20 @@ func (t *Task) destroy() { } } +// TaskCreateNamed is a convenience function for TaskCreate when a name +// will be set on the task as well +func TaskCreateNamed(t TaskType, name string) (*Task, error) { + task := TaskCreate(t) + if task == nil { + return nil, fmt.Errorf("Can't create task of type %d", int(t)) + } + if err := task.SetName(name); err != nil { + return nil, fmt.Errorf("Can't set task name %s", name) + } + return task, nil +} + +// TaskCreate initializes a devicemapper task of tasktype func TaskCreate(tasktype TaskType) *Task { Ctask := DmTaskCreate(int(tasktype)) if Ctask == nil { @@ -298,7 +312,7 @@ func GetLibraryVersion() (string, error) { func RemoveDevice(name string) error { log.Debugf("[devmapper] RemoveDevice START") defer log.Debugf("[devmapper] RemoveDevice END") - task, err := createTask(DeviceRemove, name) + task, err := TaskCreateNamed(DeviceRemove, name) if task == nil { return err } @@ -354,7 +368,7 @@ func BlockDeviceDiscard(path string) error { // This is the programmatic example of "dmsetup create" func CreatePool(poolName string, dataFile, metadataFile *os.File, poolBlockSize uint32) error { - task, err := createTask(DeviceCreate, poolName) + task, err := TaskCreateNamed(DeviceCreate, poolName) if task == nil { return err } @@ -383,7 +397,7 @@ func CreatePool(poolName string, dataFile, metadataFile *os.File, poolBlockSize } func ReloadPool(poolName string, dataFile, metadataFile *os.File, poolBlockSize uint32) error { - task, err := createTask(DeviceReload, poolName) + task, err := TaskCreateNamed(DeviceReload, poolName) if task == nil { return err } @@ -405,19 +419,8 @@ func ReloadPool(poolName string, dataFile, metadataFile *os.File, poolBlockSize return nil } -func createTask(t TaskType, name string) (*Task, error) { - task := TaskCreate(t) - if task == nil { - return nil, fmt.Errorf("Can't create task of type %d", int(t)) - } - if err := 
task.SetName(name); err != nil { - return nil, fmt.Errorf("Can't set task name %s", name) - } - return task, nil -} - func GetDeps(name string) (*Deps, error) { - task, err := createTask(DeviceDeps, name) + task, err := TaskCreateNamed(DeviceDeps, name) if task == nil { return nil, err } @@ -428,7 +431,7 @@ func GetDeps(name string) (*Deps, error) { } func GetInfo(name string) (*Info, error) { - task, err := createTask(DeviceInfo, name) + task, err := TaskCreateNamed(DeviceInfo, name) if task == nil { return nil, err } @@ -450,9 +453,9 @@ func GetDriverVersion() (string, error) { } func GetStatus(name string) (uint64, uint64, string, string, error) { - task, err := createTask(DeviceStatus, name) + task, err := TaskCreateNamed(DeviceStatus, name) if task == nil { - log.Debugf("GetStatus: Error createTask: %s", err) + log.Debugf("GetStatus: Error TaskCreateNamed: %s", err) return 0, 0, "", "", err } if err := task.Run(); err != nil { @@ -475,7 +478,7 @@ func GetStatus(name string) (uint64, uint64, string, string, error) { } func SetTransactionId(poolName string, oldId uint64, newId uint64) error { - task, err := createTask(DeviceTargetMsg, poolName) + task, err := TaskCreateNamed(DeviceTargetMsg, poolName) if task == nil { return err } @@ -495,7 +498,7 @@ func SetTransactionId(poolName string, oldId uint64, newId uint64) error { } func SuspendDevice(name string) error { - task, err := createTask(DeviceSuspend, name) + task, err := TaskCreateNamed(DeviceSuspend, name) if task == nil { return err } @@ -506,7 +509,7 @@ func SuspendDevice(name string) error { } func ResumeDevice(name string) error { - task, err := createTask(DeviceResume, name) + task, err := TaskCreateNamed(DeviceResume, name) if task == nil { return err } @@ -528,7 +531,7 @@ func CreateDevice(poolName string, deviceId *int) error { log.Debugf("[devmapper] CreateDevice(poolName=%v, deviceId=%v)", poolName, *deviceId) for { - task, err := createTask(DeviceTargetMsg, poolName) + task, err := TaskCreateNamed(DeviceTargetMsg, poolName) if task == nil { return err } @@ -556,7 +559,7 @@ func CreateDevice(poolName string, deviceId *int) error { } func DeleteDevice(poolName string, deviceId int) error { - task, err := createTask(DeviceTargetMsg, poolName) + task, err := TaskCreateNamed(DeviceTargetMsg, poolName) if task == nil { return err } @@ -576,7 +579,7 @@ func DeleteDevice(poolName string, deviceId int) error { } func ActivateDevice(poolName string, name string, deviceId int, size uint64) error { - task, err := createTask(DeviceCreate, name) + task, err := TaskCreateNamed(DeviceCreate, name) if task == nil { return err } @@ -614,7 +617,7 @@ func CreateSnapDevice(poolName string, deviceId *int, baseName string, baseDevic } for { - task, err := createTask(DeviceTargetMsg, poolName) + task, err := TaskCreateNamed(DeviceTargetMsg, poolName) if task == nil { if doSuspend { ResumeDevice(baseName) From d4ba00bd4237ebf6e8016a350d95cc060e5e8a05 Mon Sep 17 00:00:00 2001 From: Brian Goff Date: Thu, 20 Nov 2014 13:01:59 -0500 Subject: [PATCH 421/592] Cleanup exec API docs and available params Adds pertitent information about what is expected in the json payload and comments out unsupported (exec) features in runConfig. 
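As a rough sketch of the request body implied by the parameters this patch documents for `POST /containers/(id)/exec` (AttachStdin, AttachStdout, AttachStderr, Tty, Cmd); the struct and program below are illustrative only and are not part of the patch.

```
package main

import (
	"encoding/json"
	"fmt"
	"log"
)

// execCreateRequest lists only the JSON parameters documented for
// POST /containers/(id)/exec; the type name itself is illustrative.
type execCreateRequest struct {
	AttachStdin  bool     `json:"AttachStdin"`
	AttachStdout bool     `json:"AttachStdout"`
	AttachStderr bool     `json:"AttachStderr"`
	Tty          bool     `json:"Tty"`
	Cmd          []string `json:"Cmd"`
}

func main() {
	req := execCreateRequest{
		AttachStdout: true,
		AttachStderr: true,
		Cmd:          []string{"date"},
	}
	body, err := json.MarshalIndent(req, "", "  ")
	if err != nil {
		log.Fatal(err)
	}
	// Body for POST /containers/<id>/exec; starting it then takes a body
	// with the documented Detach and Tty fields on POST /exec/<id>/start.
	fmt.Println(string(body))
}
```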
Signed-off-by: Brian Goff --- daemon/exec.go | 2 -- .../reference/api/docker_remote_api_v1.15.md | 16 +++++++++++----- .../reference/api/docker_remote_api_v1.16.md | 16 +++++++++++----- runconfig/exec.go | 7 ++++--- 4 files changed, 26 insertions(+), 15 deletions(-) diff --git a/daemon/exec.go b/daemon/exec.go index d813dbba1d..ee457f972f 100644 --- a/daemon/exec.go +++ b/daemon/exec.go @@ -122,8 +122,6 @@ func (d *Daemon) ContainerExecCreate(job *engine.Job) engine.Status { entrypoint, args := d.getEntrypointAndArgs(nil, config.Cmd) processConfig := execdriver.ProcessConfig{ - Privileged: config.Privileged, - User: config.User, Tty: config.Tty, Entrypoint: entrypoint, Arguments: args, diff --git a/docs/sources/reference/api/docker_remote_api_v1.15.md b/docs/sources/reference/api/docker_remote_api_v1.15.md index a634f7c550..599f88b29b 100644 --- a/docs/sources/reference/api/docker_remote_api_v1.15.md +++ b/docs/sources/reference/api/docker_remote_api_v1.15.md @@ -1560,7 +1560,6 @@ Sets up an exec instance in a running container `id` "Cmd":[ "date" ], - "Container":"e90e34656806", } **Example response**: @@ -1574,7 +1573,12 @@ Sets up an exec instance in a running container `id` Json Parameters: -- **execConfig** ? exec configuration. +- **AttachStdin** - Boolean value, attaches to stdin of the exec command. +- **AttachStdout** - Boolean value, attaches to stdout of the exec command. +- **AttachStderr** - Boolean value, attaches to stderr of the exec command. +- **Tty** - Boolean value to allocate a pseudo-TTY +- **Cmd** - Command to run specified as a string or an array of strings. + Status Codes: @@ -1585,8 +1589,9 @@ Status Codes: `POST /exec/(id)/start` -Starts a previously set up exec instance `id`. If `detach` is true, this API returns after -starting the `exec` command. Otherwise, this API sets up an interactive session with the `exec` command. +Starts a previously set up exec instance `id`. If `detach` is true, this API +returns after starting the `exec` command. Otherwise, this API sets up an +interactive session with the `exec` command. **Example request**: @@ -1607,7 +1612,8 @@ starting the `exec` command. Otherwise, this API sets up an interactive session Json Parameters: -- **execConfig** ? exec configuration. +- **Detach** - Detach from the exec command +- **Tty** - Boolean value to allocate a pseudo-TTY Status Codes: diff --git a/docs/sources/reference/api/docker_remote_api_v1.16.md b/docs/sources/reference/api/docker_remote_api_v1.16.md index d8ce9469a6..ed70a62c9d 100644 --- a/docs/sources/reference/api/docker_remote_api_v1.16.md +++ b/docs/sources/reference/api/docker_remote_api_v1.16.md @@ -1511,7 +1511,6 @@ Sets up an exec instance in a running container `id` "Cmd":[ "date" ], - "Container":"e90e34656806", } **Example response**: @@ -1525,7 +1524,12 @@ Sets up an exec instance in a running container `id` Json Parameters: -- **execConfig** ? exec configuration. +- **AttachStdin** - Boolean value, attaches to stdin of the exec command. +- **AttachStdout** - Boolean value, attaches to stdout of the exec command. +- **AttachStderr** - Boolean value, attaches to stderr of the exec command. +- **Tty** - Boolean value to allocate a pseudo-TTY +- **Cmd** - Command to run specified as a string or an array of strings. + Status Codes: @@ -1536,8 +1540,9 @@ Status Codes: `POST /exec/(id)/start` -Starts a previously set up exec instance `id`. If `detach` is true, this API returns after -starting the `exec` command. 
Otherwise, this API sets up an interactive session with the `exec` command. +Starts a previously set up exec instance `id`. If `detach` is true, this API +returns after starting the `exec` command. Otherwise, this API sets up an +interactive session with the `exec` command. **Example request**: @@ -1558,7 +1563,8 @@ starting the `exec` command. Otherwise, this API sets up an interactive session Json Parameters: -- **execConfig** ? exec configuration. +- **Detach** - Detach from the exec command +- **Tty** - Boolean value to allocate a pseudo-TTY Status Codes: diff --git a/runconfig/exec.go b/runconfig/exec.go index 07de3e43bc..b83c11bd1d 100644 --- a/runconfig/exec.go +++ b/runconfig/exec.go @@ -19,10 +19,11 @@ type ExecConfig struct { func ExecConfigFromJob(job *engine.Job) *ExecConfig { execConfig := &ExecConfig{ - User: job.Getenv("User"), - Privileged: job.GetenvBool("Privileged"), + // TODO(vishh): Expose 'User' once it is supported. + //User: job.Getenv("User"), + // TODO(vishh): Expose 'Privileged' once it is supported. + //Privileged: job.GetenvBool("Privileged"), Tty: job.GetenvBool("Tty"), - Container: job.Getenv("Container"), AttachStdin: job.GetenvBool("AttachStdin"), AttachStderr: job.GetenvBool("AttachStderr"), AttachStdout: job.GetenvBool("AttachStdout"), From 553b50bd37ade60bfafe5d5cc10f984251741f44 Mon Sep 17 00:00:00 2001 From: Mike Snitzer Date: Fri, 21 Nov 2014 21:36:23 -0500 Subject: [PATCH 422/592] devmapper: remove unnecessary else branch in getPoolName() Docker-DCO-1.1-Signed-off-by: Mike Snitzer (github: snitm) --- daemon/graphdriver/devmapper/deviceset.go | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/daemon/graphdriver/devmapper/deviceset.go b/daemon/graphdriver/devmapper/deviceset.go index f28dc982bd..e4fa3e7195 100644 --- a/daemon/graphdriver/devmapper/deviceset.go +++ b/daemon/graphdriver/devmapper/deviceset.go @@ -153,9 +153,8 @@ func (devices *DeviceSet) oldMetadataFile() string { func (devices *DeviceSet) getPoolName() string { if devices.thinPoolDevice == "" { return devices.devicePrefix + "-pool" - } else { - return devices.thinPoolDevice } + return devices.thinPoolDevice } func (devices *DeviceSet) getPoolDevName() string { From 30979ad5a0f94097e7dc6763c26fc33dc716d572 Mon Sep 17 00:00:00 2001 From: James Mills Date: Sat, 22 Nov 2014 15:40:39 +1000 Subject: [PATCH 423/592] Updated installation docs for CRUX as a Docker Host. Docker-DCO-1.1-Signed-off-by: James Mills (github: therealprologic) --- docs/sources/installation/cruxlinux.md | 35 ++++++++++++++++---------- 1 file changed, 22 insertions(+), 13 deletions(-) diff --git a/docs/sources/installation/cruxlinux.md b/docs/sources/installation/cruxlinux.md index 28efde376a..ead4c273ca 100644 --- a/docs/sources/installation/cruxlinux.md +++ b/docs/sources/installation/cruxlinux.md @@ -4,16 +4,14 @@ page_keywords: crux linux, virtualization, Docker, documentation, installation # CRUX Linux -Installing on CRUX Linux can be handled via the ports from [James -Mills](http://prologic.shortcircuit.net.au/) and are included in the +Installing on CRUX Linux can be handled via the contrib ports from +[James Mills](http://prologic.shortcircuit.net.au/) and are included in the official [contrib](http://crux.nu/portdb/?a=repo&q=contrib) ports: - docker -- docker-bin -The `docker` port will install the latest tagged -version of Docker. The `docker-bin` port will -install the latest tagged version of Docker from upstream built binaries. 
+The `docker` port will build and install the latest tagged version of Docker. + ## Installation @@ -21,22 +19,21 @@ Assuming you have contrib enabled, update your ports tree and install docker (*a # prt-get depinst docker -You can install `docker-bin` instead if you wish to avoid compilation time. - ## Kernel Requirements To have a working **CRUX+Docker** Host you must ensure your Kernel has -the necessary modules enabled for LXC containers to function correctly -and Docker Daemon to work properly. +the necessary modules enabled for the Docker Daemon to function correctly. Please read the `README`: $ prt-get readme docker -The `docker` and `docker-bin` ports install the `contrib/check-config.sh` -script provided by the Docker contributors for checking your kernel -configuration as a suitable Docker Host. +The `docker` port installs the `contrib/check-config.sh` script +provided by the Docker contributors for checking your kernel +configuration as a suitable Docker host. + +To check your Kernel configuration run: $ /usr/share/docker/check-config.sh @@ -51,6 +48,18 @@ To start on system boot: - Edit `/etc/rc.conf` - Put `docker` into the `SERVICES=(...)` array after `net`. +## Images + +There is a CRUX image maintained by [James Mills](http://prologic.shortcircuit.net.au/) +as part of the Docker "Official Library" of images. To use this image simply pull it +or use it as part of your `FROM` line in your `Dockerfile(s)`. + + $ docker pull crux + $ docker run -i -t crux + +There are also user contributed [CRUX based image(s)](https://registry.hub.docker.com/repos/crux/) on the Docker Hub. + + ## Issues If you have any issues please file a bug with the From 88afbc4d94c4a803e936d602c620b8ab08e24acd Mon Sep 17 00:00:00 2001 From: Doug Davis Date: Sat, 22 Nov 2014 05:25:57 -0800 Subject: [PATCH 424/592] Add missing unit testcase for new IsSet() func in mflag Forgot to add this when I did PR #9259 Signed-off-by: Doug Davis --- pkg/mflag/flag_test.go | 22 ++++++++++++++++++++++ 1 file changed, 22 insertions(+) diff --git a/pkg/mflag/flag_test.go b/pkg/mflag/flag_test.go index 340a1cb175..622e8a9bfc 100644 --- a/pkg/mflag/flag_test.go +++ b/pkg/mflag/flag_test.go @@ -168,11 +168,14 @@ func testParse(f *FlagSet, t *testing.T) { } boolFlag := f.Bool([]string{"bool"}, false, "bool value") bool2Flag := f.Bool([]string{"bool2"}, false, "bool2 value") + f.Bool([]string{"bool3"}, false, "bool3 value") + bool4Flag := f.Bool([]string{"bool4"}, false, "bool4 value") intFlag := f.Int([]string{"-int"}, 0, "int value") int64Flag := f.Int64([]string{"-int64"}, 0, "int64 value") uintFlag := f.Uint([]string{"uint"}, 0, "uint value") uint64Flag := f.Uint64([]string{"-uint64"}, 0, "uint64 value") stringFlag := f.String([]string{"string"}, "0", "string value") + f.String([]string{"string2"}, "0", "string2 value") singleQuoteFlag := f.String([]string{"squote"}, "", "single quoted value") doubleQuoteFlag := f.String([]string{"dquote"}, "", "double quoted value") mixedQuoteFlag := f.String([]string{"mquote"}, "", "mixed quoted value") @@ -185,6 +188,7 @@ func testParse(f *FlagSet, t *testing.T) { args := []string{ "-bool", "-bool2=true", + "-bool4=false", "--int", "22", "--int64", "0x23", "-uint", "24", @@ -212,6 +216,18 @@ func testParse(f *FlagSet, t *testing.T) { if *bool2Flag != true { t.Error("bool2 flag should be true, is ", *bool2Flag) } + if !f.IsSet("bool2") { + t.Error("bool2 should be marked as set") + } + if f.IsSet("bool3") { + t.Error("bool3 should not be marked as set") + } + if !f.IsSet("bool4") { + 
t.Error("bool4 should be marked as set") + } + if *bool4Flag != false { + t.Error("bool4 flag should be false, is ", *bool4Flag) + } if *intFlag != 22 { t.Error("int flag should be 22, is ", *intFlag) } @@ -227,6 +243,12 @@ func testParse(f *FlagSet, t *testing.T) { if *stringFlag != "hello" { t.Error("string flag should be `hello`, is ", *stringFlag) } + if !f.IsSet("string") { + t.Error("string flag should be marked as set") + } + if f.IsSet("string2") { + t.Error("string2 flag should not be marked as set") + } if *singleQuoteFlag != "single" { t.Error("single quote string flag should be `single`, is ", *singleQuoteFlag) } From 34fe2a372576907cb7ec26cf22ac4e93b8974f6e Mon Sep 17 00:00:00 2001 From: Vincent Bernat Date: Sun, 23 Nov 2014 00:45:14 +0100 Subject: [PATCH 425/592] zsh: correctly parse available subcommands A lot of flags have been added on the output of `docker help`. Use a more robust method to extract the list of available subcommands by spotting the `Command:` line and the next blank line. Signed-off-by: Vincent Bernat --- contrib/completion/zsh/_docker | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/contrib/completion/zsh/_docker b/contrib/completion/zsh/_docker index c13a849783..9104f385d7 100644 --- a/contrib/completion/zsh/_docker +++ b/contrib/completion/zsh/_docker @@ -177,7 +177,9 @@ __docker_commands () { if ( [[ ${+_docker_subcommands} -eq 0 ]] || _cache_invalid docker_subcommands) \ && ! _retrieve_cache docker_subcommands; then - _docker_subcommands=(${${${${(f)"$(_call_program commands docker 2>&1)"}[5,-1]}## #}/ ##/:}) + local -a lines + lines=(${(f)"$(_call_program commands docker 2>&1)"}) + _docker_subcommands=(${${${lines[$((${lines[(i)Commands:]} + 1)),${lines[(I) *]}]}## #}/ ##/:}) _docker_subcommands=($_docker_subcommands 'help:Show help for a command') _store_cache docker_subcommands _docker_subcommands fi From 745e3f77a127c5be2e7d563e402e3e4a7d5d7729 Mon Sep 17 00:00:00 2001 From: "Dmitry V. Krivenok" Date: Sun, 23 Nov 2014 22:59:35 +0300 Subject: [PATCH 426/592] Fixed typo in documentation. --- docs/sources/reference/commandline/cli.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/sources/reference/commandline/cli.md b/docs/sources/reference/commandline/cli.md index 6dedb4799f..bf0833d57a 100644 --- a/docs/sources/reference/commandline/cli.md +++ b/docs/sources/reference/commandline/cli.md @@ -155,7 +155,7 @@ string is equivalent to setting the `--tlsverify` flag. The following are equiva ### Daemon storage-driver option -The Docker daemon has support for three different image layer storage drivers: `aufs`, +The Docker daemon has support for four different image layer storage drivers: `aufs`, `devicemapper`, `btrfs` and `overlayfs`. The `aufs` driver is the oldest, but is based on a Linux kernel patch-set that From 91a8b916b09615119e80a1193f1a2f6c01143106 Mon Sep 17 00:00:00 2001 From: Richard Metzler Date: Sun, 23 Nov 2014 23:57:43 +0100 Subject: [PATCH 427/592] Empty Line should fix Markdown unordered list Without the line break the list would render as one single paragraph. 
--- docs/sources/reference/api/docker_remote_api_v1.15.md | 1 + 1 file changed, 1 insertion(+) diff --git a/docs/sources/reference/api/docker_remote_api_v1.15.md b/docs/sources/reference/api/docker_remote_api_v1.15.md index a634f7c550..6c95a94b23 100644 --- a/docs/sources/reference/api/docker_remote_api_v1.15.md +++ b/docs/sources/reference/api/docker_remote_api_v1.15.md @@ -524,6 +524,7 @@ Start the container `id` HTTP/1.1 204 No Content Json Parameters: + - **Binds** – A list of volume bindings for this container. Each volume binding is a string of the form `container_path` (to create a new volume for the container), `host_path:container_path` (to bind-mount From 7fbbd515b1018721e91199960d1933383a8262a1 Mon Sep 17 00:00:00 2001 From: Daehyeok Mun Date: Tue, 25 Nov 2014 00:32:38 +0900 Subject: [PATCH 428/592] remove deprecated cmd function in integration-cli Remove deprecated cmd function in integration-cli and change cmd to dockerCmd in all test files Signed-off-by: Daehyeok Mun --- integration-cli/docker_cli_build_test.go | 2 +- integration-cli/docker_cli_cp_test.go | 48 +++++++++++------------ integration-cli/docker_cli_events_test.go | 26 ++++++------ integration-cli/docker_cli_links_test.go | 32 +++++++-------- integration-cli/docker_cli_rmi_test.go | 24 ++++++------ integration-cli/docker_cli_run_test.go | 6 +-- integration-cli/docker_cli_start_test.go | 18 ++++----- integration-cli/docker_utils.go | 5 --- 8 files changed, 78 insertions(+), 83 deletions(-) diff --git a/integration-cli/docker_cli_build_test.go b/integration-cli/docker_cli_build_test.go index ea8f54d932..1d287bd7dc 100644 --- a/integration-cli/docker_cli_build_test.go +++ b/integration-cli/docker_cli_build_test.go @@ -2429,7 +2429,7 @@ func TestBuildNoContext(t *testing.T) { t.Fatalf("build failed to complete: %v %v", out, err) } - if out, _, err := cmd(t, "run", "--rm", "nocontext"); out != "ok\n" || err != nil { + if out, _, err := dockerCmd(t, "run", "--rm", "nocontext"); out != "ok\n" || err != nil { t.Fatalf("run produced invalid output: %q, expected %q", out, "ok") } diff --git a/integration-cli/docker_cli_cp_test.go b/integration-cli/docker_cli_cp_test.go index b89ddde0b4..3ebb2ab14f 100644 --- a/integration-cli/docker_cli_cp_test.go +++ b/integration-cli/docker_cli_cp_test.go @@ -23,7 +23,7 @@ const ( // Test for #5656 // Check that garbage paths don't escape the container's rootfs func TestCpGarbagePath(t *testing.T) { - out, exitCode, err := cmd(t, "run", "-d", "busybox", "/bin/sh", "-c", "mkdir -p '"+cpTestPath+"' && echo -n '"+cpContainerContents+"' > "+cpFullPath) + out, exitCode, err := dockerCmd(t, "run", "-d", "busybox", "/bin/sh", "-c", "mkdir -p '"+cpTestPath+"' && echo -n '"+cpContainerContents+"' > "+cpFullPath) if err != nil || exitCode != 0 { t.Fatal("failed to create a container", out, err) } @@ -31,7 +31,7 @@ func TestCpGarbagePath(t *testing.T) { cleanedContainerID := stripTrailingCharacters(out) defer deleteContainer(cleanedContainerID) - out, _, err = cmd(t, "wait", cleanedContainerID) + out, _, err = dockerCmd(t, "wait", cleanedContainerID) if err != nil || stripTrailingCharacters(out) != "0" { t.Fatal("failed to set up container", out, err) } @@ -59,7 +59,7 @@ func TestCpGarbagePath(t *testing.T) { path := filepath.Join("../../../../../../../../../../../../", cpFullPath) - _, _, err = cmd(t, "cp", cleanedContainerID+":"+path, tmpdir) + _, _, err = dockerCmd(t, "cp", cleanedContainerID+":"+path, tmpdir) if err != nil { t.Fatalf("couldn't copy from garbage path: %s:%s %s", 
cleanedContainerID, path, err) } @@ -85,7 +85,7 @@ func TestCpGarbagePath(t *testing.T) { // Check that relative paths are relative to the container's rootfs func TestCpRelativePath(t *testing.T) { - out, exitCode, err := cmd(t, "run", "-d", "busybox", "/bin/sh", "-c", "mkdir -p '"+cpTestPath+"' && echo -n '"+cpContainerContents+"' > "+cpFullPath) + out, exitCode, err := dockerCmd(t, "run", "-d", "busybox", "/bin/sh", "-c", "mkdir -p '"+cpTestPath+"' && echo -n '"+cpContainerContents+"' > "+cpFullPath) if err != nil || exitCode != 0 { t.Fatal("failed to create a container", out, err) } @@ -93,7 +93,7 @@ func TestCpRelativePath(t *testing.T) { cleanedContainerID := stripTrailingCharacters(out) defer deleteContainer(cleanedContainerID) - out, _, err = cmd(t, "wait", cleanedContainerID) + out, _, err = dockerCmd(t, "wait", cleanedContainerID) if err != nil || stripTrailingCharacters(out) != "0" { t.Fatal("failed to set up container", out, err) } @@ -122,7 +122,7 @@ func TestCpRelativePath(t *testing.T) { path, _ := filepath.Rel("/", cpFullPath) - _, _, err = cmd(t, "cp", cleanedContainerID+":"+path, tmpdir) + _, _, err = dockerCmd(t, "cp", cleanedContainerID+":"+path, tmpdir) if err != nil { t.Fatalf("couldn't copy from relative path: %s:%s %s", cleanedContainerID, path, err) } @@ -148,7 +148,7 @@ func TestCpRelativePath(t *testing.T) { // Check that absolute paths are relative to the container's rootfs func TestCpAbsolutePath(t *testing.T) { - out, exitCode, err := cmd(t, "run", "-d", "busybox", "/bin/sh", "-c", "mkdir -p '"+cpTestPath+"' && echo -n '"+cpContainerContents+"' > "+cpFullPath) + out, exitCode, err := dockerCmd(t, "run", "-d", "busybox", "/bin/sh", "-c", "mkdir -p '"+cpTestPath+"' && echo -n '"+cpContainerContents+"' > "+cpFullPath) if err != nil || exitCode != 0 { t.Fatal("failed to create a container", out, err) } @@ -156,7 +156,7 @@ func TestCpAbsolutePath(t *testing.T) { cleanedContainerID := stripTrailingCharacters(out) defer deleteContainer(cleanedContainerID) - out, _, err = cmd(t, "wait", cleanedContainerID) + out, _, err = dockerCmd(t, "wait", cleanedContainerID) if err != nil || stripTrailingCharacters(out) != "0" { t.Fatal("failed to set up container", out, err) } @@ -185,7 +185,7 @@ func TestCpAbsolutePath(t *testing.T) { path := cpFullPath - _, _, err = cmd(t, "cp", cleanedContainerID+":"+path, tmpdir) + _, _, err = dockerCmd(t, "cp", cleanedContainerID+":"+path, tmpdir) if err != nil { t.Fatalf("couldn't copy from absolute path: %s:%s %s", cleanedContainerID, path, err) } @@ -212,7 +212,7 @@ func TestCpAbsolutePath(t *testing.T) { // Test for #5619 // Check that absolute symlinks are still relative to the container's rootfs func TestCpAbsoluteSymlink(t *testing.T) { - out, exitCode, err := cmd(t, "run", "-d", "busybox", "/bin/sh", "-c", "mkdir -p '"+cpTestPath+"' && echo -n '"+cpContainerContents+"' > "+cpFullPath+" && ln -s "+cpFullPath+" container_path") + out, exitCode, err := dockerCmd(t, "run", "-d", "busybox", "/bin/sh", "-c", "mkdir -p '"+cpTestPath+"' && echo -n '"+cpContainerContents+"' > "+cpFullPath+" && ln -s "+cpFullPath+" container_path") if err != nil || exitCode != 0 { t.Fatal("failed to create a container", out, err) } @@ -220,7 +220,7 @@ func TestCpAbsoluteSymlink(t *testing.T) { cleanedContainerID := stripTrailingCharacters(out) defer deleteContainer(cleanedContainerID) - out, _, err = cmd(t, "wait", cleanedContainerID) + out, _, err = dockerCmd(t, "wait", cleanedContainerID) if err != nil || stripTrailingCharacters(out) != "0" { t.Fatal("failed 
to set up container", out, err) } @@ -249,7 +249,7 @@ func TestCpAbsoluteSymlink(t *testing.T) { path := filepath.Join("/", "container_path") - _, _, err = cmd(t, "cp", cleanedContainerID+":"+path, tmpdir) + _, _, err = dockerCmd(t, "cp", cleanedContainerID+":"+path, tmpdir) if err != nil { t.Fatalf("couldn't copy from absolute path: %s:%s %s", cleanedContainerID, path, err) } @@ -276,7 +276,7 @@ func TestCpAbsoluteSymlink(t *testing.T) { // Test for #5619 // Check that symlinks which are part of the resource path are still relative to the container's rootfs func TestCpSymlinkComponent(t *testing.T) { - out, exitCode, err := cmd(t, "run", "-d", "busybox", "/bin/sh", "-c", "mkdir -p '"+cpTestPath+"' && echo -n '"+cpContainerContents+"' > "+cpFullPath+" && ln -s "+cpTestPath+" container_path") + out, exitCode, err := dockerCmd(t, "run", "-d", "busybox", "/bin/sh", "-c", "mkdir -p '"+cpTestPath+"' && echo -n '"+cpContainerContents+"' > "+cpFullPath+" && ln -s "+cpTestPath+" container_path") if err != nil || exitCode != 0 { t.Fatal("failed to create a container", out, err) } @@ -284,7 +284,7 @@ func TestCpSymlinkComponent(t *testing.T) { cleanedContainerID := stripTrailingCharacters(out) defer deleteContainer(cleanedContainerID) - out, _, err = cmd(t, "wait", cleanedContainerID) + out, _, err = dockerCmd(t, "wait", cleanedContainerID) if err != nil || stripTrailingCharacters(out) != "0" { t.Fatal("failed to set up container", out, err) } @@ -313,7 +313,7 @@ func TestCpSymlinkComponent(t *testing.T) { path := filepath.Join("/", "container_path", cpTestName) - _, _, err = cmd(t, "cp", cleanedContainerID+":"+path, tmpdir) + _, _, err = dockerCmd(t, "cp", cleanedContainerID+":"+path, tmpdir) if err != nil { t.Fatalf("couldn't copy from symlink path component: %s:%s %s", cleanedContainerID, path, err) } @@ -339,7 +339,7 @@ func TestCpSymlinkComponent(t *testing.T) { // Check that cp with unprivileged user doesn't return any error func TestCpUnprivilegedUser(t *testing.T) { - out, exitCode, err := cmd(t, "run", "-d", "busybox", "/bin/sh", "-c", "touch "+cpTestName) + out, exitCode, err := dockerCmd(t, "run", "-d", "busybox", "/bin/sh", "-c", "touch "+cpTestName) if err != nil || exitCode != 0 { t.Fatal("failed to create a container", out, err) } @@ -347,7 +347,7 @@ func TestCpUnprivilegedUser(t *testing.T) { cleanedContainerID := stripTrailingCharacters(out) defer deleteContainer(cleanedContainerID) - out, _, err = cmd(t, "wait", cleanedContainerID) + out, _, err = dockerCmd(t, "wait", cleanedContainerID) if err != nil || stripTrailingCharacters(out) != "0" { t.Fatal("failed to set up container", out, err) } @@ -389,7 +389,7 @@ func TestCpVolumePath(t *testing.T) { t.Fatal(err) } - out, exitCode, err := cmd(t, "run", "-d", "-v", "/foo", "-v", tmpDir+"/test:/test", "-v", tmpDir+":/baz", "busybox", "/bin/sh", "-c", "touch /foo/bar") + out, exitCode, err := dockerCmd(t, "run", "-d", "-v", "/foo", "-v", tmpDir+"/test:/test", "-v", tmpDir+":/baz", "busybox", "/bin/sh", "-c", "touch /foo/bar") if err != nil || exitCode != 0 { t.Fatal("failed to create a container", out, err) } @@ -397,13 +397,13 @@ func TestCpVolumePath(t *testing.T) { cleanedContainerID := stripTrailingCharacters(out) defer deleteContainer(cleanedContainerID) - out, _, err = cmd(t, "wait", cleanedContainerID) + out, _, err = dockerCmd(t, "wait", cleanedContainerID) if err != nil || stripTrailingCharacters(out) != "0" { t.Fatal("failed to set up container", out, err) } // Copy actual volume path - _, _, err = cmd(t, "cp", 
cleanedContainerID+":/foo", outDir) + _, _, err = dockerCmd(t, "cp", cleanedContainerID+":/foo", outDir) if err != nil { t.Fatalf("couldn't copy from volume path: %s:%s %v", cleanedContainerID, "/foo", err) } @@ -423,7 +423,7 @@ func TestCpVolumePath(t *testing.T) { } // Copy file nested in volume - _, _, err = cmd(t, "cp", cleanedContainerID+":/foo/bar", outDir) + _, _, err = dockerCmd(t, "cp", cleanedContainerID+":/foo/bar", outDir) if err != nil { t.Fatalf("couldn't copy from volume path: %s:%s %v", cleanedContainerID, "/foo", err) } @@ -436,7 +436,7 @@ func TestCpVolumePath(t *testing.T) { } // Copy Bind-mounted dir - _, _, err = cmd(t, "cp", cleanedContainerID+":/baz", outDir) + _, _, err = dockerCmd(t, "cp", cleanedContainerID+":/baz", outDir) if err != nil { t.Fatalf("couldn't copy from bind-mounted volume path: %s:%s %v", cleanedContainerID, "/baz", err) } @@ -449,7 +449,7 @@ func TestCpVolumePath(t *testing.T) { } // Copy file nested in bind-mounted dir - _, _, err = cmd(t, "cp", cleanedContainerID+":/baz/test", outDir) + _, _, err = dockerCmd(t, "cp", cleanedContainerID+":/baz/test", outDir) fb, err := ioutil.ReadFile(outDir + "/baz/test") if err != nil { t.Fatal(err) @@ -463,7 +463,7 @@ func TestCpVolumePath(t *testing.T) { } // Copy bind-mounted file - _, _, err = cmd(t, "cp", cleanedContainerID+":/test", outDir) + _, _, err = dockerCmd(t, "cp", cleanedContainerID+":/test", outDir) fb, err = ioutil.ReadFile(outDir + "/test") if err != nil { t.Fatal(err) diff --git a/integration-cli/docker_cli_events_test.go b/integration-cli/docker_cli_events_test.go index 5c197b92fb..600a3fa72f 100644 --- a/integration-cli/docker_cli_events_test.go +++ b/integration-cli/docker_cli_events_test.go @@ -16,12 +16,12 @@ import ( ) func TestEventsUntag(t *testing.T) { - out, _, _ := cmd(t, "images", "-q") + out, _, _ := dockerCmd(t, "images", "-q") image := strings.Split(out, "\n")[0] - cmd(t, "tag", image, "utest:tag1") - cmd(t, "tag", image, "utest:tag2") - cmd(t, "rmi", "utest:tag1") - cmd(t, "rmi", "utest:tag2") + dockerCmd(t, "tag", image, "utest:tag1") + dockerCmd(t, "tag", image, "utest:tag2") + dockerCmd(t, "rmi", "utest:tag1") + dockerCmd(t, "rmi", "utest:tag2") eventsCmd := exec.Command("timeout", "0.2", dockerBinary, "events", "--since=1") out, _, _ = runCommandWithOutput(eventsCmd) events := strings.Split(out, "\n") @@ -39,11 +39,11 @@ func TestEventsUntag(t *testing.T) { func TestEventsPause(t *testing.T) { name := "testeventpause" - out, _, _ := cmd(t, "images", "-q") + out, _, _ := dockerCmd(t, "images", "-q") image := strings.Split(out, "\n")[0] - cmd(t, "run", "-d", "--name", name, image, "sleep", "2") - cmd(t, "pause", name) - cmd(t, "unpause", name) + dockerCmd(t, "run", "-d", "--name", name, image, "sleep", "2") + dockerCmd(t, "pause", name) + dockerCmd(t, "unpause", name) defer deleteAllContainers() @@ -75,7 +75,7 @@ func TestEventsPause(t *testing.T) { func TestEventsContainerFailStartDie(t *testing.T) { defer deleteAllContainers() - out, _, _ := cmd(t, "images", "-q") + out, _, _ := dockerCmd(t, "images", "-q") image := strings.Split(out, "\n")[0] eventsCmd := exec.Command(dockerBinary, "run", "-d", "--name", "testeventdie", image, "blerg") _, _, err := runCommandWithOutput(eventsCmd) @@ -106,7 +106,7 @@ func TestEventsContainerFailStartDie(t *testing.T) { func TestEventsLimit(t *testing.T) { defer deleteAllContainers() for i := 0; i < 30; i++ { - cmd(t, "run", "busybox", "echo", strconv.Itoa(i)) + dockerCmd(t, "run", "busybox", "echo", strconv.Itoa(i)) } eventsCmd := 
exec.Command(dockerBinary, "events", "--since=0", fmt.Sprintf("--until=%d", time.Now().Unix())) out, _, _ := runCommandWithOutput(eventsCmd) @@ -119,7 +119,7 @@ func TestEventsLimit(t *testing.T) { } func TestEventsContainerEvents(t *testing.T) { - cmd(t, "run", "--rm", "busybox", "true") + dockerCmd(t, "run", "--rm", "busybox", "true") eventsCmd := exec.Command(dockerBinary, "events", "--since=0", fmt.Sprintf("--until=%d", time.Now().Unix())) out, exitCode, err := runCommandWithOutput(eventsCmd) if exitCode != 0 || err != nil { @@ -190,7 +190,7 @@ func TestEventsRedirectStdout(t *testing.T) { since := time.Now().Unix() - cmd(t, "run", "busybox", "true") + dockerCmd(t, "run", "busybox", "true") defer deleteAllContainers() diff --git a/integration-cli/docker_cli_links_test.go b/integration-cli/docker_cli_links_test.go index 7b19434fb5..f202ce10a2 100644 --- a/integration-cli/docker_cli_links_test.go +++ b/integration-cli/docker_cli_links_test.go @@ -62,21 +62,21 @@ func TestLinksPingUnlinkedContainers(t *testing.T) { func TestLinksPingLinkedContainers(t *testing.T) { var out string - out, _, _ = cmd(t, "run", "-d", "--name", "container1", "busybox", "sleep", "10") + out, _, _ = dockerCmd(t, "run", "-d", "--name", "container1", "busybox", "sleep", "10") idA := stripTrailingCharacters(out) - out, _, _ = cmd(t, "run", "-d", "--name", "container2", "busybox", "sleep", "10") + out, _, _ = dockerCmd(t, "run", "-d", "--name", "container2", "busybox", "sleep", "10") idB := stripTrailingCharacters(out) - cmd(t, "run", "--rm", "--link", "container1:alias1", "--link", "container2:alias2", "busybox", "sh", "-c", "ping -c 1 alias1 -W 1 && ping -c 1 alias2 -W 1") - cmd(t, "kill", idA) - cmd(t, "kill", idB) + dockerCmd(t, "run", "--rm", "--link", "container1:alias1", "--link", "container2:alias2", "busybox", "sh", "-c", "ping -c 1 alias1 -W 1 && ping -c 1 alias2 -W 1") + dockerCmd(t, "kill", idA) + dockerCmd(t, "kill", idB) deleteAllContainers() logDone("links - ping linked container") } func TestLinksIpTablesRulesWhenLinkAndUnlink(t *testing.T) { - cmd(t, "run", "-d", "--name", "child", "--publish", "8080:80", "busybox", "sleep", "10") - cmd(t, "run", "-d", "--name", "parent", "--link", "child:http", "busybox", "sleep", "10") + dockerCmd(t, "run", "-d", "--name", "child", "--publish", "8080:80", "busybox", "sleep", "10") + dockerCmd(t, "run", "-d", "--name", "parent", "--link", "child:http", "busybox", "sleep", "10") childIP := findContainerIP(t, "child") parentIP := findContainerIP(t, "parent") @@ -87,13 +87,13 @@ func TestLinksIpTablesRulesWhenLinkAndUnlink(t *testing.T) { t.Fatal("Iptables rules not found") } - cmd(t, "rm", "--link", "parent/http") + dockerCmd(t, "rm", "--link", "parent/http") if iptables.Exists(sourceRule...) || iptables.Exists(destinationRule...) 
{ t.Fatal("Iptables rules should be removed when unlink") } - cmd(t, "kill", "child") - cmd(t, "kill", "parent") + dockerCmd(t, "kill", "child") + dockerCmd(t, "kill", "parent") deleteAllContainers() logDone("link - verify iptables when link and unlink") @@ -105,9 +105,9 @@ func TestLinksInspectLinksStarted(t *testing.T) { result []string ) defer deleteAllContainers() - cmd(t, "run", "-d", "--name", "container1", "busybox", "sleep", "10") - cmd(t, "run", "-d", "--name", "container2", "busybox", "sleep", "10") - cmd(t, "run", "-d", "--name", "testinspectlink", "--link", "container1:alias1", "--link", "container2:alias2", "busybox", "sleep", "10") + dockerCmd(t, "run", "-d", "--name", "container1", "busybox", "sleep", "10") + dockerCmd(t, "run", "-d", "--name", "container2", "busybox", "sleep", "10") + dockerCmd(t, "run", "-d", "--name", "testinspectlink", "--link", "container1:alias1", "--link", "container2:alias2", "busybox", "sleep", "10") links, err := inspectFieldJSON("testinspectlink", "HostConfig.Links") if err != nil { t.Fatal(err) @@ -134,9 +134,9 @@ func TestLinksInspectLinksStopped(t *testing.T) { result []string ) defer deleteAllContainers() - cmd(t, "run", "-d", "--name", "container1", "busybox", "sleep", "10") - cmd(t, "run", "-d", "--name", "container2", "busybox", "sleep", "10") - cmd(t, "run", "-d", "--name", "testinspectlink", "--link", "container1:alias1", "--link", "container2:alias2", "busybox", "true") + dockerCmd(t, "run", "-d", "--name", "container1", "busybox", "sleep", "10") + dockerCmd(t, "run", "-d", "--name", "container2", "busybox", "sleep", "10") + dockerCmd(t, "run", "-d", "--name", "testinspectlink", "--link", "container1:alias1", "--link", "container2:alias2", "busybox", "true") links, err := inspectFieldJSON("testinspectlink", "HostConfig.Links") if err != nil { t.Fatal(err) diff --git a/integration-cli/docker_cli_rmi_test.go b/integration-cli/docker_cli_rmi_test.go index 98cadfe853..4600c481fd 100644 --- a/integration-cli/docker_cli_rmi_test.go +++ b/integration-cli/docker_cli_rmi_test.go @@ -29,7 +29,7 @@ func TestRmiWithContainerFails(t *testing.T) { } // make sure it didn't delete the busybox name - images, _, _ := cmd(t, "images") + images, _, _ := dockerCmd(t, "images") if !strings.Contains(images, "busybox") { t.Fatalf("The name 'busybox' should not have been removed from images: %q", images) } @@ -40,35 +40,35 @@ func TestRmiWithContainerFails(t *testing.T) { } func TestRmiTag(t *testing.T) { - imagesBefore, _, _ := cmd(t, "images", "-a") - cmd(t, "tag", "busybox", "utest:tag1") - cmd(t, "tag", "busybox", "utest/docker:tag2") - cmd(t, "tag", "busybox", "utest:5000/docker:tag3") + imagesBefore, _, _ := dockerCmd(t, "images", "-a") + dockerCmd(t, "tag", "busybox", "utest:tag1") + dockerCmd(t, "tag", "busybox", "utest/docker:tag2") + dockerCmd(t, "tag", "busybox", "utest:5000/docker:tag3") { - imagesAfter, _, _ := cmd(t, "images", "-a") + imagesAfter, _, _ := dockerCmd(t, "images", "-a") if nLines(imagesAfter) != nLines(imagesBefore)+3 { t.Fatalf("before: %q\n\nafter: %q\n", imagesBefore, imagesAfter) } } - cmd(t, "rmi", "utest/docker:tag2") + dockerCmd(t, "rmi", "utest/docker:tag2") { - imagesAfter, _, _ := cmd(t, "images", "-a") + imagesAfter, _, _ := dockerCmd(t, "images", "-a") if nLines(imagesAfter) != nLines(imagesBefore)+2 { t.Fatalf("before: %q\n\nafter: %q\n", imagesBefore, imagesAfter) } } - cmd(t, "rmi", "utest:5000/docker:tag3") + dockerCmd(t, "rmi", "utest:5000/docker:tag3") { - imagesAfter, _, _ := cmd(t, "images", "-a") + imagesAfter, _, 
_ := dockerCmd(t, "images", "-a") if nLines(imagesAfter) != nLines(imagesBefore)+1 { t.Fatalf("before: %q\n\nafter: %q\n", imagesBefore, imagesAfter) } } - cmd(t, "rmi", "utest:tag1") + dockerCmd(t, "rmi", "utest:tag1") { - imagesAfter, _, _ := cmd(t, "images", "-a") + imagesAfter, _, _ := dockerCmd(t, "images", "-a") if nLines(imagesAfter) != nLines(imagesBefore)+0 { t.Fatalf("before: %q\n\nafter: %q\n", imagesBefore, imagesAfter) } diff --git a/integration-cli/docker_cli_run_test.go b/integration-cli/docker_cli_run_test.go index 574d1ece64..9546af0014 100644 --- a/integration-cli/docker_cli_run_test.go +++ b/integration-cli/docker_cli_run_test.go @@ -798,7 +798,7 @@ func TestRunLoopbackWhenNetworkDisabled(t *testing.T) { } func TestRunNetHostNotAllowedWithLinks(t *testing.T) { - _, _, err := cmd(t, "run", "--name", "linked", "busybox", "true") + _, _, err := dockerCmd(t, "run", "--name", "linked", "busybox", "true") cmd := exec.Command(dockerBinary, "run", "--net=host", "--link", "linked:linked", "busybox", "true") _, _, err = runCommandWithOutput(cmd) @@ -1204,7 +1204,7 @@ func TestRunModeHostname(t *testing.T) { } func TestRunRootWorkdir(t *testing.T) { - s, _, err := cmd(t, "run", "--workdir", "/", "busybox", "pwd") + s, _, err := dockerCmd(t, "run", "--workdir", "/", "busybox", "pwd") if err != nil { t.Fatal(s, err) } @@ -1218,7 +1218,7 @@ func TestRunRootWorkdir(t *testing.T) { } func TestRunAllowBindMountingRoot(t *testing.T) { - s, _, err := cmd(t, "run", "-v", "/:/host", "busybox", "ls", "/host") + s, _, err := dockerCmd(t, "run", "-v", "/:/host", "busybox", "ls", "/host") if err != nil { t.Fatal(s, err) } diff --git a/integration-cli/docker_cli_start_test.go b/integration-cli/docker_cli_start_test.go index 6af5f43f54..da550cc776 100644 --- a/integration-cli/docker_cli_start_test.go +++ b/integration-cli/docker_cli_start_test.go @@ -12,8 +12,8 @@ import ( func TestStartAttachReturnsOnError(t *testing.T) { defer deleteAllContainers() - cmd(t, "run", "-d", "--name", "test", "busybox") - cmd(t, "stop", "test") + dockerCmd(t, "run", "-d", "--name", "test", "busybox") + dockerCmd(t, "stop", "test") // Expect this to fail because the above container is stopped, this is what we want if _, err := runCommand(exec.Command(dockerBinary, "run", "-d", "--name", "test2", "--link", "test:test", "busybox")); err == nil { @@ -73,7 +73,7 @@ func TestStartRecordError(t *testing.T) { defer deleteAllContainers() // when container runs successfully, we should not have state.Error - cmd(t, "run", "-d", "-p", "9999:9999", "--name", "test", "busybox", "top") + dockerCmd(t, "run", "-d", "-p", "9999:9999", "--name", "test", "busybox", "top") stateErr, err := inspectField("test", "State.Error") if err != nil { t.Fatalf("Failed to inspect %q state's error, got error %q", "test", err) @@ -97,8 +97,8 @@ func TestStartRecordError(t *testing.T) { } // Expect the conflict to be resolved when we stop the initial container - cmd(t, "stop", "test") - cmd(t, "start", "test2") + dockerCmd(t, "stop", "test") + dockerCmd(t, "start", "test2") stateErr, err = inspectField("test2", "State.Error") if err != nil { t.Fatalf("Failed to inspect %q state's error, got error %q", "test", err) @@ -115,7 +115,7 @@ func TestStartVolumesFromFailsCleanly(t *testing.T) { defer deleteAllContainers() // Create the first data volume - cmd(t, "run", "-d", "--name", "data_before", "-v", "/foo", "busybox") + dockerCmd(t, "run", "-d", "--name", "data_before", "-v", "/foo", "busybox") // Expect this to fail because the data test after contaienr 
doesn't exist yet if _, err := runCommand(exec.Command(dockerBinary, "run", "-d", "--name", "consumer", "--volumes-from", "data_before", "--volumes-from", "data_after", "busybox")); err == nil { @@ -123,13 +123,13 @@ func TestStartVolumesFromFailsCleanly(t *testing.T) { } // Create the second data volume - cmd(t, "run", "-d", "--name", "data_after", "-v", "/bar", "busybox") + dockerCmd(t, "run", "-d", "--name", "data_after", "-v", "/bar", "busybox") // Now, all the volumes should be there - cmd(t, "start", "consumer") + dockerCmd(t, "start", "consumer") // Check that we have the volumes we want - out, _, _ := cmd(t, "inspect", "--format='{{ len .Volumes }}'", "consumer") + out, _, _ := dockerCmd(t, "inspect", "--format='{{ len .Volumes }}'", "consumer") n_volumes := strings.Trim(out, " \r\n'") if n_volumes != "2" { t.Fatalf("Missing volumes: expected 2, got %s", n_volumes) diff --git a/integration-cli/docker_utils.go b/integration-cli/docker_utils.go index 58752bd04e..ba1a0b1306 100644 --- a/integration-cli/docker_utils.go +++ b/integration-cli/docker_utils.go @@ -356,11 +356,6 @@ func pullImageIfNotExist(image string) (err error) { return } -// deprecated, use dockerCmd instead -func cmd(t *testing.T, args ...string) (string, int, error) { - return dockerCmd(t, args...) -} - func dockerCmd(t *testing.T, args ...string) (string, int, error) { out, status, err := runCommandWithOutput(exec.Command(dockerBinary, args...)) if err != nil { From 5deedef42c7d85835729ecf4fe61ec91612089af Mon Sep 17 00:00:00 2001 From: "Dmitry V. Krivenok" Date: Mon, 24 Nov 2014 21:22:54 +0300 Subject: [PATCH 429/592] Made wording a bit more generic. --- docs/sources/reference/commandline/cli.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/sources/reference/commandline/cli.md b/docs/sources/reference/commandline/cli.md index bf0833d57a..e0fc13e507 100644 --- a/docs/sources/reference/commandline/cli.md +++ b/docs/sources/reference/commandline/cli.md @@ -155,7 +155,7 @@ string is equivalent to setting the `--tlsverify` flag. The following are equiva ### Daemon storage-driver option -The Docker daemon has support for four different image layer storage drivers: `aufs`, +The Docker daemon has support for several different image layer storage drivers: `aufs`, `devicemapper`, `btrfs` and `overlayfs`. The `aufs` driver is the oldest, but is based on a Linux kernel patch-set that From d96832cbd2c62103944518866e1fc1219ce048d5 Mon Sep 17 00:00:00 2001 From: Vaidas Jablonskis Date: Sat, 22 Nov 2014 23:21:47 +0000 Subject: [PATCH 430/592] registry: fix ServerAddress setting This ensures that ServerAddress is set, while previously it was getting set after configFile.Configs. 
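[Editor's note] The one-line reorder below matters because the auth entry is a plain struct value: storing it in the `Configs` map before `ServerAddress` is assigned means the map receives a copy that never sees the later assignment. A minimal, self-contained sketch of that value-copy pitfall — the `authEntry` type and its field set are illustrative stand-ins, not the real registry types:

```go
package main

import "fmt"

// authEntry is an illustrative stand-in for the registry auth config struct;
// only ServerAddress mirrors the field touched by this patch.
type authEntry struct {
	Username      string
	ServerAddress string
}

func main() {
	const addr = "https://index.docker.io/v1/"
	configs := map[string]authEntry{}
	entry := authEntry{Username: "someuser"}

	// Old order: the struct is copied into the map first, so the later
	// assignment only mutates the local variable, not the stored copy.
	configs[addr] = entry
	entry.ServerAddress = addr
	fmt.Printf("old order -> %q\n", configs[addr].ServerAddress) // prints ""

	// Fixed order: populate ServerAddress before storing the value.
	entry.ServerAddress = addr
	configs[addr] = entry
	fmt.Printf("new order -> %q\n", configs[addr].ServerAddress) // prints the address
}
```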
Signed-off-by: Vaidas Jablonskis --- registry/auth.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/registry/auth.go b/registry/auth.go index a22d0b881f..4276064083 100644 --- a/registry/auth.go +++ b/registry/auth.go @@ -126,8 +126,8 @@ func LoadConfig(rootPath string) (*ConfigFile, error) { return &configFile, err } authConfig.Auth = "" - configFile.Configs[k] = authConfig authConfig.ServerAddress = k + configFile.Configs[k] = authConfig } } return &configFile, nil From e07daa58d99fc1733603a9fbc5c795818cdc6687 Mon Sep 17 00:00:00 2001 From: Vincent Batts Date: Mon, 24 Nov 2014 13:49:09 -0500 Subject: [PATCH 431/592] contrib: fix the docker-device-tool Signed-off-by: Vincent Batts --- contrib/docker-device-tool/device_tool.go | 13 ++++++++----- 1 file changed, 8 insertions(+), 5 deletions(-) diff --git a/contrib/docker-device-tool/device_tool.go b/contrib/docker-device-tool/device_tool.go index 8ab53de8da..ffc34a54e0 100644 --- a/contrib/docker-device-tool/device_tool.go +++ b/contrib/docker-device-tool/device_tool.go @@ -3,12 +3,15 @@ package main import ( "flag" "fmt" - "github.com/docker/docker/daemon/graphdriver/devmapper" "os" "path" "sort" "strconv" "strings" + + log "github.com/Sirupsen/logrus" + "github.com/docker/docker/daemon/graphdriver/devmapper" + "github.com/docker/docker/pkg/devicemapper" ) func usage() { @@ -60,7 +63,7 @@ func main() { if *flDebug { os.Setenv("DEBUG", "1") - log.SetLevel("debug") + log.SetLevel(log.DebugLevel) } if flag.NArg() < 1 { @@ -70,7 +73,7 @@ func main() { args := flag.Args() home := path.Join(*root, "devicemapper") - devices, err := devmapper.NewDeviceSet(home, false) + devices, err := devmapper.NewDeviceSet(home, false, nil) if err != nil { fmt.Println("Can't initialize device mapper: ", err) os.Exit(1) @@ -143,7 +146,7 @@ func main() { usage() } - err := devices.RemoveDevice(args[1]) + err := devicemapper.RemoveDevice(args[1]) if err != nil { fmt.Println("Can't remove device: ", err) os.Exit(1) @@ -154,7 +157,7 @@ func main() { usage() } - err := devices.MountDevice(args[1], args[2], false) + err := devices.MountDevice(args[1], args[2], "") if err != nil { fmt.Println("Can't create snap device: ", err) os.Exit(1) From c7e4cc4a531b5337d64bda22df8553e646a96fe7 Mon Sep 17 00:00:00 2001 From: Aidan Hobson Sayers Date: Fri, 14 Nov 2014 01:52:55 +0000 Subject: [PATCH 432/592] Allow git@ prefixes for any hosted git service Signed-off-by: Aidan Hobson Sayers --- docs/sources/reference/commandline/cli.md | 2 +- utils/utils.go | 2 +- utils/utils_test.go | 31 ++++++++++++++++++----- 3 files changed, 27 insertions(+), 8 deletions(-) diff --git a/docs/sources/reference/commandline/cli.md b/docs/sources/reference/commandline/cli.md index 07cc578eb2..b9c2945707 100644 --- a/docs/sources/reference/commandline/cli.md +++ b/docs/sources/reference/commandline/cli.md @@ -462,7 +462,7 @@ Supported formats are: bzip2, gzip and xz. This will clone the GitHub repository and use the cloned repository as context. The Dockerfile at the root of the repository is used as Dockerfile. Note that you -can specify an arbitrary Git repository by using the `git://` +can specify an arbitrary Git repository by using the `git://` or `git@` schema. 
> **Note:** `docker build` will return a `no such file or directory` error diff --git a/utils/utils.go b/utils/utils.go index 84d01f6c9d..3f49cb72f1 100644 --- a/utils/utils.go +++ b/utils/utils.go @@ -293,7 +293,7 @@ func IsURL(str string) bool { } func IsGIT(str string) bool { - return strings.HasPrefix(str, "git://") || strings.HasPrefix(str, "github.com/") || strings.HasPrefix(str, "git@github.com:") || (strings.HasSuffix(str, ".git") && IsURL(str)) + return strings.HasPrefix(str, "git://") || strings.HasPrefix(str, "github.com/") || strings.HasPrefix(str, "git@") || (strings.HasSuffix(str, ".git") && IsURL(str)) } func ValidGitTransport(str string) bool { diff --git a/utils/utils_test.go b/utils/utils_test.go index 6e2de7e041..a319d2d818 100644 --- a/utils/utils_test.go +++ b/utils/utils_test.go @@ -98,23 +98,42 @@ func TestReadSymlinkedDirectoryToFile(t *testing.T) { } } -func TestValidGitTransport(t *testing.T) { - for _, url := range []string{ +var ( + gitUrls = []string{ "git://github.com/docker/docker", "git@github.com:docker/docker.git", + "git@bitbucket.org:atlassianlabs/atlassian-docker.git", "https://github.com/docker/docker.git", "http://github.com/docker/docker.git", - } { + } + incompleteGitUrls = []string{ + "github.com/docker/docker", + } +) + +func TestValidGitTransport(t *testing.T) { + for _, url := range gitUrls { if ValidGitTransport(url) == false { t.Fatalf("%q should be detected as valid Git prefix", url) } } - for _, url := range []string{ - "github.com/docker/docker", - } { + for _, url := range incompleteGitUrls { if ValidGitTransport(url) == true { t.Fatalf("%q should not be detected as valid Git prefix", url) } } } + +func TestIsGIT(t *testing.T) { + for _, url := range gitUrls { + if IsGIT(url) == false { + t.Fatalf("%q should be detected as valid Git url", url) + } + } + for _, url := range incompleteGitUrls { + if IsGIT(url) == false { + t.Fatalf("%q should be detected as valid Git url", url) + } + } +} From faab87cc36fb6f02ddd53e1be09f10623a40773a Mon Sep 17 00:00:00 2001 From: unclejack Date: Tue, 28 Oct 2014 23:18:45 +0200 Subject: [PATCH 433/592] pkg/symlink: avoid following out of scope Docker-DCO-1.1-Signed-off-by: Cristian Staretu (github: unclejack) --- pkg/symlink/fs.go | 47 +++++++---- pkg/symlink/fs_test.go | 150 +++++++++++++++++++++++++++++++++--- pkg/symlink/testdata/fs/j/k | 1 + 3 files changed, 171 insertions(+), 27 deletions(-) create mode 120000 pkg/symlink/testdata/fs/j/k diff --git a/pkg/symlink/fs.go b/pkg/symlink/fs.go index d761732571..6ce99c6bda 100644 --- a/pkg/symlink/fs.go +++ b/pkg/symlink/fs.go @@ -12,6 +12,12 @@ const maxLoopCounter = 100 // FollowSymlink will follow an existing link and scope it to the root // path provided. +// The role of this function is to return an absolute path in the root +// or normalize to the root if the symlink leads to a path which is +// outside of the root. +// Errors encountered while attempting to follow the symlink in path +// will be reported. +// Normalizations to the root don't constitute errors. 
func FollowSymlinkInScope(link, root string) (string, error) { root, err := filepath.Abs(root) if err != nil { @@ -60,25 +66,36 @@ func FollowSymlinkInScope(link, root string) (string, error) { } return "", err } - if stat.Mode()&os.ModeSymlink == os.ModeSymlink { - dest, err := os.Readlink(prev) - if err != nil { - return "", err - } - if path.IsAbs(dest) { - prev = filepath.Join(root, dest) - } else { - prev, _ = filepath.Abs(prev) - - if prev = filepath.Join(filepath.Dir(prev), dest); len(prev) < len(root) { - prev = filepath.Join(root, filepath.Base(dest)) - } - } - } else { + // let's break if we're not dealing with a symlink + if stat.Mode()&os.ModeSymlink != os.ModeSymlink { break } + + // process the symlink + dest, err := os.Readlink(prev) + if err != nil { + return "", err + } + + if path.IsAbs(dest) { + prev = filepath.Join(root, dest) + } else { + prev, _ = filepath.Abs(prev) + + dir := filepath.Dir(prev) + prev = filepath.Join(dir, dest) + if dir == root && !strings.HasPrefix(prev, root) { + prev = root + } + if len(prev) < len(root) || (len(prev) == len(root) && prev != root) { + prev = filepath.Join(root, filepath.Base(dest)) + } + } } } + if prev == "/" { + prev = root + } return prev, nil } diff --git a/pkg/symlink/fs_test.go b/pkg/symlink/fs_test.go index cc0d82d1a3..0e2f948b6a 100644 --- a/pkg/symlink/fs_test.go +++ b/pkg/symlink/fs_test.go @@ -98,25 +98,151 @@ func TestFollowSymLinkRelativeLink(t *testing.T) { } func TestFollowSymLinkRelativeLinkScope(t *testing.T) { - link := "testdata/fs/a/f" + // avoid letting symlink f lead us out of the "testdata" scope + // we don't normalize because symlink f is in scope and there is no + // information leak + { + link := "testdata/fs/a/f" - rewrite, err := FollowSymlinkInScope(link, "testdata") - if err != nil { - t.Fatal(err) + rewrite, err := FollowSymlinkInScope(link, "testdata") + if err != nil { + t.Fatal(err) + } + + if expected := abs(t, "testdata/test"); expected != rewrite { + t.Fatalf("Expected %s got %s", expected, rewrite) + } } - if expected := abs(t, "testdata/test"); expected != rewrite { - t.Fatalf("Expected %s got %s", expected, rewrite) + // avoid letting symlink f lead us out of the "testdata/fs" scope + // we don't normalize because symlink f is in scope and there is no + // information leak + { + link := "testdata/fs/a/f" + + rewrite, err := FollowSymlinkInScope(link, "testdata/fs") + if err != nil { + t.Fatal(err) + } + + if expected := abs(t, "testdata/fs/test"); expected != rewrite { + t.Fatalf("Expected %s got %s", expected, rewrite) + } } - link = "testdata/fs/b/h" + // avoid letting symlink g (pointed at by symlink h) take out of scope + // TODO: we should probably normalize to scope here because ../[....]/root + // is out of scope and we leak information + { + link := "testdata/fs/b/h" - rewrite, err = FollowSymlinkInScope(link, "testdata") - if err != nil { - t.Fatal(err) + rewrite, err := FollowSymlinkInScope(link, "testdata") + if err != nil { + t.Fatal(err) + } + + if expected := abs(t, "testdata/root"); expected != rewrite { + t.Fatalf("Expected %s got %s", expected, rewrite) + } } - if expected := abs(t, "testdata/root"); expected != rewrite { - t.Fatalf("Expected %s got %s", expected, rewrite) + // avoid letting allowing symlink e lead us to ../b + // normalize to the "testdata/fs/a" + { + link := "testdata/fs/a/e" + + rewrite, err := FollowSymlinkInScope(link, "testdata/fs/a") + if err != nil { + t.Fatal(err) + } + + if expected := abs(t, "testdata/fs/a"); expected != rewrite { + 
t.Fatalf("Expected %s got %s", expected, rewrite) + } + } + + // avoid letting symlink -> ../directory/file escape from scope + // normalize to "testdata/fs/j" + { + link := "testdata/fs/j/k" + + rewrite, err := FollowSymlinkInScope(link, "testdata/fs/j") + if err != nil { + t.Fatal(err) + } + + if expected := abs(t, "testdata/fs/j"); expected != rewrite { + t.Fatalf("Expected %s got %s", expected, rewrite) + } + } + + // make sure we don't allow escaping to / + // normalize to dir + { + dir, err := ioutil.TempDir("", "docker-fs-test") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(dir) + + linkFile := filepath.Join(dir, "foo") + os.Mkdir(filepath.Join(dir, ""), 0700) + os.Symlink("/", linkFile) + + rewrite, err := FollowSymlinkInScope(linkFile, dir) + if err != nil { + t.Fatal(err) + } + + if rewrite != dir { + t.Fatalf("Expected %s got %s", dir, rewrite) + } + } + + // make sure we don't allow escaping to / + // normalize to dir + { + dir, err := ioutil.TempDir("", "docker-fs-test") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(dir) + + linkFile := filepath.Join(dir, "foo") + os.Mkdir(filepath.Join(dir, ""), 0700) + os.Symlink("/../../", linkFile) + + rewrite, err := FollowSymlinkInScope(linkFile, dir) + if err != nil { + t.Fatal(err) + } + + if rewrite != dir { + t.Fatalf("Expected %s got %s", dir, rewrite) + } + } + + // make sure we stay in scope without leaking information + // this also checks for escaping to / + // normalize to dir + { + dir, err := ioutil.TempDir("", "docker-fs-test") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(dir) + + linkFile := filepath.Join(dir, "foo") + os.Mkdir(filepath.Join(dir, ""), 0700) + os.Symlink("../../", linkFile) + + rewrite, err := FollowSymlinkInScope(linkFile, dir) + if err != nil { + t.Fatal(err) + } + + if rewrite != dir { + t.Fatalf("Expected %s got %s", dir, rewrite) + } } } diff --git a/pkg/symlink/testdata/fs/j/k b/pkg/symlink/testdata/fs/j/k new file mode 120000 index 0000000000..f559e8fda2 --- /dev/null +++ b/pkg/symlink/testdata/fs/j/k @@ -0,0 +1 @@ +../i/a \ No newline at end of file From 294843ef23fcff3c080d9fbd12df17ae7006a9f8 Mon Sep 17 00:00:00 2001 From: Michael Crosby Date: Mon, 3 Nov 2014 22:57:18 +0000 Subject: [PATCH 434/592] Move security opts to HostConfig These settings need to be in the HostConfig so that they are not committed to an image and cannot introduce a security issue. We can safely move this field from the Config to the HostConfig without any regressions because these settings are consumed at container created and used to populate fields on the Container struct. Because of this, existing settings will be honored for containers already created on a daemon with custom security settings and prevent values being consumed via an Image. 
Signed-off-by: Michael Crosby Conflicts: daemon/create.go changing config to hostConfig was required to fix the build --- daemon/create.go | 4 ++-- daemon/daemon.go | 11 +++++------ daemon/daemon_unit_test.go | 2 +- daemon/start.go | 3 +++ runconfig/config.go | 2 -- runconfig/hostconfig.go | 2 ++ runconfig/parse.go | 2 +- 7 files changed, 14 insertions(+), 12 deletions(-) diff --git a/daemon/create.go b/daemon/create.go index 3a71a8ac7e..e666e6f6ff 100644 --- a/daemon/create.go +++ b/daemon/create.go @@ -83,8 +83,8 @@ func (daemon *Daemon) Create(config *runconfig.Config, hostConfig *runconfig.Hos if warnings, err = daemon.mergeAndVerifyConfig(config, img); err != nil { return nil, nil, err } - if hostConfig != nil && config.SecurityOpt == nil { - config.SecurityOpt, err = daemon.GenerateSecurityOpt(hostConfig.IpcMode) + if hostConfig != nil && hostConfig.SecurityOpt == nil { + hostConfig.SecurityOpt, err = daemon.GenerateSecurityOpt(hostConfig.IpcMode) if err != nil { return nil, nil, err } diff --git a/daemon/daemon.go b/daemon/daemon.go index 84628be729..93cb101f61 100644 --- a/daemon/daemon.go +++ b/daemon/daemon.go @@ -531,10 +531,10 @@ func (daemon *Daemon) getEntrypointAndArgs(configEntrypoint, configCmd []string) return entrypoint, args } -func parseSecurityOpt(container *Container, config *runconfig.Config) error { +func parseSecurityOpt(container *Container, config *runconfig.HostConfig) error { var ( - label_opts []string - err error + labelOpts []string + err error ) for _, opt := range config.SecurityOpt { @@ -544,7 +544,7 @@ func parseSecurityOpt(container *Container, config *runconfig.Config) error { } switch con[0] { case "label": - label_opts = append(label_opts, con[1]) + labelOpts = append(labelOpts, con[1]) case "apparmor": container.AppArmorProfile = con[1] default: @@ -552,7 +552,7 @@ func parseSecurityOpt(container *Container, config *runconfig.Config) error { } } - container.ProcessLabel, container.MountLabel, err = label.InitLabels(label_opts) + container.ProcessLabel, container.MountLabel, err = label.InitLabels(labelOpts) return err } @@ -586,7 +586,6 @@ func (daemon *Daemon) newContainer(name string, config *runconfig.Config, img *i execCommands: newExecStore(), } container.root = daemon.containerRoot(container.ID) - err = parseSecurityOpt(container, config) return container, err } diff --git a/daemon/daemon_unit_test.go b/daemon/daemon_unit_test.go index f3b899ec8d..fbc3302aaa 100644 --- a/daemon/daemon_unit_test.go +++ b/daemon/daemon_unit_test.go @@ -8,7 +8,7 @@ import ( func TestParseSecurityOpt(t *testing.T) { container := &Container{} - config := &runconfig.Config{} + config := &runconfig.HostConfig{} // test apparmor config.SecurityOpt = []string{"apparmor:test_profile"} diff --git a/daemon/start.go b/daemon/start.go index f2c375ddc9..f72407e3f3 100644 --- a/daemon/start.go +++ b/daemon/start.go @@ -44,6 +44,9 @@ func (daemon *Daemon) ContainerStart(job *engine.Job) engine.Status { } func (daemon *Daemon) setHostConfig(container *Container, hostConfig *runconfig.HostConfig) error { + if err := parseSecurityOpt(container, hostConfig); err != nil { + return err + } // Validate the HostConfig binds. 
Make sure that: // the source exists for _, bind := range hostConfig.Binds { diff --git a/runconfig/config.go b/runconfig/config.go index 29c54a4d6d..ca5c3240b6 100644 --- a/runconfig/config.go +++ b/runconfig/config.go @@ -33,7 +33,6 @@ type Config struct { NetworkDisabled bool MacAddress string OnBuild []string - SecurityOpt []string } func ContainerConfigFromJob(job *engine.Job) *Config { @@ -58,7 +57,6 @@ func ContainerConfigFromJob(job *engine.Job) *Config { } job.GetenvJson("ExposedPorts", &config.ExposedPorts) job.GetenvJson("Volumes", &config.Volumes) - config.SecurityOpt = job.GetenvList("SecurityOpt") if PortSpecs := job.GetenvList("PortSpecs"); PortSpecs != nil { config.PortSpecs = PortSpecs } diff --git a/runconfig/hostconfig.go b/runconfig/hostconfig.go index 01388ad727..b619e9c31c 100644 --- a/runconfig/hostconfig.go +++ b/runconfig/hostconfig.go @@ -95,6 +95,7 @@ type HostConfig struct { CapAdd []string CapDrop []string RestartPolicy RestartPolicy + SecurityOpt []string } // This is used by the create command when you want to set both the @@ -130,6 +131,7 @@ func ContainerHostConfigFromJob(job *engine.Job) *HostConfig { job.GetenvJson("PortBindings", &hostConfig.PortBindings) job.GetenvJson("Devices", &hostConfig.Devices) job.GetenvJson("RestartPolicy", &hostConfig.RestartPolicy) + hostConfig.SecurityOpt = job.GetenvList("SecurityOpt") if Binds := job.GetenvList("Binds"); Binds != nil { hostConfig.Binds = Binds } diff --git a/runconfig/parse.go b/runconfig/parse.go index 2bd8cf969e..0d682f35d3 100644 --- a/runconfig/parse.go +++ b/runconfig/parse.go @@ -273,7 +273,6 @@ func Parse(cmd *flag.FlagSet, args []string) (*Config, *HostConfig, *flag.FlagSe MacAddress: *flMacAddress, Entrypoint: entrypoint, WorkingDir: *flWorkingDir, - SecurityOpt: flSecurityOpt.GetAll(), } hostConfig := &HostConfig{ @@ -294,6 +293,7 @@ func Parse(cmd *flag.FlagSet, args []string) (*Config, *HostConfig, *flag.FlagSe CapAdd: flCapAdd.GetAll(), CapDrop: flCapDrop.GetAll(), RestartPolicy: restartPolicy, + SecurityOpt: flSecurityOpt.GetAll(), } // When allocating stdin in attached mode, close stdin at client disconnect From fa1484d12c5b66f7db03a9c93002ba3df56cdb4e Mon Sep 17 00:00:00 2001 From: Michael Crosby Date: Mon, 3 Nov 2014 23:00:49 +0000 Subject: [PATCH 435/592] Add AppArmorProfile to container inspect json Signed-off-by: Michael Crosby --- daemon/inspect.go | 1 + 1 file changed, 1 insertion(+) diff --git a/daemon/inspect.go b/daemon/inspect.go index 396ca0227f..cf2ed644d0 100644 --- a/daemon/inspect.go +++ b/daemon/inspect.go @@ -47,6 +47,7 @@ func (daemon *Daemon) ContainerInspect(job *engine.Job) engine.Status { out.Set("ProcessLabel", container.ProcessLabel) out.SetJson("Volumes", container.Volumes) out.SetJson("VolumesRW", container.VolumesRW) + out.SetJson("AppArmorProfile", container.AppArmorProfile) if children, err := daemon.Children(container.Name); err == nil { for linkAlias, child := range children { From 1cb17f03d0b217acf2d2c289b4946d367f9d3e80 Mon Sep 17 00:00:00 2001 From: unclejack Date: Wed, 29 Oct 2014 21:06:51 +0200 Subject: [PATCH 436/592] add pkg/chrootarchive and use it on the daemon Docker-DCO-1.1-Signed-off-by: Cristian Staretu (github: unclejack) Conflicts: builder/internals.go daemon/graphdriver/aufs/aufs.go daemon/volumes.go fixed conflicts in imports --- builder/internals.go | 11 +++-- daemon/graphdriver/aufs/aufs.go | 3 +- daemon/graphdriver/fsdiff.go | 3 +- daemon/graphdriver/vfs/driver.go | 4 +- daemon/volumes.go | 4 +- pkg/chrootarchive/archive.go | 76 
+++++++++++++++++++++++++++++++ pkg/chrootarchive/diff.go | 38 ++++++++++++++++ pkg/chrootarchive/init.go | 18 ++++++++ pkg/reexec/command_linux.go | 18 ++++++++ pkg/reexec/command_unsupported.go | 11 +++++ pkg/reexec/reexec.go | 3 -- 11 files changed, 176 insertions(+), 13 deletions(-) create mode 100644 pkg/chrootarchive/archive.go create mode 100644 pkg/chrootarchive/diff.go create mode 100644 pkg/chrootarchive/init.go create mode 100644 pkg/reexec/command_linux.go create mode 100644 pkg/reexec/command_unsupported.go diff --git a/builder/internals.go b/builder/internals.go index f6083e7918..a894dd0b6b 100644 --- a/builder/internals.go +++ b/builder/internals.go @@ -24,6 +24,7 @@ import ( "github.com/docker/docker/daemon" imagepkg "github.com/docker/docker/image" "github.com/docker/docker/pkg/archive" + "github.com/docker/docker/pkg/chrootarchive" "github.com/docker/docker/pkg/parsers" "github.com/docker/docker/pkg/symlink" "github.com/docker/docker/pkg/system" @@ -46,7 +47,9 @@ func (b *Builder) readContext(context io.Reader) error { if b.context, err = tarsum.NewTarSum(decompressedStream, true, tarsum.Version0); err != nil { return err } - if err := archive.Untar(b.context, tmpdirPath, nil); err != nil { + + os.MkdirAll(tmpdirPath, 0700) + if err := chrootarchive.Untar(b.context, tmpdirPath, nil); err != nil { return err } @@ -627,7 +630,7 @@ func (b *Builder) addContext(container *daemon.Container, orig, dest string, dec } // try to successfully untar the orig - if err := archive.UntarPath(origPath, tarDest); err == nil { + if err := chrootarchive.UntarPath(origPath, tarDest); err == nil { return nil } else if err != io.EOF { log.Debugf("Couldn't untar %s to %s: %s", origPath, tarDest, err) @@ -637,7 +640,7 @@ func (b *Builder) addContext(container *daemon.Container, orig, dest string, dec if err := os.MkdirAll(path.Dir(destPath), 0755); err != nil { return err } - if err := archive.CopyWithTar(origPath, destPath); err != nil { + if err := chrootarchive.CopyWithTar(origPath, destPath); err != nil { return err } @@ -650,7 +653,7 @@ func (b *Builder) addContext(container *daemon.Container, orig, dest string, dec } func copyAsDirectory(source, destination string, destinationExists bool) error { - if err := archive.CopyWithTar(source, destination); err != nil { + if err := chrootarchive.CopyWithTar(source, destination); err != nil { return err } diff --git a/daemon/graphdriver/aufs/aufs.go b/daemon/graphdriver/aufs/aufs.go index da3c720d16..55cfd00c1f 100644 --- a/daemon/graphdriver/aufs/aufs.go +++ b/daemon/graphdriver/aufs/aufs.go @@ -33,6 +33,7 @@ import ( log "github.com/Sirupsen/logrus" "github.com/docker/docker/daemon/graphdriver" "github.com/docker/docker/pkg/archive" + "github.com/docker/docker/pkg/chrootarchive" mountpk "github.com/docker/docker/pkg/mount" "github.com/docker/docker/utils" "github.com/docker/libcontainer/label" @@ -305,7 +306,7 @@ func (a *Driver) Diff(id, parent string) (archive.Archive, error) { } func (a *Driver) applyDiff(id string, diff archive.ArchiveReader) error { - return archive.Untar(diff, path.Join(a.rootPath(), "diff", id), nil) + return chrootarchive.Untar(diff, path.Join(a.rootPath(), "diff", id), nil) } // DiffSize calculates the changes between the specified id diff --git a/daemon/graphdriver/fsdiff.go b/daemon/graphdriver/fsdiff.go index 3569cf910e..48852a5631 100644 --- a/daemon/graphdriver/fsdiff.go +++ b/daemon/graphdriver/fsdiff.go @@ -8,6 +8,7 @@ import ( log "github.com/Sirupsen/logrus" "github.com/docker/docker/pkg/archive" + 
"github.com/docker/docker/pkg/chrootarchive" "github.com/docker/docker/pkg/ioutils" "github.com/docker/docker/utils" ) @@ -122,7 +123,7 @@ func (gdw *naiveDiffDriver) ApplyDiff(id, parent string, diff archive.ArchiveRea start := time.Now().UTC() log.Debugf("Start untar layer") - if err = archive.ApplyLayer(layerFs, diff); err != nil { + if err = chrootarchive.ApplyLayer(layerFs, diff); err != nil { return } log.Debugf("Untar time: %vs", time.Now().UTC().Sub(start).Seconds()) diff --git a/daemon/graphdriver/vfs/driver.go b/daemon/graphdriver/vfs/driver.go index 1076eb38dd..aa104500bc 100644 --- a/daemon/graphdriver/vfs/driver.go +++ b/daemon/graphdriver/vfs/driver.go @@ -8,7 +8,7 @@ import ( "path" "github.com/docker/docker/daemon/graphdriver" - "github.com/docker/docker/pkg/archive" + "github.com/docker/docker/pkg/chrootarchive" "github.com/docker/libcontainer/label" ) @@ -66,7 +66,7 @@ func (d *Driver) Create(id, parent string) error { if err != nil { return fmt.Errorf("%s: %s", parent, err) } - if err := archive.CopyWithTar(parentDir, dir); err != nil { + if err := chrootarchive.CopyWithTar(parentDir, dir); err != nil { return err } return nil diff --git a/daemon/volumes.go b/daemon/volumes.go index 6523dae853..a2cf3af33a 100644 --- a/daemon/volumes.go +++ b/daemon/volumes.go @@ -12,7 +12,7 @@ import ( log "github.com/Sirupsen/logrus" "github.com/docker/docker/daemon/execdriver" - "github.com/docker/docker/pkg/archive" + "github.com/docker/docker/pkg/chrootarchive" "github.com/docker/docker/pkg/symlink" "github.com/docker/docker/volumes" ) @@ -320,7 +320,7 @@ func copyExistingContents(source, destination string) error { if len(srcList) == 0 { // If the source volume is empty copy files from the root into the volume - if err := archive.CopyWithTar(source, destination); err != nil { + if err := chrootarchive.CopyWithTar(source, destination); err != nil { return err } } diff --git a/pkg/chrootarchive/archive.go b/pkg/chrootarchive/archive.go new file mode 100644 index 0000000000..f1df57ca59 --- /dev/null +++ b/pkg/chrootarchive/archive.go @@ -0,0 +1,76 @@ +package chrootarchive + +import ( + "flag" + "fmt" + "io" + "os" + "runtime" + "syscall" + + "github.com/docker/docker/pkg/archive" + "github.com/docker/docker/pkg/reexec" +) + +func untar() { + runtime.LockOSThread() + flag.Parse() + + if err := syscall.Chroot(flag.Arg(0)); err != nil { + fatal(err) + } + if err := syscall.Chdir("/"); err != nil { + fatal(err) + } + if err := archive.Untar(os.Stdin, "/", nil); err != nil { + fatal(err) + } + os.Exit(0) +} + +var ( + chrootArchiver = &archive.Archiver{Untar} +) + +func Untar(archive io.Reader, dest string, options *archive.TarOptions) error { + if _, err := os.Stat(dest); os.IsNotExist(err) { + if err := os.MkdirAll(dest, 0777); err != nil { + return err + } + } + cmd := reexec.Command("docker-untar", dest) + cmd.Stdin = archive + out, err := cmd.CombinedOutput() + if err != nil { + return fmt.Errorf("Untar %s %s", err, out) + } + return nil +} + +func TarUntar(src, dst string) error { + return chrootArchiver.TarUntar(src, dst) +} + +// CopyWithTar creates a tar archive of filesystem path `src`, and +// unpacks it at filesystem path `dst`. +// The archive is streamed directly with fixed buffering and no +// intermediary disk IO. +func CopyWithTar(src, dst string) error { + return chrootArchiver.CopyWithTar(src, dst) +} + +// CopyFileWithTar emulates the behavior of the 'cp' command-line +// for a single file. 
It copies a regular file from path `src` to +// path `dst`, and preserves all its metadata. +// +// If `dst` ends with a trailing slash '/', the final destination path +// will be `dst/base(src)`. +func CopyFileWithTar(src, dst string) (err error) { + return chrootArchiver.CopyFileWithTar(src, dst) +} + +// UntarPath is a convenience function which looks for an archive +// at filesystem path `src`, and unpacks it at `dst`. +func UntarPath(src, dst string) error { + return chrootArchiver.UntarPath(src, dst) +} diff --git a/pkg/chrootarchive/diff.go b/pkg/chrootarchive/diff.go new file mode 100644 index 0000000000..2133200c68 --- /dev/null +++ b/pkg/chrootarchive/diff.go @@ -0,0 +1,38 @@ +package chrootarchive + +import ( + "flag" + "fmt" + "os" + "runtime" + "syscall" + + "github.com/docker/docker/pkg/archive" + "github.com/docker/docker/pkg/reexec" +) + +func applyLayer() { + runtime.LockOSThread() + flag.Parse() + + if err := syscall.Chroot(flag.Arg(0)); err != nil { + fatal(err) + } + if err := syscall.Chdir("/"); err != nil { + fatal(err) + } + if err := archive.ApplyLayer("/", os.Stdin); err != nil { + fatal(err) + } + os.Exit(0) +} + +func ApplyLayer(dest string, layer archive.ArchiveReader) error { + cmd := reexec.Command("docker-applyLayer", dest) + cmd.Stdin = layer + out, err := cmd.CombinedOutput() + if err != nil { + return fmt.Errorf("ApplyLayer %s %s", err, out) + } + return nil +} diff --git a/pkg/chrootarchive/init.go b/pkg/chrootarchive/init.go new file mode 100644 index 0000000000..b548e9fe72 --- /dev/null +++ b/pkg/chrootarchive/init.go @@ -0,0 +1,18 @@ +package chrootarchive + +import ( + "fmt" + "os" + + "github.com/docker/docker/pkg/reexec" +) + +func init() { + reexec.Register("docker-untar", untar) + reexec.Register("docker-applyLayer", applyLayer) +} + +func fatal(err error) { + fmt.Fprint(os.Stderr, err) + os.Exit(1) +} diff --git a/pkg/reexec/command_linux.go b/pkg/reexec/command_linux.go new file mode 100644 index 0000000000..8dc3f3a4a6 --- /dev/null +++ b/pkg/reexec/command_linux.go @@ -0,0 +1,18 @@ +// +build linux + +package reexec + +import ( + "os/exec" + "syscall" +) + +func Command(args ...string) *exec.Cmd { + return &exec.Cmd{ + Path: Self(), + Args: args, + SysProcAttr: &syscall.SysProcAttr{ + Pdeathsig: syscall.SIGTERM, + }, + } +} diff --git a/pkg/reexec/command_unsupported.go b/pkg/reexec/command_unsupported.go new file mode 100644 index 0000000000..a579318e82 --- /dev/null +++ b/pkg/reexec/command_unsupported.go @@ -0,0 +1,11 @@ +// +build !linux + +package reexec + +import ( + "os/exec" +) + +func Command(args ...string) *exec.Cmd { + return nil +} diff --git a/pkg/reexec/reexec.go b/pkg/reexec/reexec.go index 136b905bd2..774e71c76d 100644 --- a/pkg/reexec/reexec.go +++ b/pkg/reexec/reexec.go @@ -27,19 +27,16 @@ func Init() bool { return true } - return false } // Self returns the path to the current processes binary func Self() string { name := os.Args[0] - if filepath.Base(name) == name { if lp, err := exec.LookPath(name); err == nil { name = lp } } - return name } From 9c01bc249dc628280f3fc019d5f0e0ace71be248 Mon Sep 17 00:00:00 2001 From: Tibor Vass Date: Sat, 8 Nov 2014 10:38:42 -0500 Subject: [PATCH 437/592] pkg/chrootarchive: pass TarOptions via CLI arg Signed-off-by: Tibor Vass Conflicts: graph/load.go fixed conflict in imports --- builder/internals.go | 1 - graph/load.go | 3 ++- pkg/chrootarchive/archive.go | 18 ++++++++++++-- pkg/chrootarchive/archive_test.go | 39 +++++++++++++++++++++++++++++++ pkg/chrootarchive/init.go | 1 + 5 files 
changed, 58 insertions(+), 4 deletions(-) create mode 100644 pkg/chrootarchive/archive_test.go diff --git a/builder/internals.go b/builder/internals.go index a894dd0b6b..0a2432f144 100644 --- a/builder/internals.go +++ b/builder/internals.go @@ -48,7 +48,6 @@ func (b *Builder) readContext(context io.Reader) error { return err } - os.MkdirAll(tmpdirPath, 0700) if err := chrootarchive.Untar(b.context, tmpdirPath, nil); err != nil { return err } diff --git a/graph/load.go b/graph/load.go index 875741ecf7..18c83c07de 100644 --- a/graph/load.go +++ b/graph/load.go @@ -11,6 +11,7 @@ import ( "github.com/docker/docker/engine" "github.com/docker/docker/image" "github.com/docker/docker/pkg/archive" + "github.com/docker/docker/pkg/chrootarchive" ) // Loads a set of images into the repository. This is the complementary of ImageExport. @@ -53,7 +54,7 @@ func (s *TagStore) CmdLoad(job *engine.Job) engine.Status { excludes[i] = k i++ } - if err := archive.Untar(repoFile, repoDir, &archive.TarOptions{Excludes: excludes}); err != nil { + if err := chrootarchive.Untar(repoFile, repoDir, &archive.TarOptions{Excludes: excludes}); err != nil { return job.Error(err) } diff --git a/pkg/chrootarchive/archive.go b/pkg/chrootarchive/archive.go index f1df57ca59..fc2bea2c40 100644 --- a/pkg/chrootarchive/archive.go +++ b/pkg/chrootarchive/archive.go @@ -1,11 +1,14 @@ package chrootarchive import ( + "bytes" + "encoding/json" "flag" "fmt" "io" "os" "runtime" + "strings" "syscall" "github.com/docker/docker/pkg/archive" @@ -22,7 +25,12 @@ func untar() { if err := syscall.Chdir("/"); err != nil { fatal(err) } - if err := archive.Untar(os.Stdin, "/", nil); err != nil { + options := new(archive.TarOptions) + dec := json.NewDecoder(strings.NewReader(flag.Arg(1))) + if err := dec.Decode(options); err != nil { + fatal(err) + } + if err := archive.Untar(os.Stdin, "/", options); err != nil { fatal(err) } os.Exit(0) @@ -33,12 +41,18 @@ var ( ) func Untar(archive io.Reader, dest string, options *archive.TarOptions) error { + var buf bytes.Buffer + enc := json.NewEncoder(&buf) + if err := enc.Encode(options); err != nil { + return fmt.Errorf("Untar json encode: %v", err) + } if _, err := os.Stat(dest); os.IsNotExist(err) { if err := os.MkdirAll(dest, 0777); err != nil { return err } } - cmd := reexec.Command("docker-untar", dest) + + cmd := reexec.Command("docker-untar", dest, buf.String()) cmd.Stdin = archive out, err := cmd.CombinedOutput() if err != nil { diff --git a/pkg/chrootarchive/archive_test.go b/pkg/chrootarchive/archive_test.go new file mode 100644 index 0000000000..aeac448743 --- /dev/null +++ b/pkg/chrootarchive/archive_test.go @@ -0,0 +1,39 @@ +package chrootarchive + +import ( + "io/ioutil" + "os" + "path/filepath" + "testing" + + "github.com/docker/docker/pkg/archive" +) + +func TestChrootTarUntar(t *testing.T) { + tmpdir, err := ioutil.TempDir("", "docker-TestChrootTarUntar") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(tmpdir) + src := filepath.Join(tmpdir, "src") + if err := os.MkdirAll(src, 0700); err != nil { + t.Fatal(err) + } + if err := ioutil.WriteFile(filepath.Join(src, "toto"), []byte("hello toto"), 0644); err != nil { + t.Fatal(err) + } + if err := ioutil.WriteFile(filepath.Join(src, "lolo"), []byte("hello lolo"), 0644); err != nil { + t.Fatal(err) + } + stream, err := archive.Tar(src, archive.Uncompressed) + if err != nil { + t.Fatal(err) + } + dest := filepath.Join(tmpdir, "src") + if err := os.MkdirAll(dest, 0700); err != nil { + t.Fatal(err) + } + if err := Untar(stream, dest, 
&archive.TarOptions{Excludes: []string{"lolo"}}); err != nil { + t.Fatal(err) + } +} diff --git a/pkg/chrootarchive/init.go b/pkg/chrootarchive/init.go index b548e9fe72..f05698f65b 100644 --- a/pkg/chrootarchive/init.go +++ b/pkg/chrootarchive/init.go @@ -10,6 +10,7 @@ import ( func init() { reexec.Register("docker-untar", untar) reexec.Register("docker-applyLayer", applyLayer) + reexec.Init() } func fatal(err error) { From 209deff9633b82198925846ebcb0a02191553005 Mon Sep 17 00:00:00 2001 From: unclejack Date: Tue, 11 Nov 2014 13:02:14 +0200 Subject: [PATCH 438/592] don't call reexec.Init from chrootarchive Docker-DCO-1.1-Signed-off-by: Cristian Staretu (github: unclejack) Conflicts: daemon/graphdriver/aufs/aufs_test.go fixed conflict caused by imports --- daemon/graphdriver/aufs/aufs_test.go | 5 +++++ daemon/graphdriver/vfs/vfs_test.go | 9 ++++++++- graph/pools_test.go | 10 +++++++++- pkg/chrootarchive/archive_test.go | 5 +++++ pkg/chrootarchive/init.go | 1 - 5 files changed, 27 insertions(+), 3 deletions(-) diff --git a/daemon/graphdriver/aufs/aufs_test.go b/daemon/graphdriver/aufs/aufs_test.go index e1ed64985f..c17a5dcce6 100644 --- a/daemon/graphdriver/aufs/aufs_test.go +++ b/daemon/graphdriver/aufs/aufs_test.go @@ -11,6 +11,7 @@ import ( "github.com/docker/docker/daemon/graphdriver" "github.com/docker/docker/pkg/archive" + "github.com/docker/docker/pkg/reexec" ) var ( @@ -18,6 +19,10 @@ var ( tmp = path.Join(tmpOuter, "aufs") ) +func init() { + reexec.Init() +} + func testInit(dir string, t *testing.T) graphdriver.Driver { d, err := Init(dir, nil) if err != nil { diff --git a/daemon/graphdriver/vfs/vfs_test.go b/daemon/graphdriver/vfs/vfs_test.go index eaf70f59d3..1ee6ae4a90 100644 --- a/daemon/graphdriver/vfs/vfs_test.go +++ b/daemon/graphdriver/vfs/vfs_test.go @@ -1,10 +1,17 @@ package vfs import ( - "github.com/docker/docker/daemon/graphdriver/graphtest" "testing" + + "github.com/docker/docker/daemon/graphdriver/graphtest" + + "github.com/docker/docker/pkg/reexec" ) +func init() { + reexec.Init() +} + // This avoids creating a new driver for each test if all tests are run // Make sure to put new tests between TestVfsSetup and TestVfsTeardown func TestVfsSetup(t *testing.T) { diff --git a/graph/pools_test.go b/graph/pools_test.go index 785a4bd122..129a5e1fec 100644 --- a/graph/pools_test.go +++ b/graph/pools_test.go @@ -1,6 +1,14 @@ package graph -import "testing" +import ( + "testing" + + "github.com/docker/docker/pkg/reexec" +) + +func init() { + reexec.Init() +} func TestPools(t *testing.T) { s := &TagStore{ diff --git a/pkg/chrootarchive/archive_test.go b/pkg/chrootarchive/archive_test.go index aeac448743..69e18e3199 100644 --- a/pkg/chrootarchive/archive_test.go +++ b/pkg/chrootarchive/archive_test.go @@ -7,8 +7,13 @@ import ( "testing" "github.com/docker/docker/pkg/archive" + "github.com/docker/docker/pkg/reexec" ) +func init() { + reexec.Init() +} + func TestChrootTarUntar(t *testing.T) { tmpdir, err := ioutil.TempDir("", "docker-TestChrootTarUntar") if err != nil { diff --git a/pkg/chrootarchive/init.go b/pkg/chrootarchive/init.go index f05698f65b..b548e9fe72 100644 --- a/pkg/chrootarchive/init.go +++ b/pkg/chrootarchive/init.go @@ -10,7 +10,6 @@ import ( func init() { reexec.Register("docker-untar", untar) reexec.Register("docker-applyLayer", applyLayer) - reexec.Init() } func fatal(err error) { From 221617dbcd9431f14a3779d8bac9aba52f78ea21 Mon Sep 17 00:00:00 2001 From: Tibor Vass Date: Mon, 20 Oct 2014 15:35:48 -0400 Subject: [PATCH 439/592] archive: add breakout tests 
Signed-off-by: Tibor Vass Conflicts: pkg/archive/archive.go fixed conflict which git couldn't fix with the added BreakoutError Conflicts: pkg/archive/archive_test.go fixed conflict in imports --- pkg/archive/archive.go | 5 + pkg/archive/archive_test.go | 192 +++++++++++++++++++++++++++++++++++- pkg/archive/diff_test.go | 191 +++++++++++++++++++++++++++++++++++ pkg/archive/utils_test.go | 166 +++++++++++++++++++++++++++++++ 4 files changed, 553 insertions(+), 1 deletion(-) create mode 100644 pkg/archive/diff_test.go create mode 100644 pkg/archive/utils_test.go diff --git a/pkg/archive/archive.go b/pkg/archive/archive.go index 995668104d..d90dfcffcf 100644 --- a/pkg/archive/archive.go +++ b/pkg/archive/archive.go @@ -42,6 +42,11 @@ type ( Archiver struct { Untar func(io.Reader, string, *TarOptions) error } + + // breakoutError is used to differentiate errors related to breaking out + // When testing archive breakout in the unit tests, this error is expected + // in order for the test to pass. + breakoutError error ) var ( diff --git a/pkg/archive/archive_test.go b/pkg/archive/archive_test.go index 3516aca8f0..36abdb958b 100644 --- a/pkg/archive/archive_test.go +++ b/pkg/archive/archive_test.go @@ -8,6 +8,7 @@ import ( "os" "os/exec" "path" + "path/filepath" "syscall" "testing" "time" @@ -214,7 +215,12 @@ func TestTarWithOptions(t *testing.T) { // Failing prevents the archives from being uncompressed during ADD func TestTypeXGlobalHeaderDoesNotFail(t *testing.T) { hdr := tar.Header{Typeflag: tar.TypeXGlobalHeader} - err := createTarFile("pax_global_header", "some_dir", &hdr, nil, true) + tmpDir, err := ioutil.TempDir("", "docker-test-archive-pax-test") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(tmpDir) + err = createTarFile(filepath.Join(tmpDir, "pax_global_header"), tmpDir, &hdr, nil, true) if err != nil { t.Fatal(err) } @@ -403,3 +409,187 @@ func BenchmarkTarUntarWithLinks(b *testing.B) { os.RemoveAll(target) } } + +func TestUntarInvalidFilenames(t *testing.T) { + for i, headers := range [][]*tar.Header{ + { + { + Name: "../victim/dotdot", + Typeflag: tar.TypeReg, + Mode: 0644, + }, + }, + { + { + // Note the leading slash + Name: "/../victim/slash-dotdot", + Typeflag: tar.TypeReg, + Mode: 0644, + }, + }, + } { + if err := testBreakout("untar", "docker-TestUntarInvalidFilenames", headers); err != nil { + t.Fatalf("i=%d. 
%v", i, err) + } + } +} + +func TestUntarInvalidHardlink(t *testing.T) { + for i, headers := range [][]*tar.Header{ + { // try reading victim/hello (../) + { + Name: "dotdot", + Typeflag: tar.TypeLink, + Linkname: "../victim/hello", + Mode: 0644, + }, + }, + { // try reading victim/hello (/../) + { + Name: "slash-dotdot", + Typeflag: tar.TypeLink, + // Note the leading slash + Linkname: "/../victim/hello", + Mode: 0644, + }, + }, + { // try writing victim/file + { + Name: "loophole-victim", + Typeflag: tar.TypeLink, + Linkname: "../victim", + Mode: 0755, + }, + { + Name: "loophole-victim/file", + Typeflag: tar.TypeReg, + Mode: 0644, + }, + }, + { // try reading victim/hello (hardlink, symlink) + { + Name: "loophole-victim", + Typeflag: tar.TypeLink, + Linkname: "../victim", + Mode: 0755, + }, + { + Name: "symlink", + Typeflag: tar.TypeSymlink, + Linkname: "loophole-victim/hello", + Mode: 0644, + }, + }, + { // Try reading victim/hello (hardlink, hardlink) + { + Name: "loophole-victim", + Typeflag: tar.TypeLink, + Linkname: "../victim", + Mode: 0755, + }, + { + Name: "hardlink", + Typeflag: tar.TypeLink, + Linkname: "loophole-victim/hello", + Mode: 0644, + }, + }, + { // Try removing victim directory (hardlink) + { + Name: "loophole-victim", + Typeflag: tar.TypeLink, + Linkname: "../victim", + Mode: 0755, + }, + { + Name: "loophole-victim", + Typeflag: tar.TypeReg, + Mode: 0644, + }, + }, + } { + if err := testBreakout("untar", "docker-TestUntarInvalidHardlink", headers); err != nil { + t.Fatalf("i=%d. %v", i, err) + } + } +} + +func TestUntarInvalidSymlink(t *testing.T) { + for i, headers := range [][]*tar.Header{ + { // try reading victim/hello (../) + { + Name: "dotdot", + Typeflag: tar.TypeSymlink, + Linkname: "../victim/hello", + Mode: 0644, + }, + }, + { // try reading victim/hello (/../) + { + Name: "slash-dotdot", + Typeflag: tar.TypeSymlink, + // Note the leading slash + Linkname: "/../victim/hello", + Mode: 0644, + }, + }, + { // try writing victim/file + { + Name: "loophole-victim", + Typeflag: tar.TypeSymlink, + Linkname: "../victim", + Mode: 0755, + }, + { + Name: "loophole-victim/file", + Typeflag: tar.TypeReg, + Mode: 0644, + }, + }, + { // try reading victim/hello (symlink, symlink) + { + Name: "loophole-victim", + Typeflag: tar.TypeSymlink, + Linkname: "../victim", + Mode: 0755, + }, + { + Name: "symlink", + Typeflag: tar.TypeSymlink, + Linkname: "loophole-victim/hello", + Mode: 0644, + }, + }, + { // try reading victim/hello (symlink, hardlink) + { + Name: "loophole-victim", + Typeflag: tar.TypeSymlink, + Linkname: "../victim", + Mode: 0755, + }, + { + Name: "hardlink", + Typeflag: tar.TypeLink, + Linkname: "loophole-victim/hello", + Mode: 0644, + }, + }, + { // try removing victim directory (symlink) + { + Name: "loophole-victim", + Typeflag: tar.TypeSymlink, + Linkname: "../victim", + Mode: 0755, + }, + { + Name: "loophole-victim", + Typeflag: tar.TypeReg, + Mode: 0644, + }, + }, + } { + if err := testBreakout("untar", "docker-TestUntarInvalidSymlink", headers); err != nil { + t.Fatalf("i=%d. 
%v", i, err) + } + } +} diff --git a/pkg/archive/diff_test.go b/pkg/archive/diff_test.go new file mode 100644 index 0000000000..758c4115d5 --- /dev/null +++ b/pkg/archive/diff_test.go @@ -0,0 +1,191 @@ +package archive + +import ( + "testing" + + "github.com/docker/docker/vendor/src/code.google.com/p/go/src/pkg/archive/tar" +) + +func TestApplyLayerInvalidFilenames(t *testing.T) { + for i, headers := range [][]*tar.Header{ + { + { + Name: "../victim/dotdot", + Typeflag: tar.TypeReg, + Mode: 0644, + }, + }, + { + { + // Note the leading slash + Name: "/../victim/slash-dotdot", + Typeflag: tar.TypeReg, + Mode: 0644, + }, + }, + } { + if err := testBreakout("applylayer", "docker-TestApplyLayerInvalidFilenames", headers); err != nil { + t.Fatalf("i=%d. %v", i, err) + } + } +} + +func TestApplyLayerInvalidHardlink(t *testing.T) { + for i, headers := range [][]*tar.Header{ + { // try reading victim/hello (../) + { + Name: "dotdot", + Typeflag: tar.TypeLink, + Linkname: "../victim/hello", + Mode: 0644, + }, + }, + { // try reading victim/hello (/../) + { + Name: "slash-dotdot", + Typeflag: tar.TypeLink, + // Note the leading slash + Linkname: "/../victim/hello", + Mode: 0644, + }, + }, + { // try writing victim/file + { + Name: "loophole-victim", + Typeflag: tar.TypeLink, + Linkname: "../victim", + Mode: 0755, + }, + { + Name: "loophole-victim/file", + Typeflag: tar.TypeReg, + Mode: 0644, + }, + }, + { // try reading victim/hello (hardlink, symlink) + { + Name: "loophole-victim", + Typeflag: tar.TypeLink, + Linkname: "../victim", + Mode: 0755, + }, + { + Name: "symlink", + Typeflag: tar.TypeSymlink, + Linkname: "loophole-victim/hello", + Mode: 0644, + }, + }, + { // Try reading victim/hello (hardlink, hardlink) + { + Name: "loophole-victim", + Typeflag: tar.TypeLink, + Linkname: "../victim", + Mode: 0755, + }, + { + Name: "hardlink", + Typeflag: tar.TypeLink, + Linkname: "loophole-victim/hello", + Mode: 0644, + }, + }, + { // Try removing victim directory (hardlink) + { + Name: "loophole-victim", + Typeflag: tar.TypeLink, + Linkname: "../victim", + Mode: 0755, + }, + { + Name: "loophole-victim", + Typeflag: tar.TypeReg, + Mode: 0644, + }, + }, + } { + if err := testBreakout("applylayer", "docker-TestApplyLayerInvalidHardlink", headers); err != nil { + t.Fatalf("i=%d. 
%v", i, err) + } + } +} + +func TestApplyLayerInvalidSymlink(t *testing.T) { + for i, headers := range [][]*tar.Header{ + { // try reading victim/hello (../) + { + Name: "dotdot", + Typeflag: tar.TypeSymlink, + Linkname: "../victim/hello", + Mode: 0644, + }, + }, + { // try reading victim/hello (/../) + { + Name: "slash-dotdot", + Typeflag: tar.TypeSymlink, + // Note the leading slash + Linkname: "/../victim/hello", + Mode: 0644, + }, + }, + { // try writing victim/file + { + Name: "loophole-victim", + Typeflag: tar.TypeSymlink, + Linkname: "../victim", + Mode: 0755, + }, + { + Name: "loophole-victim/file", + Typeflag: tar.TypeReg, + Mode: 0644, + }, + }, + { // try reading victim/hello (symlink, symlink) + { + Name: "loophole-victim", + Typeflag: tar.TypeSymlink, + Linkname: "../victim", + Mode: 0755, + }, + { + Name: "symlink", + Typeflag: tar.TypeSymlink, + Linkname: "loophole-victim/hello", + Mode: 0644, + }, + }, + { // try reading victim/hello (symlink, hardlink) + { + Name: "loophole-victim", + Typeflag: tar.TypeSymlink, + Linkname: "../victim", + Mode: 0755, + }, + { + Name: "hardlink", + Typeflag: tar.TypeLink, + Linkname: "loophole-victim/hello", + Mode: 0644, + }, + }, + { // try removing victim directory (symlink) + { + Name: "loophole-victim", + Typeflag: tar.TypeSymlink, + Linkname: "../victim", + Mode: 0755, + }, + { + Name: "loophole-victim", + Typeflag: tar.TypeReg, + Mode: 0644, + }, + }, + } { + if err := testBreakout("applylayer", "docker-TestApplyLayerInvalidSymlink", headers); err != nil { + t.Fatalf("i=%d. %v", i, err) + } + } +} diff --git a/pkg/archive/utils_test.go b/pkg/archive/utils_test.go new file mode 100644 index 0000000000..3624fe5afa --- /dev/null +++ b/pkg/archive/utils_test.go @@ -0,0 +1,166 @@ +package archive + +import ( + "bytes" + "fmt" + "io" + "io/ioutil" + "os" + "path/filepath" + "time" + + "github.com/docker/docker/vendor/src/code.google.com/p/go/src/pkg/archive/tar" +) + +var testUntarFns = map[string]func(string, io.Reader) error{ + "untar": func(dest string, r io.Reader) error { + return Untar(r, dest, nil) + }, + "applylayer": func(dest string, r io.Reader) error { + return ApplyLayer(dest, ArchiveReader(r)) + }, +} + +// testBreakout is a helper function that, within the provided `tmpdir` directory, +// creates a `victim` folder with a generated `hello` file in it. +// `untar` extracts to a directory named `dest`, the tar file created from `headers`. +// +// Here are the tested scenarios: +// - removed `victim` folder (write) +// - removed files from `victim` folder (write) +// - new files in `victim` folder (write) +// - modified files in `victim` folder (write) +// - file in `dest` with same content as `victim/hello` (read) +// +// When using testBreakout make sure you cover one of the scenarios listed above. 
+func testBreakout(untarFn string, tmpdir string, headers []*tar.Header) error { + tmpdir, err := ioutil.TempDir("", tmpdir) + if err != nil { + return err + } + defer os.RemoveAll(tmpdir) + + dest := filepath.Join(tmpdir, "dest") + if err := os.Mkdir(dest, 0755); err != nil { + return err + } + + victim := filepath.Join(tmpdir, "victim") + if err := os.Mkdir(victim, 0755); err != nil { + return err + } + hello := filepath.Join(victim, "hello") + helloData, err := time.Now().MarshalText() + if err != nil { + return err + } + if err := ioutil.WriteFile(hello, helloData, 0644); err != nil { + return err + } + helloStat, err := os.Stat(hello) + if err != nil { + return err + } + + reader, writer := io.Pipe() + go func() { + t := tar.NewWriter(writer) + for _, hdr := range headers { + t.WriteHeader(hdr) + } + t.Close() + }() + + untar := testUntarFns[untarFn] + if untar == nil { + return fmt.Errorf("could not find untar function %q in testUntarFns", untarFn) + } + if err := untar(dest, reader); err != nil { + if _, ok := err.(breakoutError); !ok { + // If untar returns an error unrelated to an archive breakout, + // then consider this an unexpected error and abort. + return err + } + // Here, untar detected the breakout. + // Let's move on verifying that indeed there was no breakout. + fmt.Printf("breakoutError: %v\n", err) + } + + // Check victim folder + f, err := os.Open(victim) + if err != nil { + // codepath taken if victim folder was removed + return fmt.Errorf("archive breakout: error reading %q: %v", victim, err) + } + defer f.Close() + + // Check contents of victim folder + // + // We are only interested in getting 2 files from the victim folder, because if all is well + // we expect only one result, the `hello` file. If there is a second result, it cannot + // hold the same name `hello` and we assume that a new file got created in the victim folder. + // That is enough to detect an archive breakout. + names, err := f.Readdirnames(2) + if err != nil { + // codepath taken if victim is not a folder + return fmt.Errorf("archive breakout: error reading directory content of %q: %v", victim, err) + } + for _, name := range names { + if name != "hello" { + // codepath taken if new file was created in victim folder + return fmt.Errorf("archive breakout: new file %q", name) + } + } + + // Check victim/hello + f, err = os.Open(hello) + if err != nil { + // codepath taken if read permissions were removed + return fmt.Errorf("archive breakout: could not lstat %q: %v", hello, err) + } + defer f.Close() + b, err := ioutil.ReadAll(f) + if err != nil { + return err + } + fi, err := f.Stat() + if err != nil { + return err + } + if helloStat.IsDir() != fi.IsDir() || + // TODO: cannot check for fi.ModTime() change + helloStat.Mode() != fi.Mode() || + helloStat.Size() != fi.Size() || + !bytes.Equal(helloData, b) { + // codepath taken if hello has been modified + return fmt.Errorf("archive breakout: file %q has been modified. Contents: expected=%q, got=%q. FileInfo: expected=%#v, got=%#v.", hello, helloData, b, helloStat, fi) + } + + // Check that nothing in dest/ has the same content as victim/hello. + // Since victim/hello was generated with time.Now(), it is safe to assume + // that any file whose content matches exactly victim/hello, managed somehow + // to access victim/hello. 
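+	// Only regular-file contents are compared below; directories and unreadable
+	// entries are skipped, because nothing short of a readable copy of the
+	// unique victim/hello payload demonstrates a breakout.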
+ return filepath.Walk(dest, func(path string, info os.FileInfo, err error) error { + if info.IsDir() { + if err != nil { + // skip directory if error + return filepath.SkipDir + } + // enter directory + return nil + } + if err != nil { + // skip file if error + return nil + } + b, err := ioutil.ReadFile(path) + if err != nil { + // Houston, we have a problem. Aborting (space)walk. + return err + } + if bytes.Equal(helloData, b) { + return fmt.Errorf("archive breakout: file %q has been accessed via %q", hello, path) + } + return nil + }) +} From 1852cc38415c3d63d18c2938af9c112fbc4dfc10 Mon Sep 17 00:00:00 2001 From: Tibor Vass Date: Mon, 20 Oct 2014 15:36:28 -0400 Subject: [PATCH 440/592] archive: prevent breakout in Untar Signed-off-by: Tibor Vass --- pkg/archive/archive.go | 22 +++++++++++++++++++++- pkg/symlink/fs.go | 4 +++- 2 files changed, 24 insertions(+), 2 deletions(-) diff --git a/pkg/archive/archive.go b/pkg/archive/archive.go index d90dfcffcf..67eb0be8ad 100644 --- a/pkg/archive/archive.go +++ b/pkg/archive/archive.go @@ -22,6 +22,7 @@ import ( "github.com/docker/docker/pkg/fileutils" "github.com/docker/docker/pkg/pools" "github.com/docker/docker/pkg/promise" + "github.com/docker/docker/pkg/symlink" "github.com/docker/docker/pkg/system" ) @@ -292,11 +293,23 @@ func createTarFile(path, extractDir string, hdr *tar.Header, reader io.Reader, L } case tar.TypeLink: - if err := os.Link(filepath.Join(extractDir, hdr.Linkname), path); err != nil { + targetPath := filepath.Join(extractDir, hdr.Linkname) + // check for hardlink breakout + if !strings.HasPrefix(targetPath, extractDir) { + return breakoutError(fmt.Errorf("invalid hardlink %q -> %q", targetPath, hdr.Linkname)) + } + if err := os.Link(targetPath, path); err != nil { return err } case tar.TypeSymlink: + // check for symlink breakout + if _, err := symlink.FollowSymlinkInScope(filepath.Join(filepath.Dir(path), hdr.Linkname), extractDir); err != nil { + if _, ok := err.(symlink.ErrBreakout); ok { + return breakoutError(fmt.Errorf("invalid symlink %q -> %q", path, hdr.Linkname)) + } + return err + } if err := os.Symlink(hdr.Linkname, path); err != nil { return err } @@ -456,6 +469,8 @@ func TarWithOptions(srcPath string, options *TarOptions) (io.ReadCloser, error) // identity (uncompressed), gzip, bzip2, xz. // FIXME: specify behavior when target path exists vs. doesn't exist. func Untar(archive io.Reader, dest string, options *TarOptions) error { + dest = filepath.Clean(dest) + if options == nil { options = &TarOptions{} } @@ -493,6 +508,7 @@ loop: } // Normalize name, for safety and for a simple is-root check + // This keeps "../" as-is, but normalizes "/../" to "/" hdr.Name = filepath.Clean(hdr.Name) for _, exclude := range options.Excludes { @@ -513,7 +529,11 @@ loop: } } + // Prevent symlink breakout path := filepath.Join(dest, hdr.Name) + if !strings.HasPrefix(path, dest) { + return breakoutError(fmt.Errorf("%q is outside of %q", path, dest)) + } // If path exits we almost always just want to remove and replace it // The only exception is when it is a directory *and* the file from diff --git a/pkg/symlink/fs.go b/pkg/symlink/fs.go index 6ce99c6bda..09271ffc4b 100644 --- a/pkg/symlink/fs.go +++ b/pkg/symlink/fs.go @@ -10,6 +10,8 @@ import ( const maxLoopCounter = 100 +type ErrBreakout error + // FollowSymlink will follow an existing link and scope it to the root // path provided. 
// The role of this function is to return an absolute path in the root @@ -34,7 +36,7 @@ func FollowSymlinkInScope(link, root string) (string, error) { } if !strings.HasPrefix(filepath.Dir(link), root) { - return "", fmt.Errorf("%s is not within %s", link, root) + return "", ErrBreakout(fmt.Errorf("%s is not within %s", link, root)) } prev := "/" From 31d1d733037b22591e2dd2edfe6c4d2d4b8086cc Mon Sep 17 00:00:00 2001 From: Tibor Vass Date: Fri, 31 Oct 2014 13:18:39 -0400 Subject: [PATCH 441/592] archive: prevent breakout in ApplyLayer Signed-off-by: Tibor Vass --- pkg/archive/diff.go | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/pkg/archive/diff.go b/pkg/archive/diff.go index eabb7c48ff..856cedcead 100644 --- a/pkg/archive/diff.go +++ b/pkg/archive/diff.go @@ -18,6 +18,8 @@ import ( // ApplyLayer parses a diff in the standard layer format from `layer`, and // applies it to the directory `dest`. func ApplyLayer(dest string, layer ArchiveReader) error { + dest = filepath.Clean(dest) + // We need to be able to set any perms oldmask, err := system.Umask(0) if err != nil { @@ -91,6 +93,12 @@ func ApplyLayer(dest string, layer ArchiveReader) error { path := filepath.Join(dest, hdr.Name) base := filepath.Base(path) + + // Prevent symlink breakout + if !strings.HasPrefix(path, dest) { + return breakoutError(fmt.Errorf("%q is outside of %q", path, dest)) + } + if strings.HasPrefix(base, ".wh.") { originalBase := base[len(".wh."):] originalPath := filepath.Join(filepath.Dir(path), originalBase) From 330171e1d9ec537d7f691fd63c697a0540589053 Mon Sep 17 00:00:00 2001 From: unclejack Date: Tue, 18 Nov 2014 23:33:13 +0200 Subject: [PATCH 442/592] pkg/chrootarchive: provide TMPDIR for ApplyLayer Docker-DCO-1.1-Signed-off-by: Cristian Staretu (github: unclejack) --- pkg/chrootarchive/diff.go | 10 +++++++++- 1 file changed, 9 insertions(+), 1 deletion(-) diff --git a/pkg/chrootarchive/diff.go b/pkg/chrootarchive/diff.go index 2133200c68..2653aefe9d 100644 --- a/pkg/chrootarchive/diff.go +++ b/pkg/chrootarchive/diff.go @@ -3,6 +3,7 @@ package chrootarchive import ( "flag" "fmt" + "io/ioutil" "os" "runtime" "syscall" @@ -21,9 +22,16 @@ func applyLayer() { if err := syscall.Chdir("/"); err != nil { fatal(err) } - if err := archive.ApplyLayer("/", os.Stdin); err != nil { + tmpDir, err := ioutil.TempDir("/", "temp-docker-extract") + if err != nil { fatal(err) } + os.Setenv("TMPDIR", tmpDir) + if err := archive.ApplyLayer("/", os.Stdin); err != nil { + os.RemoveAll(tmpDir) + fatal(err) + } + os.RemoveAll(tmpDir) os.Exit(0) } From f6d9780229bfa52c86762d49a7a7e644dcd8f6df Mon Sep 17 00:00:00 2001 From: Tibor Vass Date: Wed, 19 Nov 2014 11:27:34 -0500 Subject: [PATCH 443/592] archive: do not call FollowSymlinkInScope in createTarFile Signed-off-by: Tibor Vass --- pkg/archive/archive.go | 15 ++++++++------- pkg/archive/archive_test.go | 14 ++++++++++++++ pkg/symlink/fs.go | 4 +--- 3 files changed, 23 insertions(+), 10 deletions(-) diff --git a/pkg/archive/archive.go b/pkg/archive/archive.go index 67eb0be8ad..aaeed31981 100644 --- a/pkg/archive/archive.go +++ b/pkg/archive/archive.go @@ -22,7 +22,6 @@ import ( "github.com/docker/docker/pkg/fileutils" "github.com/docker/docker/pkg/pools" "github.com/docker/docker/pkg/promise" - "github.com/docker/docker/pkg/symlink" "github.com/docker/docker/pkg/system" ) @@ -303,12 +302,14 @@ func createTarFile(path, extractDir string, hdr *tar.Header, reader io.Reader, L } case tar.TypeSymlink: - // check for symlink breakout - if _, err := 
symlink.FollowSymlinkInScope(filepath.Join(filepath.Dir(path), hdr.Linkname), extractDir); err != nil { - if _, ok := err.(symlink.ErrBreakout); ok { - return breakoutError(fmt.Errorf("invalid symlink %q -> %q", path, hdr.Linkname)) - } - return err + // path -> hdr.Linkname = targetPath + // e.g. /extractDir/path/to/symlink -> ../2/file = /extractDir/path/2/file + targetPath := filepath.Join(filepath.Dir(path), hdr.Linkname) + + // the reason we don't need to check symlinks in the path (with FollowSymlinkInScope) is because + // that symlink would first have to be created, which would be caught earlier, at this very check: + if !strings.HasPrefix(targetPath, extractDir) { + return breakoutError(fmt.Errorf("invalid symlink %q -> %q", path, hdr.Linkname)) } if err := os.Symlink(hdr.Linkname, path); err != nil { return err diff --git a/pkg/archive/archive_test.go b/pkg/archive/archive_test.go index 36abdb958b..05362a21c9 100644 --- a/pkg/archive/archive_test.go +++ b/pkg/archive/archive_test.go @@ -587,6 +587,20 @@ func TestUntarInvalidSymlink(t *testing.T) { Mode: 0644, }, }, + { // try writing to victim/newdir/newfile with a symlink in the path + { + // this header needs to be before the next one, or else there is an error + Name: "dir/loophole", + Typeflag: tar.TypeSymlink, + Linkname: "../../victim", + Mode: 0755, + }, + { + Name: "dir/loophole/newdir/newfile", + Typeflag: tar.TypeReg, + Mode: 0644, + }, + }, } { if err := testBreakout("untar", "docker-TestUntarInvalidSymlink", headers); err != nil { t.Fatalf("i=%d. %v", i, err) diff --git a/pkg/symlink/fs.go b/pkg/symlink/fs.go index 09271ffc4b..6ce99c6bda 100644 --- a/pkg/symlink/fs.go +++ b/pkg/symlink/fs.go @@ -10,8 +10,6 @@ import ( const maxLoopCounter = 100 -type ErrBreakout error - // FollowSymlink will follow an existing link and scope it to the root // path provided. // The role of this function is to return an absolute path in the root @@ -36,7 +34,7 @@ func FollowSymlinkInScope(link, root string) (string, error) { } if !strings.HasPrefix(filepath.Dir(link), root) { - return "", ErrBreakout(fmt.Errorf("%s is not within %s", link, root)) + return "", fmt.Errorf("%s is not within %s", link, root) } prev := "/" From feca1b1780a0942aafc1796d1fd52026a673bf92 Mon Sep 17 00:00:00 2001 From: Michael Crosby Date: Mon, 24 Nov 2014 18:10:37 -0500 Subject: [PATCH 444/592] Move git and url checks into pkg This moves the IsGIT and IsURL functions out of the generic `utils` package and into their own `urlutil` pkg. Signed-off-by: Michael Crosby --- pkg/urlutil/git.go | 30 ++++++++++++++++++++++++++++ pkg/urlutil/git_test.go | 43 +++++++++++++++++++++++++++++++++++++++++ pkg/urlutil/url.go | 19 ++++++++++++++++++ utils/utils.go | 16 +-------------- utils/utils_test.go | 40 -------------------------------------- 5 files changed, 93 insertions(+), 55 deletions(-) create mode 100644 pkg/urlutil/git.go create mode 100644 pkg/urlutil/git_test.go create mode 100644 pkg/urlutil/url.go diff --git a/pkg/urlutil/git.go b/pkg/urlutil/git.go new file mode 100644 index 0000000000..ba88ddf6e6 --- /dev/null +++ b/pkg/urlutil/git.go @@ -0,0 +1,30 @@ +package urlutil + +import "strings" + +var ( + validPrefixes = []string{ + "git://", + "github.com/", + "git@", + } +) + +// IsGitURL returns true if the provided str is a git repository URL. 
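+// It matches HTTP(S) URLs that end in ".git", as well as anything prefixed
+// with "git://", "github.com/" or "git@".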
+func IsGitURL(str string) bool { + if IsURL(str) && strings.HasSuffix(str, ".git") { + return true + } + for _, prefix := range validPrefixes { + if strings.HasPrefix(str, prefix) { + return true + } + } + return false +} + +// IsGitTransport returns true if the provided str is a git transport by inspecting +// the prefix of the string for known protocols used in git. +func IsGitTransport(str string) bool { + return IsURL(str) || strings.HasPrefix(str, "git://") || strings.HasPrefix(str, "git@") +} diff --git a/pkg/urlutil/git_test.go b/pkg/urlutil/git_test.go new file mode 100644 index 0000000000..01dcea7da3 --- /dev/null +++ b/pkg/urlutil/git_test.go @@ -0,0 +1,43 @@ +package urlutil + +import "testing" + +var ( + gitUrls = []string{ + "git://github.com/docker/docker", + "git@github.com:docker/docker.git", + "git@bitbucket.org:atlassianlabs/atlassian-docker.git", + "https://github.com/docker/docker.git", + "http://github.com/docker/docker.git", + } + incompleteGitUrls = []string{ + "github.com/docker/docker", + } +) + +func TestValidGitTransport(t *testing.T) { + for _, url := range gitUrls { + if IsGitTransport(url) == false { + t.Fatalf("%q should be detected as valid Git prefix", url) + } + } + + for _, url := range incompleteGitUrls { + if IsGitTransport(url) == true { + t.Fatalf("%q should not be detected as valid Git prefix", url) + } + } +} + +func TestIsGIT(t *testing.T) { + for _, url := range gitUrls { + if IsGitURL(url) == false { + t.Fatalf("%q should be detected as valid Git url", url) + } + } + for _, url := range incompleteGitUrls { + if IsGitURL(url) == false { + t.Fatalf("%q should be detected as valid Git url", url) + } + } +} diff --git a/pkg/urlutil/url.go b/pkg/urlutil/url.go new file mode 100644 index 0000000000..eeae56efe7 --- /dev/null +++ b/pkg/urlutil/url.go @@ -0,0 +1,19 @@ +package urlutil + +import "strings" + +var validUrlPrefixes = []string{ + "http://", + "https://", +} + +// IsURL returns true if the provided str is a valid URL by doing +// a simple change for the transport of the url. +func IsURL(str string) bool { + for _, prefix := range validUrlPrefixes { + if strings.HasPrefix(str, prefix) { + return true + } + } + return false +} diff --git a/utils/utils.go b/utils/utils.go index 3f49cb72f1..e529cb9687 100644 --- a/utils/utils.go +++ b/utils/utils.go @@ -288,21 +288,7 @@ func NewHTTPRequestError(msg string, res *http.Response) error { } } -func IsURL(str string) bool { - return strings.HasPrefix(str, "http://") || strings.HasPrefix(str, "https://") -} - -func IsGIT(str string) bool { - return strings.HasPrefix(str, "git://") || strings.HasPrefix(str, "github.com/") || strings.HasPrefix(str, "git@") || (strings.HasSuffix(str, ".git") && IsURL(str)) -} - -func ValidGitTransport(str string) bool { - return strings.HasPrefix(str, "git://") || strings.HasPrefix(str, "git@") || IsURL(str) -} - -var ( - localHostRx = regexp.MustCompile(`(?m)^nameserver 127[^\n]+\n*`) -) +var localHostRx = regexp.MustCompile(`(?m)^nameserver 127[^\n]+\n*`) // RemoveLocalDns looks into the /etc/resolv.conf, // and removes any local nameserver entries. 
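For reference, a minimal sketch of how callers are expected to use the relocated helpers (the import path matches the package added above; the sample arguments are invented, and the branching only approximates what `docker build` does):

```
package main

import (
	"fmt"

	"github.com/docker/docker/pkg/urlutil"
)

func main() {
	// Classify a build-context argument roughly the way `docker build` will
	// after the callers are switched from utils.IsGIT/utils.IsURL to urlutil.
	for _, arg := range []string{
		"git://github.com/docker/docker",
		"git@github.com:docker/docker.git",
		"github.com/docker/docker",
		"https://example.com/context.tar.gz",
		".",
	} {
		switch {
		case urlutil.IsGitURL(arg):
			fmt.Printf("%-40s -> clone as a git repository\n", arg)
		case urlutil.IsURL(arg):
			fmt.Printf("%-40s -> download as a remote build context\n", arg)
		default:
			fmt.Printf("%-40s -> treat as a local directory\n", arg)
		}
	}
}
```

In the same spirit, `IsGitTransport` is what lets a caller decide whether an incomplete URL such as "github.com/docker/docker" still needs an "https://" prefix before cloning.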
diff --git a/utils/utils_test.go b/utils/utils_test.go index a319d2d818..ce304482b8 100644 --- a/utils/utils_test.go +++ b/utils/utils_test.go @@ -97,43 +97,3 @@ func TestReadSymlinkedDirectoryToFile(t *testing.T) { t.Errorf("failed to remove symlink: %s", err) } } - -var ( - gitUrls = []string{ - "git://github.com/docker/docker", - "git@github.com:docker/docker.git", - "git@bitbucket.org:atlassianlabs/atlassian-docker.git", - "https://github.com/docker/docker.git", - "http://github.com/docker/docker.git", - } - incompleteGitUrls = []string{ - "github.com/docker/docker", - } -) - -func TestValidGitTransport(t *testing.T) { - for _, url := range gitUrls { - if ValidGitTransport(url) == false { - t.Fatalf("%q should be detected as valid Git prefix", url) - } - } - - for _, url := range incompleteGitUrls { - if ValidGitTransport(url) == true { - t.Fatalf("%q should not be detected as valid Git prefix", url) - } - } -} - -func TestIsGIT(t *testing.T) { - for _, url := range gitUrls { - if IsGIT(url) == false { - t.Fatalf("%q should be detected as valid Git url", url) - } - } - for _, url := range incompleteGitUrls { - if IsGIT(url) == false { - t.Fatalf("%q should be detected as valid Git url", url) - } - } -} From 5794b5373ef26846b3cc5e48e651208771d12b19 Mon Sep 17 00:00:00 2001 From: Michael Crosby Date: Mon, 24 Nov 2014 18:47:42 -0500 Subject: [PATCH 445/592] Update code for use of urlutil pkg Signed-off-by: Michael Crosby --- api/client/commands.go | 7 ++++--- builder/internals.go | 3 ++- builder/job.go | 7 ++++--- 3 files changed, 10 insertions(+), 7 deletions(-) diff --git a/api/client/commands.go b/api/client/commands.go index 4255bdbc50..b2561104a7 100644 --- a/api/client/commands.go +++ b/api/client/commands.go @@ -38,6 +38,7 @@ import ( "github.com/docker/docker/pkg/term" "github.com/docker/docker/pkg/timeutils" "github.com/docker/docker/pkg/units" + "github.com/docker/docker/pkg/urlutil" "github.com/docker/docker/registry" "github.com/docker/docker/runconfig" "github.com/docker/docker/utils" @@ -115,13 +116,13 @@ func (cli *DockerCli) CmdBuild(args ...string) error { } else { context = ioutil.NopCloser(buf) } - } else if utils.IsURL(cmd.Arg(0)) && (!utils.IsGIT(cmd.Arg(0)) || !hasGit) { + } else if urlutil.IsURL(cmd.Arg(0)) && (!urlutil.IsGitURL(cmd.Arg(0)) || !hasGit) { isRemote = true } else { root := cmd.Arg(0) - if utils.IsGIT(root) { + if urlutil.IsGitURL(root) { remoteURL := cmd.Arg(0) - if !utils.ValidGitTransport(remoteURL) { + if !urlutil.IsGitTransport(remoteURL) { remoteURL = "https://" + remoteURL } diff --git a/builder/internals.go b/builder/internals.go index f6083e7918..619c9c2e96 100644 --- a/builder/internals.go +++ b/builder/internals.go @@ -28,6 +28,7 @@ import ( "github.com/docker/docker/pkg/symlink" "github.com/docker/docker/pkg/system" "github.com/docker/docker/pkg/tarsum" + "github.com/docker/docker/pkg/urlutil" "github.com/docker/docker/registry" "github.com/docker/docker/utils" ) @@ -215,7 +216,7 @@ func calcCopyInfo(b *Builder, cmdName string, cInfos *[]*copyInfo, origPath stri origPath = strings.TrimPrefix(origPath, "./") // In the remote/URL case, download it and gen its hashcode - if utils.IsURL(origPath) { + if urlutil.IsURL(origPath) { if !allowRemote { return fmt.Errorf("Source can't be a URL for %s", cmdName) } diff --git a/builder/job.go b/builder/job.go index 1d10e8eb34..20299d490a 100644 --- a/builder/job.go +++ b/builder/job.go @@ -11,6 +11,7 @@ import ( "github.com/docker/docker/graph" "github.com/docker/docker/pkg/archive" 
"github.com/docker/docker/pkg/parsers" + "github.com/docker/docker/pkg/urlutil" "github.com/docker/docker/registry" "github.com/docker/docker/utils" ) @@ -58,8 +59,8 @@ func (b *BuilderJob) CmdBuild(job *engine.Job) engine.Status { if remoteURL == "" { context = ioutil.NopCloser(job.Stdin) - } else if utils.IsGIT(remoteURL) { - if !utils.ValidGitTransport(remoteURL) { + } else if urlutil.IsGitURL(remoteURL) { + if !urlutil.IsGitTransport(remoteURL) { remoteURL = "https://" + remoteURL } root, err := ioutil.TempDir("", "docker-build-git") @@ -77,7 +78,7 @@ func (b *BuilderJob) CmdBuild(job *engine.Job) engine.Status { return job.Error(err) } context = c - } else if utils.IsURL(remoteURL) { + } else if urlutil.IsURL(remoteURL) { f, err := utils.Download(remoteURL) if err != nil { return job.Error(err) From 2ec2237909ba51d3fe10a2ee6cfb81f315408f68 Mon Sep 17 00:00:00 2001 From: unclejack Date: Tue, 25 Nov 2014 01:28:20 +0200 Subject: [PATCH 446/592] graph/load: add build tags to fix make cross Signed-off-by: Cristian Staretu --- graph/load.go | 2 ++ graph/load_unsupported.go | 11 +++++++++++ 2 files changed, 13 insertions(+) create mode 100644 graph/load_unsupported.go diff --git a/graph/load.go b/graph/load.go index 18c83c07de..76172d2555 100644 --- a/graph/load.go +++ b/graph/load.go @@ -1,3 +1,5 @@ +// +build linux + package graph import ( diff --git a/graph/load_unsupported.go b/graph/load_unsupported.go new file mode 100644 index 0000000000..164e9176a1 --- /dev/null +++ b/graph/load_unsupported.go @@ -0,0 +1,11 @@ +// +build !linux + +package graph + +import ( + "github.com/docker/docker/engine" +) + +func (s *TagStore) CmdLoad(job *engine.Job) engine.Status { + return job.Errorf("CmdLoad is not supported on this platform") +} From b21e1d4a00f2687ef24aa47039ac2f0281294365 Mon Sep 17 00:00:00 2001 From: Tibor Vass Date: Fri, 14 Nov 2014 14:47:37 -0800 Subject: [PATCH 447/592] Add v1.3.2 changelog & bump version to 1.3.2-dev Signed-off-by: Tibor Vass Signed-off-by: Cristian Staretu --- CHANGELOG.md | 16 ++++++++++++++++ VERSION | 2 +- 2 files changed, 17 insertions(+), 1 deletion(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index b9d4370517..2d8f5cce8c 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,21 @@ # Changelog +## 1.3.2 (2014-11-20) + +#### Security +- Fix tar breakout vulnerability +* Extractions are now sandboxed chroot +- Security options are no longer committed to images + +#### Runtime +- Fix deadlock in `docker ps -f exited=1` +- Fix a bug when `--volumes-from` references a container that failed to start + +#### Registry ++ `--insecure-registry` now accepts CIDR notation such as 10.1.0.0/16 +* Private registries whose IPs fall in the 127.0.0.0/8 range do no need the `--insecure-registry` flag +- Skip the experimental registry v2 API when mirroring is enabled + ## 1.3.1 (2014-10-28) #### Security diff --git a/VERSION b/VERSION index 625610ece8..259bb263c9 100644 --- a/VERSION +++ b/VERSION @@ -1 +1 @@ -1.3.1-dev +1.3.2-dev From 4dd3368b51b2b00936f91fcc951d81d0c0d918ae Mon Sep 17 00:00:00 2001 From: Tibor Vass Date: Mon, 24 Nov 2014 11:33:38 -0500 Subject: [PATCH 448/592] docs: Add 1.3.2 release notes Signed-off-by: Tibor Vass --- docs/sources/release-notes.md | 29 +++++++++++++++++++++++++++++ 1 file changed, 29 insertions(+) diff --git a/docs/sources/release-notes.md b/docs/sources/release-notes.md index b1b3b2bfdf..cf528bc729 100644 --- a/docs/sources/release-notes.md +++ b/docs/sources/release-notes.md @@ -4,6 +4,35 @@ page_keywords: docker, documentation, about, 
technology, understanding, release #Release Notes +##Version 1.3.2 +(2014-11-24) + +This release fixes some bugs and addresses some security issues. We have also +made improvements to aspects of `docker run`. + +*Security fixes* + +Patches and changes were made to address CVE-2014-6407 and CVE-2014-6408. +Specifically, changes were made in order to: + +* Prevent host privilege escalation from an image extraction vulnerability (CVE-2014-6407). + +* Prevent container escalation from malicious security options applied to images (CVE-2014-6408). + +*Daemon fixes* + +The `--insecure-registry` flag of the `docker run` command has undergone +several refinements and additions. For details, please see the +[command-line reference](http://docs.docker.com/reference/commandline/cli/#run). + +* You can now specify a sub-net in order to set a range of registries which the Docker daemon will consider insecure. + +* By default, Docker now defines `localhost` as an insecure registry. + +* Registries can now be referenced using the Classless Inter-Domain Routing (CIDR) format. + +* When mirroring is enabled, the experimental registry v2 API is skipped. + ##Version 1.3.1 (2014-10-28) From b9f1b0a7514c6e40e7048fb9206001259eb7c33c Mon Sep 17 00:00:00 2001 From: Mike Snitzer Date: Fri, 21 Nov 2014 22:26:09 -0500 Subject: [PATCH 449/592] devmapper: cleanup some extraneous branching in setupBaseImage() Docker-DCO-1.1-Signed-off-by: Mike Snitzer (github: snitm) --- daemon/graphdriver/devmapper/deviceset.go | 19 ++++++++++--------- 1 file changed, 10 insertions(+), 9 deletions(-) diff --git a/daemon/graphdriver/devmapper/deviceset.go b/daemon/graphdriver/devmapper/deviceset.go index e4fa3e7195..b9d6e7616d 100644 --- a/daemon/graphdriver/devmapper/deviceset.go +++ b/daemon/graphdriver/devmapper/deviceset.go @@ -416,16 +416,17 @@ func (devices *DeviceSet) setupBaseImage() error { } if devices.thinPoolDevice != "" && oldInfo == nil { - if _, transactionId, dataUsed, _, _, _, err := devices.poolStatus(); err != nil { + _, transactionId, dataUsed, _, _, _, err := devices.poolStatus() + if err != nil { return err - } else { - if dataUsed != 0 { - return fmt.Errorf("Unable to take ownership of thin-pool (%s) that already has used data blocks", - devices.thinPoolDevice) - } else if transactionId != 0 { - return fmt.Errorf("Unable to take ownership of thin-pool (%s) with non-zero transaction Id", - devices.thinPoolDevice) - } + } + if dataUsed != 0 { + return fmt.Errorf("Unable to take ownership of thin-pool (%s) that already has used data blocks", + devices.thinPoolDevice) + } + if transactionId != 0 { + return fmt.Errorf("Unable to take ownership of thin-pool (%s) with non-zero transaction Id", + devices.thinPoolDevice) } } From f30fee69b14a5fd9df9abb00498f2aff6e378bbe Mon Sep 17 00:00:00 2001 From: Vincent Batts Date: Fri, 24 Oct 2014 16:23:50 -0400 Subject: [PATCH 450/592] pkg/tarsum: specification on TarSum checksum Signed-off-by: Vincent Batts --- pkg/tarsum/tarsum_spec.md | 228 ++++++++++++++++++++++++++++++++++++++ 1 file changed, 228 insertions(+) create mode 100644 pkg/tarsum/tarsum_spec.md diff --git a/pkg/tarsum/tarsum_spec.md b/pkg/tarsum/tarsum_spec.md new file mode 100644 index 0000000000..bffd44adb0 --- /dev/null +++ b/pkg/tarsum/tarsum_spec.md @@ -0,0 +1,228 @@ +page_title: TarSum checksum specification +page_description: Documentation for algorithm used in the TarSum checksum calculation +page_keywords: docker, checksum, validation, tarsum + +# TarSum Checksum Specification + +## Abstract + +This document 
describes the algorithms used in performing the TarSum checksum +calculation on file system layers, the need for this method over existing +methods, and the versioning of this calculation. + + +## Introduction + +The transportation of file systems, regarding docker, is done with tar(1) +archives. Types of transpiration include distribution to and from a registry +endpoint, saving and loading through commands or docker daemon APIs, +transferring the build context from client to docker daemon, and committing the +file system of a container to become an image. + +As tar archives are used for transit, but not preserved in many situations, the +focus of the algorithm is to ensure the integrity of the preserved file system, +while maintaining a deterministic accountability. This includes neither +constrain the ordering or manipulation of the files during the creation or +unpacking of the archive, nor include additional metadata state about the file +system attributes. + + +## Intended Audience + +This document is outlining the methods used for consistent checksum calculation +for file systems transported via tar archives. + +Auditing these methodologies is an open and iterative process. This document +should accommodate the review of source code. Ultimately, this document should +be the starting point of further refinements to the algorithm and its future +versions. + + +## Concept + +The checksum mechanism must ensure the integrity and confidentiality of the +file system payload. + + +## Checksum Algorithm Profile + +A checksum mechanism must define the following operations and attributes: + +* associated hashing cipher - used to checksum each file payload and attribute + information. +* checksum list - each file of the file system archive has its checksum + calculated from the payload and attributes of the file. The final checksum is + calculated from this list, with specific ordering. +* version - as the algorithm adapts to requirements, there are behaviors of the + algorithm to manage by versioning. +* archive being calculated - the tar archive having its checksum calculated + + +## Elements of TarSum checksum + +The calculated sum output is a text string. The elements included in the output +of the calculated sum comprise the information needed for validation of the sum +(TarSum version and block cipher used) and the expected checksum in hexadecimal +form. + +There are two delimiters used: +* '+' separates TarSum version from block cipher +* ':' separates calculation mechanics from expected hash + +Example: + + "tarsum.v1+sha256:220a60ecd4a3c32c282622a625a54db9ba0ff55b5ba9c29c7064a2bc358b6a3e" + | | \ | + | | \ | + |_version_|_cipher__|__ | + | \ | + |_calculation_mechanics_|______________________expected_sum_______________________| + + +## Versioning + +Versioning was introduced [0] to accommodate differences in calculation needed, +and ability to maintain reverse compatibility. + +The general algorithm will be describe further in the 'Calculation'. + +### Version0 + +This is the initial version of TarSum. + +Its element in the checksum "tarsum" + + +### Version1 + +Its element in the checksum "tarsum.v1" + +The notable changes in this version: +* exclusion of file mtime from the file information headers, in each file + checksum calculation +* inclusion of extended attributes (xattrs. Also seen as "SCHILY.xattr." 
prefixed Pax + tar file info headers) keys and values in each file checksum calculation + +### VersionDev + +*Do not use unless validating refinements to the checksum algorithm* + +Its element in the checksum "tarsum.dev" + +This is a floating place holder for a next version. The methods used for +calculation are subject to change without notice. + +## Ciphers + +The official default and standard block cipher used in the calculation mechanic +is "sha256". This refers to SHA256 hash algorithm as defined in FIPS 180-4. + +Though the algorithm itself is not exclusively bound to this single block +cipher, and support for alternate block ciphers was later added [1]. Presently +use of this is for isolated use-cases and future-proofing the TarSum checksum +format. + +## Calculation + +### Requirement + +As mentioned earlier, the calculation is such that it takes into consideration +the life and cycle of the tar archive. In that the tar archive is not an +immutable, permanent artifact. Otherwise options like relying on a known block +cipher checksum of the archive itself would be reliable enough. Since the tar +archive is used as a transportation medium, and is thrown away after its +contents are extracted. Therefore, for consistent validation items such as +order of files in the tar archive and time stamps are subject to change once an +image is received. + + +### Process + +The method is typically iterative due to reading tar info headers from the +archive stream, though this is not a strict requirement. + +#### Files + +Each file in the tar archive have their contents (headers and body) checksummed +individually using the designated associated hashing cipher. The ordered +headers of the file are written to the checksum calculation first, and then the +payload of the file body. + +The resulting checksum of the file is appended to the list of file sums. The +sum is encoded as a string of the hexadecimal digest. Additionally, the file +name and position in the archive is kept as reference for special ordering. + +#### Headers + +The following headers are read, in this +order ( and the corresponding representation of its value): +* 'name' - string +* 'mode' - string of the base10 integer +* 'uid' - string of the integer +* 'gid' - string of the integer +* 'size' - string of the integer +* 'mtime' (_Version0 only_) - string of integer of the seconds since 1970-01-01 00:00:00 UTC +* 'typeflag' - string of the char +* 'linkname' - string +* 'uname' - string +* 'gname' - string +* 'devmajor' - string of the integer +* 'devminor' - string of the integer + +For >= Version1, the extented attribute headers ("SCHILY.xattr." prefixed pax +headers) included after the above list. These xattrs key/values are first +sorted by the keys. + + +#### Header Format + +The ordered headers are written to the hash in the format of + + "{.key}{.value}" + +with no newline. + + +#### Body + +After the order headers of the file have been added to the checksum for the +file, then the body of the file is written to the hash. + + +#### List of file sums + +The list of file sums is sorted by the string of the hexadecimal digest. + +If there are two files in the tar with matching paths, the order of occurrence +for that path is reflected for the sums of the corresponding file header and +body. + + +#### Final Checksum + +Using an initialize hash of the associated hash cipher, if there is additional +payload to include in the TarSum calculation for the archive, it is written +first. 
Then each checksum from the ordered list of files sums is written to the +hash. The resulting digest is formatted per the Elements of TarSum checksum, +including the TarSum version, the associated hash cipher and the hexadecimal +encoded checksum digest. + + +## Security Considerations + +The initial version of TarSum has undergone one update that could invalidate +handcrafted tar archives. The tar archive format supports appending of files +with same names as prior files in the archive. The latter file will clobber the +prior file of the same path. Due to this the algorithm now accounts for + + +## Footnotes + +* [0] Versioning https://github.com/docker/docker/commit/747f89cd327db9d50251b17797c4d825162226d0 +* [1] Alternate ciphers https://github.com/docker/docker/commit/4e9925d780665149b8bc940d5ba242ada1973c4e + +## Acknowledgements + +Joffrey F (shin-) and Guillaume J. Charmes (creack) on the initial work of the +TarSum calculation. + From 3e08fb5ad2b882b12a396e258bc745254fac6f51 Mon Sep 17 00:00:00 2001 From: Vincent Batts Date: Wed, 12 Nov 2014 09:25:46 -0500 Subject: [PATCH 451/592] pkg/tarsum: review amendments (separate commit to preserve github conversation) Signed-off-by: Vincent Batts --- pkg/tarsum/tarsum_spec.md | 35 +++++++++++++++++++++-------------- 1 file changed, 21 insertions(+), 14 deletions(-) diff --git a/pkg/tarsum/tarsum_spec.md b/pkg/tarsum/tarsum_spec.md index bffd44adb0..aa5065d6a0 100644 --- a/pkg/tarsum/tarsum_spec.md +++ b/pkg/tarsum/tarsum_spec.md @@ -14,8 +14,10 @@ methods, and the versioning of this calculation. ## Introduction The transportation of file systems, regarding docker, is done with tar(1) -archives. Types of transpiration include distribution to and from a registry -endpoint, saving and loading through commands or docker daemon APIs, +archives. There are a variety of tar serialization formats [2], and a key +concern here is ensuring a repeatable checksum given a set of inputs from a +generic tar archive. Types of transportation include distribution to and from a +registry endpoint, saving and loading through commands or docker daemon APIs, transferring the build context from client to docker daemon, and committing the file system of a container to become an image. @@ -40,7 +42,7 @@ versions. ## Concept -The checksum mechanism must ensure the integrity and confidentiality of the +The checksum mechanism must ensure the integrity and assurance of the file system payload. @@ -62,11 +64,11 @@ A checksum mechanism must define the following operations and attributes: The calculated sum output is a text string. The elements included in the output of the calculated sum comprise the information needed for validation of the sum -(TarSum version and block cipher used) and the expected checksum in hexadecimal +(TarSum version and hashing cipher used) and the expected checksum in hexadecimal form. There are two delimiters used: -* '+' separates TarSum version from block cipher +* '+' separates TarSum version from hashing cipher * ':' separates calculation mechanics from expected hash Example: @@ -114,11 +116,11 @@ calculation are subject to change without notice. ## Ciphers -The official default and standard block cipher used in the calculation mechanic +The official default and standard hashing cipher used in the calculation mechanic is "sha256". This refers to SHA256 hash algorithm as defined in FIPS 180-4. -Though the algorithm itself is not exclusively bound to this single block -cipher, and support for alternate block ciphers was later added [1]. 
Presently +Though the algorithm itself is not exclusively bound to this single hashing +cipher, and support for alternate hashing ciphers was later added [1]. Presently use of this is for isolated use-cases and future-proofing the TarSum checksum format. @@ -128,7 +130,7 @@ format. As mentioned earlier, the calculation is such that it takes into consideration the life and cycle of the tar archive. In that the tar archive is not an -immutable, permanent artifact. Otherwise options like relying on a known block +immutable, permanent artifact. Otherwise options like relying on a known hashing cipher checksum of the archive itself would be reliable enough. Since the tar archive is used as a transportation medium, and is thrown away after its contents are extracted. Therefore, for consistent validation items such as @@ -200,10 +202,12 @@ body. #### Final Checksum -Using an initialize hash of the associated hash cipher, if there is additional -payload to include in the TarSum calculation for the archive, it is written -first. Then each checksum from the ordered list of files sums is written to the -hash. The resulting digest is formatted per the Elements of TarSum checksum, +Begin with a fresh or initial state of the associated hash cipher. If there is +additional payload to include in the TarSum calculation for the archive, it is +written first. Then each checksum from the ordered list of file sums is written +to the hash. + +The resulting digest is formatted per the Elements of TarSum checksum, including the TarSum version, the associated hash cipher and the hexadecimal encoded checksum digest. @@ -213,13 +217,16 @@ encoded checksum digest. The initial version of TarSum has undergone one update that could invalidate handcrafted tar archives. The tar archive format supports appending of files with same names as prior files in the archive. The latter file will clobber the -prior file of the same path. Due to this the algorithm now accounts for +prior file of the same path. Due to this the algorithm now accounts for files +with matching paths, and orders the list of file sums accordingly [3]. ## Footnotes * [0] Versioning https://github.com/docker/docker/commit/747f89cd327db9d50251b17797c4d825162226d0 * [1] Alternate ciphers https://github.com/docker/docker/commit/4e9925d780665149b8bc940d5ba242ada1973c4e +* [2] Tar http://en.wikipedia.org/wiki/Tar_%28computing%29 +* [3] Name collision https://github.com/docker/docker/commit/c5e6362c53cbbc09ddbabd5a7323e04438b57d31 ## Acknowledgements From 3d6e63e0c4bab556fb8bd0abb65e4f7af35256fd Mon Sep 17 00:00:00 2001 From: Vincent Batts Date: Thu, 13 Nov 2014 13:09:05 -0500 Subject: [PATCH 452/592] pkg/tarsum: review cleanup Signed-off-by: Vincent Batts --- pkg/tarsum/tarsum_spec.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pkg/tarsum/tarsum_spec.md b/pkg/tarsum/tarsum_spec.md index aa5065d6a0..b51e5b104f 100644 --- a/pkg/tarsum/tarsum_spec.md +++ b/pkg/tarsum/tarsum_spec.md @@ -188,7 +188,7 @@ with no newline. #### Body After the order headers of the file have been added to the checksum for the -file, then the body of the file is written to the hash. +file, the body of the file is written to the hash. 
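To make the calculation described in this specification concrete, here is a minimal, self-contained sketch of the Version1 scheme (illustrative only, not the pkg/tarsum implementation; it omits the optional extra payload and the xattr headers, and the header values are invented):

```
package main

import (
	"crypto/sha256"
	"fmt"
	"sort"
)

// fileSum hashes the ordered "{key}{value}" header strings followed by the
// file body, per the Headers, Header Format and Body sections (Version1, so
// mtime is excluded).
func fileSum(headers [][2]string, body []byte) string {
	h := sha256.New()
	for _, kv := range headers {
		h.Write([]byte(kv[0] + kv[1])) // no newline between key and value
	}
	h.Write(body)
	return fmt.Sprintf("%x", h.Sum(nil))
}

// finalSum sorts the per-file sums by their hex digest, hashes them in that
// order, and prepends the version and cipher elements.
func finalSum(sums []string) string {
	sort.Strings(sums)
	h := sha256.New()
	for _, s := range sums {
		h.Write([]byte(s))
	}
	return fmt.Sprintf("tarsum.v1+sha256:%x", h.Sum(nil))
}

func main() {
	sum := fileSum([][2]string{
		{"name", "etc/hostname"},
		{"mode", "420"}, // 0644 as a base10 string
		{"uid", "0"}, {"gid", "0"},
		{"size", "6"},
		{"typeflag", "0"},
		{"linkname", ""}, {"uname", ""}, {"gname", ""},
		{"devmajor", "0"}, {"devminor", "0"},
	}, []byte("web01\n"))
	fmt.Println(finalSum([]string{sum}))
}
```

The real implementation additionally streams the tar archive, supports the extra payload and the alternate ciphers mentioned earlier, and handles duplicate paths as described under Security Considerations.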
#### List of file sums From 7f84174109b734fac309c02e4b474aeff9dfca15 Mon Sep 17 00:00:00 2001 From: Vincent Batts Date: Thu, 20 Nov 2014 15:46:15 -0500 Subject: [PATCH 453/592] tarsum: updates for jamtur01 comments Signed-off-by: Vincent Batts --- pkg/tarsum/tarsum_spec.md | 82 +++++++++++++++++---------------------- 1 file changed, 36 insertions(+), 46 deletions(-) diff --git a/pkg/tarsum/tarsum_spec.md b/pkg/tarsum/tarsum_spec.md index b51e5b104f..7a6f8edc7c 100644 --- a/pkg/tarsum/tarsum_spec.md +++ b/pkg/tarsum/tarsum_spec.md @@ -1,5 +1,5 @@ page_title: TarSum checksum specification -page_description: Documentation for algorithm used in the TarSum checksum calculation +page_description: Documentation for algorithms used in the TarSum checksum calculation page_keywords: docker, checksum, validation, tarsum # TarSum Checksum Specification @@ -7,58 +7,54 @@ page_keywords: docker, checksum, validation, tarsum ## Abstract This document describes the algorithms used in performing the TarSum checksum -calculation on file system layers, the need for this method over existing +calculation on filesystem layers, the need for this method over existing methods, and the versioning of this calculation. ## Introduction -The transportation of file systems, regarding docker, is done with tar(1) +The transportation of filesystems, regarding Docker, is done with tar(1) archives. There are a variety of tar serialization formats [2], and a key concern here is ensuring a repeatable checksum given a set of inputs from a generic tar archive. Types of transportation include distribution to and from a -registry endpoint, saving and loading through commands or docker daemon APIs, -transferring the build context from client to docker daemon, and committing the -file system of a container to become an image. +registry endpoint, saving and loading through commands or Docker daemon APIs, +transferring the build context from client to Docker daemon, and committing the +filesystem of a container to become an image. As tar archives are used for transit, but not preserved in many situations, the -focus of the algorithm is to ensure the integrity of the preserved file system, +focus of the algorithm is to ensure the integrity of the preserved filesystem, while maintaining a deterministic accountability. This includes neither -constrain the ordering or manipulation of the files during the creation or +constraining the ordering or manipulation of the files during the creation or unpacking of the archive, nor include additional metadata state about the file system attributes. - ## Intended Audience This document is outlining the methods used for consistent checksum calculation -for file systems transported via tar archives. +for filesystems transported via tar archives. Auditing these methodologies is an open and iterative process. This document should accommodate the review of source code. Ultimately, this document should be the starting point of further refinements to the algorithm and its future versions. - ## Concept The checksum mechanism must ensure the integrity and assurance of the -file system payload. - +filesystem payload. ## Checksum Algorithm Profile A checksum mechanism must define the following operations and attributes: -* associated hashing cipher - used to checksum each file payload and attribute +* Associated hashing cipher - used to checksum each file payload and attribute information. 
-* checksum list - each file of the file system archive has its checksum +* Checksum list - each file of the filesystem archive has its checksum calculated from the payload and attributes of the file. The final checksum is calculated from this list, with specific ordering. -* version - as the algorithm adapts to requirements, there are behaviors of the +* Version - as the algorithm adapts to requirements, there are behaviors of the algorithm to manage by versioning. -* archive being calculated - the tar archive having its checksum calculated - +* Archive being calculated - the tar archive having its checksum calculated ## Elements of TarSum checksum @@ -73,13 +69,14 @@ There are two delimiters used: Example: +``` "tarsum.v1+sha256:220a60ecd4a3c32c282622a625a54db9ba0ff55b5ba9c29c7064a2bc358b6a3e" | | \ | | | \ | |_version_|_cipher__|__ | | \ | |_calculation_mechanics_|______________________expected_sum_______________________| - +``` ## Versioning @@ -92,51 +89,50 @@ The general algorithm will be describe further in the 'Calculation'. This is the initial version of TarSum. -Its element in the checksum "tarsum" - +Its element in the TarSum checksum string is `tarsum`. ### Version1 -Its element in the checksum "tarsum.v1" +Its element in the TarSum checksum is `tarsum.v1`. The notable changes in this version: -* exclusion of file mtime from the file information headers, in each file +* Exclusion of file `mtime` from the file information headers, in each file checksum calculation -* inclusion of extended attributes (xattrs. Also seen as "SCHILY.xattr." prefixed Pax +* Inclusion of extended attributes (`xattrs`. Also seen as `SCHILY.xattr.` prefixed Pax tar file info headers) keys and values in each file checksum calculation ### VersionDev *Do not use unless validating refinements to the checksum algorithm* -Its element in the checksum "tarsum.dev" +Its element in the TarSum checksum is `tarsum.dev`. -This is a floating place holder for a next version. The methods used for -calculation are subject to change without notice. +This is a floating place holder for a next version and grounds for testing +changes. The methods used for calculation are subject to change without notice, +and this version is for testing and not for production use. ## Ciphers The official default and standard hashing cipher used in the calculation mechanic -is "sha256". This refers to SHA256 hash algorithm as defined in FIPS 180-4. +is `sha256`. This refers to SHA256 hash algorithm as defined in FIPS 180-4. -Though the algorithm itself is not exclusively bound to this single hashing -cipher, and support for alternate hashing ciphers was later added [1]. Presently -use of this is for isolated use-cases and future-proofing the TarSum checksum -format. +Though the TarSum algorithm itself is not exclusively bound to the single +hashing cipher `sha256`, support for alternate hashing ciphers was later added +[1]. Use cases for alternate cipher could include future-proofing TarSum +checksum format and using faster cipher hashes for tar filesystem checksums. ## Calculation ### Requirement As mentioned earlier, the calculation is such that it takes into consideration -the life and cycle of the tar archive. In that the tar archive is not an -immutable, permanent artifact. Otherwise options like relying on a known hashing -cipher checksum of the archive itself would be reliable enough. Since the tar -archive is used as a transportation medium, and is thrown away after its -contents are extracted. 
Therefore, for consistent validation items such as -order of files in the tar archive and time stamps are subject to change once an -image is received. - +the lifecycle of the tar archive. In that the tar archive is not an immutable, +permanent artifact. Otherwise options like relying on a known hashing cipher +checksum of the archive itself would be reliable enough. The tar archive of the +filesystem is used as a transportation medium for Docker images, and the +archive is discarded once its contents are extracted. Therefore, for consistent +validation items such as order of files in the tar archive and time stamps are +subject to change once an image is received. ### Process @@ -175,7 +171,6 @@ For >= Version1, the extented attribute headers ("SCHILY.xattr." prefixed pax headers) included after the above list. These xattrs key/values are first sorted by the keys. - #### Header Format The ordered headers are written to the hash in the format of @@ -184,13 +179,11 @@ The ordered headers are written to the hash in the format of with no newline. - #### Body After the order headers of the file have been added to the checksum for the file, the body of the file is written to the hash. - #### List of file sums The list of file sums is sorted by the string of the hexadecimal digest. @@ -199,7 +192,6 @@ If there are two files in the tar with matching paths, the order of occurrence for that path is reflected for the sums of the corresponding file header and body. - #### Final Checksum Begin with a fresh or initial state of the associated hash cipher. If there is @@ -211,7 +203,6 @@ The resulting digest is formatted per the Elements of TarSum checksum, including the TarSum version, the associated hash cipher and the hexadecimal encoded checksum digest. - ## Security Considerations The initial version of TarSum has undergone one update that could invalidate @@ -220,7 +211,6 @@ with same names as prior files in the archive. The latter file will clobber the prior file of the same path. Due to this the algorithm now accounts for files with matching paths, and orders the list of file sums accordingly [3]. - ## Footnotes * [0] Versioning https://github.com/docker/docker/commit/747f89cd327db9d50251b17797c4d825162226d0 From efb8e8a34550b80a567e7b69ad6bb2644d5a9b09 Mon Sep 17 00:00:00 2001 From: "Andrew C. Bodine" Date: Mon, 24 Nov 2014 23:41:49 -0800 Subject: [PATCH 454/592] Closes #9296 adds a more detailed note about the mount behavior with -v flag Signed-off-by: Andrew C. Bodine --- docs/sources/userguide/dockervolumes.md | 11 +++++++++-- 1 file changed, 9 insertions(+), 2 deletions(-) diff --git a/docs/sources/userguide/dockervolumes.md b/docs/sources/userguide/dockervolumes.md index 58412611c5..6f94b6dbd0 100644 --- a/docs/sources/userguide/dockervolumes.md +++ b/docs/sources/userguide/dockervolumes.md @@ -51,8 +51,15 @@ directory from your own host into a container. $ sudo docker run -d -P --name web -v /src/webapp:/opt/webapp training/webapp python app.py -This will mount the local directory, `/src/webapp`, into the container as the -`/opt/webapp` directory. This is very useful for testing, for example we can +This will mount the host directory, `/src/webapp`, into the container at +`/opt/webapp`. 
+ +> **Note:** +> If the path `/opt/webapp` already exists inside the container's image, it's +> contents will be replaced by the contents of `/src/webapp` on the host to stay +> consistent with the expected behavior of `mount` + +This is very useful for testing, for example we can mount our source code inside the container and see our application at work as we change the source code. The directory on the host must be specified as an absolute path and if the directory doesn't exist Docker will automatically From 2e863e8a3849f6ea34dd281aac6f8a6c700bf029 Mon Sep 17 00:00:00 2001 From: Harald Albers Date: Mon, 17 Nov 2014 17:13:58 +0100 Subject: [PATCH 455/592] Add missing options to bash completion for the run and create commands Signed-off-by: Harald Albers --- contrib/completion/bash/docker | 175 +++++++++++++++++++++++++++++++-- 1 file changed, 167 insertions(+), 8 deletions(-) diff --git a/contrib/completion/bash/docker b/contrib/completion/bash/docker index dbe7c71442..089ebfea67 100755 --- a/contrib/completion/bash/docker +++ b/contrib/completion/bash/docker @@ -99,6 +99,55 @@ __docker_pos_first_nonflag() { echo $counter } +__docker_resolve_hostname() { + command -v host >/dev/null 2>&1 || return + COMPREPLY=( $(host 2>/dev/null "${cur%:}" | awk '/has address/ {print $4}') ) +} + +__docker_capabilities() { + # The list of capabilities is defined in types.go, ALL was added manually. + COMPREPLY=( $( compgen -W " + ALL + AUDIT_CONTROL + AUDIT_WRITE + BLOCK_SUSPEND + CHOWN + DAC_OVERRIDE + DAC_READ_SEARCH + FOWNER + FSETID + IPC_LOCK + IPC_OWNER + KILL + LEASE + LINUX_IMMUTABLE + MAC_ADMIN + MAC_OVERRIDE + MKNOD + NET_ADMIN + NET_BIND_SERVICE + NET_BROADCAST + NET_RAW + SETFCAP + SETGID + SETPCAP + SETUID + SYS_ADMIN + SYS_BOOT + SYS_CHROOT + SYSLOG + SYS_MODULE + SYS_NICE + SYS_PACCT + SYS_PTRACE + SYS_RAWIO + SYS_RESOURCE + SYS_TIME + SYS_TTY_CONFIG + WAKE_ALARM + " -- "$cur" ) ) +} + _docker_docker() { case "$prev" in -H) @@ -222,7 +271,7 @@ _docker_create() { __docker_containers_all return ;; - -v|--volume) + -v|--volume|--device) case "$cur" in *:*) # TODO somehow do _filedir for stuff inside the image, if it's already specified (which is also somewhat difficult to determine) @@ -255,7 +304,62 @@ _docker_create() { esac return ;; - --entrypoint|-h|--hostname|-m|--memory|-u|--user|-w|--workdir|-c|--cpu-shares|-n|--name|-p|--publish|--expose|--dns|--lxc-conf) + --add-host) + case "$cur" in + *:) + __docker_resolve_hostname + return + ;; + esac + ;; + --cap-add|--cap-drop) + __docker_capabilities + return + ;; + --net) + case "$cur" in + container:*) + local cur=${cur#*:} + __docker_containers_all + ;; + *) + COMPREPLY=( $( compgen -W "bridge none container: host" -- "$cur") ) + if [ "${COMPREPLY[*]}" = "container:" ] ; then + compopt -o nospace + fi + ;; + esac + return + ;; + --restart) + case "$cur" in + on-failure:*) + ;; + *) + COMPREPLY=( $( compgen -W "no on-failure on-failure: always" -- "$cur") ) + ;; + esac + return + ;; + --security-opt) + case "$cur" in + label:*:*) + ;; + label:*) + local cur=${cur##*:} + COMPREPLY=( $( compgen -W "user: role: type: level: disable" -- "$cur") ) + if [ "${COMPREPLY[*]}" != "disable" ] ; then + compopt -o nospace + fi + ;; + *) + COMPREPLY=( $( compgen -W "label apparmor" -S ":" -- "$cur") ) + compopt -o nospace + ;; + esac + return + ;; + --entrypoint|-h|--hostname|-m|--memory|-u|--user|-w|--workdir|--cpuset|-c|--cpu-shares|-n|--name|-p|--publish|--expose|--dns|--lxc-conf|--dns-search) return ;; *) @@ -264,10 +368,10 @@ _docker_create() { 
case "$cur" in -*) - COMPREPLY=( $( compgen -W "-n --networking --privileged -P --publish-all -i --interactive -t --tty --cidfile --entrypoint -h --hostname -m --memory -u --user -w --workdir -c --cpu-shares --name -a --attach -v --volume --link -e --env --env-file -p --publish --expose --dns --volumes-from --lxc-conf" -- "$cur" ) ) + COMPREPLY=( $( compgen -W "-n --networking --privileged -P --publish-all -i --interactive -t --tty --cidfile --entrypoint -h --hostname -m --memory -u --user -w --workdir --cpuset -c --cpu-shares --name -a --attach -v --volume --link -e --env --env-file -p --publish --expose --dns --volumes-from --lxc-conf --security-opt --add-host --cap-add --cap-drop --device --dns-search --net --restart" -- "$cur" ) ) ;; *) - local counter=$(__docker_pos_first_nonflag '--cidfile|--volumes-from|-v|--volume|-e|--env|--env-file|--entrypoint|-h|--hostname|-m|--memory|-u|--user|-w|--workdir|-c|--cpu-shares|-n|--name|-a|--attach|--link|-p|--publish|--expose|--dns|--lxc-conf') + local counter=$(__docker_pos_first_nonflag '--cidfile|--volumes-from|-v|--volume|-e|--env|--env-file|--entrypoint|-h|--hostname|-m|--memory|-u|--user|-w|--workdir|--cpuset|-c|--cpu-shares|-n|--name|-a|--attach|--link|-p|--publish|--expose|--dns|--lxc-conf|--security-opt|--add-host|--cap-add|--cap-drop|--device|--dns-search|--net|--restart') if [ $cword -eq $counter ]; then __docker_image_repos_and_tags_and_ids @@ -553,7 +657,7 @@ _docker_run() { __docker_containers_all return ;; - -v|--volume) + -v|--volume|--device) case "$cur" in *:*) # TODO somehow do _filedir for stuff inside the image, if it's already specified (which is also somewhat difficult to determine) @@ -586,7 +690,62 @@ _docker_run() { esac return ;; - --entrypoint|-h|--hostname|-m|--memory|-u|--user|-w|--workdir|--cpuset|-c|--cpu-shares|-n|--name|-p|--publish|--expose|--dns|--lxc-conf) + --add-host) + case "$cur" in + *:) + __docker_resolve_hostname + return + ;; + esac + ;; + --cap-add|--cap-drop) + __docker_capabilities + return + ;; + --net) + case "$cur" in + container:*) + local cur=${cur#*:} + __docker_containers_all + ;; + *) + COMPREPLY=( $( compgen -W "bridge none container: host" -- "$cur") ) + if [ "${COMPREPLY[*]}" = "container:" ] ; then + compopt -o nospace + fi + ;; + esac + return + ;; + --restart) + case "$cur" in + on-failure:*) + ;; + *) + COMPREPLY=( $( compgen -W "no on-failure on-failure: always" -- "$cur") ) + ;; + esac + return + ;; + --security-opt) + case "$cur" in + label:*:*) + ;; + label:*) + local cur=${cur##*:} + COMPREPLY=( $( compgen -W "user: role: type: level: disable" -- "$cur") ) + if [ "${COMPREPLY[*]}" != "disable" ] ; then + compopt -o nospace + fi + ;; + *) + COMPREPLY=( $( compgen -W "label apparmor" -S ":" -- "$cur") ) + compopt -o nospace + ;; + esac + return + ;; + --entrypoint|-h|--hostname|-m|--memory|-u|--user|-w|--workdir|--cpuset|-c|--cpu-shares|-n|--name|-p|--publish|--expose|--dns|--lxc-conf|--dns-search) return ;; *) @@ -595,11 +754,11 @@ _docker_run() { case "$cur" in -*) - COMPREPLY=( $( compgen -W "--rm -d --detach -n --networking --privileged -P --publish-all -i --interactive -t --tty --cidfile --entrypoint -h --hostname -m --memory -u --user -w --workdir --cpuset -c --cpu-shares --sig-proxy --name -a --attach -v --volume --link -e --env --env-file -p --publish --expose --dns --volumes-from --lxc-conf --security-opt" -- "$cur" ) ) + COMPREPLY=( $( compgen -W "--rm -d --detach -n --networking --privileged -P --publish-all -i --interactive -t --tty --cidfile --entrypoint -h --hostname 
-m --memory -u --user -w --workdir --cpuset -c --cpu-shares --sig-proxy --name -a --attach -v --volume --link -e --env --env-file -p --publish --expose --dns --volumes-from --lxc-conf --security-opt --add-host --cap-add --cap-drop --device --dns-search --net --restart" -- "$cur" ) ) ;; *) - local counter=$(__docker_pos_first_nonflag '--cidfile|--volumes-from|-v|--volume|-e|--env|--env-file|--entrypoint|-h|--hostname|-m|--memory|-u|--user|-w|--workdir|--cpuset|-c|--cpu-shares|-n|--name|-a|--attach|--link|-p|--publish|--expose|--dns|--lxc-conf|--security-opt') + local counter=$(__docker_pos_first_nonflag '--cidfile|--volumes-from|-v|--volume|-e|--env|--env-file|--entrypoint|-h|--hostname|-m|--memory|-u|--user|-w|--workdir|--cpuset|-c|--cpu-shares|-n|--name|-a|--attach|--link|-p|--publish|--expose|--dns|--lxc-conf|--security-opt|--add-host|--cap-add|--cap-drop|--device|--dns-search|--net|--restart') if [ $cword -eq $counter ]; then __docker_image_repos_and_tags_and_ids From eac9f2e5c4fa47b0ef1e064ac2bc62be6f3a3c99 Mon Sep 17 00:00:00 2001 From: Harald Albers Date: Wed, 19 Nov 2014 14:29:56 +0100 Subject: [PATCH 456/592] Minor bash completion cleanup The -n and --networking options were removed because they are unsupported. Bash completion should not reveal the existence of otherwise undocumented unsupported options. Signed-off-by: Harald Albers --- contrib/completion/bash/docker | 52 ++++++---------------------------- 1 file changed, 8 insertions(+), 44 deletions(-) diff --git a/contrib/completion/bash/docker b/contrib/completion/bash/docker index 089ebfea67..5364944faf 100755 --- a/contrib/completion/bash/docker +++ b/contrib/completion/bash/docker @@ -1,8 +1,8 @@ -#!bash +#!/bin/bash # # bash completion file for core docker commands # -# This script provides supports completion of: +# This script provides completion of: # - commands and their options # - container ids and names # - image repos and tags @@ -11,9 +11,9 @@ # To enable the completions either: # - place this file in /etc/bash_completion.d # or -# - copy this file and add the line below to your .bashrc after -# bash completion features are loaded -# . docker.bash +# - copy this file to e.g. ~/.docker-completion.sh and add the line +# below to your .bashrc after bash completion features are loaded +# . 
~/.docker-completion.sh # # Note: # Currently, the completions will not work if the docker daemon is not @@ -153,8 +153,6 @@ _docker_docker() { -H) return ;; - *) - ;; esac case "$cur" in @@ -187,8 +185,6 @@ _docker_build() { __docker_image_repos_and_tags return ;; - *) - ;; esac case "$cur" in @@ -209,8 +205,6 @@ _docker_commit() { -m|--message|-a|--author|--run) return ;; - *) - ;; esac case "$cur" in @@ -362,13 +356,11 @@ _docker_create() { --entrypoint|-h|--hostname|-m|--memory|-u|--user|-w|--workdir|--cpuset|-c|--cpu-shares|-n|--name|-p|--publish|--expose|--dns|--lxc-conf|--dns-search) return ;; - *) - ;; esac case "$cur" in -*) - COMPREPLY=( $( compgen -W "-n --networking --privileged -P --publish-all -i --interactive -t --tty --cidfile --entrypoint -h --hostname -m --memory -u --user -w --workdir --cpuset -c --cpu-shares --name -a --attach -v --volume --link -e --env --env-file -p --publish --expose --dns --volumes-from --lxc-conf --security-opt --add-host --cap-add --cap-drop --device --dns-search --net --restart" -- "$cur" ) ) + COMPREPLY=( $( compgen -W "--privileged -P --publish-all -i --interactive -t --tty --cidfile --entrypoint -h --hostname -m --memory -u --user -w --workdir --cpuset -c --cpu-shares --name -a --attach -v --volume --link -e --env --env-file -p --publish --expose --dns --volumes-from --lxc-conf --security-opt --add-host --cap-add --cap-drop --device --dns-search --net --restart" -- "$cur" ) ) ;; *) local counter=$(__docker_pos_first_nonflag '--cidfile|--volumes-from|-v|--volume|-e|--env|--env-file|--entrypoint|-h|--hostname|-m|--memory|-u|--user|-w|--workdir|--cpuset|-c|--cpu-shares|-n|--name|-a|--attach|--link|-p|--publish|--expose|--dns|--lxc-conf|--security-opt|--add-host|--cap-add|--cap-drop|--device|--dns-search|--net|--restart') @@ -392,16 +384,12 @@ _docker_events() { --since) return ;; - *) - ;; esac case "$cur" in -*) COMPREPLY=( $( compgen -W "--since" -- "$cur" ) ) ;; - *) - ;; esac } @@ -480,8 +468,6 @@ _docker_inspect() { -f|--format) return ;; - *) - ;; esac case "$cur" in @@ -507,16 +493,12 @@ _docker_login() { -u|--username|-p|--password|-e|--email) return ;; - *) - ;; esac case "$cur" in -*) COMPREPLY=( $( compgen -W "-u --username -p --password -e --email" -- "$cur" ) ) ;; - *) - ;; esac } @@ -556,16 +538,12 @@ _docker_ps() { -n) return ;; - *) - ;; esac case "$cur" in -*) COMPREPLY=( $( compgen -W "-q --quiet -s --size -a --all --no-trunc -l --latest --since --before -n" -- "$cur" ) ) ;; - *) - ;; esac } @@ -574,8 +552,6 @@ _docker_pull() { -t|--tag) return ;; - *) - ;; esac case "$cur" in @@ -603,8 +579,6 @@ _docker_restart() { -t|--time) return ;; - *) - ;; esac case "$cur" in @@ -624,7 +598,6 @@ _docker_rm() { return ;; *) - local force= for arg in "${COMP_WORDS[@]}"; do case "$arg" in -f|--force) @@ -748,16 +721,13 @@ _docker_run() { --entrypoint|-h|--hostname|-m|--memory|-u|--user|-w|--workdir|--cpuset|-c|--cpu-shares|-n|--name|-p|--publish|--expose|--dns|--lxc-conf|--dns-search) return ;; - *) - ;; esac case "$cur" in -*) - COMPREPLY=( $( compgen -W "--rm -d --detach -n --networking --privileged -P --publish-all -i --interactive -t --tty --cidfile --entrypoint -h --hostname -m --memory -u --user -w --workdir --cpuset -c --cpu-shares --sig-proxy --name -a --attach -v --volume --link -e --env --env-file -p --publish --expose --dns --volumes-from --lxc-conf --security-opt --add-host --cap-add --cap-drop --device --dns-search --net --restart" -- "$cur" ) ) + COMPREPLY=( $( compgen -W "--rm -d --detach --privileged -P --publish-all -i 
--interactive -t --tty --cidfile --entrypoint -h --hostname -m --memory -u --user -w --workdir --cpuset -c --cpu-shares --sig-proxy --name -a --attach -v --volume --link -e --env --env-file -p --publish --expose --dns --volumes-from --lxc-conf --security-opt --add-host --cap-add --cap-drop --device --dns-search --net --restart" -- "$cur" ) ) ;; *) - local counter=$(__docker_pos_first_nonflag '--cidfile|--volumes-from|-v|--volume|-e|--env|--env-file|--entrypoint|-h|--hostname|-m|--memory|-u|--user|-w|--workdir|--cpuset|-c|--cpu-shares|-n|--name|-a|--attach|--link|-p|--publish|--expose|--dns|--lxc-conf|--security-opt|--add-host|--cap-add|--cap-drop|--device|--dns-search|--net|--restart') if [ $cword -eq $counter ]; then @@ -779,16 +749,12 @@ _docker_search() { -s|--stars) return ;; - *) - ;; esac case "$cur" in -*) COMPREPLY=( $( compgen -W "--no-trunc --automated -s --stars" -- "$cur" ) ) ;; - *) - ;; esac } @@ -808,8 +774,6 @@ _docker_stop() { -t|--time) return ;; - *) - ;; esac case "$cur" in @@ -911,7 +875,7 @@ _docker() { local cur prev words cword _get_comp_words_by_ref -n : cur prev words cword - local command='docker' + local command='docker' cpos=0 local counter=1 while [ $counter -lt $cword ]; do case "${words[$counter]}" in From dfcf05a5d070823ade84ffba0e9784cdfe6d54b0 Mon Sep 17 00:00:00 2001 From: Yohei Ueda Date: Tue, 25 Nov 2014 20:49:01 +0900 Subject: [PATCH 457/592] Use termios via CGO only on Linux Signed-off-by: Yohei Ueda --- pkg/term/{term_cgo.go => tc_linux_cgo.go} | 2 +- pkg/term/{term_nocgo.go => tc_other.go} | 3 ++- pkg/term/termios_darwin.go | 2 -- pkg/term/termios_freebsd.go | 2 -- 4 files changed, 3 insertions(+), 6 deletions(-) rename pkg/term/{term_cgo.go => tc_linux_cgo.go} (97%) rename pkg/term/{term_nocgo.go => tc_other.go} (90%) diff --git a/pkg/term/term_cgo.go b/pkg/term/tc_linux_cgo.go similarity index 97% rename from pkg/term/term_cgo.go rename to pkg/term/tc_linux_cgo.go index ddf080cf93..ae9516c99c 100644 --- a/pkg/term/term_cgo.go +++ b/pkg/term/tc_linux_cgo.go @@ -1,4 +1,4 @@ -// +build !windows,cgo +// +build linux,cgo package term diff --git a/pkg/term/term_nocgo.go b/pkg/term/tc_other.go similarity index 90% rename from pkg/term/term_nocgo.go rename to pkg/term/tc_other.go index c211c3992d..266039bac3 100644 --- a/pkg/term/term_nocgo.go +++ b/pkg/term/tc_other.go @@ -1,4 +1,5 @@ -// +build !windows,!cgo +// +build !windows +// +build !linux !cgo package term diff --git a/pkg/term/termios_darwin.go b/pkg/term/termios_darwin.go index 2640e8b935..11cd70d10b 100644 --- a/pkg/term/termios_darwin.go +++ b/pkg/term/termios_darwin.go @@ -1,5 +1,3 @@ -// +build !cgo - package term import ( diff --git a/pkg/term/termios_freebsd.go b/pkg/term/termios_freebsd.go index 969beda239..ed3659572c 100644 --- a/pkg/term/termios_freebsd.go +++ b/pkg/term/termios_freebsd.go @@ -1,5 +1,3 @@ -// +build !cgo - package term import ( From ec8c53228a409dc3af45e5e25798bec84c4d0515 Mon Sep 17 00:00:00 2001 From: James Turnbull Date: Tue, 25 Nov 2014 10:38:16 -0500 Subject: [PATCH 458/592] Removed extraneous ) Docker-DCO-1.1-Signed-off-by: James Turnbull (github: jamtur01) --- docs/README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/README.md b/docs/README.md index f5ccb753da..c113a884a3 100755 --- a/docs/README.md +++ b/docs/README.md @@ -11,7 +11,7 @@ development) branch maps to the "master" documentation. ## Contributing -Be sure to follow the [contribution guidelines](../CONTRIBUTING.md)). 
+Be sure to follow the [contribution guidelines](../CONTRIBUTING.md). In particular, [remember to sign your work!](../CONTRIBUTING.md#sign-your-work) ## Getting Started From b4b962444765ea76457465000f42d1a52021020e Mon Sep 17 00:00:00 2001 From: Shishir Mahajan Date: Tue, 25 Nov 2014 14:05:43 -0500 Subject: [PATCH 459/592] Removing description for 'docker images' -t and -v flags, since they are deprecated Signed-off-by: Shishir Mahajan --- docs/man/docker-images.1.md | 19 ------------------- 1 file changed, 19 deletions(-) diff --git a/docs/man/docker-images.1.md b/docs/man/docker-images.1.md index c572ee674b..8b2869d59d 100644 --- a/docs/man/docker-images.1.md +++ b/docs/man/docker-images.1.md @@ -58,25 +58,6 @@ used in builds use **-a**: docker images -a -## List images dependency tree hierarchy - -To list the images in the local repository (not the registry) in a dependency -tree format, use the **-t** option. - - docker images -t - -This displays a staggered hierarchy tree where the less indented image is -the oldest with dependent image layers branching inward (to the right) on -subsequent lines. The newest or top level image layer is listed last in -any tree branch. - -## List images in GraphViz format - -To display the list in a format consumable by a GraphViz tools run with -**-v**. For example to produce a .png graph file of the hierarchy use: - - docker images --viz | dot -Tpng -o docker.png - ## Listing only the shortened image IDs Listing just the shortened image IDs. This can be useful for some automated From 61b2766e758f21b8b6bd32dc82ba624c2f0c5fd6 Mon Sep 17 00:00:00 2001 From: Dan Walsh Date: Thu, 20 Nov 2014 17:20:26 -0500 Subject: [PATCH 460/592] Label content created for containers with the private label Currently this content gets a system label and is not writable based on SELinux controls. This patch will set the labels to the correct label. 
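A rough way to observe the effect on an SELinux-enabled host (the path layout
comes from the daemon's container storage; the expected type is only an
example):

    $ cid=$(docker run -d busybox top)
    $ ls -Z /var/lib/docker/containers/$cid/resolv.conf
    # with this patch the file should carry the container's MountLabel
    # (e.g. an svirt_sandbox_file_t type) rather than a generic system label
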
Docker-DCO-1.1-Signed-off-by: Dan Walsh (github: rhatdan) --- daemon/volumes.go | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/daemon/volumes.go b/daemon/volumes.go index a2cf3af33a..54cc0369dc 100644 --- a/daemon/volumes.go +++ b/daemon/volumes.go @@ -15,6 +15,7 @@ import ( "github.com/docker/docker/pkg/chrootarchive" "github.com/docker/docker/pkg/symlink" "github.com/docker/docker/volumes" + "github.com/docker/libcontainer/label" ) type Mount struct { @@ -235,15 +236,24 @@ func validMountMode(mode string) bool { } func (container *Container) setupMounts() error { + if err := label.SetFileLabel(container.ResolvConfPath, container.MountLabel); err != nil { + return err + } mounts := []execdriver.Mount{ {Source: container.ResolvConfPath, Destination: "/etc/resolv.conf", Writable: true, Private: true}, } if container.HostnamePath != "" { + if err := label.SetFileLabel(container.HostnamePath, container.MountLabel); err != nil { + return err + } mounts = append(mounts, execdriver.Mount{Source: container.HostnamePath, Destination: "/etc/hostname", Writable: true, Private: true}) } if container.HostsPath != "" { + if err := label.SetFileLabel(container.HostsPath, container.MountLabel); err != nil { + return err + } mounts = append(mounts, execdriver.Mount{Source: container.HostsPath, Destination: "/etc/hosts", Writable: true, Private: true}) } From c83cd5c7ac6dca270ef77adbca0be63441573c91 Mon Sep 17 00:00:00 2001 From: Joffrey F Date: Mon, 24 Nov 2014 23:54:50 +0100 Subject: [PATCH 461/592] Expanded documentation for Webhooks 2.0 (chains and callbacks information) Signed-off-by: Joffrey Fuhrer --- docs/sources/docker-hub/repos.md | 36 ++++++++++++++++++++++++++- docs/sources/userguide/dockerrepos.md | 3 +++ 2 files changed, 38 insertions(+), 1 deletion(-) diff --git a/docs/sources/docker-hub/repos.md b/docs/sources/docker-hub/repos.md index 42d97d8bf2..8ee914bf42 100644 --- a/docs/sources/docker-hub/repos.md +++ b/docs/sources/docker-hub/repos.md @@ -1,6 +1,6 @@ page_title: Repositories and Images on Docker Hub page_description: Repositories and Images on Docker Hub -page_keywords: Docker, docker, registry, accounts, plans, Dockerfile, Docker Hub, docs, documentation +page_keywords: Docker, docker, registry, accounts, plans, Dockerfile, Docker Hub, webhooks, docs, documentation # Repositories and Images on Docker Hub @@ -142,3 +142,37 @@ similar to the example shown below. Webhooks allow you to notify people, services and other applications of new updates to your images and repositories. +### Webhook chains + +Webhook chains allow you to chain calls to multiple services. After clicking the +"Add webhook" button, simply add as many URLs as necessary in your chain. + +The first webhook in a chain will be called after a successful push. Subsequent URLs will be contacted after the callback has been validated. + +#### Validating a callback + +In order to validate a callback in a webhook chain, you need to + +1. Retrieve the `callback_url` value in the request's JSON payload. +1. Send a POST request to this URL containing a valid JSON body. + +> **Note**: A chain request will only be considered complete once the last +> callback has been validated. + +#### Callback JSON data + +Recognized parameters in callback data are as follow: + +* `state` (required): Accepted values are `success`, `failure` and `error`. If the state isn't `success`, the webhook chain will be interrupted. +* `description`: A string containing miscellaneous information that will be available on the Docker Hub. 
Maximum 255 characters. +* `context`: A string containing the context of the operation. Can be retrieved on the Docker Hub. Maximum 100 characters. +* `target_url`: The URL where the results of the operation can be found. Can be retrieved on the Docker Hub. + +*Example callback payload:* + + { + "state": "success", + "description": "387 tests PASSED", + "context": "Continuous integration by Acme CI", + "target_url": "http://ci.acme.com/results/afd339c1c3d27" + } diff --git a/docs/sources/userguide/dockerrepos.md b/docs/sources/userguide/dockerrepos.md index 967ed0d8cf..33aaf0fa55 100644 --- a/docs/sources/userguide/dockerrepos.md +++ b/docs/sources/userguide/dockerrepos.md @@ -162,6 +162,9 @@ event when an image or updated image is pushed to the repository. With a webhook you can specify a target URL and a JSON payload that will be delivered when the image is pushed. +See more information on webhooks +[here](http://docs.docker.com/docker-hub/repos/#webhooks) + ## Next steps Go and use Docker! From 3db5ea1617549bbef7f645d4ecbf482ad1f31c71 Mon Sep 17 00:00:00 2001 From: unclejack Date: Wed, 26 Nov 2014 00:41:42 +0200 Subject: [PATCH 462/592] pkg/tarsum: add maintainers & add missing s Signed-off-by: Cristian Staretu --- pkg/tarsum/MAINTAINER | 1 - pkg/tarsum/MAINTAINERS | 4 ++++ 2 files changed, 4 insertions(+), 1 deletion(-) delete mode 100644 pkg/tarsum/MAINTAINER create mode 100644 pkg/tarsum/MAINTAINERS diff --git a/pkg/tarsum/MAINTAINER b/pkg/tarsum/MAINTAINER deleted file mode 100644 index bd492e8394..0000000000 --- a/pkg/tarsum/MAINTAINER +++ /dev/null @@ -1 +0,0 @@ -Eric Windisch (@ewindisch) diff --git a/pkg/tarsum/MAINTAINERS b/pkg/tarsum/MAINTAINERS new file mode 100644 index 0000000000..9571a14a38 --- /dev/null +++ b/pkg/tarsum/MAINTAINERS @@ -0,0 +1,4 @@ +Derek McGowan (github: dmcgowan) +Eric Windisch (github: ewindisch) +Josh Hawn (github: jlhawn) +Vincent Batts (github: vbatts) From bd4fe9b9867c7633e520130b99e02798f49e6b44 Mon Sep 17 00:00:00 2001 From: Brian Goff Date: Fri, 7 Nov 2014 13:44:35 -0500 Subject: [PATCH 463/592] Cleanup ParseHost Current implementation is comingling things that ought not be together. There are _some_ similarities between parsing for the different proto types, but they are more different than alike, making the code extremely difficult to reason about. 
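For reference, these are the kinds of listen addresses the daemon must keep
accepting after the refactor (the concrete values are only examples):

    $ docker -d -H unix:///var/run/docker.sock
    $ docker -d -H tcp://0.0.0.0:2375
    $ docker -d -H fd://
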
Signed-off-by: Brian Goff --- pkg/parsers/parsers.go | 98 ++++++++++++++++++++---------------------- 1 file changed, 47 insertions(+), 51 deletions(-) diff --git a/pkg/parsers/parsers.go b/pkg/parsers/parsers.go index e6e3718b40..2851fe163a 100644 --- a/pkg/parsers/parsers.go +++ b/pkg/parsers/parsers.go @@ -7,63 +7,59 @@ import ( ) // FIXME: Change this not to receive default value as parameter -func ParseHost(defaultHost string, defaultUnix, addr string) (string, error) { - var ( - proto string - host string - port int - ) +func ParseHost(defaultTCPAddr, defaultUnixAddr, addr string) (string, error) { addr = strings.TrimSpace(addr) - switch { - case addr == "tcp://": - return "", fmt.Errorf("Invalid bind address format: %s", addr) - case strings.HasPrefix(addr, "unix://"): - proto = "unix" - addr = strings.TrimPrefix(addr, "unix://") - if addr == "" { - addr = defaultUnix - } - case strings.HasPrefix(addr, "tcp://"): - proto = "tcp" - addr = strings.TrimPrefix(addr, "tcp://") - case strings.HasPrefix(addr, "fd://"): + if addr == "" { + addr = fmt.Sprintf("unix://%s", defaultUnixAddr) + } + addrParts := strings.Split(addr, "://") + if len(addrParts) == 1 { + addrParts = []string{"tcp", addrParts[0]} + } + + switch addrParts[0] { + case "tcp": + return ParseTCPAddr(addrParts[1], defaultTCPAddr) + case "unix": + return ParseUnixAddr(addrParts[1], defaultUnixAddr) + case "fd": return addr, nil - case addr == "": - proto = "unix" - addr = defaultUnix default: - if strings.Contains(addr, "://") { - return "", fmt.Errorf("Invalid bind address protocol: %s", addr) - } - proto = "tcp" - } - - if proto != "unix" && strings.Contains(addr, ":") { - hostParts := strings.Split(addr, ":") - if len(hostParts) != 2 { - return "", fmt.Errorf("Invalid bind address format: %s", addr) - } - if hostParts[0] != "" { - host = hostParts[0] - } else { - host = defaultHost - } - - if p, err := strconv.Atoi(hostParts[1]); err == nil && p != 0 { - port = p - } else { - return "", fmt.Errorf("Invalid bind address format: %s", addr) - } - - } else if proto == "tcp" && !strings.Contains(addr, ":") { return "", fmt.Errorf("Invalid bind address format: %s", addr) - } else { - host = addr } - if proto == "unix" { - return fmt.Sprintf("%s://%s", proto, host), nil +} + +func ParseUnixAddr(addr string, defaultAddr string) (string, error) { + addr = strings.TrimPrefix(addr, "unix://") + if strings.Contains(addr, "://") { + return "", fmt.Errorf("Invalid proto, expected unix: %s", addr) } - return fmt.Sprintf("%s://%s:%d", proto, host, port), nil + if addr == "" { + addr = defaultAddr + } + return fmt.Sprintf("unix://%s", addr), nil +} + +func ParseTCPAddr(addr string, defaultAddr string) (string, error) { + addr = strings.TrimPrefix(addr, "tcp://") + if strings.Contains(addr, "://") || addr == "" { + return "", fmt.Errorf("Invalid proto, expected tcp: %s", addr) + } + + hostParts := strings.Split(addr, ":") + if len(hostParts) != 2 { + return "", fmt.Errorf("Invalid bind address format: %s", addr) + } + host := hostParts[0] + if host == "" { + host = defaultAddr + } + + p, err := strconv.Atoi(hostParts[1]) + if err != nil && p == 0 { + return "", fmt.Errorf("Invalid bind address format: %s", addr) + } + return fmt.Sprintf("tcp://%s:%d", host, p), nil } // Get a repos name and returns the right reposName + tag From f0f0e316aab2c8cc6e2f0124c7bde7661a15461c Mon Sep 17 00:00:00 2001 From: Victor Vieux Date: Tue, 25 Nov 2014 17:07:04 +0000 Subject: [PATCH 464/592] fix tests Signed-off-by: Victor Vieux --- 
integration-cli/docker_cli_events_test.go | 11 +++++------ 1 file changed, 5 insertions(+), 6 deletions(-) diff --git a/integration-cli/docker_cli_events_test.go b/integration-cli/docker_cli_events_test.go index 0733e78ed0..915c30bb19 100644 --- a/integration-cli/docker_cli_events_test.go +++ b/integration-cli/docker_cli_events_test.go @@ -285,10 +285,10 @@ func TestEventsImageImport(t *testing.T) { } func TestEventsFilters(t *testing.T) { - now := time.Now().Unix() + since := time.Now().Unix() cmd(t, "run", "--rm", "busybox", "true") cmd(t, "run", "--rm", "busybox", "true") - eventsCmd := exec.Command(dockerBinary, "events", fmt.Sprintf("--since=%d", now), fmt.Sprintf("--until=%d", time.Now().Unix()), "--filter", "event=die") + eventsCmd := exec.Command(dockerBinary, "events", fmt.Sprintf("--since=%d", since), fmt.Sprintf("--until=%d", time.Now().Unix()), "--filter", "event=die") out, exitCode, err := runCommandWithOutput(eventsCmd) if exitCode != 0 || err != nil { t.Fatalf("Failed to get events with exit code %d: %s", exitCode, err) @@ -296,8 +296,7 @@ func TestEventsFilters(t *testing.T) { events := strings.Split(out, "\n") events = events[:len(events)-1] if len(events) != 2 { - fmt.Printf("%v\n", events) - t.Fatalf("Unexpected event") + t.Fatalf("Expected 2 events, got %d: %v", len(events), events) } dieEvent := strings.Fields(events[len(events)-1]) if dieEvent[len(dieEvent)-1] != "die" { @@ -309,7 +308,7 @@ func TestEventsFilters(t *testing.T) { t.Fatalf("event should be die, not %#v", dieEvent) } - eventsCmd = exec.Command(dockerBinary, "events", "--since=0", fmt.Sprintf("--until=%d", time.Now().Unix()), "--filter", "event=die", "--filter", "event=start") + eventsCmd = exec.Command(dockerBinary, "events", fmt.Sprintf("--since=%d", since), fmt.Sprintf("--until=%d", time.Now().Unix()), "--filter", "event=die", "--filter", "event=start") out, exitCode, err = runCommandWithOutput(eventsCmd) if exitCode != 0 || err != nil { t.Fatalf("Failed to get events with exit code %d: %s", exitCode, err) @@ -317,7 +316,7 @@ func TestEventsFilters(t *testing.T) { events = strings.Split(out, "\n") events = events[:len(events)-1] if len(events) != 4 { - t.Fatalf("Unexpected event") + t.Fatalf("Expected 4 events, got %d: %v", len(events), events) } startEvent := strings.Fields(events[len(events)-4]) if startEvent[len(startEvent)-1] != "start" { From 20575d20bad60172b9d1f40c9fe357b7c069f466 Mon Sep 17 00:00:00 2001 From: Erik Hollensbe Date: Fri, 24 Oct 2014 18:12:54 +0000 Subject: [PATCH 465/592] Break some routines out of the mutable files test for future use Docker-DCO-1.1-Signed-off-by: Erik Hollensbe (github: erikh) --- integration-cli/docker_cli_run_test.go | 22 ++++-------------- integration-cli/docker_test_vars.go | 9 ++++---- integration-cli/docker_utils.go | 32 ++++++++++++++++++++++++++ 3 files changed, 42 insertions(+), 21 deletions(-) diff --git a/integration-cli/docker_cli_run_test.go b/integration-cli/docker_cli_run_test.go index 9546af0014..05915032fb 100644 --- a/integration-cli/docker_cli_run_test.go +++ b/integration-cli/docker_cli_run_test.go @@ -1897,37 +1897,25 @@ func TestRunMutableNetworkFiles(t *testing.T) { for _, fn := range []string{"resolv.conf", "hosts"} { deleteAllContainers() - out, _, err := runCommandWithOutput(exec.Command(dockerBinary, "run", "-d", "--name", "c1", "busybox", "sh", "-c", fmt.Sprintf("echo success >/etc/%s; while true; do sleep 1; done", fn))) - if err != nil { - t.Fatal(err, out) - } - - time.Sleep(1 * time.Second) - - contID := strings.TrimSpace(out) - 
- f, err := os.Open(filepath.Join("/var/lib/docker/containers", contID, fn)) + content, err := runCommandAndReadContainerFile(fn, exec.Command(dockerBinary, "run", "-d", "--name", "c1", "busybox", "sh", "-c", fmt.Sprintf("echo success >/etc/%s; while true; do sleep 1; done", fn))) if err != nil { t.Fatal(err) } - content, err := ioutil.ReadAll(f) - f.Close() - if strings.TrimSpace(string(content)) != "success" { t.Fatal("Content was not what was modified in the container", string(content)) } - out, _, err = runCommandWithOutput(exec.Command(dockerBinary, "run", "-d", "--name", "c2", "busybox", "sh", "-c", fmt.Sprintf("while true; do cat /etc/%s; sleep 1; done", fn))) + out, _, err := runCommandWithOutput(exec.Command(dockerBinary, "run", "-d", "--name", "c2", "busybox", "sh", "-c", fmt.Sprintf("while true; do cat /etc/%s; sleep 1; done", fn))) if err != nil { t.Fatal(err) } - contID = strings.TrimSpace(out) + contID := strings.TrimSpace(out) - resolvConfPath := filepath.Join("/var/lib/docker/containers", contID, fn) + resolvConfPath := containerStorageFile(contID, fn) - f, err = os.OpenFile(resolvConfPath, os.O_WRONLY|os.O_SYNC|os.O_APPEND, 0644) + f, err := os.OpenFile(resolvConfPath, os.O_WRONLY|os.O_SYNC|os.O_APPEND, 0644) if err != nil { t.Fatal(err) } diff --git a/integration-cli/docker_test_vars.go b/integration-cli/docker_test_vars.go index 23903a39a9..78c481bd23 100644 --- a/integration-cli/docker_test_vars.go +++ b/integration-cli/docker_test_vars.go @@ -16,10 +16,11 @@ var ( // the private registry to use for tests privateRegistryURL = "127.0.0.1:5000" - dockerBasePath = "/var/lib/docker" - execDriverPath = dockerBasePath + "/execdriver/native" - volumesConfigPath = dockerBasePath + "/volumes" - volumesStoragePath = dockerBasePath + "/vfs/dir" + dockerBasePath = "/var/lib/docker" + execDriverPath = dockerBasePath + "/execdriver/native" + volumesConfigPath = dockerBasePath + "/volumes" + volumesStoragePath = dockerBasePath + "/vfs/dir" + containerStoragePath = dockerBasePath + "/containers" workingDirectory string ) diff --git a/integration-cli/docker_utils.go b/integration-cli/docker_utils.go index ba1a0b1306..ca33baa2aa 100644 --- a/integration-cli/docker_utils.go +++ b/integration-cli/docker_utils.go @@ -731,3 +731,35 @@ func readFile(src string, t *testing.T) (content string) { } return string(data) } + +func containerStorageFile(containerId, basename string) string { + return filepath.Join("/var/lib/docker/containers", containerId, basename) +} + +func runCommandAndReadContainerFile(filename string, cmd *exec.Cmd) ([]byte, error) { + out, _, err := runCommandWithOutput(cmd) + if err != nil { + return nil, err + } + + time.Sleep(1 * time.Second) + + contID := strings.TrimSpace(out) + + return readContainerFile(contID, filename) +} + +func readContainerFile(containerId, filename string) ([]byte, error) { + f, err := os.Open(containerStorageFile(containerId, filename)) + if err != nil { + return nil, err + } + defer f.Close() + + content, err := ioutil.ReadAll(f) + if err != nil { + return nil, err + } + + return content, nil +} From 68bc8de111e4faa2a16583cd45b5c0fd253a3bba Mon Sep 17 00:00:00 2001 From: Erik Hollensbe Date: Fri, 24 Oct 2014 21:39:12 +0000 Subject: [PATCH 466/592] Test for updating hosts files via links. 
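The behaviour exercised by the new test, roughly sketched (the `docker exec`
read is simply an easy way to inspect the file):

    $ docker run -itd --name one busybox top
    $ docker run -itd --name two --link one:onetwo busybox top
    $ docker exec two cat /etc/hosts   # should now contain an entry for "onetwo"
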
Docker-DCO-1.1-Signed-off-by: Erik Hollensbe (github: erikh) --- integration-cli/docker_cli_links_test.go | 37 ++++++++++++++++++++++++ integration-cli/docker_utils.go | 3 +- 2 files changed, 39 insertions(+), 1 deletion(-) diff --git a/integration-cli/docker_cli_links_test.go b/integration-cli/docker_cli_links_test.go index f202ce10a2..d412ef2a1a 100644 --- a/integration-cli/docker_cli_links_test.go +++ b/integration-cli/docker_cli_links_test.go @@ -6,6 +6,7 @@ import ( "os/exec" "strings" "testing" + "time" "github.com/docker/docker/pkg/iptables" ) @@ -177,3 +178,39 @@ func TestLinksNotStartedParentNotFail(t *testing.T) { } logDone("link - container start not failing on updating stopped parent links") } + +func TestLinksHostsFilesInject(t *testing.T) { + defer deleteAllContainers() + + out, _, err := runCommandWithOutput(exec.Command(dockerBinary, "run", "-itd", "--name", "one", "busybox", "top")) + if err != nil { + t.Fatal(err, out) + } + + idOne := strings.TrimSpace(out) + + out, _, err = runCommandWithOutput(exec.Command(dockerBinary, "run", "-itd", "--name", "two", "--link", "one:onetwo", "busybox", "top")) + if err != nil { + t.Fatal(err, out) + } + + idTwo := strings.TrimSpace(out) + + time.Sleep(1 * time.Second) + + contentOne, err := readContainerFile(idOne, "hosts") + if err != nil { + t.Fatal(err, string(contentOne)) + } + + contentTwo, err := readContainerFile(idTwo, "hosts") + if err != nil { + t.Fatal(err, string(contentTwo)) + } + + if !strings.Contains(string(contentTwo), "onetwo") { + t.Fatal("Host is not present in updated hosts file", string(contentTwo)) + } + + logDone("link - ensure containers hosts files are updated with the link alias.") +} diff --git a/integration-cli/docker_utils.go b/integration-cli/docker_utils.go index ca33baa2aa..9b5fa76c0c 100644 --- a/integration-cli/docker_utils.go +++ b/integration-cli/docker_utils.go @@ -736,10 +736,11 @@ func containerStorageFile(containerId, basename string) string { return filepath.Join("/var/lib/docker/containers", containerId, basename) } +// docker commands that use this function must be run with the '-d' switch. func runCommandAndReadContainerFile(filename string, cmd *exec.Cmd) ([]byte, error) { out, _, err := runCommandWithOutput(cmd) if err != nil { - return nil, err + return nil, fmt.Errorf("%v: %q", err, out) } time.Sleep(1 * time.Second) From 90928eb1140fc0394e2a79d5e9a91dbc0f02484c Mon Sep 17 00:00:00 2001 From: Doug Davis Date: Mon, 17 Nov 2014 15:50:09 -0800 Subject: [PATCH 467/592] Add support for docker exec to return cmd exitStatus Note - only support the non-detached mode of exec right now. Another PR will add -d support. 
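The intended behaviour, sketched (the container name is a placeholder):

    $ docker exec <container> sh -c 'exit 23'
    $ echo $?
    23
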
Closes #8703 Signed-off-by: Doug Davis --- api/client/commands.go | 11 ++ api/client/utils.go | 20 ++++ api/server/server.go | 10 ++ daemon/container.go | 4 + daemon/daemon.go | 1 + daemon/exec.go | 18 ++- daemon/inspect.go | 18 +++ .../reference/api/docker_remote_api_v1.16.md | 108 ++++++++++++++++++ integration-cli/docker_cli_exec_test.go | 17 +++ 9 files changed, 204 insertions(+), 3 deletions(-) diff --git a/api/client/commands.go b/api/client/commands.go index 6ddae4a3a7..ddff3d88a8 100644 --- a/api/client/commands.go +++ b/api/client/commands.go @@ -2574,6 +2574,8 @@ func (cli *DockerCli) CmdExec(args ...string) error { if _, _, err := readBody(cli.call("POST", "/exec/"+execID+"/start", execConfig, false)); err != nil { return err } + // For now don't print this - wait for when we support exec wait() + // fmt.Fprintf(cli.out, "%s\n", execID) return nil } @@ -2636,5 +2638,14 @@ func (cli *DockerCli) CmdExec(args ...string) error { return err } + var status int + if _, status, err = getExecExitCode(cli, execID); err != nil { + return err + } + + if status != 0 { + return &utils.StatusError{StatusCode: status} + } + return nil } diff --git a/api/client/utils.go b/api/client/utils.go index 3799ce6735..8de571bf4d 100644 --- a/api/client/utils.go +++ b/api/client/utils.go @@ -234,6 +234,26 @@ func getExitCode(cli *DockerCli, containerId string) (bool, int, error) { return state.GetBool("Running"), state.GetInt("ExitCode"), nil } +// getExecExitCode perform an inspect on the exec command. It returns +// the running state and the exit code. +func getExecExitCode(cli *DockerCli, execId string) (bool, int, error) { + stream, _, err := cli.call("GET", "/exec/"+execId+"/json", nil, false) + if err != nil { + // If we can't connect, then the daemon probably died. 
+ if err != ErrConnectionRefused { + return false, -1, err + } + return false, -1, nil + } + + var result engine.Env + if err := result.Decode(stream); err != nil { + return false, -1, err + } + + return result.GetBool("Running"), result.GetInt("ExitCode"), nil +} + func (cli *DockerCli) monitorTtySize(id string, isExec bool) error { cli.resizeTty(id, isExec) diff --git a/api/server/server.go b/api/server/server.go index d9b73e6798..1d591a3d84 100644 --- a/api/server/server.go +++ b/api/server/server.go @@ -956,6 +956,15 @@ func getContainersByName(eng *engine.Engine, version version.Version, w http.Res return job.Run() } +func getExecByID(eng *engine.Engine, version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + if vars == nil { + return fmt.Errorf("Missing parameter 'id'") + } + var job = eng.Job("execInspect", vars["id"]) + streamJSON(job, w, false) + return job.Run() +} + func getImagesByName(eng *engine.Engine, version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error { if vars == nil { return fmt.Errorf("Missing parameter") @@ -1277,6 +1286,7 @@ func createRouter(eng *engine.Engine, logging, enableCors bool, dockerVersion st "/containers/{name:.*}/top": getContainersTop, "/containers/{name:.*}/logs": getContainersLogs, "/containers/{name:.*}/attach/ws": wsContainersAttach, + "/exec/{id:.*}/json": getExecByID, }, "POST": { "/auth": postAuth, diff --git a/daemon/container.go b/daemon/container.go index bf93787ebf..b35969900c 100644 --- a/daemon/container.go +++ b/daemon/container.go @@ -602,6 +602,10 @@ func (container *Container) cleanup() { if err := container.Unmount(); err != nil { log.Errorf("%v: Failed to umount filesystem: %v", container.ID, err) } + + for _, eConfig := range container.execCommands.s { + container.daemon.unregisterExecCommand(eConfig) + } } func (container *Container) KillSig(sig int) error { diff --git a/daemon/daemon.go b/daemon/daemon.go index 84628be729..06dc557799 100644 --- a/daemon/daemon.go +++ b/daemon/daemon.go @@ -130,6 +130,7 @@ func (daemon *Daemon) Install(eng *engine.Engine) error { "execCreate": daemon.ContainerExecCreate, "execStart": daemon.ContainerExecStart, "execResize": daemon.ContainerExecResize, + "execInspect": daemon.ContainerExecInspect, } { if err := eng.Register(name, method); err != nil { return err diff --git a/daemon/exec.go b/daemon/exec.go index d813dbba1d..71529b165c 100644 --- a/daemon/exec.go +++ b/daemon/exec.go @@ -24,6 +24,7 @@ type execConfig struct { sync.Mutex ID string Running bool + ExitCode int ProcessConfig execdriver.ProcessConfig StreamConfig OpenStdin bool @@ -207,8 +208,9 @@ func (d *Daemon) ContainerExecStart(job *engine.Job) engine.Status { execErr := make(chan error) - // Remove exec from daemon and container. - defer d.unregisterExecCommand(execConfig) + // Note, the execConfig data will be removed when the container + // itself is deleted. This allows us to query it (for things like + // the exitStatus) even after the cmd is done running. 
go func() { err := container.Exec(execConfig) @@ -231,7 +233,17 @@ func (d *Daemon) ContainerExecStart(job *engine.Job) engine.Status { } func (d *Daemon) Exec(c *Container, execConfig *execConfig, pipes *execdriver.Pipes, startCallback execdriver.StartCallback) (int, error) { - return d.execDriver.Exec(c.command, &execConfig.ProcessConfig, pipes, startCallback) + exitStatus, err := d.execDriver.Exec(c.command, &execConfig.ProcessConfig, pipes, startCallback) + + // On err, make sure we don't leave ExitCode at zero + if err != nil && exitStatus == 0 { + exitStatus = 128 + } + + execConfig.ExitCode = exitStatus + execConfig.Running = false + + return exitStatus, err } func (container *Container) Exec(execConfig *execConfig) error { diff --git a/daemon/inspect.go b/daemon/inspect.go index 396ca0227f..5dec257f3a 100644 --- a/daemon/inspect.go +++ b/daemon/inspect.go @@ -64,3 +64,21 @@ func (daemon *Daemon) ContainerInspect(job *engine.Job) engine.Status { } return job.Errorf("No such container: %s", name) } + +func (daemon *Daemon) ContainerExecInspect(job *engine.Job) engine.Status { + if len(job.Args) != 1 { + return job.Errorf("usage: %s ID", job.Name) + } + id := job.Args[0] + eConfig, err := daemon.getExecConfig(id) + if err != nil { + return job.Error(err) + } + + b, err := json.Marshal(*eConfig) + if err != nil { + return job.Error(err) + } + job.Stdout.Write(b) + return engine.StatusOK +} diff --git a/docs/sources/reference/api/docker_remote_api_v1.16.md b/docs/sources/reference/api/docker_remote_api_v1.16.md index dc2cc56267..15a8f1c4b5 100644 --- a/docs/sources/reference/api/docker_remote_api_v1.16.md +++ b/docs/sources/reference/api/docker_remote_api_v1.16.md @@ -1598,6 +1598,114 @@ Status Codes: - **201** – no error - **404** – no such exec instance +### Exec Inspect + +`GET /exec/(id)/json` + +Return low-level information about the exec command `id`. 
+ +**Example request**: + + GET /exec/11fb006128e8ceb3942e7c58d77750f24210e35f879dd204ac975c184b820b39/json HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: plain/text + + { + "ID" : "11fb006128e8ceb3942e7c58d77750f24210e35f879dd204ac975c184b820b39", + "Running" : false, + "ExitCode" : 2, + "ProcessConfig" : { + "privileged" : false, + "user" : "", + "tty" : false, + "entrypoint" : "sh", + "arguments" : [ + "-c", + "exit 2" + ] + }, + "OpenStdin" : false, + "OpenStderr" : false, + "OpenStdout" : false, + "Container" : { + "State" : { + "Running" : true, + "Paused" : false, + "Restarting" : false, + "OOMKilled" : false, + "Pid" : 3650, + "ExitCode" : 0, + "Error" : "", + "StartedAt" : "2014-11-17T22:26:03.717657531Z", + "FinishedAt" : "0001-01-01T00:00:00Z" + }, + "ID" : "8f177a186b977fb451136e0fdf182abff5599a08b3c7f6ef0d36a55aaf89634c", + "Created" : "2014-11-17T22:26:03.626304998Z", + "Path" : "date", + "Args" : [], + "Config" : { + "Hostname" : "8f177a186b97", + "Domainname" : "", + "User" : "", + "Memory" : 0, + "MemorySwap" : 0, + "CpuShares" : 0, + "Cpuset" : "", + "AttachStdin" : false, + "AttachStdout" : false, + "AttachStderr" : false, + "PortSpecs" : null, + "ExposedPorts" : null, + "Tty" : false, + "OpenStdin" : false, + "StdinOnce" : false, + "Env" : [ "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin" ], + "Cmd" : [ + "date" + ], + "Image" : "ubuntu", + "Volumes" : null, + "WorkingDir" : "", + "Entrypoint" : null, + "NetworkDisabled" : false, + "MacAddress" : "", + "OnBuild" : null, + "SecurityOpt" : null + }, + "Image" : "5506de2b643be1e6febbf3b8a240760c6843244c41e12aa2f60ccbb7153d17f5", + "NetworkSettings" : { + "IPAddress" : "172.17.0.2", + "IPPrefixLen" : 16, + "MacAddress" : "02:42:ac:11:00:02", + "Gateway" : "172.17.42.1", + "Bridge" : "docker0", + "PortMapping" : null, + "Ports" : {} + }, + "ResolvConfPath" : "/var/lib/docker/containers/8f177a186b977fb451136e0fdf182abff5599a08b3c7f6ef0d36a55aaf89634c/resolv.conf", + "HostnamePath" : "/var/lib/docker/containers/8f177a186b977fb451136e0fdf182abff5599a08b3c7f6ef0d36a55aaf89634c/hostname", + "HostsPath" : "/var/lib/docker/containers/8f177a186b977fb451136e0fdf182abff5599a08b3c7f6ef0d36a55aaf89634c/hosts", + "Name" : "/test", + "Driver" : "aufs", + "ExecDriver" : "native-0.2", + "MountLabel" : "", + "ProcessLabel" : "", + "AppArmorProfile" : "", + "RestartCount" : 0, + "Volumes" : {}, + "VolumesRW" : {} + } + } + +Status Codes: + +- **200** – no error +- **404** – no such exec instance +- **500** - server error + # 3. 
Going further ## 3.1 Inside `docker run` diff --git a/integration-cli/docker_cli_exec_test.go b/integration-cli/docker_cli_exec_test.go index 438271744a..ebb5484f2e 100644 --- a/integration-cli/docker_cli_exec_test.go +++ b/integration-cli/docker_cli_exec_test.go @@ -213,3 +213,20 @@ func TestExecEnv(t *testing.T) { logDone("exec - exec inherits correct env") } + +func TestExecExitStatus(t *testing.T) { + runCmd := exec.Command(dockerBinary, "run", "-d", "--name", "top", "busybox", "top") + if out, _, _, err := runCommandWithStdoutStderr(runCmd); err != nil { + t.Fatal(out, err) + } + + // Test normal (non-detached) case first + cmd := exec.Command(dockerBinary, "exec", "top", "sh", "-c", "exit 23") + ec, _ := runCommand(cmd) + + if ec != 23 { + t.Fatalf("Should have had an ExitCode of 23, not: %d", ec) + } + + logDone("exec - exec non-zero ExitStatus") +} From 8635b0248a1166a48dbd0fbe32df7b19f4ccea62 Mon Sep 17 00:00:00 2001 From: Jessica Frazelle Date: Tue, 25 Nov 2014 18:34:08 -0800 Subject: [PATCH 468/592] fix where cmd function dne Docker-DCO-1.1-Signed-off-by: Jessica Frazelle (github: jfrazelle) --- integration-cli/docker_cli_events_test.go | 10 ++++++++-- 1 file changed, 8 insertions(+), 2 deletions(-) diff --git a/integration-cli/docker_cli_events_test.go b/integration-cli/docker_cli_events_test.go index 82c685f2a2..a56788e219 100644 --- a/integration-cli/docker_cli_events_test.go +++ b/integration-cli/docker_cli_events_test.go @@ -286,8 +286,14 @@ func TestEventsImageImport(t *testing.T) { func TestEventsFilters(t *testing.T) { since := time.Now().Unix() - cmd(t, "run", "--rm", "busybox", "true") - cmd(t, "run", "--rm", "busybox", "true") + out, _, err := runCommandWithOutput(exec.Command(dockerBinary, "run", "--rm", "busybox", "true")) + if err != nil { + t.Fatal(out, err) + } + out, _, err = runCommandWithOutput(exec.Command(dockerBinary, "run", "--rm", "busybox", "true")) + if err != nil { + t.Fatal(out, err) + } eventsCmd := exec.Command(dockerBinary, "events", fmt.Sprintf("--since=%d", since), fmt.Sprintf("--until=%d", time.Now().Unix()), "--filter", "event=die") out, exitCode, err := runCommandWithOutput(eventsCmd) if exitCode != 0 || err != nil { From 12bc51603bddac886363756d550ab79a53bcac8f Mon Sep 17 00:00:00 2001 From: Ben Firshman Date: Wed, 26 Nov 2014 15:15:10 +0000 Subject: [PATCH 469/592] Update format of notes in docs style guide It seems like all of the notes in the docs are this format. Signed-off-by: Ben Firshman --- docs/sources/contributing/docs_style-guide.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/sources/contributing/docs_style-guide.md b/docs/sources/contributing/docs_style-guide.md index 2da7728dbf..6ff3dfd1cf 100644 --- a/docs/sources/contributing/docs_style-guide.md +++ b/docs/sources/contributing/docs_style-guide.md @@ -169,7 +169,7 @@ Use notes sparingly and only to bring things to the reader's attention that are critical or otherwise deserving of being called out from the body text. 
Please format all notes as follows: - **Note:** + > **Note:** > One line of note text > another line of note text From 64fd3e89c7f6164b5522b5e611e7daf4a2bdae9c Mon Sep 17 00:00:00 2001 From: Yohei Ueda Date: Thu, 27 Nov 2014 03:19:10 +0900 Subject: [PATCH 470/592] Increase memory limit in test cases Signed-off-by: Yohei Ueda --- integration-cli/docker_cli_run_test.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/integration-cli/docker_cli_run_test.go b/integration-cli/docker_cli_run_test.go index 05915032fb..844fc62ddd 100644 --- a/integration-cli/docker_cli_run_test.go +++ b/integration-cli/docker_cli_run_test.go @@ -44,7 +44,7 @@ func TestRunEchoStdout(t *testing.T) { // "test" should be printed func TestRunEchoStdoutWithMemoryLimit(t *testing.T) { - runCmd := exec.Command(dockerBinary, "run", "-m", "4m", "busybox", "echo", "test") + runCmd := exec.Command(dockerBinary, "run", "-m", "16m", "busybox", "echo", "test") out, _, _, err := runCommandWithStdoutStderr(runCmd) if err != nil { t.Fatalf("failed to run container: %v, output: %q", err, out) @@ -81,7 +81,7 @@ func TestRunEchoStdoutWitCPULimit(t *testing.T) { // "test" should be printed func TestRunEchoStdoutWithCPUAndMemoryLimit(t *testing.T) { - runCmd := exec.Command(dockerBinary, "run", "-c", "1000", "-m", "4m", "busybox", "echo", "test") + runCmd := exec.Command(dockerBinary, "run", "-c", "1000", "-m", "16m", "busybox", "echo", "test") out, _, _, err := runCommandWithStdoutStderr(runCmd) if err != nil { t.Fatalf("failed to run container: %v, output: %q", err, out) From 8b881df9b1f3c8fa7c36dd5b2fb3e7235f6d4654 Mon Sep 17 00:00:00 2001 From: Michael Steinert Date: Wed, 26 Nov 2014 13:09:44 -0600 Subject: [PATCH 471/592] Fix a small typo Signed-off-by: Michael Steinert --- docs/sources/reference/builder.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/sources/reference/builder.md b/docs/sources/reference/builder.md index 121018cff5..5ee0358479 100644 --- a/docs/sources/reference/builder.md +++ b/docs/sources/reference/builder.md @@ -764,7 +764,7 @@ Docker client, refer to [*Share Directories via Volumes*](/userguide/dockervolum documentation. > **Note**: -> The list is parsed a JSON array, which means that +> The list is parsed as a JSON array, which means that > you must use double-quotes (") around words not single-quotes ('). ## USER From 750dc335a983fcd2c1410bffe5e23d1b0b3c3e3c Mon Sep 17 00:00:00 2001 From: Jessica Frazelle Date: Wed, 26 Nov 2014 10:46:00 -0800 Subject: [PATCH 472/592] Windows should not be officially released. 
Docker-DCO-1.1-Signed-off-by: Jessica Frazelle (github: jfrazelle) --- Dockerfile | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/Dockerfile b/Dockerfile index 344551d35d..af559759b7 100644 --- a/Dockerfile +++ b/Dockerfile @@ -68,8 +68,10 @@ RUN cd /usr/local/go/src && ./make.bash --no-clean 2>&1 ENV DOCKER_CROSSPLATFORMS \ linux/386 linux/arm \ darwin/amd64 darwin/386 \ - freebsd/amd64 freebsd/386 freebsd/arm \ - windows/amd64 windows/386 + freebsd/amd64 freebsd/386 freebsd/arm +# windows is experimental for now +# windows/amd64 windows/386 + # (set an explicit GOARM of 5 for maximum compatibility) ENV GOARM 5 RUN cd /usr/local/go/src && bash -xc 'for platform in $DOCKER_CROSSPLATFORMS; do GOOS=${platform%/*} GOARCH=${platform##*/} ./make.bash --no-clean 2>&1; done' From d53b586ff1b274b529b7912df3052a1bc2ca8dfe Mon Sep 17 00:00:00 2001 From: Sven Dowideit Date: Wed, 26 Nov 2014 14:09:52 +1000 Subject: [PATCH 473/592] Extract the systemd docs from various places and add a little more Signed-off-by: Sven Dowideit --- docs/mkdocs.yml | 1 + docs/sources/articles/systemd.md | 101 +++++++++++++++++++++++ docs/sources/installation/archlinux.md | 6 ++ docs/sources/installation/centos.md | 14 +++- docs/sources/installation/fedora.md | 25 +----- docs/sources/installation/frugalware.md | 6 ++ docs/sources/installation/gentoolinux.md | 4 + docs/sources/installation/openSUSE.md | 8 ++ docs/sources/installation/oracle.md | 6 ++ docs/sources/installation/rhel.md | 7 ++ 10 files changed, 153 insertions(+), 25 deletions(-) create mode 100644 docs/sources/articles/systemd.md diff --git a/docs/mkdocs.yml b/docs/mkdocs.yml index f5ea845e95..06f9064d96 100644 --- a/docs/mkdocs.yml +++ b/docs/mkdocs.yml @@ -98,6 +98,7 @@ pages: - ['articles/ambassador_pattern_linking.md', 'Articles', 'Cross-Host linking using ambassador containers'] - ['articles/runmetrics.md', 'Articles', 'Runtime metrics'] - ['articles/b2d_volume_resize.md', 'Articles', 'Increasing a Boot2Docker volume'] +- ['articles/systemd.md', 'Articles', 'Controlling and configuring Docker using Systemd'] # Reference - ['reference/index.md', '**HIDDEN**'] diff --git a/docs/sources/articles/systemd.md b/docs/sources/articles/systemd.md new file mode 100644 index 0000000000..028185f8df --- /dev/null +++ b/docs/sources/articles/systemd.md @@ -0,0 +1,101 @@ +page_title: Controlling and configuring Docker using Systemd +page_description: Controlling and configuring Docker using Systemd +page_keywords: docker, daemon, systemd, configuration + +# Controlling and configuring Docker using Systemd + +Many Linux distributions use systemd to start the Docker daemon. This document +shows a few examples of how to customise Docker's settings. + +## Starting the Docker daemon + +Once Docker is installed, you will need to start the Docker daemon. + + $ sudo systemctl start docker + # or on older distributions, you may need to use + $ sudo service docker start + +If you want Docker to start at boot, you should also: + + $ sudo systemctl enable docker + # or on older distributions, you may need to use + $ sudo chkconfig docker on + +## Custom Docker daemon options + +There are a number of ways to configure the daemon flags and environment variables +for your Docker daemon. + +If the `docker.service` file is set to use an `EnvironmentFile` +(often pointing to `/etc/sysconfig/docker`) then you can modify the +referenced file. + +Or, you may need to edit the `docker.service` file, which can be in `/usr/lib/systemd/system` +or `/etc/systemd/service`. 
+ +### Runtime directory and storage driver + +You may want to control the disk space used for Docker images, containers +and volumes by moving it to a separate partition. + +In this example, we'll assume that your `docker.services` file looks something like: + + [Unit] + Description=Docker Application Container Engine + Documentation=http://docs.docker.com + After=network.target docker.socket + Requires=docker.socket + + [Service] + Type=notify + EnvironmentFile=-/etc/sysconfig/docker + ExecStart=/usr/bin/docker -d -H fd:// $OPTIONS + LimitNOFILE=1048576 + LimitNPROC=1048576 + + [Install] + Also=docker.socket + +This will allow us to add extra flags to the `/etc/sysconfig/docker` file by +setting `OPTIONS`: + + OPTIONS="--graph /mnt/docker-data --storage btrfs" + +You can also set other environment variables in this file, for example, the +`HTTP_PROXY` environment variables described below. + +### HTTP Proxy + +This example overrides the default `docker.service` file. + +If you are behind a HTTP proxy server, for example in corporate settings, +you will need to add this configuration in the Docker systemd service file. + +Copy file `/usr/lib/systemd/system/docker.service` to `/etc/systemd/system/docker/service`. + +Add the following to the `[Service]` section in the new file: + + Environment="HTTP_PROXY=http://proxy.example.com:80/" + +If you have internal Docker registries that you need to contact without +proxying you can specify them via the `NO_PROXY` environment variable: + + Environment="HTTP_PROXY=http://proxy.example.com:80/" "NO_PROXY=localhost,127.0.0.0/8,docker-registry.somecorporation.com" + +Flush changes: + + $ sudo systemctl daemon-reload + +Restart Docker: + + $ sudo systemctl restart docker + +## Manually creating the systemd unit files + +When installing the binary without a package, you may want +to integrate Docker with systemd. For this, simply install the two unit files +(service and socket) from [the github +repository](https://github.com/docker/docker/tree/master/contrib/init/systemd) +to `/etc/systemd/system`. + + diff --git a/docs/sources/installation/archlinux.md b/docs/sources/installation/archlinux.md index 81cc21fb02..99849c7aa0 100644 --- a/docs/sources/installation/archlinux.md +++ b/docs/sources/installation/archlinux.md @@ -53,3 +53,9 @@ service: To start on system boot: $ sudo systemctl enable docker + +## Custom daemon options + +If you need to add an HTTP Proxy, set a different directory or partition for the +Docker runtime files, or make other customizations, read our systemd article to +learn how to [customize your systemd Docker daemon options](/articles/systemd/). diff --git a/docs/sources/installation/centos.md b/docs/sources/installation/centos.md index 2f7d57d604..707afc959a 100644 --- a/docs/sources/installation/centos.md +++ b/docs/sources/installation/centos.md @@ -45,11 +45,11 @@ to `/etc/systemd/system`. CentOS-7 introduced firewalld, which is a wrapper around iptables and can conflict with Docker. -When firewalld is started or restarted it will remove the `DOCKER` chain +When `firewalld` is started or restarted it will remove the `DOCKER` chain from iptables, preventing Docker from working properly. -When using systemd, firewalld is started before Docker, but if you -start or restart firewalld after Docker, you will have to restart the Docker daemon. +When using systemd, `firewalld` is started before Docker, but if you +start or restart `firewalld` after Docker, you will have to restart the Docker daemon. 
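+
+For example, if `firewalld` has just been restarted (a minimal sketch; unit
+names may differ on your system):
+
+    $ sudo systemctl restart firewalld
+    $ sudo systemctl restart docker
+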
## Installing Docker - CentOS-6 Please note that this for CentOS-6, this package is part of [Extra Packages @@ -103,7 +103,13 @@ Run a simple bash shell to test the image: $ sudo docker run -i -t centos /bin/bash If everything is working properly, you'll get a simple bash prompt. Type -exit to continue. +`exit` to continue. + +## Custom daemon options + +If you need to add an HTTP Proxy, set a different directory or partition for the +Docker runtime files, or make other customizations, read our systemd article to +learn how to [customize your systemd Docker daemon options](/articles/systemd/). ## Dockerfiles The CentOS Project provides a number of sample Dockerfiles which you may use diff --git a/docs/sources/installation/fedora.md b/docs/sources/installation/fedora.md index 9101ef1356..9253144045 100644 --- a/docs/sources/installation/fedora.md +++ b/docs/sources/installation/fedora.md @@ -67,28 +67,11 @@ member of that group in order to contact the `docker -d` process. Adding users to the `docker` group is *not* necessary for Docker versions 1.0 and above. -## HTTP Proxy +## Custom daemon options -If you are behind a HTTP proxy server, for example in corporate settings, -you will need to add this configuration in the Docker *systemd service file*. - -Edit file `/usr/lib/systemd/system/docker.service`. Add the following to -section `[Service]` : - - Environment="HTTP_PROXY=http://proxy.example.com:80/" - -If you have internal Docker registries that you need to contact without -proxying you can specify them via the `NO_PROXY` environment variable: - - Environment="HTTP_PROXY=http://proxy.example.com:80/" "NO_PROXY=localhost,127.0.0.0/8,docker-registry.somecorporation.com" - -Flush changes: - - $ systemctl daemon-reload - -Restart Docker: - - $ systemctl start docker +If you need to add an HTTP Proxy, set a different directory or partition for the +Docker runtime files, or make other customizations, read our systemd article to +learn how to [customize your systemd Docker daemon options](/articles/systemd/). ## What next? diff --git a/docs/sources/installation/frugalware.md b/docs/sources/installation/frugalware.md index 2c2f922613..6b4db23b26 100644 --- a/docs/sources/installation/frugalware.md +++ b/docs/sources/installation/frugalware.md @@ -42,3 +42,9 @@ service: To start on system boot: $ sudo systemctl enable lxc-docker + +## Custom daemon options + +If you need to add an HTTP Proxy, set a different directory or partition for the +Docker runtime files, or make other customizations, read our systemd article to +learn how to [customize your systemd Docker daemon options](/articles/systemd/). diff --git a/docs/sources/installation/gentoolinux.md b/docs/sources/installation/gentoolinux.md index 39333e63e6..716eab9d82 100644 --- a/docs/sources/installation/gentoolinux.md +++ b/docs/sources/installation/gentoolinux.md @@ -91,3 +91,7 @@ To start the `docker` daemon: To start on system boot: $ sudo systemctl enable docker + +If you need to add an HTTP Proxy, set a different directory or partition for the +Docker runtime files, or make other customizations, read our systemd article to +learn how to [customize your systemd Docker daemon options](/articles/systemd/). 
diff --git a/docs/sources/installation/openSUSE.md b/docs/sources/installation/openSUSE.md index 951b8770cc..bcd08d9fc0 100644 --- a/docs/sources/installation/openSUSE.md +++ b/docs/sources/installation/openSUSE.md @@ -71,5 +71,13 @@ hand to ensure the `FW_ROUTE` flag is set to `yes` like so: **Done!** +## Custom daemon options + +If you need to add an HTTP Proxy, set a different directory or partition for the +Docker runtime files, or make other customizations, read our systemd article to +learn how to [customize your systemd Docker daemon options](/articles/systemd/). + +## What's next + Continue with the [User Guide](/userguide/). diff --git a/docs/sources/installation/oracle.md b/docs/sources/installation/oracle.md index 05bb3d9808..e99c828efa 100644 --- a/docs/sources/installation/oracle.md +++ b/docs/sources/installation/oracle.md @@ -75,6 +75,12 @@ and set `enabled=1` in the `[ol6_addons]` or the `[ol7_addons]` stanza. **Done!** +## Custom daemon options + +If you need to add an HTTP Proxy, set a different directory or partition for the +Docker runtime files, or make other customizations, read our systemd article to +learn how to [customize your systemd Docker daemon options](/articles/systemd/). + ## Using the btrfs storage engine Docker on Oracle Linux 6 and 7 supports the use of the btrfs storage engine. diff --git a/docs/sources/installation/rhel.md b/docs/sources/installation/rhel.md index 74a293b513..59ab049641 100644 --- a/docs/sources/installation/rhel.md +++ b/docs/sources/installation/rhel.md @@ -83,6 +83,13 @@ Now let's verify that Docker is working. Continue with the [User Guide](/userguide/). +## Custom daemon options + +If you need to add an HTTP Proxy, set a different directory or partition for the +Docker runtime files, or make other customizations, read our systemd article to +learn how to [customize your systemd Docker daemon options](/articles/systemd/). + + ## Issues? If you have any issues - please report them directly in the From 498f208ade18dffb7362adc37c1f11b1bff187da Mon Sep 17 00:00:00 2001 From: Sven Dowideit Date: Fri, 21 Nov 2014 13:47:03 -0800 Subject: [PATCH 474/592] Explain what the VIRTUAL SIZE means Signed-off-by: Sven Dowideit Docker-DCO-1.1-Signed-off-by: Sven Dowideit (github: SvenDowideit) --- docs/sources/reference/commandline/cli.md | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/docs/sources/reference/commandline/cli.md b/docs/sources/reference/commandline/cli.md index 9868424b7e..e676fd3682 100644 --- a/docs/sources/reference/commandline/cli.md +++ b/docs/sources/reference/commandline/cli.md @@ -796,6 +796,10 @@ decrease disk usage, and speed up `docker build` by allowing each step to be cached. These intermediate layers are not shown by default. +The `VIRTUAL SIZE` is the cumulative space taken up by the image and all +its parent images. This is also the disk space used by the contents of the +Tar file created when you `docker save` an image. + An image will be listed more than once if it has multiple repository names or tags. This single image (identifiable by its matching `IMAGE ID`) uses up the `VIRTUAL SIZE` listed only once. 
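To make the cumulative figure concrete, the per-layer sizes can be added up along the parent chain. Below is a small, self-contained Go sketch of that arithmetic; the layer names and sizes are made up for illustration and are not real Docker API data.

```
package main

import "fmt"

// layer describes one image layer; the fields and numbers below are
// invented purely to illustrate how a cumulative (virtual) size is derived.
type layer struct {
	ID     string
	Parent string // empty for a base layer
	Size   int64  // size of this layer alone, in bytes
}

// virtualSize walks the parent chain and adds up the individual layer sizes.
func virtualSize(layers map[string]layer, id string) int64 {
	var total int64
	for id != "" {
		l, ok := layers[id]
		if !ok {
			break
		}
		total += l.Size
		id = l.Parent
	}
	return total
}

func main() {
	layers := map[string]layer{
		"base":   {ID: "base", Size: 85 * 1024 * 1024},
		"deps":   {ID: "deps", Parent: "base", Size: 40 * 1024 * 1024},
		"webapp": {ID: "webapp", Parent: "deps", Size: 5 * 1024 * 1024},
	}
	fmt.Printf("virtual size of webapp: %d bytes\n", virtualSize(layers, "webapp"))
}
```

Because two tags sharing the same `IMAGE ID` share every layer, that cumulative figure is only consumed once on disk, which is what the paragraph above points out.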
From 54050083d1c65d4e7a90371c78c587819df27d2f Mon Sep 17 00:00:00 2001 From: Sven Dowideit Date: Tue, 18 Nov 2014 15:55:40 -0800 Subject: [PATCH 475/592] Add an example of how to add your client IP to the container hosts file Signed-off-by: Sven Dowideit Docker-DCO-1.1-Signed-off-by: Sven Dowideit (github: SvenDowideit) --- docs/sources/reference/commandline/cli.md | 24 +++++++++++++++++++++++ 1 file changed, 24 insertions(+) diff --git a/docs/sources/reference/commandline/cli.md b/docs/sources/reference/commandline/cli.md index 9868424b7e..5b8a949063 100644 --- a/docs/sources/reference/commandline/cli.md +++ b/docs/sources/reference/commandline/cli.md @@ -1606,6 +1606,30 @@ container exits with a non-zero exit status more than 10 times in a row Docker will abort trying to restart the container. Providing a maximum restart limit is only valid for the ** on-failure ** policy. +### Adding entries to a container hosts file + +You can add other hosts into a container's `/etc/hosts` file by using one or more +`--add-host` flags. This example adds a static address for a host named `docker`: + +``` + $ docker run --add-host=docker:10.180.0.1 --rm -it debian + $$ ping docker + PING docker (10.180.0.1): 48 data bytes + 56 bytes from 10.180.0.1: icmp_seq=0 ttl=254 time=7.600 ms + 56 bytes from 10.180.0.1: icmp_seq=1 ttl=254 time=30.705 ms + ^C--- docker ping statistics --- + 2 packets transmitted, 2 packets received, 0% packet loss + round-trip min/avg/max/stddev = 7.600/19.152/30.705/11.553 ms +``` + +> **Note:** +> Sometimes you need to connect to the Docker host, which means getting the IP +> address of the host. You can use the following shell commands to simplify this +> process: +> +> $ alias hostip="ip route show 0.0.0.0/0 | grep -Eo 'via \S+' | awk '{ print \$2 }'" +> $ docker run --add-host=docker:$(hostip) --rm -it debian + ## save Usage: docker save [OPTIONS] IMAGE [IMAGE...] From 095027944f16205c1c75d47d2f60a7c1ac666bf1 Mon Sep 17 00:00:00 2001 From: Tomas Tomecek Date: Thu, 27 Nov 2014 12:54:36 +0100 Subject: [PATCH 476/592] docs: man docker-images: inconsistent naming Synopsis is mentioning "NAME" while description is describing "REPOSITORY". Signed-off-by: Tomas Tomecek --- api/client/commands.go | 2 +- docs/man/docker-images.1.md | 2 +- docs/sources/reference/commandline/cli.md | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/api/client/commands.go b/api/client/commands.go index b2561104a7..26dea6c29a 100644 --- a/api/client/commands.go +++ b/api/client/commands.go @@ -1328,7 +1328,7 @@ func (cli *DockerCli) CmdPull(args ...string) error { } func (cli *DockerCli) CmdImages(args ...string) error { - cmd := cli.Subcmd("images", "[NAME]", "List images") + cmd := cli.Subcmd("images", "[REPOSITORY]", "List images") quiet := cmd.Bool([]string{"q", "-quiet"}, false, "Only show numeric IDs") all := cmd.Bool([]string{"a", "-all"}, false, "Show all images (by default filter out the intermediate image layers)") noTrunc := cmd.Bool([]string{"#notrunc", "-no-trunc"}, false, "Don't truncate output") diff --git a/docs/man/docker-images.1.md b/docs/man/docker-images.1.md index c572ee674b..88a94827dc 100644 --- a/docs/man/docker-images.1.md +++ b/docs/man/docker-images.1.md @@ -10,7 +10,7 @@ docker-images - List images [**-f**|**--filter**[=*[]*]] [**--no-trunc**[=*false*]] [**-q**|**--quiet**[=*false*]] - [NAME] + [REPOSITORY] # DESCRIPTION This command lists the images stored in the local Docker repository. 
diff --git a/docs/sources/reference/commandline/cli.md b/docs/sources/reference/commandline/cli.md index b9c2945707..94ae5a72c2 100644 --- a/docs/sources/reference/commandline/cli.md +++ b/docs/sources/reference/commandline/cli.md @@ -725,7 +725,7 @@ To see how the `docker:latest` image was built: ## images - Usage: docker images [OPTIONS] [NAME] + Usage: docker images [OPTIONS] [REPOSITORY] List images From 9f8c14026ce2ddf0f69eb7a416cfb56da5443032 Mon Sep 17 00:00:00 2001 From: Anthony Baire Date: Fri, 28 Nov 2014 13:36:27 +0100 Subject: [PATCH 477/592] fixed regression in docs-update.py 17500eb renamed 'help' as 'help_string', but there was a miss Signed-off-by: Anthony Baire --- docs/docs-update.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/docs-update.py b/docs/docs-update.py index b605aeccb2..11d7452268 100755 --- a/docs/docs-update.py +++ b/docs/docs-update.py @@ -148,7 +148,7 @@ def update_man_pages(): help_string = e.output last_key = "" - for l in str(help).split("\n"): + for l in str(help_string).split("\n"): l = l.rstrip() if l != "": match = re.match("Usage: docker {}(.*)".format(command), l) From be5bfbe2217905ba129588fe17264b96c866e26d Mon Sep 17 00:00:00 2001 From: Alexandr Morozov Date: Wed, 26 Nov 2014 23:00:13 -0800 Subject: [PATCH 478/592] Change path breakout detection logic in archive package Fixes #9375 Signed-off-by: Alexandr Morozov --- integration-cli/docker_cli_cp_test.go | 38 +++++++++++++++++++++++++++ pkg/archive/archive.go | 9 ++++--- pkg/archive/diff.go | 12 +++++---- 3 files changed, 51 insertions(+), 8 deletions(-) diff --git a/integration-cli/docker_cli_cp_test.go b/integration-cli/docker_cli_cp_test.go index 3ebb2ab14f..a5432849dd 100644 --- a/integration-cli/docker_cli_cp_test.go +++ b/integration-cli/docker_cli_cp_test.go @@ -478,3 +478,41 @@ func TestCpVolumePath(t *testing.T) { logDone("cp - volume path") } + +func TestCpToDot(t *testing.T) { + out, exitCode, err := dockerCmd(t, "run", "-d", "busybox", "/bin/sh", "-c", "echo lololol > /test") + if err != nil || exitCode != 0 { + t.Fatal("failed to create a container", out, err) + } + + cleanedContainerID := stripTrailingCharacters(out) + defer deleteContainer(cleanedContainerID) + + out, _, err = dockerCmd(t, "wait", cleanedContainerID) + if err != nil || stripTrailingCharacters(out) != "0" { + t.Fatal("failed to set up container", out, err) + } + + tmpdir, err := ioutil.TempDir("", "docker-integration") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(tmpdir) + cwd, err := os.Getwd() + if err != nil { + t.Fatal(err) + } + defer os.Chdir(cwd) + if err := os.Chdir(tmpdir); err != nil { + t.Fatal(err) + } + _, _, err = dockerCmd(t, "cp", cleanedContainerID+":/test", ".") + if err != nil { + t.Fatalf("couldn't docker cp to \".\" path: %s", err) + } + content, err := ioutil.ReadFile("./test") + if string(content) != "lololol\n" { + t.Fatal("Wrong content in copied file %q, should be %q", content, "lololol\n") + } + logDone("cp - to dot path") +} diff --git a/pkg/archive/archive.go b/pkg/archive/archive.go index aaeed31981..3783e72d91 100644 --- a/pkg/archive/archive.go +++ b/pkg/archive/archive.go @@ -530,10 +530,13 @@ loop: } } - // Prevent symlink breakout path := filepath.Join(dest, hdr.Name) - if !strings.HasPrefix(path, dest) { - return breakoutError(fmt.Errorf("%q is outside of %q", path, dest)) + rel, err := filepath.Rel(dest, path) + if err != nil { + return err + } + if strings.HasPrefix(rel, "..") { + return breakoutError(fmt.Errorf("%q is outside of %q", 
hdr.Name, dest)) } // If path exits we almost always just want to remove and replace it diff --git a/pkg/archive/diff.go b/pkg/archive/diff.go index 856cedcead..c6118c5db3 100644 --- a/pkg/archive/diff.go +++ b/pkg/archive/diff.go @@ -92,12 +92,14 @@ func ApplyLayer(dest string, layer ArchiveReader) error { } path := filepath.Join(dest, hdr.Name) - base := filepath.Base(path) - - // Prevent symlink breakout - if !strings.HasPrefix(path, dest) { - return breakoutError(fmt.Errorf("%q is outside of %q", path, dest)) + rel, err := filepath.Rel(dest, path) + if err != nil { + return err } + if strings.HasPrefix(rel, "..") { + return breakoutError(fmt.Errorf("%q is outside of %q", hdr.Name, dest)) + } + base := filepath.Base(path) if strings.HasPrefix(base, ".wh.") { originalBase := base[len(".wh."):] From 9c5e61c24c490ca3e9d52855ee26d4a8e9032fb2 Mon Sep 17 00:00:00 2001 From: Emily Maier Date: Fri, 28 Nov 2014 13:48:50 -0500 Subject: [PATCH 479/592] Fixes race condition in test. Closes #9389. Signed-off-by: Emily Maier --- integration-cli/docker_cli_start_test.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/integration-cli/docker_cli_start_test.go b/integration-cli/docker_cli_start_test.go index da550cc776..8041c01c68 100644 --- a/integration-cli/docker_cli_start_test.go +++ b/integration-cli/docker_cli_start_test.go @@ -13,7 +13,7 @@ func TestStartAttachReturnsOnError(t *testing.T) { defer deleteAllContainers() dockerCmd(t, "run", "-d", "--name", "test", "busybox") - dockerCmd(t, "stop", "test") + dockerCmd(t, "wait", "test") // Expect this to fail because the above container is stopped, this is what we want if _, err := runCommand(exec.Command(dockerBinary, "run", "-d", "--name", "test2", "--link", "test:test", "busybox")); err == nil { From 8432224f0ef628b3da42def046e48f9635aa4b23 Mon Sep 17 00:00:00 2001 From: Jacob Atzen Date: Tue, 25 Nov 2014 12:07:02 +0100 Subject: [PATCH 480/592] Update documentation for EXPOSE The documentation for EXPOSE seems to indicate, that EXPOSE is only relevant in the context of links, which is not the case. Signed-off-by: Jacob Atzen --- docs/sources/reference/builder.md | 11 +++++++---- 1 file changed, 7 insertions(+), 4 deletions(-) diff --git a/docs/sources/reference/builder.md b/docs/sources/reference/builder.md index 121018cff5..65a1737866 100644 --- a/docs/sources/reference/builder.md +++ b/docs/sources/reference/builder.md @@ -329,10 +329,13 @@ default specified in `CMD`. The `EXPOSE` instructions informs Docker that the container will listen on the specified network ports at runtime. Docker uses this information to interconnect containers using links (see the [Docker User -Guide](/userguide/dockerlinks)). Note that `EXPOSE` only works for -inter-container links. It doesn't make ports accessible from the host. To -expose ports to the host, at runtime, -[use the `-p` flag](/userguide/dockerlinks). +Guide](/userguide/dockerlinks)) and to determine which ports to expose to the +host when [using the -P flag](/reference/run/#expose-incoming-ports). +**Note:** +`EXPOSE` doesn't define which ports can be exposed to the host or make ports +accessible from the host by default. To expose ports to the host, at runtime, +[use the `-p` flag](/userguide/dockerlinks) or +[the -P flag](/reference/run/#expose-incoming-ports). 
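Looking back at the archive path-breakout change earlier in this series: the reason for switching from a plain prefix comparison to `filepath.Rel` is that string prefixes ignore path boundaries. A minimal standalone Go sketch of the two checks, using illustrative paths only:

```
package main

import (
	"fmt"
	"path/filepath"
	"strings"
)

// isOutside reports whether joining name onto dest escapes dest,
// following the filepath.Rel approach used in the patch above.
func isOutside(dest, name string) (bool, error) {
	path := filepath.Join(dest, name)
	rel, err := filepath.Rel(dest, path)
	if err != nil {
		return false, err
	}
	return strings.HasPrefix(rel, ".."), nil
}

func main() {
	// A sibling directory that merely shares a string prefix with dest:
	// the old strings.HasPrefix(path, dest) check accepted it.
	dest := "/unpack/dest"
	name := "../dest-evil/x"
	path := filepath.Join(dest, name)                                  // "/unpack/dest-evil/x"
	fmt.Println("old check flags it:", !strings.HasPrefix(path, dest)) // false -> breakout missed
	out, _ := isOutside(dest, name)
	fmt.Println("new check flags it:", out) // true -> breakout caught

	// And a normal extraction into "." (see TestCpToDot) is no longer rejected.
	out, _ = isOutside(".", "test")
	fmt.Println("plain entry under '.' flagged:", out) // false
}
```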
## ENV From 9cc73c62e6779bf2c8eefc4192c017a2c73daf40 Mon Sep 17 00:00:00 2001 From: Qiang Huang Date: Sun, 30 Nov 2014 01:58:16 +0800 Subject: [PATCH 481/592] flag: fix the comments Signed-off-by: Qiang Huang --- pkg/mflag/flag.go | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/pkg/mflag/flag.go b/pkg/mflag/flag.go index c9061c2d73..a30c41b045 100644 --- a/pkg/mflag/flag.go +++ b/pkg/mflag/flag.go @@ -23,12 +23,12 @@ flag.Var(&flagVal, []string{"name"}, "help message for flagname") For such flags, the default value is just the initial value of the variable. - You can also add "deprecated" flags, they are still usable, bur are not shown + You can also add "deprecated" flags, they are still usable, but are not shown in the usage and will display a warning when you try to use them: - var ip = flag.Int([]string{"f", "#flagname", "-flagname"}, 1234, "help message for flagname") - this will display: `Warning: '-flagname' is deprecated, it will be replaced by '--flagname' soon. See usage.` and + var ip = flag.Int([]string{"#f", "#flagname", "-flagname2"}, 1234, "help message for flagname") + this will display: `Warning: '--flagname' is deprecated, it will be replaced by '--flagname2' soon. See usage.` and var ip = flag.Int([]string{"f", "#flagname"}, 1234, "help message for flagname") - will display: `Warning: '-t' is deprecated, it will be removed soon. See usage.` + will display: `Warning: '-f' is deprecated, it will be removed soon. See usage.` You can also group one letter flags, bif you declare var v = flag.Bool([]string{"v", "-verbose"}, false, "help message for verbose") From eb8e84c48a8a20895b78299dbc2d8429d092be8d Mon Sep 17 00:00:00 2001 From: Alexander Boyd Date: Mon, 24 Nov 2014 16:22:50 -0700 Subject: [PATCH 482/592] Correct inaccuracy in docker pull documentation Signed-off-by: Alexander Boyd --- docs/sources/reference/commandline/cli.md | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/docs/sources/reference/commandline/cli.md b/docs/sources/reference/commandline/cli.md index 06220be22a..72144d3b5a 100644 --- a/docs/sources/reference/commandline/cli.md +++ b/docs/sources/reference/commandline/cli.md @@ -1199,9 +1199,8 @@ use `docker pull`: # will pull the debian:latest image, its intermediate layers # and any aliases of the same id $ sudo docker pull debian:testing - # will pull the image named ubuntu:trusty, ubuntu:14.04 - # which is an alias of the same image - # and any intermediate layers it is based on. + # will pull the image named debian:testing and any intermediate + # layers it is based on. # (Typically the empty `scratch` image, a MAINTAINER layer, # and the un-tarred base). $ sudo docker pull --all-tags centos From 47d8ec0a42ce2cd71a0275665f2e7d74a5a56cbf Mon Sep 17 00:00:00 2001 From: Flavio Castelli Date: Sun, 16 Nov 2014 15:14:45 -0500 Subject: [PATCH 483/592] Cover openSUSE and SUSE Linux Enterprise on the same page Updated the documentation to cover the installation of Docker on openSUSE and on SUSE Linux Enterprise. 
Docker-DCO-1.1-Signed-off-by: Flavio Castelli (github: flavio) --- docs/sources/installation.md | 2 +- .../installation/{openSUSE.md => SUSE.md} | 39 +++++++++---------- 2 files changed, 20 insertions(+), 21 deletions(-) rename docs/sources/installation/{openSUSE.md => SUSE.md} (66%) diff --git a/docs/sources/installation.md b/docs/sources/installation.md index 1c3c726594..7eaabeeefe 100644 --- a/docs/sources/installation.md +++ b/docs/sources/installation.md @@ -16,7 +16,7 @@ techniques for installing Docker all the time. - [Arch Linux](archlinux/) - [CRUX Linux](cruxlinux/) - [Gentoo](gentoolinux/) - - [openSUSE](openSUSE/) + - [openSUSE and SUSE Linux Enterprise](SUSE/) - [FrugalWare](frugalware/) - [Mac OS X](mac/) - [Windows](windows/) diff --git a/docs/sources/installation/openSUSE.md b/docs/sources/installation/SUSE.md similarity index 66% rename from docs/sources/installation/openSUSE.md rename to docs/sources/installation/SUSE.md index bcd08d9fc0..2a0aa91d9f 100644 --- a/docs/sources/installation/openSUSE.md +++ b/docs/sources/installation/SUSE.md @@ -1,40 +1,39 @@ -page_title: Installation on openSUSE -page_description: Installation instructions for Docker on openSUSE. -page_keywords: openSUSE, virtualbox, docker, documentation, installation +page_title: Installation on openSUSE and SUSE Linux Enterprise +page_description: Installation instructions for Docker on openSUSE and on SUSE Linux Enterprise. +page_keywords: openSUSE, SUSE Linux Enterprise, SUSE, SLE, docker, documentation, installation # openSUSE Docker is available in **openSUSE 12.3 and later**. Please note that due -to the current Docker limitations Docker is able to run only on the **64 -bit** architecture. +to its current limitations Docker is able to run only **64 bit** architecture. -## Installation +Docker is not part of the official repositories of openSUSE 12.3 and +openSUSE 13.1. Hence it is neccessary to add the [Virtualization +repository](https://build.opensuse.org/project/show/Virtualization) from +[OBS](https://build.opensuse.org/) to install the `docker` package. -The `docker` package from the [Virtualization -project](https://build.opensuse.org/project/show/Virtualization) on -[OBS](https://build.opensuse.org/) provides Docker on openSUSE. - -To proceed with Docker installation please add the right Virtualization -repository. +Execute one of the following commands to add the Virtualization repository: # openSUSE 12.3 $ sudo zypper ar -f http://download.opensuse.org/repositories/Virtualization/openSUSE_12.3/ Virtualization - $ sudo rpm --import http://download.opensuse.org/repositories/Virtualization/openSUSE_12.3/repodata/repomd.xml.key # openSUSE 13.1 $ sudo zypper ar -f http://download.opensuse.org/repositories/Virtualization/openSUSE_13.1/ Virtualization - $ sudo rpm --import http://download.opensuse.org/repositories/Virtualization/openSUSE_13.1/repodata/repomd.xml.key + +No extra repository is required for openSUSE 13.2 and later. + +# SUSE Linux Enterprise + +Docker is available in **SUSE Linux Enterprise 12 and later**. Please note that +due to its current limitations Docker is able to run only on **64 bit** +architecture. + +# Installation Install the Docker package. $ sudo zypper in docker -It's also possible to install Docker using openSUSE's1-click install. -Just visit [this](http://software.opensuse.org/package/docker) page, -select your openSUSE version and click on the installation link. This -will add the right repository to your system and it will also install -the docker package. 
- Now that it's installed, let's start the Docker daemon. $ sudo systemctl start docker From 4bf72613fed9424356e2146d795ddbfa825befa4 Mon Sep 17 00:00:00 2001 From: Neal McBurnett Date: Sun, 30 Nov 2014 21:12:57 -0700 Subject: [PATCH 484/592] Fixes #9418 Docker Registry now open source Signed-off-by: Neal McBurnett --- docs/sources/reference/api/registry_api.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/docs/sources/reference/api/registry_api.md b/docs/sources/reference/api/registry_api.md index 0839fe209f..43a463cd5e 100644 --- a/docs/sources/reference/api/registry_api.md +++ b/docs/sources/reference/api/registry_api.md @@ -14,9 +14,9 @@ page_keywords: API, Docker, index, registry, REST, documentation service using tokens - It supports different storage backends (S3, cloud files, local FS) - It doesn't have a local database - - It will be open-sourced at some point + - The registry is open source: [Docker Registry](https://github.com/docker/docker-registry) -We expect that there will be multiple registries out there. To help to + We expect that there will be multiple registries out there. To help to grasp the context, here are some examples of registries: - **sponsor registry**: such a registry is provided by a third-party From 921346be48d20a66b2b5763144b064395285bb32 Mon Sep 17 00:00:00 2001 From: HuKeping Date: Mon, 1 Dec 2014 16:28:08 +0800 Subject: [PATCH 485/592] inspect: format the output of docker inspect Prior to this patch, one would get the output of docker inspect xxx as below: user@server:/mnt$ docker inspect ubuntu [{ "Architecture": "amd64", ... "VirtualSize": 199257566 } ]user@server:/mnt$ The last ']' was on the same line with the prompt, i wonder if it is really what we want it be, it is a little weird, so i add a '\n' to it. 
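A stand-alone Go sketch of the buffer handling this commit describes, with made-up container JSON; the point is only that the array is now closed with `"]\n"` so the shell prompt starts on a fresh line:

```
package main

import (
	"bytes"
	"fmt"
)

func main() {
	var indented bytes.Buffer
	indented.WriteByte('[')
	for _, obj := range []string{`{"Id":"aaa"}`, `{"Id":"bbb"}`} {
		indented.WriteString(obj)
		indented.WriteByte(',') // every element is followed by a comma
	}
	if indented.Len() > 1 {
		indented.Truncate(indented.Len() - 1) // drop the trailing ','
	}
	// Close the array and terminate the line, so whatever is printed
	// next (such as a shell prompt) begins on its own line.
	indented.WriteString("]\n")
	fmt.Print(indented.String())
}
```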
Signed-off-by: Hu Keping --- api/client/commands.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/api/client/commands.go b/api/client/commands.go index 8cb1b2847f..a3dcffa736 100644 --- a/api/client/commands.go +++ b/api/client/commands.go @@ -882,7 +882,7 @@ func (cli *DockerCli) CmdInspect(args ...string) error { // Remove trailing ',' indented.Truncate(indented.Len() - 1) } - indented.WriteByte(']') + indented.WriteString("]\n") if tmpl == nil { if _, err := io.Copy(cli.out, indented); err != nil { From 51172493ab6d2b31760b9fe273c7d4d0917a9bb7 Mon Sep 17 00:00:00 2001 From: Huayi Zhang Date: Mon, 1 Dec 2014 16:45:53 +0800 Subject: [PATCH 486/592] Attach goroutine blocking profiler /debug/pprof/block is 404 currently Signed-off-by: Huayi Zhang --- api/server/server.go | 1 + 1 file changed, 1 insertion(+) diff --git a/api/server/server.go b/api/server/server.go index 41318967a6..bf5318c0f4 100644 --- a/api/server/server.go +++ b/api/server/server.go @@ -1259,6 +1259,7 @@ func AttachProfiler(router *mux.Router) { router.HandleFunc("/debug/pprof/cmdline", pprof.Cmdline) router.HandleFunc("/debug/pprof/profile", pprof.Profile) router.HandleFunc("/debug/pprof/symbol", pprof.Symbol) + router.HandleFunc("/debug/pprof/block", pprof.Handler("block").ServeHTTP) router.HandleFunc("/debug/pprof/heap", pprof.Handler("heap").ServeHTTP) router.HandleFunc("/debug/pprof/goroutine", pprof.Handler("goroutine").ServeHTTP) router.HandleFunc("/debug/pprof/threadcreate", pprof.Handler("threadcreate").ServeHTTP) From ffda2035e5a9e6a723c4c84cd5f88c5f4a9dc2ba Mon Sep 17 00:00:00 2001 From: Sindhu S Date: Mon, 1 Dec 2014 19:07:04 +0530 Subject: [PATCH 487/592] Fix dead link in oracle.md --- docs/sources/installation/oracle.md | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/docs/sources/installation/oracle.md b/docs/sources/installation/oracle.md index e99c828efa..6d2f782b49 100644 --- a/docs/sources/installation/oracle.md +++ b/docs/sources/installation/oracle.md @@ -122,5 +122,4 @@ Request at [My Oracle Support](http://support.oracle.com). If you do not have an Oracle Linux Support Subscription, you can use the [Oracle Linux -Forum](https://community.oracle.com/community/server_%26_storage_systems/linux/ -oracle_linux) for community-based support. +Forum](https://community.oracle.com/community/server_%26_storage_systems/linux/oracle_linux) for community-based support. From b1dc0db56ac9a27d51ab2a3684ed398d7b80a80c Mon Sep 17 00:00:00 2001 From: Sindhu S Date: Mon, 1 Dec 2014 19:28:02 +0530 Subject: [PATCH 488/592] Fix dead link cli.md --- docs/sources/reference/commandline/cli.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/sources/reference/commandline/cli.md b/docs/sources/reference/commandline/cli.md index 72144d3b5a..4a11c6afef 100644 --- a/docs/sources/reference/commandline/cli.md +++ b/docs/sources/reference/commandline/cli.md @@ -102,7 +102,7 @@ To run the daemon with debug output, use `docker -d -D`. ### Daemon socket option -The Docker daemon can listen for [Docker Remote API](reference/api/docker_remote_api/) +The Docker daemon can listen for [Docker Remote API](/reference/api/docker_remote_api/) requests via three different types of Socket: `unix`, `tcp`, and `fd`. 
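For readers who want to poke at the `unix` socket transport directly, here is a small Go client written in the same style as the `sockRequest` test helper earlier in this series; the socket path and the `/containers/json` endpoint are simply the defaults that helper assumes.

```
package main

import (
	"fmt"
	"io/ioutil"
	"net"
	"net/http"
	"net/http/httputil"
	"time"
)

func main() {
	// Assumes the daemon is listening on the default unix socket.
	conn, err := net.DialTimeout("unix", "/var/run/docker.sock", 10*time.Second)
	if err != nil {
		panic(err)
	}
	client := httputil.NewClientConn(conn, nil)
	defer client.Close()

	req, err := http.NewRequest("GET", "/containers/json?all=1", nil)
	if err != nil {
		panic(err)
	}
	resp, err := client.Do(req)
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()

	body, _ := ioutil.ReadAll(resp.Body)
	fmt.Println(resp.Status)
	fmt.Println(string(body))
}
```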
By default, a `unix` domain socket (or IPC socket) is created at `/var/run/docker.sock`, From 7754ef1f064be08419bf397e2ea7e4ce99bdf90d Mon Sep 17 00:00:00 2001 From: Sindhu S Date: Mon, 1 Dec 2014 19:35:58 +0530 Subject: [PATCH 489/592] Fix dead link build.md --- docs/sources/reference/builder.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/sources/reference/builder.md b/docs/sources/reference/builder.md index dffabaff3a..adc308c9d6 100644 --- a/docs/sources/reference/builder.md +++ b/docs/sources/reference/builder.md @@ -19,7 +19,7 @@ Dockerfile knowledge with the [Dockerfile tutorial](/userguide/level1). ## Usage -To [*build*](../commandline/cli/#cli-build) an image from a source repository, +To [*build*](/reference/commandline/cli/#build) an image from a source repository, create a description file called `Dockerfile` at the root of your repository. This file will describe the steps to assemble the image. From 9200fdd197f7c80c495597104928596516b36f41 Mon Sep 17 00:00:00 2001 From: Blake Geno Date: Mon, 1 Dec 2014 11:44:13 -0500 Subject: [PATCH 490/592] remove obsolete workaround Signed-off-by: Blake Geno --- daemon/create.go | 10 ++++------ 1 file changed, 4 insertions(+), 6 deletions(-) diff --git a/daemon/create.go b/daemon/create.go index e666e6f6ff..65bdf28d9c 100644 --- a/daemon/create.go +++ b/daemon/create.go @@ -53,12 +53,9 @@ func (daemon *Daemon) ContainerCreate(job *engine.Job) engine.Status { job.Errorf("IPv4 forwarding is disabled.\n") } container.LogEvent("create") - // FIXME: this is necessary because daemon.Create might return a nil container - // with a non-nil error. This should not happen! Once it's fixed we - // can remove this workaround. - if container != nil { - job.Printf("%s\n", container.ID) - } + + job.Printf("%s\n", container.ID) + for _, warning := range buildWarnings { job.Errorf("%s\n", warning) } @@ -108,6 +105,7 @@ func (daemon *Daemon) Create(config *runconfig.Config, hostConfig *runconfig.Hos } return container, warnings, nil } + func (daemon *Daemon) GenerateSecurityOpt(ipcMode runconfig.IpcMode) ([]string, error) { if ipcMode.IsHost() { return label.DisableSecOpt(), nil From 305e9cd2fab974c38064a1acd2ef1f552a3a1357 Mon Sep 17 00:00:00 2001 From: Michael Crosby Date: Mon, 1 Dec 2014 14:54:14 -0500 Subject: [PATCH 491/592] Label standard mounts within loop Signed-off-by: Michael Crosby --- daemon/volumes.go | 13 +++++-------- 1 file changed, 5 insertions(+), 8 deletions(-) diff --git a/daemon/volumes.go b/daemon/volumes.go index 54cc0369dc..46ae5588af 100644 --- a/daemon/volumes.go +++ b/daemon/volumes.go @@ -236,25 +236,22 @@ func validMountMode(mode string) bool { } func (container *Container) setupMounts() error { - if err := label.SetFileLabel(container.ResolvConfPath, container.MountLabel); err != nil { - return err - } mounts := []execdriver.Mount{ {Source: container.ResolvConfPath, Destination: "/etc/resolv.conf", Writable: true, Private: true}, } if container.HostnamePath != "" { - if err := label.SetFileLabel(container.HostnamePath, container.MountLabel); err != nil { - return err - } mounts = append(mounts, execdriver.Mount{Source: container.HostnamePath, Destination: "/etc/hostname", Writable: true, Private: true}) } if container.HostsPath != "" { - if err := label.SetFileLabel(container.HostsPath, container.MountLabel); err != nil { + mounts = append(mounts, execdriver.Mount{Source: container.HostsPath, Destination: "/etc/hosts", Writable: true, Private: true}) + } + + for _, m := range mounts { + if err := 
label.SetFileLabel(m.Source, container.MountLabel); err != nil { return err } - mounts = append(mounts, execdriver.Mount{Source: container.HostsPath, Destination: "/etc/hosts", Writable: true, Private: true}) } // Mount user specified volumes From c8a3d31332074ddc226086ff1f0c042b6e120232 Mon Sep 17 00:00:00 2001 From: Brian Goff Date: Mon, 1 Dec 2014 12:16:49 -0500 Subject: [PATCH 492/592] Check for no `Cmd` on exec create endpoint Fixes #9414 Signed-off-by: Brian Goff --- daemon/exec.go | 5 +++- integration-cli/docker_api_containers_test.go | 6 ++--- integration-cli/docker_api_exec_test.go | 25 +++++++++++++++++++ integration-cli/docker_api_inspect_test.go | 2 +- integration-cli/docker_api_resize_test.go | 4 +-- integration-cli/docker_utils.go | 11 ++++++-- runconfig/exec.go | 20 +++++++++------ 7 files changed, 57 insertions(+), 16 deletions(-) create mode 100644 integration-cli/docker_api_exec_test.go diff --git a/daemon/exec.go b/daemon/exec.go index 2b0f1bcb26..7d6755118e 100644 --- a/daemon/exec.go +++ b/daemon/exec.go @@ -118,7 +118,10 @@ func (d *Daemon) ContainerExecCreate(job *engine.Job) engine.Status { return job.Error(err) } - config := runconfig.ExecConfigFromJob(job) + config, err := runconfig.ExecConfigFromJob(job) + if err != nil { + return job.Error(err) + } entrypoint, args := d.getEntrypointAndArgs(nil, config.Cmd) diff --git a/integration-cli/docker_api_containers_test.go b/integration-cli/docker_api_containers_test.go index 605c24bf91..f02f619c44 100644 --- a/integration-cli/docker_api_containers_test.go +++ b/integration-cli/docker_api_containers_test.go @@ -23,7 +23,7 @@ func TestContainerApiGetAll(t *testing.T) { t.Fatalf("Error on container creation: %v, output: %q", err, out) } - body, err := sockRequest("GET", "/containers/json?all=1") + body, err := sockRequest("GET", "/containers/json?all=1", nil) if err != nil { t.Fatalf("GET all containers sockRequest failed: %v", err) } @@ -56,7 +56,7 @@ func TestContainerApiGetExport(t *testing.T) { t.Fatalf("Error on container creation: %v, output: %q", err, out) } - body, err := sockRequest("GET", "/containers/"+name+"/export") + body, err := sockRequest("GET", "/containers/"+name+"/export", nil) if err != nil { t.Fatalf("GET containers/export sockRequest failed: %v", err) } @@ -92,7 +92,7 @@ func TestContainerApiGetChanges(t *testing.T) { t.Fatalf("Error on container creation: %v, output: %q", err, out) } - body, err := sockRequest("GET", "/containers/"+name+"/changes") + body, err := sockRequest("GET", "/containers/"+name+"/changes", nil) if err != nil { t.Fatalf("GET containers/changes sockRequest failed: %v", err) } diff --git a/integration-cli/docker_api_exec_test.go b/integration-cli/docker_api_exec_test.go new file mode 100644 index 0000000000..df7122dd75 --- /dev/null +++ b/integration-cli/docker_api_exec_test.go @@ -0,0 +1,25 @@ +package main + +import ( + "bytes" + "fmt" + "os/exec" + "testing" +) + +// Regression test for #9414 +func TestExecApiCreateNoCmd(t *testing.T) { + defer deleteAllContainers() + name := "exec_test" + runCmd := exec.Command(dockerBinary, "run", "-d", "-t", "--name", name, "busybox", "/bin/sh") + if out, _, err := runCommandWithOutput(runCmd); err != nil { + t.Fatal(out, err) + } + + body, err := sockRequest("POST", fmt.Sprintf("/containers/%s/exec", name), map[string]interface{}{"Cmd": nil}) + if err == nil || !bytes.Contains(body, []byte("No exec command specified")) { + t.Fatalf("Expected error when creating exec command with no Cmd specified: %q", err) + } + + logDone("exec create 
API - returns error when missing Cmd") +} diff --git a/integration-cli/docker_api_inspect_test.go b/integration-cli/docker_api_inspect_test.go index 112299484c..1ff0312581 100644 --- a/integration-cli/docker_api_inspect_test.go +++ b/integration-cli/docker_api_inspect_test.go @@ -24,7 +24,7 @@ func TestInspectApiContainerResponse(t *testing.T) { if testVersion != "latest" { endpoint = "/" + testVersion + endpoint } - body, err := sockRequest("GET", endpoint) + body, err := sockRequest("GET", endpoint, nil) if err != nil { t.Fatalf("sockRequest failed for %s version: %v", testVersion, err) } diff --git a/integration-cli/docker_api_resize_test.go b/integration-cli/docker_api_resize_test.go index 355bfd9977..6ba95c3052 100644 --- a/integration-cli/docker_api_resize_test.go +++ b/integration-cli/docker_api_resize_test.go @@ -16,7 +16,7 @@ func TestResizeApiResponse(t *testing.T) { cleanedContainerID := stripTrailingCharacters(out) endpoint := "/containers/" + cleanedContainerID + "/resize?h=40&w=40" - _, err = sockRequest("POST", endpoint) + _, err = sockRequest("POST", endpoint, nil) if err != nil { t.Fatalf("resize Request failed %v", err) } @@ -41,7 +41,7 @@ func TestResizeApiResponseWhenContainerNotStarted(t *testing.T) { } endpoint := "/containers/" + cleanedContainerID + "/resize?h=40&w=40" - body, err := sockRequest("POST", endpoint) + body, err := sockRequest("POST", endpoint, nil) if err == nil { t.Fatalf("resize should fail when container is not started") } diff --git a/integration-cli/docker_utils.go b/integration-cli/docker_utils.go index 9b5fa76c0c..2c66ce2d0c 100644 --- a/integration-cli/docker_utils.go +++ b/integration-cli/docker_utils.go @@ -1,6 +1,8 @@ package main import ( + "bytes" + "encoding/json" "errors" "fmt" "io" @@ -249,7 +251,7 @@ func (d *Daemon) Cmd(name string, arg ...string) (string, error) { return string(b), err } -func sockRequest(method, endpoint string) ([]byte, error) { +func sockRequest(method, endpoint string, data interface{}) ([]byte, error) { // FIX: the path to sock should not be hardcoded sock := filepath.Join("/", "var", "run", "docker.sock") c, err := net.DialTimeout("unix", sock, time.Duration(10*time.Second)) @@ -260,7 +262,12 @@ func sockRequest(method, endpoint string) ([]byte, error) { client := httputil.NewClientConn(c, nil) defer client.Close() - req, err := http.NewRequest(method, endpoint, nil) + jsonData := bytes.NewBuffer(nil) + if err := json.NewEncoder(jsonData).Encode(data); err != nil { + return nil, err + } + + req, err := http.NewRequest(method, endpoint, jsonData) req.Header.Set("Content-Type", "application/json") if err != nil { return nil, fmt.Errorf("could not create new request: %v", err) diff --git a/runconfig/exec.go b/runconfig/exec.go index b83c11bd1d..1ced70a86a 100644 --- a/runconfig/exec.go +++ b/runconfig/exec.go @@ -1,6 +1,8 @@ package runconfig import ( + "fmt" + "github.com/docker/docker/engine" flag "github.com/docker/docker/pkg/mflag" ) @@ -17,7 +19,7 @@ type ExecConfig struct { Cmd []string } -func ExecConfigFromJob(job *engine.Job) *ExecConfig { +func ExecConfigFromJob(job *engine.Job) (*ExecConfig, error) { execConfig := &ExecConfig{ // TODO(vishh): Expose 'User' once it is supported. 
//User: job.Getenv("User"), @@ -28,11 +30,14 @@ func ExecConfigFromJob(job *engine.Job) *ExecConfig { AttachStderr: job.GetenvBool("AttachStderr"), AttachStdout: job.GetenvBool("AttachStdout"), } - if cmd := job.GetenvList("Cmd"); cmd != nil { - execConfig.Cmd = cmd + cmd := job.GetenvList("Cmd") + if len(cmd) == 0 { + return nil, fmt.Errorf("No exec command specified") } - return execConfig + execConfig.Cmd = cmd + + return execConfig, nil } func ParseExec(cmd *flag.FlagSet, args []string) (*ExecConfig, error) { @@ -47,10 +52,11 @@ func ParseExec(cmd *flag.FlagSet, args []string) (*ExecConfig, error) { return nil, err } parsedArgs := cmd.Args() - if len(parsedArgs) > 1 { - container = cmd.Arg(0) - execCmd = parsedArgs[1:] + if len(parsedArgs) < 2 { + return nil, fmt.Errorf("not enough arguments to create exec command") } + container = cmd.Arg(0) + execCmd = parsedArgs[1:] execConfig := &ExecConfig{ // TODO(vishh): Expose '-u' flag once it is supported. From a00a1a1fca020d21cb677439160e018bda5c3835 Mon Sep 17 00:00:00 2001 From: Alexandr Morozov Date: Mon, 1 Dec 2014 16:24:43 -0800 Subject: [PATCH 493/592] Try other port on any error from Map Sometimes other programs can bind on ports from our range, so we just skip this ports on allocation. Fixes #9293 Probably fixes #8714 Signed-off-by: Alexander Morozov --- daemon/networkdriver/bridge/driver.go | 20 ++++--------- integration-cli/docker_cli_run_test.go | 41 ++++++++++++++++++++++++++ 2 files changed, 46 insertions(+), 15 deletions(-) diff --git a/daemon/networkdriver/bridge/driver.go b/daemon/networkdriver/bridge/driver.go index 7fe04550ef..e0467b6bd7 100644 --- a/daemon/networkdriver/bridge/driver.go +++ b/daemon/networkdriver/bridge/driver.go @@ -11,7 +11,6 @@ import ( log "github.com/Sirupsen/logrus" "github.com/docker/docker/daemon/networkdriver" "github.com/docker/docker/daemon/networkdriver/ipallocator" - "github.com/docker/docker/daemon/networkdriver/portallocator" "github.com/docker/docker/daemon/networkdriver/portmapper" "github.com/docker/docker/engine" "github.com/docker/docker/nat" @@ -468,22 +467,13 @@ func AllocatePort(job *engine.Job) engine.Status { if host, err = portmapper.Map(container, ip, hostPort); err == nil { break } - - if allocerr, ok := err.(portallocator.ErrPortAlreadyAllocated); ok { - // There is no point in immediately retrying to map an explicitly - // chosen port. - if hostPort != 0 { - job.Logf("Failed to bind %s for container address %s: %s", allocerr.IPPort(), container.String(), allocerr.Error()) - break - } - - // Automatically chosen 'free' port failed to bind: move on the next. - job.Logf("Failed to bind %s for container address %s. Trying another port.", allocerr.IPPort(), container.String()) - } else { - // some other error during mapping - job.Logf("Received an unexpected error during port allocation: %s", err.Error()) + // There is no point in immediately retrying to map an explicitly + // chosen port. 
+ if hostPort != 0 { + job.Logf("Failed to allocate and map port %d: %s", hostPort, err) break } + job.Logf("Failed to allocate and map port: %s, retry: %d", err, i+1) } if err != nil { diff --git a/integration-cli/docker_cli_run_test.go b/integration-cli/docker_cli_run_test.go index 844fc62ddd..20a096f196 100644 --- a/integration-cli/docker_cli_run_test.go +++ b/integration-cli/docker_cli_run_test.go @@ -2701,3 +2701,44 @@ func TestRunTLSverify(t *testing.T) { logDone("run - verify tls is set for --tlsverify") } + +func TestRunPortFromDockerRangeInUse(t *testing.T) { + defer deleteAllContainers() + // first find allocator current position + cmd := exec.Command(dockerBinary, "run", "-d", "-p", ":80", "busybox", "top") + out, _, err := runCommandWithOutput(cmd) + if err != nil { + t.Fatal(out, err) + } + id := strings.TrimSpace(out) + cmd = exec.Command(dockerBinary, "port", id) + out, _, err = runCommandWithOutput(cmd) + if err != nil { + t.Fatal(out, err) + } + out = strings.TrimSpace(out) + out = strings.Split(out, ":")[1] + lastPort, err := strconv.Atoi(out) + if err != nil { + t.Fatal(err) + } + port := lastPort + 1 + l, err := net.Listen("tcp", ":"+strconv.Itoa(port)) + if err != nil { + t.Fatal(err) + } + defer l.Close() + cmd = exec.Command(dockerBinary, "run", "-d", "-p", ":80", "busybox", "top") + out, _, err = runCommandWithOutput(cmd) + if err != nil { + t.Fatalf(out, err) + } + id = strings.TrimSpace(out) + cmd = exec.Command(dockerBinary, "port", id) + out, _, err = runCommandWithOutput(cmd) + if err != nil { + t.Fatal(out, err) + } + + logDone("run - find another port if port from autorange already bound") +} From e98e56bb1edc3835bc835c9e034780845c64d797 Mon Sep 17 00:00:00 2001 From: Arnaud Porterie Date: Mon, 1 Dec 2014 17:57:53 -0800 Subject: [PATCH 494/592] Fix client-side HTTP hijacking over TLS Properly CloseWrite() the client socket once done with stdin when using TLS connection (this used to rely on an erroneous type assertion). Fixes #8658. Fixes #8642. Signed-off-by: Arnaud Porterie Signed-off-by: Michael Crosby --- api/client/hijack.go | 105 ++++++++++++++++++++++++++++++++++++++++--- 1 file changed, 98 insertions(+), 7 deletions(-) diff --git a/api/client/hijack.go b/api/client/hijack.go index adc012bace..617a0b3f61 100644 --- a/api/client/hijack.go +++ b/api/client/hijack.go @@ -2,6 +2,7 @@ package client import ( "crypto/tls" + "errors" "fmt" "io" "net" @@ -10,6 +11,7 @@ import ( "os" "runtime" "strings" + "time" log "github.com/Sirupsen/logrus" "github.com/docker/docker/api" @@ -19,9 +21,99 @@ import ( "github.com/docker/docker/pkg/term" ) +type tlsClientCon struct { + *tls.Conn + rawConn net.Conn +} + +func (c *tlsClientCon) CloseWrite() error { + // Go standard tls.Conn doesn't provide the CloseWrite() method so we do it + // on its underlying connection. + if cwc, ok := c.rawConn.(interface { + CloseWrite() error + }); ok { + return cwc.CloseWrite() + } + return nil +} + +func tlsDial(network, addr string, config *tls.Config) (net.Conn, error) { + return tlsDialWithDialer(new(net.Dialer), network, addr, config) +} + +// We need to copy Go's implementation of tls.Dial (pkg/cryptor/tls/tls.go) in +// order to return our custom tlsClientCon struct which holds both the tls.Conn +// object _and_ its underlying raw connection. The rationale for this is that +// we need to be able to close the write end of the connection when attaching, +// which tls.Conn does not provide. 
+func tlsDialWithDialer(dialer *net.Dialer, network, addr string, config *tls.Config) (net.Conn, error) { + // We want the Timeout and Deadline values from dialer to cover the + // whole process: TCP connection and TLS handshake. This means that we + // also need to start our own timers now. + timeout := dialer.Timeout + + if !dialer.Deadline.IsZero() { + deadlineTimeout := dialer.Deadline.Sub(time.Now()) + if timeout == 0 || deadlineTimeout < timeout { + timeout = deadlineTimeout + } + } + + var errChannel chan error + + if timeout != 0 { + errChannel = make(chan error, 2) + time.AfterFunc(timeout, func() { + errChannel <- errors.New("") + }) + } + + rawConn, err := dialer.Dial(network, addr) + if err != nil { + return nil, err + } + + colonPos := strings.LastIndex(addr, ":") + if colonPos == -1 { + colonPos = len(addr) + } + hostname := addr[:colonPos] + + // If no ServerName is set, infer the ServerName + // from the hostname we're connecting to. + if config.ServerName == "" { + // Make a copy to avoid polluting argument or default. + c := *config + c.ServerName = hostname + config = &c + } + + conn := tls.Client(rawConn, config) + + if timeout == 0 { + err = conn.Handshake() + } else { + go func() { + errChannel <- conn.Handshake() + }() + + err = <-errChannel + } + + if err != nil { + rawConn.Close() + return nil, err + } + + // This is Docker difference with standard's crypto/tls package: returned a + // wrapper which holds both the TLS and raw connections. + return &tlsClientCon{conn, rawConn}, nil +} + func (cli *DockerCli) dial() (net.Conn, error) { if cli.tlsConfig != nil && cli.proto != "unix" { - return tls.Dial(cli.proto, cli.addr, cli.tlsConfig) + // Notice this isn't Go standard's tls.Dial function + return tlsDial(cli.proto, cli.addr, cli.tlsConfig) } return net.Dial(cli.proto, cli.addr) } @@ -109,12 +201,11 @@ func (cli *DockerCli) hijack(method, path string, setRawTerminal bool, in io.Rea io.Copy(rwc, in) log.Debugf("[hijack] End of stdin") } - if tcpc, ok := rwc.(*net.TCPConn); ok { - if err := tcpc.CloseWrite(); err != nil { - log.Debugf("Couldn't send EOF: %s", err) - } - } else if unixc, ok := rwc.(*net.UnixConn); ok { - if err := unixc.CloseWrite(); err != nil { + + if conn, ok := rwc.(interface { + CloseWrite() error + }); ok { + if err := conn.CloseWrite(); err != nil { log.Debugf("Couldn't send EOF: %s", err) } } From c57317893a743720a241ae328d1bdf7bd02b1fea Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?L=C3=A9na=C3=AFc=20Huard?= Date: Sun, 30 Nov 2014 10:14:34 +0100 Subject: [PATCH 495/592] Rename overlayfs to overlay MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Since Linux 3.18-rc6, overlayfs has been renamed overlay. 
This change was introduced by the following commit in linux.git: ef94b1864d1ed5be54376404bb23d22ed0481feb ovl: rename filesystem type to "overlay" Signed-off-by: Lénaïc Huard --- contrib/check-config.sh | 2 +- daemon/graphdriver/overlayfs/overlayfs.go | 9 ++++----- 2 files changed, 5 insertions(+), 6 deletions(-) diff --git a/contrib/check-config.sh b/contrib/check-config.sh index 54b1359a22..e9fbb21e98 100755 --- a/contrib/check-config.sh +++ b/contrib/check-config.sh @@ -166,7 +166,7 @@ echo '- Storage Drivers:' check_flags BLK_DEV_DM DM_THIN_PROVISIONING EXT4_FS EXT4_FS_POSIX_ACL EXT4_FS_SECURITY | sed 's/^/ /' echo '- "'$(wrap_color 'overlayfs' blue)'":' - check_flags OVERLAYFS_FS | sed 's/^/ /' + check_flags OVERLAY_FS | sed 's/^/ /' } | sed 's/^/ /' echo diff --git a/daemon/graphdriver/overlayfs/overlayfs.go b/daemon/graphdriver/overlayfs/overlayfs.go index a9ce75a375..d7dfcb24ac 100644 --- a/daemon/graphdriver/overlayfs/overlayfs.go +++ b/daemon/graphdriver/overlayfs/overlayfs.go @@ -9,7 +9,6 @@ import ( "os" "os/exec" "path" - "strings" "sync" "syscall" @@ -115,7 +114,7 @@ func Init(home string, options []string) (graphdriver.Driver, error) { func supportsOverlayfs() error { // We can try to modprobe overlayfs first before looking at // proc/filesystems for when overlayfs is supported - exec.Command("modprobe", "overlayfs").Run() + exec.Command("modprobe", "overlay").Run() f, err := os.Open("/proc/filesystems") if err != nil { @@ -125,11 +124,11 @@ func supportsOverlayfs() error { s := bufio.NewScanner(f) for s.Scan() { - if strings.Contains(s.Text(), "overlayfs") { + if s.Text() == "nodev\toverlay" { return nil } } - log.Error("'overlayfs' not found as a supported filesystem on this host. Please ensure kernel is new enough and has overlayfs support loaded.") + log.Error("'overlay' not found as a supported filesystem on this host. Please ensure kernel is new enough and has overlay support loaded.") return graphdriver.ErrNotSupported } @@ -274,7 +273,7 @@ func (d *Driver) Get(id string, mountLabel string) (string, error) { mergedDir := path.Join(dir, "merged") opts := fmt.Sprintf("lowerdir=%s,upperdir=%s,workdir=%s", lowerDir, upperDir, workDir) - if err := syscall.Mount("overlayfs", mergedDir, "overlayfs", 0, label.FormatMountLabel(opts, mountLabel)); err != nil { + if err := syscall.Mount("overlay", mergedDir, "overlay", 0, label.FormatMountLabel(opts, mountLabel)); err != nil { return "", err } mount.path = mergedDir From fb06e1c6ebcf5408e6a7d0bfcf9264571eab69a1 Mon Sep 17 00:00:00 2001 From: Erik Hollensbe Date: Tue, 2 Dec 2014 03:02:25 -0800 Subject: [PATCH 496/592] graphdb: initialize the database semi-idempotently on every connection. 
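The idea, shown in the diff below, is that schema creation and the root rows can be reapplied on every open instead of only when the database file is new. A rough standalone sketch of that pattern with `database/sql` follows; the table definition here is illustrative and not Docker's actual schema.

```
package main

import (
	"database/sql"

	_ "code.google.com/p/gosqlite/sqlite3" // registers the sqlite driver used above
)

// initDB can be called on every connection: CREATE ... IF NOT EXISTS plus a
// delete-then-insert of the root row makes the whole sequence repeatable.
func initDB(conn *sql.DB) error {
	if _, err := conn.Exec("CREATE TABLE IF NOT EXISTS entity (id text NOT NULL PRIMARY KEY)"); err != nil {
		return err
	}
	if _, err := conn.Exec("BEGIN"); err != nil {
		return err
	}
	if _, err := conn.Exec("DELETE FROM entity WHERE id = ?", "0"); err != nil {
		conn.Exec("ROLLBACK")
		return err
	}
	if _, err := conn.Exec("INSERT INTO entity (id) VALUES (?)", "0"); err != nil {
		conn.Exec("ROLLBACK")
		return err
	}
	_, err := conn.Exec("COMMIT")
	return err
}

func main() {
	conn, err := sql.Open("sqlite3", "/tmp/graph.db")
	if err != nil {
		panic(err)
	}
	defer conn.Close()
	// Safe to run more than once against the same file.
	if err := initDB(conn); err != nil {
		panic(err)
	}
	if err := initDB(conn); err != nil {
		panic(err)
	}
}
```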
Docker-DCO-1.1-Signed-off-by: Erik Hollensbe (github: erikh) --- pkg/graphdb/conn_sqlite3.go | 18 +-------- pkg/graphdb/graphdb.go | 76 +++++++++++++++++++++---------------- pkg/graphdb/graphdb_test.go | 2 +- 3 files changed, 45 insertions(+), 51 deletions(-) diff --git a/pkg/graphdb/conn_sqlite3.go b/pkg/graphdb/conn_sqlite3.go index b6a8027a81..455790ac28 100644 --- a/pkg/graphdb/conn_sqlite3.go +++ b/pkg/graphdb/conn_sqlite3.go @@ -4,31 +4,15 @@ package graphdb import ( "database/sql" - "os" _ "code.google.com/p/gosqlite/sqlite3" // registers sqlite ) func NewSqliteConn(root string) (*Database, error) { - initDatabase := false - - stat, err := os.Stat(root) - if err != nil { - if os.IsNotExist(err) { - initDatabase = true - } else { - return nil, err - } - } - - if stat != nil && stat.Size() == 0 { - initDatabase = true - } - conn, err := sql.Open("sqlite3", root) if err != nil { return nil, err } - return NewDatabase(conn, initDatabase) + return NewDatabase(conn) } diff --git a/pkg/graphdb/graphdb.go b/pkg/graphdb/graphdb.go index 450bd508eb..62342033ac 100644 --- a/pkg/graphdb/graphdb.go +++ b/pkg/graphdb/graphdb.go @@ -73,45 +73,55 @@ func IsNonUniqueNameError(err error) bool { } // Create a new graph database initialized with a root entity -func NewDatabase(conn *sql.DB, init bool) (*Database, error) { +func NewDatabase(conn *sql.DB) (*Database, error) { if conn == nil { return nil, fmt.Errorf("Database connection cannot be nil") } db := &Database{conn: conn} - if init { - if _, err := conn.Exec(createEntityTable); err != nil { - return nil, err - } - if _, err := conn.Exec(createEdgeTable); err != nil { - return nil, err - } - if _, err := conn.Exec(createEdgeIndices); err != nil { - return nil, err - } - - rollback := func() { - conn.Exec("ROLLBACK") - } - - // Create root entities - if _, err := conn.Exec("BEGIN"); err != nil { - return nil, err - } - if _, err := conn.Exec("INSERT INTO entity (id) VALUES (?);", "0"); err != nil { - rollback() - return nil, err - } - - if _, err := conn.Exec("INSERT INTO edge (entity_id, name) VALUES(?,?);", "0", "/"); err != nil { - rollback() - return nil, err - } - - if _, err := conn.Exec("COMMIT"); err != nil { - return nil, err - } + if _, err := conn.Exec(createEntityTable); err != nil { + return nil, err } + if _, err := conn.Exec(createEdgeTable); err != nil { + return nil, err + } + if _, err := conn.Exec(createEdgeIndices); err != nil { + return nil, err + } + + rollback := func() { + conn.Exec("ROLLBACK") + } + + // Create root entities + if _, err := conn.Exec("BEGIN"); err != nil { + return nil, err + } + + if _, err := conn.Exec("DELETE FROM entity where id = ?", "0"); err != nil { + rollback() + return nil, err + } + + if _, err := conn.Exec("INSERT INTO entity (id) VALUES (?);", "0"); err != nil { + rollback() + return nil, err + } + + if _, err := conn.Exec("DELETE FROM edge where entity_id=? 
and name=?", "0", "/"); err != nil { + rollback() + return nil, err + } + + if _, err := conn.Exec("INSERT INTO edge (entity_id, name) VALUES(?,?);", "0", "/"); err != nil { + rollback() + return nil, err + } + + if _, err := conn.Exec("COMMIT"); err != nil { + return nil, err + } + return db, nil } diff --git a/pkg/graphdb/graphdb_test.go b/pkg/graphdb/graphdb_test.go index 7568e66de4..f22828560c 100644 --- a/pkg/graphdb/graphdb_test.go +++ b/pkg/graphdb/graphdb_test.go @@ -14,7 +14,7 @@ import ( func newTestDb(t *testing.T) (*Database, string) { p := path.Join(os.TempDir(), "sqlite.db") conn, err := sql.Open("sqlite3", p) - db, err := NewDatabase(conn, true) + db, err := NewDatabase(conn) if err != nil { t.Fatal(err) } From 46437d1a60278d1acfe32412f69b3c22749becf2 Mon Sep 17 00:00:00 2001 From: Jeff Anderson Date: Mon, 1 Dec 2014 10:49:27 -0700 Subject: [PATCH 497/592] Update dockerimages.md remove 'public registry' Signed-off-by: Jeff Anderson --- docs/sources/userguide/dockerimages.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/sources/userguide/dockerimages.md b/docs/sources/userguide/dockerimages.md index 157c578d97..ead6d82db7 100644 --- a/docs/sources/userguide/dockerimages.md +++ b/docs/sources/userguide/dockerimages.md @@ -13,7 +13,7 @@ image and the `training/webapp` image. We've also discovered that Docker stores downloaded images on the Docker host. If an image isn't already present on the host then it'll be downloaded from a registry: by default the -[Docker Hub Registry](https://registry.hub.docker.com) public registry. +[Docker Hub Registry](https://registry.hub.docker.com). In this section we're going to explore Docker images a bit more including: From 59da197de8f040eed7560747476f97ce8afad293 Mon Sep 17 00:00:00 2001 From: Arnaud Porterie Date: Tue, 2 Dec 2014 10:47:57 -0800 Subject: [PATCH 498/592] Remove unused Engine.Logf The `Engine.Logf` method was unused and confusing. Signed-off-by: Arnaud Porterie --- daemon/daemon.go | 1 - engine/engine.go | 9 --------- engine/engine_test.go | 10 ---------- 3 files changed, 20 deletions(-) diff --git a/daemon/daemon.go b/daemon/daemon.go index 667b2cb4cd..fe6e0eb2aa 100644 --- a/daemon/daemon.go +++ b/daemon/daemon.go @@ -926,7 +926,6 @@ func NewDaemonFromDirectory(config *Config, eng *engine.Engine) (*Daemon, error) eng.OnShutdown(func() { // FIXME: if these cleanup steps can be called concurrently, register // them as separate handlers to speed up total shutdown time - // FIXME: use engine logging instead of log.Errorf if err := daemon.shutdown(); err != nil { log.Errorf("daemon.shutdown(): %s", err) } diff --git a/engine/engine.go b/engine/engine.go index 769f644a17..26f9953d66 100644 --- a/engine/engine.go +++ b/engine/engine.go @@ -11,7 +11,6 @@ import ( "time" "github.com/docker/docker/pkg/ioutils" - "github.com/docker/docker/pkg/timeutils" "github.com/docker/docker/utils" ) @@ -251,11 +250,3 @@ func (eng *Engine) ParseJob(input string) (*Job, error) { job.Env().Init(&env) return job, nil } - -func (eng *Engine) Logf(format string, args ...interface{}) (n int, err error) { - if !eng.Logging { - return 0, nil - } - prefixedFormat := fmt.Sprintf("[%s] [%s] %s\n", time.Now().Format(timeutils.RFC3339NanoFixed), eng, strings.TrimRight(format, "\n")) - return fmt.Fprintf(eng.Stderr, prefixedFormat, args...) 
-} diff --git a/engine/engine_test.go b/engine/engine_test.go index 92f3757251..7ab2f8fc0d 100644 --- a/engine/engine_test.go +++ b/engine/engine_test.go @@ -99,16 +99,6 @@ func TestEngineString(t *testing.T) { } } -func TestEngineLogf(t *testing.T) { - eng := New() - input := "Test log line" - if n, err := eng.Logf("%s\n", input); err != nil { - t.Fatal(err) - } else if n < len(input) { - t.Fatalf("Test: Logf() should print at least as much as the input\ninput=%d\nprinted=%d", len(input), n) - } -} - func TestParseJob(t *testing.T) { eng := New() // Verify that the resulting job calls to the right place From bcef3535579b2e9a8f672626dd014b77ed44c5c0 Mon Sep 17 00:00:00 2001 From: Joffrey F Date: Tue, 2 Dec 2014 13:42:04 -0800 Subject: [PATCH 499/592] Webhooks documentation: second pass addressing @fredlf's comments Signed-off-by: Joffrey F --- docs/sources/docker-hub/repos.md | 17 +++++++++++++---- docs/sources/userguide/dockerrepos.md | 8 ++++---- 2 files changed, 17 insertions(+), 8 deletions(-) diff --git a/docs/sources/docker-hub/repos.md b/docs/sources/docker-hub/repos.md index 8ee914bf42..d0c2faea19 100644 --- a/docs/sources/docker-hub/repos.md +++ b/docs/sources/docker-hub/repos.md @@ -140,12 +140,18 @@ similar to the example shown below. } Webhooks allow you to notify people, services and other applications of -new updates to your images and repositories. +new updates to your images and repositories. To get started adding webhooks, +go to the desired repo in the Hub, and click "Webhooks" under the "Settings" +box. ### Webhook chains -Webhook chains allow you to chain calls to multiple services. After clicking the -"Add webhook" button, simply add as many URLs as necessary in your chain. +Webhook chains allow you to chain calls to multiple services. For example, +you can use this to trigger a deployment of your container only after +it has been successfully tested, then update a separate Changelog once the +deployment is complete. +After clicking the "Add webhook" button, simply add as many URLs as necessary +in your chain. The first webhook in a chain will be called after a successful push. Subsequent URLs will be contacted after the callback has been validated. @@ -159,9 +165,12 @@ In order to validate a callback in a webhook chain, you need to > **Note**: A chain request will only be considered complete once the last > callback has been validated. +To help you debug or simply view the results of your webhook(s), +view the "History" of the webhook available on its settings page. + #### Callback JSON data -Recognized parameters in callback data are as follow: +The following parameters are recognized in callback data: * `state` (required): Accepted values are `success`, `failure` and `error`. If the state isn't `success`, the webhook chain will be interrupted. * `description`: A string containing miscellaneous information that will be available on the Docker Hub. Maximum 255 characters. diff --git a/docs/sources/userguide/dockerrepos.md b/docs/sources/userguide/dockerrepos.md index 33aaf0fa55..9b5f9783e1 100644 --- a/docs/sources/userguide/dockerrepos.md +++ b/docs/sources/userguide/dockerrepos.md @@ -24,12 +24,12 @@ Docker itself provides access to Docker Hub services via the `docker search`, ### Account creation and login Typically, you'll want to start by creating an account on Docker Hub (if you haven't -already) and logging in. You can create your account directly on +already) and logging in. 
You can create your account directly on [Docker Hub](https://hub.docker.com/account/signup/), or by running: $ sudo docker login -This will prompt you for a user name, which will become the public namespace for your +This will prompt you for a user name, which will become the public namespace for your public repositories. If your user name is available, Docker will prompt you to enter a password and your e-mail address. It will then automatically log you in. You can now commit and @@ -162,8 +162,8 @@ event when an image or updated image is pushed to the repository. With a webhook you can specify a target URL and a JSON payload that will be delivered when the image is pushed. -See more information on webhooks -[here](http://docs.docker.com/docker-hub/repos/#webhooks) +See the Docker Hub documentation for [more information on +webhooks](http://docs.docker.com/docker-hub/repos/#webhooks) ## Next steps From 8d9e25dbddc189f4094e0f25a90f2b8a25deec9d Mon Sep 17 00:00:00 2001 From: Josh Hawn Date: Tue, 2 Dec 2014 15:23:49 -0800 Subject: [PATCH 500/592] Fix TarSum iteration test I noticed that 3 of the tarsum test cases had expected a tarsum with a sha256 hash of e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855 As I've been working with sha256 quite a bit lately, it struck me that this is the initial digest value for sha256, which means that no data was processed. However, these tests *do* process data. It turns out that there was a bug in the test handling code which did not wait for tarsum to end completely. This patch corrects these test cases. I'm unaware of anywhere else in the code base where this would be an issue, though we definitily need to look out in the future to ensure we are completing tarsum reads (waiting for EOF). Docker-DCO-1.1-Signed-off-by: Josh Hawn (github: jlhawn) --- pkg/tarsum/tarsum_test.go | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/pkg/tarsum/tarsum_test.go b/pkg/tarsum/tarsum_test.go index 5e7f042a2f..41e1b9b7c4 100644 --- a/pkg/tarsum/tarsum_test.go +++ b/pkg/tarsum/tarsum_test.go @@ -337,7 +337,7 @@ func TestIteration(t *testing.T) { data []byte }{ { - "tarsum+sha256:e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855", + "tarsum+sha256:626c4a2e9a467d65c33ae81f7f3dedd4de8ccaee72af73223c4bc4718cbc7bbd", Version0, &tar.Header{ Name: "file.txt", @@ -349,7 +349,7 @@ func TestIteration(t *testing.T) { []byte(""), }, { - "tarsum.dev+sha256:e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855", + "tarsum.dev+sha256:6ffd43a1573a9913325b4918e124ee982a99c0f3cba90fc032a65f5e20bdd465", VersionDev, &tar.Header{ Name: "file.txt", @@ -361,7 +361,7 @@ func TestIteration(t *testing.T) { []byte(""), }, { - "tarsum.dev+sha256:e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855", + "tarsum.dev+sha256:b38166c059e11fb77bef30bf16fba7584446e80fcc156ff46d47e36c5305d8ef", VersionDev, &tar.Header{ Name: "another.txt", @@ -463,6 +463,7 @@ func renderSumForHeader(v Version, h *tar.Header, data []byte) (string, error) { for { hdr, err := tr.Next() if hdr == nil || err == io.EOF { + // Signals the end of the archive. break } if err != nil { @@ -471,7 +472,6 @@ func renderSumForHeader(v Version, h *tar.Header, data []byte) (string, error) { if _, err = io.Copy(ioutil.Discard, tr); err != nil { return "", err } - break // we're just reading one header ... 
} return ts.Sum(nil), nil } From a61a4a31882335020cfbffc49ffe274847a5c803 Mon Sep 17 00:00:00 2001 From: Michal Minar Date: Thu, 6 Nov 2014 15:09:09 +0100 Subject: [PATCH 501/592] Man: describe --icc option better Current description is misleading. It make an impression the --icc=false prevents containers to talk with each other. Signed-off-by: Michal Minar Docker-DCO-1.1-Signed-off-by: Michal Minar (github: SvenDowideit) --- contrib/completion/fish/docker.fish | 2 +- daemon/config.go | 2 +- docs/man/docker.1.md | 4 ++-- docs/sources/reference/commandline/cli.md | 2 +- 4 files changed, 5 insertions(+), 5 deletions(-) diff --git a/contrib/completion/fish/docker.fish b/contrib/completion/fish/docker.fish index 73c2966393..23c2085686 100644 --- a/contrib/completion/fish/docker.fish +++ b/contrib/completion/fish/docker.fish @@ -53,7 +53,7 @@ complete -c docker -f -n '__fish_docker_no_subcommand' -s d -l daemon -d 'Enable complete -c docker -f -n '__fish_docker_no_subcommand' -l dns -d 'Force docker to use specific DNS servers' complete -c docker -f -n '__fish_docker_no_subcommand' -s e -l exec-driver -d 'Force the docker runtime to use a specific exec driver' complete -c docker -f -n '__fish_docker_no_subcommand' -s g -l graph -d 'Path to use as the root of the docker runtime' -complete -c docker -f -n '__fish_docker_no_subcommand' -l icc -d 'Enable inter-container communication' +complete -c docker -f -n '__fish_docker_no_subcommand' -l icc -d 'Enable inter-container communication without any restriction' complete -c docker -f -n '__fish_docker_no_subcommand' -l ip -d 'Default IP address to use when binding container ports' complete -c docker -f -n '__fish_docker_no_subcommand' -l ip-forward -d 'Disable enabling of net.ipv4.ip_forward' complete -c docker -f -n '__fish_docker_no_subcommand' -l iptables -d "Disable docker's addition of iptables rules" diff --git a/daemon/config.go b/daemon/config.go index 785fd4d290..dfbb781136 100644 --- a/daemon/config.go +++ b/daemon/config.go @@ -59,7 +59,7 @@ func (config *Config) InstallFlags() { flag.StringVar(&config.BridgeIface, []string{"b", "-bridge"}, "", "Attach containers to a pre-existing network bridge\nuse 'none' to disable container networking") flag.StringVar(&config.FixedCIDR, []string{"-fixed-cidr"}, "", "IPv4 subnet for fixed IPs (ex: 10.20.0.0/16)\nthis subnet must be nested in the bridge subnet (which is defined by -b or --bip)") opts.ListVar(&config.InsecureRegistries, []string{"-insecure-registry"}, "Enable insecure communication with specified registries (no certificate verification for HTTPS and enable HTTP fallback) (e.g., localhost:5000 or 10.20.0.0/16)") - flag.BoolVar(&config.InterContainerCommunication, []string{"#icc", "-icc"}, true, "Enable inter-container communication") + flag.BoolVar(&config.InterContainerCommunication, []string{"#icc", "-icc"}, true, "Enable inter-container communication without any restriction") flag.StringVar(&config.GraphDriver, []string{"s", "-storage-driver"}, "", "Force the Docker runtime to use a specific storage driver") flag.StringVar(&config.ExecDriver, []string{"e", "-exec-driver"}, "native", "Force the Docker runtime to use a specific exec driver") flag.BoolVar(&config.EnableSelinuxSupport, []string{"-selinux-enabled"}, false, "Enable selinux support. 
SELinux does not presently support the BTRFS storage driver") diff --git a/docs/man/docker.1.md b/docs/man/docker.1.md index c8d28b2c23..a8df208f00 100644 --- a/docs/man/docker.1.md +++ b/docs/man/docker.1.md @@ -54,7 +54,7 @@ unix://[/path/to/socket] to use. IPv4 subnet for fixed IPs (ex: 10.20.0.0/16); this subnet must be nested in the bridge subnet (which is defined by \-b or \-\-bip) **--icc**=*true*|*false* - Enable inter\-container communication. Default is true. + Enable inter\-container communication without any restriction. If disabled, containers can still be linked together using **--link** option (see **docker-run(1)**). Default is true. **--ip**="" Default IP address to use when binding container ports. Default is `0.0.0.0`. @@ -77,7 +77,7 @@ unix://[/path/to/socket] to use. **-p**="" Path to use for daemon PID file. Default is `/var/run/docker.pid` -**--registry-mirror=:// +**--registry-mirror**=:// Prepend a registry mirror to be used for image pulls. May be specified multiple times. **-s**="" diff --git a/docs/sources/reference/commandline/cli.md b/docs/sources/reference/commandline/cli.md index d668725d14..91707986ad 100644 --- a/docs/sources/reference/commandline/cli.md +++ b/docs/sources/reference/commandline/cli.md @@ -69,7 +69,7 @@ expect an integer, and they can only be specified once. use '' (the empty string) to disable setting of a group -g, --graph="/var/lib/docker" Path to use as the root of the Docker runtime -H, --host=[] The socket(s) to bind to in daemon mode or connect to in client mode, specified using one or more tcp://host:port, unix:///path/to/socket, fd://* or fd://socketfd. - --icc=true Enable inter-container communication + --icc=true Enable inter-container communication without any restriction --insecure-registry=[] Enable insecure communication with specified registries (disables certificate verification for HTTPS and enables HTTP fallback) (e.g., localhost:5000 or 10.20.0.0/16) --ip=0.0.0.0 Default IP address to use when binding container ports --ip-forward=true Enable net.ipv4.ip_forward From aa00ad47e243b41b863354e6360a5d3a46aa3212 Mon Sep 17 00:00:00 2001 From: Michal Minar Date: Thu, 6 Nov 2014 16:58:07 +0100 Subject: [PATCH 502/592] Man: describe storage options Documented --storage-opt=[] option in man page. Content taken from: daemon/graphdriver/devmapper/README.md Signed-off-by: Michal Minar Docker-DCO-1.1-Signed-off-by: Michal Minar (github: SvenDowideit) --- docs/man/docker.1.md | 66 ++++++++++++++++++++++++++++++++++++++++---- 1 file changed, 61 insertions(+), 5 deletions(-) diff --git a/docs/man/docker.1.md b/docs/man/docker.1.md index a8df208f00..84b958f26e 100644 --- a/docs/man/docker.1.md +++ b/docs/man/docker.1.md @@ -83,6 +83,9 @@ unix://[/path/to/socket] to use. **-s**="" Force the Docker runtime to use a specific storage driver. +**--storage-opt**=[] + Set storage driver options. See STORAGE DRIVER OPTIONS. + **-v**=*true*|*false* Print version information and quit. Default is false. @@ -202,13 +205,66 @@ inside it) **docker-wait(1)** Block until a container stops, then print its exit code -# EXAMPLES +# STORAGE DRIVER OPTIONS -For specific examples please see the man page for the specific Docker command. -For example: +Options to storage backend can be specified with **--storage-opt** flags. The +only backend which currently takes options is *devicemapper*. Therefore use these +flags with **-s=**devicemapper. 
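Each **--storage-opt** argument is a plain `key=value` string (for example `dm.basesize=20G`) that the daemon forwards to the selected storage driver, which then splits and interprets it. The following is a minimal Go sketch of that split, illustrative only and not the daemon's actual parsing code:

```go
package main

import (
	"fmt"
	"strings"
)

// parseStorageOpt splits a "--storage-opt" style argument such as
// "dm.basesize=20G" into its key and value. Illustrative sketch only.
func parseStorageOpt(opt string) (key, val string, err error) {
	parts := strings.SplitN(opt, "=", 2)
	if len(parts) != 2 {
		return "", "", fmt.Errorf("invalid storage option %q, expected key=value", opt)
	}
	return strings.TrimSpace(parts[0]), strings.TrimSpace(parts[1]), nil
}

func main() {
	k, v, _ := parseStorageOpt("dm.basesize=20G")
	fmt.Println(k, v) // dm.basesize 20G
}
```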
+ +Here is the list of *devicemapper* options: + +#### dm.basesize + Specifies the size to use when creating the base device, which limits the size of images and containers. The default value is 10G. Note, thin devices are inherently "sparse", so a 10G device which is mostly empty doesn't use 10 GB of space on the pool. However, the filesystem will use more space for the empty case the larger the device is. **Warning**: This value affects the system-wide "base" empty filesystem that may already be initialized and inherited by pulled images. + +#### dm.loopdatasize + Specifies the size to use when creating the loopback file for the "data" device which is used for the thin pool. The default size is 100G. Note that the file is sparse, so it will not initially take up this much space. + +#### dm.loopmetadatasize + Specifies the size to use when creating the loopback file for the "metadadata" device which is used for the thin pool. The default size is 2G. Note that the file is sparse, so it will not initially take up this much space. + +#### dm.fs + Specifies the filesystem type to use for the base device. The supported options are "ext4" and "xfs". The default is "ext4" + +#### dm.mkfsarg + Specifies extra mkfs arguments to be used when creating the base device. + +#### dm.mountopt + Specifies extra mount options used when mounting the thin devices. + +#### dm.datadev + Specifies a custom blockdevice to use for data for the thin pool. + + If using a block device for device mapper storage, ideally both datadev and metadatadev should be specified to completely avoid using the loopback device. + +#### dm.metadatadev + Specifies a custom blockdevice to use for metadata for the thin pool. + + For best performance the metadata should be on a different spindle than the data, or even better on an SSD. + + If setting up a new metadata pool it is required to be valid. This can be achieved by zeroing the first 4k to indicate empty metadata, like this: + + dd if=/dev/zero of=/dev/metadata_dev bs=4096 count=1 + +#### dm.blocksize + Specifies a custom blocksize to use for the thin pool. The default blocksize is 64K. + +#### dm.blkdiscard + Enables or disables the use of blkdiscard when removing devicemapper devices. This is enabled by default (only) if using loopback devices and is required to res-parsify the loopback file on image/container removal. + + Disabling this on loopback can lead to *much* faster container removal times, but will make the space used in `/var/lib/docker` directory not be returned to the system for other use when containers are removed. + +# EXAMPLES +Launching docker daemon with *devicemapper* backend with particular block devices for data and metadata: + + docker -d -s=devicemapper \ + --storage-opt dm.datadev=/dev/vdb \ + --storage-opt dm.metadatadev=/dev/vdc \ + --storage-opt dm.basesize=20G + +#### Client +For specific client examples please see the man page for the specific Docker command. For example: man docker run # HISTORY -April 2014, Originally compiled by William Henry (whenry at redhat dot com) based - on docker.com source material and internal work. +April 2014, Originally compiled by William Henry (whenry at redhat dot com) based on docker.com source material and internal work. From a2aa78f6ec9e0a654f92c57336f8e487477d4040 Mon Sep 17 00:00:00 2001 From: Michal Minar Date: Mon, 10 Nov 2014 11:30:30 +0100 Subject: [PATCH 503/592] Doc: described storage-driver options in cli reference Documented --storage-opt=[] option in cli reference page. 
Content taken from: daemon/graphdriver/devmapper/README.md Signed-off-by: Michal Minar Docker-DCO-1.1-Signed-off-by: Michal Minar (github: SvenDowideit) --- docs/sources/reference/commandline/cli.md | 146 ++++++++++++++++++++-- 1 file changed, 139 insertions(+), 7 deletions(-) diff --git a/docs/sources/reference/commandline/cli.md b/docs/sources/reference/commandline/cli.md index 91707986ad..64cb8d300d 100644 --- a/docs/sources/reference/commandline/cli.md +++ b/docs/sources/reference/commandline/cli.md @@ -164,13 +164,16 @@ serious kernel crashes. However, `aufs` is also the only storage driver that all containers to share executable and shared library memory, so is a useful choice when running thousands of containers with the same program or libraries. -The `devicemapper` driver uses thin provisioning and Copy on Write (CoW) snapshots. -This driver will create a 100GB sparse file containing all your images and -containers. Each container will be limited to a 10 GB thin volume, and either of -these will require tuning - see [~jpetazzo/Resizing Docker containers with the -Device Mapper plugin]( http://jpetazzo.github.io/2014/01/29/docker-device-mapper-resize/) -To tell the Docker daemon to use `devicemapper`, use -`docker -d -s devicemapper`. +The `devicemapper` driver uses thin provisioning and Copy on Write (CoW) +snapshots. For each devicemapper graph location – typically +`/var/lib/docker/devicemapper` – a thin pool is created based on two block +devices, one for data and one for metadata. By default, these block devices +are created automatically by using loopback mounts of automatically created +sparse files. Refer to [Storage driver options](#storage-driver-options) below +for a way how to customize this setup. +[~jpetazzo/Resizing Docker containers with the Device Mapper plugin]( +http://jpetazzo.github.io/2014/01/29/docker-device-mapper-resize/) article +explains how to tune your existing setup without the use of options. The `btrfs` driver is very fast for `docker build` - but like `devicemapper` does not share executable memory between devices. Use `docker -d -s btrfs -g /mnt/btrfs_partition`. @@ -179,6 +182,135 @@ The `overlayfs` is a very fast union filesystem. It is now merged in the main Linux kernel as of [3.18.0](https://lkml.org/lkml/2014/10/26/137). Call `docker -d -s overlayfs` to use it. +#### Storage driver options + +Particular storage-driver can be configured with options specified with +`--storage-opt` flags. The only driver accepting options is `devicemapper` as +of now. All its options are prefixed with `dm`. + +Currently supported options are: + + * `dm.basesize` + + Specifies the size to use when creating the base device, which limits the + size of images and containers. The default value is 10G. Note, thin devices + are inherently "sparse", so a 10G device which is mostly empty doesn't use + 10 GB of space on the pool. However, the filesystem will use more space for + the empty case the larger the device is. + + **Warning**: This value affects the system-wide "base" empty filesystem + that may already be initialized and inherited by pulled images. Typically, + a change to this value will require additional steps to take effect: + + $ sudo service docker stop + $ sudo rm -rf /var/lib/docker + $ sudo service docker start + + Example use: + + $ sudo docker -d --storage-opt dm.basesize=20G + + * `dm.loopdatasize` + + Specifies the size to use when creating the loopback file for the "data" + device which is used for the thin pool. The default size is 100G. 
Note that + the file is sparse, so it will not initially take up this much space. + + Example use: + + $ sudo docker -d --storage-opt dm.loopdatasize=200G + + * `dm.loopmetadatasize` + + Specifies the size to use when creating the loopback file for the + "metadata" device which is used for the thin pool. The default size is 2G. + Note that the file is sparse, so it will not initially take up this much + space. + + Example use: + + $ sudo docker -d --storage-opt dm.loopmetadatasize=4G + + * `dm.fs` + + Specifies the filesystem type to use for the base device. The supported + options are "ext4" and "xfs". The default is "ext4" + + Example use: + + $ sudo docker -d --storage-opt dm.fs=xfs + + * `dm.mkfsarg` + + Specifies extra mkfs arguments to be used when creating the base device. + + Example use: + + $ sudo docker -d --storage-opt "dm.mkfsarg=-O ^has_journal" + + * `dm.mountopt` + + Specifies extra mount options used when mounting the thin devices. + + Example use: + + $ sudo docker -d --storage-opt dm.mountopt=nodiscard + + * `dm.datadev` + + Specifies a custom blockdevice to use for data for the thin pool. + + If using a block device for device mapper storage, ideally both datadev and + metadatadev should be specified to completely avoid using the loopback + device. + + Example use: + + $ sudo docker -d \ + --storage-opt dm.datadev=/dev/sdb1 \ + --storage-opt dm.metadatadev=/dev/sdc1 + + * `dm.metadatadev` + + Specifies a custom blockdevice to use for metadata for the thin pool. + + For best performance the metadata should be on a different spindle than the + data, or even better on an SSD. + + If setting up a new metadata pool it is required to be valid. This can be + achieved by zeroing the first 4k to indicate empty metadata, like this: + + $ dd if=/dev/zero of=$metadata_dev bs=4096 count=1 + + Example use: + + $ sudo docker -d \ + --storage-opt dm.datadev=/dev/sdb1 \ + --storage-opt dm.metadatadev=/dev/sdc1 + + * `dm.blocksize` + + Specifies a custom blocksize to use for the thin pool. The default + blocksize is 64K. + + Example use: + + $ sudo docker -d --storage-opt dm.blocksize=512K + + * `dm.blkdiscard` + + Enables or disables the use of blkdiscard when removing devicemapper + devices. This is enabled by default (only) if using loopback devices and is + required to res-parsify the loopback file on image/container removal. + + Disabling this on loopback can lead to *much* faster container removal + times, but will make the space used in `/var/lib/docker` directory not be + returned to the system for other use when containers are removed. + + Example use: + + $ sudo docker -d --storage-opt dm.blkdiscard=false + ### Docker exec-driver option The Docker daemon uses a specifically built `libcontainer` execution driver as its From a74c12177f02be97730b83ded9521a14ba568bcd Mon Sep 17 00:00:00 2001 From: Sven Dowideit Date: Thu, 20 Nov 2014 16:36:37 -0800 Subject: [PATCH 504/592] 80-char limit Signed-off-by: Sven Dowideit Docker-DCO-1.1-Signed-off-by: Sven Dowideit (github: SvenDowideit) --- docs/man/docker.1.md | 53 +++++++++++++++++++++++++++++++------------- 1 file changed, 37 insertions(+), 16 deletions(-) diff --git a/docs/man/docker.1.md b/docs/man/docker.1.md index 84b958f26e..4aac7e429d 100644 --- a/docs/man/docker.1.md +++ b/docs/man/docker.1.md @@ -214,47 +214,67 @@ flags with **-s=**devicemapper. Here is the list of *devicemapper* options: #### dm.basesize - Specifies the size to use when creating the base device, which limits the size of images and containers. 
The default value is 10G. Note, thin devices are inherently "sparse", so a 10G device which is mostly empty doesn't use 10 GB of space on the pool. However, the filesystem will use more space for the empty case the larger the device is. **Warning**: This value affects the system-wide "base" empty filesystem that may already be initialized and inherited by pulled images. +Specifies the size to use when creating the base device, which limits the size +of images and containers. The default value is 10G. Note, thin devices are +inherently "sparse", so a 10G device which is mostly empty doesn't use 10 GB +of space on the pool. However, the filesystem will use more space for the empty +case the larger the device is. **Warning**: This value affects the system-wide +"base" empty filesystem that may already be initialized and inherited by pulled +images. #### dm.loopdatasize - Specifies the size to use when creating the loopback file for the "data" device which is used for the thin pool. The default size is 100G. Note that the file is sparse, so it will not initially take up this much space. +Specifies the size to use when creating the loopback file for the "data" +device which is used for the thin pool. The default size is 100G. Note that the +file is sparse, so it will not initially take up this much space. #### dm.loopmetadatasize - Specifies the size to use when creating the loopback file for the "metadadata" device which is used for the thin pool. The default size is 2G. Note that the file is sparse, so it will not initially take up this much space. +Specifies the size to use when creating the loopback file for the "metadadata" +device which is used for the thin pool. The default size is 2G. Note that the +file is sparse, so it will not initially take up this much space. #### dm.fs - Specifies the filesystem type to use for the base device. The supported options are "ext4" and "xfs". The default is "ext4" +Specifies the filesystem type to use for the base device. The supported +options are "ext4" and "xfs". The default is "ext4" #### dm.mkfsarg - Specifies extra mkfs arguments to be used when creating the base device. +Specifies extra mkfs arguments to be used when creating the base device. #### dm.mountopt - Specifies extra mount options used when mounting the thin devices. +Specifies extra mount options used when mounting the thin devices. #### dm.datadev - Specifies a custom blockdevice to use for data for the thin pool. +Specifies a custom blockdevice to use for data for the thin pool. - If using a block device for device mapper storage, ideally both datadev and metadatadev should be specified to completely avoid using the loopback device. +If using a block device for device mapper storage, ideally both datadev and +metadatadev should be specified to completely avoid using the loopback device. #### dm.metadatadev - Specifies a custom blockdevice to use for metadata for the thin pool. +Specifies a custom blockdevice to use for metadata for the thin pool. - For best performance the metadata should be on a different spindle than the data, or even better on an SSD. +For best performance the metadata should be on a different spindle than the +data, or even better on an SSD. - If setting up a new metadata pool it is required to be valid. This can be achieved by zeroing the first 4k to indicate empty metadata, like this: +If setting up a new metadata pool it is required to be valid. 
This can be +achieved by zeroing the first 4k to indicate empty metadata, like this: dd if=/dev/zero of=/dev/metadata_dev bs=4096 count=1 #### dm.blocksize - Specifies a custom blocksize to use for the thin pool. The default blocksize is 64K. +Specifies a custom blocksize to use for the thin pool. The default blocksize +is 64K. #### dm.blkdiscard - Enables or disables the use of blkdiscard when removing devicemapper devices. This is enabled by default (only) if using loopback devices and is required to res-parsify the loopback file on image/container removal. +Enables or disables the use of blkdiscard when removing devicemapper devices. +This is enabled by default (only) if using loopback devices and is required to +res-parsify the loopback file on image/container removal. - Disabling this on loopback can lead to *much* faster container removal times, but will make the space used in `/var/lib/docker` directory not be returned to the system for other use when containers are removed. +Disabling this on loopback can lead to *much* faster container removal times, +but will make the space used in `/var/lib/docker` directory not be returned to +the system for other use when containers are removed. # EXAMPLES -Launching docker daemon with *devicemapper* backend with particular block devices for data and metadata: +Launching docker daemon with *devicemapper* backend with particular block devices +for data and metadata: docker -d -s=devicemapper \ --storage-opt dm.datadev=/dev/vdb \ @@ -262,7 +282,8 @@ Launching docker daemon with *devicemapper* backend with particular block device --storage-opt dm.basesize=20G #### Client -For specific client examples please see the man page for the specific Docker command. For example: +For specific client examples please see the man page for the specific Docker +command. 
For example: man docker run From 94d67d5d5b300c346e17503810d521c91db89c14 Mon Sep 17 00:00:00 2001 From: Sven Dowideit Date: Thu, 20 Nov 2014 16:48:27 -0800 Subject: [PATCH 505/592] Try out a different phrase for --icc Signed-off-by: Sven Dowideit Docker-DCO-1.1-Signed-off-by: Sven Dowideit (github: SvenDowideit) --- contrib/completion/fish/docker.fish | 2 +- daemon/config.go | 2 +- docs/man/docker.1.md | 2 +- docs/sources/reference/commandline/cli.md | 2 +- 4 files changed, 4 insertions(+), 4 deletions(-) diff --git a/contrib/completion/fish/docker.fish b/contrib/completion/fish/docker.fish index 23c2085686..aa0b88678b 100644 --- a/contrib/completion/fish/docker.fish +++ b/contrib/completion/fish/docker.fish @@ -53,7 +53,7 @@ complete -c docker -f -n '__fish_docker_no_subcommand' -s d -l daemon -d 'Enable complete -c docker -f -n '__fish_docker_no_subcommand' -l dns -d 'Force docker to use specific DNS servers' complete -c docker -f -n '__fish_docker_no_subcommand' -s e -l exec-driver -d 'Force the docker runtime to use a specific exec driver' complete -c docker -f -n '__fish_docker_no_subcommand' -s g -l graph -d 'Path to use as the root of the docker runtime' -complete -c docker -f -n '__fish_docker_no_subcommand' -l icc -d 'Enable inter-container communication without any restriction' +complete -c docker -f -n '__fish_docker_no_subcommand' -l icc -d 'Allow unrestricted inter-container (and host) communication' complete -c docker -f -n '__fish_docker_no_subcommand' -l ip -d 'Default IP address to use when binding container ports' complete -c docker -f -n '__fish_docker_no_subcommand' -l ip-forward -d 'Disable enabling of net.ipv4.ip_forward' complete -c docker -f -n '__fish_docker_no_subcommand' -l iptables -d "Disable docker's addition of iptables rules" diff --git a/daemon/config.go b/daemon/config.go index dfbb781136..4571c8d1ad 100644 --- a/daemon/config.go +++ b/daemon/config.go @@ -59,7 +59,7 @@ func (config *Config) InstallFlags() { flag.StringVar(&config.BridgeIface, []string{"b", "-bridge"}, "", "Attach containers to a pre-existing network bridge\nuse 'none' to disable container networking") flag.StringVar(&config.FixedCIDR, []string{"-fixed-cidr"}, "", "IPv4 subnet for fixed IPs (ex: 10.20.0.0/16)\nthis subnet must be nested in the bridge subnet (which is defined by -b or --bip)") opts.ListVar(&config.InsecureRegistries, []string{"-insecure-registry"}, "Enable insecure communication with specified registries (no certificate verification for HTTPS and enable HTTP fallback) (e.g., localhost:5000 or 10.20.0.0/16)") - flag.BoolVar(&config.InterContainerCommunication, []string{"#icc", "-icc"}, true, "Enable inter-container communication without any restriction") + flag.BoolVar(&config.InterContainerCommunication, []string{"#icc", "-icc"}, true, "Allow unrestricted inter-container (and host) communication") flag.StringVar(&config.GraphDriver, []string{"s", "-storage-driver"}, "", "Force the Docker runtime to use a specific storage driver") flag.StringVar(&config.ExecDriver, []string{"e", "-exec-driver"}, "native", "Force the Docker runtime to use a specific exec driver") flag.BoolVar(&config.EnableSelinuxSupport, []string{"-selinux-enabled"}, false, "Enable selinux support. SELinux does not presently support the BTRFS storage driver") diff --git a/docs/man/docker.1.md b/docs/man/docker.1.md index 4aac7e429d..69988f366a 100644 --- a/docs/man/docker.1.md +++ b/docs/man/docker.1.md @@ -54,7 +54,7 @@ unix://[/path/to/socket] to use. 
IPv4 subnet for fixed IPs (ex: 10.20.0.0/16); this subnet must be nested in the bridge subnet (which is defined by \-b or \-\-bip) **--icc**=*true*|*false* - Enable inter\-container communication without any restriction. If disabled, containers can still be linked together using **--link** option (see **docker-run(1)**). Default is true. + Allow unrestricted inter\-container (and host) communication. If disabled, containers can still be linked together using **--link** option (see **docker-run(1)**). Default is true. **--ip**="" Default IP address to use when binding container ports. Default is `0.0.0.0`. diff --git a/docs/sources/reference/commandline/cli.md b/docs/sources/reference/commandline/cli.md index 64cb8d300d..0200a6bde6 100644 --- a/docs/sources/reference/commandline/cli.md +++ b/docs/sources/reference/commandline/cli.md @@ -69,7 +69,7 @@ expect an integer, and they can only be specified once. use '' (the empty string) to disable setting of a group -g, --graph="/var/lib/docker" Path to use as the root of the Docker runtime -H, --host=[] The socket(s) to bind to in daemon mode or connect to in client mode, specified using one or more tcp://host:port, unix:///path/to/socket, fd://* or fd://socketfd. - --icc=true Enable inter-container communication without any restriction + --icc=true Allow unrestricted inter-container (and host) communication --insecure-registry=[] Enable insecure communication with specified registries (disables certificate verification for HTTPS and enables HTTP fallback) (e.g., localhost:5000 or 10.20.0.0/16) --ip=0.0.0.0 Default IP address to use when binding container ports --ip-forward=true Enable net.ipv4.ip_forward From 070a519a9b972ffffda62b82fbc27825d4a6f9b0 Mon Sep 17 00:00:00 2001 From: Sven Dowideit Date: Fri, 21 Nov 2014 14:17:13 -0800 Subject: [PATCH 506/592] fixes as per feedback Signed-off-by: Sven Dowideit Docker-DCO-1.1-Signed-off-by: Sven Dowideit (github: SvenDowideit) --- contrib/completion/fish/docker.fish | 2 +- daemon/config.go | 2 +- docs/man/docker.1.md | 4 ++-- docs/sources/reference/commandline/cli.md | 2 +- 4 files changed, 5 insertions(+), 5 deletions(-) diff --git a/contrib/completion/fish/docker.fish b/contrib/completion/fish/docker.fish index aa0b88678b..a082adc02c 100644 --- a/contrib/completion/fish/docker.fish +++ b/contrib/completion/fish/docker.fish @@ -53,7 +53,7 @@ complete -c docker -f -n '__fish_docker_no_subcommand' -s d -l daemon -d 'Enable complete -c docker -f -n '__fish_docker_no_subcommand' -l dns -d 'Force docker to use specific DNS servers' complete -c docker -f -n '__fish_docker_no_subcommand' -s e -l exec-driver -d 'Force the docker runtime to use a specific exec driver' complete -c docker -f -n '__fish_docker_no_subcommand' -s g -l graph -d 'Path to use as the root of the docker runtime' -complete -c docker -f -n '__fish_docker_no_subcommand' -l icc -d 'Allow unrestricted inter-container (and host) communication' +complete -c docker -f -n '__fish_docker_no_subcommand' -l icc -d 'Allow unrestricted inter-container and Docker daemon host communication' complete -c docker -f -n '__fish_docker_no_subcommand' -l ip -d 'Default IP address to use when binding container ports' complete -c docker -f -n '__fish_docker_no_subcommand' -l ip-forward -d 'Disable enabling of net.ipv4.ip_forward' complete -c docker -f -n '__fish_docker_no_subcommand' -l iptables -d "Disable docker's addition of iptables rules" diff --git a/daemon/config.go b/daemon/config.go index 4571c8d1ad..4d9041e895 100644 --- a/daemon/config.go +++ 
b/daemon/config.go @@ -59,7 +59,7 @@ func (config *Config) InstallFlags() { flag.StringVar(&config.BridgeIface, []string{"b", "-bridge"}, "", "Attach containers to a pre-existing network bridge\nuse 'none' to disable container networking") flag.StringVar(&config.FixedCIDR, []string{"-fixed-cidr"}, "", "IPv4 subnet for fixed IPs (ex: 10.20.0.0/16)\nthis subnet must be nested in the bridge subnet (which is defined by -b or --bip)") opts.ListVar(&config.InsecureRegistries, []string{"-insecure-registry"}, "Enable insecure communication with specified registries (no certificate verification for HTTPS and enable HTTP fallback) (e.g., localhost:5000 or 10.20.0.0/16)") - flag.BoolVar(&config.InterContainerCommunication, []string{"#icc", "-icc"}, true, "Allow unrestricted inter-container (and host) communication") + flag.BoolVar(&config.InterContainerCommunication, []string{"#icc", "-icc"}, true, "Allow unrestricted inter-container and Docker daemon host communication") flag.StringVar(&config.GraphDriver, []string{"s", "-storage-driver"}, "", "Force the Docker runtime to use a specific storage driver") flag.StringVar(&config.ExecDriver, []string{"e", "-exec-driver"}, "native", "Force the Docker runtime to use a specific exec driver") flag.BoolVar(&config.EnableSelinuxSupport, []string{"-selinux-enabled"}, false, "Enable selinux support. SELinux does not presently support the BTRFS storage driver") diff --git a/docs/man/docker.1.md b/docs/man/docker.1.md index 69988f366a..e07687c18d 100644 --- a/docs/man/docker.1.md +++ b/docs/man/docker.1.md @@ -54,7 +54,7 @@ unix://[/path/to/socket] to use. IPv4 subnet for fixed IPs (ex: 10.20.0.0/16); this subnet must be nested in the bridge subnet (which is defined by \-b or \-\-bip) **--icc**=*true*|*false* - Allow unrestricted inter\-container (and host) communication. If disabled, containers can still be linked together using **--link** option (see **docker-run(1)**). Default is true. + Allow unrestricted inter\-container and Docker daemon host communication. If disabled, containers can still be linked together using **--link** option (see **docker-run(1)**). Default is true. **--ip**="" Default IP address to use when binding container ports. Default is `0.0.0.0`. @@ -269,7 +269,7 @@ This is enabled by default (only) if using loopback devices and is required to res-parsify the loopback file on image/container removal. Disabling this on loopback can lead to *much* faster container removal times, -but will make the space used in `/var/lib/docker` directory not be returned to +but will prevent the space used in `/var/lib/docker` directory from being returned to the system for other use when containers are removed. # EXAMPLES diff --git a/docs/sources/reference/commandline/cli.md b/docs/sources/reference/commandline/cli.md index 0200a6bde6..ec5aff4d20 100644 --- a/docs/sources/reference/commandline/cli.md +++ b/docs/sources/reference/commandline/cli.md @@ -69,7 +69,7 @@ expect an integer, and they can only be specified once. use '' (the empty string) to disable setting of a group -g, --graph="/var/lib/docker" Path to use as the root of the Docker runtime -H, --host=[] The socket(s) to bind to in daemon mode or connect to in client mode, specified using one or more tcp://host:port, unix:///path/to/socket, fd://* or fd://socketfd. 
- --icc=true Allow unrestricted inter-container (and host) communication + --icc=true Allow unrestricted inter-container and Docker daemon host communication --insecure-registry=[] Enable insecure communication with specified registries (disables certificate verification for HTTPS and enables HTTP fallback) (e.g., localhost:5000 or 10.20.0.0/16) --ip=0.0.0.0 Default IP address to use when binding container ports --ip-forward=true Enable net.ipv4.ip_forward From d680ca5c96484474757fb6f92274f47bf1520631 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?L=C3=A9na=C3=AFc=20Huard?= Date: Wed, 3 Dec 2014 13:57:23 +0100 Subject: [PATCH 507/592] Rename the overlay storage driver MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit so that docker is started with `docker -d -s overlay` instead of `docker -d -s overlayfs` Signed-off-by: Lénaïc Huard --- contrib/check-config.sh | 4 ++-- daemon/graphdriver/driver.go | 2 +- daemon/graphdriver/overlayfs/overlayfs.go | 20 ++++++++++---------- docs/sources/reference/commandline/cli.md | 6 +++--- 4 files changed, 16 insertions(+), 16 deletions(-) diff --git a/contrib/check-config.sh b/contrib/check-config.sh index e9fbb21e98..72e3108fe1 100755 --- a/contrib/check-config.sh +++ b/contrib/check-config.sh @@ -76,7 +76,7 @@ check_flags() { for flag in "$@"; do echo "- $(check_flag "$flag")" done -} +} if [ ! -e "$CONFIG" ]; then wrap_warning "warning: $CONFIG does not exist, searching other paths for kernel config..." @@ -165,7 +165,7 @@ echo '- Storage Drivers:' echo '- "'$(wrap_color 'devicemapper' blue)'":' check_flags BLK_DEV_DM DM_THIN_PROVISIONING EXT4_FS EXT4_FS_POSIX_ACL EXT4_FS_SECURITY | sed 's/^/ /' - echo '- "'$(wrap_color 'overlayfs' blue)'":' + echo '- "'$(wrap_color 'overlay' blue)'":' check_flags OVERLAY_FS | sed 's/^/ /' } | sed 's/^/ /' echo diff --git a/daemon/graphdriver/driver.go b/daemon/graphdriver/driver.go index e91b44b34e..95479bf64f 100644 --- a/daemon/graphdriver/driver.go +++ b/daemon/graphdriver/driver.go @@ -81,7 +81,7 @@ var ( "devicemapper", "vfs", // experimental, has to be enabled manually for now - "overlayfs", + "overlay", } ErrNotSupported = errors.New("driver not supported") diff --git a/daemon/graphdriver/overlayfs/overlayfs.go b/daemon/graphdriver/overlayfs/overlayfs.go index d7dfcb24ac..fbe6b48083 100644 --- a/daemon/graphdriver/overlayfs/overlayfs.go +++ b/daemon/graphdriver/overlayfs/overlayfs.go @@ -50,18 +50,18 @@ func (d *naiveDiffDriverWithApply) ApplyDiff(id, parent string, diff archive.Arc return b, err } -// This backend uses the overlayfs union filesystem for containers +// This backend uses the overlay union filesystem for containers // plus hard link file sharing for images. // Each container/image can have a "root" subdirectory which is a plain -// filesystem hierarchy, or they can use overlayfs. +// filesystem hierarchy, or they can use overlay. -// If they use overlayfs there is a "upper" directory and a "lower-id" +// If they use overlay there is a "upper" directory and a "lower-id" // file, as well as "merged" and "work" directories. The "upper" // directory has the upper layer of the overlay, and "lower-id" contains // the id of the parent whose "root" directory shall be used as the lower // layer in the overlay. The overlay itself is mounted in the "merged" -// directory, and the "work" dir is needed for overlayfs to work. +// directory, and the "work" dir is needed for overlay to work. 
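To visualize the layout these comments describe, here is a minimal, hedged sketch of how such an overlay mount is assembled; the paths and function are illustrative, not the driver's actual code:

```go
package main

import (
	"fmt"
	"path"
	"syscall"
)

// mountLayer combines a parent's "root" tree (lower), this layer's "upper"
// and "work" directories into a single view under "merged". Paths are
// hypothetical; the real driver derives them from the layer id.
func mountLayer(layerDir, parentRoot string) error {
	upper := path.Join(layerDir, "upper")   // this layer's own changes
	work := path.Join(layerDir, "work")     // scratch space required by overlay
	merged := path.Join(layerDir, "merged") // the combined view

	opts := fmt.Sprintf("lowerdir=%s,upperdir=%s,workdir=%s", parentRoot, upper, work)
	return syscall.Mount("overlay", merged, "overlay", 0, opts)
}

func main() {
	if err := mountLayer("/var/lib/docker/overlay/layer1", "/var/lib/docker/overlay/layer0/root"); err != nil {
		fmt.Println("mount failed:", err)
	}
}
```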
// When a overlay layer is created there are two cases, either the // parent has a "root" dir, then we start out with a empty "upper" @@ -90,7 +90,7 @@ type Driver struct { } func init() { - graphdriver.Register("overlayfs", Init) + graphdriver.Register("overlay", Init) } func Init(home string, options []string) (graphdriver.Driver, error) { @@ -112,8 +112,8 @@ func Init(home string, options []string) (graphdriver.Driver, error) { } func supportsOverlayfs() error { - // We can try to modprobe overlayfs first before looking at - // proc/filesystems for when overlayfs is supported + // We can try to modprobe overlay first before looking at + // proc/filesystems for when overlay is supported exec.Command("modprobe", "overlay").Run() f, err := os.Open("/proc/filesystems") @@ -133,7 +133,7 @@ func supportsOverlayfs() error { } func (d *Driver) String() string { - return "overlayfs" + return "overlay" } func (d *Driver) Status() [][2]string { @@ -175,7 +175,7 @@ func (d *Driver) Create(id string, parent string) (retErr error) { return err } - // If parent has a root, just do a overlayfs to it + // If parent has a root, just do a overlay to it parentRoot := path.Join(parentDir, "root") if s, err := os.Lstat(parentRoot); err == nil { @@ -301,7 +301,7 @@ func (d *Driver) Put(id string) { if mount.mounted { if err := syscall.Unmount(mount.path, 0); err != nil { - log.Debugf("Failed to unmount %s overlayfs: %v", id, err) + log.Debugf("Failed to unmount %s overlay: %v", id, err) } } diff --git a/docs/sources/reference/commandline/cli.md b/docs/sources/reference/commandline/cli.md index 06220be22a..4d33970b59 100644 --- a/docs/sources/reference/commandline/cli.md +++ b/docs/sources/reference/commandline/cli.md @@ -156,7 +156,7 @@ string is equivalent to setting the `--tlsverify` flag. The following are equiva ### Daemon storage-driver option The Docker daemon has support for several different image layer storage drivers: `aufs`, -`devicemapper`, `btrfs` and `overlayfs`. +`devicemapper`, `btrfs` and `overlay`. The `aufs` driver is the oldest, but is based on a Linux kernel patch-set that is unlikely to be merged into the main kernel. These are also known to cause some @@ -175,9 +175,9 @@ To tell the Docker daemon to use `devicemapper`, use The `btrfs` driver is very fast for `docker build` - but like `devicemapper` does not share executable memory between devices. Use `docker -d -s btrfs -g /mnt/btrfs_partition`. -The `overlayfs` is a very fast union filesystem. It is now merged in the main +The `overlay` is a very fast union filesystem. It is now merged in the main Linux kernel as of [3.18.0](https://lkml.org/lkml/2014/10/26/137). -Call `docker -d -s overlayfs` to use it. +Call `docker -d -s overlay` to use it. ### Docker exec-driver option From 2352f00e4ff2cd102a4d591d67aba8e1c7eaa7b6 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?L=C3=A9na=C3=AFc=20Huard?= Date: Wed, 3 Dec 2014 14:06:19 +0100 Subject: [PATCH 508/592] Remove the last references to overlayfs MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This only renames docker internal structures. It has no impact on the end-user. 
Signed-off-by: Lénaïc Huard --- daemon/daemon_overlay.go | 7 +++++ daemon/daemon_overlayfs.go | 7 ----- .../{overlayfs => overlay}/copy.go | 6 ++-- .../overlayfs.go => overlay/overlay.go} | 6 ++-- daemon/graphdriver/overlay/overlay_test.go | 28 +++++++++++++++++++ .../graphdriver/overlayfs/overlayfs_test.go | 28 ------------------- 6 files changed, 41 insertions(+), 41 deletions(-) create mode 100644 daemon/daemon_overlay.go delete mode 100644 daemon/daemon_overlayfs.go rename daemon/graphdriver/{overlayfs => overlay}/copy.go (94%) rename daemon/graphdriver/{overlayfs/overlayfs.go => overlay/overlay.go} (98%) create mode 100644 daemon/graphdriver/overlay/overlay_test.go delete mode 100644 daemon/graphdriver/overlayfs/overlayfs_test.go diff --git a/daemon/daemon_overlay.go b/daemon/daemon_overlay.go new file mode 100644 index 0000000000..25d6e80285 --- /dev/null +++ b/daemon/daemon_overlay.go @@ -0,0 +1,7 @@ +// +build !exclude_graphdriver_overlay + +package daemon + +import ( + _ "github.com/docker/docker/daemon/graphdriver/overlay" +) diff --git a/daemon/daemon_overlayfs.go b/daemon/daemon_overlayfs.go deleted file mode 100644 index e134b297a9..0000000000 --- a/daemon/daemon_overlayfs.go +++ /dev/null @@ -1,7 +0,0 @@ -// +build !exclude_graphdriver_overlayfs - -package daemon - -import ( - _ "github.com/docker/docker/daemon/graphdriver/overlayfs" -) diff --git a/daemon/graphdriver/overlayfs/copy.go b/daemon/graphdriver/overlay/copy.go similarity index 94% rename from daemon/graphdriver/overlayfs/copy.go rename to daemon/graphdriver/overlay/copy.go index 4c8c6239ac..ae6bee517b 100644 --- a/daemon/graphdriver/overlayfs/copy.go +++ b/daemon/graphdriver/overlay/copy.go @@ -1,6 +1,6 @@ // +build linux -package overlayfs +package overlay import ( "fmt" @@ -122,8 +122,8 @@ func copyDir(srcDir, dstDir string, flags CopyFlags) error { return err } - // We need to copy this attribute if it appears in an overlayfs upper layer, as - // this function is used to copy those. It is set by overlayfs if a directory + // We need to copy this attribute if it appears in an overlay upper layer, as + // this function is used to copy those. It is set by overlay if a directory // is removed and then re-created and should not inherit anything from the // same dir in the lower dir. 
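As background, a hedged sketch of what a helper like the copyXattr call below typically does for this attribute, written here against golang.org/x/sys/unix; the driver's real helper may differ in buffer sizing and error handling:

```go
package main

import (
	"fmt"

	"golang.org/x/sys/unix"
)

// copyOpaqueXattr copies "trusted.overlay.opaque" from src to dst if it is
// set on src. Illustrative sketch only.
func copyOpaqueXattr(src, dst string) error {
	buf := make([]byte, 128)
	n, err := unix.Lgetxattr(src, "trusted.overlay.opaque", buf)
	if err == unix.ENODATA {
		return nil // attribute not present on the source directory
	}
	if err != nil {
		return err
	}
	return unix.Lsetxattr(dst, "trusted.overlay.opaque", buf[:n], 0)
}

func main() {
	if err := copyOpaqueXattr("/tmp/src-dir", "/tmp/dst-dir"); err != nil {
		fmt.Println("copy xattr:", err)
	}
}
```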
if err := copyXattr(srcPath, dstPath, "trusted.overlay.opaque"); err != nil { diff --git a/daemon/graphdriver/overlayfs/overlayfs.go b/daemon/graphdriver/overlay/overlay.go similarity index 98% rename from daemon/graphdriver/overlayfs/overlayfs.go rename to daemon/graphdriver/overlay/overlay.go index fbe6b48083..c45c3ea7ad 100644 --- a/daemon/graphdriver/overlayfs/overlayfs.go +++ b/daemon/graphdriver/overlay/overlay.go @@ -1,6 +1,6 @@ // +build linux -package overlayfs +package overlay import ( "bufio" @@ -94,7 +94,7 @@ func init() { } func Init(home string, options []string) (graphdriver.Driver, error) { - if err := supportsOverlayfs(); err != nil { + if err := supportsOverlay(); err != nil { return nil, graphdriver.ErrNotSupported } @@ -111,7 +111,7 @@ func Init(home string, options []string) (graphdriver.Driver, error) { return NaiveDiffDriverWithApply(d), nil } -func supportsOverlayfs() error { +func supportsOverlay() error { // We can try to modprobe overlay first before looking at // proc/filesystems for when overlay is supported exec.Command("modprobe", "overlay").Run() diff --git a/daemon/graphdriver/overlay/overlay_test.go b/daemon/graphdriver/overlay/overlay_test.go new file mode 100644 index 0000000000..88194e4ff8 --- /dev/null +++ b/daemon/graphdriver/overlay/overlay_test.go @@ -0,0 +1,28 @@ +package overlay + +import ( + "github.com/docker/docker/daemon/graphdriver/graphtest" + "testing" +) + +// This avoids creating a new driver for each test if all tests are run +// Make sure to put new tests between TestOverlaySetup and TestOverlayTeardown +func TestOverlaySetup(t *testing.T) { + graphtest.GetDriver(t, "overlay") +} + +func TestOverlayCreateEmpty(t *testing.T) { + graphtest.DriverTestCreateEmpty(t, "overlay") +} + +func TestOverlayCreateBase(t *testing.T) { + graphtest.DriverTestCreateBase(t, "overlay") +} + +func TestOverlayCreateSnap(t *testing.T) { + graphtest.DriverTestCreateSnap(t, "overlay") +} + +func TestOverlayTeardown(t *testing.T) { + graphtest.PutDriver(t) +} diff --git a/daemon/graphdriver/overlayfs/overlayfs_test.go b/daemon/graphdriver/overlayfs/overlayfs_test.go deleted file mode 100644 index 7ab71d0e64..0000000000 --- a/daemon/graphdriver/overlayfs/overlayfs_test.go +++ /dev/null @@ -1,28 +0,0 @@ -package overlayfs - -import ( - "github.com/docker/docker/daemon/graphdriver/graphtest" - "testing" -) - -// This avoids creating a new driver for each test if all tests are run -// Make sure to put new tests between TestOverlayfsSetup and TestOverlayfsTeardown -func TestOverlayfsSetup(t *testing.T) { - graphtest.GetDriver(t, "overlayfs") -} - -func TestOverlayfsCreateEmpty(t *testing.T) { - graphtest.DriverTestCreateEmpty(t, "overlayfs") -} - -func TestOverlayfsCreateBase(t *testing.T) { - graphtest.DriverTestCreateBase(t, "overlayfs") -} - -func TestOverlayfsCreateSnap(t *testing.T) { - graphtest.DriverTestCreateSnap(t, "overlayfs") -} - -func TestOverlayfsTeardown(t *testing.T) { - graphtest.PutDriver(t) -} From bb00453e58a86b9787ac4b3e7df3c48d8ddc3f87 Mon Sep 17 00:00:00 2001 From: Vivek Goyal Date: Wed, 3 Dec 2014 13:06:43 -0500 Subject: [PATCH 509/592] devmapper: Do not check for pool transaction id during old metadata migration Current code is associating a transaction id with each device and if pool transaction id is greater that value, then current code assumes that device is there in pool. 
The transaction id of the pool is a mechanism so that, during device creation and removal, one can define a transaction and, during startup, figure out whether that transaction was completed. I think we are using the transaction id throughout the code a little inappropriately. For example, if a device is being deleted, it is possible that we deleted the device from the pool but docker crashed before we could delete the metafile. When docker comes back it will think that the device is in the pool (because the device transaction id is less than the pool transaction id) even though it is not. Similarly, it could happen that some data in the pool is corrupted and some devices are lost during pool repair (without docker knowing about it). In that case the pool transaction id will be higher than the device transaction id, yet there is no guarantee that the device is actually in the pool.
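To make that failure window concrete, here is an illustrative sketch of the delete path; the stub types and helpers stand in for the real devmapper calls and are not the actual deviceset code:

```go
package main

// Illustrative only: the ordering problem described above.
type devInfo struct{ Hash string }

func deleteFromPool(info *devInfo) error     { return nil } // stub for the devicemapper delete
func removeMetadataFile(info *devInfo) error { return nil } // stub for removing the metafile

func deleteDevice(info *devInfo) error {
	if err := deleteFromPool(info); err != nil {
		return err // device still in the pool, metadata still on disk: consistent
	}
	// If the daemon crashes right here, the device is gone from the pool but
	// its metadata file still exists. On restart the device "looks" present,
	// and comparing device vs. pool transaction ids cannot prove otherwise.
	return removeMetadataFile(info)
}

func main() { _ = deleteDevice(&devInfo{Hash: "example"}) }
```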
Signed-off-by: Vivek Goyal --- daemon/graphdriver/devmapper/deviceset.go | 5 ----- 1 file changed, 5 deletions(-) diff --git a/daemon/graphdriver/devmapper/deviceset.go b/daemon/graphdriver/devmapper/deviceset.go index 9731d633c4..bc83bb7fa4 100644 --- a/daemon/graphdriver/devmapper/deviceset.go +++ b/daemon/graphdriver/devmapper/deviceset.go @@ -390,11 +390,6 @@ func (devices *DeviceSet) loadMetadata(hash string) *DevInfo { return nil } - // If the transaction id is larger than the actual one we lost the device due to some crash - if info.TransactionId > devices.TransactionId { - return nil - } - return info } From 004d8b9b337f4a6cf68c124e89e02e673c6320fc Mon Sep 17 00:00:00 2001 From: Vivek Goyal Date: Wed, 3 Dec 2014 13:06:43 -0500 Subject: [PATCH 511/592] devmapper: Remove unnecessary call to allocateTransactionId() during device removal Remove call to allocateTransactionId() during device removal. This seems to be unnecessary and it is not clear what this call is doing. Signed-off-by: Vivek Goyal --- daemon/graphdriver/devmapper/deviceset.go | 1 - 1 file changed, 1 deletion(-) diff --git a/daemon/graphdriver/devmapper/deviceset.go b/daemon/graphdriver/devmapper/deviceset.go index bc83bb7fa4..7ffabc3e2f 100644 --- a/daemon/graphdriver/devmapper/deviceset.go +++ b/daemon/graphdriver/devmapper/deviceset.go @@ -793,7 +793,6 @@ func (devices *DeviceSet) deleteDevice(info *DevInfo) error { return err } - devices.allocateTransactionId() devices.devicesLock.Lock() delete(devices.Devices, info.Hash) devices.devicesLock.Unlock() From 0db6cc85edfccb16ce5308eea767530e1a3f6906 Mon Sep 17 00:00:00 2001 From: Vivek Goyal Date: Wed, 3 Dec 2014 13:06:43 -0500 Subject: [PATCH 512/592] devmapper: Remove transaction Id update from saveMetaData() Right now saveMetaData() is kind of little overloaded function. It is supposed to save file metadata to disk. But in addition if user has bumped up NewTransactionId before calling saveMetaData(), then it will also update the transaction ID in pool. Keep saveMetaData() simple and let it just save the file. Any update of pool transaction ID is done inline in the code which needs it. Also create an helper function updatePoolTransactionId() to update pool transaction Id. 
Signed-off-by: Vivek Goyal --- daemon/graphdriver/devmapper/deviceset.go | 5 ----- 1 file changed, 5 deletions(-) diff --git a/daemon/graphdriver/devmapper/deviceset.go b/daemon/graphdriver/devmapper/deviceset.go index 9731d633c4..bc83bb7fa4 100644 --- a/daemon/graphdriver/devmapper/deviceset.go +++ b/daemon/graphdriver/devmapper/deviceset.go @@ -390,11 +390,6 @@ func (devices *DeviceSet) loadMetadata(hash string) *DevInfo { return nil } - // If the transaction id is larger than the actual one we lost the device due to some crash - if info.TransactionId > devices.TransactionId { - return nil - } - return info } From 004d8b9b337f4a6cf68c124e89e02e673c6320fc Mon Sep 17 00:00:00 2001 From: Vivek Goyal Date: Wed, 3 Dec 2014 13:06:43 -0500 Subject: [PATCH 511/592] devmapper: Remove unnecessary call to allocateTransactionId() during device removal Remove the call to allocateTransactionId() during device removal. It appears to be unnecessary, and it is not clear what the call was meant to do. Signed-off-by: Vivek Goyal --- daemon/graphdriver/devmapper/deviceset.go | 1 - 1 file changed, 1 deletion(-) diff --git a/daemon/graphdriver/devmapper/deviceset.go b/daemon/graphdriver/devmapper/deviceset.go index bc83bb7fa4..7ffabc3e2f 100644 --- a/daemon/graphdriver/devmapper/deviceset.go +++ b/daemon/graphdriver/devmapper/deviceset.go @@ -793,7 +793,6 @@ func (devices *DeviceSet) deleteDevice(info *DevInfo) error { return err } - devices.allocateTransactionId() devices.devicesLock.Lock() delete(devices.Devices, info.Hash) devices.devicesLock.Unlock() From 0db6cc85edfccb16ce5308eea767530e1a3f6906 Mon Sep 17 00:00:00 2001 From: Vivek Goyal Date: Wed, 3 Dec 2014 13:06:43 -0500 Subject: [PATCH 512/592] devmapper: Remove transaction Id update from saveMetaData() Right now saveMetaData() is a somewhat overloaded function. It is supposed to save the device metadata file to disk, but in addition, if the caller has bumped up NewTransactionId before calling saveMetaData(), it will also update the transaction ID in the pool. Keep saveMetaData() simple and let it just save the file. Any update of the pool transaction ID is done inline in the code that needs it. Also create a helper function updatePoolTransactionId() to update the pool transaction Id.
Signed-off-by: Vivek Goyal --- daemon/graphdriver/devmapper/deviceset.go | 28 +++++++++++++---------- 1 file changed, 16 insertions(+), 12 deletions(-) diff --git a/daemon/graphdriver/devmapper/deviceset.go b/daemon/graphdriver/devmapper/deviceset.go index d6891f809a..26a3da8cf1 100644 --- a/daemon/graphdriver/devmapper/deviceset.go +++ b/daemon/graphdriver/devmapper/deviceset.go @@ -352,19 +352,8 @@ func (devices *DeviceSet) createFilesystem(info *DevInfo) error { return nil } -func (devices *DeviceSet) initMetaData() error { - _, _, _, params, err := devicemapper.GetStatus(devices.getPoolName()) - if err != nil { - return err - } - - if _, err := fmt.Sscanf(params, "%d", &devices.TransactionId); err != nil { - return err - } - devices.NewTransactionId = devices.TransactionId - +func (devices *DeviceSet) migrateOldMetaData() error { // Migrate old metadatafile - jsonData, err := ioutil.ReadFile(devices.oldMetadataFile()) if err != nil && !os.IsNotExist(err) { return err @@ -390,6 +379,21 @@ func (devices *DeviceSet) initMetaData() error { return nil } +func (devices *DeviceSet) initMetaData() error { + if err := devices.migrateOldMetaData(); err != nil { + return err + } + + _, transactionId, _, _, _, _, err := devices.poolStatus() + if err != nil { + return err + } + + devices.TransactionId = transactionId + devices.NewTransactionId = devices.TransactionId + return nil +} + func (devices *DeviceSet) loadMetadata(hash string) *DevInfo { info := &DevInfo{Hash: hash, devices: devices} From 5be77901cd505aad002b912b5febe2ba6baa23fd Mon Sep 17 00:00:00 2001 From: Vivek Goyal Date: Wed, 3 Dec 2014 13:06:43 -0500 Subject: [PATCH 514/592] devmapper: Do not add back device into hash map if meta file removal failed When we are deleting a device, we also delete associated metadata file. If that file removal fails, we are adding back the device in in-memory table. I really can't see what's the point. When next lookup takes place it will be automatically loaded if need be. Remove that code. Signed-off-by: Vivek Goyal --- daemon/graphdriver/devmapper/deviceset.go | 3 --- 1 file changed, 3 deletions(-) diff --git a/daemon/graphdriver/devmapper/deviceset.go b/daemon/graphdriver/devmapper/deviceset.go index 26a3da8cf1..b4ce1b3b58 100644 --- a/daemon/graphdriver/devmapper/deviceset.go +++ b/daemon/graphdriver/devmapper/deviceset.go @@ -814,9 +814,6 @@ func (devices *DeviceSet) deleteDevice(info *DevInfo) error { devices.devicesLock.Unlock() if err := devices.removeMetadata(info); err != nil { - devices.devicesLock.Lock() - devices.Devices[info.Hash] = info - devices.devicesLock.Unlock() log.Debugf("Error removing meta data: %s", err) return err } From 7b2b15d3e9f9b7ad898a36bbe5ceb42c9ca58d47 Mon Sep 17 00:00:00 2001 From: Vivek Goyal Date: Wed, 3 Dec 2014 13:06:43 -0500 Subject: [PATCH 515/592] devmapper: Use device id as specified by caller Currently devicemapper CreateDevice and CreateSnapDevice keep on retrying device creation till a suitable device id is found. With new transaction mechanism we need to store device id in transaction before it has been created. So change the logic in such a way that caller decides the devices Id to use. If that device Id is not available, caller bumps up the device Id and retries. That way caller can update transaciton too when it tries a new Id. Transaction related patches will come later in the series. 
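The pool transaction id itself comes from the thin-pool status line reported by device-mapper; it is the first field of the params string, which is why the old code could read it with a single Sscanf. A minimal sketch of that extraction, assuming a params string in the usual `dmsetup status` thin-pool layout (the sample value below is illustrative):

```go
package main

import "fmt"

// poolTransactionId pulls the transaction id out of a thin-pool status
// params string, whose first field is the transaction id.
func poolTransactionId(params string) (uint64, error) {
	var id uint64
	if _, err := fmt.Sscanf(params, "%d", &id); err != nil {
		return 0, err
	}
	return id, nil
}

func main() {
	id, err := poolTransactionId("5 130/4161600 1028/3997696 - rw discard_passdown queue_if_no_space")
	fmt.Println(id, err) // 5 <nil>
}
```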
Signed-off-by: Vivek Goyal --- daemon/graphdriver/devmapper/deviceset.go | 67 +++++++++++--- pkg/devicemapper/devmapper.go | 108 +++++++++++----------- 2 files changed, 108 insertions(+), 67 deletions(-) diff --git a/daemon/graphdriver/devmapper/deviceset.go b/daemon/graphdriver/devmapper/deviceset.go index b4ce1b3b58..db9d2528f7 100644 --- a/daemon/graphdriver/devmapper/deviceset.go +++ b/daemon/graphdriver/devmapper/deviceset.go @@ -394,6 +394,50 @@ func (devices *DeviceSet) initMetaData() error { return nil } +func (devices *DeviceSet) incNextDeviceId() { + // Ids are 24bit, so wrap around + devices.NextDeviceId = (devices.NextDeviceId + 1) & 0xffffff +} + +func (devices *DeviceSet) createDevice(deviceId *int) error { + for { + if err := devicemapper.CreateDevice(devices.getPoolDevName(), *deviceId); err != nil { + if devicemapper.DeviceIdExists(err) { + // Device Id already exists. Try a new one. + devices.incNextDeviceId() + *deviceId = devices.NextDeviceId + continue + } + log.Debugf("Error creating device: %s", err) + return err + } + break + } + devices.incNextDeviceId() + return nil +} + +func (devices *DeviceSet) createSnapDevice(baseInfo *DevInfo, deviceId *int) error { + log.Debugf("[deviceset] createSnapDevice() DeviceId=%d", *deviceId) + defer log.Debugf("[deviceset] createSnapDevice() END DeviceId=%d", *deviceId) + + for { + if err := devicemapper.CreateSnapDevice(devices.getPoolDevName(), *deviceId, baseInfo.Name(), baseInfo.DeviceId); err != nil { + if devicemapper.DeviceIdExists(err) { + // Device Id already exists. Try a new one. + devices.incNextDeviceId() + *deviceId = devices.NextDeviceId + continue + } + log.Debugf("Error creating snap device: %s", err) + return err + } + break + } + devices.incNextDeviceId() + return nil +} + func (devices *DeviceSet) loadMetadata(hash string) *DevInfo { info := &DevInfo{Hash: hash, devices: devices} @@ -439,20 +483,16 @@ func (devices *DeviceSet) setupBaseImage() error { log.Debugf("Initializing base device-mapper thin volume") - id := devices.NextDeviceId - // Create initial device - if err := devicemapper.CreateDevice(devices.getPoolDevName(), &id); err != nil { + deviceId := devices.NextDeviceId + if err := devices.createDevice(&deviceId); err != nil { return err } - // Ids are 24bit, so wrap around - devices.NextDeviceId = (id + 1) & 0xffffff - - log.Debugf("Registering base device (id %v) with FS size %v", id, devices.baseFsSize) - info, err := devices.registerDevice(id, "", devices.baseFsSize) + log.Debugf("Registering base device (id %v) with FS size %v", deviceId, devices.baseFsSize) + info, err := devices.registerDevice(deviceId, "", devices.baseFsSize) if err != nil { - _ = devicemapper.DeleteDevice(devices.getPoolDevName(), id) + _ = devicemapper.DeleteDevice(devices.getPoolDevName(), deviceId) return err } @@ -751,6 +791,9 @@ func (devices *DeviceSet) initDevmapper(doInit bool) error { } func (devices *DeviceSet) AddDevice(hash, baseHash string) error { + log.Debugf("[deviceset] AddDevice() hash=%s basehash=%s", hash, baseHash) + defer log.Debugf("[deviceset] AddDevice END") + baseInfo, err := devices.lookupDevice(baseHash) if err != nil { return err @@ -767,15 +810,11 @@ func (devices *DeviceSet) AddDevice(hash, baseHash string) error { } deviceId := devices.NextDeviceId - - if err := devicemapper.CreateSnapDevice(devices.getPoolDevName(), &deviceId, baseInfo.Name(), baseInfo.DeviceId); err != nil { + if err := devices.createSnapDevice(baseInfo, &deviceId); err != nil { log.Debugf("Error creating snap device: %s", err) 
return err } - // Ids are 24bit, so wrap around - devices.NextDeviceId = (deviceId + 1) & 0xffffff - if _, err := devices.registerDevice(deviceId, hash, baseInfo.Size); err != nil { devicemapper.DeleteDevice(devices.getPoolDevName(), deviceId) log.Debugf("Error registering device: %s", err) diff --git a/pkg/devicemapper/devmapper.go b/pkg/devicemapper/devmapper.go index a7306ba55d..c23a3624db 100644 --- a/pkg/devicemapper/devmapper.go +++ b/pkg/devicemapper/devmapper.go @@ -67,6 +67,7 @@ var ( ErrGetLoopbackBackingFile = errors.New("Unable to get loopback backing file") ErrLoopbackSetCapacity = errors.New("Unable set loopback capacity") ErrBusy = errors.New("Device is Busy") + ErrDeviceIdExists = errors.New("Device Id Exists") dmSawBusy bool dmSawExist bool @@ -97,6 +98,16 @@ type ( AddNodeType int ) +// Returns whether error conveys the information about device Id already +// exist or not. This will be true if device creation or snap creation +// operation fails if device or snap device already exists in pool. +// Current implementation is little crude as it scans the error string +// for exact pattern match. Replacing it with more robust implementation +// is desirable. +func DeviceIdExists(err error) bool { + return fmt.Sprint(err) == fmt.Sprint(ErrDeviceIdExists) +} + func (t *Task) destroy() { if t != nil { DmTaskDestroy(t.unmanaged) @@ -528,33 +539,29 @@ func ResumeDevice(name string) error { return nil } -func CreateDevice(poolName string, deviceId *int) error { - log.Debugf("[devmapper] CreateDevice(poolName=%v, deviceId=%v)", poolName, *deviceId) +func CreateDevice(poolName string, deviceId int) error { + log.Debugf("[devmapper] CreateDevice(poolName=%v, deviceId=%v)", poolName, deviceId) + task, err := TaskCreateNamed(DeviceTargetMsg, poolName) + if task == nil { + return err + } - for { - task, err := TaskCreateNamed(DeviceTargetMsg, poolName) - if task == nil { - return err - } + if err := task.SetSector(0); err != nil { + return fmt.Errorf("Can't set sector %s", err) + } - if err := task.SetSector(0); err != nil { - return fmt.Errorf("Can't set sector %s", err) - } + if err := task.SetMessage(fmt.Sprintf("create_thin %d", deviceId)); err != nil { + return fmt.Errorf("Can't set message %s", err) + } - if err := task.SetMessage(fmt.Sprintf("create_thin %d", *deviceId)); err != nil { - return fmt.Errorf("Can't set message %s", err) - } - - dmSawExist = false // reset before the task is run - if err := task.Run(); err != nil { - if dmSawExist { - // Already exists, try next id - *deviceId++ - continue - } + dmSawExist = false // reset before the task is run + if err := task.Run(); err != nil { + // Caller wants to know about ErrDeviceIdExists so that it can try with a different device id. 
+ if dmSawExist { + return ErrDeviceIdExists + } else { return fmt.Errorf("Error running CreateDevice %s", err) } - break } return nil } @@ -607,7 +614,7 @@ func ActivateDevice(poolName string, name string, deviceId int, size uint64) err return nil } -func CreateSnapDevice(poolName string, deviceId *int, baseName string, baseDeviceId int) error { +func CreateSnapDevice(poolName string, deviceId int, baseName string, baseDeviceId int) error { devinfo, _ := GetInfo(baseName) doSuspend := devinfo != nil && devinfo.Exists != 0 @@ -617,44 +624,39 @@ func CreateSnapDevice(poolName string, deviceId *int, baseName string, baseDevic } } - for { - task, err := TaskCreateNamed(DeviceTargetMsg, poolName) - if task == nil { - if doSuspend { - ResumeDevice(baseName) - } - return err + task, err := TaskCreateNamed(DeviceTargetMsg, poolName) + if task == nil { + if doSuspend { + ResumeDevice(baseName) } + return err + } - if err := task.SetSector(0); err != nil { - if doSuspend { - ResumeDevice(baseName) - } - return fmt.Errorf("Can't set sector %s", err) + if err := task.SetSector(0); err != nil { + if doSuspend { + ResumeDevice(baseName) } + return fmt.Errorf("Can't set sector %s", err) + } - if err := task.SetMessage(fmt.Sprintf("create_snap %d %d", *deviceId, baseDeviceId)); err != nil { - if doSuspend { - ResumeDevice(baseName) - } - return fmt.Errorf("Can't set message %s", err) + if err := task.SetMessage(fmt.Sprintf("create_snap %d %d", deviceId, baseDeviceId)); err != nil { + if doSuspend { + ResumeDevice(baseName) } + return fmt.Errorf("Can't set message %s", err) + } - dmSawExist = false // reset before the task is run - if err := task.Run(); err != nil { - if dmSawExist { - // Already exists, try next id - *deviceId++ - continue - } - - if doSuspend { - ResumeDevice(baseName) - } + dmSawExist = false // reset before the task is run + if err := task.Run(); err != nil { + if doSuspend { + ResumeDevice(baseName) + } + // Caller wants to know about ErrDeviceIdExists so that it can try with a different device id. + if dmSawExist { + return ErrDeviceIdExists + } else { return fmt.Errorf("Error running DeviceCreate (createSnapDevice) %s", err) } - - break } if doSuspend { From 442247927b8e6c102ce1f94de58c7f93aab3d271 Mon Sep 17 00:00:00 2001 From: Vivek Goyal Date: Wed, 3 Dec 2014 13:06:43 -0500 Subject: [PATCH 516/592] devmapper: Provide a function unregisterDevice() Currently registerDevice() adds a device to in-memory table, saves metadata and also updates the pool transaction ID. Now move transaciton Id update out of registerDevice() and provide a new function unregisterDevice() which does the reverse of registerDevice(). This will simplify some code down the line and make it more structured. This is just code reorganization and should not change functionality. 
Signed-off-by: Vivek Goyal --- daemon/graphdriver/devmapper/deviceset.go | 57 +++++++++++++++-------- 1 file changed, 38 insertions(+), 19 deletions(-) diff --git a/daemon/graphdriver/devmapper/deviceset.go b/daemon/graphdriver/devmapper/deviceset.go index db9d2528f7..7963854565 100644 --- a/daemon/graphdriver/devmapper/deviceset.go +++ b/daemon/graphdriver/devmapper/deviceset.go @@ -274,13 +274,32 @@ func (devices *DeviceSet) lookupDevice(hash string) (*DevInfo, error) { return info, nil } -func (devices *DeviceSet) registerDevice(id int, hash string, size uint64) (*DevInfo, error) { +func (devices *DeviceSet) unregisterDevice(id int, hash string) error { + log.Debugf("unregisterDevice(%v, %v)", id, hash) + info := &DevInfo{ + Hash: hash, + DeviceId: id, + } + + devices.devicesLock.Lock() + delete(devices.Devices, hash) + devices.devicesLock.Unlock() + + if err := devices.removeMetadata(info); err != nil { + log.Debugf("Error removing meta data: %s", err) + return err + } + + return nil +} + +func (devices *DeviceSet) registerDevice(id int, hash string, size uint64, transactionId uint64) (*DevInfo, error) { log.Debugf("registerDevice(%v, %v)", id, hash) info := &DevInfo{ Hash: hash, DeviceId: id, Size: size, - TransactionId: devices.allocateTransactionId(), + TransactionId: transactionId, Initialized: false, devices: devices, } @@ -297,15 +316,6 @@ func (devices *DeviceSet) registerDevice(id int, hash string, size uint64) (*Dev return nil, err } - if err := devices.updatePoolTransactionId(); err != nil { - // Remove unused device - devices.devicesLock.Lock() - delete(devices.Devices, hash) - devices.devicesLock.Unlock() - devices.removeMetadata(info) - return nil, err - } - return info, nil } @@ -489,13 +499,20 @@ func (devices *DeviceSet) setupBaseImage() error { return err } + transactionId := devices.allocateTransactionId() log.Debugf("Registering base device (id %v) with FS size %v", deviceId, devices.baseFsSize) - info, err := devices.registerDevice(deviceId, "", devices.baseFsSize) + info, err := devices.registerDevice(deviceId, "", devices.baseFsSize, transactionId) if err != nil { _ = devicemapper.DeleteDevice(devices.getPoolDevName(), deviceId) return err } + if err := devices.updatePoolTransactionId(); err != nil { + devices.unregisterDevice(deviceId, "") + devicemapper.DeleteDevice(devices.getPoolDevName(), deviceId) + return err + } + log.Debugf("Creating filesystem on base device-mapper thin volume") if err = devices.activateDeviceIfNeeded(info); err != nil { @@ -815,11 +832,18 @@ func (devices *DeviceSet) AddDevice(hash, baseHash string) error { return err } - if _, err := devices.registerDevice(deviceId, hash, baseInfo.Size); err != nil { + transactionId := devices.allocateTransactionId() + if _, err := devices.registerDevice(deviceId, hash, baseInfo.Size, transactionId); err != nil { devicemapper.DeleteDevice(devices.getPoolDevName(), deviceId) log.Debugf("Error registering device: %s", err) return err } + + if err := devices.updatePoolTransactionId(); err != nil { + devices.unregisterDevice(deviceId, hash) + devicemapper.DeleteDevice(devices.getPoolDevName(), deviceId) + return err + } return nil } @@ -848,12 +872,7 @@ func (devices *DeviceSet) deleteDevice(info *DevInfo) error { return err } - devices.devicesLock.Lock() - delete(devices.Devices, info.Hash) - devices.devicesLock.Unlock() - - if err := devices.removeMetadata(info); err != nil { - log.Debugf("Error removing meta data: %s", err) + if err := devices.unregisterDevice(info.DeviceId, info.Hash); err != nil { 
return err } From ad9118c696c0953ec48eec15ea4b7546296d7c20 Mon Sep 17 00:00:00 2001 From: Vivek Goyal Date: Wed, 3 Dec 2014 13:06:43 -0500 Subject: [PATCH 517/592] devmapper: Create new helper function for device and snap creation Create two new helper functions for device and snap device creation. These functions will not only create the device and also register the device. Again, makes the code structure better and keeps all transaction logic contained to functions instead of spilling over into functions like setupBaseImage or AddDevice(). Just the code reorganization. No functionality change. Signed-off-by: Vivek Goyal --- daemon/graphdriver/devmapper/deviceset.go | 76 ++++++++++++++--------- 1 file changed, 46 insertions(+), 30 deletions(-) diff --git a/daemon/graphdriver/devmapper/deviceset.go b/daemon/graphdriver/devmapper/deviceset.go index 7963854565..713d1d60bc 100644 --- a/daemon/graphdriver/devmapper/deviceset.go +++ b/daemon/graphdriver/devmapper/deviceset.go @@ -427,6 +427,28 @@ func (devices *DeviceSet) createDevice(deviceId *int) error { return nil } +func (devices *DeviceSet) createRegisterDevice(hash string) (*DevInfo, error) { + deviceId := devices.NextDeviceId + if err := devices.createDevice(&deviceId); err != nil { + return nil, err + } + + transactionId := devices.allocateTransactionId() + log.Debugf("Registering device (id %v) with FS size %v", deviceId, devices.baseFsSize) + info, err := devices.registerDevice(deviceId, hash, devices.baseFsSize, transactionId) + if err != nil { + _ = devicemapper.DeleteDevice(devices.getPoolDevName(), deviceId) + return nil, err + } + + if err := devices.updatePoolTransactionId(); err != nil { + devices.unregisterDevice(deviceId, hash) + devicemapper.DeleteDevice(devices.getPoolDevName(), deviceId) + return nil, err + } + return info, nil +} + func (devices *DeviceSet) createSnapDevice(baseInfo *DevInfo, deviceId *int) error { log.Debugf("[deviceset] createSnapDevice() DeviceId=%d", *deviceId) defer log.Debugf("[deviceset] createSnapDevice() END DeviceId=%d", *deviceId) @@ -448,6 +470,28 @@ func (devices *DeviceSet) createSnapDevice(baseInfo *DevInfo, deviceId *int) err return nil } +func (devices *DeviceSet) createRegisterSnapDevice(hash string, baseInfo *DevInfo) error { + deviceId := devices.NextDeviceId + if err := devices.createSnapDevice(baseInfo, &deviceId); err != nil { + log.Debugf("Error creating snap device: %s", err) + return err + } + + transactionId := devices.allocateTransactionId() + if _, err := devices.registerDevice(deviceId, hash, baseInfo.Size, transactionId); err != nil { + devicemapper.DeleteDevice(devices.getPoolDevName(), deviceId) + log.Debugf("Error registering device: %s", err) + return err + } + + if err := devices.updatePoolTransactionId(); err != nil { + devices.unregisterDevice(deviceId, hash) + devicemapper.DeleteDevice(devices.getPoolDevName(), deviceId) + return err + } + return nil +} + func (devices *DeviceSet) loadMetadata(hash string) *DevInfo { info := &DevInfo{Hash: hash, devices: devices} @@ -494,22 +538,8 @@ func (devices *DeviceSet) setupBaseImage() error { log.Debugf("Initializing base device-mapper thin volume") // Create initial device - deviceId := devices.NextDeviceId - if err := devices.createDevice(&deviceId); err != nil { - return err - } - - transactionId := devices.allocateTransactionId() - log.Debugf("Registering base device (id %v) with FS size %v", deviceId, devices.baseFsSize) - info, err := devices.registerDevice(deviceId, "", devices.baseFsSize, transactionId) + info, err 
:= devices.createRegisterDevice("") if err != nil { - _ = devicemapper.DeleteDevice(devices.getPoolDevName(), deviceId) - return err - } - - if err := devices.updatePoolTransactionId(); err != nil { - devices.unregisterDevice(deviceId, "") - devicemapper.DeleteDevice(devices.getPoolDevName(), deviceId) return err } @@ -826,24 +856,10 @@ func (devices *DeviceSet) AddDevice(hash, baseHash string) error { return fmt.Errorf("device %s already exists", hash) } - deviceId := devices.NextDeviceId - if err := devices.createSnapDevice(baseInfo, &deviceId); err != nil { - log.Debugf("Error creating snap device: %s", err) + if err := devices.createRegisterSnapDevice(hash, baseInfo); err != nil { return err } - transactionId := devices.allocateTransactionId() - if _, err := devices.registerDevice(deviceId, hash, baseInfo.Size, transactionId); err != nil { - devicemapper.DeleteDevice(devices.getPoolDevName(), deviceId) - log.Debugf("Error registering device: %s", err) - return err - } - - if err := devices.updatePoolTransactionId(); err != nil { - devices.unregisterDevice(deviceId, hash) - devicemapper.DeleteDevice(devices.getPoolDevName(), deviceId) - return err - } return nil } From 6d347aeb6984ebdcb1051212ab3103880ef69ab0 Mon Sep 17 00:00:00 2001 From: Vivek Goyal Date: Wed, 3 Dec 2014 13:06:43 -0500 Subject: [PATCH 518/592] devmapper: Remove unnecessary condition check in updatePoolTransactionId() Currently updatePoolTransactionId() checks if NewTransactionId and TransactionId are not same only then update the transaction Id in pool. This check is redundant. Currently we call updatePoolTransactionId() only from two places and both of these first allocate a new transaction Id. Also updatePoolTransactionId() should only be called after allocating new transaction Id otherwise it does not make any sense. Remove the redundant check and reduce confusion. Signed-off-by: Vivek Goyal --- daemon/graphdriver/devmapper/deviceset.go | 8 +++----- 1 file changed, 3 insertions(+), 5 deletions(-) diff --git a/daemon/graphdriver/devmapper/deviceset.go b/daemon/graphdriver/devmapper/deviceset.go index 713d1d60bc..40fb4e2d42 100644 --- a/daemon/graphdriver/devmapper/deviceset.go +++ b/daemon/graphdriver/devmapper/deviceset.go @@ -205,12 +205,10 @@ func (devices *DeviceSet) allocateTransactionId() uint64 { } func (devices *DeviceSet) updatePoolTransactionId() error { - if devices.NewTransactionId != devices.TransactionId { - if err := devicemapper.SetTransactionId(devices.getPoolDevName(), devices.TransactionId, devices.NewTransactionId); err != nil { - return fmt.Errorf("Error setting devmapper transaction ID: %s", err) - } - devices.TransactionId = devices.NewTransactionId + if err := devicemapper.SetTransactionId(devices.getPoolDevName(), devices.TransactionId, devices.NewTransactionId); err != nil { + return fmt.Errorf("Error setting devmapper transaction ID: %s", err) } + devices.TransactionId = devices.NewTransactionId return nil } From 7b0a1b814b8f13e30df466dd66c3fdc2114eac28 Mon Sep 17 00:00:00 2001 From: Vivek Goyal Date: Wed, 3 Dec 2014 13:06:43 -0500 Subject: [PATCH 519/592] devmapper: Allocate new transaction Id using current transaction Id Currently new transaction Id is created using allocateTransactionId() function. This function takes NewTransactionId and bumps up by one to create NewTransactionId. I think ideally we should be bumping up devices.TransactionId by 1 to come up with NewTransactionId. 
Because idea is that devices.TransactionId contains the current pool transaction Id and to come up with a new transaction Id bump it up by one. Current code is not wrong as we are keeping NewTransactionId and TransactionId in sync. But it will be more direct if we look at devices.TransactionId to come up with NewTransactionId. That way we don't have to even initialize NewTransactionId during startup as first time somebody wants to do a transaction, it will be allocated fresh. So simplify the code a bit. No functionality change. Signed-off-by: Vivek Goyal --- daemon/graphdriver/devmapper/deviceset.go | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/daemon/graphdriver/devmapper/deviceset.go b/daemon/graphdriver/devmapper/deviceset.go index 40fb4e2d42..2660ade295 100644 --- a/daemon/graphdriver/devmapper/deviceset.go +++ b/daemon/graphdriver/devmapper/deviceset.go @@ -200,7 +200,7 @@ func (devices *DeviceSet) ensureImage(name string, size int64) (string, error) { } func (devices *DeviceSet) allocateTransactionId() uint64 { - devices.NewTransactionId = devices.NewTransactionId + 1 + devices.NewTransactionId = devices.TransactionId + 1 return devices.NewTransactionId } @@ -398,7 +398,6 @@ func (devices *DeviceSet) initMetaData() error { } devices.TransactionId = transactionId - devices.NewTransactionId = devices.TransactionId return nil } From f078bcd8e50913fd8b05022ebd047c5a1f2e3d52 Mon Sep 17 00:00:00 2001 From: Vivek Goyal Date: Wed, 3 Dec 2014 13:06:43 -0500 Subject: [PATCH 520/592] devmapper: Rename NewTransactionId to OpenTransactionId Very soon we will have the notion of an open transaction and keep its details in a metafile. When a new transaction is opened, we allocate a new transaction Id, do the device creation/deletion and then we will close the transaction. I thought that OpenTransactionId better represents the semantics of transaction Id associated with an open transaction instead of NewtransactionId. This patch just does the renaming. No functionality change. I have also introduced a structure "Transaction" which will keep all the details associated with a transaction. Later patches will add more fields in this structure. 
Signed-off-by: Vivek Goyal --- daemon/graphdriver/devmapper/deviceset.go | 26 +++++++++++++---------- 1 file changed, 15 insertions(+), 11 deletions(-) diff --git a/daemon/graphdriver/devmapper/deviceset.go b/daemon/graphdriver/devmapper/deviceset.go index 2660ade295..f132575355 100644 --- a/daemon/graphdriver/devmapper/deviceset.go +++ b/daemon/graphdriver/devmapper/deviceset.go @@ -35,6 +35,10 @@ var ( const deviceSetMetaFile string = "deviceset-metadata" +type Transaction struct { + OpenTransactionId uint64 `json:"-"` +} + type DevInfo struct { Hash string `json:"-"` DeviceId int `json:"device_id"` @@ -65,13 +69,12 @@ type MetaData struct { } type DeviceSet struct { - MetaData `json:"-"` - sync.Mutex `json:"-"` // Protects Devices map and serializes calls into libdevmapper - root string - devicePrefix string - TransactionId uint64 `json:"-"` - NewTransactionId uint64 `json:"-"` - NextDeviceId int `json:"next_device_id"` + MetaData `json:"-"` + sync.Mutex `json:"-"` // Protects Devices map and serializes calls into libdevmapper + root string + devicePrefix string + TransactionId uint64 `json:"-"` + NextDeviceId int `json:"next_device_id"` // Options dataLoopbackSize int64 @@ -85,6 +88,7 @@ type DeviceSet struct { doBlkDiscard bool thinpBlockSize uint32 thinPoolDevice string + Transaction `json:"-"` } type DiskUsage struct { @@ -200,15 +204,15 @@ func (devices *DeviceSet) ensureImage(name string, size int64) (string, error) { } func (devices *DeviceSet) allocateTransactionId() uint64 { - devices.NewTransactionId = devices.TransactionId + 1 - return devices.NewTransactionId + devices.OpenTransactionId = devices.TransactionId + 1 + return devices.OpenTransactionId } func (devices *DeviceSet) updatePoolTransactionId() error { - if err := devicemapper.SetTransactionId(devices.getPoolDevName(), devices.TransactionId, devices.NewTransactionId); err != nil { + if err := devicemapper.SetTransactionId(devices.getPoolDevName(), devices.TransactionId, devices.OpenTransactionId); err != nil { return fmt.Errorf("Error setting devmapper transaction ID: %s", err) } - devices.TransactionId = devices.NewTransactionId + devices.TransactionId = devices.OpenTransactionId return nil } From 359a38b26a164f430c79fe542babb77c6e48dcc3 Mon Sep 17 00:00:00 2001 From: Vivek Goyal Date: Wed, 3 Dec 2014 13:06:43 -0500 Subject: [PATCH 521/592] devmapper: Use a common delete function for all device deletion operation Right now setupBaseImage() uses deleteDevice() to delete uninitialized base image while rest of the code uses DeleteDevice(). Change it and use a common function everywhere for the sake of uniformity. I can't see what harm can be done by doing little extra locking done by DeleteDevice(). 
Signed-off-by: Vivek Goyal --- daemon/graphdriver/devmapper/deviceset.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/daemon/graphdriver/devmapper/deviceset.go b/daemon/graphdriver/devmapper/deviceset.go index f132575355..802d2e6562 100644 --- a/daemon/graphdriver/devmapper/deviceset.go +++ b/daemon/graphdriver/devmapper/deviceset.go @@ -516,7 +516,7 @@ func (devices *DeviceSet) setupBaseImage() error { if oldInfo != nil && !oldInfo.Initialized { log.Debugf("Removing uninitialized base image") - if err := devices.deleteDevice(oldInfo); err != nil { + if err := devices.DeleteDevice(""); err != nil { return err } } From 4d39e056aac2fadffcb8560101f3c31a2b7db3ae Mon Sep 17 00:00:00 2001 From: Vivek Goyal Date: Wed, 3 Dec 2014 13:06:43 -0500 Subject: [PATCH 522/592] devmapper: Keep track of used device Ids in a bitmap Currently devicemapper backend does not keep track of used device Ids in the pool. It tries a device Id and if that device Id exists in pool, it tries with a different Id and keeps on doing this in a loop till it succeeds. This worked fine so far but now we are moving to transaction based device creation and deletion. We will keep deviceId information in transaction which will be rolled back if docker crashed before transaction was complete. If we store a deviceId in transaction and later figure out it already existed in pool and docker crashed, then we will rollback and remove that existing device Id from pool (which we should not have). That means, we should know free device Id in pool in advance before we put that device Id in transaction. Hence this patch creates a bitmap (one bit each for a deviceId), and sets the bit if device Id is used otherwise resets it. This patch is just preparing the ground right now. Actual usage will follow in later patches. 
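The bit-per-id bookkeeping described above is small enough to show in isolation. A minimal sketch; the package layout and main function are illustrative, but the bit arithmetic and map sizing match the patch.

```go
package main

import "fmt"

const maxDeviceId = 0xffffff // thin-pool device ids are 24-bit

var deviceIdMap = make([]byte, (maxDeviceId+1)/8) // one bit per device id

func markDeviceIdUsed(id int) { deviceIdMap[id/8] |= 1 << uint(id%8) }
func markDeviceIdFree(id int) { deviceIdMap[id/8] &^= 1 << uint(id%8) }
func isDeviceIdFree(id int) bool {
	return deviceIdMap[id/8]&(1<<uint(id%8)) == 0
}

func main() {
	markDeviceIdUsed(42)
	fmt.Println(isDeviceIdFree(42), isDeviceIdFree(43)) // false true
	markDeviceIdFree(42)
	fmt.Println(isDeviceIdFree(42)) // true
}
```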
Signed-off-by: Vivek Goyal --- daemon/graphdriver/devmapper/deviceset.go | 32 +++++++++++++++++++++-- 1 file changed, 30 insertions(+), 2 deletions(-) diff --git a/daemon/graphdriver/devmapper/deviceset.go b/daemon/graphdriver/devmapper/deviceset.go index 802d2e6562..d721861282 100644 --- a/daemon/graphdriver/devmapper/deviceset.go +++ b/daemon/graphdriver/devmapper/deviceset.go @@ -30,7 +30,9 @@ var ( DefaultDataLoopbackSize int64 = 100 * 1024 * 1024 * 1024 DefaultMetaDataLoopbackSize int64 = 2 * 1024 * 1024 * 1024 DefaultBaseFsSize uint64 = 10 * 1024 * 1024 * 1024 - DefaultThinpBlockSize uint32 = 128 // 64K = 128 512b sectors + DefaultThinpBlockSize uint32 = 128 // 64K = 128 512b sectors + MaxDeviceId int = 0xffffff // 24 bit, pool limit + DeviceIdMapSz int = (MaxDeviceId + 1) / 8 ) const deviceSetMetaFile string = "deviceset-metadata" @@ -75,6 +77,7 @@ type DeviceSet struct { devicePrefix string TransactionId uint64 `json:"-"` NextDeviceId int `json:"next_device_id"` + deviceIdMap []byte // Options dataLoopbackSize int64 @@ -261,6 +264,30 @@ func (devices *DeviceSet) saveMetadata(info *DevInfo) error { return nil } +func (devices *DeviceSet) markDeviceIdUsed(deviceId int) { + var mask byte + i := deviceId % 8 + mask = 1 << uint(i) + devices.deviceIdMap[deviceId/8] = devices.deviceIdMap[deviceId/8] | mask +} + +func (devices *DeviceSet) markDeviceIdFree(deviceId int) { + var mask byte + i := deviceId % 8 + mask = ^(1 << uint(i)) + devices.deviceIdMap[deviceId/8] = devices.deviceIdMap[deviceId/8] & mask +} + +func (devices *DeviceSet) isDeviceIdFree(deviceId int) bool { + var mask byte + i := deviceId % 8 + mask = (1 << uint(i)) + if (devices.deviceIdMap[deviceId/8] & mask) != 0 { + return false + } + return true +} + func (devices *DeviceSet) lookupDevice(hash string) (*DevInfo, error) { devices.devicesLock.Lock() defer devices.devicesLock.Unlock() @@ -407,7 +434,7 @@ func (devices *DeviceSet) initMetaData() error { func (devices *DeviceSet) incNextDeviceId() { // Ids are 24bit, so wrap around - devices.NextDeviceId = (devices.NextDeviceId + 1) & 0xffffff + devices.NextDeviceId = (devices.NextDeviceId + 1) & MaxDeviceId } func (devices *DeviceSet) createDevice(deviceId *int) error { @@ -1333,6 +1360,7 @@ func NewDeviceSet(root string, doInit bool, options []string) (*DeviceSet, error filesystem: "ext4", doBlkDiscard: true, thinpBlockSize: DefaultThinpBlockSize, + deviceIdMap: make([]byte, DeviceIdMapSz), } foundBlkDiscard := false From 39dc7829dea87d4be8e6e9b2a598fb354ebf4ba0 Mon Sep 17 00:00:00 2001 From: Vivek Goyal Date: Wed, 3 Dec 2014 13:06:43 -0500 Subject: [PATCH 523/592] devmapper: Construct initial device Id map from device meta files When docker starts, build a used/free Device Id map from the per device meta files we already have. These meta files have the data which device Ids are in use. Parse these files and mark device as used in the map. Signed-off-by: Vivek Goyal --- daemon/graphdriver/devmapper/deviceset.go | 63 +++++++++++++++++++++++ 1 file changed, 63 insertions(+) diff --git a/daemon/graphdriver/devmapper/deviceset.go b/daemon/graphdriver/devmapper/deviceset.go index d721861282..fd4a11e5e2 100644 --- a/daemon/graphdriver/devmapper/deviceset.go +++ b/daemon/graphdriver/devmapper/deviceset.go @@ -303,6 +303,65 @@ func (devices *DeviceSet) lookupDevice(hash string) (*DevInfo, error) { return info, nil } +func (devices *DeviceSet) deviceFileWalkFunction(path string, finfo os.FileInfo) error { + + // Skip some of the meta files which are not device files. 
+ if strings.HasSuffix(finfo.Name(), ".migrated") { + log.Debugf("Skipping file %s", path) + return nil + } + + if finfo.Name() == deviceSetMetaFile { + log.Debugf("Skipping file %s", path) + return nil + } + + log.Debugf("Loading data for file %s", path) + + hash := finfo.Name() + if hash == "base" { + hash = "" + } + + dinfo := devices.loadMetadata(hash) + if dinfo == nil { + return fmt.Errorf("Error loading device metadata file %s", hash) + } + + if dinfo.DeviceId > MaxDeviceId { + log.Errorf("Warning: Ignoring Invalid DeviceId=%d", dinfo.DeviceId) + return nil + } + + devices.Lock() + devices.markDeviceIdUsed(dinfo.DeviceId) + devices.Unlock() + + log.Debugf("Added deviceId=%d to DeviceIdMap", dinfo.DeviceId) + return nil +} + +func (devices *DeviceSet) constructDeviceIdMap() error { + log.Debugf("[deviceset] constructDeviceIdMap()") + defer log.Debugf("[deviceset] constructDeviceIdMap() END") + + var scan = func(path string, info os.FileInfo, err error) error { + if err != nil { + log.Debugf("Can't walk the file %s", path) + return nil + } + + // Skip any directories + if info.IsDir() { + return nil + } + + return devices.deviceFileWalkFunction(path, info) + } + + return filepath.Walk(devices.metadataDir(), scan) +} + func (devices *DeviceSet) unregisterDevice(id int, hash string) error { log.Debugf("unregisterDevice(%v, %v)", id, hash) info := &DevInfo{ @@ -429,6 +488,10 @@ func (devices *DeviceSet) initMetaData() error { } devices.TransactionId = transactionId + + if err := devices.constructDeviceIdMap(); err != nil { + return err + } return nil } From a44c23fe6604d1de59c64bbb9dc234c7c3dbede9 Mon Sep 17 00:00:00 2001 From: Vivek Goyal Date: Wed, 3 Dec 2014 13:06:43 -0500 Subject: [PATCH 524/592] devmapper: Provide a helper function getNextDeviceId() Right now we are accessing devices.NextDeviceId directly and also incrementing it at various places. Instead provide a helper function which is responsile for incrementing NextDeviceId and return next deviceId. This is just code structuring. This will help later once we convert this function to find a free device Id and it goes through a bitmap of used/free device Ids. Signed-off-by: Vivek Goyal --- daemon/graphdriver/devmapper/deviceset.go | 17 +++++++++-------- 1 file changed, 9 insertions(+), 8 deletions(-) diff --git a/daemon/graphdriver/devmapper/deviceset.go b/daemon/graphdriver/devmapper/deviceset.go index fd4a11e5e2..bf24b5164c 100644 --- a/daemon/graphdriver/devmapper/deviceset.go +++ b/daemon/graphdriver/devmapper/deviceset.go @@ -500,13 +500,17 @@ func (devices *DeviceSet) incNextDeviceId() { devices.NextDeviceId = (devices.NextDeviceId + 1) & MaxDeviceId } +func (devices *DeviceSet) getNextDeviceId() int { + devices.incNextDeviceId() + return devices.NextDeviceId +} + func (devices *DeviceSet) createDevice(deviceId *int) error { for { if err := devicemapper.CreateDevice(devices.getPoolDevName(), *deviceId); err != nil { if devicemapper.DeviceIdExists(err) { // Device Id already exists. Try a new one. 
- devices.incNextDeviceId() - *deviceId = devices.NextDeviceId + *deviceId = devices.getNextDeviceId() continue } log.Debugf("Error creating device: %s", err) @@ -514,12 +518,11 @@ func (devices *DeviceSet) createDevice(deviceId *int) error { } break } - devices.incNextDeviceId() return nil } func (devices *DeviceSet) createRegisterDevice(hash string) (*DevInfo, error) { - deviceId := devices.NextDeviceId + deviceId := devices.getNextDeviceId() if err := devices.createDevice(&deviceId); err != nil { return nil, err } @@ -548,8 +551,7 @@ func (devices *DeviceSet) createSnapDevice(baseInfo *DevInfo, deviceId *int) err if err := devicemapper.CreateSnapDevice(devices.getPoolDevName(), *deviceId, baseInfo.Name(), baseInfo.DeviceId); err != nil { if devicemapper.DeviceIdExists(err) { // Device Id already exists. Try a new one. - devices.incNextDeviceId() - *deviceId = devices.NextDeviceId + *deviceId = devices.getNextDeviceId() continue } log.Debugf("Error creating snap device: %s", err) @@ -557,12 +559,11 @@ func (devices *DeviceSet) createSnapDevice(baseInfo *DevInfo, deviceId *int) err } break } - devices.incNextDeviceId() return nil } func (devices *DeviceSet) createRegisterSnapDevice(hash string, baseInfo *DevInfo) error { - deviceId := devices.NextDeviceId + deviceId := devices.getNextDeviceId() if err := devices.createSnapDevice(baseInfo, &deviceId); err != nil { log.Debugf("Error creating snap device: %s", err) return err From 14d0dd855ee1e7cd1a3185c3d5a00e7afccb5c43 Mon Sep 17 00:00:00 2001 From: Vivek Goyal Date: Wed, 3 Dec 2014 13:06:43 -0500 Subject: [PATCH 525/592] devmapper: Open code createDevice() and createSnapDevice() Open code createDevice() and createSnapDevice() and move all the logic in the caller. This is a sheer code reorganization so that all device Id allocation logic is in one function. That way in case of erros, one can easily cleanup and mark device Id free again. (Later patches benefit from it). Signed-off-by: Vivek Goyal --- daemon/graphdriver/devmapper/deviceset.go | 36 ++++++----------------- 1 file changed, 9 insertions(+), 27 deletions(-) diff --git a/daemon/graphdriver/devmapper/deviceset.go b/daemon/graphdriver/devmapper/deviceset.go index bf24b5164c..17fbf715a3 100644 --- a/daemon/graphdriver/devmapper/deviceset.go +++ b/daemon/graphdriver/devmapper/deviceset.go @@ -505,27 +505,20 @@ func (devices *DeviceSet) getNextDeviceId() int { return devices.NextDeviceId } -func (devices *DeviceSet) createDevice(deviceId *int) error { +func (devices *DeviceSet) createRegisterDevice(hash string) (*DevInfo, error) { + deviceId := devices.getNextDeviceId() for { - if err := devicemapper.CreateDevice(devices.getPoolDevName(), *deviceId); err != nil { + if err := devicemapper.CreateDevice(devices.getPoolDevName(), deviceId); err != nil { if devicemapper.DeviceIdExists(err) { // Device Id already exists. Try a new one. 
- *deviceId = devices.getNextDeviceId() + deviceId = devices.getNextDeviceId() continue } log.Debugf("Error creating device: %s", err) - return err + return nil, err } break } - return nil -} - -func (devices *DeviceSet) createRegisterDevice(hash string) (*DevInfo, error) { - deviceId := devices.getNextDeviceId() - if err := devices.createDevice(&deviceId); err != nil { - return nil, err - } transactionId := devices.allocateTransactionId() log.Debugf("Registering device (id %v) with FS size %v", deviceId, devices.baseFsSize) @@ -543,15 +536,13 @@ func (devices *DeviceSet) createRegisterDevice(hash string) (*DevInfo, error) { return info, nil } -func (devices *DeviceSet) createSnapDevice(baseInfo *DevInfo, deviceId *int) error { - log.Debugf("[deviceset] createSnapDevice() DeviceId=%d", *deviceId) - defer log.Debugf("[deviceset] createSnapDevice() END DeviceId=%d", *deviceId) - +func (devices *DeviceSet) createRegisterSnapDevice(hash string, baseInfo *DevInfo) error { + deviceId := devices.getNextDeviceId() for { - if err := devicemapper.CreateSnapDevice(devices.getPoolDevName(), *deviceId, baseInfo.Name(), baseInfo.DeviceId); err != nil { + if err := devicemapper.CreateSnapDevice(devices.getPoolDevName(), deviceId, baseInfo.Name(), baseInfo.DeviceId); err != nil { if devicemapper.DeviceIdExists(err) { // Device Id already exists. Try a new one. - *deviceId = devices.getNextDeviceId() + deviceId = devices.getNextDeviceId() continue } log.Debugf("Error creating snap device: %s", err) @@ -559,15 +550,6 @@ func (devices *DeviceSet) createSnapDevice(baseInfo *DevInfo, deviceId *int) err } break } - return nil -} - -func (devices *DeviceSet) createRegisterSnapDevice(hash string, baseInfo *DevInfo) error { - deviceId := devices.getNextDeviceId() - if err := devices.createSnapDevice(baseInfo, &deviceId); err != nil { - log.Debugf("Error creating snap device: %s", err) - return err - } transactionId := devices.allocateTransactionId() if _, err := devices.registerDevice(deviceId, hash, baseInfo.Size, transactionId); err != nil { From e28a419e1197bf50bbb378b02f0226c3115edeaa Mon Sep 17 00:00:00 2001 From: Vivek Goyal Date: Wed, 3 Dec 2014 13:06:43 -0500 Subject: [PATCH 526/592] devmapper: Find a free device Id to use for device creation Finally, we seem to have all the bits to keep track of all used device Ids and find a free device Id to use when creating a new device. Start using it. Ideally we should completely move away from retry logic when pool returns -EEXISTS. For now I have retained that logic and I simply output a warning. When things are stable, we should be able to get rid of it. 
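Stripped of the DeviceSet plumbing, the search added in the following diff is a wrap-around scan of that bitmap. A simplified standalone sketch, with hypothetical free-standing names in place of the DeviceSet methods:

```go
package main

import (
	"errors"
	"fmt"
)

const maxDeviceId = 0xffffff

var (
	deviceIdMap  = make([]byte, (maxDeviceId+1)/8)
	nextDeviceId int
)

func isDeviceIdFree(id int) bool { return deviceIdMap[id/8]&(1<<uint(id%8)) == 0 }
func markDeviceIdUsed(id int)    { deviceIdMap[id/8] |= 1 << uint(id%8) }

// getNextFreeDeviceId scans at most the whole 24-bit id space, starting just
// past the last id handed out, and reserves the first free id it finds.
func getNextFreeDeviceId() (int, error) {
	for i := 0; i <= maxDeviceId; i++ {
		nextDeviceId = (nextDeviceId + 1) & maxDeviceId
		if isDeviceIdFree(nextDeviceId) {
			markDeviceIdUsed(nextDeviceId)
			return nextDeviceId, nil
		}
	}
	return 0, errors.New("unable to find a free device id")
}

func main() {
	markDeviceIdUsed(1) // pretend id 1 is already in use
	id, _ := getNextFreeDeviceId()
	fmt.Println(id) // 2
}
```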
Signed-off-by: Vivek Goyal --- daemon/graphdriver/devmapper/deviceset.go | 54 +++++++++++++++++++---- 1 file changed, 46 insertions(+), 8 deletions(-) diff --git a/daemon/graphdriver/devmapper/deviceset.go b/daemon/graphdriver/devmapper/deviceset.go index 17fbf715a3..04437ce9f4 100644 --- a/daemon/graphdriver/devmapper/deviceset.go +++ b/daemon/graphdriver/devmapper/deviceset.go @@ -500,21 +500,41 @@ func (devices *DeviceSet) incNextDeviceId() { devices.NextDeviceId = (devices.NextDeviceId + 1) & MaxDeviceId } -func (devices *DeviceSet) getNextDeviceId() int { +func (devices *DeviceSet) getNextFreeDeviceId() (int, error) { devices.incNextDeviceId() - return devices.NextDeviceId + for i := 0; i <= MaxDeviceId; i++ { + if devices.isDeviceIdFree(devices.NextDeviceId) { + devices.markDeviceIdUsed(devices.NextDeviceId) + return devices.NextDeviceId, nil + } + devices.incNextDeviceId() + } + + return 0, fmt.Errorf("Unable to find a free device Id") } func (devices *DeviceSet) createRegisterDevice(hash string) (*DevInfo, error) { - deviceId := devices.getNextDeviceId() + deviceId, err := devices.getNextFreeDeviceId() + if err != nil { + return nil, err + } + for { if err := devicemapper.CreateDevice(devices.getPoolDevName(), deviceId); err != nil { if devicemapper.DeviceIdExists(err) { - // Device Id already exists. Try a new one. - deviceId = devices.getNextDeviceId() + // Device Id already exists. This should not + // happen. Now we have a mechianism to find + // a free device Id. So something is not right. + // Give a warning and continue. + log.Errorf("Warning: Device Id %d exists in pool but it is supposed to be unused", deviceId) + deviceId, err = devices.getNextFreeDeviceId() + if err != nil { + return nil, err + } continue } log.Debugf("Error creating device: %s", err) + devices.markDeviceIdFree(deviceId) return nil, err } break @@ -525,27 +545,41 @@ func (devices *DeviceSet) createRegisterDevice(hash string) (*DevInfo, error) { info, err := devices.registerDevice(deviceId, hash, devices.baseFsSize, transactionId) if err != nil { _ = devicemapper.DeleteDevice(devices.getPoolDevName(), deviceId) + devices.markDeviceIdFree(deviceId) return nil, err } if err := devices.updatePoolTransactionId(); err != nil { devices.unregisterDevice(deviceId, hash) devicemapper.DeleteDevice(devices.getPoolDevName(), deviceId) + devices.markDeviceIdFree(deviceId) return nil, err } return info, nil } func (devices *DeviceSet) createRegisterSnapDevice(hash string, baseInfo *DevInfo) error { - deviceId := devices.getNextDeviceId() + deviceId, err := devices.getNextFreeDeviceId() + if err != nil { + return err + } + for { if err := devicemapper.CreateSnapDevice(devices.getPoolDevName(), deviceId, baseInfo.Name(), baseInfo.DeviceId); err != nil { if devicemapper.DeviceIdExists(err) { - // Device Id already exists. Try a new one. - deviceId = devices.getNextDeviceId() + // Device Id already exists. This should not + // happen. Now we have a mechianism to find + // a free device Id. So something is not right. + // Give a warning and continue. 
+ log.Errorf("Warning: Device Id %d exists in pool but it is supposed to be unused", deviceId) + deviceId, err = devices.getNextFreeDeviceId() + if err != nil { + return err + } continue } log.Debugf("Error creating snap device: %s", err) + devices.markDeviceIdFree(deviceId) return err } break @@ -554,6 +588,7 @@ func (devices *DeviceSet) createRegisterSnapDevice(hash string, baseInfo *DevInf transactionId := devices.allocateTransactionId() if _, err := devices.registerDevice(deviceId, hash, baseInfo.Size, transactionId); err != nil { devicemapper.DeleteDevice(devices.getPoolDevName(), deviceId) + devices.markDeviceIdFree(deviceId) log.Debugf("Error registering device: %s", err) return err } @@ -561,6 +596,7 @@ func (devices *DeviceSet) createRegisterSnapDevice(hash string, baseInfo *DevInf if err := devices.updatePoolTransactionId(); err != nil { devices.unregisterDevice(deviceId, hash) devicemapper.DeleteDevice(devices.getPoolDevName(), deviceId) + devices.markDeviceIdFree(deviceId) return err } return nil @@ -966,6 +1002,8 @@ func (devices *DeviceSet) deleteDevice(info *DevInfo) error { return err } + devices.markDeviceIdFree(info.DeviceId) + return nil } From c115c4aa45ba82f27859b0afba5724d437857879 Mon Sep 17 00:00:00 2001 From: Vivek Goyal Date: Wed, 3 Dec 2014 13:06:43 -0500 Subject: [PATCH 527/592] devmapper: Use transaction mechanism during device or snap device creation Finally this patch uses the notion of transaction for device or snapshot device creation. Following is sequence of event. - Open a trasaction and save details in a file. - Create a new device/snapshot device - If a new device id is used, refresh transaction with new device id details. - Create device metadata file - Close transaction. If docker crashes anywhere in between without closing transaction, then upon next start, docker will figure out that there was a pending transaction and it will roll back transaction. That is it will do following. - Delete Device from pool - Delete device metadata file - Remove transaction file to mark no transaction is pending. 
Signed-off-by: Vivek Goyal --- daemon/graphdriver/devmapper/deviceset.go | 149 +++++++++++++++++++++- 1 file changed, 142 insertions(+), 7 deletions(-) diff --git a/daemon/graphdriver/devmapper/deviceset.go b/daemon/graphdriver/devmapper/deviceset.go index 04437ce9f4..1e388baa60 100644 --- a/daemon/graphdriver/devmapper/deviceset.go +++ b/daemon/graphdriver/devmapper/deviceset.go @@ -36,9 +36,12 @@ var ( ) const deviceSetMetaFile string = "deviceset-metadata" +const transactionMetaFile string = "transaction-metadata" type Transaction struct { - OpenTransactionId uint64 `json:"-"` + OpenTransactionId uint64 `json:"open_transaction_id"` + DeviceIdHash string `json:"device_hash"` + DeviceId int `json:"device_id"` } type DevInfo struct { @@ -149,6 +152,10 @@ func (devices *DeviceSet) metadataFile(info *DevInfo) string { return path.Join(devices.metadataDir(), file) } +func (devices *DeviceSet) transactionMetaFile() string { + return path.Join(devices.metadataDir(), transactionMetaFile) +} + func (devices *DeviceSet) deviceSetMetaFile() string { return path.Join(devices.metadataDir(), deviceSetMetaFile) } @@ -492,6 +499,10 @@ func (devices *DeviceSet) initMetaData() error { if err := devices.constructDeviceIdMap(); err != nil { return err } + + if err := devices.processPendingTransaction(); err != nil { + return err + } return nil } @@ -519,6 +530,12 @@ func (devices *DeviceSet) createRegisterDevice(hash string) (*DevInfo, error) { return nil, err } + if err := devices.openTransaction(hash, deviceId); err != nil { + log.Debugf("Error opening transaction hash = %s deviceId = %d", hash, deviceId) + devices.markDeviceIdFree(deviceId) + return nil, err + } + for { if err := devicemapper.CreateDevice(devices.getPoolDevName(), deviceId); err != nil { if devicemapper.DeviceIdExists(err) { @@ -531,6 +548,8 @@ func (devices *DeviceSet) createRegisterDevice(hash string) (*DevInfo, error) { if err != nil { return nil, err } + // Save new device id into transaction + devices.refreshTransaction(deviceId) continue } log.Debugf("Error creating device: %s", err) @@ -540,16 +559,15 @@ func (devices *DeviceSet) createRegisterDevice(hash string) (*DevInfo, error) { break } - transactionId := devices.allocateTransactionId() log.Debugf("Registering device (id %v) with FS size %v", deviceId, devices.baseFsSize) - info, err := devices.registerDevice(deviceId, hash, devices.baseFsSize, transactionId) + info, err := devices.registerDevice(deviceId, hash, devices.baseFsSize, devices.OpenTransactionId) if err != nil { _ = devicemapper.DeleteDevice(devices.getPoolDevName(), deviceId) devices.markDeviceIdFree(deviceId) return nil, err } - if err := devices.updatePoolTransactionId(); err != nil { + if err := devices.closeTransaction(); err != nil { devices.unregisterDevice(deviceId, hash) devicemapper.DeleteDevice(devices.getPoolDevName(), deviceId) devices.markDeviceIdFree(deviceId) @@ -564,6 +582,12 @@ func (devices *DeviceSet) createRegisterSnapDevice(hash string, baseInfo *DevInf return err } + if err := devices.openTransaction(hash, deviceId); err != nil { + log.Debugf("Error opening transaction hash = %s deviceId = %d", hash, deviceId) + devices.markDeviceIdFree(deviceId) + return err + } + for { if err := devicemapper.CreateSnapDevice(devices.getPoolDevName(), deviceId, baseInfo.Name(), baseInfo.DeviceId); err != nil { if devicemapper.DeviceIdExists(err) { @@ -576,6 +600,8 @@ func (devices *DeviceSet) createRegisterSnapDevice(hash string, baseInfo *DevInf if err != nil { return err } + // Save new device id into 
transaction + devices.refreshTransaction(deviceId) continue } log.Debugf("Error creating snap device: %s", err) @@ -585,15 +611,14 @@ func (devices *DeviceSet) createRegisterSnapDevice(hash string, baseInfo *DevInf break } - transactionId := devices.allocateTransactionId() - if _, err := devices.registerDevice(deviceId, hash, baseInfo.Size, transactionId); err != nil { + if _, err := devices.registerDevice(deviceId, hash, baseInfo.Size, devices.OpenTransactionId); err != nil { devicemapper.DeleteDevice(devices.getPoolDevName(), deviceId) devices.markDeviceIdFree(deviceId) log.Debugf("Error registering device: %s", err) return err } - if err := devices.updatePoolTransactionId(); err != nil { + if err := devices.closeTransaction(); err != nil { devices.unregisterDevice(deviceId, hash) devicemapper.DeleteDevice(devices.getPoolDevName(), deviceId) devices.markDeviceIdFree(deviceId) @@ -775,6 +800,90 @@ func (devices *DeviceSet) ResizePool(size int64) error { return nil } +func (devices *DeviceSet) loadTransactionMetaData() error { + jsonData, err := ioutil.ReadFile(devices.transactionMetaFile()) + if err != nil { + // There is no active transaction. This will be the case + // during upgrade. + if os.IsNotExist(err) { + devices.OpenTransactionId = devices.TransactionId + return nil + } + return err + } + + json.Unmarshal(jsonData, &devices.Transaction) + return nil +} + +func (devices *DeviceSet) saveTransactionMetaData() error { + jsonData, err := json.Marshal(&devices.Transaction) + if err != nil { + return fmt.Errorf("Error encoding metadata to json: %s", err) + } + + return devices.writeMetaFile(jsonData, devices.transactionMetaFile()) +} + +func (devices *DeviceSet) removeTransactionMetaData() error { + if err := os.RemoveAll(devices.transactionMetaFile()); err != nil { + return err + } + return nil +} + +func (devices *DeviceSet) rollbackTransaction() error { + log.Debugf("Rolling back open transaction: TransactionId=%d hash=%s device_id=%d", devices.OpenTransactionId, devices.DeviceIdHash, devices.DeviceId) + + // A device id might have already been deleted before transaction + // closed. In that case this call will fail. Just leave a message + // in case of failure. + if err := devicemapper.DeleteDevice(devices.getPoolDevName(), devices.DeviceId); err != nil { + log.Errorf("Warning: Unable to delete device: %s", err) + } + + dinfo := &DevInfo{Hash: devices.DeviceIdHash} + if err := devices.removeMetadata(dinfo); err != nil { + log.Errorf("Warning: Unable to remove meta data: %s", err) + } else { + devices.markDeviceIdFree(devices.DeviceId) + } + + if err := devices.removeTransactionMetaData(); err != nil { + log.Errorf("Warning: Unable to remove transaction meta file %s: %s", devices.transactionMetaFile(), err) + } + + return nil +} + +func (devices *DeviceSet) processPendingTransaction() error { + if err := devices.loadTransactionMetaData(); err != nil { + return err + } + + // If there was open transaction but pool transaction Id is same + // as open transaction Id, nothing to roll back. + if devices.TransactionId == devices.OpenTransactionId { + return nil + } + + // If open transaction Id is less than pool transaction Id, something + // is wrong. Bail out. + if devices.OpenTransactionId < devices.TransactionId { + log.Errorf("Warning: Open Transaction id %d is less than pool transaction id %d", devices.OpenTransactionId, devices.TransactionId) + return nil + } + + // Pool transaction Id is not same as open transaction. There is + // a transaction which was not completed. 
+ if err := devices.rollbackTransaction(); err != nil { + return fmt.Errorf("Rolling back open transaction failed: %s", err) + } + + devices.OpenTransactionId = devices.TransactionId + return nil +} + func (devices *DeviceSet) loadDeviceSetMetaData() error { jsonData, err := ioutil.ReadFile(devices.deviceSetMetaFile()) if err != nil { @@ -798,6 +907,32 @@ func (devices *DeviceSet) saveDeviceSetMetaData() error { return devices.writeMetaFile(jsonData, devices.deviceSetMetaFile()) } +func (devices *DeviceSet) openTransaction(hash string, DeviceId int) error { + devices.allocateTransactionId() + devices.DeviceIdHash = hash + devices.DeviceId = DeviceId + if err := devices.saveTransactionMetaData(); err != nil { + return fmt.Errorf("Error saving transaction meta data: %s", err) + } + return nil +} + +func (devices *DeviceSet) refreshTransaction(DeviceId int) error { + devices.DeviceId = DeviceId + if err := devices.saveTransactionMetaData(); err != nil { + return fmt.Errorf("Error saving transaction meta data: %s", err) + } + return nil +} + +func (devices *DeviceSet) closeTransaction() error { + if err := devices.updatePoolTransactionId(); err != nil { + log.Debugf("Failed to close Transaction") + return err + } + return nil +} + func (devices *DeviceSet) initDevmapper(doInit bool) error { // give ourselves to libdm as a log handler devicemapper.LogInit(devices) From 17b75a21a667a27a9a27565ab282cd615dbdb66e Mon Sep 17 00:00:00 2001 From: Vivek Goyal Date: Wed, 3 Dec 2014 13:06:43 -0500 Subject: [PATCH 528/592] devmapper: Use transactions during device deletion Use transaction logic during device deletion and do rollback if transaction is not complete. Following is the sequence of events. - Open transaction and save to metafile - Delete device from pool - Delete device metadata file from disk - Close Transaction If docker crashes without closing transaction then rollback will take place upon next docker start. Signed-off-by: Vivek Goyal --- daemon/graphdriver/devmapper/deviceset.go | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/daemon/graphdriver/devmapper/deviceset.go b/daemon/graphdriver/devmapper/deviceset.go index 1e388baa60..71502a483c 100644 --- a/daemon/graphdriver/devmapper/deviceset.go +++ b/daemon/graphdriver/devmapper/deviceset.go @@ -1128,6 +1128,11 @@ func (devices *DeviceSet) deleteDevice(info *DevInfo) error { } } + if err := devices.openTransaction(info.Hash, info.DeviceId); err != nil { + log.Debugf("Error opening transaction hash = %s deviceId = %d", "", info.DeviceId) + return err + } + if err := devicemapper.DeleteDevice(devices.getPoolDevName(), info.DeviceId); err != nil { log.Debugf("Error deleting device: %s", err) return err @@ -1137,6 +1142,10 @@ func (devices *DeviceSet) deleteDevice(info *DevInfo) error { return err } + if err := devices.closeTransaction(); err != nil { + return err + } + devices.markDeviceIdFree(info.DeviceId) return nil From 92fd49f7ca42cf5d97825853034eac685b90fc1d Mon Sep 17 00:00:00 2001 From: Josh Hawn Date: Wed, 3 Dec 2014 10:35:20 -0800 Subject: [PATCH 529/592] Correct TarSum benchmarks: 9kTar and 9kTarGzip These two cases did not actually read the same content with each iteration of the benchmark. After the first read, the buffer was consumed. This patch corrects this by using a bytes.Reader and seeking to the beginning of the buffer at the beginning of each iteration. Unfortunately, this benchmark was not actually as fast as we believed. But the new results do bring its results closer to those of the other benchmarks. 
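The fix is the standard rewindable-reader pattern: a bytes.Buffer is drained by the first iteration, while a bytes.Reader can be rewound with Seek so every iteration reads the same content. A minimal self-contained benchmark illustrating the pattern (package name and fixture size are placeholders):

```go
package tarsum_test // hypothetical package for the sketch

import (
	"bytes"
	"io"
	"io/ioutil"
	"testing"
)

func BenchmarkRewoundReader(b *testing.B) {
	data := bytes.Repeat([]byte("x"), 9*1024) // stand-in for the 9k tar fixture
	reader := bytes.NewReader(data)

	b.SetBytes(int64(len(data)))
	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		// Rewind so every iteration reads the full content again; a
		// bytes.Buffer would be empty from the second iteration on.
		reader.Seek(0, 0)
		io.Copy(ioutil.Discard, reader)
	}
}
```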
Docker-DCO-1.1-Signed-off-by: Josh Hawn (github: jlhawn) --- pkg/tarsum/tarsum_test.go | 10 ++++++++-- 1 file changed, 8 insertions(+), 2 deletions(-) diff --git a/pkg/tarsum/tarsum_test.go b/pkg/tarsum/tarsum_test.go index 41e1b9b7c4..4e1f30e469 100644 --- a/pkg/tarsum/tarsum_test.go +++ b/pkg/tarsum/tarsum_test.go @@ -486,10 +486,13 @@ func Benchmark9kTar(b *testing.B) { n, err := io.Copy(buf, fh) fh.Close() + reader := bytes.NewReader(buf.Bytes()) + b.SetBytes(n) b.ResetTimer() for i := 0; i < b.N; i++ { - ts, err := NewTarSum(buf, true, Version0) + reader.Seek(0, 0) + ts, err := NewTarSum(reader, true, Version0) if err != nil { b.Error(err) return @@ -509,10 +512,13 @@ func Benchmark9kTarGzip(b *testing.B) { n, err := io.Copy(buf, fh) fh.Close() + reader := bytes.NewReader(buf.Bytes()) + b.SetBytes(n) b.ResetTimer() for i := 0; i < b.N; i++ { - ts, err := NewTarSum(buf, false, Version0) + reader.Seek(0, 0) + ts, err := NewTarSum(reader, false, Version0) if err != nil { b.Error(err) return From 48ec176cd51da20e23564941da2d9906a7779d28 Mon Sep 17 00:00:00 2001 From: Andy Goldstein Date: Wed, 3 Dec 2014 15:36:57 -0500 Subject: [PATCH 530/592] Fix invalid argument error on push With 32ba6ab from #9261, TempArchive now closes the underlying file and cleans it up as soon as the file's contents have been read. When pushing an image, PushImageLayerRegistry attempts to call Close() on the layer, which is a TempArchive that has already been closed. In this situation, Close() returns an "invalid argument" error. Add a Close method to TempArchive that does a no-op if the underlying file has already been closed. Signed-off-by: Andy Goldstein --- pkg/archive/archive.go | 21 +++++++++++++++++---- pkg/archive/archive_test.go | 16 ++++++++++++++++ 2 files changed, 33 insertions(+), 4 deletions(-) diff --git a/pkg/archive/archive.go b/pkg/archive/archive.go index 3783e72d91..ead85be0bf 100644 --- a/pkg/archive/archive.go +++ b/pkg/archive/archive.go @@ -771,20 +771,33 @@ func NewTempArchive(src Archive, dir string) (*TempArchive, error) { return nil, err } size := st.Size() - return &TempArchive{f, size, 0}, nil + return &TempArchive{File: f, Size: size}, nil } type TempArchive struct { *os.File - Size int64 // Pre-computed from Stat().Size() as a convenience - read int64 + Size int64 // Pre-computed from Stat().Size() as a convenience + read int64 + closed bool +} + +// Close closes the underlying file if it's still open, or does a no-op +// to allow callers to try to close the TempArchive multiple times safely. 
+func (archive *TempArchive) Close() error { + if archive.closed { + return nil + } + + archive.closed = true + + return archive.File.Close() } func (archive *TempArchive) Read(data []byte) (int, error) { n, err := archive.File.Read(data) archive.read += int64(n) if err != nil || archive.read == archive.Size { - archive.File.Close() + archive.Close() os.Remove(archive.File.Name()) } return n, err diff --git a/pkg/archive/archive_test.go b/pkg/archive/archive_test.go index 05362a21c9..fdba6fb87c 100644 --- a/pkg/archive/archive_test.go +++ b/pkg/archive/archive_test.go @@ -9,6 +9,7 @@ import ( "os/exec" "path" "path/filepath" + "strings" "syscall" "testing" "time" @@ -607,3 +608,18 @@ func TestUntarInvalidSymlink(t *testing.T) { } } } + +func TestTempArchiveCloseMultipleTimes(t *testing.T) { + reader := ioutil.NopCloser(strings.NewReader("hello")) + tempArchive, err := NewTempArchive(reader, "") + buf := make([]byte, 10) + n, err := tempArchive.Read(buf) + if n != 5 { + t.Fatalf("Expected to read 5 bytes. Read %d instead", n) + } + for i := 0; i < 3; i++ { + if err = tempArchive.Close(); err != nil { + t.Fatalf("i=%d. Unexpected error closing temp archive: %v", i, err) + } + } +} From 826f809d0959d59d4f478615be4dc9b7db7c46f1 Mon Sep 17 00:00:00 2001 From: Jean-Paul Calderone Date: Wed, 3 Dec 2014 17:13:48 -0500 Subject: [PATCH 531/592] Talk about URL support and the real meaning of `-` in the latest `fromSrc` API documentation. Signed-off-by: Jean-Paul Calderone --- docs/sources/reference/api/docker_remote_api_v1.16.md | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/docs/sources/reference/api/docker_remote_api_v1.16.md b/docs/sources/reference/api/docker_remote_api_v1.16.md index ed666fb3f3..9276f628c8 100644 --- a/docs/sources/reference/api/docker_remote_api_v1.16.md +++ b/docs/sources/reference/api/docker_remote_api_v1.16.md @@ -861,7 +861,8 @@ Create an image, either by pulling it from the registry or by importing it Query Parameters: - **fromImage** – name of the image to pull -- **fromSrc** – source to import, - means stdin +- **fromSrc** – source to import. The value may be a URL from which the image + can be retrieved or `-` to read the image from the request body. - **repo** – repository - **tag** – tag - **registry** – the registry to pull from From 0888c1880f836703987313049100bad9ce821584 Mon Sep 17 00:00:00 2001 From: Jean-Paul Calderone Date: Wed, 3 Dec 2014 17:14:08 -0500 Subject: [PATCH 532/592] Update the most recent released version of the docs as well. Signed-off-by: Jean-Paul Calderone --- docs/sources/reference/api/docker_remote_api_v1.15.md | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/docs/sources/reference/api/docker_remote_api_v1.15.md b/docs/sources/reference/api/docker_remote_api_v1.15.md index 6aff3607cc..e3b6fad74e 100644 --- a/docs/sources/reference/api/docker_remote_api_v1.15.md +++ b/docs/sources/reference/api/docker_remote_api_v1.15.md @@ -913,7 +913,8 @@ Create an image, either by pulling it from the registry or by importing it Query Parameters: - **fromImage** – name of the image to pull -- **fromSrc** – source to import, - means stdin +- **fromSrc** – source to import. The value may be a URL from which the image + can be retrieved or `-` to read the image from the request body. 
- **repo** – repository - **tag** – tag - **registry** – the registry to pull from From 269b37503e00f86b9215bc6fe27c502cf4d90072 Mon Sep 17 00:00:00 2001 From: Michael Crosby Date: Wed, 3 Dec 2014 14:53:11 -0800 Subject: [PATCH 533/592] Update libcontainer to 53eca435e63db58b06cf796d3a9 Signed-off-by: Michael Crosby --- project/vendor.sh | 2 +- .../github.com/docker/libcontainer/SPEC.md | 321 ++++++++++++++++++ .../github.com/docker/libcontainer/config.go | 10 + .../libcontainer/integration/exec_test.go | 21 ++ .../libcontainer/integration/template_test.go | 9 + .../docker/libcontainer/label/label.go | 4 + .../libcontainer/label/label_selinux.go | 8 + .../docker/libcontainer/namespaces/init.go | 14 + .../libcontainer/namespaces/nsenter/nsenter.c | 6 + .../libcontainer/netlink/netlink_linux.go | 81 ++++- .../netlink/netlink_linux_test.go | 24 ++ .../netlink/netlink_unsupported.go | 4 + .../docker/libcontainer/system/setns_linux.go | 9 +- ...all_linux_amd64.go => syscall_linux_64.go} | 2 +- 14 files changed, 496 insertions(+), 19 deletions(-) create mode 100644 vendor/src/github.com/docker/libcontainer/SPEC.md rename vendor/src/github.com/docker/libcontainer/system/{syscall_linux_amd64.go => syscall_linux_64.go} (88%) diff --git a/project/vendor.sh b/project/vendor.sh index cc44277e01..6ebce73ca7 100755 --- a/project/vendor.sh +++ b/project/vendor.sh @@ -66,7 +66,7 @@ if [ "$1" = '--go' ]; then mv tmp-tar src/code.google.com/p/go/src/pkg/archive/tar fi -clone git github.com/docker/libcontainer 84c1636580a356db88b079d118b94abe6a1a0acd +clone git github.com/docker/libcontainer 53eca435e63db58b06cf796d3a9326db5fd42253 # see src/github.com/docker/libcontainer/update-vendor.sh which is the "source of truth" for libcontainer deps (just like this file) rm -rf src/github.com/docker/libcontainer/vendor eval "$(grep '^clone ' src/github.com/docker/libcontainer/update-vendor.sh | grep -v 'github.com/codegangsta/cli')" diff --git a/vendor/src/github.com/docker/libcontainer/SPEC.md b/vendor/src/github.com/docker/libcontainer/SPEC.md new file mode 100644 index 0000000000..f5afaadc51 --- /dev/null +++ b/vendor/src/github.com/docker/libcontainer/SPEC.md @@ -0,0 +1,321 @@ +## Container Specification - v1 + +This is the standard configuration for version 1 containers. It includes +namespaces, standard filesystem setup, a default Linux capability set, and +information about resource reservations. It also has information about any +populated environment settings for the processes running inside a container. + +Along with the configuration of how a container is created the standard also +discusses actions that can be performed on a container to manage and inspect +information about the processes running inside. + +The v1 profile is meant to be able to accommodate the majority of applications +with a strong security configuration. + +### System Requirements and Compatibility + +Minimum requirements: +* Kernel version - 3.8 recommended 2.6.2x minimum(with backported patches) +* Mounted cgroups with each subsystem in its own hierarchy + + +### Namespaces + +| Flag | Enabled | +| ------------ | ------- | +| CLONE_NEWPID | 1 | +| CLONE_NEWUTS | 1 | +| CLONE_NEWIPC | 1 | +| CLONE_NEWNET | 1 | +| CLONE_NEWNS | 1 | +| CLONE_NEWUSER | 0 | + +In v1 the user namespace is not enabled by default for support of older kernels +where the user namespace feature is not fully implemented. Namespaces are +created for the container via the `clone` syscall. 
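For illustration, the namespace set in the table above maps directly onto the clone flags exposed by Go's standard library. The sketch below shows the v1 profile being requested when spawning an init process; it assumes a Linux host and root privileges and is not the libcontainer code path itself.

```go
package main

import (
	"os"
	"os/exec"
	"syscall"
)

func main() {
	// Request the v1 namespace profile from the table above: new PID, UTS,
	// IPC, NET and MNT namespaces. CLONE_NEWUSER is intentionally left out.
	cmd := exec.Command("/bin/sh")
	cmd.SysProcAttr = &syscall.SysProcAttr{
		Cloneflags: syscall.CLONE_NEWPID |
			syscall.CLONE_NEWUTS |
			syscall.CLONE_NEWIPC |
			syscall.CLONE_NEWNET |
			syscall.CLONE_NEWNS,
	}
	cmd.Stdin, cmd.Stdout, cmd.Stderr = os.Stdin, os.Stdout, os.Stderr
	if err := cmd.Run(); err != nil {
		panic(err)
	}
}
```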
+ + +### Filesystem + +A root filesystem must be provided to a container for execution. The container +will use this root filesystem (rootfs) to jail and spawn processes inside where +the binaries and system libraries are local to that directory. Any binaries +to be executed must be contained within this rootfs. + +Mounts that happen inside the container are automatically cleaned up when the +container exits as the mount namespace is destroyed and the kernel will +unmount all the mounts that were setup within that namespace. + +For a container to execute properly there are certain filesystems that +are required to be mounted within the rootfs that the runtime will setup. + +| Path | Type | Flags | Data | +| ----------- | ------ | -------------------------------------- | --------------------------------------- | +| /proc | proc | MS_NOEXEC,MS_NOSUID,MS_NODEV | | +| /dev | tmpfs | MS_NOEXEC,MS_STRICTATIME | mode=755 | +| /dev/shm | shm | MS_NOEXEC,MS_NOSUID,MS_NODEV | mode=1777,size=65536k | +| /dev/mqueue | mqueue | MS_NOEXEC,MS_NOSUID,MS_NODEV | | +| /dev/pts | devpts | MS_NOEXEC,MS_NOSUID | newinstance,ptmxmode=0666,mode=620,gid5 | +| /sys | sysfs | MS_NOEXEC,MS_NOSUID,MS_NODEV,MS_RDONLY | | + + +After a container's filesystems are mounted within the newly created +mount namespace `/dev` will need to be populated with a set of device nodes. +It is expected that a rootfs does not need to have any device nodes specified +for `/dev` witin the rootfs as the container will setup the correct devices +that are required for executing a container's process. + +| Path | Mode | Access | +| ------------ | ---- | ---------- | +| /dev/null | 0666 | rwm | +| /dev/zero | 0666 | rwm | +| /dev/full | 0666 | rwm | +| /dev/tty | 0666 | rwm | +| /dev/random | 0666 | rwm | +| /dev/urandom | 0666 | rwm | +| /dev/fuse | 0666 | rwm | + + +**ptmx** +`/dev/ptmx` will need to be a symlink to the host's `/dev/ptmx` within +the container. + +The use of a pseudo TTY is optional within a container and it should support both. +If a pseudo is provided to the container `/dev/console` will need to be +setup by binding the console in `/dev/` after it has been populated and mounted +in tmpfs. + +| Source | Destination | UID GID | Mode | Type | +| --------------- | ------------ | ------- | ---- | ---- | +| *pty host path* | /dev/console | 0 0 | 0600 | bind | + + +After `/dev/null` has been setup we check for any external links between +the container's io, STDIN, STDOUT, STDERR. If the container's io is pointing +to `/dev/null` outside the container we close and `dup2` the the `/dev/null` +that is local to the container's rootfs. + + +After the container has `/proc` mounted a few standard symlinks are setup +within `/dev/` for the io. + +| Source | Destination | +| ------------ | ----------- | +| /proc/1/fd | /dev/fd | +| /proc/1/fd/0 | /dev/stdin | +| /proc/1/fd/1 | /dev/stdout | +| /proc/1/fd/2 | /dev/stderr | + +A `pivot_root` is used to change the root for the process, effectively +jailing the process inside the rootfs. + +```c +put_old = mkdir(...); +pivot_root(rootfs, put_old); +chdir("/"); +unmount(put_old, MS_DETACH); +rmdir(put_old); +``` + +For container's running with a rootfs inside `ramfs` a `MS_MOVE` combined +with a `chroot` is required as `pivot_root` is not supported in `ramfs`. + +```c +mount(rootfs, "/", NULL, MS_MOVE, NULL); +chroot("."); +chdir("/"); +``` + +The `umask` is set back to `0022` after the filesystem setup has been completed. 
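The device nodes listed above are ordinary character devices with well known major and minor numbers, so populating `/dev` amounts to a short series of mknod(2) calls once the tmpfs is mounted. A minimal sketch, using the conventional Linux device numbers and not the actual libcontainer devices package:

```go
package main

import (
	"fmt"
	"path/filepath"
	"syscall"
)

// populateDev creates the default character devices from the table above
// inside rootfs/dev. It assumes rootfs/dev is already a mounted tmpfs and
// that the caller holds CAP_MKNOD.
func populateDev(rootfs string) error {
	devices := []struct {
		name         string
		major, minor int
	}{
		{"null", 1, 3}, {"zero", 1, 5}, {"full", 1, 7}, {"tty", 5, 0},
		{"random", 1, 8}, {"urandom", 1, 9}, {"fuse", 10, 229},
	}
	for _, d := range devices {
		path := filepath.Join(rootfs, "dev", d.name)
		dev := d.major<<8 | d.minor // legacy dev_t encoding, fine for small numbers
		if err := syscall.Mknod(path, syscall.S_IFCHR|0666, dev); err != nil {
			return fmt.Errorf("mknod %s: %v", path, err)
		}
	}
	return nil
}

func main() {
	if err := populateDev("/tmp/rootfs"); err != nil { // hypothetical rootfs path
		fmt.Println(err)
	}
}
```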
+ +### Resources + +Cgroups are used to handle resource allocation for containers. This includes +system resources like cpu, memory, and device access. + +| Subsystem | Enabled | +| ---------- | ------- | +| devices | 1 | +| memory | 1 | +| cpu | 1 | +| cpuacct | 1 | +| cpuset | 1 | +| blkio | 1 | +| perf_event | 1 | +| freezer | 1 | + + +All cgroup subsystem are joined so that statistics can be collected from +each of the subsystems. Freezer does not expose any stats but is joined +so that containers can be paused and resumed. + +The parent process of the container's init must place the init pid inside +the correct cgroups before the initialization begins. This is done so +that no processes or threads escape the cgroups. This sync is +done via a pipe ( specified in the runtime section below ) that the container's +init process will block waiting for the parent to finish setup. + +### Security + +The standard set of Linux capabilities that are set in a container +provide a good default for security and flexibility for the applications. + + +| Capability | Enabled | +| -------------------- | ------- | +| CAP_NET_RAW | 1 | +| CAP_NET_BIND_SERVICE | 1 | +| CAP_AUDIT_WRITE | 1 | +| CAP_DAC_OVERRIDE | 1 | +| CAP_SETFCAP | 1 | +| CAP_SETPCAP | 1 | +| CAP_SETGID | 1 | +| CAP_SETUID | 1 | +| CAP_MKNOD | 1 | +| CAP_CHOWN | 1 | +| CAP_FOWNER | 1 | +| CAP_FSETID | 1 | +| CAP_KILL | 1 | +| CAP_SYS_CHROOT | 1 | +| CAP_NET_BROADCAST | 0 | +| CAP_SYS_MODULE | 0 | +| CAP_SYS_RAWIO | 0 | +| CAP_SYS_PACCT | 0 | +| CAP_SYS_ADMIN | 0 | +| CAP_SYS_NICE | 0 | +| CAP_SYS_RESOURCE | 0 | +| CAP_SYS_TIME | 0 | +| CAP_SYS_TTY_CONFIG | 0 | +| CAP_AUDIT_CONTROL | 0 | +| CAP_MAC_OVERRIDE | 0 | +| CAP_MAC_ADMIN | 0 | +| CAP_NET_ADMIN | 0 | +| CAP_SYSLOG | 0 | +| CAP_DAC_READ_SEARCH | 0 | +| CAP_LINUX_IMMUTABLE | 0 | +| CAP_IPC_LOCK | 0 | +| CAP_IPC_OWNER | 0 | +| CAP_SYS_PTRACE | 0 | +| CAP_SYS_BOOT | 0 | +| CAP_LEASE | 0 | +| CAP_WAKE_ALARM | 0 | +| CAP_BLOCK_SUSPE | 0 | + + +Additional security layers like [apparmor](https://wiki.ubuntu.com/AppArmor) +and [selinux](http://selinuxproject.org/page/Main_Page) can be used with +the containers. A container should support setting an apparmor profile or +selinux process and mount labels if provided in the configuration. 
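One part of enforcing the whitelist above, shrinking the bounding set so the capabilities marked 0 can never be reacquired by a child process, needs nothing more than prctl(2). The sketch below uses raw syscalls rather than the real libcontainer capability code, and the highest capability number is an assumption; a real implementation would read /proc/sys/kernel/cap_last_cap. The apparmor profile that follows is a separate, complementary layer.

```go
package main

import (
	"fmt"
	"syscall"
)

// PR_CAPBSET_DROP from <linux/prctl.h>.
const prCapBSetDrop = 24

// Capability numbers from <linux/capability.h> for the caps the v1 profile keeps.
var keep = map[uintptr]bool{
	0:  true, // CAP_CHOWN
	1:  true, // CAP_DAC_OVERRIDE
	3:  true, // CAP_FOWNER
	4:  true, // CAP_FSETID
	5:  true, // CAP_KILL
	6:  true, // CAP_SETGID
	7:  true, // CAP_SETUID
	8:  true, // CAP_SETPCAP
	10: true, // CAP_NET_BIND_SERVICE
	13: true, // CAP_NET_RAW
	18: true, // CAP_SYS_CHROOT
	27: true, // CAP_MKNOD
	29: true, // CAP_AUDIT_WRITE
	31: true, // CAP_SETFCAP
}

// dropBoundingSet removes every capability not whitelisted above from the
// process bounding set, so it cannot be regained across exec.
func dropBoundingSet(lastCap uintptr) error {
	for c := uintptr(0); c <= lastCap; c++ {
		if keep[c] {
			continue
		}
		if _, _, errno := syscall.Syscall(syscall.SYS_PRCTL, prCapBSetDrop, c, 0); errno != 0 {
			return fmt.Errorf("prctl(PR_CAPBSET_DROP, %d): %v", c, errno)
		}
	}
	return nil
}

func main() {
	// 36 (CAP_BLOCK_SUSPEND) is the last capability on 3.x kernels; assumed here.
	if err := dropBoundingSet(36); err != nil {
		fmt.Println(err)
	}
}
```

A complete implementation would also adjust the effective, permitted and inheritable sets with capset(2) before exec'ing the container process, which is outside this sketch.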
+ +Standard apparmor profile: +```c +#include +profile flags=(attach_disconnected,mediate_deleted) { + #include + network, + capability, + file, + umount, + + mount fstype=tmpfs, + mount fstype=mqueue, + mount fstype=fuse.*, + mount fstype=binfmt_misc -> /proc/sys/fs/binfmt_misc/, + mount fstype=efivarfs -> /sys/firmware/efi/efivars/, + mount fstype=fusectl -> /sys/fs/fuse/connections/, + mount fstype=securityfs -> /sys/kernel/security/, + mount fstype=debugfs -> /sys/kernel/debug/, + mount fstype=proc -> /proc/, + mount fstype=sysfs -> /sys/, + + deny @{PROC}/sys/fs/** wklx, + deny @{PROC}/sysrq-trigger rwklx, + deny @{PROC}/mem rwklx, + deny @{PROC}/kmem rwklx, + deny @{PROC}/sys/kernel/[^s][^h][^m]* wklx, + deny @{PROC}/sys/kernel/*/** wklx, + + deny mount options=(ro, remount) -> /, + deny mount fstype=debugfs -> /var/lib/ureadahead/debugfs/, + deny mount fstype=devpts, + + deny /sys/[^f]*/** wklx, + deny /sys/f[^s]*/** wklx, + deny /sys/fs/[^c]*/** wklx, + deny /sys/fs/c[^g]*/** wklx, + deny /sys/fs/cg[^r]*/** wklx, + deny /sys/firmware/efi/efivars/** rwklx, + deny /sys/kernel/security/** rwklx, +} +``` + +*TODO: seccomp work is being done to find a good default config* + +### Runtime and Init Process + +During container creation the parent process needs to talk to the container's init +process and have a form of synchronization. This is accomplished by creating +a pipe that is passed to the container's init. When the init process first spawns +it will block on its side of the pipe until the parent closes its side. This +allows the parent to have time to set the new process inside a cgroup hierarchy +and/or write any uid/gid mappings required for user namespaces. +The pipe is passed to the init process via FD 3. + +The application consuming libcontainer should be compiled statically. libcontainer +does not define any init process and the arguments provided are used to `exec` the +process inside the application. There should be no long running init within the +container spec. + +If a pseudo tty is provided to a container it will open and `dup2` the console +as the container's STDIN, STDOUT, STDERR as well as mounting the console +as `/dev/console`. + +An extra set of mounts are provided to a container and setup for use. A container's +rootfs can contain some non portable files inside that can cause side effects during +execution of a process. These files are usually created and populated with the container +specific information via the runtime. + +**Extra runtime files:** +* /etc/hosts +* /etc/resolv.conf +* /etc/hostname +* /etc/localtime + + +#### Defaults + +There are a few defaults that can be overridden by users, but in their omission +these apply to processes within a container. + +| Type | Value | +| ------------------- | ------------------------------ | +| Parent Death Signal | SIGKILL | +| UID | 0 | +| GID | 0 | +| GROUPS | 0, NULL | +| CWD | "/" | +| $HOME | Current user's home dir or "/" | +| Readonly rootfs | false | +| Pseudo TTY | false | + + +## Actions + +After a container is created there is a standard set of actions that can +be done to the container. These actions are part of the public API for +a container. 
+ +| Action | Description | +| -------------- | ------------------------------------------------------------------ | +| Get processes | Return all the pids for processes running inside a container | +| Get Stats | Return resource statistics for the container as a whole | +| Wait | Wait waits on the container's init process ( pid 1 ) | +| Wait Process | Wait on any of the container's processes returning the exit status | +| Destroy | Kill the container's init process and remove any filesystem state | +| Signal | Send a signal to the container's init process | +| Signal Process | Send a signal to any of the container's processes | +| Pause | Pause all processes inside the container | +| Resume | Resume all processes inside the container if paused | +| Exec | Execute a new process inside of the container ( requires setns ) | + + diff --git a/vendor/src/github.com/docker/libcontainer/config.go b/vendor/src/github.com/docker/libcontainer/config.go index 57ea5c69ac..915e00660c 100644 --- a/vendor/src/github.com/docker/libcontainer/config.go +++ b/vendor/src/github.com/docker/libcontainer/config.go @@ -68,6 +68,10 @@ type Config struct { // RestrictSys will remount /proc/sys, /sys, and mask over sysrq-trigger as well as /proc/irq and // /proc/bus RestrictSys bool `json:"restrict_sys,omitempty"` + + // Rlimits specifies the resource limits, such as max open files, to set in the container + // If Rlimits are not set, the container will inherit rlimits from the parent process + Rlimits []Rlimit `json:"rlimits,omitempty"` } // Routes can be specified to create entries in the route table as the container is started @@ -90,3 +94,9 @@ type Route struct { // The device to set this route up for, for example: eth0 InterfaceName string `json:"interface_name,omitempty"` } + +type Rlimit struct { + Type int `json:"type,omitempty"` + Hard uint64 `json:"hard,omitempty"` + Soft uint64 `json:"soft,omitempty"` +} diff --git a/vendor/src/github.com/docker/libcontainer/integration/exec_test.go b/vendor/src/github.com/docker/libcontainer/integration/exec_test.go index 261d208e3e..8f4dae0f9e 100644 --- a/vendor/src/github.com/docker/libcontainer/integration/exec_test.go +++ b/vendor/src/github.com/docker/libcontainer/integration/exec_test.go @@ -156,3 +156,24 @@ func TestIPCBadPath(t *testing.T) { t.Fatal("container succeded with bad ipc path") } } + +func TestRlimit(t *testing.T) { + if testing.Short() { + return + } + + rootfs, err := newRootFs() + if err != nil { + t.Fatal(err) + } + defer remove(rootfs) + + config := newTemplateConfig(rootfs) + out, _, err := runContainer(config, "", "/bin/sh", "-c", "ulimit -n") + if err != nil { + t.Fatal(err) + } + if limit := strings.TrimSpace(out.Stdout.String()); limit != "1024" { + t.Fatalf("expected rlimit to be 1024, got %s", limit) + } +} diff --git a/vendor/src/github.com/docker/libcontainer/integration/template_test.go b/vendor/src/github.com/docker/libcontainer/integration/template_test.go index 1805eba980..efcf6d5b90 100644 --- a/vendor/src/github.com/docker/libcontainer/integration/template_test.go +++ b/vendor/src/github.com/docker/libcontainer/integration/template_test.go @@ -1,6 +1,8 @@ package integration import ( + "syscall" + "github.com/docker/libcontainer" "github.com/docker/libcontainer/cgroups" "github.com/docker/libcontainer/devices" @@ -60,5 +62,12 @@ func newTemplateConfig(rootfs string) *libcontainer.Config { Gateway: "localhost", }, }, + Rlimits: []libcontainer.Rlimit{ + { + Type: syscall.RLIMIT_NOFILE, + Hard: uint64(1024), + Soft: uint64(1024), + }, 
+ }, } } diff --git a/vendor/src/github.com/docker/libcontainer/label/label.go b/vendor/src/github.com/docker/libcontainer/label/label.go index 04a72aeae0..5a540fd5a0 100644 --- a/vendor/src/github.com/docker/libcontainer/label/label.go +++ b/vendor/src/github.com/docker/libcontainer/label/label.go @@ -25,6 +25,10 @@ func SetFileLabel(path string, fileLabel string) error { return nil } +func SetFileCreateLabel(fileLabel string) error { + return nil +} + func Relabel(path string, fileLabel string, relabel string) error { return nil } diff --git a/vendor/src/github.com/docker/libcontainer/label/label_selinux.go b/vendor/src/github.com/docker/libcontainer/label/label_selinux.go index 0b7d437f84..5983031ae0 100644 --- a/vendor/src/github.com/docker/libcontainer/label/label_selinux.go +++ b/vendor/src/github.com/docker/libcontainer/label/label_selinux.go @@ -87,6 +87,14 @@ func SetFileLabel(path string, fileLabel string) error { return nil } +// Tell the kernel the label for all files to be created +func SetFileCreateLabel(fileLabel string) error { + if selinux.SelinuxEnabled() { + return selinux.Setfscreatecon(fileLabel) + } + return nil +} + // Change the label of path to the filelabel string. If the relabel string // is "z", relabel will change the MCS label to s0. This will allow all // containers to share the content. If the relabel string is a "Z" then diff --git a/vendor/src/github.com/docker/libcontainer/namespaces/init.go b/vendor/src/github.com/docker/libcontainer/namespaces/init.go index 2fa2780e7f..7c83b13761 100644 --- a/vendor/src/github.com/docker/libcontainer/namespaces/init.go +++ b/vendor/src/github.com/docker/libcontainer/namespaces/init.go @@ -89,6 +89,10 @@ func Init(container *libcontainer.Config, uncleanRootfs, consolePath string, pip return fmt.Errorf("setup route %s", err) } + if err := setupRlimits(container); err != nil { + return fmt.Errorf("setup rlimits %s", err) + } + label.Init() if err := mount.InitializeMountNamespace(rootfs, @@ -238,6 +242,16 @@ func setupRoute(container *libcontainer.Config) error { return nil } +func setupRlimits(container *libcontainer.Config) error { + for _, rlimit := range container.Rlimits { + l := &syscall.Rlimit{Max: rlimit.Hard, Cur: rlimit.Soft} + if err := syscall.Setrlimit(rlimit.Type, l); err != nil { + return fmt.Errorf("error setting rlimit type %v: %v", rlimit.Type, err) + } + } + return nil +} + // FinalizeNamespace drops the caps, sets the correct user // and working dir, and closes any leaky file descriptors // before execing the command inside the namespace diff --git a/vendor/src/github.com/docker/libcontainer/namespaces/nsenter/nsenter.c b/vendor/src/github.com/docker/libcontainer/namespaces/nsenter/nsenter.c index 2869dd14d6..f060f63b13 100644 --- a/vendor/src/github.com/docker/libcontainer/namespaces/nsenter/nsenter.c +++ b/vendor/src/github.com/docker/libcontainer/namespaces/nsenter/nsenter.c @@ -10,6 +10,7 @@ #include #include #include +#include #include #include #include @@ -88,6 +89,11 @@ void nsenter() return; } + if (prctl(PR_SET_CHILD_SUBREAPER, 1, 0, 0, 0) == -1) { + fprintf(stderr, "nsenter: failed to set child subreaper: %s", strerror(errno)); + exit(1); + } + static const struct option longopts[] = { {"nspid", required_argument, NULL, 'n'}, {"console", required_argument, NULL, 't'}, diff --git a/vendor/src/github.com/docker/libcontainer/netlink/netlink_linux.go b/vendor/src/github.com/docker/libcontainer/netlink/netlink_linux.go index 851d959cd0..1bf70430f2 100644 --- 
a/vendor/src/github.com/docker/libcontainer/netlink/netlink_linux.go +++ b/vendor/src/github.com/docker/libcontainer/netlink/netlink_linux.go @@ -576,6 +576,31 @@ func NetworkSetMTU(iface *net.Interface, mtu int) error { return s.HandleAck(wb.Seq) } +// Set link queue length +// This is identical to running: ip link set dev $name txqueuelen $QLEN +func NetworkSetTxQueueLen(iface *net.Interface, txQueueLen int) error { + s, err := getNetlinkSocket() + if err != nil { + return err + } + defer s.Close() + + wb := newNetlinkRequest(syscall.RTM_SETLINK, syscall.NLM_F_ACK) + + msg := newIfInfomsg(syscall.AF_UNSPEC) + msg.Type = syscall.RTM_SETLINK + msg.Flags = syscall.NLM_F_REQUEST + msg.Index = int32(iface.Index) + msg.Change = DEFAULT_CHANGE + wb.AddData(msg) + wb.AddData(uint32Attr(syscall.IFLA_TXQLEN, uint32(txQueueLen))) + + if err := s.Send(wb); err != nil { + return err + } + return s.HandleAck(wb.Seq) +} + func networkMasterAction(iface *net.Interface, rtattr *RtAttr) error { s, err := getNetlinkSocket() if err != nil { @@ -769,26 +794,38 @@ func NetworkLinkAddVlan(masterDev, vlanDev string, vlanId uint16) error { return s.HandleAck(wb.Seq) } -// Add MAC VLAN network interface with masterDev as its upper device -// This is identical to running: -// ip link add name $name link $masterdev type macvlan mode $mode -func NetworkLinkAddMacVlan(masterDev, macVlanDev string, mode string) error { - s, err := getNetlinkSocket() - if err != nil { - return err - } - defer s.Close() +// MacVlan link has LowerDev, UpperDev and operates in Mode mode +// This simplifies the code when creating MacVlan or MacVtap interface +type MacVlanLink struct { + MasterDev string + SlaveDev string + mode string +} - macVlan := map[string]uint32{ +func (m MacVlanLink) Mode() uint32 { + modeMap := map[string]uint32{ "private": MACVLAN_MODE_PRIVATE, "vepa": MACVLAN_MODE_VEPA, "bridge": MACVLAN_MODE_BRIDGE, "passthru": MACVLAN_MODE_PASSTHRU, } + return modeMap[m.mode] +} + +// Add MAC VLAN network interface with masterDev as its upper device +// This is identical to running: +// ip link add name $name link $masterdev type macvlan mode $mode +func networkLinkMacVlan(dev_type string, mcvln *MacVlanLink) error { + s, err := getNetlinkSocket() + if err != nil { + return err + } + defer s.Close() + wb := newNetlinkRequest(syscall.RTM_NEWLINK, syscall.NLM_F_CREATE|syscall.NLM_F_EXCL|syscall.NLM_F_ACK) - masterDevIfc, err := net.InterfaceByName(masterDev) + masterDevIfc, err := net.InterfaceByName(mcvln.MasterDev) if err != nil { return err } @@ -797,16 +834,16 @@ func NetworkLinkAddMacVlan(masterDev, macVlanDev string, mode string) error { wb.AddData(msg) nest1 := newRtAttr(syscall.IFLA_LINKINFO, nil) - newRtAttrChild(nest1, IFLA_INFO_KIND, nonZeroTerminated("macvlan")) + newRtAttrChild(nest1, IFLA_INFO_KIND, nonZeroTerminated(dev_type)) nest2 := newRtAttrChild(nest1, IFLA_INFO_DATA, nil) macVlanData := make([]byte, 4) - native.PutUint32(macVlanData, macVlan[mode]) + native.PutUint32(macVlanData, mcvln.Mode()) newRtAttrChild(nest2, IFLA_MACVLAN_MODE, macVlanData) wb.AddData(nest1) wb.AddData(uint32Attr(syscall.IFLA_LINK, uint32(masterDevIfc.Index))) - wb.AddData(newRtAttr(syscall.IFLA_IFNAME, zeroTerminated(macVlanDev))) + wb.AddData(newRtAttr(syscall.IFLA_IFNAME, zeroTerminated(mcvln.SlaveDev))) if err := s.Send(wb); err != nil { return err @@ -814,6 +851,22 @@ func NetworkLinkAddMacVlan(masterDev, macVlanDev string, mode string) error { return s.HandleAck(wb.Seq) } +func NetworkLinkAddMacVlan(masterDev, macVlanDev string, 
mode string) error { + return networkLinkMacVlan("macvlan", &MacVlanLink{ + MasterDev: masterDev, + SlaveDev: macVlanDev, + mode: mode, + }) +} + +func NetworkLinkAddMacVtap(masterDev, macVlanDev string, mode string) error { + return networkLinkMacVlan("macvtap", &MacVlanLink{ + MasterDev: masterDev, + SlaveDev: macVlanDev, + mode: mode, + }) +} + func networkLinkIpAction(action, flags int, ifa IfAddr) error { s, err := getNetlinkSocket() if err != nil { diff --git a/vendor/src/github.com/docker/libcontainer/netlink/netlink_linux_test.go b/vendor/src/github.com/docker/libcontainer/netlink/netlink_linux_test.go index 4b098777cd..3f6511abfe 100644 --- a/vendor/src/github.com/docker/libcontainer/netlink/netlink_linux_test.go +++ b/vendor/src/github.com/docker/libcontainer/netlink/netlink_linux_test.go @@ -248,6 +248,30 @@ func TestNetworkLinkAddMacVlan(t *testing.T) { readLink(t, tl.name) } +func TestNetworkLinkAddMacVtap(t *testing.T) { + if testing.Short() { + return + } + + tl := struct { + name string + mode string + }{ + name: "tstVtap", + mode: "private", + } + masterLink := testLink{"tstEth", "dummy"} + + addLink(t, masterLink.name, masterLink.linkType) + defer deleteLink(t, masterLink.name) + + if err := NetworkLinkAddMacVtap(masterLink.name, tl.name, tl.mode); err != nil { + t.Fatalf("Unable to create %#v MAC VTAP interface: %s", tl, err) + } + + readLink(t, tl.name) +} + func TestAddDelNetworkIp(t *testing.T) { if testing.Short() { return diff --git a/vendor/src/github.com/docker/libcontainer/netlink/netlink_unsupported.go b/vendor/src/github.com/docker/libcontainer/netlink/netlink_unsupported.go index 747cd1d80a..4b11bf8ba5 100644 --- a/vendor/src/github.com/docker/libcontainer/netlink/netlink_unsupported.go +++ b/vendor/src/github.com/docker/libcontainer/netlink/netlink_unsupported.go @@ -47,6 +47,10 @@ func NetworkSetMTU(iface *net.Interface, mtu int) error { return ErrNotImplemented } +func NetworkSetTxQueueLen(iface *net.Interface, txQueueLen int) error { + return ErrNotImplemented +} + func NetworkCreateVethPair(name1, name2 string, txQueueLen int) error { return ErrNotImplemented } diff --git a/vendor/src/github.com/docker/libcontainer/system/setns_linux.go b/vendor/src/github.com/docker/libcontainer/system/setns_linux.go index 32821ee2bf..228e6ccd7f 100644 --- a/vendor/src/github.com/docker/libcontainer/system/setns_linux.go +++ b/vendor/src/github.com/docker/libcontainer/system/setns_linux.go @@ -11,9 +11,12 @@ import ( // We need different setns values for the different platforms and arch // We are declaring the macro here because the SETNS syscall does not exist in th stdlib var setNsMap = map[string]uintptr{ - "linux/386": 346, - "linux/amd64": 308, - "linux/arm": 374, + "linux/386": 346, + "linux/amd64": 308, + "linux/arm": 374, + "linux/ppc64": 350, + "linux/ppc64le": 350, + "linux/s390x": 339, } func Setns(fd uintptr, flags uintptr) error { diff --git a/vendor/src/github.com/docker/libcontainer/system/syscall_linux_amd64.go b/vendor/src/github.com/docker/libcontainer/system/syscall_linux_64.go similarity index 88% rename from vendor/src/github.com/docker/libcontainer/system/syscall_linux_amd64.go rename to vendor/src/github.com/docker/libcontainer/system/syscall_linux_64.go index 516c17e921..6840c3770f 100644 --- a/vendor/src/github.com/docker/libcontainer/system/syscall_linux_amd64.go +++ b/vendor/src/github.com/docker/libcontainer/system/syscall_linux_64.go @@ -1,4 +1,4 @@ -// +build linux,amd64 +// +build linux,amd64 linux,ppc64 linux,ppc64le linux,s390x package 
system From e201d5bcd46a3dcb70dd6b6e23ee375e4b5b5f40 Mon Sep 17 00:00:00 2001 From: Jessica Frazelle Date: Wed, 3 Dec 2014 15:53:19 -0800 Subject: [PATCH 534/592] Fix tests with old cmd function. Docker-DCO-1.1-Signed-off-by: Jessica Frazelle (github: jfrazelle) --- integration-cli/docker_cli_create_test.go | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/integration-cli/docker_cli_create_test.go b/integration-cli/docker_cli_create_test.go index 498065b64d..0dc7993798 100644 --- a/integration-cli/docker_cli_create_test.go +++ b/integration-cli/docker_cli_create_test.go @@ -129,7 +129,9 @@ func TestCreateEchoStdout(t *testing.T) { func TestCreateVolumesCreated(t *testing.T) { name := "test_create_volume" - cmd(t, "create", "--name", name, "-v", "/foo", "busybox") + if out, _, err := runCommandWithOutput(exec.Command(dockerBinary, "create", "--name", name, "-v", "/foo", "busybox")); err != nil { + t.Fatal(out, err) + } dir, err := inspectFieldMap(name, "Volumes", "/foo") if err != nil { t.Fatalf("Error getting volume host path: %q", err) From 7c7026bd2255cef47cbb026afe0b984d4c9a9130 Mon Sep 17 00:00:00 2001 From: Jessica Frazelle Date: Wed, 3 Dec 2014 17:36:14 -0800 Subject: [PATCH 535/592] Be consistent about libtrust import path. Docker-DCO-1.1-Signed-off-by: Jessica Frazelle (github: jfrazelle) --- api/common.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/api/common.go b/api/common.go index 3a46a8a523..71e72f69e0 100644 --- a/api/common.go +++ b/api/common.go @@ -11,7 +11,7 @@ import ( "github.com/docker/docker/engine" "github.com/docker/docker/pkg/parsers" "github.com/docker/docker/pkg/version" - "github.com/docker/docker/vendor/src/github.com/docker/libtrust" + "github.com/docker/libtrust" ) const ( From 7ba9a18ade7393d942d995d34a92d757ef4af0b7 Mon Sep 17 00:00:00 2001 From: Jessica Frazelle Date: Wed, 3 Dec 2014 17:42:25 -0800 Subject: [PATCH 536/592] cleanup: remove startCommand function, only used once, and unecessary abstraction. 
Docker-DCO-1.1-Signed-off-by: Jessica Frazelle (github: jfrazelle) --- integration-cli/docker_cli_attach_test.go | 2 +- integration-cli/utils.go | 7 ------- 2 files changed, 1 insertion(+), 8 deletions(-) diff --git a/integration-cli/docker_cli_attach_test.go b/integration-cli/docker_cli_attach_test.go index 510f02ab18..d03a986e48 100644 --- a/integration-cli/docker_cli_attach_test.go +++ b/integration-cli/docker_cli_attach_test.go @@ -50,7 +50,7 @@ func TestAttachMultipleAndRestart(t *testing.T) { t.Fatal(err) } - if _, err := startCommand(c); err != nil { + if err := c.Start(); err != nil { t.Fatal(err) } diff --git a/integration-cli/utils.go b/integration-cli/utils.go index 05c27dc5ac..37ce221b9b 100644 --- a/integration-cli/utils.go +++ b/integration-cli/utils.go @@ -95,13 +95,6 @@ func runCommand(cmd *exec.Cmd) (exitCode int, err error) { return } -func startCommand(cmd *exec.Cmd) (exitCode int, err error) { - exitCode = 0 - err = cmd.Start() - exitCode = processExitCode(err) - return -} - func logDone(message string) { fmt.Printf("[PASSED]: %s\n", message) } From 02a021119fb2b3e051b98817831a8c1a8a9fd464 Mon Sep 17 00:00:00 2001 From: Jessica Frazelle Date: Wed, 3 Dec 2014 17:47:28 -0800 Subject: [PATCH 537/592] Remove unnecessary abstraction nLines Docker-DCO-1.1-Signed-off-by: Jessica Frazelle (github: jfrazelle) --- integration-cli/docker_cli_rmi_test.go | 8 ++++---- integration-cli/utils.go | 4 ---- 2 files changed, 4 insertions(+), 8 deletions(-) diff --git a/integration-cli/docker_cli_rmi_test.go b/integration-cli/docker_cli_rmi_test.go index 4600c481fd..63d9f92983 100644 --- a/integration-cli/docker_cli_rmi_test.go +++ b/integration-cli/docker_cli_rmi_test.go @@ -46,14 +46,14 @@ func TestRmiTag(t *testing.T) { dockerCmd(t, "tag", "busybox", "utest:5000/docker:tag3") { imagesAfter, _, _ := dockerCmd(t, "images", "-a") - if nLines(imagesAfter) != nLines(imagesBefore)+3 { + if strings.Count(imagesAfter, "\n") != strings.Count(imagesBefore, "\n")+3 { t.Fatalf("before: %q\n\nafter: %q\n", imagesBefore, imagesAfter) } } dockerCmd(t, "rmi", "utest/docker:tag2") { imagesAfter, _, _ := dockerCmd(t, "images", "-a") - if nLines(imagesAfter) != nLines(imagesBefore)+2 { + if strings.Count(imagesAfter, "\n") != strings.Count(imagesBefore, "\n")+2 { t.Fatalf("before: %q\n\nafter: %q\n", imagesBefore, imagesAfter) } @@ -61,7 +61,7 @@ func TestRmiTag(t *testing.T) { dockerCmd(t, "rmi", "utest:5000/docker:tag3") { imagesAfter, _, _ := dockerCmd(t, "images", "-a") - if nLines(imagesAfter) != nLines(imagesBefore)+1 { + if strings.Count(imagesAfter, "\n") != strings.Count(imagesBefore, "\n")+1 { t.Fatalf("before: %q\n\nafter: %q\n", imagesBefore, imagesAfter) } @@ -69,7 +69,7 @@ func TestRmiTag(t *testing.T) { dockerCmd(t, "rmi", "utest:tag1") { imagesAfter, _, _ := dockerCmd(t, "images", "-a") - if nLines(imagesAfter) != nLines(imagesBefore)+0 { + if strings.Count(imagesAfter, "\n") != strings.Count(imagesBefore, "\n")+0 { t.Fatalf("before: %q\n\nafter: %q\n", imagesBefore, imagesAfter) } diff --git a/integration-cli/utils.go b/integration-cli/utils.go index 37ce221b9b..3ff6d3c438 100644 --- a/integration-cli/utils.go +++ b/integration-cli/utils.go @@ -105,10 +105,6 @@ func stripTrailingCharacters(target string) string { return target } -func nLines(s string) int { - return strings.Count(s, "\n") -} - func unmarshalJSON(data []byte, result interface{}) error { err := json.Unmarshal(data, result) if err != nil { From b91d330088958bca4e59285a55fdc5593d2a86ad Mon Sep 17 00:00:00 2001 From: Sven 
Dowideit Date: Thu, 4 Dec 2014 11:49:06 +1000 Subject: [PATCH 538/592] Note that using -lxc-conf to change things Docker manages has pitfalls Signed-off-by: Sven Dowideit Docker-DCO-1.1-Signed-off-by: Sven Dowideit (github: SvenDowideit) --- docs/sources/reference/run.md | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/docs/sources/reference/run.md b/docs/sources/reference/run.md index 8b0c6a1591..9c26ec7fa6 100644 --- a/docs/sources/reference/run.md +++ b/docs/sources/reference/run.md @@ -369,6 +369,13 @@ Note that in the future, a given host's docker daemon may not use LXC, so this is an implementation-specific configuration meant for operators already familiar with using LXC directly. +> **Note:** +> If you use `--lxc-conf` to modify a container's configuration which is also +> managed by the Docker daemon, then the Docker daemon will not know about this +> modification, and you will need to manage any conflicts yourself. For example, +> you can use `--lxc-conf` to set a container's IP address, but this will not be +> reflected in the `/etc/hosts` file. + ## Overriding Dockerfile image defaults When a developer builds an image from a [*Dockerfile*](/reference/builder/#dockerbuilder) From fa753e67ae2bf573c9dfb1da1e1135c5ef5ef415 Mon Sep 17 00:00:00 2001 From: Jessica Frazelle Date: Wed, 3 Dec 2014 17:52:06 -0800 Subject: [PATCH 539/592] Remove unnessary abstraction deepEqual Docker-DCO-1.1-Signed-off-by: Jessica Frazelle (github: jfrazelle) --- integration-cli/docker_cli_build_test.go | 3 ++- integration-cli/docker_cli_links_test.go | 5 +++-- integration-cli/utils.go | 4 ---- 3 files changed, 5 insertions(+), 7 deletions(-) diff --git a/integration-cli/docker_cli_build_test.go b/integration-cli/docker_cli_build_test.go index 1d287bd7dc..b76e1d4789 100644 --- a/integration-cli/docker_cli_build_test.go +++ b/integration-cli/docker_cli_build_test.go @@ -9,6 +9,7 @@ import ( "os/exec" "path" "path/filepath" + "reflect" "regexp" "strings" "syscall" @@ -1569,7 +1570,7 @@ func TestBuildWithVolumes(t *testing.T) { t.Fatal(err) } - equal := deepEqual(&expected, &result) + equal := reflect.DeepEqual(&result, &expected) if !equal { t.Fatalf("Volumes %s, expected %s", result, expected) diff --git a/integration-cli/docker_cli_links_test.go b/integration-cli/docker_cli_links_test.go index d412ef2a1a..50269245fe 100644 --- a/integration-cli/docker_cli_links_test.go +++ b/integration-cli/docker_cli_links_test.go @@ -4,6 +4,7 @@ import ( "io/ioutil" "os" "os/exec" + "reflect" "strings" "testing" "time" @@ -121,7 +122,7 @@ func TestLinksInspectLinksStarted(t *testing.T) { output := convertSliceOfStringsToMap(result) - equal := deepEqual(expected, output) + equal := reflect.DeepEqual(output, expected) if !equal { t.Fatalf("Links %s, expected %s", result, expected) @@ -150,7 +151,7 @@ func TestLinksInspectLinksStopped(t *testing.T) { output := convertSliceOfStringsToMap(result) - equal := deepEqual(expected, output) + equal := reflect.DeepEqual(output, expected) if !equal { t.Fatalf("Links %s, but expected %s", result, expected) diff --git a/integration-cli/utils.go b/integration-cli/utils.go index 3ff6d3c438..2de432549c 100644 --- a/integration-cli/utils.go +++ b/integration-cli/utils.go @@ -114,10 +114,6 @@ func unmarshalJSON(data []byte, result interface{}) error { return nil } -func deepEqual(expected interface{}, result interface{}) bool { - return reflect.DeepEqual(result, expected) -} - func convertSliceOfStringsToMap(input []string) map[string]struct{} { output := make(map[string]struct{}) 
for _, v := range input { From 4ee3a318a1f43f35a8c6ec8b6b73db5ea8824de9 Mon Sep 17 00:00:00 2001 From: Jessica Frazelle Date: Wed, 3 Dec 2014 18:45:51 -0800 Subject: [PATCH 540/592] Run 'go vet' on integration-cli. Docker-DCO-1.1-Signed-off-by: Jessica Frazelle (github: jfrazelle) --- integration-cli/docker_cli_cp_test.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/integration-cli/docker_cli_cp_test.go b/integration-cli/docker_cli_cp_test.go index a5432849dd..7002e1a34a 100644 --- a/integration-cli/docker_cli_cp_test.go +++ b/integration-cli/docker_cli_cp_test.go @@ -512,7 +512,7 @@ func TestCpToDot(t *testing.T) { } content, err := ioutil.ReadFile("./test") if string(content) != "lololol\n" { - t.Fatal("Wrong content in copied file %q, should be %q", content, "lololol\n") + t.Fatalf("Wrong content in copied file %q, should be %q", content, "lololol\n") } logDone("cp - to dot path") } From 6fd818f3ef8ca018b74544362c37d41a1095ba9d Mon Sep 17 00:00:00 2001 From: Jessica Frazelle Date: Wed, 3 Dec 2014 19:02:51 -0800 Subject: [PATCH 541/592] Fix output format where no variable specified in mount pkg Docker-DCO-1.1-Signed-off-by: Jessica Frazelle (github: jfrazelle) --- pkg/mount/sharedsubtree_linux_test.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pkg/mount/sharedsubtree_linux_test.go b/pkg/mount/sharedsubtree_linux_test.go index 145d57bbd8..0986bd9c75 100644 --- a/pkg/mount/sharedsubtree_linux_test.go +++ b/pkg/mount/sharedsubtree_linux_test.go @@ -312,7 +312,7 @@ func TestSubtreeUnbindable(t *testing.T) { if err := Mount(sourceDir, targetDir, "none", "bind,rw"); err != nil && err != syscall.EINVAL { t.Fatal(err) } else if err == nil { - t.Fatalf("%q should not have been bindable") + t.Fatalf("%q should not have been bindable", sourceDir) } defer func() { if err := Unmount(targetDir); err != nil { From 6d560e197ccde8f1d711ad8260cf399b078c404d Mon Sep 17 00:00:00 2001 From: Sven Dowideit Date: Thu, 4 Dec 2014 16:05:45 +1000 Subject: [PATCH 542/592] Something changed, broke the docs release script, and it seems that --exclude still doesn't work, so I'm removing it Signed-off-by: Sven Dowideit --- docs/README.md | 6 ++++-- docs/release.sh | 31 ++++--------------------------- 2 files changed, 8 insertions(+), 29 deletions(-) diff --git a/docs/README.md b/docs/README.md index c113a884a3..de3999ba78 100755 --- a/docs/README.md +++ b/docs/README.md @@ -131,8 +131,8 @@ Once the PR has the needed `LGTM`s, merge it, then publish to our beta server to test: git fetch upstream - git checkout post-1.2.0-docs-update-1 - git reset --hard upstream/post-1.2.0-docs-update-1 + git checkout docs + git reset --hard upstream/docs make AWS_S3_BUCKET=beta-docs.docker.io BUILD_ROOT=yes docs-release Then go to http://beta-docs.docker.io.s3-website-us-west-2.amazonaws.com/ @@ -141,6 +141,8 @@ to view your results and make sure what you published is what you wanted. When you're happy with it, publish the docs to our live site: make AWS_S3_BUCKET=docs.docker.com BUILD_ROOT=yes docs-release + +Test the uncached version of the live docs at http://docs.docker.com.s3-website-us-east-1.amazonaws.com/ Note that the new docs will not appear live on the site until the cache (a complex, distributed CDN system) is flushed. This requires someone with S3 keys. 
Contact Docker diff --git a/docs/release.sh b/docs/release.sh index 41881fc05b..8df8960c75 100755 --- a/docs/release.sh +++ b/docs/release.sh @@ -88,36 +88,13 @@ upload_current_documentation() { # a really complicated way to send only the files we want # if there are too many in any one set, aws s3 sync seems to fall over with 2 files to go # versions.html_fragment - endings=( json txt html xml css js gif png JPG ttf svg woff html_fragment ) - for i in ${endings[@]}; do - include="" - for j in ${endings[@]}; do - if [ "$i" != "$j" ];then - include="$include --exclude *.$j" - fi - done - include="--include *.$i $include" + include="--recursive --include \"*.$i\" " echo "uploading *.$i" - run="aws s3 sync $OPTIONS --profile $BUCKET --cache-control \"max-age=3600\" --acl public-read \ - $include \ - --exclude *.text* \ - --exclude *.*~ \ - --exclude *Dockerfile \ - --exclude *.DS_Store \ - --exclude *.psd \ - --exclude *.ai \ - --exclude *.eot \ - --exclude *.otf \ - --exclude *.rej \ - --exclude *.rst \ - --exclude *.orig \ - --exclude *.py \ - $src $dst" + run="aws s3 cp $src $dst $OPTIONS --profile $BUCKET --cache-control \"max-age=3600\" --acl public-read $include" + echo "=======================" + echo "$run" echo "=======================" - #echo "$run" - #echo "=======================" $run - done } if [ "$OPTIONS" != "--dryrun" ]; then From 385917e22c5bd0d577a5a0be0de9e88a0499fe87 Mon Sep 17 00:00:00 2001 From: Josh Hawn Date: Wed, 3 Dec 2014 22:23:31 -0800 Subject: [PATCH 543/592] Correctly close generated benchmark archives Another update to TarSum tests, this patch fixes an issue where the benchmarks were generating archives incorrectly by not closing the tarWriter. Docker-DCO-1.1-Signed-off-by: Josh Hawn (github: jlhawn) --- pkg/tarsum/tarsum_test.go | 1 + 1 file changed, 1 insertion(+) diff --git a/pkg/tarsum/tarsum_test.go b/pkg/tarsum/tarsum_test.go index 4e1f30e469..26f12cc847 100644 --- a/pkg/tarsum/tarsum_test.go +++ b/pkg/tarsum/tarsum_test.go @@ -132,6 +132,7 @@ func sizedTar(opts sizedOptions) io.Reader { fh = bytes.NewBuffer([]byte{}) } tarW := tar.NewWriter(fh) + defer tarW.Close() for i := int64(0); i < opts.num; i++ { err := tarW.WriteHeader(&tar.Header{ Name: fmt.Sprintf("/testdata%d", i), From 266a1044deb9a085ed43dccdc6cfa0723e5142b0 Mon Sep 17 00:00:00 2001 From: Arnaud Porterie Date: Wed, 3 Dec 2014 23:40:16 -0800 Subject: [PATCH 544/592] Fix interactive exec over TLS The code no longer assumes a net.TCPConn underlying the HTTP connection in order to close attached streams. Signed-off-by: Arnaud Porterie --- api/server/server.go | 12 ++++++++---- 1 file changed, 8 insertions(+), 4 deletions(-) diff --git a/api/server/server.go b/api/server/server.go index bf5318c0f4..4465e8d4a0 100644 --- a/api/server/server.go +++ b/api/server/server.go @@ -1136,15 +1136,19 @@ func postContainerExecStart(eng *engine.Engine, version version.Version, w http. } defer func() { - if tcpc, ok := inStream.(*net.TCPConn); ok { - tcpc.CloseWrite() + if cw, ok := inStream.(interface { + CloseWrite() error + }); ok { + cw.CloseWrite() } else { inStream.Close() } }() defer func() { - if tcpc, ok := outStream.(*net.TCPConn); ok { - tcpc.CloseWrite() + if cw, ok := outStream.(interface { + CloseWrite() error + }); ok { + cw.CloseWrite() } else if closer, ok := outStream.(io.Closer); ok { closer.Close() } From eb7d646a44d76c4a7f96fa6ef28de9fbbbaf17ca Mon Sep 17 00:00:00 2001 From: "Jonathan A. 
Sternberg" Date: Thu, 4 Dec 2014 12:30:16 -0500 Subject: [PATCH 545/592] Fixing docs formatting for the forcerm flag on docker build Signed-off-by: Jonathan A. Sternberg --- docs/sources/reference/api/docker_remote_api_v1.12.md | 2 +- docs/sources/reference/api/docker_remote_api_v1.13.md | 2 +- docs/sources/reference/api/docker_remote_api_v1.14.md | 2 +- docs/sources/reference/api/docker_remote_api_v1.15.md | 2 +- docs/sources/reference/api/docker_remote_api_v1.16.md | 2 +- 5 files changed, 5 insertions(+), 5 deletions(-) diff --git a/docs/sources/reference/api/docker_remote_api_v1.12.md b/docs/sources/reference/api/docker_remote_api_v1.12.md index 9c6351f98e..f38b018ef9 100644 --- a/docs/sources/reference/api/docker_remote_api_v1.12.md +++ b/docs/sources/reference/api/docker_remote_api_v1.12.md @@ -1074,7 +1074,7 @@ Query Parameters: - **q** – suppress verbose build output - **nocache** – do not use the cache when building the image - **rm** - remove intermediate containers after a successful build (default behavior) -- **forcerm - always remove intermediate containers (includes rm) +- **forcerm** - always remove intermediate containers (includes rm) Request Headers: diff --git a/docs/sources/reference/api/docker_remote_api_v1.13.md b/docs/sources/reference/api/docker_remote_api_v1.13.md index 41dbb285c5..f5ca931fe7 100644 --- a/docs/sources/reference/api/docker_remote_api_v1.13.md +++ b/docs/sources/reference/api/docker_remote_api_v1.13.md @@ -1063,7 +1063,7 @@ Query Parameters: - **q** – suppress verbose build output - **nocache** – do not use the cache when building the image - **rm** - remove intermediate containers after a successful build (default behavior) -- **forcerm - always remove intermediate containers (includes rm) +- **forcerm** - always remove intermediate containers (includes rm) Request Headers: diff --git a/docs/sources/reference/api/docker_remote_api_v1.14.md b/docs/sources/reference/api/docker_remote_api_v1.14.md index 3830130991..a5392f3bc9 100644 --- a/docs/sources/reference/api/docker_remote_api_v1.14.md +++ b/docs/sources/reference/api/docker_remote_api_v1.14.md @@ -1068,7 +1068,7 @@ Query Parameters: - **q** – suppress verbose build output - **nocache** – do not use the cache when building the image - **rm** - remove intermediate containers after a successful build (default behavior) -- **forcerm - always remove intermediate containers (includes rm) +- **forcerm** - always remove intermediate containers (includes rm) Request Headers: diff --git a/docs/sources/reference/api/docker_remote_api_v1.15.md b/docs/sources/reference/api/docker_remote_api_v1.15.md index e3b6fad74e..ae265653a3 100644 --- a/docs/sources/reference/api/docker_remote_api_v1.15.md +++ b/docs/sources/reference/api/docker_remote_api_v1.15.md @@ -1210,7 +1210,7 @@ Query Parameters: - **q** – suppress verbose build output - **nocache** – do not use the cache when building the image - **rm** - remove intermediate containers after a successful build (default behavior) -- **forcerm - always remove intermediate containers (includes rm) +- **forcerm** - always remove intermediate containers (includes rm) Request Headers: diff --git a/docs/sources/reference/api/docker_remote_api_v1.16.md b/docs/sources/reference/api/docker_remote_api_v1.16.md index 9276f628c8..72f5519e1c 100644 --- a/docs/sources/reference/api/docker_remote_api_v1.16.md +++ b/docs/sources/reference/api/docker_remote_api_v1.16.md @@ -1159,7 +1159,7 @@ Query Parameters: - **nocache** – do not use the cache when building the image - **pull** 
- attempt to pull the image even if an older image exists locally - **rm** - remove intermediate containers after a successful build (default behavior) -- **forcerm - always remove intermediate containers (includes rm) +- **forcerm** - always remove intermediate containers (includes rm) Request Headers: From 1bb02117db80e75f406f6c63d8d50680c1569019 Mon Sep 17 00:00:00 2001 From: cc272309126 Date: Fri, 28 Nov 2014 00:08:39 +0800 Subject: [PATCH 546/592] Fix the issue when docker exec a paused container, it will always hang. Add the test case of this issue. Docker-DCO-1.1-Signed-off-by: Chen Chao (github: cc272309126) --- daemon/exec.go | 4 ++- integration-cli/docker_cli_exec_test.go | 33 ++++++++++++++++++++ integration-cli/docker_utils.go | 40 +++++++++++++++++++++++++ 3 files changed, 76 insertions(+), 1 deletion(-) diff --git a/daemon/exec.go b/daemon/exec.go index 7d6755118e..ecdbc58d85 100644 --- a/daemon/exec.go +++ b/daemon/exec.go @@ -98,7 +98,9 @@ func (d *Daemon) getActiveContainer(name string) (*Container, error) { if !container.IsRunning() { return nil, fmt.Errorf("Container %s is not running", name) } - + if container.IsPaused() { + return nil, fmt.Errorf("Container %s is paused, unpause the container before exec", name) + } return container, nil } diff --git a/integration-cli/docker_cli_exec_test.go b/integration-cli/docker_cli_exec_test.go index ebb5484f2e..82ad9afe7b 100644 --- a/integration-cli/docker_cli_exec_test.go +++ b/integration-cli/docker_cli_exec_test.go @@ -230,3 +230,36 @@ func TestExecExitStatus(t *testing.T) { logDone("exec - exec non-zero ExitStatus") } + +func TestExecPausedContainer(t *testing.T) { + + defer deleteAllContainers() + defer unpauseAllContainers() + + runCmd := exec.Command(dockerBinary, "run", "-d", "--name", "testing", "busybox", "top") + out, _, err := runCommandWithOutput(runCmd) + if err != nil { + t.Fatal(out, err) + } + + ContainerID := stripTrailingCharacters(out) + + pausedCmd := exec.Command(dockerBinary, "pause", "testing") + out, _, _, err = runCommandWithStdoutStderr(pausedCmd) + if err != nil { + t.Fatal(out, err) + } + + execCmd := exec.Command(dockerBinary, "exec", "-i", "-t", ContainerID, "echo", "hello") + out, _, err = runCommandWithOutput(execCmd) + if err == nil { + t.Fatal("container should fail to exec new command if it is paused") + } + + expected := ContainerID + " is paused, unpause the container before exec" + if !strings.Contains(out, expected) { + t.Fatal("container should not exec new command if it is paused") + } + + logDone("exec - exec should not exec a pause container") +} diff --git a/integration-cli/docker_utils.go b/integration-cli/docker_utils.go index 2c66ce2d0c..031738df84 100644 --- a/integration-cli/docker_utils.go +++ b/integration-cli/docker_utils.go @@ -328,6 +328,46 @@ func deleteAllContainers() error { return nil } +func getPausedContainers() (string, error) { + getPausedContainersCmd := exec.Command(dockerBinary, "ps", "-f", "status=paused", "-q", "-a") + out, exitCode, err := runCommandWithOutput(getPausedContainersCmd) + if exitCode != 0 && err == nil { + err = fmt.Errorf("failed to get a list of paused containers: %v\n", out) + } + + return out, err +} + +func unpauseContainer(container string) error { + unpauseCmd := exec.Command(dockerBinary, "unpause", container) + exitCode, err := runCommand(unpauseCmd) + if exitCode != 0 && err == nil { + err = fmt.Errorf("failed to unpause container") + } + + return nil +} + +func unpauseAllContainers() error { + containers, err := getPausedContainers() + 
if err != nil { + fmt.Println(containers) + return err + } + + containers = strings.Replace(containers, "\n", " ", -1) + containers = strings.Trim(containers, " ") + containerList := strings.Split(containers, " ") + + for _, value := range containerList { + if err = unpauseContainer(value); err != nil { + return err + } + } + + return nil +} + func deleteImages(images ...string) error { args := make([]string, 1, 2) args[0] = "rmi" From da667581cffec4048d848454266b8d3cad55c859 Mon Sep 17 00:00:00 2001 From: David Mat Date: Thu, 4 Dec 2014 13:26:36 +0100 Subject: [PATCH 547/592] Update Amazon EC2 Docker installation instructions The installation guide for EC2 is outdated, as the current version of Amazon Linux (2014.09) is now Docker ready. No need to go through the manual route anymore. The official AMI has Docker packages in the repository now (this was the 'pre-release' option in the outdated instructions). Docker-DCO-1.1-Signed-off-by: David Mat (github: davidmat) --- docs/sources/installation/amazon.md | 67 +++++------------------------ 1 file changed, 10 insertions(+), 57 deletions(-) diff --git a/docs/sources/installation/amazon.md b/docs/sources/installation/amazon.md index 3715d5c44f..58d269ad7a 100644 --- a/docs/sources/installation/amazon.md +++ b/docs/sources/installation/amazon.md @@ -4,86 +4,39 @@ page_keywords: amazon ec2, virtualization, cloud, docker, documentation, install # Amazon EC2 -There are several ways to install Docker on AWS EC2: - - - [*Amazon QuickStart (Release Candidate - March 2014)*]( - #amazon-quickstart-release-candidate-march-2014) or - - [*Amazon QuickStart*](#amazon-quickstart) or - - [*Standard Ubuntu Installation*](#standard-ubuntu-installation) +There are several ways to install Docker on AWS EC2. You can use Amazon Linux, which includes the Docker packages in its Software Repository, or opt for any of the other supported Linux images, for example a [*Standard Ubuntu Installation*](#standard-ubuntu-installation). **You'll need an** [AWS account](http://aws.amazon.com/) **first, of course.** -## Amazon QuickStart +## Amazon QuickStart with Amazon Linux AMI 2014.09.1 -1. **Choose an image:** - - Launch the [Create Instance - Wizard](https://console.aws.amazon.com/ec2/v2/home?#LaunchInstanceWizard:) - menu on your AWS Console. - - Click the `Select` button for a 64Bit Ubuntu - image. For example: Ubuntu Server 12.04.3 LTS - - For testing you can use the default (possibly free) - `t1.micro` instance (more info on - [pricing](http://aws.amazon.com/ec2/pricing/)). - - Click the `Next: Configure Instance Details` - button at the bottom right. - -2. **Tell CloudInit to install Docker:** - - When you're on the "Configure Instance Details" step, expand the - "Advanced Details" section. - - Under "User data", select "As text". - - Enter `#include https://get.docker.com` into - the instance *User Data*. - [CloudInit](https://help.ubuntu.com/community/CloudInit) is part - of the Ubuntu image you chose; it will bootstrap Docker by - running the shell script located at this URL. - -3. After a few more standard choices where defaults are probably ok, - your AWS Ubuntu instance with Docker should be running! - -**If this is your first AWS instance, you may need to set up your -Security Group to allow SSH.** By default all incoming ports to your new -instance will be blocked by the AWS Security Group, so you might just -get timeouts when you try to connect. - -Installing with `get.docker.com` (as above) will -create a service named `lxc-docker`. 
It will also -set up a [*docker group*](../binaries/#dockergroup) and you may want to -add the *ubuntu* user to it so that you don't have to use -`sudo` for every Docker command. - -Once you`ve got Docker installed, you're ready to try it out – head on -over to the [User Guide](/userguide). - -## Amazon QuickStart (Release Candidate - March 2014) - -Amazon just published new Docker-ready AMIs (2014.03 Release Candidate). -Docker packages can now be installed from Amazon's provided Software +The latest Amazon Linux AMI, 2014.09.1, is Docker ready. Docker packages can be installed from Amazon's provided Software Repository. 1. **Choose an image:** - Launch the [Create Instance Wizard](https://console.aws.amazon.com/ec2/v2/home?#LaunchInstanceWizard:) menu on your AWS Console. - - Click the `Community AMI` menu option on the - left side - - Search for `2014.03` and select one of the Amazon provided AMI, - for example `amzn-ami-pv-2014.03.rc-0.x86_64-ebs` + - In the Quick Start menu, select the Amazon provided AMI for Amazon Linux 2014.09.1 - For testing you can use the default (possibly free) - `t1.micro` instance (more info on + `t2.micro` instance (more info on [pricing](http://aws.amazon.com/ec2/pricing/)). - Click the `Next: Configure Instance Details` button at the bottom right. - 2. After a few more standard choices where defaults are probably ok, your Amazon Linux instance should be running! 3. SSH to your instance to install Docker : `ssh -i ec2-user@` - 4. Once connected to the instance, type `sudo yum install -y docker ; sudo service docker start` to install and start Docker +**If this is your first AWS instance, you may need to set up your Security Group to allow SSH.** By default all incoming ports to your new instance will be blocked by the AWS Security Group, so you might just get timeouts when you try to connect. + +Once you`ve got Docker installed, you're ready to try it out – head on +over to the [User Guide](/userguide). + ## Standard Ubuntu Installation If you want a more hands-on installation, then you can follow the From f47d6b9b9de8c567e9e42e12243cbcce99a7bfc7 Mon Sep 17 00:00:00 2001 From: Michael Crosby Date: Thu, 4 Dec 2014 12:03:44 -0800 Subject: [PATCH 548/592] Use consistent ApplyLayer in overlayfs Signed-off-by: Michael Crosby --- daemon/graphdriver/overlay/overlay.go | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/daemon/graphdriver/overlay/overlay.go b/daemon/graphdriver/overlay/overlay.go index c45c3ea7ad..2569ccb6d1 100644 --- a/daemon/graphdriver/overlay/overlay.go +++ b/daemon/graphdriver/overlay/overlay.go @@ -15,6 +15,7 @@ import ( log "github.com/Sirupsen/logrus" "github.com/docker/docker/daemon/graphdriver" "github.com/docker/docker/pkg/archive" + "github.com/docker/docker/pkg/chrootarchive" "github.com/docker/libcontainer/label" ) @@ -346,7 +347,7 @@ func (d *Driver) ApplyDiff(id string, parent string, diff archive.ArchiveReader) return 0, err } - if err := archive.ApplyLayer(tmpRootDir, diff); err != nil { + if err := chrootarchive.ApplyLayer(tmpRootDir, diff); err != nil { return 0, err } From 5c91bb93a7e35b2d443090cabc7ec0a2ca59b6ee Mon Sep 17 00:00:00 2001 From: Doug Davis Date: Thu, 4 Dec 2014 14:06:40 -0800 Subject: [PATCH 549/592] Make 'docker build' send non-err output to stdout Right now 'docker build' will send: Sending build context to Docker daemon to stderr, instead of stdout. This PR fixes that. 
I looked in the rest of api/client/commands.go for other cases that might do this and only one jumped out at me: https://github.com/docker/docker/blob/master/api/client/commands.go#L2202 but I think if I changed that to go to stdout then it'll mess people up who are expecting just the container ID to be printed to the screen and there is no --quiet type of flag we can check. Closes #9404 Signed-off-by: Doug Davis --- api/client/commands.go | 2 +- integration-cli/docker_cli_build_test.go | 16 ++++++++++++++++ integration-cli/docker_utils.go | 19 +++++++++++++++++++ 3 files changed, 36 insertions(+), 1 deletion(-) diff --git a/api/client/commands.go b/api/client/commands.go index 6b97f3656a..26c0a3d81d 100644 --- a/api/client/commands.go +++ b/api/client/commands.go @@ -180,7 +180,7 @@ func (cli *DockerCli) CmdBuild(args ...string) error { // FIXME: ProgressReader shouldn't be this annoying to use if context != nil { sf := utils.NewStreamFormatter(false) - body = utils.ProgressReader(context, 0, cli.err, sf, true, "", "Sending build context to Docker daemon") + body = utils.ProgressReader(context, 0, cli.out, sf, true, "", "Sending build context to Docker daemon") } // Send the build context v := &url.Values{} diff --git a/integration-cli/docker_cli_build_test.go b/integration-cli/docker_cli_build_test.go index b76e1d4789..4cae2adfdb 100644 --- a/integration-cli/docker_cli_build_test.go +++ b/integration-cli/docker_cli_build_test.go @@ -3523,3 +3523,19 @@ func TestBuildWithTabs(t *testing.T) { } logDone("build - with tabs") } + +func TestBuildStderr(t *testing.T) { + // This test just makes sure that no non-error output goes + // to stderr + name := "testbuildstderr" + defer deleteImages(name) + _, _, stderr, err := buildImageWithStdoutStderr(name, + "FROM busybox\nRUN echo one", true) + if err != nil { + t.Fatal(err) + } + if stderr != "" { + t.Fatal("Stderr should have been empty, instead its: %q", stderr) + } + logDone("build - testing stderr") +} diff --git a/integration-cli/docker_utils.go b/integration-cli/docker_utils.go index 2c66ce2d0c..a629c38754 100644 --- a/integration-cli/docker_utils.go +++ b/integration-cli/docker_utils.go @@ -596,6 +596,25 @@ func buildImageWithOut(name, dockerfile string, useCache bool) (string, string, return id, out, nil } +func buildImageWithStdoutStderr(name, dockerfile string, useCache bool) (string, string, string, error) { + args := []string{"build", "-t", name} + if !useCache { + args = append(args, "--no-cache") + } + args = append(args, "-") + buildCmd := exec.Command(dockerBinary, args...) + buildCmd.Stdin = strings.NewReader(dockerfile) + stdout, stderr, exitCode, err := runCommandWithStdoutStderr(buildCmd) + if err != nil || exitCode != 0 { + return "", stdout, stderr, fmt.Errorf("failed to build the image: %s", stdout) + } + id, err := getIDByName(name) + if err != nil { + return "", stdout, stderr, err + } + return id, stdout, stderr, nil +} + func buildImage(name, dockerfile string, useCache bool) (string, error) { id, _, err := buildImageWithOut(name, dockerfile, useCache) return id, err From e01baa6be782320d3c0800697c882c2b919b202f Mon Sep 17 00:00:00 2001 From: Sven Dowideit Date: Fri, 28 Nov 2014 14:21:55 +1000 Subject: [PATCH 550/592] Auto-update documentation from the output of the cli. I've re-jigged the run man page so that each option's text begins with the cli's help text for that flag, and then ay subsequent lines in the man page are carried forward. 
Signed-off-by: Sven Dowideit Docker-DCO-1.1-Signed-off-by: Sven Dowideit (github: SvenDowideit) --- docs/docs-update.py | 39 ++++- docs/man/docker-attach.1.md | 2 +- docs/man/docker-build.1.md | 2 +- docs/man/docker-commit.1.md | 2 +- docs/man/docker-create.1.md | 17 ++- docs/man/docker-exec.1.md | 20 +-- docs/man/docker-history.1.md | 2 +- docs/man/docker-images.1.md | 2 +- docs/man/docker-kill.1.md | 2 +- docs/man/docker-login.1.md | 2 +- docs/man/docker-port.1.md | 7 +- docs/man/docker-ps.1.md | 2 + docs/man/docker-restart.1.md | 2 +- docs/man/docker-rm.1.md | 2 +- docs/man/docker-run.1.md | 170 ++++++++++++---------- docs/man/docker-save.1.md | 5 +- docs/man/docker-stop.1.md | 2 +- docs/man/docker-tag.1.md | 2 +- docs/sources/reference/commandline/cli.md | 23 +-- 19 files changed, 190 insertions(+), 115 deletions(-) diff --git a/docs/docs-update.py b/docs/docs-update.py index 11d7452268..586bde482d 100755 --- a/docs/docs-update.py +++ b/docs/docs-update.py @@ -104,6 +104,11 @@ def update_man_pages(): re.MULTILINE | re.DOTALL ) + options_re = re.compile( + r".*# OPTIONS(.*?)# (HISTORY|EXAMPLES?).*", + re.MULTILINE | re.DOTALL + ) + example_re = re.compile( r".*# EXAMPLES?(.*)# HISTORY.*", re.MULTILINE | re.DOTALL @@ -116,8 +121,12 @@ def update_man_pages(): for command in cmds: print "COMMAND: "+command + if command == "": + print "SKIPPING" + continue history = "" description = "" + original_options = "" examples = "" if os.path.isfile("docs/man/docker-"+command+".1.md"): intext = open("docs/man/docker-"+command+".1.md", "r") @@ -126,6 +135,10 @@ def update_man_pages(): match = desc_re.match(txt) if match: description = match.group(1) + match = options_re.match(txt) + if match: + original_options = match.group(1) + #print "MATCHED OPTIONS\n" + original_options match = example_re.match(txt) if match: examples = match.group(1) @@ -170,7 +183,7 @@ def update_man_pages(): # replace [OPTIONS] with the list of params options = "" - match = re.match("\[OPTIONS\](.*)", usage) + match = re.match("\[OPTIONS\]\s*(.*)", usage) if match: usage = match.group(1) @@ -178,11 +191,13 @@ def update_man_pages(): # TODO: sort without the `-`'s for key in sorted(params.keys(), key=lambda s: s.lower()): # split on commas, remove --?.*=.*, put in *'s mumble + flags = [] ps = [] opts = [] for k in key_params[key].split(","): match = re.match("(-+)([A-Za-z-0-9]*)(?:=(.*))?", k.lstrip()) if match: + flags.append("{}{}".format(match.group(1), match.group(2))) p = "**{}{}**".format(match.group(1), match.group(2)) o = "**{}{}**".format(match.group(1), match.group(2)) if match.group(3): @@ -203,7 +218,25 @@ def update_man_pages(): else: print "nomatch:{}".format(k) new_usage = "{}\n[{}]".format(new_usage, "|".join(ps)) + options = "{}{}\n {}\n\n".format(options, ", ".join(opts), params[key]) + + # look at the original options documentation and if its hand written, add it too. 
+ print "SVEN_re: "+flags[0] + singleoption_re = re.compile( + r".*[\r\n]\*\*"+flags[0]+"\*\*([^\r\n]*)[\r\n]+(.*?)[\r\n](\*\*-|# [A-Z]|\*\*[A-Z]+\*\*).*", + #r""+flags[0]+"(.*)(^\*\*-.*)?", + re.MULTILINE | re.DOTALL + ) + match = singleoption_re.match(original_options) + if match: + info = match.group(2).strip() + print "MATCHED: " + match.group(1).strip() + if info != params[key].strip(): + #info = re.sub(params[key].strip(), '', info, flags=re.MULTILINE) + print "INFO changed: " +info + options = "{} {}\n\n".format(options, info.strip()) + if new_usage != "": new_usage = "{}\n".format(new_usage.strip()) usage = new_usage + usage @@ -230,8 +263,8 @@ def update_man_pages(): ".*{}.*".format(date_string), re.MULTILINE | re.DOTALL ) - if not recent_history_re.match(history): - outtext.write("{}, updated by Sven Dowideit \n".format(date_string)) +# if not recent_history_re.match(history): +# outtext.write("{}, updated by Sven Dowideit \n".format(date_string)) outtext.close() # main diff --git a/docs/man/docker-attach.1.md b/docs/man/docker-attach.1.md index 78fdac60a8..21bd566406 100644 --- a/docs/man/docker-attach.1.md +++ b/docs/man/docker-attach.1.md @@ -8,7 +8,7 @@ docker-attach - Attach to a running container **docker attach** [**--no-stdin**[=*false*]] [**--sig-proxy**[=*true*]] - CONTAINER +CONTAINER # DESCRIPTION If you **docker run** a container in detached mode (**-d**), you can reattach to diff --git a/docs/man/docker-build.1.md b/docs/man/docker-build.1.md index c562660b6f..67d7343af3 100644 --- a/docs/man/docker-build.1.md +++ b/docs/man/docker-build.1.md @@ -11,7 +11,7 @@ docker-build - Build a new image from the source code at PATH [**-q**|**--quiet**[=*false*]] [**--rm**[=*true*]] [**-t**|**--tag**[=*TAG*]] - PATH | URL | - +PATH | URL | - # DESCRIPTION This will read the Dockerfile from the directory specified in **PATH**. diff --git a/docs/man/docker-commit.1.md b/docs/man/docker-commit.1.md index 31edcc0397..0d1d5406cf 100644 --- a/docs/man/docker-commit.1.md +++ b/docs/man/docker-commit.1.md @@ -9,7 +9,7 @@ docker-commit - Create a new image from a container's changes [**-a**|**--author**[=*AUTHOR*]] [**-m**|**--message**[=*MESSAGE*]] [**-p**|**--pause**[=*true*]] - CONTAINER [REPOSITORY[:TAG]] +CONTAINER [REPOSITORY[:TAG]] # DESCRIPTION Using an existing container's name or ID you can create a new image. diff --git a/docs/man/docker-create.1.md b/docs/man/docker-create.1.md index bc431aa975..a83873794a 100644 --- a/docs/man/docker-create.1.md +++ b/docs/man/docker-create.1.md @@ -22,21 +22,24 @@ docker-create - Create a new container [**--expose**[=*[]*]] [**-h**|**--hostname**[=*HOSTNAME*]] [**-i**|**--interactive**[=*false*]] +[**--ipc**[=*IPC*]] [**--link**[=*[]*]] [**--lxc-conf**[=*[]*]] [**-m**|**--memory**[=*MEMORY*]] +[**--mac-address**[=*MAC-ADDRESS*]] [**--name**[=*NAME*]] [**--net**[=*"bridge"*]] [**-P**|**--publish-all**[=*false*]] [**-p**|**--publish**[=*[]*]] [**--privileged**[=*false*]] [**--restart**[=*RESTART*]] +[**--security-opt**[=*[]*]] [**-t**|**--tty**[=*false*]] [**-u**|**--user**[=*USER*]] [**-v**|**--volume**[=*[]*]] [**--volumes-from**[=*[]*]] [**-w**|**--workdir**[=*WORKDIR*]] - IMAGE [COMMAND] [ARG...] +IMAGE [COMMAND] [ARG...] # OPTIONS **-a**, **--attach**=[] @@ -87,6 +90,11 @@ docker-create - Create a new container **-i**, **--interactive**=*true*|*false* Keep STDIN open even if not attached. The default is *false*. 
+**--ipc**="" + Default is to create a private IPC namespace (POSIX SysV IPC) for the container + 'container:': reuses another container shared memory, semaphores and message queues + 'host': use the host shared memory,semaphores and message queues inside the container. Note: the host mode gives the container full access to local shared memory and is therefore considered insecure. + **--link**=[] Add link to another container in the form of name:alias @@ -96,6 +104,9 @@ docker-create - Create a new container **-m**, **--memory**="" Memory limit (format: , where unit = b, k, m or g) +**--mac-address**="" + Container MAC address (e.g. 92:d0:c6:0a:29:33) + **--name**="" Assign a name to the container @@ -120,6 +131,9 @@ docker-create - Create a new container **--restart**="" Restart policy to apply when a container exits (no, on-failure[:max-retry], always) +**--security-opt**=[] + Security Options + **-t**, **--tty**=*true*|*false* Allocate a pseudo-TTY. The default is *false*. @@ -138,3 +152,4 @@ docker-create - Create a new container # HISTORY August 2014, updated by Sven Dowideit September 2014, updated by Sven Dowideit +November 2014, updated by Sven Dowideit diff --git a/docs/man/docker-exec.1.md b/docs/man/docker-exec.1.md index 38ba3de412..c4e649016a 100644 --- a/docs/man/docker-exec.1.md +++ b/docs/man/docker-exec.1.md @@ -1,6 +1,6 @@ % DOCKER(1) Docker User Manuals % Docker Community -% SEPT 2014 +% JUNE 2014 # NAME docker-exec - Run a command in a running container @@ -9,7 +9,7 @@ docker-exec - Run a command in a running container [**-d**|**--detach**[=*false*]] [**-i**|**--interactive**[=*false*]] [**-t**|**--tty**[=*false*]] - CONTAINER COMMAND [ARG...] +CONTAINER COMMAND [ARG...] # DESCRIPTION @@ -19,17 +19,17 @@ The command started using `docker exec` will only run while the container's prim process (`PID 1`) is running, and will not be restarted if the container is restarted. If the container is paused, then the `docker exec` command will wait until the -container is unpaused, and then run. - -# Options +container is unpaused, and then run +# OPTIONS **-d**, **--detach**=*true*|*false* - Detached mode. This runs the new process in the background. + Detached mode: run command in the background. The default is *false*. **-i**, **--interactive**=*true*|*false* - When set to true, keep STDIN open even if not attached. The default is false. + Keep STDIN open even if not attached. The default is *false*. **-t**, **--tty**=*true*|*false* - When set to true Docker can allocate a pseudo-tty and attach to the standard -input of the process. This can be used, for example, to run a throwaway -interactive shell. The default value is false. + Allocate a pseudo-TTY. The default is *false*. 
+ +# HISTORY +November 2014, updated by Sven Dowideit diff --git a/docs/man/docker-history.1.md b/docs/man/docker-history.1.md index ddb164e50b..65ec9cd173 100644 --- a/docs/man/docker-history.1.md +++ b/docs/man/docker-history.1.md @@ -8,7 +8,7 @@ docker-history - Show the history of an image **docker history** [**--no-trunc**[=*false*]] [**-q**|**--quiet**[=*false*]] - IMAGE +IMAGE # DESCRIPTION diff --git a/docs/man/docker-images.1.md b/docs/man/docker-images.1.md index 4acafca018..6c9e6a60b5 100644 --- a/docs/man/docker-images.1.md +++ b/docs/man/docker-images.1.md @@ -10,7 +10,7 @@ docker-images - List images [**-f**|**--filter**[=*[]*]] [**--no-trunc**[=*false*]] [**-q**|**--quiet**[=*false*]] - [REPOSITORY] +[REPOSITORY] # DESCRIPTION This command lists the images stored in the local Docker repository. diff --git a/docs/man/docker-kill.1.md b/docs/man/docker-kill.1.md index 3c8d59e6d5..d1d0ee7ad6 100644 --- a/docs/man/docker-kill.1.md +++ b/docs/man/docker-kill.1.md @@ -7,7 +7,7 @@ docker-kill - Kill a running container using SIGKILL or a specified signal # SYNOPSIS **docker kill** [**-s**|**--signal**[=*"KILL"*]] - CONTAINER [CONTAINER...] +CONTAINER [CONTAINER...] # DESCRIPTION diff --git a/docs/man/docker-login.1.md b/docs/man/docker-login.1.md index c269353079..e367050be2 100644 --- a/docs/man/docker-login.1.md +++ b/docs/man/docker-login.1.md @@ -9,7 +9,7 @@ docker-login - Register or log in to a Docker registry server, if no server is s [**-e**|**--email**[=*EMAIL*]] [**-p**|**--password**[=*PASSWORD*]] [**-u**|**--username**[=*USERNAME*]] - [SERVER] +[SERVER] # DESCRIPTION Register or Login to a docker registry server, if no server is diff --git a/docs/man/docker-port.1.md b/docs/man/docker-port.1.md index 97cc61b7e5..8c4c870dc2 100644 --- a/docs/man/docker-port.1.md +++ b/docs/man/docker-port.1.md @@ -5,11 +5,15 @@ docker-port - List port mappings for the CONTAINER, or lookup the public-facing port that is NAT-ed to the PRIVATE_PORT # SYNOPSIS -**docker port** CONTAINER [PRIVATE_PORT[/PROTO]] +**docker port** +CONTAINER [PRIVATE_PORT[/PROTO]] # DESCRIPTION List port mappings for the CONTAINER, or lookup the public-facing port that is NAT-ed to the PRIVATE_PORT +# OPTIONS +There are no available options. + # EXAMPLES You can find out all the ports mapped by not specifying a `PRIVATE_PORT`, or ask for just a specific mapping: @@ -30,3 +34,4 @@ ask for just a specific mapping: # HISTORY April 2014, Originally compiled by William Henry (whenry at redhat dot com) June 2014, updated by Sven Dowideit +November 2014, updated by Sven Dowideit diff --git a/docs/man/docker-ps.1.md b/docs/man/docker-ps.1.md index 9a9ae70c4c..d34d98396e 100644 --- a/docs/man/docker-ps.1.md +++ b/docs/man/docker-ps.1.md @@ -32,6 +32,7 @@ the running containers. **-f**, **--filter**=[] Provide filter values. Valid filters: exited= - containers with exit code of + status=(restarting|running|paused|exited) **-l**, **--latest**=*true*|*false* Show only the latest created container, include non-running ones. The default is *false*. @@ -74,3 +75,4 @@ April 2014, Originally compiled by William Henry (whenry at redhat dot com) based on docker.com source material and internal work. 
June 2014, updated by Sven Dowideit August 2014, updated by Sven Dowideit +November 2014, updated by Sven Dowideit diff --git a/docs/man/docker-restart.1.md b/docs/man/docker-restart.1.md index 2a08caa5e8..9a22688000 100644 --- a/docs/man/docker-restart.1.md +++ b/docs/man/docker-restart.1.md @@ -7,7 +7,7 @@ docker-restart - Restart a running container # SYNOPSIS **docker restart** [**-t**|**--time**[=*10*]] - CONTAINER [CONTAINER...] +CONTAINER [CONTAINER...] # DESCRIPTION Restart each container listed. diff --git a/docs/man/docker-rm.1.md b/docs/man/docker-rm.1.md index bae6a7ea8c..b8f31bd687 100644 --- a/docs/man/docker-rm.1.md +++ b/docs/man/docker-rm.1.md @@ -9,7 +9,7 @@ docker-rm - Remove one or more containers [**-f**|**--force**[=*false*]] [**-l**|**--link**[=*false*]] [**-v**|**--volumes**[=*false*]] - CONTAINER [CONTAINER...] +CONTAINER [CONTAINER...] # DESCRIPTION diff --git a/docs/man/docker-run.1.md b/docs/man/docker-run.1.md index c3a0667898..f0129bedc9 100644 --- a/docs/man/docker-run.1.md +++ b/docs/man/docker-run.1.md @@ -23,26 +23,26 @@ docker-run - Run a command in a new container [**--expose**[=*[]*]] [**-h**|**--hostname**[=*HOSTNAME*]] [**-i**|**--interactive**[=*false*]] -[**--ipc**[=*[]*]] -[**--security-opt**[=*[]*]] +[**--ipc**[=*IPC*]] [**--link**[=*[]*]] [**--lxc-conf**[=*[]*]] [**-m**|**--memory**[=*MEMORY*]] +[**--mac-address**[=*MAC-ADDRESS*]] [**--name**[=*NAME*]] [**--net**[=*"bridge"*]] -[**--mac-address**[=*MACADDRESS*]] [**-P**|**--publish-all**[=*false*]] [**-p**|**--publish**[=*[]*]] [**--privileged**[=*false*]] -[**--restart**[=*POLICY*]] +[**--restart**[=*RESTART*]] [**--rm**[=*false*]] +[**--security-opt**[=*[]*]] [**--sig-proxy**[=*true*]] [**-t**|**--tty**[=*false*]] [**-u**|**--user**[=*USER*]] [**-v**|**--volume**[=*[]*]] [**--volumes-from**[=*[]*]] [**-w**|**--workdir**[=*WORKDIR*]] - IMAGE [COMMAND] [ARG...] +IMAGE [COMMAND] [ARG...] # DESCRIPTION @@ -59,21 +59,26 @@ all image dependencies, from the repository in the same way running **docker pull** IMAGE, before it starts the container from that image. # OPTIONS +**-a**, **--attach**=[] + Attach to STDIN, STDOUT or STDERR. -**-a**, **--attach**=*stdin*|*stdout*|*stderr* - Attach to stdin, stdout or stderr. In foreground mode (the default when -**-d** is not specified), **docker run** can start the process in the container + In foreground mode (the default when **-d** +is not specified), **docker run** can start the process in the container and attach the console to the process’s standard input, output, and standard error. It can even pretend to be a TTY (this is what most commandline executables expect) and pass along signals. The **-a** option can be set for each of stdin, stdout, and stderr. -**--add-host**=*hostname*:*ip* +**--add-host**=[] + Add a custom host-to-IP mapping (host:ip) + Add a line to /etc/hosts. The format is hostname:ip. The **--add-host** option can be set multiple times. **-c**, **--cpu-shares**=0 - CPU shares in relative weight. You can increase the priority of a container + CPU shares (relative weight) + + You can increase the priority of a container with the -c option. By default, all containers run at the same priority and get the same proportion of CPU cycles, but you can tell the kernel to give more shares of CPU time to one or more containers when you start them via **docker @@ -92,8 +97,9 @@ run**. CPUs in which to allow execution (0-3, 0,1) **-d**, **--detach**=*true*|*false* - Detached mode. This runs the container in the background. 
It outputs the new -container's ID and any error messages. At any time you can run **docker ps** in + Detached mode: run the container in the background and print the new container ID. The default is *false*. + + At any time you can run **docker ps** in the other shell to view a list of the running containers. You can reattach to a detached container with **docker attach**. If you choose to run a container in the detached mode, then you cannot use the **-rm** option. @@ -107,19 +113,24 @@ stopping the process by pressing the keys CTRL-P CTRL-Q. **--dns-search**=[] Set custom DNS search domains (Use --dns-search=. if you don't wish to set the search domain) -**--dns**=*IP-address* - Set custom DNS servers. This option can be used to override the DNS +**--dns**=[] + Set custom DNS servers + + This option can be used to override the DNS configuration passed to the container. Typically this is necessary when the host DNS configuration is invalid for the container (e.g., 127.0.0.1). When this is the case the **--dns** flags is necessary for every run. -**-e**, **--env**=*environment* - Set environment variables. This option allows you to specify arbitrary +**-e**, **--env**=[] + Set environment variables + + This option allows you to specify arbitrary environment variables that are available for the process that will be launched inside of the container. +**--entrypoint**="" + Overwrite the default ENTRYPOINT of the image -**--entrypoint**=*command* This option allows you to overwrite the default entrypoint of the image that is set in the Dockerfile. The ENTRYPOINT of an image is similar to a COMMAND because it specifies what executable to run when the container starts, but it is @@ -137,27 +148,25 @@ ENTRYPOINT. **--expose**=[] Expose a port or a range of ports (e.g. --expose=3300-3310) from the container without publishing it to your host -**-h**, **--hostname**=*hostname* +**-h**, **--hostname**="" + Container host name + Sets the container host name that is available inside the container. **-i**, **--interactive**=*true*|*false* + Keep STDIN open even if not attached. The default is *false*. + When set to true, keep stdin open even if not attached. The default is false. -**--ipc**=[] - Set the IPC mode for the container - **container**:<*name*|*id*>: reuses another container's IPC stack - **host**: use the host's IPC stack inside the container. - Note: the host mode gives the container full access to local IPC and is therefore considered insecure. +**--ipc**="" + Default is to create a private IPC namespace (POSIX SysV IPC) for the container + 'container:': reuses another container shared memory, semaphores and message queues + 'host': use the host shared memory,semaphores and message queues inside the container. Note: the host mode gives the container full access to local shared memory and is therefore considered insecure. -**--security-opt**=*secdriver*:*name*:*value* - "label:user:USER" : Set the label user for the container - "label:role:ROLE" : Set the label role for the container - "label:type:TYPE" : Set the label type for the container - "label:level:LEVEL" : Set the label level for the container - "label:disable" : Turn off label confinement for the container +**--link**=[] + Add link to another container in the form of name:alias -**--link**=*name*:*alias* - Add link to another container. The format is name:alias. 
If the operator + If the operator uses **--link** when starting the new client container, then the client container can access the exposed port via a private networking interface. Docker will set some environment variables in the client container to help indicate @@ -166,7 +175,9 @@ which interface and port to use. **--lxc-conf**=[] (lxc exec-driver only) Add custom lxc options --lxc-conf="lxc.cgroup.cpuset.cpus = 0,1" -**-m**, **--memory**=*memory-limit* +**-m**, **--memory**="" + Memory limit (format: , where unit = b, k, m or g) + Allows you to constrain the memory available to a container. If the host supports swap memory, then the -m memory setting can be larger than physical RAM. If a limit of 0 is specified, the container's memory is not limited. The @@ -174,15 +185,23 @@ actual limit may be rounded up to a multiple of the operating system's page size, if it is not already. The memory limit should be formatted as follows: ``, where unit = b, k, m or g. -**--name**=*name* - Assign a name to the container. The operator can identify a container in -three ways: +**--mac-address**="" + Container MAC address (e.g. 92:d0:c6:0a:29:33) + + Remember that the MAC address in an Ethernet network must be unique. +The IPv6 link-local address will be based on the device's MAC address +according to RFC4862. + +**--name**="" + Assign a name to the container + + The operator can identify a container in three ways: UUID long identifier (“f78375b1c487e03c9438c729345e54db9d20cfa2ac1fc3494b6eb60872e74778”) UUID short identifier (“f78375b1c487”) Name (“jonah”) -The UUID identifiers come from the Docker daemon, and if a name is not assigned + The UUID identifiers come from the Docker daemon, and if a name is not assigned to the container with **--name** then the daemon will also generate a random string name. The name is useful when defining links (see **--link**) (or any other place you need to identify a container). This works for both background @@ -190,21 +209,14 @@ and foreground Docker containers. **--net**="bridge" Set the Network mode for the container - **bridge**: creates a new network stack for the container on the docker bridge - **none**: no networking for this container - **container**:<*name*|*id*>: reuses another container's network stack - **host**: use the host network stack inside the container. - Note: the host mode gives the container full access to local system services such as D-bus and is therefore considered insecure. - -**--mac-address**=*macaddress* - Set the MAC address for the container's Ethernet device: - --mac-address=12:34:56:78:9a:bc - -Remember that the MAC address in an Ethernet network must be unique. -The IPv6 link-local address will be based on the device's MAC address -according to RFC4862. + 'bridge': creates a new network stack for the container on the docker bridge + 'none': no networking for this container + 'container:': reuses another container network stack + 'host': use the host network stack inside the container. Note: the host mode gives the container full access to local system services such as D-bus and is therefore considered insecure. **-P**, **--publish-all**=*true*|*false* + Publish all exposed ports to the host interfaces. The default is *false*. + When set to true publish all exposed ports to the host interfaces. The default is false. If the operator uses -P (or -p) then Docker will make the exposed port accessible on the host and the ports will be available to any @@ -213,29 +225,44 @@ ports to a random port on the host between 49153 and 65535. 
To find the mapping between the host ports and the exposed ports, use **docker port**. **-p**, **--publish**=[] - Publish a container's port to the host (format: ip:hostPort:containerPort | -ip::containerPort | hostPort:containerPort | containerPort) (use **docker port** to see the -actual mapping) + Publish a container's port to the host + format: ip:hostPort:containerPort | ip::containerPort | hostPort:containerPort | containerPort + (use 'docker port' to see the actual mapping) **--privileged**=*true*|*false* - Give extended privileges to this container. By default, Docker containers are + Give extended privileges to this container. The default is *false*. + + By default, Docker containers are “unprivileged” (=false) and cannot, for example, run a Docker daemon inside the Docker container. This is because by default a container is not allowed to access any devices. A “privileged” container is given access to all devices. -When the operator executes **docker run --privileged**, Docker will enable access + When the operator executes **docker run --privileged**, Docker will enable access to all devices on the host as well as set some configuration in AppArmor to allow the container nearly all the same access to the host as processes running outside of a container on the host. +**--restart**="" + Restart policy to apply when a container exits (no, on-failure[:max-retry], always) **--rm**=*true*|*false* Automatically remove the container when it exits (incompatible with -d). The default is *false*. +**--security-opt**=[] + Security Options + + "label:user:USER" : Set the label user for the container + "label:role:ROLE" : Set the label role for the container + "label:type:TYPE" : Set the label type for the container + "label:level:LEVEL" : Set the label level for the container + "label:disable" : Turn off label confinement for the container + **--sig-proxy**=*true*|*false* Proxy received signals to the process (non-TTY mode only). SIGCHLD, SIGSTOP, and SIGKILL are not proxied. The default is *true*. **-t**, **--tty**=*true*|*false* + Allocate a pseudo-TTY. The default is *false*. + When set to true Docker can allocate a pseudo-tty and attach to the standard input of any container. This can be used, for example, to run a throwaway interactive shell. The default is value is false. @@ -243,52 +270,39 @@ interactive shell. The default is value is false. **-u**, **--user**="" Username or UID +**-v**, **--volume**=[] + Bind mount a volume (e.g., from the host: -v /host:/container, from Docker: -v /container) -**-v**, **--volume**=*volume*[:ro|:rw] - Bind mount a volume to the container. - -The **-v** option can be used one or + The **-v** option can be used one or more times to add one or more mounts to a container. These mounts can then be used in other containers using the **--volumes-from** option. -The volume may be optionally suffixed with :ro or :rw to mount the volumes in + The volume may be optionally suffixed with :ro or :rw to mount the volumes in read-only or read-write mode, respectively. By default, the volumes are mounted read-write. See examples. -**--volumes-from**=*container-id*[:ro|:rw] +**--volumes-from**=[] + Mount volumes from the specified container(s) + Will mount volumes from the specified container identified by container-id. Once a volume is mounted in a one container it can be shared with other containers using the **--volumes-from** option when running those other containers. The volumes can be shared even if the original container with the mount is not running. 
-The container ID may be optionally suffixed with :ro or + The container ID may be optionally suffixed with :ro or :rw to mount the volumes in read-only or read-write mode, respectively. By default, the volumes are mounted in the same mode (read write or read only) as the reference container. +**-w**, **--workdir**="" + Working directory inside the container -**-w**, **--workdir**=*directory* - Working directory inside the container. The default working directory for + The default working directory for running binaries within a container is the root directory (/). The developer can set a different default with the Dockerfile WORKDIR instruction. The operator can override the working directory by using the **-w** option. - -**IMAGE** - The image name or ID. You can specify a version of an image you'd like to run - the container with by adding image:tag to the command. For example, - `docker run ubuntu:14.04`. - - - -**COMMAND** - The command or program to run inside the image. - - -**ARG** - The arguments for the command to be run in the container. - # EXAMPLES ## Exposing log messages from the container to the host's log diff --git a/docs/man/docker-save.1.md b/docs/man/docker-save.1.md index ea78475b51..c02ffb101a 100644 --- a/docs/man/docker-save.1.md +++ b/docs/man/docker-save.1.md @@ -2,12 +2,12 @@ % Docker Community % JUNE 2014 # NAME -docker-save - Save an image to a tar archive (streamed to STDOUT by default) +docker-save - Save an image(s) to a tar archive (streamed to STDOUT by default) # SYNOPSIS **docker save** [**-o**|**--output**[=*OUTPUT*]] -IMAGE +IMAGE [IMAGE...] # DESCRIPTION Produces a tarred repository to the standard output stream. Contains all @@ -35,3 +35,4 @@ fedora image to a fedora-latest.tar: April 2014, Originally compiled by William Henry (whenry at redhat dot com) based on docker.com source material and internal work. June 2014, updated by Sven Dowideit +November 2014, updated by Sven Dowideit diff --git a/docs/man/docker-stop.1.md b/docs/man/docker-stop.1.md index 0cc19918c3..1b73e387e8 100644 --- a/docs/man/docker-stop.1.md +++ b/docs/man/docker-stop.1.md @@ -7,7 +7,7 @@ docker-stop - Stop a running container by sending SIGTERM and then SIGKILL after # SYNOPSIS **docker stop** [**-t**|**--time**[=*10*]] - CONTAINER [CONTAINER...] +CONTAINER [CONTAINER...] # DESCRIPTION Stop a running container (Send SIGTERM, and then SIGKILL after diff --git a/docs/man/docker-tag.1.md b/docs/man/docker-tag.1.md index a42ebe7702..e8550ec55d 100644 --- a/docs/man/docker-tag.1.md +++ b/docs/man/docker-tag.1.md @@ -7,7 +7,7 @@ docker-tag - Tag an image into a repository # SYNOPSIS **docker tag** [**-f**|**--force**[=*false*]] - IMAGE[:TAG] [REGISTRYHOST/][USERNAME/]NAME[:TAG] +IMAGE[:TAG] [REGISTRYHOST/][USERNAME/]NAME[:TAG] # DESCRIPTION This will give a new alias to an image in the repository. This refers to the diff --git a/docs/sources/reference/commandline/cli.md b/docs/sources/reference/commandline/cli.md index c9989926ce..090d8180dd 100644 --- a/docs/sources/reference/commandline/cli.md +++ b/docs/sources/reference/commandline/cli.md @@ -315,7 +315,6 @@ To kill the container, use `docker kill`. 
--force-rm=false Always remove intermediate containers, even after unsuccessful builds --no-cache=false Do not use cache when building the image - --pull=false Always attempt to pull a newer version of the image -q, --quiet=false Suppress the verbose output generated by the containers --rm=true Remove intermediate containers after a successful build -t, --tag="" Repository name (and optionally a tag) to be applied to the resulting image in case of success @@ -538,11 +537,14 @@ Creates a new container. --expose=[] Expose a port or a range of ports (e.g. --expose=3300-3310) from the container without publishing it to your host -h, --hostname="" Container host name -i, --interactive=false Keep STDIN open even if not attached + --ipc="" Default is to create a private IPC namespace (POSIX SysV IPC) for the container + 'container:': reuses another container shared memory, semaphores and message queues + 'host': use the host shared memory,semaphores and message queues inside the container. Note: the host mode gives the container full access to local shared memory and is therefore considered insecure. --link=[] Add link to another container in the form of name:alias --lxc-conf=[] (lxc exec-driver only) Add custom lxc options --lxc-conf="lxc.cgroup.cpuset.cpus = 0,1" -m, --memory="" Memory limit (format: , where unit = b, k, m or g) + --mac-address="" Container MAC address (e.g. 92:d0:c6:0a:29:33) --name="" Assign a name to the container - --mac-address="" Set the container's MAC address --net="bridge" Set the Network mode for the container 'bridge': creates a new network stack for the container on the docker bridge 'none': no networking for this container @@ -554,6 +556,7 @@ Creates a new container. (use 'docker port' to see the actual mapping) --privileged=false Give extended privileges to this container --restart="" Restart policy to apply when a container exits (no, on-failure[:max-retry], always) + --security-opt=[] Security Options -t, --tty=false Allocate a pseudo-TTY -u, --user="" Username or UID -v, --volume=[] Bind mount a volume (e.g., from the host: -v /host:/container, from Docker: -v /container) @@ -614,10 +617,7 @@ For example: Usage: docker events [OPTIONS] Get real time events from the server - -f, --filter=[] Provide filter values. Valid filters: - event= - event to filter - image= - image to filter - container= - container to filter + --since="" Show all events created since timestamp --until="" Stream events until this timestamp @@ -1321,9 +1321,13 @@ removed before the image is removed. --expose=[] Expose a port or a range of ports (e.g. --expose=3300-3310) from the container without publishing it to your host -h, --hostname="" Container host name -i, --interactive=false Keep STDIN open even if not attached + --ipc="" Default is to create a private IPC namespace (POSIX SysV IPC) for the container + 'container:': reuses another container shared memory, semaphores and message queues + 'host': use the host shared memory,semaphores and message queues inside the container. Note: the host mode gives the container full access to local shared memory and is therefore considered insecure. --link=[] Add link to another container in the form of name:alias --lxc-conf=[] (lxc exec-driver only) Add custom lxc options --lxc-conf="lxc.cgroup.cpuset.cpus = 0,1" -m, --memory="" Memory limit (format: , where unit = b, k, m or g) + --mac-address="" Container MAC address (e.g. 
92:d0:c6:0a:29:33) --name="" Assign a name to the container --net="bridge" Set the Network mode for the container 'bridge': creates a new network stack for the container on the docker bridge @@ -1337,6 +1341,7 @@ removed before the image is removed. --privileged=false Give extended privileges to this container --restart="" Restart policy to apply when a container exits (no, on-failure[:max-retry], always) --rm=false Automatically remove the container when it exits (incompatible with -d) + --security-opt=[] Security Options --sig-proxy=true Proxy received signals to the process (non-TTY mode only). SIGCHLD, SIGSTOP, and SIGKILL are not proxied. -t, --tty=false Allocate a pseudo-TTY -u, --user="" Username or UID @@ -1682,8 +1687,8 @@ more details on finding shared images from the command line. Restart a stopped container - -a, --attach=false Attach container's `STDOUT` and `STDERR` and forward all signals to the process - -i, --interactive=false Attach container's `STDIN` + -a, --attach=false Attach container's STDOUT and STDERR and forward all signals to the process + -i, --interactive=false Attach container's STDIN When run on a container that has already been started, takes no action and succeeds unconditionally. @@ -1692,7 +1697,7 @@ takes no action and succeeds unconditionally. Usage: docker stop [OPTIONS] CONTAINER [CONTAINER...] - Stop a running container by sending `SIGTERM` and then `SIGKILL` after a grace period + Stop a running container by sending SIGTERM and then SIGKILL after a grace period -t, --time=10 Number of seconds to wait for the container to stop before killing it. Default is 10 seconds. From f3cedce3608afe7bd570666a7fc878ab85c7bc03 Mon Sep 17 00:00:00 2001 From: Arnaud Porterie Date: Tue, 2 Dec 2014 18:45:07 -0800 Subject: [PATCH 551/592] Reduce permissions changes scope after ADD/COPY Permissions after an ADD or COPY build instructions are now restricted to the scope of files potentially modified by the operation rather than the entire impacted tree. Fixes #9401. Signed-off-by: Arnaud Porterie --- builder/internals.go | 40 ++++++++++-------------- integration-cli/docker_cli_build_test.go | 25 +++++++++++++++ 2 files changed, 42 insertions(+), 23 deletions(-) diff --git a/builder/internals.go b/builder/internals.go index de32e9e88a..706064f1e2 100644 --- a/builder/internals.go +++ b/builder/internals.go @@ -615,7 +615,7 @@ func (b *Builder) addContext(container *daemon.Container, orig, dest string, dec } if fi.IsDir() { - return copyAsDirectory(origPath, destPath, destExists) + return copyAsDirectory(origPath, destPath) } // If we are adding a remote file (or we've been told not to decompress), do not try to untar it @@ -649,37 +649,31 @@ func (b *Builder) addContext(container *daemon.Container, orig, dest string, dec resPath = path.Join(destPath, path.Base(origPath)) } - return fixPermissions(resPath, 0, 0) + return fixPermissions(origPath, resPath, 0, 0) } -func copyAsDirectory(source, destination string, destinationExists bool) error { +func copyAsDirectory(source, destination string) error { if err := chrootarchive.CopyWithTar(source, destination); err != nil { return err } + return fixPermissions(source, destination, 0, 0) +} - if destinationExists { - files, err := ioutil.ReadDir(source) +func fixPermissions(source, destination string, uid, gid int) error { + // We Walk on the source rather than on the destination because we don't + // want to change permissions on things we haven't created or modified. 
+ return filepath.Walk(source, func(fullpath string, info os.FileInfo, err error) error { + // Do not alter the walk root itself as it potentially existed before. + if source == fullpath { + return nil + } + // Path is prefixed by source: substitute with destination instead. + cleaned, err := filepath.Rel(source, fullpath) if err != nil { return err } - - for _, file := range files { - if err := fixPermissions(filepath.Join(destination, file.Name()), 0, 0); err != nil { - return err - } - } - return nil - } - - return fixPermissions(destination, 0, 0) -} - -func fixPermissions(destination string, uid, gid int) error { - return filepath.Walk(destination, func(path string, info os.FileInfo, err error) error { - if err := os.Lchown(path, uid, gid); err != nil && !os.IsNotExist(err) { - return err - } - return nil + fullpath = path.Join(destination, cleaned) + return os.Lchown(fullpath, uid, gid) }) } diff --git a/integration-cli/docker_cli_build_test.go b/integration-cli/docker_cli_build_test.go index 1d287bd7dc..79bb16b035 100644 --- a/integration-cli/docker_cli_build_test.go +++ b/integration-cli/docker_cli_build_test.go @@ -1065,6 +1065,31 @@ ADD . /`, logDone("build - add etc directory to root") } +// Testing #9401 +func TestBuildAddPreservesFilesSpecialBits(t *testing.T) { + name := "testaddpreservesfilesspecialbits" + defer deleteImages(name) + ctx, err := fakeContext(`FROM busybox +ADD suidbin /usr/bin/suidbin +RUN chmod 4755 /usr/bin/suidbin +RUN [ $(ls -l /usr/bin/suidbin | awk '{print $1}') = '-rwsr-xr-x' ] +ADD ./data/ / +RUN [ $(ls -l /usr/bin/suidbin | awk '{print $1}') = '-rwsr-xr-x' ]`, + map[string]string{ + "suidbin": "suidbin", + "/data/usr/test_file": "test1", + }) + if err != nil { + t.Fatal(err) + } + defer ctx.Close() + + if _, err := buildImageFromContext(name, ctx, true); err != nil { + t.Fatal(err) + } + logDone("build - add preserves files special bits") +} + func TestBuildCopySingleFileToRoot(t *testing.T) { name := "testcopysinglefiletoroot" defer deleteImages(name) From 11a75ec97f904b1807dbdc7121ac4e19dc229d49 Mon Sep 17 00:00:00 2001 From: Sven Dowideit Date: Fri, 5 Dec 2014 12:01:13 +1000 Subject: [PATCH 552/592] this v spacing irritated me while i was reading Docker-DCO-1.1-Signed-off-by: Sven Dowideit (github: SvenDowideit) --- docs/theme/mkdocs/css/docs.css | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/docs/theme/mkdocs/css/docs.css b/docs/theme/mkdocs/css/docs.css index 9b6d5028e8..068a0003ef 100644 --- a/docs/theme/mkdocs/css/docs.css +++ b/docs/theme/mkdocs/css/docs.css @@ -29,13 +29,17 @@ font-weight: 700; color: #394d54; line-height: 1; - margin: 0px 0 10px 0; + margin: 10px 0 10px 0; padding-left: 20px; white-space: nowrap; overflow: hidden; text-overflow: ellipsis; } +#leftnav li.active { + margin-bottom: 10px; +} + .content-body { padding: 0px 0px 0px 20px; } From b4b899264ed892818ef31c3626acad8fb110aabb Mon Sep 17 00:00:00 2001 From: Sven Dowideit Date: Wed, 3 Dec 2014 10:40:33 +1000 Subject: [PATCH 553/592] Add some extra details to webhook docs Update the webhook JSON payloads to real ones, and show there is a difference between an automated build webhook payload and a normal repo payload Docker-DCO-1.1-Signed-off-by: Sven Dowideit (github: SvenDowideit) Signed-off-by: Sven Dowideit --- docs/sources/docker-hub/builds.md | 101 +++++++++++++++++++++--------- docs/sources/docker-hub/repos.md | 68 ++++++++++---------- 2 files changed, 107 insertions(+), 62 deletions(-) diff --git a/docs/sources/docker-hub/builds.md 
b/docs/sources/docker-hub/builds.md index 7bf8b27eb2..e4d6074171 100644 --- a/docs/sources/docker-hub/builds.md +++ b/docs/sources/docker-hub/builds.md @@ -229,48 +229,91 @@ repo on the Docker Hub. ### Webhooks Automated Builds also include a Webhooks feature. Webhooks can be called -after a successful repository push is made. +after a successful repository push is made. This includes when a new tag is added +to an existing image. The webhook call will generate a HTTP POST with the following JSON payload: ``` { - "push_data":{ - "pushed_at":1385141110, - "images":[ - "imagehash1", - "imagehash2", - "imagehash3" - ], - "pusher":"username" - }, - "repository":{ - "status":"Active", - "description":"my docker repo that does cool things", - "is_automated":false, - "full_description":"This is my full description", - "repo_url":"https://registry.hub.docker.com/u/username/reponame/", - "owner":"username", - "is_official":false, - "is_private":false, - "name":"reponame", - "namespace":"username", - "star_count":1, - "comment_count":1, - "date_created":1370174400, - "dockerfile":"my full dockerfile is listed here", - "repo_name":"username/reponame" - } + "callback_url": "https://registry.hub.docker.com/u/svendowideit/testhook/hook/2141b5bi5i5b02bec211i4eeih0242eg11000a/", + "push_data": { + "images": [], + "pushed_at": 1.417566161e+09, + "pusher": "trustedbuilder" + }, + "repository": { + "comment_count": 0, + "date_created": 1.417494799e+09, + "description": "", + "dockerfile": "#\n# BUILD\u0009\u0009docker build -t svendowideit/apt-cacher .\n# RUN\u0009\u0009docker run -d -p 3142:3142 -name apt-cacher-run apt-cacher\n#\n# and then you can run containers with:\n# \u0009\u0009docker run -t -i -rm -e http_proxy http://192.168.1.2:3142/ debian bash\n#\nFROM\u0009\u0009ubuntu\nMAINTAINER\u0009SvenDowideit@home.org.au\n\n\nVOLUME\u0009\u0009[\"/var/cache/apt-cacher-ng\"]\nRUN\u0009\u0009apt-get update ; apt-get install -yq apt-cacher-ng\n\nEXPOSE \u0009\u00093142\nCMD\u0009\u0009chmod 777 /var/cache/apt-cacher-ng ; /etc/init.d/apt-cacher-ng start ; tail -f /var/log/apt-cacher-ng/*\n", + "full_description": "Docker Hub based automated build from a GitHub repo", + "is_official": false, + "is_private": true, + "is_trusted": true, + "name": "testhook", + "namespace": "svendowideit", + "owner": "svendowideit", + "repo_name": "svendowideit/testhook", + "repo_url": "https://registry.hub.docker.com/u/svendowideit/testhook/", + "star_count": 0, + "status": "Active" + } } ``` -Webhooks are available under the Settings menu of each Automated -Build's repo. +Webhooks are available under the Settings menu of each Repository. > **Note:** If you want to test your webhook out we recommend using > a tool like [requestb.in](http://requestb.in/). +### Webhook chains + +Webhook chains allow you to chain calls to multiple services. For example, +you can use this to trigger a deployment of your container only after +it has been successfully tested, then update a separate Changelog once the +deployment is complete. +After clicking the "Add webhook" button, simply add as many URLs as necessary +in your chain. + +The first webhook in a chain will be called after a successful push. Subsequent +URLs will be contacted after the callback has been validated. + +#### Validating a callback + +In order to validate a callback in a webhook chain, you need to + +1. Retrieve the `callback_url` value in the request's JSON payload. +1. Send a POST request to this URL containing a valid JSON body. 
+ +> **Note**: A chain request will only be considered complete once the last +> callback has been validated. + +To help you debug or simply view the results of your webhook(s), +view the "History" of the webhook available on its settings page. + +#### Callback JSON data + +The following parameters are recognized in callback data: + +* `state` (required): Accepted values are `success`, `failure` and `error`. + If the state isn't `success`, the webhook chain will be interrupted. +* `description`: A string containing miscellaneous information that will be + available on the Docker Hub. Maximum 255 characters. +* `context`: A string containing the context of the operation. Can be retrieved + from the Docker Hub. Maximum 100 characters. +* `target_url`: The URL where the results of the operation can be found. Can be + retrieved on the Docker Hub. + +*Example callback payload:* + + { + "state": "success", + "description": "387 tests PASSED", + "context": "Continuous integration by Acme CI", + "target_url": "http://ci.acme.com/results/afd339c1c3d27" + } ### Repository links diff --git a/docs/sources/docker-hub/repos.md b/docs/sources/docker-hub/repos.md index d0c2faea19..0749c0814c 100644 --- a/docs/sources/docker-hub/repos.md +++ b/docs/sources/docker-hub/repos.md @@ -110,34 +110,31 @@ similar to the example shown below. *Example webhook JSON payload:* - { - "push_data":{ - "pushed_at":1385141110, - "images":[ - "imagehash1", - "imagehash2", - "imagehash3" - ], - "pusher":"username" - }, - "repository":{ - "status":"Active", - "description":"my docker repo that does cool things", - "is_automated":false, - "full_description":"This is my full description", - "repo_url":"https://registry.hub.docker.com/u/username/reponame/", - "owner":"username", - "is_official":false, - "is_private":false, - "name":"reponame", - "namespace":"username", - "star_count":1, - "comment_count":1, - "date_created":1370174400, - "dockerfile":"my full dockerfile is listed here", - "repo_name":"username/reponame" - } - } +``` +{ + "callback_url": "https://registry.hub.docker.com/u/svendowideit/busybox/hook/2141bc0cdec4hebec411i4c1g40242eg110020/", + "push_data": { + "images": [], + "pushed_at": 1.417566822e+09, + "pusher": "svendowideit" + }, + "repository": { + "comment_count": 0, + "date_created": 1.417566665e+09, + "description": "", + "full_description": "webhook triggered from a 'docker push'", + "is_official": false, + "is_private": false, + "is_trusted": false, + "name": "busybox", + "namespace": "svendowideit", + "owner": "svendowideit", + "repo_name": "svendowideit/busybox", + "repo_url": "https://registry.hub.docker.com/u/svendowideit/busybox/", + "star_count": 0, + "status": "Active" +} +``` Webhooks allow you to notify people, services and other applications of new updates to your images and repositories. To get started adding webhooks, @@ -153,7 +150,8 @@ deployment is complete. After clicking the "Add webhook" button, simply add as many URLs as necessary in your chain. -The first webhook in a chain will be called after a successful push. Subsequent URLs will be contacted after the callback has been validated. +The first webhook in a chain will be called after a successful push. Subsequent +URLs will be contacted after the callback has been validated. #### Validating a callback @@ -172,10 +170,14 @@ view the "History" of the webhook available on its settings page. The following parameters are recognized in callback data: -* `state` (required): Accepted values are `success`, `failure` and `error`. 
If the state isn't `success`, the webhook chain will be interrupted. -* `description`: A string containing miscellaneous information that will be available on the Docker Hub. Maximum 255 characters. -* `context`: A string containing the context of the operation. Can be retrieved on the Docker Hub. Maximum 100 characters. -* `target_url`: The URL where the results of the operation can be found. Can be retrieved on the Docker Hub. +* `state` (required): Accepted values are `success`, `failure` and `error`. + If the state isn't `success`, the webhook chain will be interrupted. +* `description`: A string containing miscellaneous information that will be + available on the Docker Hub. Maximum 255 characters. +* `context`: A string containing the context of the operation. Can be retrieved + from the Docker Hub. Maximum 100 characters. +* `target_url`: The URL where the results of the operation can be found. Can be + retrieved on the Docker Hub. *Example callback payload:* From b266ad9c607588e8541926dfca7e338148e904ff Mon Sep 17 00:00:00 2001 From: Sven Dowideit Date: Fri, 5 Dec 2014 13:05:38 +1000 Subject: [PATCH 554/592] Show image of the Build trigger screen, and add a little search engine fodder for it. Docker-DCO-1.1-Signed-off-by: Sven Dowideit (github: SvenDowideit) --- docs/sources/docker-hub/builds.md | 20 +++++++++++++----- .../docker-hub/hub-images/build-trigger.png | Bin 0 -> 68137 bytes 2 files changed, 15 insertions(+), 5 deletions(-) create mode 100644 docs/sources/docker-hub/hub-images/build-trigger.png diff --git a/docs/sources/docker-hub/builds.md b/docs/sources/docker-hub/builds.md index e4d6074171..5d73e4aae9 100644 --- a/docs/sources/docker-hub/builds.md +++ b/docs/sources/docker-hub/builds.md @@ -209,7 +209,7 @@ repository's full description.The build process will look for a > rewritten the next time the Automated Build has been built. To make changes, > modify the `README.md` from the Git repository. -### Build triggers +## Remote Build triggers If you need a way to trigger Automated Builds outside of GitHub or Bitbucket, you can set up a build trigger. When you turn on the build trigger for an @@ -219,6 +219,16 @@ This will trigger the Automated Build, much as with a GitHub webhook. Build triggers are available under the Settings menu of each Automated Build repo on the Docker Hub. +![Build trigger screen](/docker-hub/hub-images/build-trigger.png) + +You can use `curl` to trigger a build: + +``` +$ curl --data "build=true" -X POST https://registry.hub.docker.com/u/svendowideit/testhook/trigger/be579c +82-7c0e-11e4-81c4-0242ac110020/ +OK +``` + > **Note:** > You can only trigger one build at a time and no more than one > every five minutes. If you already have a build pending, or if you @@ -226,7 +236,7 @@ repo on the Docker Hub. > To verify everything is working correctly, check the logs of last > ten triggers on the settings page . -### Webhooks +## Webhooks Automated Builds also include a Webhooks feature. Webhooks can be called after a successful repository push is made. This includes when a new tag is added @@ -280,7 +290,7 @@ in your chain. The first webhook in a chain will be called after a successful push. Subsequent URLs will be contacted after the callback has been validated. 
-#### Validating a callback
+### Validating a callback
 
 In order to validate a callback in a webhook chain, you need to
@@ -293,7 +303,7 @@ In order to validate a callback in a webhook chain, you need to
 To help you debug or simply view the results of your webhook(s), view the
 "History" of the webhook available on its settings page.
 
-#### Callback JSON data
+### Callback JSON data
 
 The following parameters are recognized in callback data:
 
@@ -315,7 +325,7 @@ The following parameters are recognized in callback data:
       "target_url": "http://ci.acme.com/results/afd339c1c3d27"
     }
 
-### Repository links
+## Repository links
 
 Repository links are a way to associate one Automated Build with
 another. If one gets updated,the linking system triggers a rebuild
diff --git a/docs/sources/docker-hub/hub-images/build-trigger.png b/docs/sources/docker-hub/hub-images/build-trigger.png
new file mode 100644
index 0000000000000000000000000000000000000000..25597a27bda955a7801b95fe22ce8205db6ef78e
GIT binary patch
literal 68137

From 48a596a804a24a5b4b3512a58a785b17781c49d0 Mon Sep 17 00:00:00 2001
From: Sven Dowideit
Date: Fri, 5 Dec 2014 16:30:47 +1000
Subject: [PATCH 555/592] add --cap-add=NET_ADMIN to make a new network device

inspired by #9452

Docker-DCO-1.1-Signed-off-by: Sven Dowideit (github: SvenDowideit)

Signed-off-by: Sven Dowideit
---
 docs/sources/reference/run.md | 4 ++++
 1 file changed, 4 insertions(+)

diff --git a/docs/sources/reference/run.md b/docs/sources/reference/run.md
index 8b0c6a1591..01e98ba35b 100644
--- a/docs/sources/reference/run.md
+++ b/docs/sources/reference/run.md
@@ -360,6 +360,10 @@ operator wants to have all capabilities but `MKNOD` they could use:
 For interacting with the network stack, instead of using `--privileged` they
 should use `--cap-add=NET_ADMIN` to modify the network interfaces.
 
+    $ docker run -t -i --rm ubuntu:14.04 ip link add dummy0 type dummy
+    RTNETLINK answers: Operation not permitted
+    $ docker run -t -i --rm --cap-add=NET_ADMIN ubuntu:14.04 ip link add dummy0 type dummy
+
 If the Docker daemon was started using the `lxc` exec-driver
 (`docker -d --exec-driver=lxc`) then the operator can also specify LXC options
 using one or more `--lxc-conf` parameters.
These can be new parameters or From 5f2000944b0377f1e5ce86a887f495f1f295dd90 Mon Sep 17 00:00:00 2001 From: Sven Dowideit Date: Fri, 5 Dec 2014 16:41:18 +1000 Subject: [PATCH 556/592] Add a fuse example, combining both SYS_ADMIN and --device inspired by #9448 and #9487 Docker-DCO-1.1-Signed-off-by: Sven Dowideit (github: SvenDowideit) Signed-off-by: Sven Dowideit --- docs/sources/reference/run.md | 24 ++++++++++++++++++++++++ 1 file changed, 24 insertions(+) diff --git a/docs/sources/reference/run.md b/docs/sources/reference/run.md index 01e98ba35b..4f777e0ef1 100644 --- a/docs/sources/reference/run.md +++ b/docs/sources/reference/run.md @@ -364,6 +364,30 @@ should use `--cap-add=NET_ADMIN` to modify the network interfaces. RTNETLINK answers: Operation not permitted $ docker run -t -i --rm --cap-add=NET_ADMIN ubuntu:14.04 ip link add dummy0 type dummy +To mount a FUSE based filesystem, you need to combine both `--cap-add` and +`--device`: + + $ docker run --rm -it --cap-add SYS_ADMIN sshfs sshfs sven@10.10.10.20:/home/sven /mnt + fuse: failed to open /dev/fuse: Operation not permitted + $ docker run --rm -it --device /dev/fuse sshfs sshfs sven@10.10.10.20:/home/sven /mnt + fusermount: mount failed: Operation not permitted + $ docker run --rm -it --cap-add SYS_ADMIN --device /dev/fuse sshfs + # sshfs sven@10.10.10.20:/home/sven /mnt + The authenticity of host '10.10.10.20 (10.10.10.20)' can't be established. + ECDSA key fingerprint is 25:34:85:75:25:b0:17:46:05:19:04:93:b5:dd:5f:c6. + Are you sure you want to continue connecting (yes/no)? yes + sven@10.10.10.20's password: + root@30aa0cfaf1b5:/# ls -la /mnt/src/docker + total 1516 + drwxrwxr-x 1 1000 1000 4096 Dec 4 06:08 . + drwxrwxr-x 1 1000 1000 4096 Dec 4 11:46 .. + -rw-rw-r-- 1 1000 1000 16 Oct 8 00:09 .dockerignore + -rwxrwxr-x 1 1000 1000 464 Oct 8 00:09 .drone.yml + drwxrwxr-x 1 1000 1000 4096 Dec 4 06:11 .git + -rw-rw-r-- 1 1000 1000 461 Dec 4 06:08 .gitignore + .... + + If the Docker daemon was started using the `lxc` exec-driver (`docker -d --exec-driver=lxc`) then the operator can also specify LXC options using one or more `--lxc-conf` parameters. These can be new parameters or From 522e6310967bcb4fadd99d76dc3205991ecae1ad Mon Sep 17 00:00:00 2001 From: Sven Dowideit Date: Fri, 5 Dec 2014 21:04:46 +1000 Subject: [PATCH 557/592] I was talking to someone whom i listen to, and she noted that our docs don't mention the license Signed-off-by: Sven Dowideit --- README.md | 4 +++- docs/sources/index.md | 10 +++++++++- 2 files changed, 12 insertions(+), 2 deletions(-) diff --git a/README.md b/README.md index fc41e409df..c2273eb656 100644 --- a/README.md +++ b/README.md @@ -202,5 +202,7 @@ For more information, please see http://www.bis.doc.gov Licensing ========= -Docker is licensed under the Apache License, Version 2.0. See LICENSE for full license text. +Docker is licensed under the Apache License, Version 2.0. See +[LICENSE](https://github.com/docker/docker/blob/master/LICENSE) for the full +license text. diff --git a/docs/sources/index.md b/docs/sources/index.md index 5780eab05d..993603eb33 100644 --- a/docs/sources/index.md +++ b/docs/sources/index.md @@ -88,4 +88,12 @@ implementation, check out the [Docker User Guide](/userguide/). 
## Release Notes -A summary of the changes in each release in the current series can now be found on the separate [Release Notes page](/release-notes/) +A summary of the changes in each release in the current series can now be found +on the separate [Release Notes page](/release-notes/) + +## Licensing + +Docker is licensed under the Apache License, Version 2.0. See +[LICENSE](https://github.com/docker/docker/blob/master/LICENSE) for the full +license text. + From 654da4e151c449725b6996d47b7ff33767e87da9 Mon Sep 17 00:00:00 2001 From: Michael Crosby Date: Wed, 3 Dec 2014 15:29:54 -0800 Subject: [PATCH 558/592] Return docker's root dir in docker -D info This adds the docker daemon's root directory to docker info when running in debug mode. This allows the user to view the root directory where docker is writing and storing state. Signed-off-by: Michael Crosby --- api/client/commands.go | 3 +++ daemon/info.go | 1 + docs/sources/reference/commandline/cli.md | 1 + 3 files changed, 5 insertions(+) diff --git a/api/client/commands.go b/api/client/commands.go index 6b97f3656a..6340171579 100644 --- a/api/client/commands.go +++ b/api/client/commands.go @@ -544,6 +544,9 @@ func (cli *DockerCli) CmdInfo(args ...string) error { if initPath := remoteInfo.Get("InitPath"); initPath != "" { fmt.Fprintf(cli.out, "Init Path: %s\n", initPath) } + if root := remoteInfo.Get("DockerRootDir"); root != "" { + fmt.Fprintf(cli.out, "Docker Root Dir: %s\n", root) + } } if len(remoteInfo.GetList("IndexServerAddress")) != 0 { diff --git a/daemon/info.go b/daemon/info.go index 2807adab38..518722b6c2 100644 --- a/daemon/info.go +++ b/daemon/info.go @@ -76,6 +76,7 @@ func (daemon *Daemon) CmdInfo(job *engine.Job) engine.Status { v.Set("InitPath", initPath) v.SetInt("NCPU", runtime.NumCPU()) v.SetInt64("MemTotal", meminfo.MemTotal) + v.Set("DockerRootDir", daemon.Config().Root) if hostname, err := os.Hostname(); err == nil { v.Set("Name", hostname) } diff --git a/docs/sources/reference/commandline/cli.md b/docs/sources/reference/commandline/cli.md index c9989926ce..9799914d5e 100644 --- a/docs/sources/reference/commandline/cli.md +++ b/docs/sources/reference/commandline/cli.md @@ -934,6 +934,7 @@ For example: Goroutines: 9 EventsListeners: 0 Init Path: /usr/bin/docker + Docker Root Dir: /var/lib/docker Username: svendowideit Registry: [https://index.docker.io/v1/] Labels: From fdf745768350d2068aecd34734b38a2c27810132 Mon Sep 17 00:00:00 2001 From: Abin Shahab Date: Fri, 28 Nov 2014 19:25:14 +0000 Subject: [PATCH 559/592] LXC CAP ADD CAP DROP IN TEMPLATE Added cap-drop and cap-add in lxc template Docker-DCO-1.1-Signed-off-by: Abin Shahab (github: ashahab-altiscale) --- daemon/execdriver/lxc/driver.go | 8 --- daemon/execdriver/lxc/init.go | 62 +--------------- daemon/execdriver/lxc/lxc_init_linux.go | 55 -------------- daemon/execdriver/lxc/lxc_init_unsupported.go | 4 -- daemon/execdriver/lxc/lxc_template.go | 45 ++++++++++++ .../execdriver/lxc/lxc_template_unit_test.go | 72 +++++++++++++++++++ 6 files changed, 118 insertions(+), 128 deletions(-) diff --git a/daemon/execdriver/lxc/driver.go b/daemon/execdriver/lxc/driver.go index a0ad11cc76..642247c851 100644 --- a/daemon/execdriver/lxc/driver.go +++ b/daemon/execdriver/lxc/driver.go @@ -123,14 +123,6 @@ func (d *driver) Run(c *execdriver.Command, pipes *execdriver.Pipes, startCallba params = append(params, "-w", c.WorkingDir) } - if len(c.CapAdd) > 0 { - params = append(params, fmt.Sprintf("-cap-add=%s", strings.Join(c.CapAdd, ":"))) - } - - if len(c.CapDrop) > 0 { - 
params = append(params, fmt.Sprintf("-cap-drop=%s", strings.Join(c.CapDrop, ":"))) - } - params = append(params, "--", c.ProcessConfig.Entrypoint) params = append(params, c.ProcessConfig.Arguments...) diff --git a/daemon/execdriver/lxc/init.go b/daemon/execdriver/lxc/init.go index 680f53e1a4..e99502667d 100644 --- a/daemon/execdriver/lxc/init.go +++ b/daemon/execdriver/lxc/init.go @@ -6,7 +6,6 @@ import ( "fmt" "io/ioutil" "log" - "net" "os" "os/exec" "runtime" @@ -14,7 +13,6 @@ import ( "syscall" "github.com/docker/docker/pkg/reexec" - "github.com/docker/libcontainer/netlink" ) // Args provided to the init function for a driver @@ -59,12 +57,7 @@ func setupNamespace(args *InitArgs) error { if err := setupEnv(args); err != nil { return err } - if err := setupHostname(args); err != nil { - return err - } - if err := setupNetworking(args); err != nil { - return err - } + if err := finalizeNamespace(args); err != nil { return err } @@ -138,59 +131,6 @@ func setupEnv(args *InitArgs) error { return nil } -func setupHostname(args *InitArgs) error { - hostname := getEnv(args, "HOSTNAME") - if hostname == "" { - return nil - } - return setHostname(hostname) -} - -// Setup networking -func setupNetworking(args *InitArgs) error { - if args.Ip != "" { - // eth0 - iface, err := net.InterfaceByName("eth0") - if err != nil { - return fmt.Errorf("Unable to set up networking: %v", err) - } - ip, ipNet, err := net.ParseCIDR(args.Ip) - if err != nil { - return fmt.Errorf("Unable to set up networking: %v", err) - } - if err := netlink.NetworkLinkAddIp(iface, ip, ipNet); err != nil { - return fmt.Errorf("Unable to set up networking: %v", err) - } - if err := netlink.NetworkSetMTU(iface, args.Mtu); err != nil { - return fmt.Errorf("Unable to set MTU: %v", err) - } - if err := netlink.NetworkLinkUp(iface); err != nil { - return fmt.Errorf("Unable to set up networking: %v", err) - } - - // loopback - iface, err = net.InterfaceByName("lo") - if err != nil { - return fmt.Errorf("Unable to set up networking: %v", err) - } - if err := netlink.NetworkLinkUp(iface); err != nil { - return fmt.Errorf("Unable to set up networking: %v", err) - } - } - if args.Gateway != "" { - gw := net.ParseIP(args.Gateway) - if gw == nil { - return fmt.Errorf("Unable to set up networking, %s is not a valid gateway IP", args.Gateway) - } - - if err := netlink.AddDefaultGw(gw.String(), "eth0"); err != nil { - return fmt.Errorf("Unable to set up networking: %v", err) - } - } - - return nil -} - // Setup working directory func setupWorkingDirectory(args *InitArgs) error { if args.WorkDir == "" { diff --git a/daemon/execdriver/lxc/lxc_init_linux.go b/daemon/execdriver/lxc/lxc_init_linux.go index 625caa1608..78bdd11fb9 100644 --- a/daemon/execdriver/lxc/lxc_init_linux.go +++ b/daemon/execdriver/lxc/lxc_init_linux.go @@ -2,74 +2,19 @@ package lxc import ( "fmt" - "strings" - "syscall" - - "github.com/docker/docker/daemon/execdriver" - "github.com/docker/docker/daemon/execdriver/native/template" "github.com/docker/libcontainer/namespaces" - "github.com/docker/libcontainer/security/capabilities" - "github.com/docker/libcontainer/system" "github.com/docker/libcontainer/utils" ) -func setHostname(hostname string) error { - return syscall.Sethostname([]byte(hostname)) -} - func finalizeNamespace(args *InitArgs) error { if err := utils.CloseExecFrom(3); err != nil { return err } - // We use the native drivers default template so that things like caps are consistent - // across both drivers - container := template.New() - - if !args.Privileged { - // 
drop capabilities in bounding set before changing user - if err := capabilities.DropBoundingSet(container.Capabilities); err != nil { - return fmt.Errorf("drop bounding set %s", err) - } - - // preserve existing capabilities while we change users - if err := system.SetKeepCaps(); err != nil { - return fmt.Errorf("set keep caps %s", err) - } - } - if err := namespaces.SetupUser(args.User); err != nil { return fmt.Errorf("setup user %s", err) } - if !args.Privileged { - if err := system.ClearKeepCaps(); err != nil { - return fmt.Errorf("clear keep caps %s", err) - } - - var ( - adds []string - drops []string - ) - - if args.CapAdd != "" { - adds = strings.Split(args.CapAdd, ":") - } - if args.CapDrop != "" { - drops = strings.Split(args.CapDrop, ":") - } - - caps, err := execdriver.TweakCapabilities(container.Capabilities, adds, drops) - if err != nil { - return err - } - - // drop all other capabilities - if err := capabilities.DropCapabilities(caps); err != nil { - return fmt.Errorf("drop capabilities %s", err) - } - } - if err := setupWorkingDirectory(args); err != nil { return err } diff --git a/daemon/execdriver/lxc/lxc_init_unsupported.go b/daemon/execdriver/lxc/lxc_init_unsupported.go index 6a5904a4d8..97bc8a984c 100644 --- a/daemon/execdriver/lxc/lxc_init_unsupported.go +++ b/daemon/execdriver/lxc/lxc_init_unsupported.go @@ -2,10 +2,6 @@ package lxc -func setHostname(hostname string) error { - panic("Not supported on darwin") -} - func finalizeNamespace(args *InitArgs) error { panic("Not supported on darwin") } diff --git a/daemon/execdriver/lxc/lxc_template.go b/daemon/execdriver/lxc/lxc_template.go index d3fd85b7ab..9402c0697c 100644 --- a/daemon/execdriver/lxc/lxc_template.go +++ b/daemon/execdriver/lxc/lxc_template.go @@ -2,6 +2,7 @@ package lxc import ( "github.com/docker/docker/daemon/execdriver" + nativeTemplate "github.com/docker/docker/daemon/execdriver/native/template" "github.com/docker/libcontainer/label" "os" "strings" @@ -15,6 +16,13 @@ lxc.network.type = veth lxc.network.link = {{.Network.Interface.Bridge}} lxc.network.name = eth0 lxc.network.mtu = {{.Network.Mtu}} +{{if .Network.Interface.IPAddress}} +lxc.network.ipv4 = {{.Network.Interface.IPAddress}}/{{.Network.Interface.IPPrefixLen}} +{{end}} +{{if .Network.Interface.Gateway}} +lxc.network.ipv4.gateway = {{.Network.Interface.Gateway}} +{{end}} +lxc.network.flags = up {{else if .Network.HostNetworking}} lxc.network.type = none {{else}} @@ -78,6 +86,18 @@ lxc.mount.entry = {{$value.Source}} {{escapeFstabSpaces $ROOTFS}}/{{escapeFstabS {{end}} {{end}} +{{if .ProcessConfig.Env}} +lxc.utsname = {{getHostname .ProcessConfig.Env}} +{{end}} + +{{if .ProcessConfig.Privileged}} +# No cap values are needed, as lxc is starting in privileged mode +{{else}} +{{range $value := keepCapabilities .CapAdd .CapDrop}} +lxc.cap.keep = {{$value}} +{{end}} +{{end}} + {{if .ProcessConfig.Privileged}} {{if .AppArmor}} lxc.aa_profile = unconfined @@ -118,6 +138,19 @@ func escapeFstabSpaces(field string) string { return strings.Replace(field, " ", "\\040", -1) } +func keepCapabilities(adds []string, drops []string) []string { + container := nativeTemplate.New() + caps, err := execdriver.TweakCapabilities(container.Capabilities, adds, drops) + var newCaps []string + for _, cap := range caps { + newCaps = append(newCaps, strings.ToLower(cap)) + } + if err != nil { + return []string{} + } + return newCaps +} + func isDirectory(source string) string { f, err := os.Stat(source) if err != nil { @@ -152,6 +185,16 @@ func getLabel(c 
map[string][]string, name string) string { return "" } +func getHostname(env []string) string { + for _, kv := range env { + parts := strings.SplitN(kv, "=", 2) + if parts[0] == "HOSTNAME" && len(parts) == 2 { + return parts[1] + } + } + return "" +} + func init() { var err error funcMap := template.FuncMap{ @@ -159,6 +202,8 @@ func init() { "escapeFstabSpaces": escapeFstabSpaces, "formatMountLabel": label.FormatMountLabel, "isDirectory": isDirectory, + "keepCapabilities": keepCapabilities, + "getHostname": getHostname, } LxcTemplateCompiled, err = template.New("lxc").Funcs(funcMap).Parse(LxcTemplate) if err != nil { diff --git a/daemon/execdriver/lxc/lxc_template_unit_test.go b/daemon/execdriver/lxc/lxc_template_unit_test.go index e76d5e9d03..77435114fd 100644 --- a/daemon/execdriver/lxc/lxc_template_unit_test.go +++ b/daemon/execdriver/lxc/lxc_template_unit_test.go @@ -14,6 +14,7 @@ import ( "time" "github.com/docker/docker/daemon/execdriver" + nativeTemplate "github.com/docker/docker/daemon/execdriver/native/template" "github.com/docker/libcontainer/devices" ) @@ -104,6 +105,10 @@ func TestCustomLxcConfig(t *testing.T) { } func grepFile(t *testing.T, path string, pattern string) { + grepFileWithReverse(t, path, pattern, false) +} + +func grepFileWithReverse(t *testing.T, path string, pattern string, inverseGrep bool) { f, err := os.Open(path) if err != nil { t.Fatal(err) @@ -117,9 +122,15 @@ func grepFile(t *testing.T, path string, pattern string) { for err == nil { line, err = r.ReadString('\n') if strings.Contains(line, pattern) == true { + if inverseGrep { + t.Fatalf("grepFile: pattern \"%s\" found in \"%s\"", pattern, path) + } return } } + if inverseGrep { + return + } t.Fatalf("grepFile: pattern \"%s\" not found in \"%s\"", pattern, path) } @@ -228,3 +239,64 @@ func TestCustomLxcConfigMounts(t *testing.T) { grepFile(t, p, fmt.Sprintf("lxc.mount.entry = %s %s none rbind,ro,create=%s 0 0", tempDir, "/"+tempDir, "dir")) grepFile(t, p, fmt.Sprintf("lxc.mount.entry = %s %s none rbind,rw,create=%s 0 0", tempFile.Name(), "/"+tempFile.Name(), "file")) } + +func TestCustomLxcConfigMisc(t *testing.T) { + root, err := ioutil.TempDir("", "TestCustomLxcConfig") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(root) + os.MkdirAll(path.Join(root, "containers", "1"), 0777) + driver, err := NewDriver(root, "", false) + if err != nil { + t.Fatal(err) + } + processConfig := execdriver.ProcessConfig{ + Privileged: false, + } + + processConfig.Env = []string{"HOSTNAME=testhost"} + command := &execdriver.Command{ + ID: "1", + LxcConfig: []string{ + "lxc.cgroup.cpuset.cpus = 0,1", + }, + Network: &execdriver.Network{ + Mtu: 1500, + Interface: &execdriver.NetworkInterface{ + Gateway: "10.10.10.1", + IPAddress: "10.10.10.10", + IPPrefixLen: 24, + Bridge: "docker0", + }, + }, + ProcessConfig: processConfig, + CapAdd: []string{"net_admin", "syslog"}, + CapDrop: []string{"kill", "mknod"}, + } + + p, err := driver.generateLXCConfig(command) + if err != nil { + t.Fatal(err) + } + // network + grepFile(t, p, "lxc.network.type = veth") + grepFile(t, p, "lxc.network.link = docker0") + grepFile(t, p, "lxc.network.name = eth0") + grepFile(t, p, "lxc.network.ipv4 = 10.10.10.10/24") + grepFile(t, p, "lxc.network.ipv4.gateway = 10.10.10.1") + grepFile(t, p, "lxc.network.flags = up") + + // hostname + grepFile(t, p, "lxc.utsname = testhost") + grepFile(t, p, "lxc.cgroup.cpuset.cpus = 0,1") + container := nativeTemplate.New() + for _, cap := range container.Capabilities { + cap = strings.ToLower(cap) + if cap 
!= "mknod" && cap != "kill" { + grepFile(t, p, fmt.Sprintf("lxc.cap.keep = %s", cap)) + } + } + grepFileWithReverse(t, p, fmt.Sprintf("lxc.cap.keep = kill"), true) + grepFileWithReverse(t, p, fmt.Sprintf("lxc.cap.keep = mknod"), true) +} From c128c134d71d01c555a6354d2ae7f6964f93df90 Mon Sep 17 00:00:00 2001 From: Lewis Marshall Date: Sat, 6 Dec 2014 02:30:03 +0000 Subject: [PATCH 560/592] Fix chroot untar for zero padded archive from slow reader Signed-off-by: Lewis Marshall --- pkg/chrootarchive/archive.go | 5 ++++ pkg/chrootarchive/archive_test.go | 41 +++++++++++++++++++++++++++++++ 2 files changed, 46 insertions(+) diff --git a/pkg/chrootarchive/archive.go b/pkg/chrootarchive/archive.go index fc2bea2c40..8d139fa857 100644 --- a/pkg/chrootarchive/archive.go +++ b/pkg/chrootarchive/archive.go @@ -6,6 +6,7 @@ import ( "flag" "fmt" "io" + "io/ioutil" "os" "runtime" "strings" @@ -33,6 +34,10 @@ func untar() { if err := archive.Untar(os.Stdin, "/", options); err != nil { fatal(err) } + // fully consume stdin in case it is zero padded + if _, err := ioutil.ReadAll(os.Stdin); err != nil { + fatal(err) + } os.Exit(0) } diff --git a/pkg/chrootarchive/archive_test.go b/pkg/chrootarchive/archive_test.go index 69e18e3199..8477c0676f 100644 --- a/pkg/chrootarchive/archive_test.go +++ b/pkg/chrootarchive/archive_test.go @@ -1,10 +1,12 @@ package chrootarchive import ( + "io" "io/ioutil" "os" "path/filepath" "testing" + "time" "github.com/docker/docker/pkg/archive" "github.com/docker/docker/pkg/reexec" @@ -42,3 +44,42 @@ func TestChrootTarUntar(t *testing.T) { t.Fatal(err) } } + +type slowEmptyTarReader struct { + size int + offset int + chunkSize int +} + +// Read is a slow reader of an empty tar (like the output of "tar c --files-from /dev/null") +func (s *slowEmptyTarReader) Read(p []byte) (int, error) { + time.Sleep(100 * time.Millisecond) + count := s.chunkSize + if len(p) < s.chunkSize { + count = len(p) + } + for i := 0; i < count; i++ { + p[i] = 0 + } + s.offset += count + if s.offset > s.size { + return count, io.EOF + } + return count, nil +} + +func TestChrootUntarEmptyArchiveFromSlowReader(t *testing.T) { + tmpdir, err := ioutil.TempDir("", "docker-TestChrootUntarEmptyArchiveFromSlowReader") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(tmpdir) + dest := filepath.Join(tmpdir, "dest") + if err := os.MkdirAll(dest, 0700); err != nil { + t.Fatal(err) + } + stream := &slowEmptyTarReader{size: 10240, chunkSize: 1024} + if err := Untar(stream, dest, nil); err != nil { + t.Fatal(err) + } +} From 46b1194d996f6535e657bcb88428303305d3fe68 Mon Sep 17 00:00:00 2001 From: Erik Dubbelboer Date: Sat, 6 Dec 2014 22:42:32 +0800 Subject: [PATCH 561/592] Removed race condition If two interrupts were fired really quickly interruptCount could have been incremented twice before the LoadUint32 making cleanup not being called at all. Signed-off-by: Erik Dubbelboer --- pkg/signal/trap.go | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/pkg/signal/trap.go b/pkg/signal/trap.go index 9be82671a1..78a709b30a 100644 --- a/pkg/signal/trap.go +++ b/pkg/signal/trap.go @@ -34,9 +34,8 @@ func Trap(cleanup func()) { case os.Interrupt, syscall.SIGTERM: // If the user really wants to interrupt, let him do so. 
if atomic.LoadUint32(&interruptCount) < 3 { - atomic.AddUint32(&interruptCount, 1) // Initiate the cleanup only once - if atomic.LoadUint32(&interruptCount) == 1 { + if atomic.AddUint32(&interruptCount, 1) == 1 { // Call cleanup handler cleanup() os.Exit(0) From 1ae7be716eadf6efdc7ee033c83127e975222a76 Mon Sep 17 00:00:00 2001 From: Brandon Philips Date: Sun, 7 Dec 2014 18:35:37 -0800 Subject: [PATCH 562/592] docs: docker.service not services Minor but important typo in the new systemd guide introduced in #9347. Signed-off-by: Brandon Philips --- docs/sources/articles/systemd.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/sources/articles/systemd.md b/docs/sources/articles/systemd.md index 028185f8df..fc0ee14443 100644 --- a/docs/sources/articles/systemd.md +++ b/docs/sources/articles/systemd.md @@ -38,7 +38,7 @@ or `/etc/systemd/service`. You may want to control the disk space used for Docker images, containers and volumes by moving it to a separate partition. -In this example, we'll assume that your `docker.services` file looks something like: +In this example, we'll assume that your `docker.service` file looks something like: [Unit] Description=Docker Application Container Engine From e0792e7ece7a71da83c902a03654bda4797ab054 Mon Sep 17 00:00:00 2001 From: Brandon Philips Date: Sun, 7 Dec 2014 18:40:12 -0800 Subject: [PATCH 563/592] docs: remove a trailing whitespace Signed-off-by: Brandon Philips --- docs/sources/articles/systemd.md | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/docs/sources/articles/systemd.md b/docs/sources/articles/systemd.md index 028185f8df..77bcfe4c7b 100644 --- a/docs/sources/articles/systemd.md +++ b/docs/sources/articles/systemd.md @@ -45,14 +45,14 @@ In this example, we'll assume that your `docker.services` file looks something l Documentation=http://docs.docker.com After=network.target docker.socket Requires=docker.socket - + [Service] Type=notify EnvironmentFile=-/etc/sysconfig/docker ExecStart=/usr/bin/docker -d -H fd:// $OPTIONS LimitNOFILE=1048576 LimitNPROC=1048576 - + [Install] Also=docker.socket @@ -68,7 +68,7 @@ You can also set other environment variables in this file, for example, the This example overrides the default `docker.service` file. -If you are behind a HTTP proxy server, for example in corporate settings, +If you are behind a HTTP proxy server, for example in corporate settings, you will need to add this configuration in the Docker systemd service file. Copy file `/usr/lib/systemd/system/docker.service` to `/etc/systemd/system/docker/service`. @@ -85,7 +85,7 @@ proxying you can specify them via the `NO_PROXY` environment variable: Flush changes: $ sudo systemctl daemon-reload - + Restart Docker: $ sudo systemctl restart docker From 2d51d71561565987fc6a600234f2e2d15e0ecf31 Mon Sep 17 00:00:00 2001 From: Brandon Philips Date: Sun, 7 Dec 2014 18:44:07 -0800 Subject: [PATCH 564/592] docs: use systemd drop-ins instead of copying Copying the entire docker service file isn't necessary to add an environment variable, instead use a drop-in configuration file. The nice side-effect is that the user gets any vendor updates to the docker.service file. 
Signed-off-by: Brandon Philips --- docs/sources/articles/systemd.md | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/docs/sources/articles/systemd.md b/docs/sources/articles/systemd.md index 77bcfe4c7b..0f66cd79d2 100644 --- a/docs/sources/articles/systemd.md +++ b/docs/sources/articles/systemd.md @@ -71,10 +71,14 @@ This example overrides the default `docker.service` file. If you are behind a HTTP proxy server, for example in corporate settings, you will need to add this configuration in the Docker systemd service file. -Copy file `/usr/lib/systemd/system/docker.service` to `/etc/systemd/system/docker/service`. +First, create a systemd drop-in directory for the docker service: -Add the following to the `[Service]` section in the new file: + mkdir /etc/systemd/system/docker.service.d +Now create a file called `/etc/systemd/system/docker.service.d/http-proxy.conf` +that adds the `HTTP_PROXY` environment variable: + + [Service] Environment="HTTP_PROXY=http://proxy.example.com:80/" If you have internal Docker registries that you need to contact without From 6743be44ca9006cb7ca26cd3d8410877727a6313 Mon Sep 17 00:00:00 2001 From: Jessica Frazelle Date: Mon, 8 Dec 2014 11:33:18 -0800 Subject: [PATCH 565/592] Running a container that links to a container with --net host should throw an error. Docker-DCO-1.1-Signed-off-by: Jessica Frazelle (github: jfrazelle) --- daemon/daemon.go | 3 +++ integration-cli/docker_cli_links_test.go | 16 ++++++++++++++++ 2 files changed, 19 insertions(+) diff --git a/daemon/daemon.go b/daemon/daemon.go index fe6e0eb2aa..a2e6a79bd6 100644 --- a/daemon/daemon.go +++ b/daemon/daemon.go @@ -696,6 +696,9 @@ func (daemon *Daemon) RegisterLinks(container *Container, hostConfig *runconfig. if child == nil { return fmt.Errorf("Could not get container for %s", parts["name"]) } + if child.hostConfig.NetworkMode.IsHost() { + return runconfig.ErrConflictHostNetworkAndLinks + } if err := daemon.RegisterLink(container, child, parts["alias"]); err != nil { return err } diff --git a/integration-cli/docker_cli_links_test.go b/integration-cli/docker_cli_links_test.go index 50269245fe..5b81b7fec3 100644 --- a/integration-cli/docker_cli_links_test.go +++ b/integration-cli/docker_cli_links_test.go @@ -215,3 +215,19 @@ func TestLinksHostsFilesInject(t *testing.T) { logDone("link - ensure containers hosts files are updated with the link alias.") } + +func TestLinksNetworkHostContainer(t *testing.T) { + defer deleteAllContainers() + + out, _, err := runCommandWithOutput(exec.Command(dockerBinary, "run", "-d", "--net", "host", "--name", "host_container", "busybox", "top")) + if err != nil { + t.Fatal(err, out) + } + + out, _, err = runCommandWithOutput(exec.Command(dockerBinary, "run", "--name", "should_fail", "--link", "host_container:tester", "busybox", "true")) + if err == nil || !strings.Contains(out, "--net=host can't be used with links. This would result in undefined behavior.") { + t.Fatalf("Running container linking to a container with --net host should have failed: %s", out) + } + + logDone("link - error thrown when linking to container with --net host") +} From d1535131d259bb8a980770d47c0865a1be4373e3 Mon Sep 17 00:00:00 2001 From: Michael Crosby Date: Mon, 8 Dec 2014 15:04:34 -0800 Subject: [PATCH 566/592] Flush stdin from within chroot archive This makes sure that we don't buffer in memory and that we also flush stdin from diff as well as untar. 
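A minimal, self-contained sketch (not part of this patch) of the difference: `flushBuffered` mirrors the `ioutil.ReadAll` call being replaced, which holds the whole remainder of the stream in memory, while `flushStreaming` mirrors the `io.Copy` into `ioutil.Discard` approach used here, which drains the stream through a small reused buffer.

```
package main

import (
	"io"
	"io/ioutil"
	"strings"
)

// flushBuffered drains r by reading the whole remainder into memory first;
// harmless for small streams, wasteful for a large zero-padded archive.
func flushBuffered(r io.Reader) {
	_, _ = ioutil.ReadAll(r)
}

// flushStreaming drains r without accumulating it: io.Copy reuses a small
// internal buffer and ioutil.Discard throws the bytes away.
func flushStreaming(r io.Reader) {
	_, _ = io.Copy(ioutil.Discard, r)
}

func main() {
	flushBuffered(strings.NewReader(strings.Repeat("\x00", 4096)))
	flushStreaming(strings.NewReader(strings.Repeat("\x00", 4096)))
}
```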
Signed-off-by: Michael Crosby --- pkg/chrootarchive/archive.go | 5 +---- pkg/chrootarchive/archive_test.go | 16 ++++++++++++++++ pkg/chrootarchive/diff.go | 1 + pkg/chrootarchive/init.go | 8 ++++++++ 4 files changed, 26 insertions(+), 4 deletions(-) diff --git a/pkg/chrootarchive/archive.go b/pkg/chrootarchive/archive.go index 8d139fa857..2942d9d6c0 100644 --- a/pkg/chrootarchive/archive.go +++ b/pkg/chrootarchive/archive.go @@ -6,7 +6,6 @@ import ( "flag" "fmt" "io" - "io/ioutil" "os" "runtime" "strings" @@ -35,9 +34,7 @@ func untar() { fatal(err) } // fully consume stdin in case it is zero padded - if _, err := ioutil.ReadAll(os.Stdin); err != nil { - fatal(err) - } + flush(os.Stdin) os.Exit(0) } diff --git a/pkg/chrootarchive/archive_test.go b/pkg/chrootarchive/archive_test.go index 8477c0676f..0fe3d64f95 100644 --- a/pkg/chrootarchive/archive_test.go +++ b/pkg/chrootarchive/archive_test.go @@ -83,3 +83,19 @@ func TestChrootUntarEmptyArchiveFromSlowReader(t *testing.T) { t.Fatal(err) } } + +func TestChrootApplyEmptyArchiveFromSlowReader(t *testing.T) { + tmpdir, err := ioutil.TempDir("", "docker-TestChrootApplyEmptyArchiveFromSlowReader") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(tmpdir) + dest := filepath.Join(tmpdir, "dest") + if err := os.MkdirAll(dest, 0700); err != nil { + t.Fatal(err) + } + stream := &slowEmptyTarReader{size: 10240, chunkSize: 1024} + if err := ApplyLayer(dest, stream); err != nil { + t.Fatal(err) + } +} diff --git a/pkg/chrootarchive/diff.go b/pkg/chrootarchive/diff.go index 2653aefe9d..f9f9b9d5e0 100644 --- a/pkg/chrootarchive/diff.go +++ b/pkg/chrootarchive/diff.go @@ -32,6 +32,7 @@ func applyLayer() { fatal(err) } os.RemoveAll(tmpDir) + flush(os.Stdin) os.Exit(0) } diff --git a/pkg/chrootarchive/init.go b/pkg/chrootarchive/init.go index b548e9fe72..4116026eff 100644 --- a/pkg/chrootarchive/init.go +++ b/pkg/chrootarchive/init.go @@ -2,6 +2,8 @@ package chrootarchive import ( "fmt" + "io" + "io/ioutil" "os" "github.com/docker/docker/pkg/reexec" @@ -16,3 +18,9 @@ func fatal(err error) { fmt.Fprint(os.Stderr, err) os.Exit(1) } + +// flush consumes all the bytes from the reader discarding +// any errors +func flush(r io.Reader) { + io.Copy(ioutil.Discard, r) +} From 2597bffe9a6538c2db73e2ce380294547b738e2e Mon Sep 17 00:00:00 2001 From: shishir-a412ed Date: Mon, 24 Nov 2014 16:20:18 -0500 Subject: [PATCH 567/592] Added description for 'docker run' command, -c/--cpu-shares flag Signed-off-by: Shishir Mahajan --- docs/sources/reference/run.md | 13 +++++++++++++ 1 file changed, 13 insertions(+) diff --git a/docs/sources/reference/run.md b/docs/sources/reference/run.md index 8ac9f9d789..6d2cd3d101 100644 --- a/docs/sources/reference/run.md +++ b/docs/sources/reference/run.md @@ -301,6 +301,19 @@ get the same proportion of CPU cycles, but you can tell the kernel to give more shares of CPU time to one or more containers when you start them via Docker. +The flag `-c` or `--cpu-shares` with value 0 indicates that the running +container has access to all 1024 (default) CPU shares. However, this value +can be modified to run a container with a different priority or different +proportion of CPU cycles. + +E.g., If we start three {C0, C1, C2} containers with default values +(`-c` OR `--cpu-shares` = 0) and one {C3} with (`-c` or `--cpu-shares`=512) +then C0, C1, and C2 would have access to 100% CPU shares (1024) and C3 would +only have access to 50% CPU shares (512). 
In the context of a time-sliced OS +with time quantum set as 100 milliseconds, containers C0, C1, and C2 will run +for full-time quantum, and container C3 will run for half-time quantum i.e 50 +milliseconds. + ## Runtime privilege, Linux capabilities, and LXC configuration --cap-add: Add Linux capabilities From 8dcbd6ab636e756736cef5408710ff97fd207370 Mon Sep 17 00:00:00 2001 From: Srini Brahmaroutu Date: Tue, 9 Dec 2014 00:45:42 +0000 Subject: [PATCH 568/592] User should get error message on wrong config closes #9501 Signed-off-by: Srini Brahmaroutu --- daemon/container.go | 5 ++++- daemon/utils.go | 10 +++++++--- daemon/utils_test.go | 5 ++++- 3 files changed, 15 insertions(+), 5 deletions(-) diff --git a/daemon/container.go b/daemon/container.go index b35969900c..45658c5830 100644 --- a/daemon/container.go +++ b/daemon/container.go @@ -260,7 +260,10 @@ func populateCommand(c *Container, env []string) error { autoCreatedDevices := append(devices.DefaultAutoCreatedDevices, userSpecifiedDevices...) // TODO: this can be removed after lxc-conf is fully deprecated - lxcConfig := mergeLxcConfIntoOptions(c.hostConfig) + lxcConfig, err := mergeLxcConfIntoOptions(c.hostConfig) + if err != nil { + return err + } resources := &execdriver.Resources{ Memory: c.Config.Memory, diff --git a/daemon/utils.go b/daemon/utils.go index 9c43236e0b..6202e6d961 100644 --- a/daemon/utils.go +++ b/daemon/utils.go @@ -1,6 +1,7 @@ package daemon import ( + "errors" "fmt" "strings" @@ -32,9 +33,9 @@ func migratePortMappings(config *runconfig.Config, hostConfig *runconfig.HostCon return nil } -func mergeLxcConfIntoOptions(hostConfig *runconfig.HostConfig) []string { +func mergeLxcConfIntoOptions(hostConfig *runconfig.HostConfig) ([]string, error) { if hostConfig == nil { - return nil + return nil, nil } out := []string{} @@ -44,10 +45,13 @@ func mergeLxcConfIntoOptions(hostConfig *runconfig.HostConfig) []string { for _, pair := range lxcConf { // because lxc conf gets the driver name lxc.XXXX we need to trim it off // and let the lxc driver add it back later if needed + if !strings.Contains(pair.Key, ".") { + return nil, errors.New("Illegal Key passed into LXC Configurations") + } parts := strings.SplitN(pair.Key, ".", 2) out = append(out, fmt.Sprintf("%s=%s", parts[1], pair.Value)) } } - return out + return out, nil } diff --git a/daemon/utils_test.go b/daemon/utils_test.go index 7748b86022..8a2fa719ed 100644 --- a/daemon/utils_test.go +++ b/daemon/utils_test.go @@ -14,7 +14,10 @@ func TestMergeLxcConfig(t *testing.T) { }, } - out := mergeLxcConfIntoOptions(hostConfig) + out, err := mergeLxcConfIntoOptions(hostConfig) + if err != nil { + t.Fatalf("Failed to merge Lxc Config ", err) + } cpuset := out[0] if expected := "cgroups.cpuset=1,2"; cpuset != expected { From 243a640d3e593ee11e31ac55a4df8c887c2b09c9 Mon Sep 17 00:00:00 2001 From: Brian Goff Date: Wed, 3 Dec 2014 20:27:39 -0500 Subject: [PATCH 569/592] Add test for exec tty stdin close Signed-off-by: Brian Goff --- integration-cli/docker_cli_exec_test.go | 41 +++++++++++++++++++++++++ 1 file changed, 41 insertions(+) diff --git a/integration-cli/docker_cli_exec_test.go b/integration-cli/docker_cli_exec_test.go index 82ad9afe7b..3c11a87808 100644 --- a/integration-cli/docker_cli_exec_test.go +++ b/integration-cli/docker_cli_exec_test.go @@ -263,3 +263,44 @@ func TestExecPausedContainer(t *testing.T) { logDone("exec - exec should not exec a pause container") } + +// regression test for #9476 +func TestExecTtyCloseStdin(t *testing.T) { + defer deleteAllContainers() 
+ + cmd := exec.Command(dockerBinary, "run", "-d", "-it", "--name", "exec_tty_stdin", "busybox") + if out, _, err := runCommandWithOutput(cmd); err != nil { + t.Fatal(out, err) + } + + cmd = exec.Command(dockerBinary, "exec", "-it", "exec_tty_stdin", "cat") + stdinRw, err := cmd.StdinPipe() + if err != nil { + t.Fatal(err) + } + + stdinRw.Write([]byte("test")) + stdinRw.Close() + + if out, _, err := runCommandWithOutput(cmd); err != nil { + t.Fatal(out, err) + } + + cmd = exec.Command(dockerBinary, "top", "exec_tty_stdin") + out, _, err := runCommandWithOutput(cmd) + if err != nil { + t.Fatal(out, err) + } + + outArr := strings.Split(out, "\n") + if len(outArr) > 3 || strings.Contains(out, "nsenter-exec") { + // This is the really bad part + if out, _, err := runCommandWithOutput(exec.Command(dockerBinary, "rm", "-f", "exec_tty_stdin")); err != nil { + t.Fatal(out, err) + } + + t.Fatalf("exec process left running\n\t %s", out) + } + + logDone("exec - stdin is closed properly with tty enabled") +} From 67e3ddb75ff27b8de0022e330413b4308ec5b010 Mon Sep 17 00:00:00 2001 From: Arnaud Porterie Date: Fri, 5 Dec 2014 16:50:56 -0800 Subject: [PATCH 570/592] Forbid client piping to tty enabled container Forbid `docker run -t` with a redirected stdin (such as `echo test | docker run -ti busybox cat`). Forbid `docker exec -t` with a redirected stdin. Forbid `docker attach` with a redirect stdin toward a tty enabled container. Signed-off-by: Arnaud Porterie --- api/client/cli.go | 11 ++ api/client/commands.go | 16 ++- docs/man/docker-attach.1.md | 3 + docs/man/docker-exec.1.md | 3 + docs/man/docker-run.1.md | 3 + docs/sources/reference/run.md | 7 +- integration-cli/docker_cli_attach_test.go | 47 ++++++++ integration-cli/docker_cli_exec_test.go | 49 +++++++- integration-cli/docker_cli_run_test.go | 29 +++++ integration/commands_test.go | 133 +++++++++------------- 10 files changed, 213 insertions(+), 88 deletions(-) diff --git a/api/client/cli.go b/api/client/cli.go index a477d0b3a9..e54eb8056e 100644 --- a/api/client/cli.go +++ b/api/client/cli.go @@ -3,6 +3,7 @@ package client import ( "crypto/tls" "encoding/json" + "errors" "fmt" "io" "net" @@ -104,6 +105,16 @@ func (cli *DockerCli) LoadConfigFile() (err error) { return err } +func (cli *DockerCli) CheckTtyInput(attachStdin, ttyMode bool) error { + // In order to attach to a container tty, input stream for the client must + // be a tty itself: redirecting or piping the client standard input is + // incompatible with `docker run -t`, `docker exec -t` or `docker attach`. 
+ if ttyMode && attachStdin && !cli.isTerminalIn { + return errors.New("cannot enable tty mode on non tty input") + } + return nil +} + func NewDockerCli(in io.ReadCloser, out, err io.Writer, key libtrust.PrivateKey, proto, addr string, tlsConfig *tls.Config) *DockerCli { var ( inFd uintptr diff --git a/api/client/commands.go b/api/client/commands.go index 5203513d90..89e5796bbb 100644 --- a/api/client/commands.go +++ b/api/client/commands.go @@ -1974,6 +1974,10 @@ func (cli *DockerCli) CmdAttach(args ...string) error { tty = config.GetBool("Tty") ) + if err := cli.CheckTtyInput(!*noStdin, tty); err != nil { + return err + } + if tty && cli.isTerminalOut { if err := cli.monitorTtySize(cmd.Arg(0), false); err != nil { log.Debugf("Error monitoring TTY size: %s", err) @@ -2288,7 +2292,11 @@ func (cli *DockerCli) CmdRun(args ...string) error { return nil } - if *flDetach { + if !*flDetach { + if err := cli.CheckTtyInput(config.AttachStdin, config.Tty); err != nil { + return err + } + } else { if fl := cmd.Lookup("attach"); fl != nil { flAttach = fl.Value.(*opts.ListOpts) if flAttach.Len() != 0 { @@ -2600,7 +2608,11 @@ func (cli *DockerCli) CmdExec(args ...string) error { return nil } - if execConfig.Detach { + if !execConfig.Detach { + if err := cli.CheckTtyInput(execConfig.AttachStdin, execConfig.Tty); err != nil { + return err + } + } else { if _, _, err := readBody(cli.call("POST", "/exec/"+execID+"/start", execConfig, false)); err != nil { return err } diff --git a/docs/man/docker-attach.1.md b/docs/man/docker-attach.1.md index 21bd566406..19fbaceb4a 100644 --- a/docs/man/docker-attach.1.md +++ b/docs/man/docker-attach.1.md @@ -20,6 +20,9 @@ container, or `CTRL-\` to get a stacktrace of the Docker client when it quits. When you detach from a container the exit code will be returned to the client. +It is forbidden to redirect the standard input of a docker attach command while +attaching to a tty-enabled container (i.e.: launched with -t`). + # OPTIONS **--no-stdin**=*true*|*false* Do not attach STDIN. The default is *false*. diff --git a/docs/man/docker-exec.1.md b/docs/man/docker-exec.1.md index c4e649016a..3db296ed76 100644 --- a/docs/man/docker-exec.1.md +++ b/docs/man/docker-exec.1.md @@ -31,5 +31,8 @@ container is unpaused, and then run **-t**, **--tty**=*true*|*false* Allocate a pseudo-TTY. The default is *false*. +The **-t** option is incompatible with a redirection of the docker client +standard input. + # HISTORY November 2014, updated by Sven Dowideit diff --git a/docs/man/docker-run.1.md b/docs/man/docker-run.1.md index f0129bedc9..44c5545084 100644 --- a/docs/man/docker-run.1.md +++ b/docs/man/docker-run.1.md @@ -267,6 +267,9 @@ outside of a container on the host. input of any container. This can be used, for example, to run a throwaway interactive shell. The default is value is false. +The **-t** option is incompatible with a redirection of the docker client +standard input. + **-u**, **--user**="" Username or UID diff --git a/docs/sources/reference/run.md b/docs/sources/reference/run.md index 8ac9f9d789..74a567c00b 100644 --- a/docs/sources/reference/run.md +++ b/docs/sources/reference/run.md @@ -94,9 +94,10 @@ specify to which of the three standard streams (`STDIN`, `STDOUT`, $ sudo docker run -a stdin -a stdout -i -t ubuntu /bin/bash -For interactive processes (like a shell) you will typically want a tty -as well as persistent standard input (`STDIN`), so you'll use `-i -t` -together in most interactive cases. 
+For interactive processes (like a shell), you must use `-i -t` together in +order to allocate a tty for the container process. Specifying `-t` is however +forbidden when the client standard output is redirected or pipe, such as in: +`echo test | docker run -i busybox cat`. ## Container identification diff --git a/integration-cli/docker_cli_attach_test.go b/integration-cli/docker_cli_attach_test.go index d03a986e48..0530d3896e 100644 --- a/integration-cli/docker_cli_attach_test.go +++ b/integration-cli/docker_cli_attach_test.go @@ -87,3 +87,50 @@ func TestAttachMultipleAndRestart(t *testing.T) { logDone("attach - multiple attach") } + +func TestAttachTtyWithoutStdin(t *testing.T) { + defer deleteAllContainers() + + cmd := exec.Command(dockerBinary, "run", "-d", "-ti", "busybox") + out, _, err := runCommandWithOutput(cmd) + if err != nil { + t.Fatalf("failed to start container: %v (%v)", out, err) + } + + id := strings.TrimSpace(out) + if err := waitRun(id); err != nil { + t.Fatal(err) + } + + defer func() { + cmd := exec.Command(dockerBinary, "kill", id) + if out, _, err := runCommandWithOutput(cmd); err != nil { + t.Fatalf("failed to kill container: %v (%v)", out, err) + } + }() + + done := make(chan struct{}) + go func() { + defer close(done) + + cmd := exec.Command(dockerBinary, "attach", id) + if _, err := cmd.StdinPipe(); err != nil { + t.Fatal(err) + } + + expected := "cannot enable tty mode" + if out, _, err := runCommandWithOutput(cmd); err == nil { + t.Fatal("attach should have failed") + } else if !strings.Contains(out, expected) { + t.Fatal("attach failed with error %q: expected %q", out, expected) + } + }() + + select { + case <-done: + case <-time.After(attachWait): + t.Fatal("attach is running but should have failed") + } + + logDone("attach - forbid piped stdin to tty enabled container") +} diff --git a/integration-cli/docker_cli_exec_test.go b/integration-cli/docker_cli_exec_test.go index 3c11a87808..b07f215a36 100644 --- a/integration-cli/docker_cli_exec_test.go +++ b/integration-cli/docker_cli_exec_test.go @@ -273,7 +273,7 @@ func TestExecTtyCloseStdin(t *testing.T) { t.Fatal(out, err) } - cmd = exec.Command(dockerBinary, "exec", "-it", "exec_tty_stdin", "cat") + cmd = exec.Command(dockerBinary, "exec", "-i", "exec_tty_stdin", "cat") stdinRw, err := cmd.StdinPipe() if err != nil { t.Fatal(err) @@ -304,3 +304,50 @@ func TestExecTtyCloseStdin(t *testing.T) { logDone("exec - stdin is closed properly with tty enabled") } + +func TestExecTtyWithoutStdin(t *testing.T) { + defer deleteAllContainers() + + cmd := exec.Command(dockerBinary, "run", "-d", "-ti", "busybox") + out, _, err := runCommandWithOutput(cmd) + if err != nil { + t.Fatalf("failed to start container: %v (%v)", out, err) + } + + id := strings.TrimSpace(out) + if err := waitRun(id); err != nil { + t.Fatal(err) + } + + defer func() { + cmd := exec.Command(dockerBinary, "kill", id) + if out, _, err := runCommandWithOutput(cmd); err != nil { + t.Fatalf("failed to kill container: %v (%v)", out, err) + } + }() + + done := make(chan struct{}) + go func() { + defer close(done) + + cmd := exec.Command(dockerBinary, "exec", "-ti", id, "true") + if _, err := cmd.StdinPipe(); err != nil { + t.Fatal(err) + } + + expected := "cannot enable tty mode" + if out, _, err := runCommandWithOutput(cmd); err == nil { + t.Fatal("exec should have failed") + } else if !strings.Contains(out, expected) { + t.Fatal("exec failed with error %q: expected %q", out, expected) + } + }() + + select { + case <-done: + case <-time.After(3 * 
time.Second): + t.Fatal("exec is running but should have failed") + } + + logDone("exec - forbid piped stdin to tty enabled container") +} diff --git a/integration-cli/docker_cli_run_test.go b/integration-cli/docker_cli_run_test.go index 20a096f196..0b56f235fe 100644 --- a/integration-cli/docker_cli_run_test.go +++ b/integration-cli/docker_cli_run_test.go @@ -2742,3 +2742,32 @@ func TestRunPortFromDockerRangeInUse(t *testing.T) { logDone("run - find another port if port from autorange already bound") } + +func TestRunTtyWithPipe(t *testing.T) { + defer deleteAllContainers() + + done := make(chan struct{}) + go func() { + defer close(done) + + cmd := exec.Command(dockerBinary, "run", "-ti", "busybox", "true") + if _, err := cmd.StdinPipe(); err != nil { + t.Fatal(err) + } + + expected := "cannot enable tty mode" + if out, _, err := runCommandWithOutput(cmd); err == nil { + t.Fatal("run should have failed") + } else if !strings.Contains(out, expected) { + t.Fatal("run failed with error %q: expected %q", out, expected) + } + }() + + select { + case <-done: + case <-time.After(3 * time.Second): + t.Fatal("container is running but should have failed") + } + + logDone("run - forbid piped stdin with tty") +} diff --git a/integration/commands_test.go b/integration/commands_test.go index b00c68641e..aa21791b50 100644 --- a/integration/commands_test.go +++ b/integration/commands_test.go @@ -15,6 +15,7 @@ import ( "github.com/docker/docker/pkg/term" "github.com/docker/docker/utils" "github.com/docker/libtrust" + "github.com/kr/pty" ) func closeWrap(args ...io.Closer) error { @@ -162,72 +163,20 @@ func TestRunDisconnect(t *testing.T) { }) } -// Expected behaviour: the process stay alive when the client disconnects -// but the client detaches. -func TestRunDisconnectTty(t *testing.T) { - - stdin, stdinPipe := io.Pipe() - stdout, stdoutPipe := io.Pipe() - key, err := libtrust.GenerateECP256PrivateKey() - if err != nil { - t.Fatal(err) - } - - cli := client.NewDockerCli(stdin, stdoutPipe, ioutil.Discard, key, testDaemonProto, testDaemonAddr, nil) - defer cleanup(globalEngine, t) - - c1 := make(chan struct{}) - go func() { - defer close(c1) - // We're simulating a disconnect so the return value doesn't matter. What matters is the - // fact that CmdRun returns. - if err := cli.CmdRun("-i", "-t", unitTestImageID, "/bin/cat"); err != nil { - log.Debugf("Error CmdRun: %s", err) - } - }() - - container := waitContainerStart(t, 10*time.Second) - - state := setRaw(t, container) - defer unsetRaw(t, container, state) - - // Client disconnect after run -i should keep stdin out in TTY mode - setTimeout(t, "Read/Write assertion timed out", 2*time.Second, func() { - if err := assertPipe("hello\n", "hello", stdout, stdinPipe, 150); err != nil { - t.Fatal(err) - } - }) - - // Close pipes (simulate disconnect) - if err := closeWrap(stdin, stdinPipe, stdout, stdoutPipe); err != nil { - t.Fatal(err) - } - - // wait for CmdRun to return - setTimeout(t, "Waiting for CmdRun timed out", 5*time.Second, func() { - <-c1 - }) - - // In tty mode, we expect the process to stay alive even after client's stdin closes. - - // Give some time to monitor to do his thing - container.WaitStop(500 * time.Millisecond) - if !container.IsRunning() { - t.Fatalf("/bin/cat should still be running after closing stdin (tty mode)") - } -} - // TestRunDetach checks attaching and detaching with the escape sequence. 
func TestRunDetach(t *testing.T) { - - stdin, stdinPipe := io.Pipe() stdout, stdoutPipe := io.Pipe() + cpty, tty, err := pty.Open() + if err != nil { + t.Fatal(err) + } + key, err := libtrust.GenerateECP256PrivateKey() if err != nil { t.Fatal(err) } - cli := client.NewDockerCli(stdin, stdoutPipe, ioutil.Discard, key, testDaemonProto, testDaemonAddr, nil) + cli := client.NewDockerCli(tty, stdoutPipe, ioutil.Discard, key, testDaemonProto, testDaemonAddr, nil) defer cleanup(globalEngine, t) ch := make(chan struct{}) @@ -242,22 +191,22 @@ func TestRunDetach(t *testing.T) { defer unsetRaw(t, container, state) setTimeout(t, "First read/write assertion timed out", 2*time.Second, func() { - if err := assertPipe("hello\n", "hello", stdout, stdinPipe, 150); err != nil { + if err := assertPipe("hello\n", "hello", stdout, cpty, 150); err != nil { t.Fatal(err) } }) setTimeout(t, "Escape sequence timeout", 5*time.Second, func() { - stdinPipe.Write([]byte{16}) + cpty.Write([]byte{16}) time.Sleep(100 * time.Millisecond) - stdinPipe.Write([]byte{17}) + cpty.Write([]byte{17}) }) // wait for CmdRun to return setTimeout(t, "Waiting for CmdRun timed out", 15*time.Second, func() { <-ch }) - closeWrap(stdin, stdinPipe, stdout, stdoutPipe) + closeWrap(cpty, stdout, stdoutPipe) time.Sleep(500 * time.Millisecond) if !container.IsRunning() { @@ -271,14 +220,18 @@ func TestRunDetach(t *testing.T) { // TestAttachDetach checks that attach in tty mode can be detached using the long container ID func TestAttachDetach(t *testing.T) { - stdin, stdinPipe := io.Pipe() stdout, stdoutPipe := io.Pipe() + cpty, tty, err := pty.Open() + if err != nil { + t.Fatal(err) + } + key, err := libtrust.GenerateECP256PrivateKey() if err != nil { t.Fatal(err) } - cli := client.NewDockerCli(stdin, stdoutPipe, ioutil.Discard, key, testDaemonProto, testDaemonAddr, nil) + cli := client.NewDockerCli(tty, stdoutPipe, ioutil.Discard, key, testDaemonProto, testDaemonAddr, nil) defer cleanup(globalEngine, t) ch := make(chan struct{}) @@ -309,9 +262,13 @@ func TestAttachDetach(t *testing.T) { state := setRaw(t, container) defer unsetRaw(t, container, state) - stdin, stdinPipe = io.Pipe() stdout, stdoutPipe = io.Pipe() - cli = client.NewDockerCli(stdin, stdoutPipe, ioutil.Discard, key, testDaemonProto, testDaemonAddr, nil) + cpty, tty, err = pty.Open() + if err != nil { + t.Fatal(err) + } + + cli = client.NewDockerCli(tty, stdoutPipe, ioutil.Discard, key, testDaemonProto, testDaemonAddr, nil) ch = make(chan struct{}) go func() { @@ -324,7 +281,7 @@ func TestAttachDetach(t *testing.T) { }() setTimeout(t, "First read/write assertion timed out", 2*time.Second, func() { - if err := assertPipe("hello\n", "hello", stdout, stdinPipe, 150); err != nil { + if err := assertPipe("hello\n", "hello", stdout, cpty, 150); err != nil { if err != io.ErrClosedPipe { t.Fatal(err) } @@ -332,9 +289,9 @@ func TestAttachDetach(t *testing.T) { }) setTimeout(t, "Escape sequence timeout", 5*time.Second, func() { - stdinPipe.Write([]byte{16}) + cpty.Write([]byte{16}) time.Sleep(100 * time.Millisecond) - stdinPipe.Write([]byte{17}) + cpty.Write([]byte{17}) }) // wait for CmdRun to return @@ -342,7 +299,7 @@ func TestAttachDetach(t *testing.T) { <-ch }) - closeWrap(stdin, stdinPipe, stdout, stdoutPipe) + closeWrap(cpty, stdout, stdoutPipe) time.Sleep(500 * time.Millisecond) if !container.IsRunning() { @@ -356,14 +313,18 @@ func TestAttachDetach(t *testing.T) { // TestAttachDetachTruncatedID checks that attach in tty mode can be detached func TestAttachDetachTruncatedID(t 
*testing.T) { - stdin, stdinPipe := io.Pipe() stdout, stdoutPipe := io.Pipe() + cpty, tty, err := pty.Open() + if err != nil { + t.Fatal(err) + } + key, err := libtrust.GenerateECP256PrivateKey() if err != nil { t.Fatal(err) } - cli := client.NewDockerCli(stdin, stdoutPipe, ioutil.Discard, key, testDaemonProto, testDaemonAddr, nil) + cli := client.NewDockerCli(tty, stdoutPipe, ioutil.Discard, key, testDaemonProto, testDaemonAddr, nil) defer cleanup(globalEngine, t) // Discard the CmdRun output @@ -379,9 +340,13 @@ func TestAttachDetachTruncatedID(t *testing.T) { state := setRaw(t, container) defer unsetRaw(t, container, state) - stdin, stdinPipe = io.Pipe() stdout, stdoutPipe = io.Pipe() - cli = client.NewDockerCli(stdin, stdoutPipe, ioutil.Discard, key, testDaemonProto, testDaemonAddr, nil) + cpty, tty, err = pty.Open() + if err != nil { + t.Fatal(err) + } + + cli = client.NewDockerCli(tty, stdoutPipe, ioutil.Discard, key, testDaemonProto, testDaemonAddr, nil) ch := make(chan struct{}) go func() { @@ -394,7 +359,7 @@ func TestAttachDetachTruncatedID(t *testing.T) { }() setTimeout(t, "First read/write assertion timed out", 2*time.Second, func() { - if err := assertPipe("hello\n", "hello", stdout, stdinPipe, 150); err != nil { + if err := assertPipe("hello\n", "hello", stdout, cpty, 150); err != nil { if err != io.ErrClosedPipe { t.Fatal(err) } @@ -402,16 +367,16 @@ func TestAttachDetachTruncatedID(t *testing.T) { }) setTimeout(t, "Escape sequence timeout", 5*time.Second, func() { - stdinPipe.Write([]byte{16}) + cpty.Write([]byte{16}) time.Sleep(100 * time.Millisecond) - stdinPipe.Write([]byte{17}) + cpty.Write([]byte{17}) }) // wait for CmdRun to return setTimeout(t, "Waiting for CmdAttach timed out", 15*time.Second, func() { <-ch }) - closeWrap(stdin, stdinPipe, stdout, stdoutPipe) + closeWrap(cpty, stdout, stdoutPipe) time.Sleep(500 * time.Millisecond) if !container.IsRunning() { @@ -425,14 +390,18 @@ func TestAttachDetachTruncatedID(t *testing.T) { // Expected behaviour, the process stays alive when the client disconnects func TestAttachDisconnect(t *testing.T) { - stdin, stdinPipe := io.Pipe() stdout, stdoutPipe := io.Pipe() + cpty, tty, err := pty.Open() + if err != nil { + t.Fatal(err) + } + key, err := libtrust.GenerateECP256PrivateKey() if err != nil { t.Fatal(err) } - cli := client.NewDockerCli(stdin, stdoutPipe, ioutil.Discard, key, testDaemonProto, testDaemonAddr, nil) + cli := client.NewDockerCli(tty, stdoutPipe, ioutil.Discard, key, testDaemonProto, testDaemonAddr, nil) defer cleanup(globalEngine, t) go func() { @@ -470,12 +439,12 @@ func TestAttachDisconnect(t *testing.T) { }() setTimeout(t, "First read/write assertion timed out", 2*time.Second, func() { - if err := assertPipe("hello\n", "hello", stdout, stdinPipe, 150); err != nil { + if err := assertPipe("hello\n", "hello", stdout, cpty, 150); err != nil { t.Fatal(err) } }) // Close pipes (client disconnects) - if err := closeWrap(stdin, stdinPipe, stdout, stdoutPipe); err != nil { + if err := closeWrap(cpty, stdout, stdoutPipe); err != nil { t.Fatal(err) } From a62cbdeb47e5e504e670c546ad8bec45e696f370 Mon Sep 17 00:00:00 2001 From: Alexandr Morozov Date: Tue, 9 Dec 2014 19:08:24 -0800 Subject: [PATCH 571/592] Use Set for stderr "logs" job in builder Because engine implicitly adds his stder to job stderr Signed-off-by: Alexandr Morozov --- builder/internals.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/builder/internals.go b/builder/internals.go index 706064f1e2..fe2eb57859 100644 --- 
a/builder/internals.go +++ b/builder/internals.go @@ -539,7 +539,7 @@ func (b *Builder) run(c *daemon.Container) error { logsJob.Setenv("stdout", "1") logsJob.Setenv("stderr", "1") logsJob.Stdout.Add(b.OutStream) - logsJob.Stderr.Add(b.ErrStream) + logsJob.Stderr.Set(b.ErrStream) if err := logsJob.Run(); err != nil { return err } From a25168d28d552c33988c1a0f3d6a54355c246566 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?J=C3=B6rg=20Thalheim?= Date: Tue, 9 Dec 2014 20:21:21 +0100 Subject: [PATCH 572/592] Fix interactive TLS postContainersAttach MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit related to 266a1044deb9a085ed43dccdc6 Signed-off-by: Jörg Thalheim --- api/server/server.go | 47 +++++++++++++------------------------------- 1 file changed, 14 insertions(+), 33 deletions(-) diff --git a/api/server/server.go b/api/server/server.go index 4465e8d4a0..629ad0ba02 100644 --- a/api/server/server.go +++ b/api/server/server.go @@ -65,6 +65,18 @@ func hijackServer(w http.ResponseWriter) (io.ReadCloser, io.Writer, error) { return conn, conn, nil } +func closeStreams(streams ...interface{}) { + for _, stream := range streams { + if tcpc, ok := stream.(interface { + CloseWrite() error + }); ok { + tcpc.CloseWrite() + } else if closer, ok := stream.(io.Closer); ok { + closer.Close() + } + } +} + // Check to make sure request's Content-Type is application/json func checkForJson(r *http.Request) error { ct := r.Header.Get("Content-Type") @@ -871,20 +883,7 @@ func postContainersAttach(eng *engine.Engine, version version.Version, w http.Re if err != nil { return err } - defer func() { - if tcpc, ok := inStream.(*net.TCPConn); ok { - tcpc.CloseWrite() - } else { - inStream.Close() - } - }() - defer func() { - if tcpc, ok := outStream.(*net.TCPConn); ok { - tcpc.CloseWrite() - } else if closer, ok := outStream.(io.Closer); ok { - closer.Close() - } - }() + defer closeStreams(inStream, outStream) var errStream io.Writer @@ -1134,25 +1133,7 @@ func postContainerExecStart(eng *engine.Engine, version version.Version, w http. if err != nil { return err } - - defer func() { - if cw, ok := inStream.(interface { - CloseWrite() error - }); ok { - cw.CloseWrite() - } else { - inStream.Close() - } - }() - defer func() { - if cw, ok := outStream.(interface { - CloseWrite() error - }); ok { - cw.CloseWrite() - } else if closer, ok := outStream.(io.Closer); ok { - closer.Close() - } - }() + defer closeStreams(inStream, outStream) var errStream io.Writer From cfc24769a26e825e4267cbfdd59af807e27508b9 Mon Sep 17 00:00:00 2001 From: Arnaud Porterie Date: Wed, 10 Dec 2014 11:09:03 -0800 Subject: [PATCH 573/592] Fix permissions on ADD/COPY Fix a regression introduced in PR#9467 when a single file was added or copied. Signed-off-by: Arnaud Porterie --- builder/internals.go | 10 ++++++++- integration-cli/docker_cli_build_test.go | 28 ++++++++++++++++++++++++ 2 files changed, 37 insertions(+), 1 deletion(-) diff --git a/builder/internals.go b/builder/internals.go index fe2eb57859..6935f4f1c8 100644 --- a/builder/internals.go +++ b/builder/internals.go @@ -660,11 +660,19 @@ func copyAsDirectory(source, destination string) error { } func fixPermissions(source, destination string, uid, gid int) error { + // The copied root permission should not be changed for previously existing + // directories. 
+	s, err := os.Stat(destination)
+	if err != nil && !os.IsNotExist(err) {
+		return err
+	}
+	fixRootPermission := (err != nil) || !s.IsDir()
+
 	// We Walk on the source rather than on the destination because we don't
 	// want to change permissions on things we haven't created or modified.
 	return filepath.Walk(source, func(fullpath string, info os.FileInfo, err error) error {
 		// Do not alter the walk root itself as it potentially existed before.
-		if source == fullpath {
+		if !fixRootPermission && (source == fullpath) {
 			return nil
 		}
 		// Path is prefixed by source: substitute with destination instead.
diff --git a/integration-cli/docker_cli_build_test.go b/integration-cli/docker_cli_build_test.go
index bb11431cd6..785b3368ca 100644
--- a/integration-cli/docker_cli_build_test.go
+++ b/integration-cli/docker_cli_build_test.go
@@ -3564,3 +3564,31 @@ func TestBuildStderr(t *testing.T) {
 	}
 	logDone("build - testing stderr")
 }
+
+func TestBuildChownSingleFile(t *testing.T) {
+	name := "testbuildchownsinglefile"
+	defer deleteImages(name)
+
+	ctx, err := fakeContext(`
+FROM busybox
+COPY test /
+RUN ls -l /test
+RUN [ $(ls -l /test | awk '{print $3":"$4}') = 'root:root' ]
+`, map[string]string{
+		"test": "test",
+	})
+	if err != nil {
+		t.Fatal(err)
+	}
+	defer ctx.Close()
+
+	if err := os.Chown(filepath.Join(ctx.Dir, "test"), 4242, 4242); err != nil {
+		t.Fatal(err)
+	}
+
+	if _, err := buildImageFromContext(name, ctx, true); err != nil {
+		t.Fatal(err)
+	}
+
+	logDone("build - change permission on single file")
+}

From 340ea25ac1c01561399175e4de8cecc2928ccf4b Mon Sep 17 00:00:00 2001
From: Jessica Frazelle
Date: Mon, 8 Dec 2014 10:18:17 -0800
Subject: [PATCH 574/592] add note to docs about overlay + btrfs

Docker-DCO-1.1-Signed-off-by: Jessica Frazelle (github: jfrazelle)
---
 docs/sources/reference/commandline/cli.md | 5 ++++-
 1 file changed, 4 insertions(+), 1 deletion(-)

diff --git a/docs/sources/reference/commandline/cli.md b/docs/sources/reference/commandline/cli.md
index 7ffdcabb54..577a4c68c0 100644
--- a/docs/sources/reference/commandline/cli.md
+++ b/docs/sources/reference/commandline/cli.md
@@ -180,7 +180,10 @@ share executable memory between devices. Use `docker -d -s btrfs -g /mnt/btrfs_p
 
 The `overlay` is a very fast union filesystem. It is now merged in the main
 Linux kernel as of [3.18.0](https://lkml.org/lkml/2014/10/26/137).
-Call `docker -d -s overlay` to use it.
+Call `docker -d -s overlay` to use it. 
+> **Note:**
+> It is currently unsupported on `btrfs` or any Copy on Write filesystem
+> and should only be used over `ext4` partitions.
 
 #### Storage driver options
 
From 3b4de1070fac41c400c22f200438c90823e7769d Mon Sep 17 00:00:00 2001
From: Michael Crosby
Date: Wed, 10 Dec 2014 17:37:31 -0800
Subject: [PATCH 575/592] Prevent loop with var overshadowing

In case of a 3xx redirect the variable was being overshadowed and never
changed, causing an infinite loop.
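A minimal, self-contained sketch (not part of this patch) of the shadowing pattern described above; the `nextStatus` helper and the status codes are invented for illustration.

```
package main

import "fmt"

// nextStatus stands in for an HTTP round trip: it reports a redirect twice,
// then a final 200.
func nextStatus(calls *int) int {
	*calls++
	if *calls < 3 {
		return 307
	}
	return 200
}

func main() {
	calls := 0
	status := nextStatus(&calls)
	for status >= 300 && status < 400 {
		// Writing "status := nextStatus(&calls)" here would declare a new
		// variable scoped to the loop body; the outer "status" checked in
		// the loop condition would stay 307 and the loop would never end.
		// Plain assignment updates the outer variable and ends the loop.
		status = nextStatus(&calls)
	}
	fmt.Println("final status:", status, "after", calls, "calls")
}
```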
Fixes #9480 Signed-off-by: Michael Crosby --- registry/session.go | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/registry/session.go b/registry/session.go index ba6df35841..2658ec1a81 100644 --- a/registry/session.go +++ b/registry/session.go @@ -505,7 +505,7 @@ func (r *Session) PushImageJSONIndex(remote string, imgList []*ImgData, validate // Redirect if necessary for res.StatusCode >= 300 && res.StatusCode < 400 { log.Debugf("Redirected to %s", res.Header.Get("Location")) - req, err = r.reqFactory.NewRequest("PUT", res.Header.Get("Location"), bytes.NewReader(imgListJSON)) + req, err := r.reqFactory.NewRequest("PUT", res.Header.Get("Location"), bytes.NewReader(imgListJSON)) if err != nil { return nil, err } @@ -515,10 +515,11 @@ func (r *Session) PushImageJSONIndex(remote string, imgList []*ImgData, validate if validate { req.Header["X-Docker-Endpoints"] = regs } - res, _, err := r.doRequest(req) + redirect, _, err := r.doRequest(req) if err != nil { return nil, err } + res = redirect defer res.Body.Close() } From 9a7a1e5be01b829e64b32a8b0f1d260f0a2a45c3 Mon Sep 17 00:00:00 2001 From: Michael Crosby Date: Wed, 10 Dec 2014 18:08:40 -0800 Subject: [PATCH 576/592] Refactor put image function's redirect loop Signed-off-by: Michael Crosby --- registry/session.go | 64 ++++++++++++++++++++++++--------------------- 1 file changed, 34 insertions(+), 30 deletions(-) diff --git a/registry/session.go b/registry/session.go index 2658ec1a81..4b2f55225f 100644 --- a/registry/session.go +++ b/registry/session.go @@ -462,7 +462,6 @@ func (r *Session) PushRegistryTag(remote, revision, tag, registry string, token func (r *Session) PushImageJSONIndex(remote string, imgList []*ImgData, validate bool, regs []string) (*RepositoryData, error) { cleanImgList := []*ImgData{} - if validate { for _, elem := range imgList { if elem.Checksum != "" { @@ -484,44 +483,28 @@ func (r *Session) PushImageJSONIndex(remote string, imgList []*ImgData, validate u := fmt.Sprintf("%srepositories/%s/%s", r.indexEndpoint.VersionString(1), remote, suffix) log.Debugf("[registry] PUT %s", u) log.Debugf("Image list pushed to index:\n%s", imgListJSON) - req, err := r.reqFactory.NewRequest("PUT", u, bytes.NewReader(imgListJSON)) - if err != nil { - return nil, err + headers := map[string][]string{ + "Content-type": {"application/json"}, + "X-Docker-Token": {"true"}, } - req.Header.Add("Content-type", "application/json") - req.SetBasicAuth(r.authConfig.Username, r.authConfig.Password) - req.ContentLength = int64(len(imgListJSON)) - req.Header.Set("X-Docker-Token", "true") if validate { - req.Header["X-Docker-Endpoints"] = regs + headers["X-Docker-Endpoints"] = regs } - res, _, err := r.doRequest(req) - if err != nil { - return nil, err - } - defer res.Body.Close() - // Redirect if necessary - for res.StatusCode >= 300 && res.StatusCode < 400 { - log.Debugf("Redirected to %s", res.Header.Get("Location")) - req, err := r.reqFactory.NewRequest("PUT", res.Header.Get("Location"), bytes.NewReader(imgListJSON)) - if err != nil { + var res *http.Response + for { + if res, err = r.putImageRequest(u, headers, imgListJSON); err != nil { return nil, err } - req.SetBasicAuth(r.authConfig.Username, r.authConfig.Password) - req.ContentLength = int64(len(imgListJSON)) - req.Header.Set("X-Docker-Token", "true") - if validate { - req.Header["X-Docker-Endpoints"] = regs + if !shouldRedirect(res) { + break } - redirect, _, err := r.doRequest(req) - if err != nil { - return nil, err - } - res = redirect - defer res.Body.Close() + 
res.Body.Close() + u = res.Header.Get("Location") + log.Debugf("Redirected to %s", u) } + defer res.Body.Close() var tokens, endpoints []string if !validate { @@ -564,6 +547,27 @@ func (r *Session) PushImageJSONIndex(remote string, imgList []*ImgData, validate }, nil } +func (r *Session) putImageRequest(u string, headers map[string][]string, body []byte) (*http.Response, error) { + req, err := r.reqFactory.NewRequest("PUT", u, bytes.NewReader(body)) + if err != nil { + return nil, err + } + req.SetBasicAuth(r.authConfig.Username, r.authConfig.Password) + req.ContentLength = int64(len(body)) + for k, v := range headers { + req.Header[k] = v + } + response, _, err := r.doRequest(req) + if err != nil { + return nil, err + } + return response, nil +} + +func shouldRedirect(response *http.Response) bool { + return response.StatusCode >= 300 && response.StatusCode < 400 +} + func (r *Session) SearchRepositories(term string) (*SearchResults, error) { log.Debugf("Index server: %s", r.indexEndpoint) u := r.indexEndpoint.VersionString(1) + "search?q=" + url.QueryEscape(term) From 916cba9c587a3f3ce97b407993fecd96ac2fecaf Mon Sep 17 00:00:00 2001 From: Aleksa Sarai Date: Fri, 12 Dec 2014 05:40:16 +1100 Subject: [PATCH 577/592] builder: internals: fix incomplete chown walk when fixing permissions This patch fixes the permission fixing code used by addContext, which would not be responsible for Lchown-ing top-level directories added to a destination that didn't exist prior to untar-ing the context. Signed-off-by: Aleksa Sarai (github: cyphar) --- builder/internals.go | 30 ++++++++++++++++++------------ 1 file changed, 18 insertions(+), 12 deletions(-) diff --git a/builder/internals.go b/builder/internals.go index 6935f4f1c8..c1fd617a56 100644 --- a/builder/internals.go +++ b/builder/internals.go @@ -615,7 +615,7 @@ func (b *Builder) addContext(container *daemon.Container, orig, dest string, dec } if fi.IsDir() { - return copyAsDirectory(origPath, destPath) + return copyAsDirectory(origPath, destPath, destExists) } // If we are adding a remote file (or we've been told not to decompress), do not try to untar it @@ -649,37 +649,43 @@ func (b *Builder) addContext(container *daemon.Container, orig, dest string, dec resPath = path.Join(destPath, path.Base(origPath)) } - return fixPermissions(origPath, resPath, 0, 0) + return fixPermissions(origPath, resPath, 0, 0, destExists) } -func copyAsDirectory(source, destination string) error { +func copyAsDirectory(source, destination string, destExisted bool) error { if err := chrootarchive.CopyWithTar(source, destination); err != nil { return err } - return fixPermissions(source, destination, 0, 0) + return fixPermissions(source, destination, 0, 0, destExisted) } -func fixPermissions(source, destination string, uid, gid int) error { - // The copied root permission should not be changed for previously existing - // directories. - s, err := os.Stat(destination) - if err != nil && !os.IsNotExist(err) { +func fixPermissions(source, destination string, uid, gid int, destExisted bool) error { + // If the destination didn't already exist, or the destination isn't a + // directory, then we should Lchown the destination. Otherwise, we shouldn't + // Lchown the destination. + destStat, err := os.Stat(destination) + if err != nil { + // This should *never* be reached, because the destination must've already + // been created while untar-ing the context. 
return err } - fixRootPermission := (err != nil) || !s.IsDir() + doChownDestination := !destExisted || !destStat.IsDir() // We Walk on the source rather than on the destination because we don't // want to change permissions on things we haven't created or modified. return filepath.Walk(source, func(fullpath string, info os.FileInfo, err error) error { - // Do not alter the walk root itself as it potentially existed before. - if !fixRootPermission && (source == fullpath) { + // Do not alter the walk root iff. it existed before, as it doesn't fall under + // the domain of "things we should chown". + if !doChownDestination && (source == fullpath) { return nil } + // Path is prefixed by source: substitute with destination instead. cleaned, err := filepath.Rel(source, fullpath) if err != nil { return err } + fullpath = path.Join(destination, cleaned) return os.Lchown(fullpath, uid, gid) }) From fa3ec89515431ce425f924c8a9a804d5cb18382f Mon Sep 17 00:00:00 2001 From: Tianon Gravi Date: Fri, 5 Dec 2014 14:58:46 -0700 Subject: [PATCH 578/592] Simplify FollowSymlinkInScope based on Go 1.3.3's EvalSymlinks Signed-off-by: Andrew Page --- pkg/symlink/LICENSE.APACHE | 191 +++++++++++++++++++++++++++++++++++ pkg/symlink/LICENSE.BSD | 27 +++++ pkg/symlink/MAINTAINERS | 5 +- pkg/symlink/README.md | 5 + pkg/symlink/fs.go | 202 +++++++++++++++++++++---------------- pkg/symlink/fs_test.go | 2 + 6 files changed, 344 insertions(+), 88 deletions(-) create mode 100644 pkg/symlink/LICENSE.APACHE create mode 100644 pkg/symlink/LICENSE.BSD create mode 100644 pkg/symlink/README.md diff --git a/pkg/symlink/LICENSE.APACHE b/pkg/symlink/LICENSE.APACHE new file mode 100644 index 0000000000..27448585ad --- /dev/null +++ b/pkg/symlink/LICENSE.APACHE @@ -0,0 +1,191 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). 
+ + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + Copyright 2014 Docker, Inc. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/pkg/symlink/LICENSE.BSD b/pkg/symlink/LICENSE.BSD new file mode 100644 index 0000000000..ebcfbcc779 --- /dev/null +++ b/pkg/symlink/LICENSE.BSD @@ -0,0 +1,27 @@ +Copyright (c) 2014 The Docker & Go Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/pkg/symlink/MAINTAINERS b/pkg/symlink/MAINTAINERS index 68a97d2fc2..51a41a5b60 100644 --- a/pkg/symlink/MAINTAINERS +++ b/pkg/symlink/MAINTAINERS @@ -1,2 +1,3 @@ -Michael Crosby (@crosbymichael) -Victor Vieux (@vieux) +Tibor Vass (@tiborvass) +Cristian Staretu (@unclejack) +Tianon Gravi (@tianon) diff --git a/pkg/symlink/README.md b/pkg/symlink/README.md new file mode 100644 index 0000000000..0d1dbb70e6 --- /dev/null +++ b/pkg/symlink/README.md @@ -0,0 +1,5 @@ +Package symlink implements EvalSymlinksInScope which is an extension of filepath.EvalSymlinks +from the [Go standard library](https://golang.org/pkg/path/filepath). + +The code from filepath.EvalSymlinks has been adapted in fs.go. +Please read the LICENSE.BSD file that governs fs.go and LICENSE.APACHE for fs_test.go. diff --git a/pkg/symlink/fs.go b/pkg/symlink/fs.go index 6ce99c6bda..b4bdff24dd 100644 --- a/pkg/symlink/fs.go +++ b/pkg/symlink/fs.go @@ -1,101 +1,131 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE.BSD file. + +// This code is a modified version of path/filepath/symlink.go from the Go standard library. + package symlink import ( - "fmt" + "bytes" + "errors" "os" - "path" "path/filepath" "strings" ) -const maxLoopCounter = 100 - -// FollowSymlink will follow an existing link and scope it to the root -// path provided. -// The role of this function is to return an absolute path in the root -// or normalize to the root if the symlink leads to a path which is -// outside of the root. -// Errors encountered while attempting to follow the symlink in path -// will be reported. -// Normalizations to the root don't constitute errors. -func FollowSymlinkInScope(link, root string) (string, error) { - root, err := filepath.Abs(root) +// FollowSymlinkInScope is a wrapper around evalSymlinksInScope that returns an absolute path +func FollowSymlinkInScope(path, root string) (string, error) { + path, err := filepath.Abs(path) if err != nil { return "", err } - - link, err = filepath.Abs(link) + root, err = filepath.Abs(root) if err != nil { return "", err } - - if link == root { - return root, nil - } - - if !strings.HasPrefix(filepath.Dir(link), root) { - return "", fmt.Errorf("%s is not within %s", link, root) - } - - prev := "/" - - for _, p := range strings.Split(link, "/") { - prev = filepath.Join(prev, p) - - loopCounter := 0 - for { - loopCounter++ - - if loopCounter >= maxLoopCounter { - return "", fmt.Errorf("loopCounter reached MAX: %v", loopCounter) - } - - if !strings.HasPrefix(prev, root) { - // Don't resolve symlinks outside of root. For example, - // we don't have to check /home in the below. 
- // - // /home -> usr/home - // FollowSymlinkInScope("/home/bob/foo/bar", "/home/bob/foo") - break - } - - stat, err := os.Lstat(prev) - if err != nil { - if os.IsNotExist(err) { - break - } - return "", err - } - - // let's break if we're not dealing with a symlink - if stat.Mode()&os.ModeSymlink != os.ModeSymlink { - break - } - - // process the symlink - dest, err := os.Readlink(prev) - if err != nil { - return "", err - } - - if path.IsAbs(dest) { - prev = filepath.Join(root, dest) - } else { - prev, _ = filepath.Abs(prev) - - dir := filepath.Dir(prev) - prev = filepath.Join(dir, dest) - if dir == root && !strings.HasPrefix(prev, root) { - prev = root - } - if len(prev) < len(root) || (len(prev) == len(root) && prev != root) { - prev = filepath.Join(root, filepath.Base(dest)) - } - } - } - } - if prev == "/" { - prev = root - } - return prev, nil + return evalSymlinksInScope(path, root) +} + +// evalSymlinksInScope will evaluate symlinks in `path` within a scope `root` and return +// a result guaranteed to be contained within the scope `root`, at the time of the call. +// Symlinks in `root` are not evaluated and left as-is. +// Errors encountered while attempting to evaluate symlinks in path will be returned. +// Non-existing paths are valid and do not constitute an error. +// `path` has to contain `root` as a prefix, or else an error will be returned. +// Trying to break out from `root` does not constitute an error. +// +// Example: +// If /foo/bar -> /outside, +// FollowSymlinkInScope("/foo/bar", "/foo") == "/foo/outside" instead of "/oustide" +// +// IMPORTANT: it is the caller's responsibility to call evalSymlinksInScope *after* relevant symlinks +// are created and not to create subsequently, additional symlinks that could potentially make a +// previously-safe path, unsafe. Example: if /foo/bar does not exist, evalSymlinksInScope("/foo/bar", "/foo") +// would return "/foo/bar". If one makes /foo/bar a symlink to /baz subsequently, then "/foo/bar" should +// no longer be considered safely contained in "/foo". 
+func evalSymlinksInScope(path, root string) (string, error) { + root = filepath.Clean(root) + if path == root { + return path, nil + } + if !strings.HasPrefix(path, root) { + return "", errors.New("evalSymlinksInScope: " + path + " is not in " + root) + } + const maxIter = 255 + originalPath := path + // given root of "/a" and path of "/a/b/../../c" we want path to be "/b/../../c" + path = path[len(root):] + if root == string(filepath.Separator) { + path = string(filepath.Separator) + path + } + if !strings.HasPrefix(path, string(filepath.Separator)) { + return "", errors.New("evalSymlinksInScope: " + path + " is not in " + root) + } + path = filepath.Clean(path) + // consume path by taking each frontmost path element, + // expanding it if it's a symlink, and appending it to b + var b bytes.Buffer + // b here will always be considered to be the "current absolute path inside + // root" when we append paths to it, we also append a slash and use + // filepath.Clean after the loop to trim the trailing slash + for n := 0; path != ""; n++ { + if n > maxIter { + return "", errors.New("evalSymlinksInScope: too many links in " + originalPath) + } + + // find next path component, p + i := strings.IndexRune(path, filepath.Separator) + var p string + if i == -1 { + p, path = path, "" + } else { + p, path = path[:i], path[i+1:] + } + + if p == "" { + continue + } + + // this takes a b.String() like "b/../" and a p like "c" and turns it + // into "/b/../c" which then gets filepath.Cleaned into "/c" and then + // root gets prepended and we Clean again (to remove any trailing slash + // if the first Clean gave us just "/") + cleanP := filepath.Clean(string(filepath.Separator) + b.String() + p) + if cleanP == string(filepath.Separator) { + // never Lstat "/" itself + b.Reset() + continue + } + fullP := filepath.Clean(root + cleanP) + + fi, err := os.Lstat(fullP) + if os.IsNotExist(err) { + // if p does not exist, accept it + b.WriteString(p) + b.WriteRune(filepath.Separator) + continue + } + if err != nil { + return "", err + } + if fi.Mode()&os.ModeSymlink == 0 { + b.WriteString(p + string(filepath.Separator)) + continue + } + + // it's a symlink, put it at the front of path + dest, err := os.Readlink(fullP) + if err != nil { + return "", err + } + if filepath.IsAbs(dest) { + b.Reset() + } + path = dest + string(filepath.Separator) + path + } + + // see note above on "fullP := ..." 
for why this is double-cleaned and + // what's happening here + return filepath.Clean(root + filepath.Clean(string(filepath.Separator)+b.String())), nil } diff --git a/pkg/symlink/fs_test.go b/pkg/symlink/fs_test.go index 0e2f948b6a..24ffb1e7c7 100644 --- a/pkg/symlink/fs_test.go +++ b/pkg/symlink/fs_test.go @@ -1,3 +1,5 @@ +// Licensed under the Apache License, Version 2.0; See LICENSE.APACHE + package symlink import ( From b7f352b0fb7159af283275437ad69142addab13b Mon Sep 17 00:00:00 2001 From: Tibor Vass Date: Sun, 30 Nov 2014 11:39:28 -0500 Subject: [PATCH 579/592] Refactor of symlink tests to remove testdata dir Signed-off-by: Tibor Vass --- pkg/symlink/fs_test.go | 295 ++++++++++++++++-------------------- pkg/symlink/testdata/fs/a/d | 1 - pkg/symlink/testdata/fs/a/e | 1 - pkg/symlink/testdata/fs/a/f | 1 - pkg/symlink/testdata/fs/b/h | 1 - pkg/symlink/testdata/fs/g | 1 - pkg/symlink/testdata/fs/i | 1 - pkg/symlink/testdata/fs/j/k | 1 - 8 files changed, 132 insertions(+), 170 deletions(-) delete mode 120000 pkg/symlink/testdata/fs/a/d delete mode 120000 pkg/symlink/testdata/fs/a/e delete mode 120000 pkg/symlink/testdata/fs/a/f delete mode 120000 pkg/symlink/testdata/fs/b/h delete mode 120000 pkg/symlink/testdata/fs/g delete mode 120000 pkg/symlink/testdata/fs/i delete mode 120000 pkg/symlink/testdata/fs/j/k diff --git a/pkg/symlink/fs_test.go b/pkg/symlink/fs_test.go index 24ffb1e7c7..9d12041dc1 100644 --- a/pkg/symlink/fs_test.go +++ b/pkg/symlink/fs_test.go @@ -3,248 +3,217 @@ package symlink import ( + "fmt" "io/ioutil" "os" "path/filepath" "testing" ) -func abs(t *testing.T, p string) string { - o, err := filepath.Abs(p) - if err != nil { - t.Fatal(err) - } - return o +type dirOrLink struct { + path string + target string } -func TestFollowSymLinkNormal(t *testing.T) { - link := "testdata/fs/a/d/c/data" +func makeFs(tmpdir string, fs []dirOrLink) error { + for _, s := range fs { + s.path = filepath.Join(tmpdir, s.path) + if s.target == "" { + os.MkdirAll(s.path, 0755) + continue + } + if err := os.MkdirAll(filepath.Dir(s.path), 0755); err != nil { + return err + } + if err := os.Symlink(s.target, s.path); err != nil && !os.IsExist(err) { + return err + } + } + return nil +} - rewrite, err := FollowSymlinkInScope(link, "testdata") +func testSymlink(tmpdir, path, expected, scope string) error { + rewrite, err := FollowSymlinkInScope(filepath.Join(tmpdir, path), filepath.Join(tmpdir, scope)) + if err != nil { + return err + } + expected, err = filepath.Abs(filepath.Join(tmpdir, expected)) + if err != nil { + return err + } + if expected != rewrite { + return fmt.Errorf("Expected %q got %q", expected, rewrite) + } + return nil +} + +func TestFollowSymlinkNormal(t *testing.T) { + tmpdir, err := ioutil.TempDir("", "TestFollowSymlinkNormal") if err != nil { t.Fatal(err) } - - if expected := abs(t, "testdata/b/c/data"); expected != rewrite { - t.Fatalf("Expected %s got %s", expected, rewrite) + defer os.RemoveAll(tmpdir) + if err := makeFs(tmpdir, []dirOrLink{{path: "testdata/fs/a/d", target: "/b"}}); err != nil { + t.Fatal(err) + } + if err := testSymlink(tmpdir, "testdata/fs/a/d/c/data", "testdata/b/c/data", "testdata"); err != nil { + t.Fatal(err) } } -func TestFollowSymLinkRelativePath(t *testing.T) { - link := "testdata/fs/i" - - rewrite, err := FollowSymlinkInScope(link, "testdata") +func TestFollowSymlinkRelativePath(t *testing.T) { + tmpdir, err := ioutil.TempDir("", "TestFollowSymlinkRelativePath") if err != nil { t.Fatal(err) } - - if expected := abs(t, "testdata/fs/a"); expected 
!= rewrite { - t.Fatalf("Expected %s got %s", expected, rewrite) + defer os.RemoveAll(tmpdir) + if err := makeFs(tmpdir, []dirOrLink{{path: "testdata/fs/i", target: "a"}}); err != nil { + t.Fatal(err) + } + if err := testSymlink(tmpdir, "testdata/fs/i", "testdata/fs/a", "testdata"); err != nil { + t.Fatal(err) } } -func TestFollowSymLinkUnderLinkedDir(t *testing.T) { - dir, err := ioutil.TempDir("", "docker-fs-test") +func TestFollowSymlinkUnderLinkedDir(t *testing.T) { + tmpdir, err := ioutil.TempDir("", "TestFollowSymlinkUnderLinkedDir") if err != nil { t.Fatal(err) } - defer os.RemoveAll(dir) - - os.Mkdir(filepath.Join(dir, "realdir"), 0700) - os.Symlink("realdir", filepath.Join(dir, "linkdir")) - - linkDir := filepath.Join(dir, "linkdir", "foo") - dirUnderLinkDir := filepath.Join(dir, "linkdir", "foo", "bar") - os.MkdirAll(dirUnderLinkDir, 0700) - - rewrite, err := FollowSymlinkInScope(dirUnderLinkDir, linkDir) - if err != nil { + defer os.RemoveAll(tmpdir) + if err := makeFs(tmpdir, []dirOrLink{ + {path: "linkdir", target: "realdir"}, + {path: "linkdir/foo/bar"}, + }); err != nil { t.Fatal(err) } - - if rewrite != dirUnderLinkDir { - t.Fatalf("Expected %s got %s", dirUnderLinkDir, rewrite) + if err := testSymlink(tmpdir, "linkdir/foo/bar", "linkdir/foo/bar", "linkdir/foo"); err != nil { + t.Fatal(err) } } -func TestFollowSymLinkRandomString(t *testing.T) { +func TestFollowSymlinkRandomString(t *testing.T) { if _, err := FollowSymlinkInScope("toto", "testdata"); err == nil { t.Fatal("Random string should fail but didn't") } } -func TestFollowSymLinkLastLink(t *testing.T) { - link := "testdata/fs/a/d" - - rewrite, err := FollowSymlinkInScope(link, "testdata") +func TestFollowSymlinkLastLink(t *testing.T) { + tmpdir, err := ioutil.TempDir("", "TestFollowSymlinkLastLink") if err != nil { t.Fatal(err) } - - if expected := abs(t, "testdata/b"); expected != rewrite { - t.Fatalf("Expected %s got %s", expected, rewrite) + defer os.RemoveAll(tmpdir) + if err := makeFs(tmpdir, []dirOrLink{{path: "testdata/fs/a/d", target: "/b"}}); err != nil { + t.Fatal(err) + } + if err := testSymlink(tmpdir, "testdata/fs/a/d", "testdata/b", "testdata"); err != nil { + t.Fatal(err) } } -func TestFollowSymLinkRelativeLink(t *testing.T) { - link := "testdata/fs/a/e/c/data" - - rewrite, err := FollowSymlinkInScope(link, "testdata") +func TestFollowSymlinkRelativeLink(t *testing.T) { + tmpdir, err := ioutil.TempDir("", "TestFollowSymlinkRelativeLink") if err != nil { t.Fatal(err) } - - if expected := abs(t, "testdata/fs/b/c/data"); expected != rewrite { - t.Fatalf("Expected %s got %s", expected, rewrite) + defer os.RemoveAll(tmpdir) + if err := makeFs(tmpdir, []dirOrLink{{path: "testdata/fs/a/e", target: "../b"}}); err != nil { + t.Fatal(err) + } + if err := testSymlink(tmpdir, "testdata/fs/a/e/c/data", "testdata/fs/b/c/data", "testdata"); err != nil { + t.Fatal(err) } } -func TestFollowSymLinkRelativeLinkScope(t *testing.T) { +func TestFollowSymlinkRelativeLinkScope(t *testing.T) { + tmpdir, err := ioutil.TempDir("", "TestFollowSymlinkRelativeLinkScope") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(tmpdir) + + if err := makeFs(tmpdir, []dirOrLink{{path: "testdata/fs/a/f", target: "../../../../test"}}); err != nil { + t.Fatal(err) + } // avoid letting symlink f lead us out of the "testdata" scope // we don't normalize because symlink f is in scope and there is no // information leak - { - link := "testdata/fs/a/f" - - rewrite, err := FollowSymlinkInScope(link, "testdata") - if err != nil { - 
t.Fatal(err) - } - - if expected := abs(t, "testdata/test"); expected != rewrite { - t.Fatalf("Expected %s got %s", expected, rewrite) - } + if err := testSymlink(tmpdir, "testdata/fs/a/f", "testdata/test", "testdata"); err != nil { + t.Fatal(err) } - // avoid letting symlink f lead us out of the "testdata/fs" scope // we don't normalize because symlink f is in scope and there is no // information leak - { - link := "testdata/fs/a/f" - - rewrite, err := FollowSymlinkInScope(link, "testdata/fs") - if err != nil { - t.Fatal(err) - } - - if expected := abs(t, "testdata/fs/test"); expected != rewrite { - t.Fatalf("Expected %s got %s", expected, rewrite) - } + if err := testSymlink(tmpdir, "testdata/fs/a/f", "testdata/fs/test", "testdata/fs"); err != nil { + t.Fatal(err) } // avoid letting symlink g (pointed at by symlink h) take out of scope // TODO: we should probably normalize to scope here because ../[....]/root // is out of scope and we leak information - { - link := "testdata/fs/b/h" - - rewrite, err := FollowSymlinkInScope(link, "testdata") - if err != nil { - t.Fatal(err) - } - - if expected := abs(t, "testdata/root"); expected != rewrite { - t.Fatalf("Expected %s got %s", expected, rewrite) - } + if err := makeFs(tmpdir, []dirOrLink{ + {path: "testdata/fs/b/h", target: "../g"}, + {path: "testdata/fs/g", target: "../../../../../../../../../../../../root"}, + }); err != nil { + t.Fatal(err) + } + if err := testSymlink(tmpdir, "testdata/fs/b/h", "testdata/root", "testdata"); err != nil { + t.Fatal(err) } // avoid letting allowing symlink e lead us to ../b // normalize to the "testdata/fs/a" - { - link := "testdata/fs/a/e" - - rewrite, err := FollowSymlinkInScope(link, "testdata/fs/a") - if err != nil { - t.Fatal(err) - } - - if expected := abs(t, "testdata/fs/a"); expected != rewrite { - t.Fatalf("Expected %s got %s", expected, rewrite) - } + if err := makeFs(tmpdir, []dirOrLink{ + {path: "testdata/fs/a/e", target: "../b"}, + }); err != nil { + t.Fatal(err) + } + if err := testSymlink(tmpdir, "testdata/fs/a/e", "testdata/fs/a/b", "testdata/fs/a"); err != nil { + t.Fatal(err) } // avoid letting symlink -> ../directory/file escape from scope // normalize to "testdata/fs/j" - { - link := "testdata/fs/j/k" - - rewrite, err := FollowSymlinkInScope(link, "testdata/fs/j") - if err != nil { - t.Fatal(err) - } - - if expected := abs(t, "testdata/fs/j"); expected != rewrite { - t.Fatalf("Expected %s got %s", expected, rewrite) - } + if err := makeFs(tmpdir, []dirOrLink{{path: "testdata/fs/j/k", target: "../i/a"}}); err != nil { + t.Fatal(err) + } + if err := testSymlink(tmpdir, "testdata/fs/j/k", "testdata/fs/j/i/a", "testdata/fs/j"); err != nil { + t.Fatal(err) } // make sure we don't allow escaping to / // normalize to dir - { - dir, err := ioutil.TempDir("", "docker-fs-test") - if err != nil { - t.Fatal(err) - } - defer os.RemoveAll(dir) - - linkFile := filepath.Join(dir, "foo") - os.Mkdir(filepath.Join(dir, ""), 0700) - os.Symlink("/", linkFile) - - rewrite, err := FollowSymlinkInScope(linkFile, dir) - if err != nil { - t.Fatal(err) - } - - if rewrite != dir { - t.Fatalf("Expected %s got %s", dir, rewrite) - } + if err := makeFs(tmpdir, []dirOrLink{{path: "foo", target: "/"}}); err != nil { + t.Fatal(err) + } + if err := testSymlink(tmpdir, "foo", "", ""); err != nil { + t.Fatal(err) } // make sure we don't allow escaping to / // normalize to dir - { - dir, err := ioutil.TempDir("", "docker-fs-test") - if err != nil { - t.Fatal(err) - } - defer os.RemoveAll(dir) - - linkFile := 
filepath.Join(dir, "foo") - os.Mkdir(filepath.Join(dir, ""), 0700) - os.Symlink("/../../", linkFile) - - rewrite, err := FollowSymlinkInScope(linkFile, dir) - if err != nil { - t.Fatal(err) - } - - if rewrite != dir { - t.Fatalf("Expected %s got %s", dir, rewrite) - } + if err := makeFs(filepath.Join(tmpdir, "dir", "subdir"), []dirOrLink{{path: "foo", target: "/../../"}}); err != nil { + t.Fatal(err) + } + if err := testSymlink(tmpdir, "foo", "", ""); err != nil { + t.Fatal(err) } // make sure we stay in scope without leaking information // this also checks for escaping to / // normalize to dir - { - dir, err := ioutil.TempDir("", "docker-fs-test") - if err != nil { - t.Fatal(err) - } - defer os.RemoveAll(dir) + if err := makeFs(filepath.Join(tmpdir, "dir", "subdir"), []dirOrLink{{path: "foo", target: "../../"}}); err != nil { + t.Fatal(err) + } + if err := testSymlink(tmpdir, "foo", "", ""); err != nil { + t.Fatal(err) + } - linkFile := filepath.Join(dir, "foo") - os.Mkdir(filepath.Join(dir, ""), 0700) - os.Symlink("../../", linkFile) - - rewrite, err := FollowSymlinkInScope(linkFile, dir) - if err != nil { - t.Fatal(err) - } - - if rewrite != dir { - t.Fatalf("Expected %s got %s", dir, rewrite) - } + if err := makeFs(tmpdir, []dirOrLink{{path: "bar/foo", target: "baz/target"}}); err != nil { + t.Fatal(err) + } + if err := testSymlink(tmpdir, "bar/foo", "bar/baz/target", ""); err != nil { + t.Fatal(err) } } diff --git a/pkg/symlink/testdata/fs/a/d b/pkg/symlink/testdata/fs/a/d deleted file mode 120000 index 28abc96048..0000000000 --- a/pkg/symlink/testdata/fs/a/d +++ /dev/null @@ -1 +0,0 @@ -/b \ No newline at end of file diff --git a/pkg/symlink/testdata/fs/a/e b/pkg/symlink/testdata/fs/a/e deleted file mode 120000 index 42532fe13c..0000000000 --- a/pkg/symlink/testdata/fs/a/e +++ /dev/null @@ -1 +0,0 @@ -../b \ No newline at end of file diff --git a/pkg/symlink/testdata/fs/a/f b/pkg/symlink/testdata/fs/a/f deleted file mode 120000 index 21de7edc0a..0000000000 --- a/pkg/symlink/testdata/fs/a/f +++ /dev/null @@ -1 +0,0 @@ -../../../../test \ No newline at end of file diff --git a/pkg/symlink/testdata/fs/b/h b/pkg/symlink/testdata/fs/b/h deleted file mode 120000 index 24387a68fb..0000000000 --- a/pkg/symlink/testdata/fs/b/h +++ /dev/null @@ -1 +0,0 @@ -../g \ No newline at end of file diff --git a/pkg/symlink/testdata/fs/g b/pkg/symlink/testdata/fs/g deleted file mode 120000 index 0ce5de0647..0000000000 --- a/pkg/symlink/testdata/fs/g +++ /dev/null @@ -1 +0,0 @@ -../../../../../../../../../../../../root \ No newline at end of file diff --git a/pkg/symlink/testdata/fs/i b/pkg/symlink/testdata/fs/i deleted file mode 120000 index 2e65efe2a1..0000000000 --- a/pkg/symlink/testdata/fs/i +++ /dev/null @@ -1 +0,0 @@ -a \ No newline at end of file diff --git a/pkg/symlink/testdata/fs/j/k b/pkg/symlink/testdata/fs/j/k deleted file mode 120000 index f559e8fda2..0000000000 --- a/pkg/symlink/testdata/fs/j/k +++ /dev/null @@ -1 +0,0 @@ -../i/a \ No newline at end of file From ff4d05ed73dfff44f9bb919a0a6fccff17f866a9 Mon Sep 17 00:00:00 2001 From: Tibor Vass Date: Fri, 5 Dec 2014 15:33:11 -0500 Subject: [PATCH 580/592] symlink: cleanup names and break big test into multiple smaller ones Signed-off-by: Tibor Vass --- pkg/symlink/fs_test.go | 84 ++++++++++++++++++++++++++++++++---------- 1 file changed, 64 insertions(+), 20 deletions(-) diff --git a/pkg/symlink/fs_test.go b/pkg/symlink/fs_test.go index 9d12041dc1..3869e1d914 100644 --- a/pkg/symlink/fs_test.go +++ b/pkg/symlink/fs_test.go @@ -47,8 +47,8 @@ 
func testSymlink(tmpdir, path, expected, scope string) error { return nil } -func TestFollowSymlinkNormal(t *testing.T) { - tmpdir, err := ioutil.TempDir("", "TestFollowSymlinkNormal") +func TestFollowSymlinkAbsolute(t *testing.T) { + tmpdir, err := ioutil.TempDir("", "TestFollowSymlinkAbsolute") if err != nil { t.Fatal(err) } @@ -75,8 +75,8 @@ func TestFollowSymlinkRelativePath(t *testing.T) { } } -func TestFollowSymlinkUnderLinkedDir(t *testing.T) { - tmpdir, err := ioutil.TempDir("", "TestFollowSymlinkUnderLinkedDir") +func TestFollowSymlinkSkipSymlinksOutsideScope(t *testing.T) { + tmpdir, err := ioutil.TempDir("", "TestFollowSymlinkSkipSymlinksOutsideScope") if err != nil { t.Fatal(err) } @@ -92,9 +92,9 @@ func TestFollowSymlinkUnderLinkedDir(t *testing.T) { } } -func TestFollowSymlinkRandomString(t *testing.T) { +func TestFollowSymlinkInvalidScopePathPair(t *testing.T) { if _, err := FollowSymlinkInScope("toto", "testdata"); err == nil { - t.Fatal("Random string should fail but didn't") + t.Fatal("expected an error") } } @@ -112,8 +112,8 @@ func TestFollowSymlinkLastLink(t *testing.T) { } } -func TestFollowSymlinkRelativeLink(t *testing.T) { - tmpdir, err := ioutil.TempDir("", "TestFollowSymlinkRelativeLink") +func TestFollowSymlinkRelativeLinkChangeScope(t *testing.T) { + tmpdir, err := ioutil.TempDir("", "TestFollowSymlinkRelativeLinkChangeScope") if err != nil { t.Fatal(err) } @@ -124,10 +124,15 @@ func TestFollowSymlinkRelativeLink(t *testing.T) { if err := testSymlink(tmpdir, "testdata/fs/a/e/c/data", "testdata/fs/b/c/data", "testdata"); err != nil { t.Fatal(err) } + // avoid letting allowing symlink e lead us to ../b + // normalize to the "testdata/fs/a" + if err := testSymlink(tmpdir, "testdata/fs/a/e", "testdata/fs/a/b", "testdata/fs/a"); err != nil { + t.Fatal(err) + } } -func TestFollowSymlinkRelativeLinkScope(t *testing.T) { - tmpdir, err := ioutil.TempDir("", "TestFollowSymlinkRelativeLinkScope") +func TestFollowSymlinkDeepRelativeLinkChangeScope(t *testing.T) { + tmpdir, err := ioutil.TempDir("", "TestFollowSymlinkDeepRelativeLinkChangeScope") if err != nil { t.Fatal(err) } @@ -148,6 +153,14 @@ func TestFollowSymlinkRelativeLinkScope(t *testing.T) { if err := testSymlink(tmpdir, "testdata/fs/a/f", "testdata/fs/test", "testdata/fs"); err != nil { t.Fatal(err) } +} + +func TestFollowSymlinkRelativeLinkChain(t *testing.T) { + tmpdir, err := ioutil.TempDir("", "TestFollowSymlinkRelativeLinkChain") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(tmpdir) // avoid letting symlink g (pointed at by symlink h) take out of scope // TODO: we should probably normalize to scope here because ../[....]/root @@ -161,17 +174,14 @@ func TestFollowSymlinkRelativeLinkScope(t *testing.T) { if err := testSymlink(tmpdir, "testdata/fs/b/h", "testdata/root", "testdata"); err != nil { t.Fatal(err) } +} - // avoid letting allowing symlink e lead us to ../b - // normalize to the "testdata/fs/a" - if err := makeFs(tmpdir, []dirOrLink{ - {path: "testdata/fs/a/e", target: "../b"}, - }); err != nil { - t.Fatal(err) - } - if err := testSymlink(tmpdir, "testdata/fs/a/e", "testdata/fs/a/b", "testdata/fs/a"); err != nil { +func TestFollowSymlinkBreakoutPath(t *testing.T) { + tmpdir, err := ioutil.TempDir("", "TestFollowSymlinkBreakoutPath") + if err != nil { t.Fatal(err) } + defer os.RemoveAll(tmpdir) // avoid letting symlink -> ../directory/file escape from scope // normalize to "testdata/fs/j" @@ -181,6 +191,14 @@ func TestFollowSymlinkRelativeLinkScope(t *testing.T) { if err := 
testSymlink(tmpdir, "testdata/fs/j/k", "testdata/fs/j/i/a", "testdata/fs/j"); err != nil { t.Fatal(err) } +} + +func TestFollowSymlinkToRoot(t *testing.T) { + tmpdir, err := ioutil.TempDir("", "TestFollowSymlinkToRoot") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(tmpdir) // make sure we don't allow escaping to / // normalize to dir @@ -190,25 +208,51 @@ func TestFollowSymlinkRelativeLinkScope(t *testing.T) { if err := testSymlink(tmpdir, "foo", "", ""); err != nil { t.Fatal(err) } +} + +func TestFollowSymlinkSlashDotdot(t *testing.T) { + tmpdir, err := ioutil.TempDir("", "TestFollowSymlinkSlashDotdot") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(tmpdir) + tmpdir = filepath.Join(tmpdir, "dir", "subdir") // make sure we don't allow escaping to / // normalize to dir - if err := makeFs(filepath.Join(tmpdir, "dir", "subdir"), []dirOrLink{{path: "foo", target: "/../../"}}); err != nil { + if err := makeFs(tmpdir, []dirOrLink{{path: "foo", target: "/../../"}}); err != nil { t.Fatal(err) } if err := testSymlink(tmpdir, "foo", "", ""); err != nil { t.Fatal(err) } +} + +func TestFollowSymlinkDotdot(t *testing.T) { + tmpdir, err := ioutil.TempDir("", "TestFollowSymlinkDotdot") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(tmpdir) + tmpdir = filepath.Join(tmpdir, "dir", "subdir") // make sure we stay in scope without leaking information // this also checks for escaping to / // normalize to dir - if err := makeFs(filepath.Join(tmpdir, "dir", "subdir"), []dirOrLink{{path: "foo", target: "../../"}}); err != nil { + if err := makeFs(tmpdir, []dirOrLink{{path: "foo", target: "../../"}}); err != nil { t.Fatal(err) } if err := testSymlink(tmpdir, "foo", "", ""); err != nil { t.Fatal(err) } +} + +func TestFollowSymlinkRelativePath2(t *testing.T) { + tmpdir, err := ioutil.TempDir("", "TestFollowSymlinkRelativePath2") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(tmpdir) if err := makeFs(tmpdir, []dirOrLink{{path: "bar/foo", target: "baz/target"}}); err != nil { t.Fatal(err) From 5f0f0c228d438f01c3b9837eb6e8e97133d9d1f5 Mon Sep 17 00:00:00 2001 From: Tibor Vass Date: Fri, 5 Dec 2014 16:40:53 -0500 Subject: [PATCH 581/592] symlink: add more tests Signed-off-by: Tibor Vass --- pkg/symlink/fs_test.go | 139 +++++++++++++++++++++++++++++++++++++++++ 1 file changed, 139 insertions(+) diff --git a/pkg/symlink/fs_test.go b/pkg/symlink/fs_test.go index 3869e1d914..6b2496c4e0 100644 --- a/pkg/symlink/fs_test.go +++ b/pkg/symlink/fs_test.go @@ -261,3 +261,142 @@ func TestFollowSymlinkRelativePath2(t *testing.T) { t.Fatal(err) } } + +func TestFollowSymlinkScopeLink(t *testing.T) { + tmpdir, err := ioutil.TempDir("", "TestFollowSymlinkScopeLink") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(tmpdir) + + if err := makeFs(tmpdir, []dirOrLink{ + {path: "root2"}, + {path: "root", target: "root2"}, + {path: "root2/foo", target: "../bar"}, + }); err != nil { + t.Fatal(err) + } + if err := testSymlink(tmpdir, "root/foo", "root/bar", "root"); err != nil { + t.Fatal(err) + } +} + +func TestFollowSymlinkRootScope(t *testing.T) { + tmpdir, err := ioutil.TempDir("", "TestFollowSymlinkRootScope") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(tmpdir) + + expected, err := filepath.EvalSymlinks(tmpdir) + if err != nil { + t.Fatal(err) + } + rewrite, err := FollowSymlinkInScope(tmpdir, "/") + if err != nil { + t.Fatal(err) + } + if rewrite != expected { + t.Fatalf("expected %q got %q", expected, rewrite) + } +} + +func TestFollowSymlinkEmpty(t *testing.T) { + res, 
err := FollowSymlinkInScope("", "") + if err != nil { + t.Fatal(err) + } + wd, err := os.Getwd() + if err != nil { + t.Fatal(err) + } + if res != wd { + t.Fatal("expected %q got %q", wd, res) + } +} + +func TestFollowSymlinkCircular(t *testing.T) { + tmpdir, err := ioutil.TempDir("", "TestFollowSymlinkCircular") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(tmpdir) + + if err := makeFs(tmpdir, []dirOrLink{{path: "root/foo", target: "foo"}}); err != nil { + t.Fatal(err) + } + if err := testSymlink(tmpdir, "root/foo", "", "root"); err == nil { + t.Fatal("expected an error for foo -> foo") + } + + if err := makeFs(tmpdir, []dirOrLink{ + {path: "root/bar", target: "baz"}, + {path: "root/baz", target: "../bak"}, + {path: "root/bak", target: "/bar"}, + }); err != nil { + t.Fatal(err) + } + if err := testSymlink(tmpdir, "root/foo", "", "root"); err == nil { + t.Fatal("expected an error for bar -> baz -> bak -> bar") + } +} + +func TestFollowSymlinkComplexChainWithTargetPathsContainingLinks(t *testing.T) { + tmpdir, err := ioutil.TempDir("", "TestFollowSymlinkComplexChainWithTargetPathsContainingLinks") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(tmpdir) + + if err := makeFs(tmpdir, []dirOrLink{ + {path: "root2"}, + {path: "root", target: "root2"}, + {path: "root/a", target: "r/s"}, + {path: "root/r", target: "../root/t"}, + {path: "root/root/t/s/b", target: "/../u"}, + {path: "root/u/c", target: "."}, + {path: "root/u/x/y", target: "../v"}, + {path: "root/u/v", target: "/../w"}, + }); err != nil { + t.Fatal(err) + } + if err := testSymlink(tmpdir, "root/a/b/c/x/y/z", "root/w/z", "root"); err != nil { + t.Fatal(err) + } +} + +func TestFollowSymlinkBreakoutNonExistent(t *testing.T) { + tmpdir, err := ioutil.TempDir("", "TestFollowSymlinkBreakoutNonExistent") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(tmpdir) + + if err := makeFs(tmpdir, []dirOrLink{ + {path: "root/slash", target: "/"}, + {path: "root/sym", target: "/idontexist/../slash"}, + }); err != nil { + t.Fatal(err) + } + if err := testSymlink(tmpdir, "root/sym/file", "root/file", "root"); err != nil { + t.Fatal(err) + } +} + +func TestFollowSymlinkNoLexicalCleaning(t *testing.T) { + tmpdir, err := ioutil.TempDir("", "TestFollowSymlinkNoLexicalCleaning") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(tmpdir) + + if err := makeFs(tmpdir, []dirOrLink{ + {path: "root/sym", target: "/foo/bar"}, + {path: "root/hello", target: "/sym/../baz"}, + }); err != nil { + t.Fatal(err) + } + if err := testSymlink(tmpdir, "root/hello", "root/foo/baz", "root"); err != nil { + t.Fatal(err) + } +} From 0e71a48d4e89ce40c0ab94cc18599481d9e8a6b0 Mon Sep 17 00:00:00 2001 From: Tibor Vass Date: Wed, 3 Dec 2014 14:04:51 -0500 Subject: [PATCH 582/592] Add another symlink breakout test Signed-off-by: Tibor Vass Conflicts: integration-cli/docker_cli_build_test.go --- integration-cli/docker_cli_build_test.go | 54 ++++++++++++++++++++++++ 1 file changed, 54 insertions(+) diff --git a/integration-cli/docker_cli_build_test.go b/integration-cli/docker_cli_build_test.go index 785b3368ca..b9b08cd091 100644 --- a/integration-cli/docker_cli_build_test.go +++ b/integration-cli/docker_cli_build_test.go @@ -3592,3 +3592,57 @@ RUN [ $(ls -l /test | awk '{print $3":"$4}') = 'root:root' ] logDone("build - change permission on single file") } + +func TestBuildSymlinkBreakout(t *testing.T) { + name := "testbuildsymlinkbreakout" + tmpdir, err := ioutil.TempDir("", name) + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(tmpdir) + 
ctx := filepath.Join(tmpdir, "context") + if err := os.MkdirAll(ctx, 0755); err != nil { + t.Fatal(err) + } + if err := ioutil.WriteFile(filepath.Join(ctx, "Dockerfile"), []byte(` + from busybox + add symlink.tar / + add inject /symlink/ + `), 0644); err != nil { + t.Fatal(err) + } + inject := filepath.Join(ctx, "inject") + if err := ioutil.WriteFile(inject, nil, 0644); err != nil { + t.Fatal(err) + } + f, err := os.Create(filepath.Join(ctx, "symlink.tar")) + if err != nil { + t.Fatal(err) + } + w := tar.NewWriter(f) + w.WriteHeader(&tar.Header{ + Name: "symlink2", + Typeflag: tar.TypeSymlink, + Linkname: "/../../../../../../../../../../../../../../", + Uid: os.Getuid(), + Gid: os.Getgid(), + }) + w.WriteHeader(&tar.Header{ + Name: "symlink", + Typeflag: tar.TypeSymlink, + Linkname: filepath.Join("symlink2", tmpdir), + Uid: os.Getuid(), + Gid: os.Getgid(), + }) + w.Close() + f.Close() + if _, err := buildImageFromContext(name, &FakeContext{Dir: ctx}, false); err != nil { + t.Fatal(err) + } + if _, err := os.Lstat(filepath.Join(tmpdir, "inject")); err == nil { + t.Fatal("symlink breakout - inject") + } else if !os.IsNotExist(err) { + t.Fatalf("unexpected error: %v", err) + } + logDone("build - symlink breakout") +} From 7496cbbccc278c084620661812ed5f6390c1d2f1 Mon Sep 17 00:00:00 2001 From: unclejack Date: Thu, 27 Nov 2014 22:14:15 +0200 Subject: [PATCH 583/592] integ-cli: add build test for absolute symlink Signed-off-by: Cristian Staretu --- integration-cli/docker_cli_build_test.go | 75 ++++++++++++++++++++++++ 1 file changed, 75 insertions(+) diff --git a/integration-cli/docker_cli_build_test.go b/integration-cli/docker_cli_build_test.go index b9b08cd091..c984eb2d2e 100644 --- a/integration-cli/docker_cli_build_test.go +++ b/integration-cli/docker_cli_build_test.go @@ -1310,6 +1310,81 @@ COPY https://index.docker.io/robots.txt /`, logDone("build - copy - disallow copy from remote") } +func TestBuildAddBadLinks(t *testing.T) { + const ( + dockerfile = ` + FROM scratch + ADD links.tar / + ADD foo.txt /symlink/ + ` + targetFile = "foo.txt" + ) + var ( + name = "test-link-absolute" + ) + defer deleteImages(name) + ctx, err := fakeContext(dockerfile, nil) + if err != nil { + t.Fatal(err) + } + defer ctx.Close() + + tempDir, err := ioutil.TempDir("", "test-link-absolute-temp-") + if err != nil { + t.Fatalf("failed to create temporary directory: %s", tempDir) + } + defer os.RemoveAll(tempDir) + + symlinkTarget := fmt.Sprintf("/../../../../../../../../../../../..%s", tempDir) + tarPath := filepath.Join(ctx.Dir, "links.tar") + nonExistingFile := filepath.Join(tempDir, targetFile) + fooPath := filepath.Join(ctx.Dir, targetFile) + + tarOut, err := os.Create(tarPath) + if err != nil { + t.Fatal(err) + } + + tarWriter := tar.NewWriter(tarOut) + + header := &tar.Header{ + Name: "symlink", + Typeflag: tar.TypeSymlink, + Linkname: symlinkTarget, + Mode: 0755, + Uid: 0, + Gid: 0, + } + + err = tarWriter.WriteHeader(header) + if err != nil { + t.Fatal(err) + } + + tarWriter.Close() + tarOut.Close() + + foo, err := os.Create(fooPath) + if err != nil { + t.Fatal(err) + } + defer foo.Close() + + if _, err := foo.WriteString("test"); err != nil { + t.Fatal(err) + } + + if _, err := buildImageFromContext(name, ctx, true); err != nil { + t.Fatal(err) + } + + if _, err := os.Stat(nonExistingFile); err == nil || err != nil && !os.IsNotExist(err) { + t.Fatalf("%s shouldn't have been written and it shouldn't exist", nonExistingFile) + } + + logDone("build - ADD must add files in container") +} + // Issue #5270 - 
ensure we throw a better error than "unexpected EOF" // when we can't access files in the context. func TestBuildWithInaccessibleFilesInContext(t *testing.T) { From a57eee2229c2f0c53c32372587fcc2a8327044ea Mon Sep 17 00:00:00 2001 From: unclejack Date: Thu, 27 Nov 2014 22:39:43 +0200 Subject: [PATCH 584/592] integ-cli: add test for links in volumes Signed-off-by: Cristian Staretu --- integration-cli/docker_cli_build_test.go | 52 ++++++++++++++++++++++++ 1 file changed, 52 insertions(+) diff --git a/integration-cli/docker_cli_build_test.go b/integration-cli/docker_cli_build_test.go index c984eb2d2e..e38ac6e47b 100644 --- a/integration-cli/docker_cli_build_test.go +++ b/integration-cli/docker_cli_build_test.go @@ -1385,6 +1385,58 @@ func TestBuildAddBadLinks(t *testing.T) { logDone("build - ADD must add files in container") } +func TestBuildAddBadLinksVolume(t *testing.T) { + const ( + dockerfileTemplate = ` + FROM busybox + RUN ln -s /../../../../../../../../%s /x + VOLUME /x + ADD foo.txt /x/` + targetFile = "foo.txt" + ) + var ( + name = "test-link-absolute-volume" + dockerfile = "" + ) + defer deleteImages(name) + + tempDir, err := ioutil.TempDir("", "test-link-absolute-volume-temp-") + if err != nil { + t.Fatalf("failed to create temporary directory: %s", tempDir) + } + defer os.RemoveAll(tempDir) + + dockerfile = fmt.Sprintf(dockerfileTemplate, tempDir) + nonExistingFile := filepath.Join(tempDir, targetFile) + + ctx, err := fakeContext(dockerfile, nil) + if err != nil { + t.Fatal(err) + } + defer ctx.Close() + fooPath := filepath.Join(ctx.Dir, targetFile) + + foo, err := os.Create(fooPath) + if err != nil { + t.Fatal(err) + } + defer foo.Close() + + if _, err := foo.WriteString("test"); err != nil { + t.Fatal(err) + } + + if _, err := buildImageFromContext(name, ctx, true); err != nil { + t.Fatal(err) + } + + if _, err := os.Stat(nonExistingFile); err == nil || err != nil && !os.IsNotExist(err) { + t.Fatalf("%s shouldn't have been written and it shouldn't exist", nonExistingFile) + } + + logDone("build - ADD should add files in volume") +} + // Issue #5270 - ensure we throw a better error than "unexpected EOF" // when we can't access files in the context. 
func TestBuildWithInaccessibleFilesInContext(t *testing.T) { From e4ba82d50ee4642412a1e1bdf43a7b94fadd2428 Mon Sep 17 00:00:00 2001 From: Cristian Staretu Date: Mon, 8 Dec 2014 15:40:27 -0500 Subject: [PATCH 585/592] Add build tests covering extraction in chroot Signed-off-by: Cristian Staretu --- integration-cli/docker_cli_build_test.go | 112 ++++++++++++++++ integration-cli/docker_cli_save_load_test.go | 134 +++++++++++++++++++ 2 files changed, 246 insertions(+) diff --git a/integration-cli/docker_cli_build_test.go b/integration-cli/docker_cli_build_test.go index e38ac6e47b..dbb527b309 100644 --- a/integration-cli/docker_cli_build_test.go +++ b/integration-cli/docker_cli_build_test.go @@ -3304,6 +3304,118 @@ RUN cat /existing-directory-trailing-slash/test/foo | grep Hi` logDone("build - ADD tar") } +func TestBuildAddTarXz(t *testing.T) { + name := "testbuildaddtarxz" + defer deleteImages(name) + + ctx := func() *FakeContext { + dockerfile := ` + FROM busybox + ADD test.tar.xz / + RUN cat /test/foo | grep Hi` + tmpDir, err := ioutil.TempDir("", "fake-context") + testTar, err := os.Create(filepath.Join(tmpDir, "test.tar")) + if err != nil { + t.Fatalf("failed to create test.tar archive: %v", err) + } + defer testTar.Close() + + tw := tar.NewWriter(testTar) + + if err := tw.WriteHeader(&tar.Header{ + Name: "test/foo", + Size: 2, + }); err != nil { + t.Fatalf("failed to write tar file header: %v", err) + } + if _, err := tw.Write([]byte("Hi")); err != nil { + t.Fatalf("failed to write tar file content: %v", err) + } + if err := tw.Close(); err != nil { + t.Fatalf("failed to close tar archive: %v", err) + } + xzCompressCmd := exec.Command("xz", "test.tar") + xzCompressCmd.Dir = tmpDir + out, _, err := runCommandWithOutput(xzCompressCmd) + if err != nil { + t.Fatal(err, out) + } + + if err := ioutil.WriteFile(filepath.Join(tmpDir, "Dockerfile"), []byte(dockerfile), 0644); err != nil { + t.Fatalf("failed to open destination dockerfile: %v", err) + } + return &FakeContext{Dir: tmpDir} + }() + + defer ctx.Close() + + if _, err := buildImageFromContext(name, ctx, true); err != nil { + t.Fatalf("build failed to complete for TestBuildAddTarXz: %v", err) + } + + logDone("build - ADD tar.xz") +} + +func TestBuildAddTarXzGz(t *testing.T) { + name := "testbuildaddtarxzgz" + defer deleteImages(name) + + ctx := func() *FakeContext { + dockerfile := ` + FROM busybox + ADD test.tar.xz.gz / + RUN ls /test.tar.xz.gz` + tmpDir, err := ioutil.TempDir("", "fake-context") + testTar, err := os.Create(filepath.Join(tmpDir, "test.tar")) + if err != nil { + t.Fatalf("failed to create test.tar archive: %v", err) + } + defer testTar.Close() + + tw := tar.NewWriter(testTar) + + if err := tw.WriteHeader(&tar.Header{ + Name: "test/foo", + Size: 2, + }); err != nil { + t.Fatalf("failed to write tar file header: %v", err) + } + if _, err := tw.Write([]byte("Hi")); err != nil { + t.Fatalf("failed to write tar file content: %v", err) + } + if err := tw.Close(); err != nil { + t.Fatalf("failed to close tar archive: %v", err) + } + + xzCompressCmd := exec.Command("xz", "test.tar") + xzCompressCmd.Dir = tmpDir + out, _, err := runCommandWithOutput(xzCompressCmd) + if err != nil { + t.Fatal(err, out) + } + + gzipCompressCmd := exec.Command("gzip", "test.tar.xz") + gzipCompressCmd.Dir = tmpDir + out, _, err = runCommandWithOutput(gzipCompressCmd) + if err != nil { + t.Fatal(err, out) + } + + if err := ioutil.WriteFile(filepath.Join(tmpDir, "Dockerfile"), []byte(dockerfile), 0644); err != nil { + t.Fatalf("failed to open 
destination dockerfile: %v", err) + } + return &FakeContext{Dir: tmpDir} + }() + + defer ctx.Close() + + if _, err := buildImageFromContext(name, ctx, true); err != nil { + t.Fatalf("build failed to complete for TestBuildAddTarXz: %v", err) + } + + logDone("build - ADD tar.xz.gz") +} + func TestBuildFromGIT(t *testing.T) { name := "testbuildfromgit" defer deleteImages(name) diff --git a/integration-cli/docker_cli_save_load_test.go b/integration-cli/docker_cli_save_load_test.go index 1ee526865b..94bfe3d6a8 100644 --- a/integration-cli/docker_cli_save_load_test.go +++ b/integration-cli/docker_cli_save_load_test.go @@ -104,6 +104,140 @@ func TestSaveAndLoadRepoStdout(t *testing.T) { logDone("save - do not save to a tty") } +// save a repo using gz compression and try to load it using stdout +func TestSaveXzAndLoadRepoStdout(t *testing.T) { + tempDir, err := ioutil.TempDir("", "test-save-xz-gz-load-repo-stdout") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(tempDir) + + tarballPath := filepath.Join(tempDir, "foobar-save-load-test.tar.xz.gz") + + runCmd := exec.Command(dockerBinary, "run", "-d", "busybox", "true") + out, _, err := runCommandWithOutput(runCmd) + if err != nil { + t.Fatalf("failed to create a container: %v %v", out, err) + } + + cleanedContainerID := stripTrailingCharacters(out) + + repoName := "foobar-save-load-test-xz-gz" + + inspectCmd := exec.Command(dockerBinary, "inspect", cleanedContainerID) + out, _, err = runCommandWithOutput(inspectCmd) + if err != nil { + t.Fatalf("output should've been a container id: %v %v", cleanedContainerID, err) + } + + commitCmd := exec.Command(dockerBinary, "commit", cleanedContainerID, repoName) + out, _, err = runCommandWithOutput(commitCmd) + if err != nil { + t.Fatalf("failed to commit container: %v %v", out, err) + } + + inspectCmd = exec.Command(dockerBinary, "inspect", repoName) + before, _, err := runCommandWithOutput(inspectCmd) + if err != nil { + t.Fatalf("the repo should exist before saving it: %v %v", before, err) + } + + saveCmdTemplate := `%v save %v | xz -c | gzip -c > %s` + saveCmdFinal := fmt.Sprintf(saveCmdTemplate, dockerBinary, repoName, tarballPath) + saveCmd := exec.Command("bash", "-c", saveCmdFinal) + out, _, err = runCommandWithOutput(saveCmd) + if err != nil { + t.Fatalf("failed to save repo: %v %v", out, err) + } + + deleteImages(repoName) + + loadCmdFinal := fmt.Sprintf(`cat %s | docker load`, tarballPath) + loadCmd := exec.Command("bash", "-c", loadCmdFinal) + out, _, err = runCommandWithOutput(loadCmd) + if err == nil { + t.Fatalf("expected error, but succeeded with no error and output: %v", out) + } + + inspectCmd = exec.Command(dockerBinary, "inspect", repoName) + after, _, err := runCommandWithOutput(inspectCmd) + if err == nil { + t.Fatalf("the repo should not exist: %v", after) + } + + deleteContainer(cleanedContainerID) + deleteImages(repoName) + + logDone("load - save a repo with xz compression & load it using stdout") +} + +// save a repo using xz+gz compression and try to load it using stdout +func TestSaveXzGzAndLoadRepoStdout(t *testing.T) { + tempDir, err := ioutil.TempDir("", "test-save-xz-gz-load-repo-stdout") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(tempDir) + + tarballPath := filepath.Join(tempDir, "foobar-save-load-test.tar.xz.gz") + + runCmd := exec.Command(dockerBinary, "run", "-d", "busybox", "true") + out, _, err := runCommandWithOutput(runCmd) + if err != nil { + t.Fatalf("failed to create a container: %v %v", out, err) + } + + cleanedContainerID := 
stripTrailingCharacters(out) + + repoName := "foobar-save-load-test-xz-gz" + + inspectCmd := exec.Command(dockerBinary, "inspect", cleanedContainerID) + out, _, err = runCommandWithOutput(inspectCmd) + if err != nil { + t.Fatalf("output should've been a container id: %v %v", cleanedContainerID, err) + } + + commitCmd := exec.Command(dockerBinary, "commit", cleanedContainerID, repoName) + out, _, err = runCommandWithOutput(commitCmd) + if err != nil { + t.Fatalf("failed to commit container: %v %v", out, err) + } + + inspectCmd = exec.Command(dockerBinary, "inspect", repoName) + before, _, err := runCommandWithOutput(inspectCmd) + if err != nil { + t.Fatalf("the repo should exist before saving it: %v %v", before, err) + } + + saveCmdTemplate := `%v save %v | xz -c | gzip -c > %s` + saveCmdFinal := fmt.Sprintf(saveCmdTemplate, dockerBinary, repoName, tarballPath) + saveCmd := exec.Command("bash", "-c", saveCmdFinal) + out, _, err = runCommandWithOutput(saveCmd) + if err != nil { + t.Fatalf("failed to save repo: %v %v", out, err) + } + + deleteImages(repoName) + + loadCmdFinal := fmt.Sprintf(`cat %s | docker load`, tarballPath) + loadCmd := exec.Command("bash", "-c", loadCmdFinal) + out, _, err = runCommandWithOutput(loadCmd) + if err == nil { + t.Fatalf("expected error, but succeeded with no error and output: %v", out) + } + + inspectCmd = exec.Command(dockerBinary, "inspect", repoName) + after, _, err := runCommandWithOutput(inspectCmd) + if err == nil { + t.Fatalf("the repo should not exist: %v", after) + } + + deleteContainer(cleanedContainerID) + deleteImages(repoName) + + logDone("load - save a repo with xz+gz compression & load it using stdout") +} + func TestSaveSingleTag(t *testing.T) { repoName := "foobar-save-single-tag-test" From 7862f831fe99a221a0499b7764a8709e5f463bb9 Mon Sep 17 00:00:00 2001 From: Michael Crosby Date: Mon, 8 Dec 2014 16:14:56 -0500 Subject: [PATCH 586/592] Update chroot apply layer to handle decompression outside chroot Signed-off-by: Michael Crosby Conflicts: pkg/archive/diff.go pkg/chrootarchive/archive.go Conflicts: pkg/archive/diff.go pkg/chrootarchive/diff.go --- pkg/archive/diff.go | 39 ++++++++++++++++++------------------ pkg/chrootarchive/archive.go | 25 ++++++++++++++++++----- pkg/chrootarchive/diff.go | 27 ++++++++++++++++++------- 3 files changed, 60 insertions(+), 31 deletions(-) diff --git a/pkg/archive/diff.go b/pkg/archive/diff.go index c6118c5db3..ba22c41f3c 100644 --- a/pkg/archive/diff.go +++ b/pkg/archive/diff.go @@ -15,24 +15,7 @@ import ( "github.com/docker/docker/pkg/system" ) -// ApplyLayer parses a diff in the standard layer format from `layer`, and -// applies it to the directory `dest`. -func ApplyLayer(dest string, layer ArchiveReader) error { - dest = filepath.Clean(dest) - - // We need to be able to set any perms - oldmask, err := system.Umask(0) - if err != nil { - return err - } - - defer system.Umask(oldmask) // ignore err, ErrNotSupportedPlatform - - layer, err = DecompressStream(layer) - if err != nil { - return err - } - +func UnpackLayer(dest string, layer ArchiveReader) error { tr := tar.NewReader(layer) trBuf := pools.BufioReader32KPool.Get(tr) defer pools.BufioReader32KPool.Put(trBuf) @@ -159,6 +142,24 @@ func ApplyLayer(dest string, layer ArchiveReader) error { return err } } - return nil } + +// ApplyLayer parses a diff in the standard layer format from `layer`, and +// applies it to the directory `dest`. 
+func ApplyLayer(dest string, layer ArchiveReader) error { + dest = filepath.Clean(dest) + + // We need to be able to set any perms + oldmask, err := system.Umask(0) + if err != nil { + return err + } + defer system.Umask(oldmask) // ignore err, ErrNotSupportedPlatform + + layer, err = DecompressStream(layer) + if err != nil { + return err + } + return UnpackLayer(dest, layer) +} diff --git a/pkg/chrootarchive/archive.go b/pkg/chrootarchive/archive.go index 2942d9d6c0..a29d30e4f9 100644 --- a/pkg/chrootarchive/archive.go +++ b/pkg/chrootarchive/archive.go @@ -15,6 +15,15 @@ import ( "github.com/docker/docker/pkg/reexec" ) +var chrootArchiver = &archive.Archiver{Untar} + +func chroot(path string) error { + if err := syscall.Chroot(path); err != nil { + return err + } + return syscall.Chdir("/") +} + func untar() { runtime.LockOSThread() flag.Parse() @@ -38,11 +47,17 @@ func untar() { os.Exit(0) } -var ( - chrootArchiver = &archive.Archiver{Untar} -) +func Untar(tarArchive io.Reader, dest string, options *archive.TarOptions) error { + if tarArchive == nil { + return fmt.Errorf("Empty archive") + } + if options == nil { + options = &archive.TarOptions{} + } + if options.Excludes == nil { + options.Excludes = []string{} + } -func Untar(archive io.Reader, dest string, options *archive.TarOptions) error { var buf bytes.Buffer enc := json.NewEncoder(&buf) if err := enc.Encode(options); err != nil { @@ -55,7 +70,7 @@ func Untar(archive io.Reader, dest string, options *archive.TarOptions) error { } cmd := reexec.Command("docker-untar", dest, buf.String()) - cmd.Stdin = archive + cmd.Stdin = tarArchive out, err := cmd.CombinedOutput() if err != nil { return fmt.Errorf("Untar %s %s", err, out) diff --git a/pkg/chrootarchive/diff.go b/pkg/chrootarchive/diff.go index f9f9b9d5e0..d4e9529b6d 100644 --- a/pkg/chrootarchive/diff.go +++ b/pkg/chrootarchive/diff.go @@ -3,8 +3,10 @@ package chrootarchive import ( "flag" "fmt" + "io" "io/ioutil" "os" + "path/filepath" "runtime" "syscall" @@ -16,19 +18,20 @@ func applyLayer() { runtime.LockOSThread() flag.Parse() - if err := syscall.Chroot(flag.Arg(0)); err != nil { - fatal(err) - } - if err := syscall.Chdir("/"); err != nil { + if err := chroot(flag.Arg(0)); err != nil { fatal(err) } + // We need to be able to set any perms + oldmask := syscall.Umask(0) + defer syscall.Umask(oldmask) tmpDir, err := ioutil.TempDir("/", "temp-docker-extract") if err != nil { fatal(err) } os.Setenv("TMPDIR", tmpDir) - if err := archive.ApplyLayer("/", os.Stdin); err != nil { - os.RemoveAll(tmpDir) + err = archive.UnpackLayer("/", os.Stdin) + os.RemoveAll(tmpDir) + if err != nil { fatal(err) } os.RemoveAll(tmpDir) @@ -37,8 +40,18 @@ func applyLayer() { } func ApplyLayer(dest string, layer archive.ArchiveReader) error { + dest = filepath.Clean(dest) + decompressed, err := archive.DecompressStream(layer) + if err != nil { + return err + } + defer func() { + if c, ok := decompressed.(io.Closer); ok { + c.Close() + } + }() cmd := reexec.Command("docker-applyLayer", dest) - cmd.Stdin = layer + cmd.Stdin = decompressed out, err := cmd.CombinedOutput() if err != nil { return fmt.Errorf("ApplyLayer %s %s", err, out) From 001ac15b54686761960352103ebf4ff2c6e1d803 Mon Sep 17 00:00:00 2001 From: Michael Crosby Date: Mon, 8 Dec 2014 16:19:24 -0500 Subject: [PATCH 587/592] Decompress archive before streaming the unpack in a chroot Signed-off-by: Michael Crosby Conflicts: pkg/archive/archive.go pkg/chrootarchive/archive.go Conflicts: pkg/archive/archive.go --- pkg/archive/archive.go | 52 
+++++++++++++++++------------------- pkg/chrootarchive/archive.go | 28 ++++++++++--------- 2 files changed, 41 insertions(+), 39 deletions(-) diff --git a/pkg/archive/archive.go b/pkg/archive/archive.go index ead85be0bf..ec45d8546d 100644 --- a/pkg/archive/archive.go +++ b/pkg/archive/archive.go @@ -464,32 +464,7 @@ func TarWithOptions(srcPath string, options *TarOptions) (io.ReadCloser, error) return pipeReader, nil } -// Untar reads a stream of bytes from `archive`, parses it as a tar archive, -// and unpacks it into the directory at `dest`. -// The archive may be compressed with one of the following algorithms: -// identity (uncompressed), gzip, bzip2, xz. -// FIXME: specify behavior when target path exists vs. doesn't exist. -func Untar(archive io.Reader, dest string, options *TarOptions) error { - dest = filepath.Clean(dest) - - if options == nil { - options = &TarOptions{} - } - - if archive == nil { - return fmt.Errorf("Empty archive") - } - - if options.Excludes == nil { - options.Excludes = []string{} - } - - decompressedArchive, err := DecompressStream(archive) - if err != nil { - return err - } - defer decompressedArchive.Close() - +func Unpack(decompressedArchive io.Reader, dest string, options *TarOptions) error { tr := tar.NewReader(decompressedArchive) trBuf := pools.BufioReader32KPool.Get(nil) defer pools.BufioReader32KPool.Put(trBuf) @@ -572,10 +547,33 @@ loop: return err } } - return nil } +// Untar reads a stream of bytes from `archive`, parses it as a tar archive, +// and unpacks it into the directory at `dest`. +// The archive may be compressed with one of the following algorithms: +// identity (uncompressed), gzip, bzip2, xz. +// FIXME: specify behavior when target path exists vs. doesn't exist. +func Untar(archive io.Reader, dest string, options *TarOptions) error { + if archive == nil { + return fmt.Errorf("Empty archive") + } + dest = filepath.Clean(dest) + if options == nil { + options = &TarOptions{} + } + if options.Excludes == nil { + options.Excludes = []string{} + } + decompressedArchive, err := DecompressStream(archive) + if err != nil { + return err + } + defer decompressedArchive.Close() + return Unpack(decompressedArchive, dest, options) +} + func (archiver *Archiver) TarUntar(src, dst string) error { log.Debugf("TarUntar(%s %s)", src, dst) archive, err := TarWithOptions(src, &TarOptions{Compression: Uncompressed}) diff --git a/pkg/chrootarchive/archive.go b/pkg/chrootarchive/archive.go index a29d30e4f9..0077f930d6 100644 --- a/pkg/chrootarchive/archive.go +++ b/pkg/chrootarchive/archive.go @@ -7,6 +7,7 @@ import ( "fmt" "io" "os" + "path/filepath" "runtime" "strings" "syscall" @@ -27,19 +28,14 @@ func chroot(path string) error { func untar() { runtime.LockOSThread() flag.Parse() - - if err := syscall.Chroot(flag.Arg(0)); err != nil { + if err := chroot(flag.Arg(0)); err != nil { fatal(err) } - if err := syscall.Chdir("/"); err != nil { + var options *archive.TarOptions + if err := json.NewDecoder(strings.NewReader(flag.Arg(1))).Decode(&options); err != nil { fatal(err) } - options := new(archive.TarOptions) - dec := json.NewDecoder(strings.NewReader(flag.Arg(1))) - if err := dec.Decode(options); err != nil { - fatal(err) - } - if err := archive.Untar(os.Stdin, "/", options); err != nil { + if err := archive.Unpack(os.Stdin, "/", options); err != nil { fatal(err) } // fully consume stdin in case it is zero padded @@ -58,8 +54,10 @@ func Untar(tarArchive io.Reader, dest string, options *archive.TarOptions) error options.Excludes = []string{} } - var buf 
bytes.Buffer - enc := json.NewEncoder(&buf) + var ( + buf bytes.Buffer + enc = json.NewEncoder(&buf) + ) if err := enc.Encode(options); err != nil { return fmt.Errorf("Untar json encode: %v", err) } @@ -68,9 +66,15 @@ func Untar(tarArchive io.Reader, dest string, options *archive.TarOptions) error return err } } + dest = filepath.Clean(dest) + decompressedArchive, err := archive.DecompressStream(tarArchive) + if err != nil { + return err + } + defer decompressedArchive.Close() cmd := reexec.Command("docker-untar", dest, buf.String()) - cmd.Stdin = tarArchive + cmd.Stdin = decompressedArchive out, err := cmd.CombinedOutput() if err != nil { return fmt.Errorf("Untar %s %s", err, out) From af2021955cb01507984cbd076edaf3caaf5b89b3 Mon Sep 17 00:00:00 2001 From: Arnaud Porterie Date: Mon, 8 Dec 2014 14:33:46 -0800 Subject: [PATCH 588/592] Add integration test for xz path issue Signed-off-by: Arnaud Porterie Conflicts: integration-cli/docker_cli_build_test.go --- integration-cli/docker_cli_build_test.go | 29 ++++++++++++++++++++++++ 1 file changed, 29 insertions(+) diff --git a/integration-cli/docker_cli_build_test.go b/integration-cli/docker_cli_build_test.go index dbb527b309..0fd5b1363d 100644 --- a/integration-cli/docker_cli_build_test.go +++ b/integration-cli/docker_cli_build_test.go @@ -3885,3 +3885,32 @@ func TestBuildSymlinkBreakout(t *testing.T) { } logDone("build - symlink breakout") } + +func TestBuildXZHost(t *testing.T) { + name := "testbuildxzhost" + defer deleteImages(name) + + ctx, err := fakeContext(` +FROM busybox +ADD xz /usr/local/sbin/ +RUN chmod 755 /usr/local/sbin/xz +ADD test.xz / +RUN [ ! -e /injected ]`, + map[string]string{ + "test.xz": "\xfd\x37\x7a\x58\x5a\x00\x00\x04\xe6\xd6\xb4\x46\x02\x00" + + "\x21\x01\x16\x00\x00\x00\x74\x2f\xe5\xa3\x01\x00\x3f\xfd" + + "\x37\x7a\x58\x5a\x00\x00\x04\xe6\xd6\xb4\x46\x02\x00\x21", + "xz": "#!/bin/sh\ntouch /injected", + }) + + if err != nil { + t.Fatal(err) + } + defer ctx.Close() + + if _, err := buildImageFromContext(name, ctx, true); err != nil { + t.Fatal(err) + } + + logDone("build - xz host is being used") +} From bff1d9dbce76bed1e267a067eb4a1a74ef4da312 Mon Sep 17 00:00:00 2001 From: unclejack Date: Thu, 27 Nov 2014 23:55:03 +0200 Subject: [PATCH 589/592] validate image ID properly & before load Signed-off-by: Cristian Staretu Conflicts: graph/load.go --- graph/load.go | 5 +++++ graph/tags_unit_test.go | 2 +- registry/registry.go | 4 ++-- utils/utils.go | 12 +++++++----- 4 files changed, 15 insertions(+), 8 deletions(-) diff --git a/graph/load.go b/graph/load.go index 76172d2555..6ef219c077 100644 --- a/graph/load.go +++ b/graph/load.go @@ -14,6 +14,7 @@ import ( "github.com/docker/docker/image" "github.com/docker/docker/pkg/archive" "github.com/docker/docker/pkg/chrootarchive" + "github.com/docker/docker/utils" ) // Loads a set of images into the repository. This is the complementary of ImageExport. 
@@ -114,6 +115,10 @@ func (s *TagStore) recursiveLoad(eng *engine.Engine, address, tmpImageDir string log.Debugf("Error unmarshalling json", err) return err } + if err := utils.ValidateID(img.ID); err != nil { + log.Debugf("Error validating ID: %s", err) + return err + } if img.Parent != "" { if !s.graph.Exists(img.Parent) { if err := s.recursiveLoad(eng, img.Parent, tmpImageDir); err != nil { diff --git a/graph/tags_unit_test.go b/graph/tags_unit_test.go index 03b6662019..339fb51fc9 100644 --- a/graph/tags_unit_test.go +++ b/graph/tags_unit_test.go @@ -16,7 +16,7 @@ import ( const ( testImageName = "myapp" - testImageID = "foo" + testImageID = "1a2d3c4d4e5fa2d2a21acea242a5e2345d3aefc3e7dfa2a2a2a21a2a2ad2d234" ) func fakeTar() (io.Reader, error) { diff --git a/registry/registry.go b/registry/registry.go index e1d22b0908..d503a63d62 100644 --- a/registry/registry.go +++ b/registry/registry.go @@ -23,7 +23,6 @@ var ( ErrInvalidRepositoryName = errors.New("Invalid repository name (ex: \"registry.domain.tld/myrepos\")") ErrDoesNotExist = errors.New("Image does not exist") errLoginRequired = errors.New("Authentication is required.") - validHex = regexp.MustCompile(`^([a-f0-9]{64})$`) validNamespace = regexp.MustCompile(`^([a-z0-9_]{4,30})$`) validRepo = regexp.MustCompile(`^([a-z0-9-_.]+)$`) ) @@ -171,7 +170,8 @@ func validateRepositoryName(repositoryName string) error { namespace = "library" name = nameParts[0] - if validHex.MatchString(name) { + // the repository name must not be a valid image ID + if err := utils.ValidateID(name); err == nil { return fmt.Errorf("Invalid repository name (%s), cannot specify 64-byte hexadecimal strings", name) } } else { diff --git a/utils/utils.go b/utils/utils.go index e529cb9687..8d3b3eb73e 100644 --- a/utils/utils.go +++ b/utils/utils.go @@ -31,6 +31,10 @@ type KeyValuePair struct { Value string } +var ( + validHex = regexp.MustCompile(`^([a-f0-9]{64})$`) +) + // Request a given URL and return an io.Reader func Download(url string) (resp *http.Response, err error) { if resp, err = http.Get(url); err != nil { @@ -190,11 +194,9 @@ func GenerateRandomID() string { } func ValidateID(id string) error { - if id == "" { - return fmt.Errorf("Id can't be empty") - } - if strings.Contains(id, ":") { - return fmt.Errorf("Invalid character in id: ':'") + if ok := validHex.MatchString(id); !ok { + err := fmt.Errorf("image ID '%s' is invalid", id) + return err } return nil } From fdabd6b14e394721aaf61402bb88073e7eb424bf Mon Sep 17 00:00:00 2001 From: Tibor Vass Date: Wed, 10 Dec 2014 18:19:04 -0500 Subject: [PATCH 590/592] docs: Add release notes Signed-off-by: Tibor Vass --- docs/sources/release-notes.md | 34 ++++++++++++++++++++++++++++++++++ 1 file changed, 34 insertions(+) diff --git a/docs/sources/release-notes.md b/docs/sources/release-notes.md index cf528bc729..7ec08b1a84 100644 --- a/docs/sources/release-notes.md +++ b/docs/sources/release-notes.md @@ -4,6 +4,40 @@ page_keywords: docker, documentation, about, technology, understanding, release #Release Notes +##Version 1.3.3 +(2014-12-11) + +This release fixes several security issues. In order to encourage immediate +upgrading, this release also patches some critical bugs. All users are highly +encouraged to upgrade as soon as possible. + +*Security fixes* + +Patches and changes were made to address the following vulnerabilities: + +* CVE-2014-9356: Path traversal during processing of absolute symlinks. 
+Absolute symlinks were not adequately checked for traversal which created a
+vulnerability via image extraction and/or volume mounts.
+* CVE-2014-9357: Escalation of privileges during decompression of LZMA (.xz)
+archives. Docker 1.3.2 added `chroot` for archive extraction. This created a
+vulnerability that could allow malicious images or builds to write files to the
+host system and escape containerization, leading to privilege escalation.
+* CVE-2014-9358: Path traversal and spoofing opportunities via image
+identifiers. Image IDs passed either via `docker load` or registry communications
+were not sufficiently validated. This created a vulnerability to path traversal
+attacks wherein malicious images or repository spoofing could lead to graph
+corruption and manipulation.
+
+*Runtime fixes*
+
+* Fixed an issue that caused image archives to be read slowly.
+
+*Client fixes*
+
+* Fixed a regression related to STDIN redirection.
+* Fixed a regression involving `docker cp` when the current directory is the
+destination.
+
 ##Version 1.3.2
 (2014-11-24)
 

From 5b851982da2b4fc4c42a7ad0279c65f8d1de08de Mon Sep 17 00:00:00 2001
From: unclejack
Date: Mon, 1 Dec 2014 13:00:44 +0200
Subject: [PATCH 591/592] Bump version to v1.3.3

Conflicts:
	VERSION

Signed-off-by: Cristian Staretu
Signed-off-by: Tibor Vass
---
 CHANGELOG.md | 14 ++++++++++++++
 VERSION      |  2 +-
 2 files changed, 15 insertions(+), 1 deletion(-)

diff --git a/CHANGELOG.md b/CHANGELOG.md
index 2d8f5cce8c..346fe4ce2f 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -1,5 +1,19 @@
 # Changelog
 
+## 1.3.3 (2014-12-11)
+
+#### Security
+- Fix path traversal vulnerability in processing of absolute symbolic links (CVE-2014-9356)
+- Fix decompression of xz image archives, preventing privilege escalation (CVE-2014-9357)
+- Validate image IDs (CVE-2014-9358)
+
+#### Runtime
+- Fix an issue causing image archives to be read slowly
+
+#### Client
+- Fix a regression related to stdin redirection
+- Fix a regression with `docker cp` when destination is the current directory
+
 ## 1.3.2 (2014-11-20)
 
 #### Security
diff --git a/VERSION b/VERSION
index 259bb263c9..31e5c84349 100644
--- a/VERSION
+++ b/VERSION
@@ -1 +1 @@
-1.3.2-dev
+1.3.3

From 9f48be69e5c315f4e08390e796fbceaed3ed92e0 Mon Sep 17 00:00:00 2001
From: Tibor Vass
Date: Thu, 11 Dec 2014 16:29:49 -0500
Subject: [PATCH 592/592] Change version to 1.3.3-dev

Signed-off-by: Tibor Vass
---
 VERSION | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/VERSION b/VERSION
index 31e5c84349..456ea726a0 100644
--- a/VERSION
+++ b/VERSION
@@ -1 +1 @@
-1.3.3
+1.3.3-dev