Fix 2585, rebase to master

This commit is contained in:
Andy Rothfusz 2013-11-08 15:15:45 -08:00
commit 34d294c461
38 changed files with 584 additions and 306 deletions

1
.gitignore vendored
View File

@ -18,3 +18,4 @@ bundles/
.hg/ .hg/
.git/ .git/
vendor/pkg/ vendor/pkg/
pyenv

View File

@ -17,7 +17,6 @@
+ Prevent DNS server conflicts in CreateBridgeIface + Prevent DNS server conflicts in CreateBridgeIface
+ Validate bind mounts on the server side + Validate bind mounts on the server side
+ Use parent image config in docker build + Use parent image config in docker build
* Fix regression in /etc/hosts
#### Client #### Client

View File

@ -913,8 +913,16 @@ func (cli *DockerCli) CmdImport(args ...string) error {
cmd.Usage() cmd.Usage()
return nil return nil
} }
src := cmd.Arg(0)
repository, tag := utils.ParseRepositoryTag(cmd.Arg(1)) var src, repository, tag string
if cmd.NArg() == 3 {
fmt.Fprintf(cli.err, "[DEPRECATED] The format 'URL|- [REPOSITORY [TAG]]' as been deprecated. Please use URL|- [REPOSITORY[:TAG]]\n")
src, repository, tag = cmd.Arg(0), cmd.Arg(1), cmd.Arg(2)
} else {
src = cmd.Arg(0)
repository, tag = utils.ParseRepositoryTag(cmd.Arg(1))
}
v := url.Values{} v := url.Values{}
v.Set("repo", repository) v.Set("repo", repository)
v.Set("tag", tag) v.Set("tag", tag)
@ -1349,8 +1357,16 @@ func (cli *DockerCli) CmdCommit(args ...string) error {
if err := cmd.Parse(args); err != nil { if err := cmd.Parse(args); err != nil {
return nil return nil
} }
name := cmd.Arg(0)
repository, tag := utils.ParseRepositoryTag(cmd.Arg(1)) var name, repository, tag string
if cmd.NArg() == 3 {
fmt.Fprintf(cli.err, "[DEPRECATED] The format 'CONTAINER [REPOSITORY [TAG]]' as been deprecated. Please use CONTAINER [REPOSITORY[:TAG]]\n")
name, repository, tag = cmd.Arg(0), cmd.Arg(1), cmd.Arg(2)
} else {
name = cmd.Arg(0)
repository, tag = utils.ParseRepositoryTag(cmd.Arg(1))
}
if name == "" { if name == "" {
cmd.Usage() cmd.Usage()
@ -1387,7 +1403,7 @@ func (cli *DockerCli) CmdCommit(args ...string) error {
func (cli *DockerCli) CmdEvents(args ...string) error { func (cli *DockerCli) CmdEvents(args ...string) error {
cmd := Subcmd("events", "[OPTIONS]", "Get real time events from the server") cmd := Subcmd("events", "[OPTIONS]", "Get real time events from the server")
since := cmd.String("since", "", "Show events previously created (used for polling).") since := cmd.String("since", "", "Show previously created events and then stream.")
if err := cmd.Parse(args); err != nil { if err := cmd.Parse(args); err != nil {
return nil return nil
} }
@ -1399,7 +1415,17 @@ func (cli *DockerCli) CmdEvents(args ...string) error {
v := url.Values{} v := url.Values{}
if *since != "" { if *since != "" {
v.Set("since", *since) loc := time.FixedZone(time.Now().Zone())
format := "2006-01-02 15:04:05 -0700 MST"
if len(*since) < len(format) {
format = format[:len(*since)]
}
if t, err := time.ParseInLocation(format, *since, loc); err == nil {
v.Set("since", strconv.FormatInt(t.Unix(), 10))
} else {
v.Set("since", *since)
}
} }
if err := cli.stream("GET", "/events?"+v.Encode(), nil, cli.out, nil); err != nil { if err := cli.stream("GET", "/events?"+v.Encode(), nil, cli.out, nil); err != nil {
@ -1656,9 +1682,16 @@ func (cli *DockerCli) CmdTag(args ...string) error {
return nil return nil
} }
v := url.Values{} var repository, tag string
repository, tag := utils.ParseRepositoryTag(cmd.Arg(1))
if cmd.NArg() == 3 {
fmt.Fprintf(cli.err, "[DEPRECATED] The format 'IMAGE [REPOSITORY [TAG]]' as been deprecated. Please use IMAGE [REPOSITORY[:TAG]]\n")
repository, tag = cmd.Arg(1), cmd.Arg(2)
} else {
repository, tag = utils.ParseRepositoryTag(cmd.Arg(1))
}
v := url.Values{}
v.Set("repo", repository) v.Set("repo", repository)
v.Set("tag", tag) v.Set("tag", tag)

View File

@ -1,8 +1,8 @@
package docker package docker
import ( import (
"net"
"github.com/dotcloud/docker/engine" "github.com/dotcloud/docker/engine"
"net"
) )
// FIXME: separate runtime configuration from http api configuration // FIXME: separate runtime configuration from http api configuration

View File

@ -133,7 +133,11 @@ type PortBinding struct {
type Port string type Port string
func (p Port) Proto() string { func (p Port) Proto() string {
return strings.Split(string(p), "/")[1] parts := strings.Split(string(p), "/")
if len(parts) == 1 {
return "tcp"
}
return parts[1]
} }
func (p Port) Port() string { func (p Port) Port() string {
@ -199,7 +203,7 @@ func ParseRun(args []string, capabilities *Capabilities) (*Config, *HostConfig,
cmd.Var(flVolumes, "v", "Bind mount a volume (e.g. from the host: -v /host:/container, from docker: -v /container)") cmd.Var(flVolumes, "v", "Bind mount a volume (e.g. from the host: -v /host:/container, from docker: -v /container)")
var flVolumesFrom utils.ListOpts var flVolumesFrom utils.ListOpts
cmd.Var(&flVolumesFrom, "volumes-from", "Mount volumes from the specified container") cmd.Var(&flVolumesFrom, "volumes-from", "Mount volumes from the specified container(s)")
flEntrypoint := cmd.String("entrypoint", "", "Overwrite the default entrypoint of the image") flEntrypoint := cmd.String("entrypoint", "", "Overwrite the default entrypoint of the image")
@ -394,9 +398,9 @@ func (container *Container) Inject(file io.Reader, pth string) error {
if _, err := os.Stat(path.Join(container.rwPath(), pth)); err == nil { if _, err := os.Stat(path.Join(container.rwPath(), pth)); err == nil {
// Since err is nil, the path could be stat'd and it exists // Since err is nil, the path could be stat'd and it exists
return fmt.Errorf("%s exists", pth) return fmt.Errorf("%s exists", pth)
} else if ! os.IsNotExist(err) { } else if !os.IsNotExist(err) {
// Expect err might be that the file doesn't exist, so // Expect err might be that the file doesn't exist, so
// if it's some other error, return that. // if it's some other error, return that.
return err return err
} }
@ -763,9 +767,23 @@ func (container *Container) Start() (err error) {
// Apply volumes from another container if requested // Apply volumes from another container if requested
if container.Config.VolumesFrom != "" { if container.Config.VolumesFrom != "" {
volumes := strings.Split(container.Config.VolumesFrom, ",") containerSpecs := strings.Split(container.Config.VolumesFrom, ",")
for _, v := range volumes { for _, containerSpec := range containerSpecs {
c := container.runtime.Get(v) mountRW := true
specParts := strings.SplitN(containerSpec, ":", 2)
switch len(specParts) {
case 0:
return fmt.Errorf("Malformed volumes-from specification: %s", container.Config.VolumesFrom)
case 2:
switch specParts[1] {
case "ro":
mountRW = false
case "rw": // mountRW is already true
default:
return fmt.Errorf("Malformed volumes-from speficication: %s", containerSpec)
}
}
c := container.runtime.Get(specParts[0])
if c == nil { if c == nil {
return fmt.Errorf("Container %s not found. Impossible to mount its volumes", container.ID) return fmt.Errorf("Container %s not found. Impossible to mount its volumes", container.ID)
} }
@ -778,7 +796,7 @@ func (container *Container) Start() (err error) {
} }
container.Volumes[volPath] = id container.Volumes[volPath] = id
if isRW, exists := c.VolumesRW[volPath]; exists { if isRW, exists := c.VolumesRW[volPath]; exists {
container.VolumesRW[volPath] = isRW container.VolumesRW[volPath] = isRW && mountRW
} }
} }
@ -819,7 +837,7 @@ func (container *Container) Start() (err error) {
// Create the mountpoint // Create the mountpoint
rootVolPath := path.Join(container.RootfsPath(), volPath) rootVolPath := path.Join(container.RootfsPath(), volPath)
if err := os.MkdirAll(rootVolPath, 0755); err != nil { if err := os.MkdirAll(rootVolPath, 0755); err != nil {
return nil return err
} }
// Do not copy or change permissions if we are mounting from the host // Do not copy or change permissions if we are mounting from the host
@ -1086,7 +1104,7 @@ func (container *Container) allocateNetwork() error {
Gateway: manager.bridgeNetwork.IP, Gateway: manager.bridgeNetwork.IP,
manager: manager, manager: manager,
} }
if iface !=nil && iface.IPNet.IP != nil { if iface != nil && iface.IPNet.IP != nil {
ipNum := ipToInt(iface.IPNet.IP) ipNum := ipToInt(iface.IPNet.IP)
manager.ipAllocator.inUse[ipNum] = struct{}{} manager.ipAllocator.inUse[ipNum] = struct{}{}
} else { } else {

View File

@ -1338,6 +1338,67 @@ func TestBindMounts(t *testing.T) {
} }
} }
// Test that -volumes-from supports both read-only mounts
func TestFromVolumesInReadonlyMode(t *testing.T) {
runtime := mkRuntime(t)
defer nuke(runtime)
container, _, err := runtime.Create(
&Config{
Image: GetTestImage(runtime).ID,
Cmd: []string{"/bin/echo", "-n", "foobar"},
Volumes: map[string]struct{}{"/test": {}},
},
"",
)
if err != nil {
t.Fatal(err)
}
defer runtime.Destroy(container)
_, err = container.Output()
if err != nil {
t.Fatal(err)
}
if !container.VolumesRW["/test"] {
t.Fail()
}
container2, _, err := runtime.Create(
&Config{
Image: GetTestImage(runtime).ID,
Cmd: []string{"/bin/echo", "-n", "foobar"},
VolumesFrom: container.ID + ":ro",
},
"",
)
if err != nil {
t.Fatal(err)
}
defer runtime.Destroy(container2)
_, err = container2.Output()
if err != nil {
t.Fatal(err)
}
if container.Volumes["/test"] != container2.Volumes["/test"] {
t.Logf("container volumes do not match: %s | %s ",
container.Volumes["/test"],
container2.Volumes["/test"])
t.Fail()
}
_, exists := container2.VolumesRW["/test"]
if !exists {
t.Logf("container2 is missing '/test' volume: %s", container2.VolumesRW)
t.Fail()
}
if container2.VolumesRW["/test"] != false {
t.Log("'/test' volume mounted in read-write mode, expected read-only")
t.Fail()
}
}
// Test that VolumesRW values are copied to the new container. Regression test for #1201 // Test that VolumesRW values are copied to the new container. Regression test for #1201
func TestVolumesFromReadonlyMount(t *testing.T) { func TestVolumesFromReadonlyMount(t *testing.T) {
runtime := mkRuntime(t) runtime := mkRuntime(t)

View File

@ -4,9 +4,9 @@ import (
"flag" "flag"
"fmt" "fmt"
"github.com/dotcloud/docker" "github.com/dotcloud/docker"
"github.com/dotcloud/docker/engine"
"github.com/dotcloud/docker/sysinit" "github.com/dotcloud/docker/sysinit"
"github.com/dotcloud/docker/utils" "github.com/dotcloud/docker/utils"
"github.com/dotcloud/docker/engine"
"log" "log"
"os" "os"
"strings" "strings"

View File

@ -245,6 +245,9 @@ Full -run example
Usage: docker events Usage: docker events
Get real time events from the server Get real time events from the server
-since="": Show previously created events and then stream.
(either seconds since epoch, or date string as below)
.. _cli_events_example: .. _cli_events_example:
@ -277,6 +280,23 @@ Shell 1: (Again .. now showing events)
[2013-09-03 15:49:29 +0200 CEST] 4386fb97867d: (from 12de384bfb10) die [2013-09-03 15:49:29 +0200 CEST] 4386fb97867d: (from 12de384bfb10) die
[2013-09-03 15:49:29 +0200 CEST] 4386fb97867d: (from 12de384bfb10) stop [2013-09-03 15:49:29 +0200 CEST] 4386fb97867d: (from 12de384bfb10) stop
Show events in the past from a specified time
.............................................
.. code-block:: bash
$ sudo docker events -since 1378216169
[2013-09-03 15:49:29 +0200 CEST] 4386fb97867d: (from 12de384bfb10) die
[2013-09-03 15:49:29 +0200 CEST] 4386fb97867d: (from 12de384bfb10) stop
$ sudo docker events -since '2013-09-03'
[2013-09-03 15:49:26 +0200 CEST] 4386fb97867d: (from 12de384bfb10) start
[2013-09-03 15:49:29 +0200 CEST] 4386fb97867d: (from 12de384bfb10) die
[2013-09-03 15:49:29 +0200 CEST] 4386fb97867d: (from 12de384bfb10) stop
$ sudo docker events -since '2013-09-03 15:49:29 +0200 CEST'
[2013-09-03 15:49:29 +0200 CEST] 4386fb97867d: (from 12de384bfb10) die
[2013-09-03 15:49:29 +0200 CEST] 4386fb97867d: (from 12de384bfb10) stop
.. _cli_export: .. _cli_export:
@ -460,6 +480,12 @@ Insert file from github
The main process inside the container will be sent SIGKILL. The main process inside the container will be sent SIGKILL.
Known Issues (kill)
~~~~~~~~~~~~~~~~~~~
* :issue:`197` indicates that ``docker kill`` may leave directories
behind and make it difficult to remove the container.
.. _cli_login: .. _cli_login:
``login`` ``login``
@ -568,6 +594,12 @@ The main process inside the container will be sent SIGKILL.
Remove one or more containers Remove one or more containers
-link="": Remove the link instead of the actual container -link="": Remove the link instead of the actual container
Known Issues (rm)
~~~~~~~~~~~~~~~~~~~
* :issue:`197` indicates that ``docker kill`` may leave directories
behind and make it difficult to remove the container.
Examples: Examples:
~~~~~~~~~ ~~~~~~~~~
@ -628,7 +660,7 @@ network communication.
-u="": Username or UID -u="": Username or UID
-dns=[]: Set custom dns servers for the container -dns=[]: Set custom dns servers for the container
-v=[]: Create a bind mount with: [host-dir]:[container-dir]:[rw|ro]. If "container-dir" is missing, then docker creates a new volume. -v=[]: Create a bind mount with: [host-dir]:[container-dir]:[rw|ro]. If "container-dir" is missing, then docker creates a new volume.
-volumes-from="": Mount all volumes from the given container -volumes-from="": Mount all volumes from the given container(s)
-entrypoint="": Overwrite the default entrypoint set by the image -entrypoint="": Overwrite the default entrypoint set by the image
-w="": Working directory inside the container -w="": Working directory inside the container
-lxc-conf=[]: Add custom lxc options -lxc-conf="lxc.cgroup.cpuset.cpus = 0,1" -lxc-conf=[]: Add custom lxc options -lxc-conf="lxc.cgroup.cpuset.cpus = 0,1"
@ -720,6 +752,17 @@ can access the network and environment of the redis container via
environment variables. The ``-name`` flag will assign the name ``console`` environment variables. The ``-name`` flag will assign the name ``console``
to the newly created container. to the newly created container.
.. code-block:: bash
docker run -volumes-from 777f7dc92da7,ba8c0c54f0f2:ro -i -t ubuntu pwd
The ``-volumes-from`` flag mounts all the defined volumes from the
reference containers. Containers can be specified by a comma separated
list or by repetitions of the ``-volumes-from`` argument. The container
id may be optionally suffixed with ``:ro`` or ``:rw`` to mount the volumes in
read-only or read-write mode, respectively. By default, the volumes are mounted
in the same mode (rw or ro) as the reference container.
.. _cli_search: .. _cli_search:
``search`` ``search``

View File

@ -40,7 +40,11 @@ html_additional_pages = {
# Add any Sphinx extension module names here, as strings. They can be extensions # Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones. # coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinxcontrib.httpdomain'] extensions = ['sphinxcontrib.httpdomain', 'sphinx.ext.extlinks']
# Configure extlinks
extlinks = { 'issue': ('https://github.com/dotcloud/docker/issues/%s',
'Issue ') }
# Add any paths that contain templates here, relative to this directory. # Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates'] templates_path = ['_templates']

View File

@ -10,13 +10,16 @@ Want to hack on Docker? Awesome!
The repository includes `all the instructions you need to get The repository includes `all the instructions you need to get
started <https://github.com/dotcloud/docker/blob/master/CONTRIBUTING.md>`_. started <https://github.com/dotcloud/docker/blob/master/CONTRIBUTING.md>`_.
The `developer environment Dockerfile <https://github.com/dotcloud/docker/blob/master/Dockerfile>`_ The `developer environment Dockerfile
<https://github.com/dotcloud/docker/blob/master/Dockerfile>`_
specifies the tools and versions used to test and build Docker. specifies the tools and versions used to test and build Docker.
If you're making changes to the documentation, see the If you're making changes to the documentation, see the
`README.md <https://github.com/dotcloud/docker/blob/master/docs/README.md>`_. `README.md <https://github.com/dotcloud/docker/blob/master/docs/README.md>`_.
The `documentation environment Dockerfile <https://github.com/dotcloud/docker/blob/master/docs/Dockerfile>`_ The `documentation environment Dockerfile
<https://github.com/dotcloud/docker/blob/master/docs/Dockerfile>`_
specifies the tools and versions used to build the Documentation. specifies the tools and versions used to build the Documentation.
Further interesting details can be found in the `Packaging hints <https://github.com/dotcloud/docker/blob/master/hack/PACKAGERS.md>`_. Further interesting details can be found in the `Packaging hints
<https://github.com/dotcloud/docker/blob/master/hack/PACKAGERS.md>`_.

View File

@ -116,6 +116,16 @@ core concepts of Docker where commits are cheap and containers can be
created from any point in an image's history, much like source created from any point in an image's history, much like source
control. control.
Known Issues (RUN)
..................
* :issue:`783` is about file permissions problems that can occur when
using the AUFS file system. You might notice it during an attempt to
``rm`` a file, for example. The issue describes a workaround.
* :issue:`2424` Locale will not be set automatically.
3.4 CMD 3.4 CMD
------- -------

View File

@ -2,13 +2,12 @@ package engine
import ( import (
"fmt" "fmt"
"os"
"log"
"runtime"
"github.com/dotcloud/docker/utils" "github.com/dotcloud/docker/utils"
"log"
"os"
"runtime"
) )
type Handler func(*Job) string type Handler func(*Job) string
var globalHandlers map[string]Handler var globalHandlers map[string]Handler
@ -25,8 +24,8 @@ func Register(name string, handler Handler) error {
// It acts as a store for *containers*, and allows manipulation of these // It acts as a store for *containers*, and allows manipulation of these
// containers by executing *jobs*. // containers by executing *jobs*.
type Engine struct { type Engine struct {
root string root string
handlers map[string]Handler handlers map[string]Handler
} }
// New initializes a new engine managing the directory specified at `root`. // New initializes a new engine managing the directory specified at `root`.
@ -56,8 +55,8 @@ func New(root string) (*Engine, error) {
return nil, err return nil, err
} }
eng := &Engine{ eng := &Engine{
root: root, root: root,
handlers: globalHandlers, handlers: globalHandlers,
} }
return eng, nil return eng, nil
} }
@ -66,12 +65,12 @@ func New(root string) (*Engine, error) {
// This function mimics `Command` from the standard os/exec package. // This function mimics `Command` from the standard os/exec package.
func (eng *Engine) Job(name string, args ...string) *Job { func (eng *Engine) Job(name string, args ...string) *Job {
job := &Job{ job := &Job{
eng: eng, eng: eng,
Name: name, Name: name,
Args: args, Args: args,
Stdin: os.Stdin, Stdin: os.Stdin,
Stdout: os.Stdout, Stdout: os.Stdout,
Stderr: os.Stderr, Stderr: os.Stderr,
} }
handler, exists := eng.handlers[name] handler, exists := eng.handlers[name]
if exists { if exists {
@ -79,4 +78,3 @@ func (eng *Engine) Job(name string, args ...string) *Job {
} }
return job return job
} }

View File

@ -1,18 +1,18 @@
package engine package engine
import ( import (
"testing" "fmt"
"github.com/dotcloud/docker/utils"
"io/ioutil"
"runtime" "runtime"
"strings" "strings"
"fmt" "testing"
"io/ioutil"
"github.com/dotcloud/docker/utils"
) )
var globalTestID string var globalTestID string
func init() { func init() {
Register("dummy", func(job *Job) string { return ""; }) Register("dummy", func(job *Job) string { return "" })
} }
func mkEngine(t *testing.T) *Engine { func mkEngine(t *testing.T) *Engine {

View File

@ -1,11 +1,11 @@
package engine package engine
import ( import (
"encoding/json"
"fmt"
"github.com/dotcloud/docker/utils"
"io" "io"
"strings" "strings"
"fmt"
"encoding/json"
"github.com/dotcloud/docker/utils"
) )
// A job is the fundamental unit of work in the docker engine. // A job is the fundamental unit of work in the docker engine.
@ -20,17 +20,17 @@ import (
// One slight variation is that jobs report their status as a string. The // One slight variation is that jobs report their status as a string. The
// string "0" indicates success, and any other strings indicates an error. // string "0" indicates success, and any other strings indicates an error.
// This allows for richer error reporting. // This allows for richer error reporting.
// //
type Job struct { type Job struct {
eng *Engine eng *Engine
Name string Name string
Args []string Args []string
env []string env []string
Stdin io.ReadCloser Stdin io.ReadCloser
Stdout io.WriteCloser Stdout io.WriteCloser
Stderr io.WriteCloser Stderr io.WriteCloser
handler func(*Job) string handler func(*Job) string
status string status string
} }
// Run executes the job and blocks until the job completes. // Run executes the job and blocks until the job completes.
@ -57,21 +57,21 @@ func (job *Job) String() string {
} }
func (job *Job) Getenv(key string) (value string) { func (job *Job) Getenv(key string) (value string) {
for _, kv := range job.env { for _, kv := range job.env {
if strings.Index(kv, "=") == -1 { if strings.Index(kv, "=") == -1 {
continue continue
} }
parts := strings.SplitN(kv, "=", 2) parts := strings.SplitN(kv, "=", 2)
if parts[0] != key { if parts[0] != key {
continue continue
} }
if len(parts) < 2 { if len(parts) < 2 {
value = "" value = ""
} else { } else {
value = parts[1] value = parts[1]
} }
} }
return return
} }
func (job *Job) GetenvBool(key string) (value bool) { func (job *Job) GetenvBool(key string) (value bool) {
@ -109,5 +109,5 @@ func (job *Job) SetenvList(key string, value []string) error {
} }
func (job *Job) Setenv(key, value string) { func (job *Job) Setenv(key, value string) {
job.env = append(job.env, key + "=" + value) job.env = append(job.env, key+"="+value)
} }

View File

@ -48,7 +48,7 @@ type WalkFunc func(fullPath string, entity *Entity) error
// Graph database for storing entities and their relationships // Graph database for storing entities and their relationships
type Database struct { type Database struct {
conn *sql.DB conn *sql.DB
mux sync.Mutex mux sync.RWMutex
} }
// Create a new graph database initialized with a root entity // Create a new graph database initialized with a root entity
@ -138,7 +138,14 @@ func (db *Database) Set(fullPath, id string) (*Entity, error) {
// Return true if a name already exists in the database // Return true if a name already exists in the database
func (db *Database) Exists(name string) bool { func (db *Database) Exists(name string) bool {
return db.Get(name) != nil db.mux.RLock()
defer db.mux.RUnlock()
e, err := db.get(name)
if err != nil {
return false
}
return e != nil
} }
func (db *Database) setEdge(parentPath, name string, e *Entity) error { func (db *Database) setEdge(parentPath, name string, e *Entity) error {
@ -165,6 +172,9 @@ func (db *Database) RootEntity() *Entity {
// Return the entity for a given path // Return the entity for a given path
func (db *Database) Get(name string) *Entity { func (db *Database) Get(name string) *Entity {
db.mux.RLock()
defer db.mux.RUnlock()
e, err := db.get(name) e, err := db.get(name)
if err != nil { if err != nil {
return nil return nil
@ -200,23 +210,36 @@ func (db *Database) get(name string) (*Entity, error) {
// List all entities by from the name // List all entities by from the name
// The key will be the full path of the entity // The key will be the full path of the entity
func (db *Database) List(name string, depth int) Entities { func (db *Database) List(name string, depth int) Entities {
db.mux.RLock()
defer db.mux.RUnlock()
out := Entities{} out := Entities{}
e, err := db.get(name) e, err := db.get(name)
if err != nil { if err != nil {
return out return out
} }
for c := range db.children(e, name, depth) {
children, err := db.children(e, name, depth, nil)
if err != nil {
return out
}
for _, c := range children {
out[c.FullPath] = c.Entity out[c.FullPath] = c.Entity
} }
return out return out
} }
// Walk through the child graph of an entity, calling walkFunc for each child entity.
// It is safe for walkFunc to call graph functions.
func (db *Database) Walk(name string, walkFunc WalkFunc, depth int) error { func (db *Database) Walk(name string, walkFunc WalkFunc, depth int) error {
e, err := db.get(name) children, err := db.Children(name, depth)
if err != nil { if err != nil {
return err return err
} }
for c := range db.children(e, name, depth) {
// Note: the database lock must not be held while calling walkFunc
for _, c := range children {
if err := walkFunc(c.FullPath, c.Entity); err != nil { if err := walkFunc(c.FullPath, c.Entity); err != nil {
return err return err
} }
@ -224,8 +247,24 @@ func (db *Database) Walk(name string, walkFunc WalkFunc, depth int) error {
return nil return nil
} }
// Return the children of the specified entity
func (db *Database) Children(name string, depth int) ([]WalkMeta, error) {
db.mux.RLock()
defer db.mux.RUnlock()
e, err := db.get(name)
if err != nil {
return nil, err
}
return db.children(e, name, depth, nil)
}
// Return the reference count for a specified id // Return the reference count for a specified id
func (db *Database) Refs(id string) int { func (db *Database) Refs(id string) int {
db.mux.RLock()
defer db.mux.RUnlock()
var count int var count int
if err := db.conn.QueryRow("SELECT COUNT(*) FROM edge WHERE entity_id = ?;", id).Scan(&count); err != nil { if err := db.conn.QueryRow("SELECT COUNT(*) FROM edge WHERE entity_id = ?;", id).Scan(&count); err != nil {
return 0 return 0
@ -235,6 +274,9 @@ func (db *Database) Refs(id string) int {
// Return all the id's path references // Return all the id's path references
func (db *Database) RefPaths(id string) Edges { func (db *Database) RefPaths(id string) Edges {
db.mux.RLock()
defer db.mux.RUnlock()
refs := Edges{} refs := Edges{}
rows, err := db.conn.Query("SELECT name, parent_id FROM edge WHERE entity_id = ?;", id) rows, err := db.conn.Query("SELECT name, parent_id FROM edge WHERE entity_id = ?;", id)
@ -356,56 +398,51 @@ type WalkMeta struct {
Edge *Edge Edge *Edge
} }
func (db *Database) children(e *Entity, name string, depth int) <-chan WalkMeta { func (db *Database) children(e *Entity, name string, depth int, entities []WalkMeta) ([]WalkMeta, error) {
out := make(chan WalkMeta)
if e == nil { if e == nil {
close(out) return entities, nil
return out
} }
go func() { rows, err := db.conn.Query("SELECT entity_id, name FROM edge where parent_id = ?;", e.id)
rows, err := db.conn.Query("SELECT entity_id, name FROM edge where parent_id = ?;", e.id) if err != nil {
if err != nil { return nil, err
close(out) }
defer rows.Close()
for rows.Next() {
var entityId, entityName string
if err := rows.Scan(&entityId, &entityName); err != nil {
return nil, err
}
child := &Entity{entityId}
edge := &Edge{
ParentID: e.id,
Name: entityName,
EntityID: child.id,
} }
defer rows.Close()
for rows.Next() { meta := WalkMeta{
var entityId, entityName string Parent: e,
if err := rows.Scan(&entityId, &entityName); err != nil { Entity: child,
// Log error FullPath: path.Join(name, edge.Name),
continue Edge: edge,
} }
child := &Entity{entityId}
edge := &Edge{
ParentID: e.id,
Name: entityName,
EntityID: child.id,
}
meta := WalkMeta{ entities = append(entities, meta)
Parent: e,
Entity: child,
FullPath: path.Join(name, edge.Name),
Edge: edge,
}
out <- meta if depth != 0 {
if depth == 0 {
continue
}
nDepth := depth nDepth := depth
if depth != -1 { if depth != -1 {
nDepth -= 1 nDepth -= 1
} }
sc := db.children(child, meta.FullPath, nDepth) entities, err = db.children(child, meta.FullPath, nDepth, entities)
for c := range sc { if err != nil {
out <- c return nil, err
} }
} }
close(out) }
}()
return out return entities, nil
} }
// Return the entity based on the parent path and name // Return the entity based on the parent path and name

View File

@ -1,14 +1,16 @@
# VERSION: 0.22 # VERSION: 0.25
# DOCKER-VERSION 0.6.3 # DOCKER-VERSION 0.6.6
# AUTHOR: Daniel Mizyrycki <daniel@dotcloud.com> # AUTHOR: Daniel Mizyrycki <daniel@docker.com>
# DESCRIPTION: Deploy docker-ci on Amazon EC2 # DESCRIPTION: Deploy docker-ci on Digital Ocean
# COMMENTS: # COMMENTS:
# CONFIG_JSON is an environment variable json string loaded as: # CONFIG_JSON is an environment variable json string loaded as:
# #
# export CONFIG_JSON=' # export CONFIG_JSON='
# { "AWS_TAG": "EC2_instance_name", # { "DROPLET_NAME": "docker-ci",
# "AWS_ACCESS_KEY": "EC2_access_key", # "DO_CLIENT_ID": "Digital_Ocean_client_id",
# "AWS_SECRET_KEY": "EC2_secret_key", # "DO_API_KEY": "Digital_Ocean_api_key",
# "DOCKER_KEY_ID": "Digital_Ocean_ssh_key_id",
# "DOCKER_CI_KEY_PATH": "docker-ci_private_key_path",
# "DOCKER_CI_PUB": "$(cat docker-ci_ssh_public_key.pub)", # "DOCKER_CI_PUB": "$(cat docker-ci_ssh_public_key.pub)",
# "DOCKER_CI_KEY": "$(cat docker-ci_ssh_private_key.key)", # "DOCKER_CI_KEY": "$(cat docker-ci_ssh_private_key.key)",
# "BUILDBOT_PWD": "Buildbot_server_password", # "BUILDBOT_PWD": "Buildbot_server_password",
@ -33,9 +35,11 @@
from ubuntu:12.04 from ubuntu:12.04
run echo 'deb http://archive.ubuntu.com/ubuntu precise main universe' > /etc/apt/sources.list run echo 'deb http://archive.ubuntu.com/ubuntu precise main universe' \
run apt-get update; apt-get install -y python2.7 python-dev python-pip ssh rsync less vim > /etc/apt/sources.list
run pip install boto fabric run apt-get update; apt-get install -y git python2.7 python-dev libevent-dev \
python-pip ssh rsync less vim
run pip install requests fabric
# Add deployment code and set default container command # Add deployment code and set default container command
add . /docker-ci add . /docker-ci

View File

@ -0,0 +1 @@
0.4.5

View File

@ -43,7 +43,7 @@ c['slavePortnum'] = PORT_MASTER
# Schedulers # Schedulers
c['schedulers'] = [ForceScheduler(name='trigger', builderNames=['docker', c['schedulers'] = [ForceScheduler(name='trigger', builderNames=['docker',
'index','registry','coverage','nightlyrelease'])] 'index','registry','docker-coverage','registry-coverage','nightlyrelease'])]
c['schedulers'] += [SingleBranchScheduler(name="all", treeStableTimer=None, c['schedulers'] += [SingleBranchScheduler(name="all", treeStableTimer=None,
change_filter=filter.ChangeFilter(branch='master', change_filter=filter.ChangeFilter(branch='master',
repository='https://github.com/dotcloud/docker'), builderNames=['docker'])] repository='https://github.com/dotcloud/docker'), builderNames=['docker'])]
@ -51,7 +51,7 @@ c['schedulers'] += [SingleBranchScheduler(name='pullrequest',
change_filter=filter.ChangeFilter(category='github_pullrequest'), treeStableTimer=None, change_filter=filter.ChangeFilter(category='github_pullrequest'), treeStableTimer=None,
builderNames=['pullrequest'])] builderNames=['pullrequest'])]
c['schedulers'] += [Nightly(name='daily', branch=None, builderNames=['nightlyrelease', c['schedulers'] += [Nightly(name='daily', branch=None, builderNames=['nightlyrelease',
'coverage'], hour=7, minute=00)] 'docker-coverage','registry-coverage'], hour=7, minute=00)]
c['schedulers'] += [Nightly(name='every4hrs', branch=None, builderNames=['registry','index'], c['schedulers'] += [Nightly(name='every4hrs', branch=None, builderNames=['registry','index'],
hour=range(0,24,4), minute=15)] hour=range(0,24,4), minute=15)]
@ -76,17 +76,25 @@ c['builders'] += [BuilderConfig(name='pullrequest',slavenames=['buildworker'],
# Docker coverage test # Docker coverage test
factory = BuildFactory() factory = BuildFactory()
factory.addStep(ShellCommand(description='Coverage', logEnviron=False, factory.addStep(ShellCommand(description='docker-coverage', logEnviron=False,
usePTY=True, command='{0}/docker-coverage/coverage-docker.sh'.format( usePTY=True, command='{0}/docker-coverage/coverage-docker.sh'.format(
DOCKER_CI_PATH))) DOCKER_CI_PATH)))
c['builders'] += [BuilderConfig(name='coverage',slavenames=['buildworker'], c['builders'] += [BuilderConfig(name='docker-coverage',slavenames=['buildworker'],
factory=factory)]
# Docker registry coverage test
factory = BuildFactory()
factory.addStep(ShellCommand(description='registry-coverage', logEnviron=False,
usePTY=True, command='docker run registry_coverage'.format(
DOCKER_CI_PATH)))
c['builders'] += [BuilderConfig(name='registry-coverage',slavenames=['buildworker'],
factory=factory)] factory=factory)]
# Registry functional test # Registry functional test
factory = BuildFactory() factory = BuildFactory()
factory.addStep(ShellCommand(description='registry', logEnviron=False, factory.addStep(ShellCommand(description='registry', logEnviron=False,
command='. {0}/master/credentials.cfg; ' command='. {0}/master/credentials.cfg; '
'/docker-ci/functionaltests/test_registry.sh'.format(BUILDBOT_PATH), '{1}/functionaltests/test_registry.sh'.format(BUILDBOT_PATH, DOCKER_CI_PATH),
usePTY=True)) usePTY=True))
c['builders'] += [BuilderConfig(name='registry',slavenames=['buildworker'], c['builders'] += [BuilderConfig(name='registry',slavenames=['buildworker'],
factory=factory)] factory=factory)]
@ -95,16 +103,17 @@ c['builders'] += [BuilderConfig(name='registry',slavenames=['buildworker'],
factory = BuildFactory() factory = BuildFactory()
factory.addStep(ShellCommand(description='index', logEnviron=False, factory.addStep(ShellCommand(description='index', logEnviron=False,
command='. {0}/master/credentials.cfg; ' command='. {0}/master/credentials.cfg; '
'/docker-ci/functionaltests/test_index.py'.format(BUILDBOT_PATH), '{1}/functionaltests/test_index.py'.format(BUILDBOT_PATH, DOCKER_CI_PATH),
usePTY=True)) usePTY=True))
c['builders'] += [BuilderConfig(name='index',slavenames=['buildworker'], c['builders'] += [BuilderConfig(name='index',slavenames=['buildworker'],
factory=factory)] factory=factory)]
# Docker nightly release # Docker nightly release
nightlyrelease_cmd = ('docker version; docker run -i -t -privileged -e AWS_S3_BUCKET='
'test.docker.io dockerbuilder hack/dind dockerbuild.sh')
factory = BuildFactory() factory = BuildFactory()
factory.addStep(ShellCommand(description='NightlyRelease', logEnviron=False, factory.addStep(ShellCommand(description='NightlyRelease',logEnviron=False,
usePTY=True, command='docker run -privileged' usePTY=True, command=nightlyrelease_cmd))
' -e AWS_S3_BUCKET=test.docker.io dockerbuilder'))
c['builders'] += [BuilderConfig(name='nightlyrelease',slavenames=['buildworker'], c['builders'] += [BuilderConfig(name='nightlyrelease',slavenames=['buildworker'],
factory=factory)] factory=factory)]

View File

@ -1,11 +1,11 @@
#!/usr/bin/env python #!/usr/bin/env python
import os, sys, re, json, base64 import os, sys, re, json, requests, base64
from boto.ec2.connection import EC2Connection
from subprocess import call from subprocess import call
from fabric import api from fabric import api
from fabric.api import cd, run, put, sudo from fabric.api import cd, run, put, sudo
from os import environ as env from os import environ as env
from datetime import datetime
from time import sleep from time import sleep
# Remove SSH private key as it needs more processing # Remove SSH private key as it needs more processing
@ -20,42 +20,41 @@ for key in CONFIG:
env['DOCKER_CI_KEY'] = re.sub('^.+"DOCKER_CI_KEY".+?"(.+?)".+','\\1', env['DOCKER_CI_KEY'] = re.sub('^.+"DOCKER_CI_KEY".+?"(.+?)".+','\\1',
env['CONFIG_JSON'],flags=re.DOTALL) env['CONFIG_JSON'],flags=re.DOTALL)
DROPLET_NAME = env.get('DROPLET_NAME','docker-ci')
AWS_TAG = env.get('AWS_TAG','docker-ci') TIMEOUT = 120 # Seconds before timeout droplet creation
AWS_KEY_NAME = 'dotcloud-dev' # Same as CONFIG_JSON['DOCKER_CI_PUB'] IMAGE_ID = 1004145 # Docker on Ubuntu 13.04
AWS_AMI = 'ami-d582d6bc' # Ubuntu 13.04 REGION_ID = 4 # New York 2
AWS_REGION = 'us-east-1' SIZE_ID = 62 # memory 2GB
AWS_TYPE = 'm1.small' DO_IMAGE_USER = 'root' # Image user on Digital Ocean
AWS_SEC_GROUPS = 'gateway' API_URL = 'https://api.digitalocean.com/'
AWS_IMAGE_USER = 'ubuntu'
DOCKER_PATH = '/go/src/github.com/dotcloud/docker' DOCKER_PATH = '/go/src/github.com/dotcloud/docker'
DOCKER_CI_PATH = '/docker-ci' DOCKER_CI_PATH = '/docker-ci'
CFG_PATH = '{}/buildbot'.format(DOCKER_CI_PATH) CFG_PATH = '{}/buildbot'.format(DOCKER_CI_PATH)
class AWS_EC2: class DigitalOcean():
'''Amazon EC2'''
def __init__(self, access_key, secret_key): def __init__(self, key, client):
'''Set default API parameters''' '''Set default API parameters'''
self.handler = EC2Connection(access_key, secret_key) self.key = key
def create_instance(self, tag, instance_type): self.client = client
reservation = self.handler.run_instances(**instance_type) self.api_url = API_URL
instance = reservation.instances[0]
sleep(10) def api(self, cmd_path, api_arg={}):
while instance.state != 'running': '''Make api call'''
sleep(5) api_arg.update({'api_key':self.key, 'client_id':self.client})
instance.update() resp = requests.get(self.api_url + cmd_path, params=api_arg).text
print "Instance state: %s" % (instance.state) resp = json.loads(resp)
instance.add_tag("Name",tag) if resp['status'] != 'OK':
print "instance %s done!" % (instance.id) raise Exception(resp['error_message'])
return instance.ip_address return resp
def get_instances(self):
return self.handler.get_all_instances() def droplet_data(self, name):
def get_tags(self): '''Get droplet data'''
return dict([(i.instances[0].id, i.instances[0].tags['Name']) data = self.api('droplets')
for i in self.handler.get_all_instances() if i.instances[0].tags]) data = [droplet for droplet in data['droplets']
def del_instance(self, instance_id): if droplet['name'] == name]
self.handler.terminate_instances(instance_ids=[instance_id]) return data[0] if data else {}
def json_fmt(data): def json_fmt(data):
@ -63,20 +62,36 @@ def json_fmt(data):
return json.dumps(data, sort_keys = True, indent = 2) return json.dumps(data, sort_keys = True, indent = 2)
# Create EC2 API handler do = DigitalOcean(env['DO_API_KEY'], env['DO_CLIENT_ID'])
ec2 = AWS_EC2(env['AWS_ACCESS_KEY'], env['AWS_SECRET_KEY'])
# Stop processing if AWS_TAG exists on EC2 # Get DROPLET_NAME data
if AWS_TAG in ec2.get_tags().values(): data = do.droplet_data(DROPLET_NAME)
print ('Instance: {} already deployed. Not further processing.'
.format(AWS_TAG)) # Stop processing if DROPLET_NAME exists on Digital Ocean
if data:
print ('Droplet: {} already deployed. Not further processing.'
.format(DROPLET_NAME))
exit(1) exit(1)
ip = ec2.create_instance(AWS_TAG, {'image_id':AWS_AMI, 'instance_type':AWS_TYPE, # Create droplet
'security_groups':[AWS_SEC_GROUPS], 'key_name':AWS_KEY_NAME}) do.api('droplets/new', {'name':DROPLET_NAME, 'region_id':REGION_ID,
'image_id':IMAGE_ID, 'size_id':SIZE_ID,
'ssh_key_ids':[env['DOCKER_KEY_ID']]})
# Wait 30 seconds for the machine to boot # Wait for droplet to be created.
sleep(30) start_time = datetime.now()
while (data.get('status','') != 'active' and (
datetime.now()-start_time).seconds < TIMEOUT):
data = do.droplet_data(DROPLET_NAME)
print data['status']
sleep(3)
# Wait for the machine to boot
sleep(15)
# Get droplet IP
ip = str(data['ip_address'])
print 'droplet: {} ip: {}'.format(DROPLET_NAME, ip)
# Create docker-ci ssh private key so docker-ci docker container can communicate # Create docker-ci ssh private key so docker-ci docker container can communicate
# with its EC2 instance # with its EC2 instance
@ -86,7 +101,7 @@ os.chmod('/root/.ssh/id_rsa',0600)
open('/root/.ssh/config','w').write('StrictHostKeyChecking no\n') open('/root/.ssh/config','w').write('StrictHostKeyChecking no\n')
api.env.host_string = ip api.env.host_string = ip
api.env.user = AWS_IMAGE_USER api.env.user = DO_IMAGE_USER
api.env.key_filename = '/root/.ssh/id_rsa' api.env.key_filename = '/root/.ssh/id_rsa'
# Correct timezone # Correct timezone
@ -100,20 +115,17 @@ sudo("echo '{}' >> /root/.ssh/authorized_keys".format(env['DOCKER_CI_PUB']))
credentials = { credentials = {
'AWS_ACCESS_KEY': env['PKG_ACCESS_KEY'], 'AWS_ACCESS_KEY': env['PKG_ACCESS_KEY'],
'AWS_SECRET_KEY': env['PKG_SECRET_KEY'], 'AWS_SECRET_KEY': env['PKG_SECRET_KEY'],
'GPG_PASSPHRASE': env['PKG_GPG_PASSPHRASE'], 'GPG_PASSPHRASE': env['PKG_GPG_PASSPHRASE']}
'INDEX_AUTH': env['INDEX_AUTH']}
open(DOCKER_CI_PATH + '/nightlyrelease/release_credentials.json', 'w').write( open(DOCKER_CI_PATH + '/nightlyrelease/release_credentials.json', 'w').write(
base64.b64encode(json.dumps(credentials))) base64.b64encode(json.dumps(credentials)))
# Transfer docker # Transfer docker
sudo('mkdir -p ' + DOCKER_CI_PATH) sudo('mkdir -p ' + DOCKER_CI_PATH)
sudo('chown {}.{} {}'.format(AWS_IMAGE_USER, AWS_IMAGE_USER, DOCKER_CI_PATH)) sudo('chown {}.{} {}'.format(DO_IMAGE_USER, DO_IMAGE_USER, DOCKER_CI_PATH))
call('/usr/bin/rsync -aH {} {}@{}:{}'.format(DOCKER_CI_PATH, AWS_IMAGE_USER, ip, call('/usr/bin/rsync -aH {} {}@{}:{}'.format(DOCKER_CI_PATH, DO_IMAGE_USER, ip,
os.path.dirname(DOCKER_CI_PATH)), shell=True) os.path.dirname(DOCKER_CI_PATH)), shell=True)
# Install Docker and Buildbot dependencies # Install Docker and Buildbot dependencies
sudo('addgroup docker')
sudo('usermod -a -G docker ubuntu')
sudo('mkdir /mnt/docker; ln -s /mnt/docker /var/lib/docker') sudo('mkdir /mnt/docker; ln -s /mnt/docker /var/lib/docker')
sudo('wget -q -O - https://get.docker.io/gpg | apt-key add -') sudo('wget -q -O - https://get.docker.io/gpg | apt-key add -')
sudo('echo deb https://get.docker.io/ubuntu docker main >' sudo('echo deb https://get.docker.io/ubuntu docker main >'
@ -123,7 +135,7 @@ sudo('echo -e "deb http://archive.ubuntu.com/ubuntu raring main universe\n'
' > /etc/apt/sources.list; apt-get update') ' > /etc/apt/sources.list; apt-get update')
sudo('DEBIAN_FRONTEND=noninteractive apt-get install -q -y wget python-dev' sudo('DEBIAN_FRONTEND=noninteractive apt-get install -q -y wget python-dev'
' python-pip supervisor git mercurial linux-image-extra-$(uname -r)' ' python-pip supervisor git mercurial linux-image-extra-$(uname -r)'
' aufs-tools make libfontconfig libevent-dev') ' aufs-tools make libfontconfig libevent-dev libsqlite3-dev libssl-dev')
sudo('wget -O - https://go.googlecode.com/files/go1.1.2.linux-amd64.tar.gz | ' sudo('wget -O - https://go.googlecode.com/files/go1.1.2.linux-amd64.tar.gz | '
'tar -v -C /usr/local -xz; ln -s /usr/local/go/bin/go /usr/bin/go') 'tar -v -C /usr/local -xz; ln -s /usr/local/go/bin/go /usr/bin/go')
sudo('GOPATH=/go go get -d github.com/dotcloud/docker') sudo('GOPATH=/go go get -d github.com/dotcloud/docker')
@ -135,13 +147,13 @@ sudo('curl -s https://phantomjs.googlecode.com/files/'
'phantomjs-1.9.1-linux-x86_64.tar.bz2 | tar jx -C /usr/bin' 'phantomjs-1.9.1-linux-x86_64.tar.bz2 | tar jx -C /usr/bin'
' --strip-components=2 phantomjs-1.9.1-linux-x86_64/bin/phantomjs') ' --strip-components=2 phantomjs-1.9.1-linux-x86_64/bin/phantomjs')
# Preventively reboot docker-ci daily
sudo('ln -s /sbin/reboot /etc/cron.daily')
# Build docker-ci containers # Build docker-ci containers
sudo('cd {}; docker build -t docker .'.format(DOCKER_PATH)) sudo('cd {}; docker build -t docker .'.format(DOCKER_PATH))
sudo('cd {}; docker build -t docker-ci .'.format(DOCKER_CI_PATH))
sudo('cd {}/nightlyrelease; docker build -t dockerbuilder .'.format( sudo('cd {}/nightlyrelease; docker build -t dockerbuilder .'.format(
DOCKER_CI_PATH)) DOCKER_CI_PATH))
sudo('cd {}/registry-coverage; docker build -t registry_coverage .'.format(
DOCKER_CI_PATH))
# Download docker-ci testing container # Download docker-ci testing container
sudo('docker pull mzdaniel/test_docker') sudo('docker pull mzdaniel/test_docker')
@ -154,3 +166,6 @@ sudo('{0}/setup.sh root {0} {1} {2} {3} {4} {5} {6} {7} {8} {9} {10}'
env['SMTP_PWD'], env['EMAIL_RCP'], env['REGISTRY_USER'], env['SMTP_PWD'], env['EMAIL_RCP'], env['REGISTRY_USER'],
env['REGISTRY_PWD'], env['REGISTRY_BUCKET'], env['REGISTRY_ACCESS_KEY'], env['REGISTRY_PWD'], env['REGISTRY_BUCKET'], env['REGISTRY_ACCESS_KEY'],
env['REGISTRY_SECRET_KEY'])) env['REGISTRY_SECRET_KEY']))
# Preventively reboot docker-ci daily
sudo('ln -s /sbin/reboot /etc/cron.daily')

View File

@ -1,6 +1,6 @@
# VERSION: 0.3 # VERSION: 0.4
# DOCKER-VERSION 0.6.3 # DOCKER-VERSION 0.6.6
# AUTHOR: Daniel Mizyrycki <daniel@dotcloud.com> # AUTHOR: Daniel Mizyrycki <daniel@docker.com>
# DESCRIPTION: Testing docker PRs and commits on top of master using # DESCRIPTION: Testing docker PRs and commits on top of master using
# REFERENCES: This code reuses the excellent implementation of # REFERENCES: This code reuses the excellent implementation of
# Docker in Docker made by Jerome Petazzoni. # Docker in Docker made by Jerome Petazzoni.
@ -15,15 +15,10 @@
# TO_RUN: docker run -privileged test_docker hack/dind test_docker.sh [commit] [repo] [branch] # TO_RUN: docker run -privileged test_docker hack/dind test_docker.sh [commit] [repo] [branch]
from docker from docker
maintainer Daniel Mizyrycki <daniel@dotcloud.com> maintainer Daniel Mizyrycki <daniel@docker.com>
# Setup go environment. Extracted from /Dockerfile # Setup go in PATH. Extracted from /Dockerfile
env CGO_ENABLED 0 env PATH /usr/local/go/bin:$PATH
env GOROOT /goroot
env PATH $PATH:/goroot/bin
env GOPATH /go:/go/src/github.com/dotcloud/docker/vendor
volume /var/lib/docker
workdir /go/src/github.com/dotcloud/docker
# Add test_docker.sh # Add test_docker.sh
add test_docker.sh /usr/bin/test_docker.sh add test_docker.sh /usr/bin/test_docker.sh

View File

@ -8,31 +8,26 @@ BRANCH=${3-master}
# Compute test paths # Compute test paths
DOCKER_PATH=/go/src/github.com/dotcloud/docker DOCKER_PATH=/go/src/github.com/dotcloud/docker
# Timestamp
echo
date; echo
# Fetch latest master # Fetch latest master
cd /
rm -rf /go rm -rf /go
mkdir -p $DOCKER_PATH git clone -q -b master http://github.com/dotcloud/docker $DOCKER_PATH
cd $DOCKER_PATH cd $DOCKER_PATH
git init .
git fetch -q http://github.com/dotcloud/docker master
git reset --hard FETCH_HEAD
# Merge commit # Merge commit
#echo FIXME. Temporarily skip TestPrivilegedCanMount until DinD works reliable on AWS
git pull -q https://github.com/mzdaniel/docker.git dind-aws || exit 1
# Merge commit in top of master
git fetch -q "$REPO" "$BRANCH" git fetch -q "$REPO" "$BRANCH"
git merge --no-edit $COMMIT || exit 1 git merge --no-edit $COMMIT || exit 255
# Test commit # Test commit
go test -v; exit_status=$? ./hack/make.sh test; exit_status=$?
# Display load if test fails # Display load if test fails
if [ $exit_status -eq 1 ] ; then if [ $exit_status -ne 0 ] ; then
uptime; echo; free uptime; echo; free
fi fi
# Cleanup testing directory
rm -rf $BASE_PATH
exit $exit_status exit $exit_status

View File

@ -8,10 +8,12 @@ rm -rf docker-registry
# Setup the environment # Setup the environment
export SETTINGS_FLAVOR=test export SETTINGS_FLAVOR=test
export DOCKER_REGISTRY_CONFIG=config_test.yml export DOCKER_REGISTRY_CONFIG=config_test.yml
export PYTHONPATH=$(pwd)/docker-registry/test
# Get latest docker registry # Get latest docker registry
git clone -q https://github.com/dotcloud/docker-registry.git git clone -q https://github.com/dotcloud/docker-registry.git
cd docker-registry cd docker-registry
sed -Ei "s#(boto_bucket: ).+#\1_env:S3_BUCKET#" config_test.yml
# Get dependencies # Get dependencies
pip install -q -r requirements.txt pip install -q -r requirements.txt
@ -20,7 +22,6 @@ pip install -q tox
# Run registry tests # Run registry tests
tox || exit 1 tox || exit 1
export PYTHONPATH=$(pwd)/docker-registry
python -m unittest discover -p s3.py -s test || exit 1 python -m unittest discover -p s3.py -s test || exit 1
python -m unittest discover -p workflow.py -s test python -m unittest discover -p workflow.py -s test

View File

@ -1,20 +1,19 @@
# VERSION: 1.2 # VERSION: 1.6
# DOCKER-VERSION 0.6.3 # DOCKER-VERSION 0.6.6
# AUTHOR: Daniel Mizyrycki <daniel@dotcloud.com> # AUTHOR: Daniel Mizyrycki <daniel@docker.com>
# DESCRIPTION: Build docker nightly release using Docker in Docker. # DESCRIPTION: Build docker nightly release using Docker in Docker.
# REFERENCES: This code reuses the excellent implementation of docker in docker # REFERENCES: This code reuses the excellent implementation of docker in docker
# made by Jerome Petazzoni. https://github.com/jpetazzo/dind # made by Jerome Petazzoni. https://github.com/jpetazzo/dind
# COMMENTS: # COMMENTS:
# release_credentials.json is a base64 json encoded file containing: # release_credentials.json is a base64 json encoded file containing:
# { "AWS_ACCESS_KEY": "Test_docker_AWS_S3_bucket_id", # { "AWS_ACCESS_KEY": "Test_docker_AWS_S3_bucket_id",
# "AWS_SECRET_KEY='Test_docker_AWS_S3_bucket_key' # "AWS_SECRET_KEY": "Test_docker_AWS_S3_bucket_key",
# "GPG_PASSPHRASE='Test_docker_GPG_passphrase_signature' # "GPG_PASSPHRASE": "Test_docker_GPG_passphrase_signature" }
# "INDEX_AUTH='Encripted_index_authentication' }
# TO_BUILD: docker build -t dockerbuilder . # TO_BUILD: docker build -t dockerbuilder .
# TO_RELEASE: docker run -i -t -privileged -e AWS_S3_BUCKET="test.docker.io" dockerbuilder # TO_RELEASE: docker run -i -t -privileged -e AWS_S3_BUCKET="test.docker.io" dockerbuilder hack/dind dockerbuild.sh
from docker from docker
maintainer Daniel Mizyrycki <daniel@dotcloud.com> maintainer Daniel Mizyrycki <daniel@docker.com>
# Add docker dependencies and downloading packages # Add docker dependencies and downloading packages
run echo 'deb http://archive.ubuntu.com/ubuntu precise main universe' > /etc/apt/sources.list run echo 'deb http://archive.ubuntu.com/ubuntu precise main universe' > /etc/apt/sources.list
@ -24,11 +23,8 @@ run apt-get update; apt-get install -y -q wget python2.7
run wget -q -O /usr/bin/docker http://get.docker.io/builds/Linux/x86_64/docker-latest; chmod +x /usr/bin/docker run wget -q -O /usr/bin/docker http://get.docker.io/builds/Linux/x86_64/docker-latest; chmod +x /usr/bin/docker
# Add proto docker builder # Add proto docker builder
add ./dockerbuild /usr/bin/dockerbuild add ./dockerbuild.sh /usr/bin/dockerbuild.sh
run chmod +x /usr/bin/dockerbuild run chmod +x /usr/bin/dockerbuild.sh
# Add release credentials # Add release credentials
add ./release_credentials.json /root/release_credentials.json add ./release_credentials.json /root/release_credentials.json
# Launch build process in a container
cmd dockerbuild

View File

@ -1,50 +0,0 @@
#!/bin/bash
# Variables AWS_ACCESS_KEY, AWS_SECRET_KEY, PG_PASSPHRASE and INDEX_AUTH
# are decoded from /root/release_credentials.json
# Variable AWS_S3_BUCKET is passed to the environment from docker run -e
# Enable debugging
set -x
# Fetch docker master branch
rm -rf /go/src/github.com/dotcloud/docker
cd /
git clone -q http://github.com/dotcloud/docker /go/src/github.com/dotcloud/docker
cd /go/src/github.com/dotcloud/docker
# Launch docker daemon using dind inside the container
./hack/dind /usr/bin/docker -d &
sleep 5
# Add an uncommitted change to generate a timestamped release
date > timestamp
# Build the docker package using /Dockerfile
docker build -t docker .
# Run Docker unittests binary and Ubuntu package
docker run -privileged docker hack/make.sh
exit_status=$?
# Display load if test fails
if [ $exit_status -eq 1 ] ; then
uptime; echo; free
exit 1
fi
# Commit binary and ubuntu bundles for release
docker commit -run '{"Env": ["PATH=/bin:/sbin:/usr/bin:/usr/sbin:/usr/local/bin"], "WorkingDir": "/go/src/github.com/dotcloud/docker"}' $(docker ps -l -q) release
# Turn debug off to load credentials from the environment
set +x
eval $(cat /root/release_credentials.json | python -c '
import sys,json,base64;
d=json.loads(base64.b64decode(sys.stdin.read()));
exec("""for k in d: print "export {0}=\\"{1}\\"".format(k,d[k])""")')
set -x
# Push docker nightly
echo docker run -i -t -privileged -e AWS_S3_BUCKET=$AWS_S3_BUCKET -e AWS_ACCESS_KEY=XXXXX -e AWS_SECRET_KEY=XXXXX -e GPG_PASSPHRASE=XXXXX release hack/release.sh
set +x
docker run -i -t -privileged -e AWS_S3_BUCKET=$AWS_S3_BUCKET -e AWS_ACCESS_KEY=$AWS_ACCESS_KEY -e AWS_SECRET_KEY=$AWS_SECRET_KEY -e GPG_PASSPHRASE=$GPG_PASSPHRASE release hack/release.sh

View File

@ -0,0 +1,40 @@
#!/bin/bash
# Variables AWS_ACCESS_KEY, AWS_SECRET_KEY and PG_PASSPHRASE are decoded
# from /root/release_credentials.json
# Variable AWS_S3_BUCKET is passed to the environment from docker run -e
# Turn debug off to load credentials from the environment
set +x
eval $(cat /root/release_credentials.json | python -c '
import sys,json,base64;
d=json.loads(base64.b64decode(sys.stdin.read()));
exec("""for k in d: print "export {0}=\\"{1}\\"".format(k,d[k])""")')
# Fetch docker master branch
set -x
cd /
rm -rf /go
git clone -q -b master http://github.com/dotcloud/docker /go/src/github.com/dotcloud/docker
cd /go/src/github.com/dotcloud/docker
# Launch docker daemon using dind inside the container
/usr/bin/docker version
/usr/bin/docker -d &
sleep 5
# Build Docker release container
docker build -t docker .
# Test docker and if everything works well, release
echo docker run -i -t -privileged -e AWS_S3_BUCKET=$AWS_S3_BUCKET -e AWS_ACCESS_KEY=XXXXX -e AWS_SECRET_KEY=XXXXX -e GPG_PASSPHRASE=XXXXX docker hack/release.sh
set +x
docker run -privileged -i -t -e AWS_S3_BUCKET=$AWS_S3_BUCKET -e AWS_ACCESS_KEY=$AWS_ACCESS_KEY -e AWS_SECRET_KEY=$AWS_SECRET_KEY -e GPG_PASSPHRASE=$GPG_PASSPHRASE docker hack/release.sh
exit_status=$?
# Display load if test fails
set -x
if [ $exit_status -ne 0 ] ; then
uptime; echo; free
exit 1
fi

View File

@ -1 +0,0 @@
eyAiQVdTX0FDQ0VTU19LRVkiOiAiIiwKICAiQVdTX1NFQ1JFVF9LRVkiOiAiIiwKICAiR1BHX1BBU1NQSFJBU0UiOiAiIiwKICAiSU5ERVhfQVVUSCI6ICIiIH0=

View File

@ -0,0 +1,18 @@
# VERSION: 0.1
# DOCKER-VERSION 0.6.4
# AUTHOR: Daniel Mizyrycki <daniel@dotcloud.com>
# DESCRIPTION: Docker registry coverage
# COMMENTS: Add registry coverage into the docker-ci image
# TO_BUILD: docker build -t registry_coverage .
# TO_RUN: docker run registry_coverage
from docker-ci
maintainer Daniel Mizyrycki <daniel@dotcloud.com>
# Add registry_coverager.sh and dependencies
run pip install coverage flask pyyaml requests simplejson python-glanceclient \
blinker redis boto gevent rsa mock
add registry_coverage.sh /usr/bin/registry_coverage.sh
run chmod +x /usr/bin/registry_coverage.sh
cmd "/usr/bin/registry_coverage.sh"

View File

@ -0,0 +1,18 @@
#!/bin/bash
set -x
# Setup the environment
REGISTRY_PATH=/data/docker-registry
export SETTINGS_FLAVOR=test
export DOCKER_REGISTRY_CONFIG=config_test.yml
export PYTHONPATH=$REGISTRY_PATH/test
# Fetch latest docker-registry master
rm -rf $REGISTRY_PATH
git clone https://github.com/dotcloud/docker-registry -b master $REGISTRY_PATH
cd $REGISTRY_PATH
# Generate coverage
coverage run -m unittest discover test || exit 1
coverage report --include='./*' --omit='./test/*'

View File

@ -34,7 +34,7 @@ env['DOCKER_CI_KEY'] = open(env['DOCKER_CI_KEY_PATH']).read()
DROPLET_NAME = env.get('DROPLET_NAME','report') DROPLET_NAME = env.get('DROPLET_NAME','report')
TIMEOUT = 120 # Seconds before timeout droplet creation TIMEOUT = 120 # Seconds before timeout droplet creation
IMAGE_ID = 894856 # Docker on Ubuntu 13.04 IMAGE_ID = 1004145 # Docker on Ubuntu 13.04
REGION_ID = 4 # New York 2 REGION_ID = 4 # New York 2
SIZE_ID = 66 # memory 512MB SIZE_ID = 66 # memory 512MB
DO_IMAGE_USER = 'root' # Image user on Digital Ocean DO_IMAGE_USER = 'root' # Image user on Digital Ocean

View File

@ -12,7 +12,7 @@ type NameChecker interface {
var ( var (
colors = [...]string{"white", "silver", "gray", "black", "blue", "green", "cyan", "yellow", "gold", "orange", "brown", "red", "violet", "pink", "magenta", "purple", "maroon", "crimson", "plum", "fuchsia", "lavender", "slate", "navy", "azure", "aqua", "olive", "teal", "lime", "beige", "tan", "sienna"} colors = [...]string{"white", "silver", "gray", "black", "blue", "green", "cyan", "yellow", "gold", "orange", "brown", "red", "violet", "pink", "magenta", "purple", "maroon", "crimson", "plum", "fuchsia", "lavender", "slate", "navy", "azure", "aqua", "olive", "teal", "lime", "beige", "tan", "sienna"}
animals = [...]string{"ant", "bear", "bird", "cat", "chicken", "cow", "deer", "dog", "donkey", "duck", "fish", "fox", "frog", "horse", "kangaroo", "koala", "lemur", "lion", "lizard", "monkey", "octopus", "pig", "shark", "sheep", "sloth", "spider", "squirrel", "tiger", "toad", "weasel", "whale", "wolf"} animals = [...]string{"ant", "bear", "bird", "cat", "chicken", "cow", "deer", "dog", "donkey", "duck", "fish", "fox", "frog", "horse", "kangaroo", "koala", "lemur", "lion", "lizard", "monkey", "octopus", "pig", "shark", "sheep", "sloth", "spider", "squirrel", "tiger", "toad", "weasel", "whale", "wolf"}
) )
func GenerateRandomName(checker NameChecker) (string, error) { func GenerateRandomName(checker NameChecker) (string, error) {

View File

@ -9,7 +9,6 @@ func NetworkGetRoutes() ([]*net.IPNet, error) {
return nil, fmt.Errorf("Not implemented") return nil, fmt.Errorf("Not implemented")
} }
func NetworkLinkAdd(name string, linkType string) error { func NetworkLinkAdd(name string, linkType string) error {
return fmt.Errorf("Not implemented") return fmt.Errorf("Not implemented")
} }
@ -18,7 +17,6 @@ func NetworkLinkUp(iface *net.Interface) error {
return fmt.Errorf("Not implemented") return fmt.Errorf("Not implemented")
} }
func NetworkLinkAddIp(iface *net.Interface, ip net.IP, ipNet *net.IPNet) error { func NetworkLinkAddIp(iface *net.Interface, ip net.IP, ipNet *net.IPNet) error {
return fmt.Errorf("Not implemented") return fmt.Errorf("Not implemented")
} }

View File

@ -286,13 +286,12 @@ func (runtime *Runtime) restore() error {
// Any containers that are left over do not exist in the graph // Any containers that are left over do not exist in the graph
for _, container := range containers { for _, container := range containers {
// Try to set the default name for a container if it exists prior to links // Try to set the default name for a container if it exists prior to links
name, err := generateRandomName(runtime) container.Name, err = generateRandomName(runtime)
if err != nil { if err != nil {
container.Name = container.ShortID() container.Name = container.ShortID()
} }
container.Name = name
if _, err := runtime.containerGraph.Set(name, container.ID); err != nil { if _, err := runtime.containerGraph.Set(container.Name, container.ID); err != nil {
utils.Debugf("Setting default id - %s", err) utils.Debugf("Setting default id - %s", err)
} }
register(container) register(container)

View File

@ -89,6 +89,15 @@ func MergeConfig(userConf, imageConf *Config) error {
} }
if userConf.ExposedPorts == nil || len(userConf.ExposedPorts) == 0 { if userConf.ExposedPorts == nil || len(userConf.ExposedPorts) == 0 {
userConf.ExposedPorts = imageConf.ExposedPorts userConf.ExposedPorts = imageConf.ExposedPorts
} else if imageConf.ExposedPorts != nil {
if userConf.ExposedPorts == nil {
userConf.ExposedPorts = make(map[Port]struct{})
}
for port := range imageConf.ExposedPorts {
if _, exists := userConf.ExposedPorts[port]; !exists {
userConf.ExposedPorts[port] = struct{}{}
}
}
} }
if userConf.PortSpecs != nil && len(userConf.PortSpecs) > 0 { if userConf.PortSpecs != nil && len(userConf.PortSpecs) > 0 {

View File

@ -1,10 +1,10 @@
package utils package utils
import ( import (
"os"
"fmt" "fmt"
"io/ioutil" "io/ioutil"
"log" "log"
"os"
"strconv" "strconv"
) )

View File

@ -1,9 +1,9 @@
package utils package utils
import ( import (
"io"
"crypto/rand" "crypto/rand"
"encoding/hex" "encoding/hex"
"io"
) )
func RandomString() string { func RandomString() string {

View File

@ -15,8 +15,8 @@ import (
"os" "os"
"os/exec" "os/exec"
"path/filepath" "path/filepath"
"runtime"
"regexp" "regexp"
"runtime"
"strconv" "strconv"
"strings" "strings"
"sync" "sync"
@ -904,7 +904,7 @@ func StripComments(input []byte, commentMarker []byte) []byte {
return output return output
} }
// GetNameserversAsCIDR returns nameservers (if any) listed in // GetNameserversAsCIDR returns nameservers (if any) listed in
// /etc/resolv.conf as CIDR blocks (e.g., "1.2.3.4/32") // /etc/resolv.conf as CIDR blocks (e.g., "1.2.3.4/32")
// This function's output is intended for net.ParseCIDR // This function's output is intended for net.ParseCIDR
func GetNameserversAsCIDR(resolvConf []byte) []string { func GetNameserversAsCIDR(resolvConf []byte) []string {

View File

@ -453,20 +453,20 @@ search example.com`: {"1.2.3.4/32", "4.3.2.1/32"},
`search example.com`: {}, `search example.com`: {},
`nameserver 1.2.3.4 `nameserver 1.2.3.4
search example.com search example.com
nameserver 4.3.2.1`: []string{"1.2.3.4/32", "4.3.2.1/32"}, nameserver 4.3.2.1`: {"1.2.3.4/32", "4.3.2.1/32"},
``: []string{}, ``: {},
` nameserver 1.2.3.4 `: []string{"1.2.3.4/32"}, ` nameserver 1.2.3.4 `: {"1.2.3.4/32"},
`search example.com `search example.com
nameserver 1.2.3.4 nameserver 1.2.3.4
#nameserver 4.3.2.1`: []string{"1.2.3.4/32"}, #nameserver 4.3.2.1`: {"1.2.3.4/32"},
`search example.com `search example.com
nameserver 1.2.3.4 # not 4.3.2.1`: []string{"1.2.3.4/32"}, nameserver 1.2.3.4 # not 4.3.2.1`: {"1.2.3.4/32"},
} { } {
test := GetNameserversAsCIDR([]byte(resolv)) test := GetNameserversAsCIDR([]byte(resolv))
if !StrSlicesEqual(test, result) { if !StrSlicesEqual(test, result) {
t.Fatalf("Wrong nameserver string {%s} should be %v. Input: %s", test, result, resolv) t.Fatalf("Wrong nameserver string {%s} should be %v. Input: %s", test, result, resolv)
} }
} }
} }
func StrSlicesEqual(a, b []string) bool { func StrSlicesEqual(a, b []string) bool {

View File

@ -67,7 +67,7 @@ func newTestRuntime(prefix string) (runtime *Runtime, err error) {
} }
config := &DaemonConfig{ config := &DaemonConfig{
Root: root, Root: root,
AutoRestart: false, AutoRestart: false,
} }
runtime, err = NewRuntimeFromDirectory(config) runtime, err = NewRuntimeFromDirectory(config)
@ -247,7 +247,9 @@ func TestMergeConfig(t *testing.T) {
Volumes: volumesUser, Volumes: volumesUser,
} }
MergeConfig(configUser, configImage) if err := MergeConfig(configUser, configImage); err != nil {
t.Error(err)
}
if len(configUser.Dns) != 3 { if len(configUser.Dns) != 3 {
t.Fatalf("Expected 3 dns, 1.1.1.1, 2.2.2.2 and 3.3.3.3, found %d", len(configUser.Dns)) t.Fatalf("Expected 3 dns, 1.1.1.1, 2.2.2.2 and 3.3.3.3, found %d", len(configUser.Dns))
@ -259,7 +261,7 @@ func TestMergeConfig(t *testing.T) {
} }
if len(configUser.ExposedPorts) != 3 { if len(configUser.ExposedPorts) != 3 {
t.Fatalf("Expected 3 portSpecs, 1111, 2222 and 3333, found %d", len(configUser.PortSpecs)) t.Fatalf("Expected 3 ExposedPorts, 1111, 2222 and 3333, found %d", len(configUser.ExposedPorts))
} }
for portSpecs := range configUser.ExposedPorts { for portSpecs := range configUser.ExposedPorts {
if portSpecs.Port() != "1111" && portSpecs.Port() != "2222" && portSpecs.Port() != "3333" { if portSpecs.Port() != "1111" && portSpecs.Port() != "2222" && portSpecs.Port() != "3333" {
@ -287,6 +289,28 @@ func TestMergeConfig(t *testing.T) {
if configUser.VolumesFrom != "1111" { if configUser.VolumesFrom != "1111" {
t.Fatalf("Expected VolumesFrom to be 1111, found %s", configUser.VolumesFrom) t.Fatalf("Expected VolumesFrom to be 1111, found %s", configUser.VolumesFrom)
} }
ports, _, err := parsePortSpecs([]string{"0000"})
if err != nil {
t.Error(err)
}
configImage2 := &Config{
ExposedPorts: ports,
}
if err := MergeConfig(configUser, configImage2); err != nil {
t.Error(err)
}
if len(configUser.ExposedPorts) != 4 {
t.Fatalf("Expected 4 ExposedPorts, 0000, 1111, 2222 and 3333, found %d", len(configUser.ExposedPorts))
}
for portSpecs := range configUser.ExposedPorts {
if portSpecs.Port() != "0000" && portSpecs.Port() != "1111" && portSpecs.Port() != "2222" && portSpecs.Port() != "3333" {
t.Fatalf("Expected 0000 or 1111 or 2222 or 3333, found %s", portSpecs)
}
}
} }
func TestParseLxcConfOpt(t *testing.T) { func TestParseLxcConfOpt(t *testing.T) {