diff --git a/api.go b/api.go index 60c4d68327..dbaa2ae7de 100644 --- a/api.go +++ b/api.go @@ -930,7 +930,7 @@ func postBuild(srv *Server, version float64, w http.ResponseWriter, r *http.Requ if err != nil { return err } - c, err := mkBuildContext(string(dockerFile), nil) + c, err := MkBuildContext(string(dockerFile), nil) if err != nil { return err } @@ -1108,6 +1108,20 @@ func createRouter(srv *Server, logging bool) (*mux.Router, error) { return r, nil } +// ServeRequest processes a single http request to the docker remote api. +// FIXME: refactor this to be part of Server and not require re-creating a new +// router each time. This requires first moving ListenAndServe into Server. +func ServeRequest(srv *Server, apiversion float64, w http.ResponseWriter, req *http.Request) error { + router, err := createRouter(srv, false) + if err != nil { + return err + } + // Insert APIVERSION into the request as a convenience + req.URL.Path = fmt.Sprintf("/v%g%s", apiversion, req.URL.Path) + router.ServeHTTP(w, req) + return nil +} + func ListenAndServe(proto, addr string, srv *Server, logging bool) error { log.Printf("Listening for HTTP on %s (%s)\n", addr, proto) diff --git a/api_unit_tests.go b/api_unit_tests.go new file mode 100644 index 0000000000..36996c6adf --- /dev/null +++ b/api_unit_tests.go @@ -0,0 +1,20 @@ +package docker + +import ( + "testing" +) + +func TestJsonContentType(t *testing.T) { + if !matchesContentType("application/json", "application/json") { + t.Fail() + } + + if !matchesContentType("application/json; charset=utf-8", "application/json") { + t.Fail() + } + + if matchesContentType("dockerapplication/json", "application/json") { + t.Fail() + } +} + diff --git a/auth/auth_test.go b/auth/auth_test.go index 5dc634a719..5f2d3b85fd 100644 --- a/auth/auth_test.go +++ b/auth/auth_test.go @@ -1,11 +1,8 @@ package auth import ( - "crypto/rand" - "encoding/hex" "io/ioutil" "os" - "strings" "testing" ) @@ -29,52 +26,6 @@ func TestEncodeAuth(t *testing.T) { } } -func TestLogin(t *testing.T) { - os.Setenv("DOCKER_INDEX_URL", "https://indexstaging-docker.dotcloud.com") - defer os.Setenv("DOCKER_INDEX_URL", "") - authConfig := &AuthConfig{Username: "unittester", Password: "surlautrerivejetattendrai", Email: "noise+unittester@dotcloud.com"} - status, err := Login(authConfig, nil) - if err != nil { - t.Fatal(err) - } - if status != "Login Succeeded" { - t.Fatalf("Expected status \"Login Succeeded\", found \"%s\" instead", status) - } -} - -func TestCreateAccount(t *testing.T) { - os.Setenv("DOCKER_INDEX_URL", "https://indexstaging-docker.dotcloud.com") - defer os.Setenv("DOCKER_INDEX_URL", "") - tokenBuffer := make([]byte, 16) - _, err := rand.Read(tokenBuffer) - if err != nil { - t.Fatal(err) - } - token := hex.EncodeToString(tokenBuffer)[:12] - username := "ut" + token - authConfig := &AuthConfig{Username: username, Password: "test42", Email: "docker-ut+" + token + "@example.com"} - status, err := Login(authConfig, nil) - if err != nil { - t.Fatal(err) - } - expectedStatus := "Account created. Please use the confirmation link we sent" + - " to your e-mail to activate it." 
- if status != expectedStatus { - t.Fatalf("Expected status: \"%s\", found \"%s\" instead.", expectedStatus, status) - } - - status, err = Login(authConfig, nil) - if err == nil { - t.Fatalf("Expected error but found nil instead") - } - - expectedError := "Login: Account is not Active" - - if !strings.Contains(err.Error(), expectedError) { - t.Fatalf("Expected message \"%s\" but found \"%s\" instead", expectedError, err) - } -} - func setupTempConfigFile() (*ConfigFile, error) { root, err := ioutil.TempDir("", "docker-test-auth") if err != nil { diff --git a/commands.go b/commands.go index 9d144d8aeb..710a311c08 100644 --- a/commands.go +++ b/commands.go @@ -135,7 +135,7 @@ func (cli *DockerCli) CmdInsert(args ...string) error { // mkBuildContext returns an archive of an empty context with the contents // of `dockerfile` at the path ./Dockerfile -func mkBuildContext(dockerfile string, files [][2]string) (archive.Archive, error) { +func MkBuildContext(dockerfile string, files [][2]string) (archive.Archive, error) { buf := new(bytes.Buffer) tw := tar.NewWriter(buf) files = append(files, [2]string{"Dockerfile", dockerfile}) @@ -185,7 +185,7 @@ func (cli *DockerCli) CmdBuild(args ...string) error { if err != nil { return err } - context, err = mkBuildContext(string(dockerfile), nil) + context, err = MkBuildContext(string(dockerfile), nil) } else if utils.IsURL(cmd.Arg(0)) || utils.IsGIT(cmd.Arg(0)) { isRemote = true } else { diff --git a/config_test.go b/config_test.go new file mode 100644 index 0000000000..31c961135a --- /dev/null +++ b/config_test.go @@ -0,0 +1,149 @@ +package docker + +import ( + "testing" +) + +func TestCompareConfig(t *testing.T) { + volumes1 := make(map[string]struct{}) + volumes1["/test1"] = struct{}{} + config1 := Config{ + Dns: []string{"1.1.1.1", "2.2.2.2"}, + PortSpecs: []string{"1111:1111", "2222:2222"}, + Env: []string{"VAR1=1", "VAR2=2"}, + VolumesFrom: "11111111", + Volumes: volumes1, + } + config2 := Config{ + Dns: []string{"0.0.0.0", "2.2.2.2"}, + PortSpecs: []string{"1111:1111", "2222:2222"}, + Env: []string{"VAR1=1", "VAR2=2"}, + VolumesFrom: "11111111", + Volumes: volumes1, + } + config3 := Config{ + Dns: []string{"1.1.1.1", "2.2.2.2"}, + PortSpecs: []string{"0000:0000", "2222:2222"}, + Env: []string{"VAR1=1", "VAR2=2"}, + VolumesFrom: "11111111", + Volumes: volumes1, + } + config4 := Config{ + Dns: []string{"1.1.1.1", "2.2.2.2"}, + PortSpecs: []string{"0000:0000", "2222:2222"}, + Env: []string{"VAR1=1", "VAR2=2"}, + VolumesFrom: "22222222", + Volumes: volumes1, + } + volumes2 := make(map[string]struct{}) + volumes2["/test2"] = struct{}{} + config5 := Config{ + Dns: []string{"1.1.1.1", "2.2.2.2"}, + PortSpecs: []string{"0000:0000", "2222:2222"}, + Env: []string{"VAR1=1", "VAR2=2"}, + VolumesFrom: "11111111", + Volumes: volumes2, + } + if CompareConfig(&config1, &config2) { + t.Fatalf("CompareConfig should return false, Dns are different") + } + if CompareConfig(&config1, &config3) { + t.Fatalf("CompareConfig should return false, PortSpecs are different") + } + if CompareConfig(&config1, &config4) { + t.Fatalf("CompareConfig should return false, VolumesFrom are different") + } + if CompareConfig(&config1, &config5) { + t.Fatalf("CompareConfig should return false, Volumes are different") + } + if !CompareConfig(&config1, &config1) { + t.Fatalf("CompareConfig should return true") + } +} + +func TestMergeConfig(t *testing.T) { + volumesImage := make(map[string]struct{}) + volumesImage["/test1"] = struct{}{} + volumesImage["/test2"] = struct{}{} + configImage := 
&Config{ + Dns: []string{"1.1.1.1", "2.2.2.2"}, + PortSpecs: []string{"1111:1111", "2222:2222"}, + Env: []string{"VAR1=1", "VAR2=2"}, + VolumesFrom: "1111", + Volumes: volumesImage, + } + + volumesUser := make(map[string]struct{}) + volumesUser["/test3"] = struct{}{} + configUser := &Config{ + Dns: []string{"3.3.3.3"}, + PortSpecs: []string{"3333:2222", "3333:3333"}, + Env: []string{"VAR2=3", "VAR3=3"}, + Volumes: volumesUser, + } + + if err := MergeConfig(configUser, configImage); err != nil { + t.Error(err) + } + + if len(configUser.Dns) != 3 { + t.Fatalf("Expected 3 dns, 1.1.1.1, 2.2.2.2 and 3.3.3.3, found %d", len(configUser.Dns)) + } + for _, dns := range configUser.Dns { + if dns != "1.1.1.1" && dns != "2.2.2.2" && dns != "3.3.3.3" { + t.Fatalf("Expected 1.1.1.1 or 2.2.2.2 or 3.3.3.3, found %s", dns) + } + } + + if len(configUser.ExposedPorts) != 3 { + t.Fatalf("Expected 3 ExposedPorts, 1111, 2222 and 3333, found %d", len(configUser.ExposedPorts)) + } + for portSpecs := range configUser.ExposedPorts { + if portSpecs.Port() != "1111" && portSpecs.Port() != "2222" && portSpecs.Port() != "3333" { + t.Fatalf("Expected 1111 or 2222 or 3333, found %s", portSpecs) + } + } + if len(configUser.Env) != 3 { + t.Fatalf("Expected 3 env var, VAR1=1, VAR2=3 and VAR3=3, found %d", len(configUser.Env)) + } + for _, env := range configUser.Env { + if env != "VAR1=1" && env != "VAR2=3" && env != "VAR3=3" { + t.Fatalf("Expected VAR1=1 or VAR2=3 or VAR3=3, found %s", env) + } + } + + if len(configUser.Volumes) != 3 { + t.Fatalf("Expected 3 volumes, /test1, /test2 and /test3, found %d", len(configUser.Volumes)) + } + for v := range configUser.Volumes { + if v != "/test1" && v != "/test2" && v != "/test3" { + t.Fatalf("Expected /test1 or /test2 or /test3, found %s", v) + } + } + + if configUser.VolumesFrom != "1111" { + t.Fatalf("Expected VolumesFrom to be 1111, found %s", configUser.VolumesFrom) + } + + ports, _, err := parsePortSpecs([]string{"0000"}) + if err != nil { + t.Error(err) + } + configImage2 := &Config{ + ExposedPorts: ports, + } + + if err := MergeConfig(configUser, configImage2); err != nil { + t.Error(err) + } + + if len(configUser.ExposedPorts) != 4 { + t.Fatalf("Expected 4 ExposedPorts, 0000, 1111, 2222 and 3333, found %d", len(configUser.ExposedPorts)) + } + for portSpecs := range configUser.ExposedPorts { + if portSpecs.Port() != "0000" && portSpecs.Port() != "1111" && portSpecs.Port() != "2222" && portSpecs.Port() != "3333" { + t.Fatalf("Expected 0000 or 1111 or 2222 or 3333, found %s", portSpecs) + } + } + +} diff --git a/container_unit_test.go b/container_unit_test.go new file mode 100644 index 0000000000..679ff57e73 --- /dev/null +++ b/container_unit_test.go @@ -0,0 +1,161 @@ +package docker + +import ( + "testing" +) + +func TestParseLxcConfOpt(t *testing.T) { + opts := []string{"lxc.utsname=docker", "lxc.utsname = docker "} + + for _, o := range opts { + k, v, err := parseLxcOpt(o) + if err != nil { + t.FailNow() + } + if k != "lxc.utsname" { + t.Fail() + } + if v != "docker" { + t.Fail() + } + } +} + +func TestParseNetworkOptsPrivateOnly(t *testing.T) { + ports, bindings, err := parsePortSpecs([]string{"192.168.1.100::80"}) + if err != nil { + t.Fatal(err) + } + if len(ports) != 1 { + t.Logf("Expected 1 got %d", len(ports)) + t.FailNow() + } + if len(bindings) != 1 { + t.Logf("Expected 1 got %d", len(bindings)) + t.FailNow() + } + for k := range ports { + if k.Proto() != "tcp" { + t.Logf("Expected tcp got %s", k.Proto()) + t.Fail() + } + if k.Port() != "80" { + t.Logf("Expected 
80 got %s", k.Port()) + t.Fail() + } + b, exists := bindings[k] + if !exists { + t.Log("Binding does not exist") + t.FailNow() + } + if len(b) != 1 { + t.Logf("Expected 1 got %d", len(b)) + t.FailNow() + } + s := b[0] + if s.HostPort != "" { + t.Logf("Expected \"\" got %s", s.HostPort) + t.Fail() + } + if s.HostIp != "192.168.1.100" { + t.Fail() + } + } +} + +func TestParseNetworkOptsPublic(t *testing.T) { + ports, bindings, err := parsePortSpecs([]string{"192.168.1.100:8080:80"}) + if err != nil { + t.Fatal(err) + } + if len(ports) != 1 { + t.Logf("Expected 1 got %d", len(ports)) + t.FailNow() + } + if len(bindings) != 1 { + t.Logf("Expected 1 got %d", len(bindings)) + t.FailNow() + } + for k := range ports { + if k.Proto() != "tcp" { + t.Logf("Expected tcp got %s", k.Proto()) + t.Fail() + } + if k.Port() != "80" { + t.Logf("Expected 80 got %s", k.Port()) + t.Fail() + } + b, exists := bindings[k] + if !exists { + t.Log("Binding does not exist") + t.FailNow() + } + if len(b) != 1 { + t.Logf("Expected 1 got %d", len(b)) + t.FailNow() + } + s := b[0] + if s.HostPort != "8080" { + t.Logf("Expected 8080 got %s", s.HostPort) + t.Fail() + } + if s.HostIp != "192.168.1.100" { + t.Fail() + } + } +} + +func TestParseNetworkOptsUdp(t *testing.T) { + ports, bindings, err := parsePortSpecs([]string{"192.168.1.100::6000/udp"}) + if err != nil { + t.Fatal(err) + } + if len(ports) != 1 { + t.Logf("Expected 1 got %d", len(ports)) + t.FailNow() + } + if len(bindings) != 1 { + t.Logf("Expected 1 got %d", len(bindings)) + t.FailNow() + } + for k := range ports { + if k.Proto() != "udp" { + t.Logf("Expected udp got %s", k.Proto()) + t.Fail() + } + if k.Port() != "6000" { + t.Logf("Expected 6000 got %s", k.Port()) + t.Fail() + } + b, exists := bindings[k] + if !exists { + t.Log("Binding does not exist") + t.FailNow() + } + if len(b) != 1 { + t.Logf("Expected 1 got %d", len(b)) + t.FailNow() + } + s := b[0] + if s.HostPort != "" { + t.Logf("Expected \"\" got %s", s.HostPort) + t.Fail() + } + if s.HostIp != "192.168.1.100" { + t.Fail() + } + } +} + +func TestGetFullName(t *testing.T) { + name, err := getFullName("testing") + if err != nil { + t.Fatal(err) + } + if name != "/testing" { + t.Fatalf("Expected /testing got %s", name) + } + if _, err := getFullName(""); err == nil { + t.Fatal("Error should not be nil") + } +} diff --git a/engine/job.go b/engine/job.go index c4a2c3ef52..3ccaa8d1a0 100644 --- a/engine/job.go +++ b/engine/job.go @@ -214,7 +214,7 @@ func (job *Job) GetenvList(key string) []string { return l } -func (job *Job) SetenvList(key string, value []string) error { +func (job *Job) SetenvJson(key string, value interface{}) error { sval, err := json.Marshal(value) if err != nil { return err @@ -223,6 +223,10 @@ func (job *Job) SetenvList(key string, value []string) error { return nil } +func (job *Job) SetenvList(key string, value []string) error { + return job.SetenvJson(key, value) +} + func (job *Job) Setenv(key, value string) { job.env = append(job.env, key+"="+value) } diff --git a/graph_test.go b/graph_test.go index 1102129aba..f08752a192 100644 --- a/graph_test.go +++ b/graph_test.go @@ -9,7 +9,6 @@ import ( "io" "io/ioutil" "os" - "path" "testing" "time" ) @@ -121,41 +120,6 @@ func TestRegister(t *testing.T) { } } -func TestMount(t *testing.T) { - graph := tempGraph(t) - defer os.RemoveAll(graph.Root) - archive, err := fakeTar() - if err != nil { - t.Fatal(err) - } - image, err := graph.Create(archive, nil, "Testing", "", nil) - if err != nil { - t.Fatal(err) - } - tmp, err := 
ioutil.TempDir("", "docker-test-graph-mount-") - if err != nil { - t.Fatal(err) - } - defer os.RemoveAll(tmp) - rootfs := path.Join(tmp, "rootfs") - if err := os.MkdirAll(rootfs, 0700); err != nil { - t.Fatal(err) - } - rw := path.Join(tmp, "rw") - if err := os.MkdirAll(rw, 0700); err != nil { - t.Fatal(err) - } - if err := image.Mount(rootfs, rw); err != nil { - t.Fatal(err) - } - // FIXME: test for mount contents - defer func() { - if err := Unmount(rootfs); err != nil { - t.Error(err) - } - }() -} - // Test that an image can be deleted by its shorthand prefix func TestDeletePrefix(t *testing.T) { graph := tempGraph(t) diff --git a/http_test.go b/http_test.go new file mode 100644 index 0000000000..b9ecd6a203 --- /dev/null +++ b/http_test.go @@ -0,0 +1,51 @@ +package docker + +import ( + "fmt" + "net/http" + "net/http/httptest" + "testing" +) + +func TestGetBoolParam(t *testing.T) { + if ret, err := getBoolParam("true"); err != nil || !ret { + t.Fatalf("true -> true, nil | got %t %s", ret, err) + } + if ret, err := getBoolParam("True"); err != nil || !ret { + t.Fatalf("True -> true, nil | got %t %s", ret, err) + } + if ret, err := getBoolParam("1"); err != nil || !ret { + t.Fatalf("1 -> true, nil | got %t %s", ret, err) + } + if ret, err := getBoolParam(""); err != nil || ret { + t.Fatalf("\"\" -> false, nil | got %t %s", ret, err) + } + if ret, err := getBoolParam("false"); err != nil || ret { + t.Fatalf("false -> false, nil | got %t %s", ret, err) + } + if ret, err := getBoolParam("0"); err != nil || ret { + t.Fatalf("0 -> false, nil | got %t %s", ret, err) + } + if ret, err := getBoolParam("faux"); err == nil || ret { + t.Fatalf("faux -> false, err | got %t %s", ret, err) + } +} + +func TesthttpError(t *testing.T) { + r := httptest.NewRecorder() + + httpError(r, fmt.Errorf("No such method")) + if r.Code != http.StatusNotFound { + t.Fatalf("Expected %d, got %d", http.StatusNotFound, r.Code) + } + + httpError(r, fmt.Errorf("This accound hasn't been activated")) + if r.Code != http.StatusForbidden { + t.Fatalf("Expected %d, got %d", http.StatusForbidden, r.Code) + } + + httpError(r, fmt.Errorf("Some error")) + if r.Code != http.StatusInternalServerError { + t.Fatalf("Expected %d, got %d", http.StatusInternalServerError, r.Code) + } +} diff --git a/api_test.go b/integration/api_test.go similarity index 55% rename from api_test.go rename to integration/api_test.go index 5794abba62..0f49bf897b 100644 --- a/api_test.go +++ b/integration/api_test.go @@ -6,101 +6,68 @@ import ( "bytes" "encoding/json" "fmt" + "github.com/dotcloud/docker" "github.com/dotcloud/docker/utils" "io" "net" "net/http" "net/http/httptest" - "os" - "path" "strings" "testing" "time" ) -func TestGetBoolParam(t *testing.T) { - if ret, err := getBoolParam("true"); err != nil || !ret { - t.Fatalf("true -> true, nil | got %t %s", ret, err) - } - if ret, err := getBoolParam("True"); err != nil || !ret { - t.Fatalf("True -> true, nil | got %t %s", ret, err) - } - if ret, err := getBoolParam("1"); err != nil || !ret { - t.Fatalf("1 -> true, nil | got %t %s", ret, err) - } - if ret, err := getBoolParam(""); err != nil || ret { - t.Fatalf("\"\" -> false, nil | got %t %s", ret, err) - } - if ret, err := getBoolParam("false"); err != nil || ret { - t.Fatalf("false -> false, nil | got %t %s", ret, err) - } - if ret, err := getBoolParam("0"); err != nil || ret { - t.Fatalf("0 -> false, nil | got %t %s", ret, err) - } - if ret, err := getBoolParam("faux"); err == nil || ret { - t.Fatalf("faux -> false, err | got %t %s", ret, err) - } -} 
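// --- Illustrative sketch, not part of the diff ---
// The engine/job.go hunk above introduces SetenvJson and turns SetenvList into a
// thin wrapper around it. A minimal, hypothetical caller is sketched below; the
// *engine.Job is assumed to be supplied by whatever created the job, and only
// the methods shown in that hunk are used.
package docker

import "github.com/dotcloud/docker/engine"

func populateJobEnv(job *engine.Job) error {
	// Plain key=value entry, unchanged by this diff.
	job.Setenv("image", "busybox")
	// A string slice is still accepted, but is now stored via its JSON encoding.
	if err := job.SetenvList("dns", []string{"1.1.1.1", "2.2.2.2"}); err != nil {
		return err
	}
	// Any JSON-marshalable value can now be stored the same way.
	return job.SetenvJson("limits", map[string]int{"Memory": 33554432})
}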
- -func TesthttpError(t *testing.T) { - r := httptest.NewRecorder() - - httpError(r, fmt.Errorf("No such method")) - if r.Code != http.StatusNotFound { - t.Fatalf("Expected %d, got %d", http.StatusNotFound, r.Code) - } - - httpError(r, fmt.Errorf("This accound hasn't been activated")) - if r.Code != http.StatusForbidden { - t.Fatalf("Expected %d, got %d", http.StatusForbidden, r.Code) - } - - httpError(r, fmt.Errorf("Some error")) - if r.Code != http.StatusInternalServerError { - t.Fatalf("Expected %d, got %d", http.StatusInternalServerError, r.Code) - } -} func TestGetVersion(t *testing.T) { + eng := NewTestEngine(t) + defer mkRuntimeFromEngine(eng, t).Nuke() + srv := mkServerFromEngine(eng, t) + var err error - runtime := mkRuntime(t) - defer nuke(runtime) - - srv := &Server{runtime: runtime} - r := httptest.NewRecorder() - if err := getVersion(srv, APIVERSION, r, nil, nil); err != nil { + req, err := http.NewRequest("GET", "/version", nil) + if err != nil { t.Fatal(err) } + // FIXME getting the version should require an actual running Server + if err := docker.ServeRequest(srv, docker.APIVERSION, r, req); err != nil { + t.Fatal(err) + } + assertHttpNotError(r, t) - v := &APIVersion{} + v := &docker.APIVersion{} if err = json.Unmarshal(r.Body.Bytes(), v); err != nil { t.Fatal(err) } - if v.Version != VERSION { - t.Errorf("Expected version %s, %s found", VERSION, v.Version) + if v.Version != docker.VERSION { + t.Errorf("Expected version %s, %s found", docker.VERSION, v.Version) } } + func TestGetInfo(t *testing.T) { - runtime := mkRuntime(t) - defer nuke(runtime) + eng := NewTestEngine(t) + defer mkRuntimeFromEngine(eng, t).Nuke() + srv := mkServerFromEngine(eng, t) - srv := &Server{runtime: runtime} - - initialImages, err := srv.runtime.graph.Map() + initialImages, err := srv.Images(false, "") if err != nil { t.Fatal(err) } - r := httptest.NewRecorder() - - if err := getInfo(srv, APIVERSION, r, nil, nil); err != nil { + req, err := http.NewRequest("GET", "/info", nil) + if err != nil { t.Fatal(err) } + r := httptest.NewRecorder() - infos := &APIInfo{} + if err := docker.ServeRequest(srv, docker.APIVERSION, r, req); err != nil { + t.Fatal(err) + } + assertHttpNotError(r, t) + + infos := &docker.APIInfo{} err = json.Unmarshal(r.Body.Bytes(), infos) if err != nil { t.Fatal(err) @@ -111,16 +78,22 @@ func TestGetInfo(t *testing.T) { } func TestGetEvents(t *testing.T) { - runtime := mkRuntime(t) + eng := NewTestEngine(t) + srv := mkServerFromEngine(eng, t) + // FIXME: we might not need runtime, why not simply nuke + // the engine? 
+ runtime := mkRuntimeFromEngine(eng, t) defer nuke(runtime) - srv := &Server{ - runtime: runtime, - events: make([]utils.JSONMessage, 0, 64), - listeners: make(map[string]chan utils.JSONMessage), - } - srv.LogEvent("fakeaction", "fakeid", "fakeimage") - srv.LogEvent("fakeaction2", "fakeid", "fakeimage") + var events []*utils.JSONMessage + for _, parts := range [][3]string{ + {"fakeaction", "fakeid", "fakeimage"}, + {"fakeaction2", "fakeid", "fakeimage"}, + } { + action, id, from := parts[0], parts[1], parts[2] + ev := srv.LogEvent(action, id, from) + events = append(events, ev) + } req, err := http.NewRequest("GET", "/events?since=1", nil) if err != nil { @@ -129,9 +102,10 @@ func TestGetEvents(t *testing.T) { r := httptest.NewRecorder() setTimeout(t, "", 500*time.Millisecond, func() { - if err := getEvents(srv, APIVERSION, r, req, nil); err != nil { + if err := docker.ServeRequest(srv, docker.APIVERSION, r, req); err != nil { t.Fatal(err) } + assertHttpNotError(r, t) }) dec := json.NewDecoder(r.Body) @@ -142,7 +116,7 @@ func TestGetEvents(t *testing.T) { } else if err != nil { t.Fatal(err) } - if jm != srv.events[i] { + if jm != *events[i] { t.Fatalf("Event received it different than expected") } } @@ -150,10 +124,9 @@ func TestGetEvents(t *testing.T) { } func TestGetImagesJSON(t *testing.T) { - runtime := mkRuntime(t) - defer nuke(runtime) - - srv := &Server{runtime: runtime} + eng := NewTestEngine(t) + defer mkRuntimeFromEngine(eng, t).Nuke() + srv := mkServerFromEngine(eng, t) // all=0 @@ -169,11 +142,12 @@ func TestGetImagesJSON(t *testing.T) { r := httptest.NewRecorder() - if err := getImagesJSON(srv, APIVERSION, r, req, nil); err != nil { + if err := docker.ServeRequest(srv, docker.APIVERSION, r, req); err != nil { t.Fatal(err) } + assertHttpNotError(r, t) - images := []APIImages{} + images := []docker.APIImages{} if err := json.Unmarshal(r.Body.Bytes(), &images); err != nil { t.Fatal(err) } @@ -206,12 +180,13 @@ func TestGetImagesJSON(t *testing.T) { if err != nil { t.Fatal(err) } - - if err := getImagesJSON(srv, APIVERSION, r2, req2, nil); err != nil { + if err := docker.ServeRequest(srv, docker.APIVERSION, r2, req2); err != nil { t.Fatal(err) } + assertHttpNotError(r2, t) - images2 := []APIImages{} + + images2 := []docker.APIImages{} if err := json.Unmarshal(r2.Body.Bytes(), &images2); err != nil { t.Fatal(err) } @@ -222,13 +197,13 @@ func TestGetImagesJSON(t *testing.T) { found = false for _, img := range images2 { - if img.ID == GetTestImage(runtime).ID { + if img.ID == unitTestImageID { found = true break } } if !found { - t.Errorf("Retrieved image Id differs, expected %s, received %+v", GetTestImage(runtime).ID, images2) + t.Errorf("Retrieved image Id differs, expected %s, received %+v", unitTestImageID, images2) } r3 := httptest.NewRecorder() @@ -239,11 +214,12 @@ func TestGetImagesJSON(t *testing.T) { t.Fatal(err) } - if err := getImagesJSON(srv, APIVERSION, r3, req3, nil); err != nil { - t.Fatal(err) - } + if err := docker.ServeRequest(srv, docker.APIVERSION, r3, req3); err != nil { + t.Fatal(err) + } + assertHttpNotError(r3, t) - images3 := []APIImages{} + images3 := []docker.APIImages{} if err := json.Unmarshal(r3.Body.Bytes(), &images3); err != nil { t.Fatal(err) } @@ -260,34 +236,32 @@ func TestGetImagesJSON(t *testing.T) { t.Fatal(err) } - err = getImagesJSON(srv, APIVERSION, r4, req4, nil) - if err == nil { - t.Fatalf("Error expected, received none") + if err := docker.ServeRequest(srv, docker.APIVERSION, r4, req4); err != nil { + t.Fatal(err) } - - if 
!strings.HasPrefix(err.Error(), "Bad parameter") { - t.Fatalf("Error should starts with \"Bad parameter\"") - } - http.Error(r4, err.Error(), http.StatusBadRequest) - + // Don't assert against HTTP error since we expect an error if r4.Code != http.StatusBadRequest { t.Fatalf("%d Bad Request expected, received %d\n", http.StatusBadRequest, r4.Code) } } func TestGetImagesHistory(t *testing.T) { - runtime := mkRuntime(t) - defer nuke(runtime) - - srv := &Server{runtime: runtime} + eng := NewTestEngine(t) + defer mkRuntimeFromEngine(eng, t).Nuke() + srv := mkServerFromEngine(eng, t) r := httptest.NewRecorder() - if err := getImagesHistory(srv, APIVERSION, r, nil, map[string]string{"name": unitTestImageName}); err != nil { + req, err := http.NewRequest("GET", fmt.Sprintf("/images/%s/history", unitTestImageName), nil) + if err != nil { t.Fatal(err) } + if err := docker.ServeRequest(srv, docker.APIVERSION, r, req); err != nil { + t.Fatal(err) + } + assertHttpNotError(r, t) - history := []APIHistory{} + history := []docker.APIHistory{} if err := json.Unmarshal(r.Body.Bytes(), &history); err != nil { t.Fatal(err) } @@ -297,17 +271,22 @@ func TestGetImagesHistory(t *testing.T) { } func TestGetImagesByName(t *testing.T) { - runtime := mkRuntime(t) - defer nuke(runtime) + eng := NewTestEngine(t) + defer mkRuntimeFromEngine(eng, t).Nuke() + srv := mkServerFromEngine(eng, t) - srv := &Server{runtime: runtime} - - r := httptest.NewRecorder() - if err := getImagesByName(srv, APIVERSION, r, nil, map[string]string{"name": unitTestImageName}); err != nil { - t.Fatal(err) + req, err := http.NewRequest("GET", "/images/"+unitTestImageName+"/json", nil) + if err != nil { + t.Fatal(err) } - img := &Image{} + r := httptest.NewRecorder() + if err := docker.ServeRequest(srv, docker.APIVERSION, r, req); err != nil { + t.Fatal(err) + } + assertHttpNotError(r, t) + + img := &docker.Image{} if err := json.Unmarshal(r.Body.Bytes(), img); err != nil { t.Fatal(err) } @@ -317,21 +296,16 @@ func TestGetImagesByName(t *testing.T) { } func TestGetContainersJSON(t *testing.T) { - runtime := mkRuntime(t) - defer nuke(runtime) + eng := NewTestEngine(t) + defer mkRuntimeFromEngine(eng, t).Nuke() + srv := mkServerFromEngine(eng, t) - srv := &Server{runtime: runtime} + beginLen := len(srv.Containers(true, false, -1, "", "")) - beginLen := runtime.containers.Len() - - container, _, err := runtime.Create(&Config{ - Image: GetTestImage(runtime).ID, + containerID := createTestContainer(eng, &docker.Config{ + Image: unitTestImageID, Cmd: []string{"echo", "test"}, - }, "") - if err != nil { - t.Fatal(err) - } - defer runtime.Destroy(container) + }, t) req, err := http.NewRequest("GET", "/containers/json?all=1", nil) if err != nil { @@ -339,48 +313,47 @@ func TestGetContainersJSON(t *testing.T) { } r := httptest.NewRecorder() - if err := getContainersJSON(srv, APIVERSION, r, req, nil); err != nil { - t.Fatal(err) - } - containers := []APIContainers{} + if err := docker.ServeRequest(srv, docker.APIVERSION, r, req); err != nil { + t.Fatal(err) + } + assertHttpNotError(r, t) + containers := []docker.APIContainers{} if err := json.Unmarshal(r.Body.Bytes(), &containers); err != nil { t.Fatal(err) } if len(containers) != beginLen+1 { t.Fatalf("Expected %d container, %d found (started with: %d)", beginLen+1, len(containers), beginLen) } - if containers[0].ID != container.ID { - t.Fatalf("Container ID mismatch. Expected: %s, received: %s\n", container.ID, containers[0].ID) + if containers[0].ID != containerID { + t.Fatalf("Container ID mismatch. 
Expected: %s, received: %s\n", containerID, containers[0].ID) } } func TestGetContainersExport(t *testing.T) { - runtime := mkRuntime(t) - defer nuke(runtime) - - srv := &Server{runtime: runtime} + eng := NewTestEngine(t) + defer mkRuntimeFromEngine(eng, t).Nuke() + srv := mkServerFromEngine(eng, t) // Create a container and remove a file - container, _, err := runtime.Create( - &Config{ - Image: GetTestImage(runtime).ID, + containerID := createTestContainer(eng, + &docker.Config{ + Image: unitTestImageID, Cmd: []string{"touch", "/test"}, }, - "", + t, ) + containerRun(eng, containerID, t) + + r := httptest.NewRecorder() + + req, err := http.NewRequest("GET", "/containers/" + containerID + "/export", nil) if err != nil { t.Fatal(err) } - defer runtime.Destroy(container) - - if err := container.Run(); err != nil { - t.Fatal(err) - } - - r := httptest.NewRecorder() - if err = getContainersExport(srv, APIVERSION, r, nil, map[string]string{"name": container.ID}); err != nil { - t.Fatal(err) - } + if err := docker.ServeRequest(srv, docker.APIVERSION, r, req); err != nil { + t.Fatal(err) + } + assertHttpNotError(r, t) if r.Code != http.StatusOK { t.Fatalf("%d OK expected, received %d\n", http.StatusOK, r.Code) @@ -406,33 +379,30 @@ func TestGetContainersExport(t *testing.T) { } func TestGetContainersChanges(t *testing.T) { - runtime := mkRuntime(t) - defer nuke(runtime) - - srv := &Server{runtime: runtime} + eng := NewTestEngine(t) + defer mkRuntimeFromEngine(eng, t).Nuke() + srv := mkServerFromEngine(eng, t) // Create a container and remove a file - container, _, err := runtime.Create( - &Config{ - Image: GetTestImage(runtime).ID, + containerID := createTestContainer(eng, + &docker.Config{ + Image: unitTestImageID, Cmd: []string{"/bin/rm", "/etc/passwd"}, }, - "", + t, ) - if err != nil { - t.Fatal(err) - } - defer runtime.Destroy(container) - - if err := container.Run(); err != nil { - t.Fatal(err) - } + containerRun(eng, containerID, t) r := httptest.NewRecorder() - if err := getContainersChanges(srv, APIVERSION, r, nil, map[string]string{"name": container.ID}); err != nil { - t.Fatal(err) + req, err := http.NewRequest("GET", "/containers/"+containerID+"/changes", nil) + if err != nil { + t.Fatal(err) } - changes := []Change{} + if err := docker.ServeRequest(srv, docker.APIVERSION, r, req); err != nil { + t.Fatal(err) + } + assertHttpNotError(r, t) + changes := []docker.Change{} if err := json.Unmarshal(r.Body.Bytes(), &changes); err != nil { t.Fatal(err) } @@ -451,64 +421,57 @@ func TestGetContainersChanges(t *testing.T) { func TestGetContainersTop(t *testing.T) { t.Skip("Fixme. Skipping test for now. 
Reported error when testing using dind: 'api_test.go:527: Expected 2 processes, found 0.'") - runtime := mkRuntime(t) - defer nuke(runtime) + eng := NewTestEngine(t) + defer mkRuntimeFromEngine(eng, t).Nuke() + srv := mkServerFromEngine(eng, t) - srv := &Server{runtime: runtime} - - container, _, err := runtime.Create( - &Config{ - Image: GetTestImage(runtime).ID, + containerID := createTestContainer(eng, + &docker.Config{ + Image: unitTestImageID, Cmd: []string{"/bin/sh", "-c", "cat"}, OpenStdin: true, }, - "", + t, ) - if err != nil { - t.Fatal(err) - } - defer runtime.Destroy(container) defer func() { // Make sure the process dies before destroying runtime - container.stdin.Close() - container.WaitTimeout(2 * time.Second) + containerKill(eng, containerID, t) + containerWait(eng, containerID, t) }() - if err := container.Start(); err != nil { - t.Fatal(err) - } + startContainer(eng, containerID, t) setTimeout(t, "Waiting for the container to be started timed out", 10*time.Second, func() { for { - if container.State.Running { + if containerRunning(eng, containerID, t) { break } time.Sleep(10 * time.Millisecond) } }) - if !container.State.Running { + if !containerRunning(eng, containerID, t) { t.Fatalf("Container should be running") } // Make sure sh spawn up cat setTimeout(t, "read/write assertion timed out", 2*time.Second, func() { - in, _ := container.StdinPipe() - out, _ := container.StdoutPipe() + in, out := containerAttach(eng, containerID, t) if err := assertPipe("hello\n", "hello", out, in, 15); err != nil { t.Fatal(err) } }) r := httptest.NewRecorder() - req, err := http.NewRequest("GET", "/"+container.ID+"/top?ps_args=u", bytes.NewReader([]byte{})) + req, err := http.NewRequest("GET", "/"+containerID+"/top?ps_args=u", bytes.NewReader([]byte{})) if err != nil { t.Fatal(err) } - if err := getContainersTop(srv, APIVERSION, r, req, map[string]string{"name": container.ID}); err != nil { - t.Fatal(err) - } - procs := APITop{} + if err := docker.ServeRequest(srv, docker.APIVERSION, r, req); err != nil { + t.Fatal(err) + } + assertHttpNotError(r, t) + procs := docker.APITop{} if err := json.Unmarshal(r.Body.Bytes(), &procs); err != nil { t.Fatal(err) } @@ -532,90 +495,83 @@ func TestGetContainersTop(t *testing.T) { } func TestGetContainersByName(t *testing.T) { - runtime := mkRuntime(t) - defer nuke(runtime) - - srv := &Server{runtime: runtime} + eng := NewTestEngine(t) + defer mkRuntimeFromEngine(eng, t).Nuke() + srv := mkServerFromEngine(eng, t) // Create a container and remove a file - container, _, err := runtime.Create( - &Config{ - Image: GetTestImage(runtime).ID, + containerID := createTestContainer(eng, + &docker.Config{ + Image: unitTestImageID, Cmd: []string{"echo", "test"}, }, - "", + t, ) - if err != nil { - t.Fatal(err) - } - defer runtime.Destroy(container) r := httptest.NewRecorder() - if err := getContainersByName(srv, APIVERSION, r, nil, map[string]string{"name": container.ID}); err != nil { - t.Fatal(err) + req, err := http.NewRequest("GET", "/containers/"+containerID+"/json", nil) + if err != nil { + t.Fatal(err) } - outContainer := &Container{} + if err := docker.ServeRequest(srv, docker.APIVERSION, r, req); err != nil { + t.Fatal(err) + } + assertHttpNotError(r, t) + outContainer := &docker.Container{} if err := json.Unmarshal(r.Body.Bytes(), outContainer); err != nil { t.Fatal(err) } - if outContainer.ID != container.ID { - t.Fatalf("Wrong containers retrieved. 
Expected %s, received %s", container.ID, outContainer.ID) + if outContainer.ID != containerID { + t.Fatalf("Wrong containers retrieved. Expected %s, received %s", containerID, outContainer.ID) } } func TestPostCommit(t *testing.T) { - runtime := mkRuntime(t) - defer nuke(runtime) - - srv := &Server{runtime: runtime} + eng := NewTestEngine(t) + defer mkRuntimeFromEngine(eng, t).Nuke() + srv := mkServerFromEngine(eng, t) // Create a container and remove a file - container, _, err := runtime.Create( - &Config{ - Image: GetTestImage(runtime).ID, + containerID := createTestContainer(eng, + &docker.Config{ + Image: unitTestImageID, Cmd: []string{"touch", "/test"}, }, - "", + t, ) - if err != nil { - t.Fatal(err) - } - defer runtime.Destroy(container) - if err := container.Run(); err != nil { - t.Fatal(err) - } + containerRun(eng, containerID, t) - req, err := http.NewRequest("POST", "/commit?repo=testrepo&testtag=tag&container="+container.ID, bytes.NewReader([]byte{})) + req, err := http.NewRequest("POST", "/commit?repo=testrepo&testtag=tag&container="+containerID, bytes.NewReader([]byte{})) if err != nil { t.Fatal(err) } r := httptest.NewRecorder() - if err := postCommit(srv, APIVERSION, r, req, nil); err != nil { - t.Fatal(err) - } + if err := docker.ServeRequest(srv, docker.APIVERSION, r, req); err != nil { + t.Fatal(err) + } + assertHttpNotError(r, t) if r.Code != http.StatusCreated { t.Fatalf("%d Created expected, received %d\n", http.StatusCreated, r.Code) } - apiID := &APIID{} + apiID := &docker.APIID{} if err := json.Unmarshal(r.Body.Bytes(), apiID); err != nil { t.Fatal(err) } - if _, err := runtime.graph.Get(apiID.ID); err != nil { - t.Fatalf("The image has not been commited") + if _, err := srv.ImageInspect(apiID.ID); err != nil { + t.Fatalf("The image has not been committed") } } func TestPostContainersCreate(t *testing.T) { eng := NewTestEngine(t) + defer mkRuntimeFromEngine(eng, t).Nuke() srv := mkServerFromEngine(eng, t) - runtime := srv.runtime - defer nuke(runtime) - configJSON, err := json.Marshal(&Config{ - Image: GetTestImage(runtime).ID, + configJSON, err := json.Marshal(&docker.Config{ + Image: unitTestImageID, Memory: 33554432, Cmd: []string{"touch", "/test"}, }) @@ -629,150 +585,132 @@ func TestPostContainersCreate(t *testing.T) { } r := httptest.NewRecorder() - if err := postContainersCreate(srv, APIVERSION, r, req, nil); err != nil { - t.Fatal(err) - } + if err := docker.ServeRequest(srv, docker.APIVERSION, r, req); err != nil { + t.Fatal(err) + } + assertHttpNotError(r, t) if r.Code != http.StatusCreated { t.Fatalf("%d Created expected, received %d\n", http.StatusCreated, r.Code) } - apiRun := &APIRun{} + apiRun := &docker.APIRun{} if err := json.Unmarshal(r.Body.Bytes(), apiRun); err != nil { t.Fatal(err) } + containerID := apiRun.ID - container := srv.runtime.Get(apiRun.ID) - if container == nil { - t.Fatalf("Container not created") - } + containerAssertExists(eng, containerID, t) + containerRun(eng, containerID, t) - if err := container.Run(); err != nil { - t.Fatal(err) - } - - if _, err := os.Stat(path.Join(container.rwPath(), "test")); err != nil { - if os.IsNotExist(err) { - utils.Debugf("Err: %s", err) - t.Fatalf("The test file has not been created") - } - t.Fatal(err) + if !containerFileExists(eng, containerID, "test", t) { + t.Fatal("Test file was not created") } } func TestPostContainersKill(t *testing.T) { - runtime := mkRuntime(t) - defer nuke(runtime) + eng := NewTestEngine(t) + defer mkRuntimeFromEngine(eng, t).Nuke() + srv := mkServerFromEngine(eng, t) 
- srv := &Server{runtime: runtime} - - container, _, err := runtime.Create( - &Config{ - Image: GetTestImage(runtime).ID, + containerID := createTestContainer(eng, + &docker.Config{ + Image: unitTestImageID, Cmd: []string{"/bin/cat"}, OpenStdin: true, }, - "", + t, ) - if err != nil { - t.Fatal(err) - } - defer runtime.Destroy(container) - if err := container.Start(); err != nil { - t.Fatal(err) - } + startContainer(eng, containerID, t) // Give some time to the process to start - container.WaitTimeout(500 * time.Millisecond) + containerWaitTimeout(eng, containerID, t) - if !container.State.Running { + if !containerRunning(eng, containerID, t) { t.Errorf("Container should be running") } r := httptest.NewRecorder() - if err := postContainersKill(srv, APIVERSION, r, nil, map[string]string{"name": container.ID}); err != nil { - t.Fatal(err) + req, err := http.NewRequest("POST", "/containers/"+containerID+"/kill", bytes.NewReader([]byte{})) + if err != nil { + t.Fatal(err) } + if err := docker.ServeRequest(srv, docker.APIVERSION, r, req); err != nil { + t.Fatal(err) + } + assertHttpNotError(r, t) if r.Code != http.StatusNoContent { t.Fatalf("%d NO CONTENT expected, received %d\n", http.StatusNoContent, r.Code) } - if container.State.Running { + if containerRunning(eng, containerID, t) { t.Fatalf("The container hasn't been killed") } } func TestPostContainersRestart(t *testing.T) { - runtime := mkRuntime(t) - defer nuke(runtime) + eng := NewTestEngine(t) + defer mkRuntimeFromEngine(eng, t).Nuke() + srv := mkServerFromEngine(eng, t) - srv := &Server{runtime: runtime} - - container, _, err := runtime.Create( - &Config{ - Image: GetTestImage(runtime).ID, + containerID := createTestContainer(eng, + &docker.Config{ + Image: unitTestImageID, Cmd: []string{"/bin/top"}, OpenStdin: true, }, - "", + t, ) - if err != nil { - t.Fatal(err) - } - defer runtime.Destroy(container) - if err := container.Start(); err != nil { - t.Fatal(err) - } + startContainer(eng, containerID, t) // Give some time to the process to start - container.WaitTimeout(500 * time.Millisecond) + containerWaitTimeout(eng, containerID, t) - if !container.State.Running { + if !containerRunning(eng, containerID, t) { t.Errorf("Container should be running") } - req, err := http.NewRequest("POST", "/containers/"+container.ID+"/restart?t=1", bytes.NewReader([]byte{})) + req, err := http.NewRequest("POST", "/containers/"+containerID+"/restart?t=1", bytes.NewReader([]byte{})) if err != nil { t.Fatal(err) } r := httptest.NewRecorder() - if err := postContainersRestart(srv, APIVERSION, r, req, map[string]string{"name": container.ID}); err != nil { - t.Fatal(err) - } + if err := docker.ServeRequest(srv, docker.APIVERSION, r, req); err != nil { + t.Fatal(err) + } + assertHttpNotError(r, t) if r.Code != http.StatusNoContent { t.Fatalf("%d NO CONTENT expected, received %d\n", http.StatusNoContent, r.Code) } // Give some time to the process to restart - container.WaitTimeout(500 * time.Millisecond) + containerWaitTimeout(eng, containerID, t) - if !container.State.Running { + if !containerRunning(eng, containerID, t) { t.Fatalf("Container should be running") } - if err := container.Kill(); err != nil { - t.Fatal(err) - } + containerKill(eng, containerID, t) } func TestPostContainersStart(t *testing.T) { eng := NewTestEngine(t) + defer mkRuntimeFromEngine(eng, t).Nuke() srv := mkServerFromEngine(eng, t) - runtime := srv.runtime - defer nuke(runtime) - id := createTestContainer( + containerID := createTestContainer( eng, - &Config{ - Image: 
GetTestImage(runtime).ID, + &docker.Config{ + Image: unitTestImageID, Cmd: []string{"/bin/cat"}, OpenStdin: true, }, - t) + t, + ) - hostConfigJSON, err := json.Marshal(&HostConfig{}) + hostConfigJSON, err := json.Marshal(&docker.HostConfig{}) - req, err := http.NewRequest("POST", "/containers/"+id+"/start", bytes.NewReader(hostConfigJSON)) + req, err := http.NewRequest("POST", "/containers/"+containerID+"/start", bytes.NewReader(hostConfigJSON)) if err != nil { t.Fatal(err) } @@ -780,110 +718,101 @@ func TestPostContainersStart(t *testing.T) { req.Header.Set("Content-Type", "application/json") r := httptest.NewRecorder() - if err := postContainersStart(srv, APIVERSION, r, req, map[string]string{"name": id}); err != nil { - t.Fatal(err) - } + if err := docker.ServeRequest(srv, docker.APIVERSION, r, req); err != nil { + t.Fatal(err) + } + assertHttpNotError(r, t) if r.Code != http.StatusNoContent { t.Fatalf("%d NO CONTENT expected, received %d\n", http.StatusNoContent, r.Code) } - container := runtime.Get(id) - if container == nil { - t.Fatalf("Container %s was not created", id) - } + containerAssertExists(eng, containerID, t) // Give some time to the process to start // FIXME: use Wait once it's available as a job - container.WaitTimeout(500 * time.Millisecond) - if !container.State.Running { + containerWaitTimeout(eng, containerID, t) + if !containerRunning(eng, containerID, t) { t.Errorf("Container should be running") } r = httptest.NewRecorder() - if err = postContainersStart(srv, APIVERSION, r, req, map[string]string{"name": id}); err == nil { - t.Fatalf("A running container should be able to be started") - } - - if err := container.Kill(); err != nil { + if err := docker.ServeRequest(srv, docker.APIVERSION, r, req); err != nil { t.Fatal(err) } + // Starting an already started container should return an error + // FIXME: verify a precise error code. There is a possible bug here + // which causes this to return 404 even though the container exists. + assertHttpError(r, t) + containerAssertExists(eng, containerID, t) + containerKill(eng, containerID, t) } func TestPostContainersStop(t *testing.T) { - runtime := mkRuntime(t) - defer nuke(runtime) + eng := NewTestEngine(t) + defer mkRuntimeFromEngine(eng, t).Nuke() + srv := mkServerFromEngine(eng, t) - srv := &Server{runtime: runtime} - - container, _, err := runtime.Create( - &Config{ - Image: GetTestImage(runtime).ID, + containerID := createTestContainer(eng, + &docker.Config{ + Image: unitTestImageID, Cmd: []string{"/bin/top"}, OpenStdin: true, }, - "", + t, ) - if err != nil { - t.Fatal(err) - } - defer runtime.Destroy(container) - if err := container.Start(); err != nil { - t.Fatal(err) - } + startContainer(eng, containerID, t) // Give some time to the process to start - container.WaitTimeout(500 * time.Millisecond) + containerWaitTimeout(eng, containerID, t) - if !container.State.Running { + if !containerRunning(eng, containerID, t) { t.Errorf("Container should be running") } // Note: as it is a POST request, it requires a body. 
- req, err := http.NewRequest("POST", "/containers/"+container.ID+"/stop?t=1", bytes.NewReader([]byte{})) + req, err := http.NewRequest("POST", "/containers/"+containerID+"/stop?t=1", bytes.NewReader([]byte{})) if err != nil { t.Fatal(err) } r := httptest.NewRecorder() - if err := postContainersStop(srv, APIVERSION, r, req, map[string]string{"name": container.ID}); err != nil { - t.Fatal(err) - } + if err := docker.ServeRequest(srv, docker.APIVERSION, r, req); err != nil { + t.Fatal(err) + } + assertHttpNotError(r, t) if r.Code != http.StatusNoContent { t.Fatalf("%d NO CONTENT expected, received %d\n", http.StatusNoContent, r.Code) } - if container.State.Running { + if containerRunning(eng, containerID, t) { t.Fatalf("The container hasn't been stopped") } } func TestPostContainersWait(t *testing.T) { - runtime := mkRuntime(t) - defer nuke(runtime) + eng := NewTestEngine(t) + defer mkRuntimeFromEngine(eng, t).Nuke() + srv := mkServerFromEngine(eng, t) - srv := &Server{runtime: runtime} - - container, _, err := runtime.Create( - &Config{ - Image: GetTestImage(runtime).ID, + containerID := createTestContainer(eng, + &docker.Config{ + Image: unitTestImageID, Cmd: []string{"/bin/sleep", "1"}, OpenStdin: true, }, - "", + t, ) - if err != nil { - t.Fatal(err) - } - defer runtime.Destroy(container) - - if err := container.Start(); err != nil { - t.Fatal(err) - } + startContainer(eng, containerID, t) setTimeout(t, "Wait timed out", 3*time.Second, func() { r := httptest.NewRecorder() - if err := postContainersWait(srv, APIVERSION, r, nil, map[string]string{"name": container.ID}); err != nil { - t.Fatal(err) + req, err := http.NewRequest("POST", "/containers/"+containerID+"/wait", bytes.NewReader([]byte{})) + if err != nil { + t.Fatal(err) } - apiWait := &APIWait{} + if err := docker.ServeRequest(srv, docker.APIVERSION, r, req); err != nil { + t.Fatal(err) + } + assertHttpNotError(r, t) + apiWait := &docker.APIWait{} if err := json.Unmarshal(r.Body.Bytes(), apiWait); err != nil { t.Fatal(err) } @@ -892,34 +821,26 @@ func TestPostContainersWait(t *testing.T) { } }) - if container.State.Running { + if containerRunning(eng, containerID, t) { t.Fatalf("The container should be stopped after wait") } } func TestPostContainersAttach(t *testing.T) { - runtime := mkRuntime(t) - defer nuke(runtime) + eng := NewTestEngine(t) + defer mkRuntimeFromEngine(eng, t).Nuke() + srv := mkServerFromEngine(eng, t) - srv := &Server{runtime: runtime} - - container, _, err := runtime.Create( - &Config{ - Image: GetTestImage(runtime).ID, + containerID := createTestContainer(eng, + &docker.Config{ + Image: unitTestImageID, Cmd: []string{"/bin/cat"}, OpenStdin: true, }, - "", + t, ) - if err != nil { - t.Fatal(err) - } - defer runtime.Destroy(container) - // Start the process - if err := container.Start(); err != nil { - t.Fatal(err) - } + startContainer(eng, containerID, t) stdin, stdinPipe := io.Pipe() stdout, stdoutPipe := io.Pipe() @@ -927,7 +848,7 @@ func TestPostContainersAttach(t *testing.T) { // Try to avoid the timeout in destroy. 
Best effort, don't check error defer func() { closeWrap(stdin, stdinPipe, stdout, stdoutPipe) - container.Kill() + containerKill(eng, containerID, t) }() // Attach to it @@ -941,14 +862,15 @@ func TestPostContainersAttach(t *testing.T) { out: stdoutPipe, } - req, err := http.NewRequest("POST", "/containers/"+container.ID+"/attach?stream=1&stdin=1&stdout=1&stderr=1", bytes.NewReader([]byte{})) + req, err := http.NewRequest("POST", "/containers/"+containerID+"/attach?stream=1&stdin=1&stdout=1&stderr=1", bytes.NewReader([]byte{})) if err != nil { t.Fatal(err) } - if err := postContainersAttach(srv, APIVERSION, r, req, map[string]string{"name": container.ID}); err != nil { - t.Fatal(err) + if err := docker.ServeRequest(srv, docker.APIVERSION, r, req); err != nil { + t.Fatal(err) } + assertHttpNotError(r.ResponseRecorder, t) }() // Acknowledge hijack @@ -975,40 +897,29 @@ func TestPostContainersAttach(t *testing.T) { // We closed stdin, expect /bin/cat to still be running // Wait a little bit to make sure container.monitor() did his thing - err = container.WaitTimeout(500 * time.Millisecond) - if err == nil || !container.State.Running { - t.Fatalf("/bin/cat is not running after closing stdin") - } + containerWaitTimeout(eng, containerID, t) // Try to avoid the timeout in destroy. Best effort, don't check error - cStdin, _ := container.StdinPipe() + cStdin, _ := containerAttach(eng, containerID, t) cStdin.Close() - container.Wait() + containerWait(eng, containerID, t) } func TestPostContainersAttachStderr(t *testing.T) { - runtime := mkRuntime(t) - defer nuke(runtime) + eng := NewTestEngine(t) + defer mkRuntimeFromEngine(eng, t).Nuke() + srv := mkServerFromEngine(eng, t) - srv := &Server{runtime: runtime} - - container, _, err := runtime.Create( - &Config{ - Image: GetTestImage(runtime).ID, + containerID := createTestContainer(eng, + &docker.Config{ + Image: unitTestImageID, Cmd: []string{"/bin/sh", "-c", "/bin/cat >&2"}, OpenStdin: true, }, - "", + t, ) - if err != nil { - t.Fatal(err) - } - defer runtime.Destroy(container) - // Start the process - if err := container.Start(); err != nil { - t.Fatal(err) - } + startContainer(eng, containerID, t) stdin, stdinPipe := io.Pipe() stdout, stdoutPipe := io.Pipe() @@ -1016,7 +927,7 @@ func TestPostContainersAttachStderr(t *testing.T) { // Try to avoid the timeout in destroy. 
Best effort, don't check error defer func() { closeWrap(stdin, stdinPipe, stdout, stdoutPipe) - container.Kill() + containerKill(eng, containerID, t) }() // Attach to it @@ -1030,14 +941,15 @@ func TestPostContainersAttachStderr(t *testing.T) { out: stdoutPipe, } - req, err := http.NewRequest("POST", "/containers/"+container.ID+"/attach?stream=1&stdin=1&stdout=1&stderr=1", bytes.NewReader([]byte{})) + req, err := http.NewRequest("POST", "/containers/"+containerID+"/attach?stream=1&stdin=1&stdout=1&stderr=1", bytes.NewReader([]byte{})) if err != nil { t.Fatal(err) } - if err := postContainersAttach(srv, APIVERSION, r, req, map[string]string{"name": container.ID}); err != nil { + if err := docker.ServeRequest(srv, docker.APIVERSION, r, req); err != nil { t.Fatal(err) } + assertHttpNotError(r.ResponseRecorder, t) }() // Acknowledge hijack @@ -1064,104 +976,76 @@ func TestPostContainersAttachStderr(t *testing.T) { // We closed stdin, expect /bin/cat to still be running // Wait a little bit to make sure container.monitor() did his thing - err = container.WaitTimeout(500 * time.Millisecond) - if err == nil || !container.State.Running { - t.Fatalf("/bin/cat is not running after closing stdin") - } + containerWaitTimeout(eng, containerID, t) // Try to avoid the timeout in destroy. Best effort, don't check error - cStdin, _ := container.StdinPipe() + cStdin, _ := containerAttach(eng, containerID, t) cStdin.Close() - container.Wait() + containerWait(eng, containerID, t) } // FIXME: Test deleting running container // FIXME: Test deleting container with volume // FIXME: Test deleting volume in use by other container func TestDeleteContainers(t *testing.T) { - runtime := mkRuntime(t) - defer nuke(runtime) + eng := NewTestEngine(t) + defer mkRuntimeFromEngine(eng, t).Nuke() + srv := mkServerFromEngine(eng, t) - srv := &Server{runtime: runtime} - - container, _, err := runtime.Create(&Config{ - Image: GetTestImage(runtime).ID, - Cmd: []string{"touch", "/test"}, - }, "") - if err != nil { - t.Fatal(err) - } - defer runtime.Destroy(container) - - if err := container.Run(); err != nil { - t.Fatal(err) - } - - req, err := http.NewRequest("DELETE", "/containers/"+container.ID, nil) + containerID := createTestContainer(eng, + &docker.Config{ + Image: unitTestImageID, + Cmd: []string{"touch", "/test"}, + }, + t, + ) + req, err := http.NewRequest("DELETE", "/containers/"+containerID, nil) if err != nil { t.Fatal(err) } r := httptest.NewRecorder() - if err := deleteContainers(srv, APIVERSION, r, req, map[string]string{"name": container.ID}); err != nil { - t.Fatal(err) - } + if err := docker.ServeRequest(srv, docker.APIVERSION, r, req); err != nil { + t.Fatal(err) + } + assertHttpNotError(r, t) if r.Code != http.StatusNoContent { t.Fatalf("%d NO CONTENT expected, received %d\n", http.StatusNoContent, r.Code) } - - if c := runtime.Get(container.ID); c != nil { - t.Fatalf("The container as not been deleted") - } - - if _, err := os.Stat(path.Join(container.rwPath(), "test")); err == nil { - t.Fatalf("The test file has not been deleted") - } + containerAssertNotExists(eng, containerID, t) } func TestOptionsRoute(t *testing.T) { - runtime := mkRuntime(t) - defer nuke(runtime) - - runtime.config.EnableCors = true - srv := &Server{runtime: runtime} - + eng := NewTestEngine(t) + defer mkRuntimeFromEngine(eng, t).Nuke() + srv := mkServerFromEngine(eng, t) r := httptest.NewRecorder() - router, err := createRouter(srv, false) - if err != nil { - t.Fatal(err) - } - req, err := http.NewRequest("OPTIONS", "/", nil) if err != 
nil { t.Fatal(err) } - - router.ServeHTTP(r, req) + if err := docker.ServeRequest(srv, docker.APIVERSION, r, req); err != nil { + t.Fatal(err) + } + assertHttpNotError(r, t) if r.Code != http.StatusOK { t.Errorf("Expected response for OPTIONS request to be \"200\", %v found.", r.Code) } } func TestGetEnabledCors(t *testing.T) { - runtime := mkRuntime(t) - defer nuke(runtime) - - runtime.config.EnableCors = true - srv := &Server{runtime: runtime} - + eng := NewTestEngine(t) + defer mkRuntimeFromEngine(eng, t).Nuke() + srv := mkServerFromEngine(eng, t) r := httptest.NewRecorder() - router, err := createRouter(srv, false) - if err != nil { - t.Fatal(err) - } - req, err := http.NewRequest("GET", "/version", nil) if err != nil { t.Fatal(err) } - - router.ServeHTTP(r, req) + if err := docker.ServeRequest(srv, docker.APIVERSION, r, req); err != nil { + t.Fatal(err) + } + assertHttpNotError(r, t) if r.Code != http.StatusOK { t.Errorf("Expected response for OPTIONS request to be \"200\", %v found.", r.Code) } @@ -1182,20 +1066,18 @@ func TestGetEnabledCors(t *testing.T) { } func TestDeleteImages(t *testing.T) { - runtime := mkRuntime(t) - defer nuke(runtime) - - srv := &Server{runtime: runtime} + eng := NewTestEngine(t) + defer mkRuntimeFromEngine(eng, t).Nuke() + srv := mkServerFromEngine(eng, t) initialImages, err := srv.Images(false, "") if err != nil { t.Fatal(err) } - if err := srv.runtime.repositories.Set("test", "test", unitTestImageName, true); err != nil { + if err := srv.ContainerTag(unitTestImageName, "test", "test", false); err != nil { t.Fatal(err) } - images, err := srv.Images(false, "") if err != nil { t.Fatal(err) @@ -1211,8 +1093,11 @@ func TestDeleteImages(t *testing.T) { } r := httptest.NewRecorder() - if err := deleteImages(srv, APIVERSION, r, req, map[string]string{"name": unitTestImageID}); err == nil { - t.Fatalf("Expected conflict error, got none") + if err := docker.ServeRequest(srv, docker.APIVERSION, r, req); err != nil { + t.Fatal(err) + } + if r.Code != http.StatusConflict { + t.Fatalf("Expected http status 409-conflict, got %v", r.Code) } req2, err := http.NewRequest("DELETE", "/images/test:test", nil) @@ -1221,14 +1106,15 @@ func TestDeleteImages(t *testing.T) { } r2 := httptest.NewRecorder() - if err := deleteImages(srv, APIVERSION, r2, req2, map[string]string{"name": "test:test"}); err != nil { - t.Fatal(err) - } + if err := docker.ServeRequest(srv, docker.APIVERSION, r2, req2); err != nil { + t.Fatal(err) + } + assertHttpNotError(r2, t) if r2.Code != http.StatusOK { t.Fatalf("%d OK expected, received %d\n", http.StatusOK, r.Code) } - var outs []APIRmi + var outs []docker.APIRmi if err := json.Unmarshal(r2.Body.Bytes(), &outs); err != nil { t.Fatal(err) } @@ -1243,69 +1129,40 @@ func TestDeleteImages(t *testing.T) { if len(images[0].RepoTags) != len(initialImages[0].RepoTags) { t.Errorf("Expected %d image, %d found", len(initialImages), len(images)) } - - /* if c := runtime.Get(container.Id); c != nil { - t.Fatalf("The container as not been deleted") - } - - if _, err := os.Stat(path.Join(container.rwPath(), "test")); err == nil { - t.Fatalf("The test file has not been deleted") - } */ -} - -func TestJsonContentType(t *testing.T) { - if !matchesContentType("application/json", "application/json") { - t.Fail() - } - - if !matchesContentType("application/json; charset=utf-8", "application/json") { - t.Fail() - } - - if matchesContentType("dockerapplication/json", "application/json") { - t.Fail() - } } func TestPostContainersCopy(t *testing.T) { - runtime := 
mkRuntime(t) - defer nuke(runtime) - - srv := &Server{runtime: runtime} + eng := NewTestEngine(t) + defer mkRuntimeFromEngine(eng, t).Nuke() + srv := mkServerFromEngine(eng, t) // Create a container and remove a file - container, _, err := runtime.Create( - &Config{ - Image: GetTestImage(runtime).ID, + containerID := createTestContainer(eng, + &docker.Config{ + Image: unitTestImageID, Cmd: []string{"touch", "/test.txt"}, }, - "", + t, ) - if err != nil { - t.Fatal(err) - } - defer runtime.Destroy(container) - - if err := container.Run(); err != nil { - t.Fatal(err) - } + containerRun(eng, containerID, t) r := httptest.NewRecorder() - copyData := APICopy{HostPath: ".", Resource: "/test.txt"} + copyData := docker.APICopy{HostPath: ".", Resource: "/test.txt"} jsonData, err := json.Marshal(copyData) if err != nil { t.Fatal(err) } - req, err := http.NewRequest("POST", "/containers/"+container.ID+"/copy", bytes.NewReader(jsonData)) + req, err := http.NewRequest("POST", "/containers/"+containerID+"/copy", bytes.NewReader(jsonData)) if err != nil { t.Fatal(err) } req.Header.Add("Content-Type", "application/json") - if err = postContainersCopy(srv, APIVERSION, r, req, map[string]string{"name": container.ID}); err != nil { + if err := docker.ServeRequest(srv, docker.APIVERSION, r, req); err != nil { t.Fatal(err) } + assertHttpNotError(r, t) if r.Code != http.StatusOK { t.Fatalf("%d OK expected, received %d\n", http.StatusOK, r.Code) diff --git a/integration/auth_test.go b/integration/auth_test.go new file mode 100644 index 0000000000..dfeb9002bf --- /dev/null +++ b/integration/auth_test.go @@ -0,0 +1,63 @@ +package docker + +import ( + "github.com/dotcloud/docker/auth" + "crypto/rand" + "encoding/hex" + "os" + "strings" + "testing" +) + +// FIXME: these tests have an external dependency on a staging index hosted +// on the docker.io infrastructure. That dependency should be removed. +// - Unit tests should have no side-effect dependencies. +// - Integration tests should have side-effects limited to the host environment being tested. + +func TestLogin(t *testing.T) { + os.Setenv("DOCKER_INDEX_URL", "https://indexstaging-docker.dotcloud.com") + defer os.Setenv("DOCKER_INDEX_URL", "") + authConfig := &auth.AuthConfig{Username: "unittester", Password: "surlautrerivejetattendrai", Email: "noise+unittester@dotcloud.com"} + status, err := auth.Login(authConfig, nil) + if err != nil { + t.Fatal(err) + } + if status != "Login Succeeded" { + t.Fatalf("Expected status \"Login Succeeded\", found \"%s\" instead", status) + } +} + + + +func TestCreateAccount(t *testing.T) { + os.Setenv("DOCKER_INDEX_URL", "https://indexstaging-docker.dotcloud.com") + defer os.Setenv("DOCKER_INDEX_URL", "") + tokenBuffer := make([]byte, 16) + _, err := rand.Read(tokenBuffer) + if err != nil { + t.Fatal(err) + } + token := hex.EncodeToString(tokenBuffer)[:12] + username := "ut" + token + authConfig := &auth.AuthConfig{Username: username, Password: "test42", Email: "docker-ut+" + token + "@example.com"} + status, err := auth.Login(authConfig, nil) + if err != nil { + t.Fatal(err) + } + expectedStatus := "Account created. Please use the confirmation link we sent" + + " to your e-mail to activate it." 
+ if status != expectedStatus { + t.Fatalf("Expected status: \"%s\", found \"%s\" instead.", expectedStatus, status) + } + + status, err = auth.Login(authConfig, nil) + if err == nil { + t.Fatalf("Expected error but found nil instead") + } + + expectedError := "Login: Account is not Active" + + if !strings.Contains(err.Error(), expectedError) { + t.Fatalf("Expected message \"%s\" but found \"%s\" instead", expectedError, err) + } +} diff --git a/buildfile_test.go b/integration/buildfile_test.go similarity index 83% rename from buildfile_test.go rename to integration/buildfile_test.go index d3fca3c788..964b58403b 100644 --- a/buildfile_test.go +++ b/integration/buildfile_test.go @@ -2,7 +2,9 @@ package docker import ( "fmt" + "github.com/dotcloud/docker" "github.com/dotcloud/docker/archive" + "github.com/dotcloud/docker/engine" "io/ioutil" "net" "net/http" @@ -14,7 +16,7 @@ import ( // mkTestContext generates a build context from the contents of the provided dockerfile. // This context is suitable for use as an argument to BuildFile.Build() func mkTestContext(dockerfile string, files [][2]string, t *testing.T) archive.Archive { - context, err := mkBuildContext(dockerfile, files) + context, err := docker.MkBuildContext(dockerfile, files) if err != nil { t.Fatal(err) } @@ -228,17 +230,15 @@ func TestBuild(t *testing.T) { } } -func buildImage(context testContextTemplate, t *testing.T, srv *Server, useCache bool) *Image { - if srv == nil { - runtime := mkRuntime(t) +func buildImage(context testContextTemplate, t *testing.T, eng *engine.Engine, useCache bool) *docker.Image { + if eng == nil { + eng = NewTestEngine(t) + runtime := mkRuntimeFromEngine(eng, t) + // FIXME: we might not need runtime, why not simply nuke + // the engine? defer nuke(runtime) - - srv = &Server{ - runtime: runtime, - pullingPool: make(map[string]struct{}), - pushingPool: make(map[string]struct{}), - } } + srv := mkServerFromEngine(eng, t) httpServer, err := mkTestingFileServer(context.remoteFiles) if err != nil { @@ -252,10 +252,17 @@ func buildImage(context testContextTemplate, t *testing.T, srv *Server, useCache } port := httpServer.URL[idx+1:] - ip := srv.runtime.networkManager.bridgeNetwork.IP + iIP := eng.Hack_GetGlobalVar("httpapi.bridgeIP") + if iIP == nil { + t.Fatal("Legacy bridgeIP field not set in engine") + } + ip, ok := iIP.(net.IP) + if !ok { + panic("Legacy bridgeIP field in engine does not cast to net.IP") + } dockerfile := constructDockerfile(context.dockerfile, ip, port) - buildfile := NewBuildFile(srv, ioutil.Discard, false, useCache, false) + buildfile := docker.NewBuildFile(srv, ioutil.Discard, false, useCache, false) id, err := buildfile.Build(mkTestContext(dockerfile, context.files, t)) if err != nil { t.Fatal(err) @@ -368,20 +375,14 @@ func TestBuildEntrypoint(t *testing.T) { // testing #1405 - config.Cmd does not get cleaned up if // utilizing cache func TestBuildEntrypointRunCleanup(t *testing.T) { - runtime := mkRuntime(t) - defer nuke(runtime) - - srv := &Server{ - runtime: runtime, - pullingPool: make(map[string]struct{}), - pushingPool: make(map[string]struct{}), - } + eng := NewTestEngine(t) + defer nuke(mkRuntimeFromEngine(eng, t)) img := buildImage(testContextTemplate{` from {IMAGE} run echo "hello" `, - nil, nil}, t, srv, true) + nil, nil}, t, eng, true) img = buildImage(testContextTemplate{` from {IMAGE} @@ -389,7 +390,7 @@ func TestBuildEntrypointRunCleanup(t *testing.T) { add foo /foo entrypoint ["/bin/echo"] `, - [][2]string{{"foo", "HEYO"}}, nil}, t, srv, true) + [][2]string{{"foo", 
"HEYO"}}, nil}, t, eng, true) if len(img.Config.Cmd) != 0 { t.Fail() @@ -397,14 +398,8 @@ func TestBuildEntrypointRunCleanup(t *testing.T) { } func TestBuildImageWithCache(t *testing.T) { - runtime := mkRuntime(t) - defer nuke(runtime) - - srv := &Server{ - runtime: runtime, - pullingPool: make(map[string]struct{}), - pushingPool: make(map[string]struct{}), - } + eng := NewTestEngine(t) + defer nuke(mkRuntimeFromEngine(eng, t)) template := testContextTemplate{` from {IMAGE} @@ -412,11 +407,11 @@ func TestBuildImageWithCache(t *testing.T) { `, nil, nil} - img := buildImage(template, t, srv, true) + img := buildImage(template, t, eng, true) imageId := img.ID img = nil - img = buildImage(template, t, srv, true) + img = buildImage(template, t, eng, true) if imageId != img.ID { t.Logf("Image ids should match: %s != %s", imageId, img.ID) @@ -425,14 +420,8 @@ func TestBuildImageWithCache(t *testing.T) { } func TestBuildImageWithoutCache(t *testing.T) { - runtime := mkRuntime(t) - defer nuke(runtime) - - srv := &Server{ - runtime: runtime, - pullingPool: make(map[string]struct{}), - pushingPool: make(map[string]struct{}), - } + eng := NewTestEngine(t) + defer nuke(mkRuntimeFromEngine(eng, t)) template := testContextTemplate{` from {IMAGE} @@ -440,11 +429,11 @@ func TestBuildImageWithoutCache(t *testing.T) { `, nil, nil} - img := buildImage(template, t, srv, true) + img := buildImage(template, t, eng, true) imageId := img.ID img = nil - img = buildImage(template, t, srv, false) + img = buildImage(template, t, eng, false) if imageId == img.ID { t.Logf("Image ids should not match: %s == %s", imageId, img.ID) @@ -453,14 +442,9 @@ func TestBuildImageWithoutCache(t *testing.T) { } func TestForbiddenContextPath(t *testing.T) { - runtime := mkRuntime(t) - defer nuke(runtime) - - srv := &Server{ - runtime: runtime, - pullingPool: make(map[string]struct{}), - pushingPool: make(map[string]struct{}), - } + eng := NewTestEngine(t) + defer nuke(mkRuntimeFromEngine(eng, t)) + srv := mkServerFromEngine(eng, t) context := testContextTemplate{` from {IMAGE} @@ -481,10 +465,17 @@ func TestForbiddenContextPath(t *testing.T) { } port := httpServer.URL[idx+1:] - ip := srv.runtime.networkManager.bridgeNetwork.IP + iIP := eng.Hack_GetGlobalVar("httpapi.bridgeIP") + if iIP == nil { + t.Fatal("Legacy bridgeIP field not set in engine") + } + ip, ok := iIP.(net.IP) + if !ok { + panic("Legacy bridgeIP field in engine does not cast to net.IP") + } dockerfile := constructDockerfile(context.dockerfile, ip, port) - buildfile := NewBuildFile(srv, ioutil.Discard, false, true, false) + buildfile := docker.NewBuildFile(srv, ioutil.Discard, false, true, false) _, err = buildfile.Build(mkTestContext(dockerfile, context.files, t)) if err == nil { @@ -499,14 +490,8 @@ func TestForbiddenContextPath(t *testing.T) { } func TestBuildADDFileNotFound(t *testing.T) { - runtime := mkRuntime(t) - defer nuke(runtime) - - srv := &Server{ - runtime: runtime, - pullingPool: make(map[string]struct{}), - pushingPool: make(map[string]struct{}), - } + eng := NewTestEngine(t) + defer nuke(mkRuntimeFromEngine(eng, t)) context := testContextTemplate{` from {IMAGE} @@ -526,10 +511,17 @@ func TestBuildADDFileNotFound(t *testing.T) { } port := httpServer.URL[idx+1:] - ip := srv.runtime.networkManager.bridgeNetwork.IP + iIP := eng.Hack_GetGlobalVar("httpapi.bridgeIP") + if iIP == nil { + t.Fatal("Legacy bridgeIP field not set in engine") + } + ip, ok := iIP.(net.IP) + if !ok { + panic("Legacy bridgeIP field in engine does not cast to net.IP") + } dockerfile := 
constructDockerfile(context.dockerfile, ip, port) - buildfile := NewBuildFile(srv, ioutil.Discard, false, true, false) + buildfile := docker.NewBuildFile(mkServerFromEngine(eng, t), ioutil.Discard, false, true, false) _, err = buildfile.Build(mkTestContext(dockerfile, context.files, t)) if err == nil { @@ -544,26 +536,20 @@ func TestBuildADDFileNotFound(t *testing.T) { } func TestBuildInheritance(t *testing.T) { - runtime := mkRuntime(t) - defer nuke(runtime) - - srv := &Server{ - runtime: runtime, - pullingPool: make(map[string]struct{}), - pushingPool: make(map[string]struct{}), - } + eng := NewTestEngine(t) + defer nuke(mkRuntimeFromEngine(eng, t)) img := buildImage(testContextTemplate{` from {IMAGE} expose 4243 `, - nil, nil}, t, srv, true) + nil, nil}, t, eng, true) img2 := buildImage(testContextTemplate{fmt.Sprintf(` from %s entrypoint ["/bin/echo"] `, img.ID), - nil, nil}, t, srv, true) + nil, nil}, t, eng, true) // from child if img2.Config.Entrypoint[0] != "/bin/echo" { diff --git a/commands_test.go b/integration/commands_test.go similarity index 88% rename from commands_test.go rename to integration/commands_test.go index 1778f1b89f..ab186f4a2c 100644 --- a/commands_test.go +++ b/integration/commands_test.go @@ -3,6 +3,8 @@ package docker import ( "bufio" "fmt" + "github.com/dotcloud/docker" + "github.com/dotcloud/docker/engine" "github.com/dotcloud/docker/utils" "io" "io/ioutil" @@ -66,8 +68,8 @@ func assertPipe(input, output string, r io.Reader, w io.Writer, count int) error func TestRunHostname(t *testing.T) { stdout, stdoutPipe := io.Pipe() - cli := NewDockerCli(nil, stdoutPipe, ioutil.Discard, testDaemonProto, testDaemonAddr) - defer cleanup(globalRuntime) + cli := docker.NewDockerCli(nil, stdoutPipe, ioutil.Discard, testDaemonProto, testDaemonAddr) + defer cleanup(globalEngine, t) c := make(chan struct{}) go func() { @@ -111,8 +113,8 @@ func TestRunHostname(t *testing.T) { func TestRunWorkdir(t *testing.T) { stdout, stdoutPipe := io.Pipe() - cli := NewDockerCli(nil, stdoutPipe, ioutil.Discard, testDaemonProto, testDaemonAddr) - defer cleanup(globalRuntime) + cli := docker.NewDockerCli(nil, stdoutPipe, ioutil.Discard, testDaemonProto, testDaemonAddr) + defer cleanup(globalEngine, t) c := make(chan struct{}) go func() { @@ -156,8 +158,8 @@ func TestRunWorkdir(t *testing.T) { func TestRunWorkdirExists(t *testing.T) { stdout, stdoutPipe := io.Pipe() - cli := NewDockerCli(nil, stdoutPipe, ioutil.Discard, testDaemonProto, testDaemonAddr) - defer cleanup(globalRuntime) + cli := docker.NewDockerCli(nil, stdoutPipe, ioutil.Discard, testDaemonProto, testDaemonAddr) + defer cleanup(globalEngine, t) c := make(chan struct{}) go func() { @@ -201,8 +203,8 @@ func TestRunExit(t *testing.T) { stdin, stdinPipe := io.Pipe() stdout, stdoutPipe := io.Pipe() - cli := NewDockerCli(stdin, stdoutPipe, ioutil.Discard, testDaemonProto, testDaemonAddr) - defer cleanup(globalRuntime) + cli := docker.NewDockerCli(stdin, stdoutPipe, ioutil.Discard, testDaemonProto, testDaemonAddr) + defer cleanup(globalEngine, t) c1 := make(chan struct{}) go func() { @@ -254,8 +256,8 @@ func TestRunDisconnect(t *testing.T) { stdin, stdinPipe := io.Pipe() stdout, stdoutPipe := io.Pipe() - cli := NewDockerCli(stdin, stdoutPipe, ioutil.Discard, testDaemonProto, testDaemonAddr) - defer cleanup(globalRuntime) + cli := docker.NewDockerCli(stdin, stdoutPipe, ioutil.Discard, testDaemonProto, testDaemonAddr) + defer cleanup(globalEngine, t) c1 := make(chan struct{}) go func() { @@ -299,8 +301,8 @@ func TestRunDisconnectTty(t 
*testing.T) { stdin, stdinPipe := io.Pipe() stdout, stdoutPipe := io.Pipe() - cli := NewDockerCli(stdin, stdoutPipe, ioutil.Discard, testDaemonProto, testDaemonAddr) - defer cleanup(globalRuntime) + cli := docker.NewDockerCli(stdin, stdoutPipe, ioutil.Discard, testDaemonProto, testDaemonAddr) + defer cleanup(globalEngine, t) c1 := make(chan struct{}) go func() { @@ -356,8 +358,8 @@ func TestRunAttachStdin(t *testing.T) { stdin, stdinPipe := io.Pipe() stdout, stdoutPipe := io.Pipe() - cli := NewDockerCli(stdin, stdoutPipe, ioutil.Discard, testDaemonProto, testDaemonAddr) - defer cleanup(globalRuntime) + cli := docker.NewDockerCli(stdin, stdoutPipe, ioutil.Discard, testDaemonProto, testDaemonAddr) + defer cleanup(globalEngine, t) ch := make(chan struct{}) go func() { @@ -420,8 +422,8 @@ func TestRunDetach(t *testing.T) { stdin, stdinPipe := io.Pipe() stdout, stdoutPipe := io.Pipe() - cli := NewDockerCli(stdin, stdoutPipe, ioutil.Discard, testDaemonProto, testDaemonAddr) - defer cleanup(globalRuntime) + cli := docker.NewDockerCli(stdin, stdoutPipe, ioutil.Discard, testDaemonProto, testDaemonAddr) + defer cleanup(globalEngine, t) ch := make(chan struct{}) go func() { @@ -466,8 +468,8 @@ func TestAttachDetach(t *testing.T) { stdin, stdinPipe := io.Pipe() stdout, stdoutPipe := io.Pipe() - cli := NewDockerCli(stdin, stdoutPipe, ioutil.Discard, testDaemonProto, testDaemonAddr) - defer cleanup(globalRuntime) + cli := docker.NewDockerCli(stdin, stdoutPipe, ioutil.Discard, testDaemonProto, testDaemonAddr) + defer cleanup(globalEngine, t) ch := make(chan struct{}) go func() { @@ -477,7 +479,7 @@ func TestAttachDetach(t *testing.T) { } }() - var container *Container + var container *docker.Container setTimeout(t, "Reading container's id timed out", 10*time.Second, func() { buf := make([]byte, 1024) @@ -498,7 +500,7 @@ func TestAttachDetach(t *testing.T) { stdin, stdinPipe = io.Pipe() stdout, stdoutPipe = io.Pipe() - cli = NewDockerCli(stdin, stdoutPipe, ioutil.Discard, testDaemonProto, testDaemonAddr) + cli = docker.NewDockerCli(stdin, stdoutPipe, ioutil.Discard, testDaemonProto, testDaemonAddr) ch = make(chan struct{}) go func() { @@ -546,8 +548,8 @@ func TestAttachDetachTruncatedID(t *testing.T) { stdin, stdinPipe := io.Pipe() stdout, stdoutPipe := io.Pipe() - cli := NewDockerCli(stdin, stdoutPipe, ioutil.Discard, testDaemonProto, testDaemonAddr) - defer cleanup(globalRuntime) + cli := docker.NewDockerCli(stdin, stdoutPipe, ioutil.Discard, testDaemonProto, testDaemonAddr) + defer cleanup(globalEngine, t) go stdout.Read(make([]byte, 1024)) setTimeout(t, "Starting container timed out", 2*time.Second, func() { @@ -560,7 +562,7 @@ func TestAttachDetachTruncatedID(t *testing.T) { stdin, stdinPipe = io.Pipe() stdout, stdoutPipe = io.Pipe() - cli = NewDockerCli(stdin, stdoutPipe, ioutil.Discard, testDaemonProto, testDaemonAddr) + cli = docker.NewDockerCli(stdin, stdoutPipe, ioutil.Discard, testDaemonProto, testDaemonAddr) ch := make(chan struct{}) go func() { @@ -608,8 +610,8 @@ func TestAttachDisconnect(t *testing.T) { stdin, stdinPipe := io.Pipe() stdout, stdoutPipe := io.Pipe() - cli := NewDockerCli(stdin, stdoutPipe, ioutil.Discard, testDaemonProto, testDaemonAddr) - defer cleanup(globalRuntime) + cli := docker.NewDockerCli(stdin, stdoutPipe, ioutil.Discard, testDaemonProto, testDaemonAddr) + defer cleanup(globalEngine, t) go func() { // Start a process in daemon mode @@ -677,8 +679,8 @@ func TestAttachDisconnect(t *testing.T) { func TestRunAutoRemove(t *testing.T) { t.Skip("Fixme. 
Skipping test for now, race condition") stdout, stdoutPipe := io.Pipe() - cli := NewDockerCli(nil, stdoutPipe, ioutil.Discard, testDaemonProto, testDaemonAddr) - defer cleanup(globalRuntime) + cli := docker.NewDockerCli(nil, stdoutPipe, ioutil.Discard, testDaemonProto, testDaemonAddr) + defer cleanup(globalEngine, t) c := make(chan struct{}) go func() { @@ -712,8 +714,8 @@ func TestRunAutoRemove(t *testing.T) { } func TestCmdLogs(t *testing.T) { - cli := NewDockerCli(nil, ioutil.Discard, ioutil.Discard, testDaemonProto, testDaemonAddr) - defer cleanup(globalRuntime) + cli := docker.NewDockerCli(nil, ioutil.Discard, ioutil.Discard, testDaemonProto, testDaemonAddr) + defer cleanup(globalEngine, t) if err := cli.CmdRun(unitTestImageID, "sh", "-c", "ls -l"); err != nil { t.Fatal(err) @@ -730,8 +732,8 @@ func TestCmdLogs(t *testing.T) { // Expected behaviour: using / as a bind mount source should throw an error func TestRunErrorBindMountRootSource(t *testing.T) { - cli := NewDockerCli(nil, nil, ioutil.Discard, testDaemonProto, testDaemonAddr) - defer cleanup(globalRuntime) + cli := docker.NewDockerCli(nil, nil, ioutil.Discard, testDaemonProto, testDaemonAddr) + defer cleanup(globalEngine, t) c := make(chan struct{}) go func() { @@ -749,8 +751,8 @@ func TestRunErrorBindMountRootSource(t *testing.T) { // Expected behaviour: error out when attempting to bind mount non-existing source paths func TestRunErrorBindNonExistingSource(t *testing.T) { - cli := NewDockerCli(nil, nil, ioutil.Discard, testDaemonProto, testDaemonAddr) - defer cleanup(globalRuntime) + cli := docker.NewDockerCli(nil, nil, ioutil.Discard, testDaemonProto, testDaemonAddr) + defer cleanup(globalEngine, t) c := make(chan struct{}) go func() { @@ -768,11 +770,10 @@ func TestRunErrorBindNonExistingSource(t *testing.T) { func TestImagesViz(t *testing.T) { stdout, stdoutPipe := io.Pipe() - cli := NewDockerCli(nil, stdoutPipe, ioutil.Discard, testDaemonProto, testDaemonAddr) - defer cleanup(globalRuntime) + cli := docker.NewDockerCli(nil, stdoutPipe, ioutil.Discard, testDaemonProto, testDaemonAddr) + defer cleanup(globalEngine, t) - srv := &Server{runtime: globalRuntime} - image := buildTestImages(t, srv) + image := buildTestImages(t, globalEngine) c := make(chan struct{}) go func() { @@ -819,11 +820,10 @@ func TestImagesViz(t *testing.T) { func TestImagesTree(t *testing.T) { stdout, stdoutPipe := io.Pipe() - cli := NewDockerCli(nil, stdoutPipe, ioutil.Discard, testDaemonProto, testDaemonAddr) - defer cleanup(globalRuntime) + cli := docker.NewDockerCli(nil, stdoutPipe, ioutil.Discard, testDaemonProto, testDaemonAddr) + defer cleanup(globalEngine, t) - srv := &Server{runtime: globalRuntime} - image := buildTestImages(t, srv) + image := buildTestImages(t, globalEngine) c := make(chan struct{}) go func() { @@ -867,7 +867,7 @@ func TestImagesTree(t *testing.T) { }) } -func buildTestImages(t *testing.T, srv *Server) *Image { +func buildTestImages(t *testing.T, eng *engine.Engine) *docker.Image { var testBuilder = testContextTemplate{ ` @@ -880,9 +880,9 @@ run [ "$(ls -d /var/run/sshd)" = "/var/run/sshd" ] nil, nil, } - image := buildImage(testBuilder, t, srv, true) + image := buildImage(testBuilder, t, eng, true) - err := srv.ContainerTag(image.ID, "test", "latest", false) + err := mkServerFromEngine(eng, t).ContainerTag(image.ID, "test", "latest", false) if err != nil { t.Fatal(err) } @@ -902,8 +902,8 @@ func TestRunCidFile(t *testing.T) { } tmpCidFile := path.Join(tmpDir, "cid") - cli := NewDockerCli(nil, stdoutPipe, ioutil.Discard, 
testDaemonProto, testDaemonAddr) - defer cleanup(globalRuntime) + cli := docker.NewDockerCli(nil, stdoutPipe, ioutil.Discard, testDaemonProto, testDaemonAddr) + defer cleanup(globalEngine, t) c := make(chan struct{}) go func() { diff --git a/container_test.go b/integration/container_test.go similarity index 82% rename from container_test.go rename to integration/container_test.go index 26007a732d..3658d9d4dc 100644 --- a/container_test.go +++ b/integration/container_test.go @@ -3,10 +3,10 @@ package docker import ( "bufio" "fmt" + "github.com/dotcloud/docker" "github.com/dotcloud/docker/utils" "io" "io/ioutil" - "math/rand" "os" "path" "regexp" @@ -20,7 +20,7 @@ func TestIDFormat(t *testing.T) { runtime := mkRuntime(t) defer nuke(runtime) container1, _, err := runtime.Create( - &Config{ + &docker.Config{ Image: GetTestImage(runtime).ID, Cmd: []string{"/bin/sh", "-c", "echo hello world"}, }, @@ -41,7 +41,7 @@ func TestIDFormat(t *testing.T) { func TestMultipleAttachRestart(t *testing.T) { runtime := mkRuntime(t) defer nuke(runtime) - container, _ := mkContainer( + container, _, _ := mkContainer( runtime, []string{"_", "/bin/sh", "-c", "i=1; while [ $i -le 5 ]; do i=`expr $i + 1`; echo hello; done"}, t, @@ -134,10 +134,11 @@ func TestMultipleAttachRestart(t *testing.T) { } func TestDiff(t *testing.T) { - runtime := mkRuntime(t) + eng := NewTestEngine(t) + runtime := mkRuntimeFromEngine(eng, t) defer nuke(runtime) // Create a container and remove a file - container1, _ := mkContainer(runtime, []string{"_", "/bin/rm", "/etc/passwd"}, t) + container1, _, _ := mkContainer(runtime, []string{"_", "/bin/rm", "/etc/passwd"}, t) defer runtime.Destroy(container1) // The changelog should be empty and not fail before run. See #1705 @@ -169,17 +170,13 @@ func TestDiff(t *testing.T) { } // Commit the container - rwTar, err := container1.ExportRw() - if err != nil { - t.Error(err) - } - img, err := runtime.graph.Create(rwTar, container1, "unit test commited image - diff", "", nil) + img, err := runtime.Commit(container1, "", "", "unit test commited image - diff", "", nil) if err != nil { t.Error(err) } // Create a new container from the commited image - container2, _ := mkContainer(runtime, []string{img.ID, "cat", "/etc/passwd"}, t) + container2, _, _ := mkContainer(runtime, []string{img.ID, "cat", "/etc/passwd"}, t) defer runtime.Destroy(container2) if err := container2.Run(); err != nil { @@ -198,7 +195,7 @@ func TestDiff(t *testing.T) { } // Create a new container - container3, _ := mkContainer(runtime, []string{"_", "rm", "/bin/httpd"}, t) + container3, _, _ := mkContainer(runtime, []string{"_", "rm", "/bin/httpd"}, t) defer runtime.Destroy(container3) if err := container3.Run(); err != nil { @@ -224,7 +221,7 @@ func TestDiff(t *testing.T) { func TestCommitAutoRun(t *testing.T) { runtime := mkRuntime(t) defer nuke(runtime) - container1, _ := mkContainer(runtime, []string{"_", "/bin/sh", "-c", "echo hello > /world"}, t) + container1, _, _ := mkContainer(runtime, []string{"_", "/bin/sh", "-c", "echo hello > /world"}, t) defer runtime.Destroy(container1) if container1.State.Running { @@ -237,17 +234,13 @@ func TestCommitAutoRun(t *testing.T) { t.Errorf("Container shouldn't be running") } - rwTar, err := container1.ExportRw() - if err != nil { - t.Error(err) - } - img, err := runtime.graph.Create(rwTar, container1, "unit test commited image", "", &Config{Cmd: []string{"cat", "/world"}}) + img, err := runtime.Commit(container1, "", "", "unit test commited image", "", &docker.Config{Cmd: []string{"cat", 
"/world"}}) if err != nil { t.Error(err) } // FIXME: Make a TestCommit that stops here and check docker.root/layers/img.id/world - container2, _ := mkContainer(runtime, []string{img.ID}, t) + container2, _, _ := mkContainer(runtime, []string{img.ID}, t) defer runtime.Destroy(container2) stdout, err := container2.StdoutPipe() if err != nil { @@ -284,7 +277,7 @@ func TestCommitRun(t *testing.T) { runtime := mkRuntime(t) defer nuke(runtime) - container1, _ := mkContainer(runtime, []string{"_", "/bin/sh", "-c", "echo hello > /world"}, t) + container1, _, _ := mkContainer(runtime, []string{"_", "/bin/sh", "-c", "echo hello > /world"}, t) defer runtime.Destroy(container1) if container1.State.Running { @@ -297,17 +290,13 @@ func TestCommitRun(t *testing.T) { t.Errorf("Container shouldn't be running") } - rwTar, err := container1.ExportRw() - if err != nil { - t.Error(err) - } - img, err := runtime.graph.Create(rwTar, container1, "unit test commited image", "", nil) + img, err := runtime.Commit(container1, "", "", "unit test commited image", "", nil) if err != nil { t.Error(err) } // FIXME: Make a TestCommit that stops here and check docker.root/layers/img.id/world - container2, _ := mkContainer(runtime, []string{img.ID, "cat", "/world"}, t) + container2, _, _ := mkContainer(runtime, []string{img.ID, "cat", "/world"}, t) defer runtime.Destroy(container2) stdout, err := container2.StdoutPipe() if err != nil { @@ -343,7 +332,7 @@ func TestCommitRun(t *testing.T) { func TestStart(t *testing.T) { runtime := mkRuntime(t) defer nuke(runtime) - container, _ := mkContainer(runtime, []string{"-m", "33554432", "-c", "1000", "-i", "_", "/bin/cat"}, t) + container, _, _ := mkContainer(runtime, []string{"-m", "33554432", "-c", "1000", "-i", "_", "/bin/cat"}, t) defer runtime.Destroy(container) cStdin, err := container.StdinPipe() @@ -373,7 +362,7 @@ func TestStart(t *testing.T) { func TestRun(t *testing.T) { runtime := mkRuntime(t) defer nuke(runtime) - container, _ := mkContainer(runtime, []string{"_", "ls", "-al"}, t) + container, _, _ := mkContainer(runtime, []string{"_", "ls", "-al"}, t) defer runtime.Destroy(container) if container.State.Running { @@ -391,7 +380,7 @@ func TestOutput(t *testing.T) { runtime := mkRuntime(t) defer nuke(runtime) container, _, err := runtime.Create( - &Config{ + &docker.Config{ Image: GetTestImage(runtime).ID, Cmd: []string{"echo", "-n", "foobar"}, }, @@ -414,7 +403,7 @@ func TestContainerNetwork(t *testing.T) { runtime := mkRuntime(t) defer nuke(runtime) container, _, err := runtime.Create( - &Config{ + &docker.Config{ Image: GetTestImage(runtime).ID, Cmd: []string{"ping", "-c", "1", "127.0.0.1"}, }, @@ -436,7 +425,7 @@ func TestKillDifferentUser(t *testing.T) { runtime := mkRuntime(t) defer nuke(runtime) - container, _, err := runtime.Create(&Config{ + container, _, err := runtime.Create(&docker.Config{ Image: GetTestImage(runtime).ID, Cmd: []string{"cat"}, OpenStdin: true, @@ -448,7 +437,9 @@ func TestKillDifferentUser(t *testing.T) { t.Fatal(err) } defer runtime.Destroy(container) - defer container.stdin.Close() + // FIXME @shykes: this seems redundant, but is very old, I'm leaving it in case + // there is a side effect I'm not seeing. + // defer container.stdin.Close() if container.State.Running { t.Errorf("Container shouldn't be running") @@ -490,22 +481,35 @@ func TestKillDifferentUser(t *testing.T) { // Test that creating a container with a volume doesn't crash. Regression test for #995. 
func TestCreateVolume(t *testing.T) { - runtime := mkRuntime(t) + eng := NewTestEngine(t) + runtime := mkRuntimeFromEngine(eng, t) defer nuke(runtime) - config, hc, _, err := ParseRun([]string{"-v", "/var/lib/data", GetTestImage(runtime).ID, "echo", "hello", "world"}, nil) + config, hc, _, err := docker.ParseRun([]string{"-v", "/var/lib/data", unitTestImageID, "echo", "hello", "world"}, nil) if err != nil { t.Fatal(err) } - c, _, err := runtime.Create(config, "") - if err != nil { + jobCreate := eng.Job("create") + if err := jobCreate.ImportEnv(config); err != nil { t.Fatal(err) } - defer runtime.Destroy(c) - c.hostConfig = hc - if err := c.Start(); err != nil { + var id string + jobCreate.StdoutParseString(&id) + if err := jobCreate.Run(); err != nil { t.Fatal(err) } + jobStart := eng.Job("start", id) + if err := jobStart.ImportEnv(hc); err != nil { + t.Fatal(err) + } + if err := jobStart.Run(); err != nil { + t.Fatal(err) + } + // FIXME: this hack can be removed once Wait is a job + c := runtime.Get(id) + if c == nil { + t.Fatalf("Couldn't retrieve container %s from runtime", id) + } c.WaitTimeout(500 * time.Millisecond) c.Wait() } @@ -513,7 +517,7 @@ func TestCreateVolume(t *testing.T) { func TestKill(t *testing.T) { runtime := mkRuntime(t) defer nuke(runtime) - container, _, err := runtime.Create(&Config{ + container, _, err := runtime.Create(&docker.Config{ Image: GetTestImage(runtime).ID, Cmd: []string{"sleep", "2"}, }, @@ -557,7 +561,7 @@ func TestExitCode(t *testing.T) { runtime := mkRuntime(t) defer nuke(runtime) - trueContainer, _, err := runtime.Create(&Config{ + trueContainer, _, err := runtime.Create(&docker.Config{ Image: GetTestImage(runtime).ID, Cmd: []string{"/bin/true", ""}, }, "") @@ -572,7 +576,7 @@ func TestExitCode(t *testing.T) { t.Errorf("Unexpected exit code %d (expected 0)", trueContainer.State.ExitCode) } - falseContainer, _, err := runtime.Create(&Config{ + falseContainer, _, err := runtime.Create(&docker.Config{ Image: GetTestImage(runtime).ID, Cmd: []string{"/bin/false", ""}, }, "") @@ -591,7 +595,7 @@ func TestExitCode(t *testing.T) { func TestRestart(t *testing.T) { runtime := mkRuntime(t) defer nuke(runtime) - container, _, err := runtime.Create(&Config{ + container, _, err := runtime.Create(&docker.Config{ Image: GetTestImage(runtime).ID, Cmd: []string{"echo", "-n", "foobar"}, }, @@ -622,7 +626,7 @@ func TestRestart(t *testing.T) { func TestRestartStdin(t *testing.T) { runtime := mkRuntime(t) defer nuke(runtime) - container, _, err := runtime.Create(&Config{ + container, _, err := runtime.Create(&docker.Config{ Image: GetTestImage(runtime).ID, Cmd: []string{"cat"}, @@ -700,7 +704,7 @@ func TestUser(t *testing.T) { defer nuke(runtime) // Default user must be root - container, _, err := runtime.Create(&Config{ + container, _, err := runtime.Create(&docker.Config{ Image: GetTestImage(runtime).ID, Cmd: []string{"id"}, }, @@ -719,7 +723,7 @@ func TestUser(t *testing.T) { } // Set a username - container, _, err = runtime.Create(&Config{ + container, _, err = runtime.Create(&docker.Config{ Image: GetTestImage(runtime).ID, Cmd: []string{"id"}, @@ -740,7 +744,7 @@ func TestUser(t *testing.T) { } // Set a UID - container, _, err = runtime.Create(&Config{ + container, _, err = runtime.Create(&docker.Config{ Image: GetTestImage(runtime).ID, Cmd: []string{"id"}, @@ -761,7 +765,7 @@ func TestUser(t *testing.T) { } // Set a different user by uid - container, _, err = runtime.Create(&Config{ + container, _, err = runtime.Create(&docker.Config{ Image: 
GetTestImage(runtime).ID, Cmd: []string{"id"}, @@ -784,7 +788,7 @@ func TestUser(t *testing.T) { } // Set a different user by username - container, _, err = runtime.Create(&Config{ + container, _, err = runtime.Create(&docker.Config{ Image: GetTestImage(runtime).ID, Cmd: []string{"id"}, @@ -805,7 +809,7 @@ func TestUser(t *testing.T) { } // Test an wrong username - container, _, err = runtime.Create(&Config{ + container, _, err = runtime.Create(&docker.Config{ Image: GetTestImage(runtime).ID, Cmd: []string{"id"}, @@ -827,7 +831,7 @@ func TestMultipleContainers(t *testing.T) { runtime := mkRuntime(t) defer nuke(runtime) - container1, _, err := runtime.Create(&Config{ + container1, _, err := runtime.Create(&docker.Config{ Image: GetTestImage(runtime).ID, Cmd: []string{"sleep", "2"}, }, @@ -838,7 +842,7 @@ func TestMultipleContainers(t *testing.T) { } defer runtime.Destroy(container1) - container2, _, err := runtime.Create(&Config{ + container2, _, err := runtime.Create(&docker.Config{ Image: GetTestImage(runtime).ID, Cmd: []string{"sleep", "2"}, }, @@ -882,7 +886,7 @@ func TestMultipleContainers(t *testing.T) { func TestStdin(t *testing.T) { runtime := mkRuntime(t) defer nuke(runtime) - container, _, err := runtime.Create(&Config{ + container, _, err := runtime.Create(&docker.Config{ Image: GetTestImage(runtime).ID, Cmd: []string{"cat"}, @@ -927,7 +931,7 @@ func TestStdin(t *testing.T) { func TestTty(t *testing.T) { runtime := mkRuntime(t) defer nuke(runtime) - container, _, err := runtime.Create(&Config{ + container, _, err := runtime.Create(&docker.Config{ Image: GetTestImage(runtime).ID, Cmd: []string{"cat"}, @@ -974,7 +978,7 @@ func TestEnv(t *testing.T) { os.Setenv("TRICKY", "tri\ncky\n") runtime := mkRuntime(t) defer nuke(runtime) - config, _, _, err := ParseRun([]string{"-e=FALSE=true", "-e=TRUE", "-e=TRICKY", GetTestImage(runtime).ID, "env"}, nil) + config, _, _, err := docker.ParseRun([]string{"-e=FALSE=true", "-e=TRUE", "-e=TRICKY", GetTestImage(runtime).ID, "env"}, nil) if err != nil { t.Fatal(err) } @@ -1028,7 +1032,7 @@ func TestEntrypoint(t *testing.T) { runtime := mkRuntime(t) defer nuke(runtime) container, _, err := runtime.Create( - &Config{ + &docker.Config{ Image: GetTestImage(runtime).ID, Entrypoint: []string{"/bin/echo"}, Cmd: []string{"-n", "foobar"}, @@ -1052,7 +1056,7 @@ func TestEntrypointNoCmd(t *testing.T) { runtime := mkRuntime(t) defer nuke(runtime) container, _, err := runtime.Create( - &Config{ + &docker.Config{ Image: GetTestImage(runtime).ID, Entrypoint: []string{"/bin/echo", "foobar"}, }, @@ -1071,96 +1075,11 @@ func TestEntrypointNoCmd(t *testing.T) { } } -func grepFile(t *testing.T, path string, pattern string) { - f, err := os.Open(path) - if err != nil { - t.Fatal(err) - } - defer f.Close() - r := bufio.NewReader(f) - var ( - line string - ) - err = nil - for err == nil { - line, err = r.ReadString('\n') - if strings.Contains(line, pattern) == true { - return - } - } - t.Fatalf("grepFile: pattern \"%s\" not found in \"%s\"", pattern, path) -} - -func TestLXCConfig(t *testing.T) { - runtime := mkRuntime(t) - defer nuke(runtime) - // Memory is allocated randomly for testing - rand.Seed(time.Now().UTC().UnixNano()) - memMin := 33554432 - memMax := 536870912 - mem := memMin + rand.Intn(memMax-memMin) - // CPU shares as well - cpuMin := 100 - cpuMax := 10000 - cpu := cpuMin + rand.Intn(cpuMax-cpuMin) - container, _, err := runtime.Create(&Config{ - Image: GetTestImage(runtime).ID, - Cmd: []string{"/bin/true"}, - - Hostname: "foobar", - Memory: int64(mem), - 
CpuShares: int64(cpu), - }, - "", - ) - if err != nil { - t.Fatal(err) - } - defer runtime.Destroy(container) - container.generateLXCConfig() - grepFile(t, container.lxcConfigPath(), "lxc.utsname = foobar") - grepFile(t, container.lxcConfigPath(), - fmt.Sprintf("lxc.cgroup.memory.limit_in_bytes = %d", mem)) - grepFile(t, container.lxcConfigPath(), - fmt.Sprintf("lxc.cgroup.memory.memsw.limit_in_bytes = %d", mem*2)) -} - -func TestCustomLxcConfig(t *testing.T) { - runtime := mkRuntime(t) - defer nuke(runtime) - container, _, err := runtime.Create(&Config{ - Image: GetTestImage(runtime).ID, - Cmd: []string{"/bin/true"}, - - Hostname: "foobar", - }, - "", - ) - if err != nil { - t.Fatal(err) - } - defer runtime.Destroy(container) - container.hostConfig = &HostConfig{LxcConf: []KeyValuePair{ - { - Key: "lxc.utsname", - Value: "docker", - }, - { - Key: "lxc.cgroup.cpuset.cpus", - Value: "0,1", - }, - }} - - container.generateLXCConfig() - grepFile(t, container.lxcConfigPath(), "lxc.utsname = docker") - grepFile(t, container.lxcConfigPath(), "lxc.cgroup.cpuset.cpus = 0,1") -} - func BenchmarkRunSequencial(b *testing.B) { runtime := mkRuntime(b) defer nuke(runtime) for i := 0; i < b.N; i++ { - container, _, err := runtime.Create(&Config{ + container, _, err := runtime.Create(&docker.Config{ Image: GetTestImage(runtime).ID, Cmd: []string{"echo", "-n", "foo"}, }, @@ -1193,7 +1112,7 @@ func BenchmarkRunParallel(b *testing.B) { complete := make(chan error) tasks = append(tasks, complete) go func(i int, complete chan error) { - container, _, err := runtime.Create(&Config{ + container, _, err := runtime.Create(&docker.Config{ Image: GetTestImage(runtime).ID, Cmd: []string{"echo", "-n", "foo"}, }, @@ -1244,11 +1163,12 @@ func tempDir(t *testing.T) string { // Test for #1737 func TestCopyVolumeUidGid(t *testing.T) { - r := mkRuntime(t) - defer nuke(r) + eng := NewTestEngine(t) + r := mkRuntimeFromEngine(eng, t) + defer r.Nuke() // Add directory not owned by root - container1, _ := mkContainer(r, []string{"_", "/bin/sh", "-c", "mkdir -p /hello && touch /hello/test.txt && chown daemon.daemon /hello"}, t) + container1, _, _ := mkContainer(r, []string{"_", "/bin/sh", "-c", "mkdir -p /hello && touch /hello/test.txt && chown daemon.daemon /hello"}, t) defer r.Destroy(container1) if container1.State.Running { @@ -1261,11 +1181,7 @@ func TestCopyVolumeUidGid(t *testing.T) { t.Errorf("Container shouldn't be running") } - rwTar, err := container1.ExportRw() - if err != nil { - t.Error(err) - } - img, err := r.graph.Create(rwTar, container1, "unit test commited image", "", nil) + img, err := r.Commit(container1, "", "", "unit test commited image", "", nil) if err != nil { t.Error(err) } @@ -1273,7 +1189,7 @@ func TestCopyVolumeUidGid(t *testing.T) { // Test that the uid and gid is copied from the image to the volume tmpDir1 := tempDir(t) defer os.RemoveAll(tmpDir1) - stdout1, _ := runContainer(r, []string{"-v", "/hello", img.ID, "stat", "-c", "%U %G", "/hello"}, t) + stdout1, _ := runContainer(eng, r, []string{"-v", "/hello", img.ID, "stat", "-c", "%U %G", "/hello"}, t) if !strings.Contains(stdout1, "daemon daemon") { t.Fatal("Container failed to transfer uid and gid to volume") } @@ -1281,11 +1197,12 @@ func TestCopyVolumeUidGid(t *testing.T) { // Test for #1582 func TestCopyVolumeContent(t *testing.T) { - r := mkRuntime(t) - defer nuke(r) + eng := NewTestEngine(t) + r := mkRuntimeFromEngine(eng, t) + defer r.Nuke() // Put some content in a directory of a container and commit it - container1, _ := mkContainer(r, 
[]string{"_", "/bin/sh", "-c", "mkdir -p /hello/local && echo hello > /hello/local/world"}, t) + container1, _, _ := mkContainer(r, []string{"_", "/bin/sh", "-c", "mkdir -p /hello/local && echo hello > /hello/local/world"}, t) defer r.Destroy(container1) if container1.State.Running { @@ -1298,11 +1215,7 @@ func TestCopyVolumeContent(t *testing.T) { t.Errorf("Container shouldn't be running") } - rwTar, err := container1.ExportRw() - if err != nil { - t.Error(err) - } - img, err := r.graph.Create(rwTar, container1, "unit test commited image", "", nil) + img, err := r.Commit(container1, "", "", "unit test commited image", "", nil) if err != nil { t.Error(err) } @@ -1310,31 +1223,33 @@ func TestCopyVolumeContent(t *testing.T) { // Test that the content is copied from the image to the volume tmpDir1 := tempDir(t) defer os.RemoveAll(tmpDir1) - stdout1, _ := runContainer(r, []string{"-v", "/hello", img.ID, "find", "/hello"}, t) + stdout1, _ := runContainer(eng, r, []string{"-v", "/hello", img.ID, "find", "/hello"}, t) if !(strings.Contains(stdout1, "/hello/local/world") && strings.Contains(stdout1, "/hello/local")) { t.Fatal("Container failed to transfer content to volume") } } func TestBindMounts(t *testing.T) { - r := mkRuntime(t) - defer nuke(r) + eng := NewTestEngine(t) + r := mkRuntimeFromEngine(eng, t) + defer r.Nuke() + tmpDir := tempDir(t) defer os.RemoveAll(tmpDir) writeFile(path.Join(tmpDir, "touch-me"), "", t) // Test reading from a read-only bind mount - stdout, _ := runContainer(r, []string{"-v", fmt.Sprintf("%s:/tmp:ro", tmpDir), "_", "ls", "/tmp"}, t) + stdout, _ := runContainer(eng, r, []string{"-v", fmt.Sprintf("%s:/tmp:ro", tmpDir), "_", "ls", "/tmp"}, t) if !strings.Contains(stdout, "touch-me") { t.Fatal("Container failed to read from bind mount") } // test writing to bind mount - runContainer(r, []string{"-v", fmt.Sprintf("%s:/tmp:rw", tmpDir), "_", "touch", "/tmp/holla"}, t) + runContainer(eng, r, []string{"-v", fmt.Sprintf("%s:/tmp:rw", tmpDir), "_", "touch", "/tmp/holla"}, t) readFile(path.Join(tmpDir, "holla"), t) // Will fail if the file doesn't exist // test mounting to an illegal destination directory - if _, err := runContainer(r, []string{"-v", fmt.Sprintf("%s:.", tmpDir), "_", "ls", "."}, nil); err == nil { + if _, err := runContainer(eng, r, []string{"-v", fmt.Sprintf("%s:.", tmpDir), "_", "ls", "."}, nil); err == nil { t.Fatal("Container bind mounted illegal directory") } } @@ -1344,7 +1259,7 @@ func TestFromVolumesInReadonlyMode(t *testing.T) { runtime := mkRuntime(t) defer nuke(runtime) container, _, err := runtime.Create( - &Config{ + &docker.Config{ Image: GetTestImage(runtime).ID, Cmd: []string{"/bin/echo", "-n", "foobar"}, Volumes: map[string]struct{}{"/test": {}}, @@ -1364,7 +1279,7 @@ func TestFromVolumesInReadonlyMode(t *testing.T) { } container2, _, err := runtime.Create( - &Config{ + &docker.Config{ Image: GetTestImage(runtime).ID, Cmd: []string{"/bin/echo", "-n", "foobar"}, VolumesFrom: container.ID + ":ro", @@ -1405,7 +1320,7 @@ func TestVolumesFromReadonlyMount(t *testing.T) { runtime := mkRuntime(t) defer nuke(runtime) container, _, err := runtime.Create( - &Config{ + &docker.Config{ Image: GetTestImage(runtime).ID, Cmd: []string{"/bin/echo", "-n", "foobar"}, Volumes: map[string]struct{}{"/test": {}}, @@ -1425,7 +1340,7 @@ func TestVolumesFromReadonlyMount(t *testing.T) { } container2, _, err := runtime.Create( - &Config{ + &docker.Config{ Image: GetTestImage(runtime).ID, Cmd: []string{"/bin/echo", "-n", "foobar"}, VolumesFrom: container.ID, @@ 
-1461,7 +1376,7 @@ func TestRestartWithVolumes(t *testing.T) { runtime := mkRuntime(t) defer nuke(runtime) - container, _, err := runtime.Create(&Config{ + container, _, err := runtime.Create(&docker.Config{ Image: GetTestImage(runtime).ID, Cmd: []string{"echo", "-n", "foobar"}, Volumes: map[string]struct{}{"/test": {}}, @@ -1505,7 +1420,7 @@ func TestVolumesFromWithVolumes(t *testing.T) { runtime := mkRuntime(t) defer nuke(runtime) - container, _, err := runtime.Create(&Config{ + container, _, err := runtime.Create(&docker.Config{ Image: GetTestImage(runtime).ID, Cmd: []string{"sh", "-c", "echo -n bar > /test/foo"}, Volumes: map[string]struct{}{"/test": {}}, @@ -1534,7 +1449,7 @@ func TestVolumesFromWithVolumes(t *testing.T) { } container2, _, err := runtime.Create( - &Config{ + &docker.Config{ Image: GetTestImage(runtime).ID, Cmd: []string{"cat", "/test/foo"}, VolumesFrom: container.ID, @@ -1568,26 +1483,42 @@ func TestVolumesFromWithVolumes(t *testing.T) { } func TestOnlyLoopbackExistsWhenUsingDisableNetworkOption(t *testing.T) { - runtime := mkRuntime(t) + eng := NewTestEngine(t) + runtime := mkRuntimeFromEngine(eng, t) defer nuke(runtime) - config, hc, _, err := ParseRun([]string{"-n=false", GetTestImage(runtime).ID, "ip", "addr", "show"}, nil) + config, hc, _, err := docker.ParseRun([]string{"-n=false", GetTestImage(runtime).ID, "ip", "addr", "show"}, nil) if err != nil { t.Fatal(err) } - c, _, err := runtime.Create(config, "") - if err != nil { + + jobCreate := eng.Job("create") + if err := jobCreate.ImportEnv(config); err != nil { t.Fatal(err) } + var id string + jobCreate.StdoutParseString(&id) + if err := jobCreate.Run(); err != nil { + t.Fatal(err) + } + // FIXME: this hack can be removed once Wait is a job + c := runtime.Get(id) + if c == nil { + t.Fatalf("Couldn't retrieve container %s from runtime", id) + } stdout, err := c.StdoutPipe() if err != nil { t.Fatal(err) } - defer runtime.Destroy(c) - c.hostConfig = hc - if err := c.Start(); err != nil { + + jobStart := eng.Job("start", id) + if err := jobStart.ImportEnv(hc); err != nil { t.Fatal(err) } + if err := jobStart.Run(); err != nil { + t.Fatal(err) + } + c.WaitTimeout(500 * time.Millisecond) c.Wait() output, err := ioutil.ReadAll(stdout) @@ -1602,37 +1533,40 @@ func TestOnlyLoopbackExistsWhenUsingDisableNetworkOption(t *testing.T) { if !strings.HasSuffix(interfaces[0], ": lo") { t.Fatalf("Wrong interface in test container: expected [*: lo], got %s", interfaces) } - } func TestPrivilegedCanMknod(t *testing.T) { - runtime := mkRuntime(t) - defer nuke(runtime) - if output, _ := runContainer(runtime, []string{"-privileged", "_", "sh", "-c", "mknod /tmp/sda b 8 0 && echo ok"}, t); output != "ok\n" { + eng := NewTestEngine(t) + runtime := mkRuntimeFromEngine(eng, t) + defer runtime.Nuke() + if output, _ := runContainer(eng, runtime, []string{"-privileged", "_", "sh", "-c", "mknod /tmp/sda b 8 0 && echo ok"}, t); output != "ok\n" { t.Fatal("Could not mknod into privileged container") } } func TestPrivilegedCanMount(t *testing.T) { - runtime := mkRuntime(t) - defer nuke(runtime) - if output, _ := runContainer(runtime, []string{"-privileged", "_", "sh", "-c", "mount -t tmpfs none /tmp && echo ok"}, t); output != "ok\n" { + eng := NewTestEngine(t) + runtime := mkRuntimeFromEngine(eng, t) + defer runtime.Nuke() + if output, _ := runContainer(eng, runtime, []string{"-privileged", "_", "sh", "-c", "mount -t tmpfs none /tmp && echo ok"}, t); output != "ok\n" { t.Fatal("Could not mount into privileged container") } } func 
TestPrivilegedCannotMknod(t *testing.T) { - runtime := mkRuntime(t) - defer nuke(runtime) - if output, _ := runContainer(runtime, []string{"_", "sh", "-c", "mknod /tmp/sda b 8 0 || echo ok"}, t); output != "ok\n" { + eng := NewTestEngine(t) + runtime := mkRuntimeFromEngine(eng, t) + defer runtime.Nuke() + if output, _ := runContainer(eng, runtime, []string{"_", "sh", "-c", "mknod /tmp/sda b 8 0 || echo ok"}, t); output != "ok\n" { t.Fatal("Could mknod into secure container") } } func TestPrivilegedCannotMount(t *testing.T) { - runtime := mkRuntime(t) - defer nuke(runtime) - if output, _ := runContainer(runtime, []string{"_", "sh", "-c", "mount -t tmpfs none /tmp || echo ok"}, t); output != "ok\n" { + eng := NewTestEngine(t) + runtime := mkRuntimeFromEngine(eng, t) + defer runtime.Nuke() + if output, _ := runContainer(eng, runtime, []string{"_", "sh", "-c", "mount -t tmpfs none /tmp || echo ok"}, t); output != "ok\n" { t.Fatal("Could mount into secure container") } } @@ -1641,7 +1575,7 @@ func TestMultipleVolumesFrom(t *testing.T) { runtime := mkRuntime(t) defer nuke(runtime) - container, _, err := runtime.Create(&Config{ + container, _, err := runtime.Create(&docker.Config{ Image: GetTestImage(runtime).ID, Cmd: []string{"sh", "-c", "echo -n bar > /test/foo"}, Volumes: map[string]struct{}{"/test": {}}, @@ -1670,7 +1604,7 @@ func TestMultipleVolumesFrom(t *testing.T) { } container2, _, err := runtime.Create( - &Config{ + &docker.Config{ Image: GetTestImage(runtime).ID, Cmd: []string{"sh", "-c", "echo -n bar > /other/foo"}, Volumes: map[string]struct{}{"/other": {}}, @@ -1692,7 +1626,7 @@ func TestMultipleVolumesFrom(t *testing.T) { } container3, _, err := runtime.Create( - &Config{ + &docker.Config{ Image: GetTestImage(runtime).ID, Cmd: []string{"/bin/echo", "-n", "foobar"}, VolumesFrom: strings.Join([]string{container.ID, container2.ID}, ","), @@ -1720,7 +1654,7 @@ func TestRestartGhost(t *testing.T) { defer nuke(runtime) container, _, err := runtime.Create( - &Config{ + &docker.Config{ Image: GetTestImage(runtime).ID, Cmd: []string{"sh", "-c", "echo -n bar > /test/foo"}, Volumes: map[string]struct{}{"/test": {}}, diff --git a/integration/graph_test.go b/integration/graph_test.go new file mode 100644 index 0000000000..bfbedfa48f --- /dev/null +++ b/integration/graph_test.go @@ -0,0 +1,57 @@ +package docker + +import ( + "github.com/dotcloud/docker" + "io/ioutil" + "os" + "path" + "testing" +) + +func TestMount(t *testing.T) { + graph := tempGraph(t) + defer os.RemoveAll(graph.Root) + archive, err := fakeTar() + if err != nil { + t.Fatal(err) + } + image, err := graph.Create(archive, nil, "Testing", "", nil) + if err != nil { + t.Fatal(err) + } + tmp, err := ioutil.TempDir("", "docker-test-graph-mount-") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(tmp) + rootfs := path.Join(tmp, "rootfs") + if err := os.MkdirAll(rootfs, 0700); err != nil { + t.Fatal(err) + } + rw := path.Join(tmp, "rw") + if err := os.MkdirAll(rw, 0700); err != nil { + t.Fatal(err) + } + if err := image.Mount(rootfs, rw); err != nil { + t.Fatal(err) + } + // FIXME: test for mount contents + defer func() { + if err := docker.Unmount(rootfs); err != nil { + t.Error(err) + } + }() +} + +//FIXME: duplicate +func tempGraph(t *testing.T) *docker.Graph { + tmp, err := ioutil.TempDir("", "docker-graph-") + if err != nil { + t.Fatal(err) + } + graph, err := docker.NewGraph(tmp) + if err != nil { + t.Fatal(err) + } + return graph +} diff --git a/integration/iptables_test.go b/integration/iptables_test.go new file mode 
100644 index 0000000000..060d0fe074 --- /dev/null +++ b/integration/iptables_test.go @@ -0,0 +1,22 @@ +package docker + +import ( + "github.com/dotcloud/docker/iptables" + "os" + "testing" +) + +// FIXME: this test should be a unit test. +// For example by mocking os/exec to make sure iptables is not actually called. + +func TestIptables(t *testing.T) { + if _, err := iptables.Raw("-L"); err != nil { + t.Fatal(err) + } + path := os.Getenv("PATH") + os.Setenv("PATH", "") + defer os.Setenv("PATH", path) + if _, err := iptables.Raw("-L"); err == nil { + t.Fatal("Not finding iptables in the PATH should cause an error") + } +} diff --git a/runtime_test.go b/integration/runtime_test.go similarity index 74% rename from runtime_test.go rename to integration/runtime_test.go index 6a365d338d..fb928d8160 100644 --- a/runtime_test.go +++ b/integration/runtime_test.go @@ -3,6 +3,7 @@ package docker import ( "bytes" "fmt" + "github.com/dotcloud/docker" "github.com/dotcloud/docker/engine" "github.com/dotcloud/docker/sysinit" "github.com/dotcloud/docker/utils" @@ -15,7 +16,6 @@ import ( "runtime" "strconv" "strings" - "sync" "syscall" "testing" "time" @@ -32,39 +32,33 @@ const ( ) var ( - globalRuntime *Runtime + // FIXME: globalRuntime is deprecated by globalEngine. All tests should be converted. + globalRuntime *docker.Runtime + globalEngine *engine.Engine startFds int startGoroutines int ) -func nuke(runtime *Runtime) error { - var wg sync.WaitGroup - for _, container := range runtime.List() { - wg.Add(1) - go func(c *Container) { - c.Kill() - wg.Done() - }(container) - } - wg.Wait() - runtime.Close() - - os.Remove(filepath.Join(runtime.config.Root, "linkgraph.db")) - return os.RemoveAll(runtime.config.Root) +// FIXME: nuke() is deprecated by Runtime.Nuke() +func nuke(runtime *docker.Runtime) error { + return runtime.Nuke() } -func cleanup(runtime *Runtime) error { +// FIXME: cleanup and nuke are redundant. +func cleanup(eng *engine.Engine, t *testing.T) error { + runtime := mkRuntimeFromEngine(eng, t) for _, container := range runtime.List() { container.Kill() runtime.Destroy(container) } - images, err := runtime.graph.Map() + srv := mkServerFromEngine(eng, t) + images, err := srv.Images(true, "") if err != nil { return err } for _, image := range images { if image.ID != unitTestImageID { - runtime.graph.Delete(image.ID) + srv.ImageDelete(image.ID, false) } } return nil @@ -133,10 +127,9 @@ func setupBaseImage() { log.Fatalf("Unable to create a runtime for tests:", err) } srv := mkServerFromEngine(eng, log.New(os.Stderr, "", 0)) - runtime := srv.runtime // If the unit test is not found, try to download it. 
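Aside on the FIXME in integration/iptables_test.go above ("this test should be a unit test... by mocking os/exec"): a hedged sketch of that idea. The execFunc seam below is hypothetical, the real iptables package shells out to the binary directly, and Raw here is only a stand-in wired through the seam so the sketch stays self-contained:

package iptables

import "testing"

// execFunc is the hypothetical injection point standing in for os/exec.
var execFunc = func(args ...string) ([]byte, error) { return nil, nil }

// Raw is a stand-in for iptables.Raw, routed through the seam so a test can
// observe the arguments without touching the host firewall.
func Raw(args ...string) ([]byte, error) { return execFunc(args...) }

func TestRawDoesNotShellOut(t *testing.T) {
	var got []string
	execFunc = func(args ...string) ([]byte, error) {
		got = args
		return []byte{}, nil
	}
	if _, err := Raw("-L"); err != nil {
		t.Fatal(err)
	}
	if len(got) != 1 || got[0] != "-L" {
		t.Fatalf("expected arguments [-L], got %v", got)
	}
}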
- if img, err := runtime.repositories.LookupImage(unitTestImageName); err != nil || img.ID != unitTestImageID { + if img, err := srv.ImageInspect(unitTestImageName); err != nil || img.ID != unitTestImageID { // Retrieve the Image if err := srv.ImagePull(unitTestImageName, "", os.Stdout, utils.NewStreamFormatter(false), nil, nil, true); err != nil { log.Fatalf("Unable to pull the test image: %s", err) @@ -151,8 +144,8 @@ func spawnGlobalDaemon() { } t := log.New(os.Stderr, "", 0) eng := NewTestEngine(t) - srv := mkServerFromEngine(eng, t) - globalRuntime = srv.runtime + globalEngine = eng + globalRuntime = mkRuntimeFromEngine(eng, t) // Spawn a Daemon go func() { @@ -174,8 +167,8 @@ func spawnGlobalDaemon() { // FIXME: test that ImagePull(json=true) send correct json output -func GetTestImage(runtime *Runtime) *Image { - imgs, err := runtime.graph.Map() +func GetTestImage(runtime *docker.Runtime) *docker.Image { + imgs, err := runtime.Graph().Map() if err != nil { log.Fatalf("Unable to get the test image:", err) } @@ -184,7 +177,7 @@ func GetTestImage(runtime *Runtime) *Image { return image } } - log.Fatalf("Test image %v not found in %s: %s", unitTestImageID, runtime.graph.Root, imgs) + log.Fatalf("Test image %v not found in %s: %s", unitTestImageID, runtime.Graph().Root, imgs) return nil } @@ -197,7 +190,7 @@ func TestRuntimeCreate(t *testing.T) { t.Errorf("Expected 0 containers, %v found", len(runtime.List())) } - container, _, err := runtime.Create(&Config{ + container, _, err := runtime.Create(&docker.Config{ Image: GetTestImage(runtime).ID, Cmd: []string{"ls", "-al"}, }, @@ -239,12 +232,12 @@ func TestRuntimeCreate(t *testing.T) { } // Make sure create with bad parameters returns an error - if _, _, err = runtime.Create(&Config{Image: GetTestImage(runtime).ID}, ""); err == nil { + if _, _, err = runtime.Create(&docker.Config{Image: GetTestImage(runtime).ID}, ""); err == nil { t.Fatal("Builder.Create should throw an error when Cmd is missing") } if _, _, err := runtime.Create( - &Config{ + &docker.Config{ Image: GetTestImage(runtime).ID, Cmd: []string{}, }, @@ -253,7 +246,7 @@ func TestRuntimeCreate(t *testing.T) { t.Fatal("Builder.Create should throw an error when Cmd is empty") } - config := &Config{ + config := &docker.Config{ Image: GetTestImage(runtime).ID, Cmd: []string{"/bin/ls"}, PortSpecs: []string{"80"}, @@ -266,7 +259,7 @@ func TestRuntimeCreate(t *testing.T) { } // test expose 80:8000 - container, warnings, err := runtime.Create(&Config{ + container, warnings, err := runtime.Create(&docker.Config{ Image: GetTestImage(runtime).ID, Cmd: []string{"ls", "-al"}, PortSpecs: []string{"80:8000"}, @@ -285,7 +278,7 @@ func TestDestroy(t *testing.T) { runtime := mkRuntime(t) defer nuke(runtime) - container, _, err := runtime.Create(&Config{ + container, _, err := runtime.Create(&docker.Config{ Image: GetTestImage(runtime).ID, Cmd: []string{"ls", "-al"}, }, "") @@ -312,12 +305,6 @@ func TestDestroy(t *testing.T) { t.Errorf("Unable to get newly created container") } - // Make sure the container root directory does not exist anymore - _, err = os.Stat(container.root) - if err == nil || !os.IsNotExist(err) { - t.Errorf("Container root directory still exists after destroy") - } - // Test double destroy if err := runtime.Destroy(container); err == nil { // It should have failed @@ -329,13 +316,13 @@ func TestGet(t *testing.T) { runtime := mkRuntime(t) defer nuke(runtime) - container1, _ := mkContainer(runtime, []string{"_", "ls", "-al"}, t) + container1, _, _ := mkContainer(runtime, 
[]string{"_", "ls", "-al"}, t) defer runtime.Destroy(container1) - container2, _ := mkContainer(runtime, []string{"_", "ls", "-al"}, t) + container2, _, _ := mkContainer(runtime, []string{"_", "ls", "-al"}, t) defer runtime.Destroy(container2) - container3, _ := mkContainer(runtime, []string{"_", "ls", "-al"}, t) + container3, _, _ := mkContainer(runtime, []string{"_", "ls", "-al"}, t) defer runtime.Destroy(container3) if runtime.Get(container1.ID) != container1 { @@ -352,15 +339,21 @@ func TestGet(t *testing.T) { } -func startEchoServerContainer(t *testing.T, proto string) (*Runtime, *Container, string) { +func startEchoServerContainer(t *testing.T, proto string) (*docker.Runtime, *docker.Container, string) { var ( - err error - container *Container - strPort string - runtime = mkRuntime(t) - port = 5554 - p Port + err error + id string + strPort string + eng = NewTestEngine(t) + runtime = mkRuntimeFromEngine(eng, t) + port = 5554 + p docker.Port ) + defer func() { + if err != nil { + runtime.Nuke() + } + }() for { port += 1 @@ -373,37 +366,45 @@ func startEchoServerContainer(t *testing.T, proto string) (*Runtime, *Container, } else { t.Fatal(fmt.Errorf("Unknown protocol %v", proto)) } - ep := make(map[Port]struct{}, 1) - p = Port(fmt.Sprintf("%s/%s", strPort, proto)) + ep := make(map[docker.Port]struct{}, 1) + p = docker.Port(fmt.Sprintf("%s/%s", strPort, proto)) ep[p] = struct{}{} - container, _, err = runtime.Create(&Config{ - Image: GetTestImage(runtime).ID, - Cmd: []string{"sh", "-c", cmd}, - PortSpecs: []string{fmt.Sprintf("%s/%s", strPort, proto)}, - ExposedPorts: ep, - }, "") - if err != nil { - nuke(runtime) + jobCreate := eng.Job("create") + jobCreate.Setenv("Image", unitTestImageID) + jobCreate.SetenvList("Cmd", []string{"sh", "-c", cmd}) + jobCreate.SetenvList("PortSpecs", []string{fmt.Sprintf("%s/%s", strPort, proto)}) + jobCreate.SetenvJson("ExposedPorts", ep) + jobCreate.StdoutParseString(&id) + if err := jobCreate.Run(); err != nil { t.Fatal(err) } - - if container != nil { + // FIXME: this relies on the undocumented behavior of runtime.Create + // which will return a nil error AND container if the exposed ports + // are invalid. That behavior should be fixed! 
+ if id != "" { break } t.Logf("Port %v already in use, trying another one", strPort) + } - container.hostConfig = &HostConfig{ - PortBindings: make(map[Port][]PortBinding), - } - container.hostConfig.PortBindings[p] = []PortBinding{ + jobStart := eng.Job("start", id) + portBindings := make(map[docker.Port][]docker.PortBinding) + portBindings[p] = []docker.PortBinding{ {}, } - if err := container.Start(); err != nil { - nuke(runtime) + if err := jobStart.SetenvJson("PortsBindings", portBindings); err != nil { t.Fatal(err) } + if err := jobStart.Run(); err != nil { + t.Fatal(err) + } + + container := runtime.Get(id) + if container == nil { + t.Fatalf("Couldn't fetch test container %s", id) + } setTimeout(t, "Waiting for the container to be started timed out", 2*time.Second, func() { for !container.State.Running { @@ -504,14 +505,15 @@ func TestAllocateUDPPortLocalhost(t *testing.T) { } func TestRestore(t *testing.T) { - runtime1 := mkRuntime(t) - defer nuke(runtime1) + eng := NewTestEngine(t) + runtime1 := mkRuntimeFromEngine(eng, t) + defer runtime1.Nuke() // Create a container with one instance of docker - container1, _ := mkContainer(runtime1, []string{"_", "ls", "-al"}, t) + container1, _, _ := mkContainer(runtime1, []string{"_", "ls", "-al"}, t) defer runtime1.Destroy(container1) // Create a second container meant to be killed - container2, _ := mkContainer(runtime1, []string{"-i", "_", "/bin/cat"}, t) + container2, _, _ := mkContainer(runtime1, []string{"-i", "_", "/bin/cat"}, t) defer runtime1.Destroy(container2) // Start the container non blocking @@ -545,12 +547,19 @@ func TestRestore(t *testing.T) { // Here are are simulating a docker restart - that is, reloading all containers // from scratch - runtime1.config.AutoRestart = false - runtime2, err := NewRuntimeFromDirectory(runtime1.config) + root := eng.Root() + eng, err := engine.New(root) if err != nil { t.Fatal(err) } - defer nuke(runtime2) + job := eng.Job("initapi") + job.Setenv("Root", eng.Root()) + job.SetenvBool("Autorestart", false) + if err := job.Run(); err != nil { + t.Fatal(err) + } + + runtime2 := mkRuntimeFromEngine(eng, t) if len(runtime2.List()) != 2 { t.Errorf("Expected 2 container, %v found", len(runtime2.List())) } @@ -575,14 +584,31 @@ func TestRestore(t *testing.T) { } func TestReloadContainerLinks(t *testing.T) { - runtime1 := mkRuntime(t) + // FIXME: here we don't use NewTestEngine because it calls initapi with Autorestart=false, + // and we want to set it to true. 
+ root, err := newTestDirectory(unitTestStoreBase) + if err != nil { + t.Fatal(err) + } + eng, err := engine.New(root) + if err != nil { + t.Fatal(err) + } + job := eng.Job("initapi") + job.Setenv("Root", eng.Root()) + job.SetenvBool("Autorestart", true) + if err := job.Run(); err != nil { + t.Fatal(err) + } + + runtime1 := mkRuntimeFromEngine(eng, t) defer nuke(runtime1) // Create a container with one instance of docker - container1, _ := mkContainer(runtime1, []string{"-i", "_", "/bin/sh"}, t) + container1, _, _ := mkContainer(runtime1, []string{"-i", "_", "/bin/sh"}, t) defer runtime1.Destroy(container1) // Create a second container meant to be killed - container2, _ := mkContainer(runtime1, []string{"-i", "_", "/bin/cat"}, t) + container2, _, _ := mkContainer(runtime1, []string{"-i", "_", "/bin/cat"}, t) defer runtime1.Destroy(container2) // Start the container non blocking @@ -590,7 +616,9 @@ func TestReloadContainerLinks(t *testing.T) { t.Fatal(err) } // Add a link to container 2 - container1.hostConfig.Links = []string{"/" + container2.ID + ":first"} + // FIXME @shykes: setting hostConfig.Links seems redundant with calling RegisterLink(). + // Why do we need it @crosbymichael? + // container1.hostConfig.Links = []string{"/" + container2.ID + ":first"} if err := runtime1.RegisterLink(container1, container2, "first"); err != nil { t.Fatal(err) } @@ -612,12 +640,18 @@ func TestReloadContainerLinks(t *testing.T) { // Here are are simulating a docker restart - that is, reloading all containers // from scratch - runtime1.config.AutoRestart = true - runtime2, err := NewRuntimeFromDirectory(runtime1.config) + eng, err = engine.New(root) if err != nil { t.Fatal(err) } - defer nuke(runtime2) + job = eng.Job("initapi") + job.Setenv("Root", eng.Root()) + job.SetenvBool("Autorestart", false) + if err := job.Run(); err != nil { + t.Fatal(err) + } + + runtime2 := mkRuntimeFromEngine(eng, t) if len(runtime2.List()) != 2 { t.Errorf("Expected 2 container, %v found", len(runtime2.List())) } @@ -631,27 +665,32 @@ func TestReloadContainerLinks(t *testing.T) { t.Fatalf("Expected 2 container alive, %d found", runningCount) } + // FIXME: we no longer test if containers were registered in the right order, + // because there is no public // Make sure container 2 ( the child of container 1 ) was registered and started first // with the runtime - first := runtime2.containers.Front() - if first.Value.(*Container).ID != container2.ID { + // + containers := runtime2.List() + if len(containers) == 0 { + t.Fatalf("Runtime has no containers") + } + first := containers[0] + if first.ID != container2.ID { t.Fatalf("Container 2 %s should be registered first in the runtime", container2.ID) } // Verify that the link is still registered in the runtime - entity := runtime2.containerGraph.Get(container1.Name) - if entity == nil { - t.Fatal("Entity should not be nil") + if c := runtime2.Get(container1.Name); c == nil { + t.Fatal("Named container is no longer registered after restart") } } func TestDefaultContainerName(t *testing.T) { eng := NewTestEngine(t) - srv := mkServerFromEngine(eng, t) - runtime := srv.runtime + runtime := mkRuntimeFromEngine(eng, t) defer nuke(runtime) - config, _, _, err := ParseRun([]string{GetTestImage(runtime).ID, "echo test"}, nil) + config, _, _, err := docker.ParseRun([]string{unitTestImageID, "echo test"}, nil) if err != nil { t.Fatal(err) } @@ -663,29 +702,19 @@ func TestDefaultContainerName(t *testing.T) { t.Fatalf("Expect /some_name got %s", container.Name) } - paths := 
runtime.containerGraph.RefPaths(containerID) - if paths == nil || len(paths) == 0 { - t.Fatalf("Could not find edges for %s", containerID) - } - edge := paths[0] - if edge.ParentID != "0" { - t.Fatalf("Expected engine got %s", edge.ParentID) - } - if edge.EntityID != containerID { - t.Fatalf("Expected %s got %s", containerID, edge.EntityID) - } - if edge.Name != "some_name" { - t.Fatalf("Expected some_name got %s", edge.Name) + if c := runtime.Get("/some_name"); c == nil { + t.Fatalf("Couldn't retrieve test container as /some_name") + } else if c.ID != containerID { + t.Fatalf("Container /some_name has ID %s instead of %s", c.ID, containerID) } } func TestRandomContainerName(t *testing.T) { eng := NewTestEngine(t) - srv := mkServerFromEngine(eng, t) - runtime := srv.runtime + runtime := mkRuntimeFromEngine(eng, t) defer nuke(runtime) - config, _, _, err := ParseRun([]string{GetTestImage(runtime).ID, "echo test"}, nil) + config, _, _, err := docker.ParseRun([]string{GetTestImage(runtime).ID, "echo test"}, nil) if err != nil { t.Fatal(err) } @@ -697,29 +726,19 @@ func TestRandomContainerName(t *testing.T) { t.Fatalf("Expected not empty container name") } - paths := runtime.containerGraph.RefPaths(containerID) - if paths == nil || len(paths) == 0 { - t.Fatalf("Could not find edges for %s", containerID) - } - edge := paths[0] - if edge.ParentID != "0" { - t.Fatalf("Expected engine got %s", edge.ParentID) - } - if edge.EntityID != containerID { - t.Fatalf("Expected %s got %s", containerID, edge.EntityID) - } - if edge.Name == "" { - t.Fatalf("Expected not empty container name") + if c := runtime.Get(container.Name); c == nil { + log.Fatalf("Could not lookup container %s by its name", container.Name) + } else if c.ID != containerID { + log.Fatalf("Looking up container name %s returned id %s instead of %s", container.Name, c.ID, containerID) } } func TestLinkChildContainer(t *testing.T) { eng := NewTestEngine(t) - srv := mkServerFromEngine(eng, t) - runtime := srv.runtime + runtime := mkRuntimeFromEngine(eng, t) defer nuke(runtime) - config, _, _, err := ParseRun([]string{GetTestImage(runtime).ID, "echo test"}, nil) + config, _, _, err := docker.ParseRun([]string{unitTestImageID, "echo test"}, nil) if err != nil { t.Fatal(err) } @@ -735,7 +754,7 @@ func TestLinkChildContainer(t *testing.T) { t.Fatalf("Expect webapp id to match container id: %s != %s", webapp.ID, container.ID) } - config, _, _, err = ParseRun([]string{GetTestImage(runtime).ID, "echo test"}, nil) + config, _, _, err = docker.ParseRun([]string{GetTestImage(runtime).ID, "echo test"}, nil) if err != nil { t.Fatal(err) } @@ -758,11 +777,10 @@ func TestLinkChildContainer(t *testing.T) { func TestGetAllChildren(t *testing.T) { eng := NewTestEngine(t) - srv := mkServerFromEngine(eng, t) - runtime := srv.runtime + runtime := mkRuntimeFromEngine(eng, t) defer nuke(runtime) - config, _, _, err := ParseRun([]string{GetTestImage(runtime).ID, "echo test"}, nil) + config, _, _, err := docker.ParseRun([]string{unitTestImageID, "echo test"}, nil) if err != nil { t.Fatal(err) } @@ -778,7 +796,7 @@ func TestGetAllChildren(t *testing.T) { t.Fatalf("Expect webapp id to match container id: %s != %s", webapp.ID, container.ID) } - config, _, _, err = ParseRun([]string{GetTestImage(runtime).ID, "echo test"}, nil) + config, _, _, err = docker.ParseRun([]string{unitTestImageID, "echo test"}, nil) if err != nil { t.Fatal(err) } @@ -810,19 +828,3 @@ func TestGetAllChildren(t *testing.T) { } } } - -func TestGetFullName(t *testing.T) { - runtime := mkRuntime(t) 
- defer nuke(runtime) - - name, err := runtime.getFullName("testing") - if err != nil { - t.Fatal(err) - } - if name != "/testing" { - t.Fatalf("Expected /testing got %s", name) - } - if _, err := runtime.getFullName(""); err == nil { - t.Fatal("Error should not be nil") - } -} diff --git a/server_test.go b/integration/server_test.go similarity index 52% rename from server_test.go rename to integration/server_test.go index 1ab38422f5..6c61bedafb 100644 --- a/server_test.go +++ b/integration/server_test.go @@ -1,32 +1,31 @@ package docker import ( + "github.com/dotcloud/docker" "github.com/dotcloud/docker/utils" "io/ioutil" "strings" "testing" - "time" ) func TestContainerTagImageDelete(t *testing.T) { - runtime := mkRuntime(t) - defer nuke(runtime) + eng := NewTestEngine(t) + defer mkRuntimeFromEngine(eng, t).Nuke() - srv := &Server{runtime: runtime} + srv := mkServerFromEngine(eng, t) initialImages, err := srv.Images(false, "") if err != nil { t.Fatal(err) } - - if err := srv.runtime.repositories.Set("utest", "tag1", unitTestImageName, false); err != nil { + if err := srv.ContainerTag(unitTestImageName, "utest", "tag1", false); err != nil { t.Fatal(err) } - if err := srv.runtime.repositories.Set("utest/docker", "tag2", unitTestImageName, false); err != nil { + if err := srv.ContainerTag(unitTestImageName, "utest/docker", "tag2", false); err != nil { t.Fatal(err) } - if err := srv.runtime.repositories.Set("utest:5000/docker", "tag3", unitTestImageName, false); err != nil { + if err := srv.ContainerTag(unitTestImageName, "utest:5000/docker", "tag3", false); err != nil { t.Fatal(err) } @@ -82,46 +81,43 @@ func TestContainerTagImageDelete(t *testing.T) { func TestCreateRm(t *testing.T) { eng := NewTestEngine(t) srv := mkServerFromEngine(eng, t) - runtime := srv.runtime - defer nuke(runtime) + defer mkRuntimeFromEngine(eng, t).Nuke() - config, _, _, err := ParseRun([]string{GetTestImage(runtime).ID, "echo test"}, nil) + config, _, _, err := docker.ParseRun([]string{unitTestImageID, "echo test"}, nil) if err != nil { t.Fatal(err) } id := createTestContainer(eng, config, t) - if len(runtime.List()) != 1 { - t.Errorf("Expected 1 container, %v found", len(runtime.List())) + if c := srv.Containers(true, false, -1, "", ""); len(c) != 1 { + t.Errorf("Expected 1 container, %v found", len(c)) } if err = srv.ContainerDestroy(id, true, false); err != nil { t.Fatal(err) } - if len(runtime.List()) != 0 { - t.Errorf("Expected 0 container, %v found", len(runtime.List())) + if c := srv.Containers(true, false, -1, "", ""); len(c) != 0 { + t.Errorf("Expected 0 container, %v found", len(c)) } } func TestCreateRmVolumes(t *testing.T) { eng := NewTestEngine(t) - srv := mkServerFromEngine(eng, t) - runtime := srv.runtime - defer nuke(runtime) + defer mkRuntimeFromEngine(eng, t).Nuke() - config, hostConfig, _, err := ParseRun([]string{"-v", "/srv", GetTestImage(runtime).ID, "echo test"}, nil) + config, hostConfig, _, err := docker.ParseRun([]string{"-v", "/srv", unitTestImageID, "echo test"}, nil) if err != nil { t.Fatal(err) } id := createTestContainer(eng, config, t) - if len(runtime.List()) != 1 { - t.Errorf("Expected 1 container, %v found", len(runtime.List())) + if c := srv.Containers(true, false, -1, "", ""); len(c) != 1 { + t.Errorf("Expected 1 container, %v found", len(c)) } job := eng.Job("start", id) @@ -141,18 +137,17 @@ func TestCreateRmVolumes(t *testing.T) { t.Fatal(err) } - if len(runtime.List()) != 0 { - t.Errorf("Expected 0 container, %v found", len(runtime.List())) + if c := srv.Containers(true, 
false, -1, "", ""); len(c) != 0 { + t.Errorf("Expected 0 container, %v found", len(c)) } } func TestCommit(t *testing.T) { eng := NewTestEngine(t) srv := mkServerFromEngine(eng, t) - runtime := srv.runtime - defer nuke(runtime) + defer mkRuntimeFromEngine(eng, t).Nuke() - config, _, _, err := ParseRun([]string{GetTestImage(runtime).ID, "/bin/cat"}, nil) + config, _, _, err := docker.ParseRun([]string{unitTestImageID, "/bin/cat"}, nil) if err != nil { t.Fatal(err) } @@ -167,18 +162,17 @@ func TestCommit(t *testing.T) { func TestCreateStartRestartStopStartKillRm(t *testing.T) { eng := NewTestEngine(t) srv := mkServerFromEngine(eng, t) - runtime := srv.runtime - defer nuke(runtime) + defer mkRuntimeFromEngine(eng, t).Nuke() - config, hostConfig, _, err := ParseRun([]string{GetTestImage(runtime).ID, "/bin/cat"}, nil) + config, hostConfig, _, err := docker.ParseRun([]string{unitTestImageID, "/bin/cat"}, nil) if err != nil { t.Fatal(err) } id := createTestContainer(eng, config, t) - if len(runtime.List()) != 1 { - t.Errorf("Expected 1 container, %v found", len(runtime.List())) + if c := srv.Containers(true, false, -1, "", ""); len(c) != 1 { + t.Errorf("Expected 1 container, %v found", len(c)) } job := eng.Job("start", id) @@ -214,21 +208,18 @@ func TestCreateStartRestartStopStartKillRm(t *testing.T) { t.Fatal(err) } - if len(runtime.List()) != 0 { - t.Errorf("Expected 0 container, %v found", len(runtime.List())) + if c := srv.Containers(true, false, -1, "", ""); len(c) != 0 { + t.Errorf("Expected 0 container, %v found", len(c)) } - } func TestRunWithTooLowMemoryLimit(t *testing.T) { eng := NewTestEngine(t) - srv := mkServerFromEngine(eng, t) - runtime := srv.runtime - defer nuke(runtime) + defer mkRuntimeFromEngine(eng, t).Nuke() // Try to create a container with a memory limit of 1 byte less than the minimum allowed limit. job := eng.Job("create") - job.Setenv("Image", GetTestImage(runtime).ID) + job.Setenv("Image", unitTestImageID) job.Setenv("Memory", "524287") job.Setenv("CpuShares", "1000") job.SetenvList("Cmd", []string{"/bin/cat"}) @@ -239,163 +230,17 @@ func TestRunWithTooLowMemoryLimit(t *testing.T) { } } -func TestContainerTop(t *testing.T) { - t.Skip("Fixme. Skipping test for now. 
Reported error: 'server_test.go:236: Expected 2 processes, found 1.'") - - runtime := mkRuntime(t) - defer nuke(runtime) - - srv := &Server{runtime: runtime} - - c, _ := mkContainer(runtime, []string{"_", "/bin/sh", "-c", "sleep 2"}, t) - c, err := mkContainer(runtime, []string{"_", "/bin/sh", "-c", "sleep 2"}, t) - if err != nil { - t.Fatal(err) - } - - defer runtime.Destroy(c) - if err := c.Start(); err != nil { - t.Fatal(err) - } - - // Give some time to the process to start - c.WaitTimeout(500 * time.Millisecond) - - if !c.State.Running { - t.Errorf("Container should be running") - } - procs, err := srv.ContainerTop(c.ID, "") - if err != nil { - t.Fatal(err) - } - - if len(procs.Processes) != 2 { - t.Fatalf("Expected 2 processes, found %d.", len(procs.Processes)) - } - - pos := -1 - for i := 0; i < len(procs.Titles); i++ { - if procs.Titles[i] == "CMD" { - pos = i - break - } - } - - if pos == -1 { - t.Fatalf("Expected CMD, not found.") - } - - if procs.Processes[0][pos] != "sh" && procs.Processes[0][pos] != "busybox" { - t.Fatalf("Expected `busybox` or `sh`, found %s.", procs.Processes[0][pos]) - } - - if procs.Processes[1][pos] != "sh" && procs.Processes[1][pos] != "busybox" { - t.Fatalf("Expected `busybox` or `sh`, found %s.", procs.Processes[1][pos]) - } -} - -func TestPools(t *testing.T) { - runtime := mkRuntime(t) - srv := &Server{ - runtime: runtime, - pullingPool: make(map[string]struct{}), - pushingPool: make(map[string]struct{}), - } - defer nuke(runtime) - - err := srv.poolAdd("pull", "test1") - if err != nil { - t.Fatal(err) - } - err = srv.poolAdd("pull", "test2") - if err != nil { - t.Fatal(err) - } - err = srv.poolAdd("push", "test1") - if err == nil || err.Error() != "pull test1 is already in progress" { - t.Fatalf("Expected `pull test1 is already in progress`") - } - err = srv.poolAdd("pull", "test1") - if err == nil || err.Error() != "pull test1 is already in progress" { - t.Fatalf("Expected `pull test1 is already in progress`") - } - err = srv.poolAdd("wait", "test3") - if err == nil || err.Error() != "Unknown pool type" { - t.Fatalf("Expected `Unknown pool type`") - } - - err = srv.poolRemove("pull", "test2") - if err != nil { - t.Fatal(err) - } - err = srv.poolRemove("pull", "test2") - if err != nil { - t.Fatal(err) - } - err = srv.poolRemove("pull", "test1") - if err != nil { - t.Fatal(err) - } - err = srv.poolRemove("push", "test1") - if err != nil { - t.Fatal(err) - } - err = srv.poolRemove("wait", "test3") - if err == nil || err.Error() != "Unknown pool type" { - t.Fatalf("Expected `Unknown pool type`") - } -} - -func TestLogEvent(t *testing.T) { - runtime := mkRuntime(t) - defer nuke(runtime) - srv := &Server{ - runtime: runtime, - events: make([]utils.JSONMessage, 0, 64), - listeners: make(map[string]chan utils.JSONMessage), - } - - srv.LogEvent("fakeaction", "fakeid", "fakeimage") - - listener := make(chan utils.JSONMessage) - srv.Lock() - srv.listeners["test"] = listener - srv.Unlock() - - srv.LogEvent("fakeaction2", "fakeid", "fakeimage") - - if len(srv.events) != 2 { - t.Fatalf("Expected 2 events, found %d", len(srv.events)) - } - go func() { - time.Sleep(200 * time.Millisecond) - srv.LogEvent("fakeaction3", "fakeid", "fakeimage") - time.Sleep(200 * time.Millisecond) - srv.LogEvent("fakeaction4", "fakeid", "fakeimage") - }() - - setTimeout(t, "Listening for events timed out", 2*time.Second, func() { - for i := 2; i < 4; i++ { - event := <-listener - if event != srv.events[i] { - t.Fatalf("Event received it different than expected") - } - } - }) -} - func 
TestRmi(t *testing.T) { eng := NewTestEngine(t) srv := mkServerFromEngine(eng, t) - runtime := srv.runtime - defer nuke(runtime) + defer mkRuntimeFromEngine(eng, t).Nuke() initialImages, err := srv.Images(false, "") if err != nil { t.Fatal(err) } - config, hostConfig, _, err := ParseRun([]string{GetTestImage(runtime).ID, "echo test"}, nil) + config, hostConfig, _, err := docker.ParseRun([]string{unitTestImageID, "echo test"}, nil) if err != nil { t.Fatal(err) } @@ -471,19 +316,19 @@ func TestRmi(t *testing.T) { } func TestImagesFilter(t *testing.T) { - runtime := mkRuntime(t) - defer nuke(runtime) + eng := NewTestEngine(t) + defer nuke(mkRuntimeFromEngine(eng, t)) - srv := &Server{runtime: runtime} + srv := mkServerFromEngine(eng, t) - if err := srv.runtime.repositories.Set("utest", "tag1", unitTestImageName, false); err != nil { + if err := srv.ContainerTag(unitTestImageName, "utest", "tag1", false); err != nil { t.Fatal(err) } - if err := srv.runtime.repositories.Set("utest/docker", "tag2", unitTestImageName, false); err != nil { + if err := srv.ContainerTag(unitTestImageName, "utest/docker", "tag2", false); err != nil { t.Fatal(err) } - if err := srv.runtime.repositories.Set("utest:5000/docker", "tag3", unitTestImageName, false); err != nil { + if err := srv.ContainerTag(unitTestImageName, "utest:5000/docker", "tag3", false); err != nil { t.Fatal(err) } @@ -525,9 +370,9 @@ func TestImagesFilter(t *testing.T) { } func TestImageInsert(t *testing.T) { - runtime := mkRuntime(t) - defer nuke(runtime) - srv := &Server{runtime: runtime} + eng := NewTestEngine(t) + defer mkRuntimeFromEngine(eng, t).Nuke() + srv := mkServerFromEngine(eng, t) sf := utils.NewStreamFormatter(true) // bad image name fails @@ -536,12 +381,12 @@ func TestImageInsert(t *testing.T) { } // bad url fails - if err := srv.ImageInsert(GetTestImage(runtime).ID, "http://bad_host_name_that_will_totally_fail.com/", "/foo", ioutil.Discard, sf); err == nil { + if err := srv.ImageInsert(unitTestImageID, "http://bad_host_name_that_will_totally_fail.com/", "/foo", ioutil.Discard, sf); err == nil { t.Fatal("expected an error and got none") } // success returns nil - if err := srv.ImageInsert(GetTestImage(runtime).ID, "https://www.docker.io/static/img/docker-top-logo.png", "/foo", ioutil.Discard, sf); err != nil { + if err := srv.ImageInsert(unitTestImageID, "https://www.docker.io/static/img/docker-top-logo.png", "/foo", ioutil.Discard, sf); err != nil { t.Fatalf("expected no error, but got %v", err) } } diff --git a/integration/sorter_test.go b/integration/sorter_test.go new file mode 100644 index 0000000000..77848c7ddf --- /dev/null +++ b/integration/sorter_test.go @@ -0,0 +1,63 @@ +package docker + +import ( + "github.com/dotcloud/docker" + "github.com/dotcloud/docker/utils" + "io/ioutil" + "testing" + "time" +) + +func TestServerListOrderedImagesByCreationDate(t *testing.T) { + eng := NewTestEngine(t) + defer mkRuntimeFromEngine(eng, t).Nuke() + srv := mkServerFromEngine(eng, t) + + if err := generateImage("", srv); err != nil { + t.Fatal(err) + } + + images, err := srv.Images(true, "") + if err != nil { + t.Fatal(err) + } + + if images[0].Created < images[1].Created { + t.Error("Expected []APIImges to be ordered by most recent creation date.") + } +} + +func TestServerListOrderedImagesByCreationDateAndTag(t *testing.T) { + eng := NewTestEngine(t) + defer mkRuntimeFromEngine(eng, t).Nuke() + srv := mkServerFromEngine(eng, t) + + err := generateImage("bar", srv) + if err != nil { + t.Fatal(err) + } + + time.Sleep(time.Second) + + 
err = generateImage("zed", srv) + if err != nil { + t.Fatal(err) + } + + images, err := srv.Images(true, "") + if err != nil { + t.Fatal(err) + } + + if images[0].RepoTags[0] != "repo:zed" && images[0].RepoTags[0] != "repo:bar" { + t.Errorf("Expected []APIImges to be ordered by most recent creation date. %s", images) + } +} + +func generateImage(name string, srv *docker.Server) error { + archive, err := fakeTar() + if err != nil { + return err + } + return srv.ImageImport("-", "repo", name, archive, ioutil.Discard, utils.NewStreamFormatter(true)) +} diff --git a/integration/utils_test.go b/integration/utils_test.go new file mode 100644 index 0000000000..7a46bed16c --- /dev/null +++ b/integration/utils_test.go @@ -0,0 +1,332 @@ +package docker + +import ( + "archive/tar" + "bytes" + "github.com/dotcloud/docker" + "github.com/dotcloud/docker/engine" + "github.com/dotcloud/docker/utils" + "fmt" + "io" + "io/ioutil" + "net/http" + "net/http/httptest" + "os" + "path" + "strings" + "testing" + "time" +) + +// This file contains utility functions for docker's unit test suite. +// It has to be named XXX_test.go, apparently, in other to access private functions +// from other XXX_test.go functions. + +// Create a temporary runtime suitable for unit testing. +// Call t.Fatal() at the first error. +func mkRuntime(f utils.Fataler) *docker.Runtime { + root, err := newTestDirectory(unitTestStoreBase) + if err != nil { + f.Fatal(err) + } + config := &docker.DaemonConfig{ + Root: root, + AutoRestart: false, + } + r, err := docker.NewRuntimeFromDirectory(config) + if err != nil { + f.Fatal(err) + } + r.UpdateCapabilities(true) + return r +} + +func createNamedTestContainer(eng *engine.Engine, config *docker.Config, f utils.Fataler, name string) (shortId string) { + job := eng.Job("create", name) + if err := job.ImportEnv(config); err != nil { + f.Fatal(err) + } + job.StdoutParseString(&shortId) + if err := job.Run(); err != nil { + f.Fatal(err) + } + return +} + +func createTestContainer(eng *engine.Engine, config *docker.Config, f utils.Fataler) (shortId string) { + return createNamedTestContainer(eng, config, f, "") +} + +func startContainer(eng *engine.Engine, id string, t utils.Fataler) { + job := eng.Job("start", id) + if err := job.Run(); err != nil { + t.Fatal(err) + } +} + + +func containerRun(eng *engine.Engine, id string, t utils.Fataler) { + startContainer(eng, id, t) + containerWait(eng, id, t) +} + +func containerFileExists(eng *engine.Engine, id, dir string, t utils.Fataler) bool { + c := getContainer(eng, id, t) + if err := c.EnsureMounted(); err != nil { + t.Fatal(err) + } + if _, err := os.Stat(path.Join(c.RootfsPath(), dir)); err != nil { + if os.IsNotExist(err) { + return false + } + t.Fatal(err) + } + return true +} + +func containerAttach(eng *engine.Engine, id string, t utils.Fataler) (io.WriteCloser, io.ReadCloser) { + c := getContainer(eng, id, t) + i, err := c.StdinPipe() + if err != nil { + t.Fatal(err) + } + o, err := c.StdoutPipe() + if err != nil { + t.Fatal(err) + } + return i, o +} + + +func containerWait(eng *engine.Engine, id string, t utils.Fataler) int { + return getContainer(eng, id, t).Wait() +} + + +func containerWaitTimeout(eng *engine.Engine, id string, t utils.Fataler) error { + return getContainer(eng, id, t).WaitTimeout(500 * time.Millisecond) +} + +func containerKill(eng *engine.Engine, id string, t utils.Fataler) { + if err := getContainer(eng, id, t).Kill(); err != nil { + t.Fatal(err) + } +} + +func containerRunning(eng *engine.Engine, id string, t 
utils.Fataler) bool { + return getContainer(eng, id, t).State.Running +} + +func containerAssertExists(eng *engine.Engine, id string, t utils.Fataler) { + getContainer(eng, id, t) +} + +func containerAssertNotExists(eng *engine.Engine, id string, t utils.Fataler) { + runtime := mkRuntimeFromEngine(eng, t) + if c := runtime.Get(id); c != nil { + t.Fatal(fmt.Errorf("Container %s should not exist", id)) + } +} + +// assertHttpNotError expect the given response to not have an error. +// Otherwise the it causes the test to fail. +func assertHttpNotError(r *httptest.ResponseRecorder, t utils.Fataler) { + // Non-error http status are [200, 400) + if r.Code < http.StatusOK || r.Code >= http.StatusBadRequest { + t.Fatal(fmt.Errorf("Unexpected http error: %v", r.Code)) + } +} + + +// assertHttpError expect the given response to have an error. +// Otherwise the it causes the test to fail. +func assertHttpError(r *httptest.ResponseRecorder, t utils.Fataler) { + // Non-error http status are [200, 400) + if !(r.Code < http.StatusOK || r.Code >= http.StatusBadRequest) { + t.Fatal(fmt.Errorf("Unexpected http success code: %v", r.Code)) + } +} + +func getContainer(eng *engine.Engine, id string, t utils.Fataler) *docker.Container { + runtime := mkRuntimeFromEngine(eng, t) + c := runtime.Get(id) + if c == nil { + t.Fatal(fmt.Errorf("No such container: %s", id)) + } + return c +} + +func mkServerFromEngine(eng *engine.Engine, t utils.Fataler) *docker.Server { + iSrv := eng.Hack_GetGlobalVar("httpapi.server") + if iSrv == nil { + panic("Legacy server field not set in engine") + } + srv, ok := iSrv.(*docker.Server) + if !ok { + panic("Legacy server field in engine does not cast to *docker.Server") + } + return srv +} + +func mkRuntimeFromEngine(eng *engine.Engine, t utils.Fataler) *docker.Runtime { + iRuntime := eng.Hack_GetGlobalVar("httpapi.runtime") + if iRuntime == nil { + panic("Legacy runtime field not set in engine") + } + runtime, ok := iRuntime.(*docker.Runtime) + if !ok { + panic("Legacy runtime field in engine does not cast to *docker.Runtime") + } + return runtime +} + +func NewTestEngine(t utils.Fataler) *engine.Engine { + root, err := newTestDirectory(unitTestStoreBase) + if err != nil { + t.Fatal(err) + } + eng, err := engine.New(root) + if err != nil { + t.Fatal(err) + } + // Load default plugins + // (This is manually copied and modified from main() until we have a more generic plugin system) + job := eng.Job("initapi") + job.Setenv("Root", root) + job.SetenvBool("AutoRestart", false) + // TestGetEnabledCors and TestOptionsRoute require EnableCors=true + job.SetenvBool("EnableCors", true) + if err := job.Run(); err != nil { + t.Fatal(err) + } + return eng +} + +func newTestDirectory(templateDir string) (dir string, err error) { + return utils.TestDirectory(templateDir) +} + +func getCallerName(depth int) string { + return utils.GetCallerName(depth) +} + +// Write `content` to the file at path `dst`, creating it if necessary, +// as well as any missing directories. +// The file is truncated if it already exists. +// Call t.Fatal() at the first error. 
+func writeFile(dst, content string, t *testing.T) { + // Create subdirectories if necessary + if err := os.MkdirAll(path.Dir(dst), 0700); err != nil && !os.IsExist(err) { + t.Fatal(err) + } + f, err := os.OpenFile(dst, os.O_CREATE|os.O_RDWR|os.O_TRUNC, 0700) + if err != nil { + t.Fatal(err) + } + // Write content (truncate if it exists) + if _, err := io.Copy(f, strings.NewReader(content)); err != nil { + t.Fatal(err) + } +} + +// Return the contents of file at path `src`. +// Call t.Fatal() at the first error (including if the file doesn't exist) +func readFile(src string, t *testing.T) (content string) { + f, err := os.Open(src) + if err != nil { + t.Fatal(err) + } + data, err := ioutil.ReadAll(f) + if err != nil { + t.Fatal(err) + } + return string(data) +} + +// Create a test container from the given runtime `r` and run arguments `args`. +// If the image name is "_", (eg. []string{"-i", "-t", "_", "bash"}, it is +// dynamically replaced by the current test image. +// The caller is responsible for destroying the container. +// Call t.Fatal() at the first error. +func mkContainer(r *docker.Runtime, args []string, t *testing.T) (*docker.Container, *docker.HostConfig, error) { + config, hc, _, err := docker.ParseRun(args, nil) + defer func() { + if err != nil && t != nil { + t.Fatal(err) + } + }() + if err != nil { + return nil, nil, err + } + if config.Image == "_" { + config.Image = GetTestImage(r).ID + } + c, _, err := r.Create(config, "") + if err != nil { + return nil, nil, err + } + // NOTE: hostConfig is ignored. + // If `args` specify privileged mode, custom lxc conf, external mount binds, + // port redirects etc. they will be ignored. + // This is because the correct way to set these things is to pass environment + // to the `start` job. + // FIXME: this helper function should be deprecated in favor of calling + // `create` and `start` jobs directly. + return c, hc, nil +} + +// Create a test container, start it, wait for it to complete, destroy it, +// and return its standard output as a string. +// The image name (eg. the XXX in []string{"-i", "-t", "XXX", "bash"}, is dynamically replaced by the current test image. +// If t is not nil, call t.Fatal() at the first error. Otherwise return errors normally. +func runContainer(eng *engine.Engine, r *docker.Runtime, args []string, t *testing.T) (output string, err error) { + defer func() { + if err != nil && t != nil { + t.Fatal(err) + } + }() + container, hc, err := mkContainer(r, args, t) + if err != nil { + return "", err + } + defer r.Destroy(container) + stdout, err := container.StdoutPipe() + if err != nil { + return "", err + } + defer stdout.Close() + + job := eng.Job("start", container.ID) + if err := job.ImportEnv(hc); err != nil { + return "", err + } + if err := job.Run(); err != nil { + return "", err + } + + container.Wait() + data, err := ioutil.ReadAll(stdout) + if err != nil { + return "", err + } + output = string(data) + return +} + +// FIXME: this is duplicated from graph_test.go in the docker package. 
+func fakeTar() (io.Reader, error) { + content := []byte("Hello world!\n") + buf := new(bytes.Buffer) + tw := tar.NewWriter(buf) + for _, name := range []string{"/etc/postgres/postgres.conf", "/etc/passwd", "/var/log/postgres/postgres.conf"} { + hdr := new(tar.Header) + hdr.Size = int64(len(content)) + hdr.Name = name + if err := tw.WriteHeader(hdr); err != nil { + return nil, err + } + tw.Write([]byte(content)) + } + tw.Close() + return buf, nil +} diff --git a/z_final_test.go b/integration/z_final_test.go similarity index 100% rename from z_final_test.go rename to integration/z_final_test.go diff --git a/iptables/iptables_test.go b/iptables/iptables_test.go deleted file mode 100644 index 886a63c03f..0000000000 --- a/iptables/iptables_test.go +++ /dev/null @@ -1,18 +0,0 @@ -package iptables - -import ( - "os" - "testing" -) - -func TestIptables(t *testing.T) { - if _, err := Raw("-L"); err != nil { - t.Fatal(err) - } - path := os.Getenv("PATH") - os.Setenv("PATH", "") - defer os.Setenv("PATH", path) - if _, err := Raw("-L"); err == nil { - t.Fatal("Not finding iptables in the PATH should cause an error") - } -} diff --git a/lxc_template_unit_test.go b/lxc_template_unit_test.go new file mode 100644 index 0000000000..ce5af1d321 --- /dev/null +++ b/lxc_template_unit_test.go @@ -0,0 +1,102 @@ +package docker + +import ( + "bufio" + "fmt" + "io/ioutil" + "math/rand" + "os" + "strings" + "testing" + "time" +) + +func TestLXCConfig(t *testing.T) { + root, err := ioutil.TempDir("", "TestLXCConfig") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(root) + // Memory is allocated randomly for testing + rand.Seed(time.Now().UTC().UnixNano()) + memMin := 33554432 + memMax := 536870912 + mem := memMin + rand.Intn(memMax-memMin) + // CPU shares as well + cpuMin := 100 + cpuMax := 10000 + cpu := cpuMin + rand.Intn(cpuMax-cpuMin) + container := &Container{ + root: root, + Config: &Config{ + Hostname: "foobar", + Memory: int64(mem), + CpuShares: int64(cpu), + NetworkDisabled: true, + }, + hostConfig: &HostConfig{ + Privileged: false, + }, + } + if err := container.generateLXCConfig(); err != nil { + t.Fatal(err) + } + grepFile(t, container.lxcConfigPath(), "lxc.utsname = foobar") + grepFile(t, container.lxcConfigPath(), + fmt.Sprintf("lxc.cgroup.memory.limit_in_bytes = %d", mem)) + grepFile(t, container.lxcConfigPath(), + fmt.Sprintf("lxc.cgroup.memory.memsw.limit_in_bytes = %d", mem*2)) +} + +func TestCustomLxcConfig(t *testing.T) { + root, err := ioutil.TempDir("", "TestCustomLxcConfig") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(root) + container := &Container{ + root: root, + Config: &Config{ + Hostname: "foobar", + NetworkDisabled: true, + }, + hostConfig: &HostConfig{ + Privileged: false, + LxcConf: []KeyValuePair{ + { + Key: "lxc.utsname", + Value: "docker", + }, + { + Key: "lxc.cgroup.cpuset.cpus", + Value: "0,1", + }, + }, + }, + } + if err := container.generateLXCConfig(); err != nil { + t.Fatal(err) + } + grepFile(t, container.lxcConfigPath(), "lxc.utsname = docker") + grepFile(t, container.lxcConfigPath(), "lxc.cgroup.cpuset.cpus = 0,1") +} + +func grepFile(t *testing.T, path string, pattern string) { + f, err := os.Open(path) + if err != nil { + t.Fatal(err) + } + defer f.Close() + r := bufio.NewReader(f) + var ( + line string + ) + err = nil + for err == nil { + line, err = r.ReadString('\n') + if strings.Contains(line, pattern) == true { + return + } + } + t.Fatalf("grepFile: pattern \"%s\" not found in \"%s\"", pattern, path) +} diff --git a/runtime.go b/runtime.go 
index f4744cc60a..4559164e28 100644 --- a/runtime.go +++ b/runtime.go @@ -15,6 +15,7 @@ import ( "path" "sort" "strings" + "sync" "time" ) @@ -516,7 +517,12 @@ func (runtime *Runtime) Commit(container *Container, repository, tag, comment, a return img, nil } +// FIXME: this is deprecated by the getFullName *function* func (runtime *Runtime) getFullName(name string) (string, error) { + return getFullName(name) +} + +func getFullName(name string) (string, error) { if name == "" { return "", fmt.Errorf("Container name cannot be empty") } @@ -655,6 +661,25 @@ func (runtime *Runtime) Close() error { return runtime.containerGraph.Close() } +// Nuke kills all containers then removes all content +// from the content root, including images, volumes and +// container filesystems. +// Again: this will remove your entire docker runtime! +func (runtime *Runtime) Nuke() error { + var wg sync.WaitGroup + for _, container := range runtime.List() { + wg.Add(1) + go func(c *Container) { + c.Kill() + wg.Done() + }(container) + } + wg.Wait() + runtime.Close() + + return os.RemoveAll(runtime.config.Root) +} + func linkLxcStart(root string) error { sourcePath, err := exec.LookPath("lxc-start") if err != nil { @@ -672,6 +697,14 @@ func linkLxcStart(root string) error { return os.Symlink(sourcePath, targetPath) } +// FIXME: this is a convenience function for integration tests +// which need direct access to runtime.graph. +// Once the tests switch to using engine and jobs, this method +// can go away. +func (runtime *Runtime) Graph() *Graph { + return runtime.graph +} + // History is a convenience type for storing a list of containers, // ordered by creation date. type History []*Container diff --git a/server.go b/server.go index 28858d6f6b..768f73a688 100644 --- a/server.go +++ b/server.go @@ -62,6 +62,8 @@ func jobInitApi(job *engine.Job) string { os.Exit(0) }() job.Eng.Hack_SetGlobalVar("httpapi.server", srv) + job.Eng.Hack_SetGlobalVar("httpapi.runtime", srv.runtime) + job.Eng.Hack_SetGlobalVar("httpapi.bridgeIP", srv.runtime.networkManager.bridgeNetwork.IP) if err := job.Eng.Register("create", srv.ContainerCreate); err != nil { return err.Error() } @@ -530,6 +532,7 @@ func (srv *Server) ContainerCommit(name, repo, tag, author, comment string, conf return img.ID, err } +// FIXME: this should be called ImageTag func (srv *Server) ContainerTag(name, repo, tag string, force bool) error { if err := srv.runtime.repositories.Set(repo, tag, name, force); err != nil { return err @@ -1062,7 +1065,12 @@ func (srv *Server) ContainerCreate(job *engine.Job) string { return err.Error() } srv.LogEvent("create", container.ID, srv.runtime.repositories.ImageName(container.Image)) - job.Printf("%s\n", container.ID) + // FIXME: this is necessary because runtime.Create might return a nil container + // with a non-nil error. This should not happen! Once it's fixed we + // can remove this workaround. 
+ if container != nil { + job.Printf("%s\n", container.ID) + } for _, warning := range buildWarnings { job.Errorf("%s\n", warning) } @@ -1600,7 +1608,7 @@ func (srv *Server) HTTPRequestFactory(metaHeaders map[string][]string) *utils.HT return srv.reqFactory } -func (srv *Server) LogEvent(action, id, from string) { +func (srv *Server) LogEvent(action, id, from string) *utils.JSONMessage { now := time.Now().Unix() jm := utils.JSONMessage{Status: action, ID: id, From: from, Time: now} srv.events = append(srv.events, jm) @@ -1610,6 +1618,7 @@ func (srv *Server) LogEvent(action, id, from string) { default: } } + return &jm } type Server struct { diff --git a/server_unit_test.go b/server_unit_test.go new file mode 100644 index 0000000000..a51e2ddff5 --- /dev/null +++ b/server_unit_test.go @@ -0,0 +1,109 @@ +package docker + +import ( + "github.com/dotcloud/docker/utils" + "testing" + "time" +) + +func TestPools(t *testing.T) { + srv := &Server{ + pullingPool: make(map[string]struct{}), + pushingPool: make(map[string]struct{}), + } + + err := srv.poolAdd("pull", "test1") + if err != nil { + t.Fatal(err) + } + err = srv.poolAdd("pull", "test2") + if err != nil { + t.Fatal(err) + } + err = srv.poolAdd("push", "test1") + if err == nil || err.Error() != "pull test1 is already in progress" { + t.Fatalf("Expected `pull test1 is already in progress`") + } + err = srv.poolAdd("pull", "test1") + if err == nil || err.Error() != "pull test1 is already in progress" { + t.Fatalf("Expected `pull test1 is already in progress`") + } + err = srv.poolAdd("wait", "test3") + if err == nil || err.Error() != "Unknown pool type" { + t.Fatalf("Expected `Unknown pool type`") + } + + err = srv.poolRemove("pull", "test2") + if err != nil { + t.Fatal(err) + } + err = srv.poolRemove("pull", "test2") + if err != nil { + t.Fatal(err) + } + err = srv.poolRemove("pull", "test1") + if err != nil { + t.Fatal(err) + } + err = srv.poolRemove("push", "test1") + if err != nil { + t.Fatal(err) + } + err = srv.poolRemove("wait", "test3") + if err == nil || err.Error() != "Unknown pool type" { + t.Fatalf("Expected `Unknown pool type`") + } +} + +func TestLogEvent(t *testing.T) { + srv := &Server{ + events: make([]utils.JSONMessage, 0, 64), + listeners: make(map[string]chan utils.JSONMessage), + } + + srv.LogEvent("fakeaction", "fakeid", "fakeimage") + + listener := make(chan utils.JSONMessage) + srv.Lock() + srv.listeners["test"] = listener + srv.Unlock() + + srv.LogEvent("fakeaction2", "fakeid", "fakeimage") + + if len(srv.events) != 2 { + t.Fatalf("Expected 2 events, found %d", len(srv.events)) + } + go func() { + time.Sleep(200 * time.Millisecond) + srv.LogEvent("fakeaction3", "fakeid", "fakeimage") + time.Sleep(200 * time.Millisecond) + srv.LogEvent("fakeaction4", "fakeid", "fakeimage") + }() + + setTimeout(t, "Listening for events timed out", 2*time.Second, func() { + for i := 2; i < 4; i++ { + event := <-listener + if event != srv.events[i] { + t.Fatalf("Event received it different than expected") + } + } + }) +} + +// FIXME: this is duplicated from integration/commands_test.go +func setTimeout(t *testing.T, msg string, d time.Duration, f func()) { + c := make(chan bool) + + // Make sure we are not too long + go func() { + time.Sleep(d) + c <- true + }() + go func() { + f() + c <- false + }() + if <-c && msg != "" { + t.Fatal(msg) + } +} diff --git a/sorter_test.go b/sorter_test.go deleted file mode 100644 index 54f647132f..0000000000 --- a/sorter_test.go +++ /dev/null @@ -1,111 +0,0 @@ -package docker - -import ( - "fmt" - 
"testing" - "time" -) - -func TestServerListOrderedImagesByCreationDate(t *testing.T) { - runtime := mkRuntime(t) - defer nuke(runtime) - - archive, err := fakeTar() - if err != nil { - t.Fatal(err) - } - _, err = runtime.graph.Create(archive, nil, "Testing", "", nil) - if err != nil { - t.Fatal(err) - } - - srv := &Server{runtime: runtime} - - images, err := srv.Images(true, "") - if err != nil { - t.Fatal(err) - } - - if images[0].Created < images[1].Created { - t.Error("Expected []APIImges to be ordered by most recent creation date.") - } -} - -func TestServerListOrderedImagesByCreationDateAndTag(t *testing.T) { - runtime := mkRuntime(t) - defer nuke(runtime) - - err := generateImage("bar", runtime) - if err != nil { - t.Fatal(err) - } - - time.Sleep(time.Second) - - err = generateImage("zed", runtime) - if err != nil { - t.Fatal(err) - } - - srv := &Server{runtime: runtime} - images, err := srv.Images(true, "") - if err != nil { - t.Fatal(err) - } - - if images[0].RepoTags[0] != "repo:zed" && images[0].RepoTags[0] != "repo:bar" { - t.Errorf("Expected []APIImges to be ordered by most recent creation date. %s", images) - } -} - -func generateImage(name string, runtime *Runtime) error { - - archive, err := fakeTar() - if err != nil { - return err - } - image, err := runtime.graph.Create(archive, nil, "Testing", "", nil) - if err != nil { - return err - } - - srv := &Server{runtime: runtime} - srv.ContainerTag(image.ID, "repo", name, false) - - return nil -} - -func TestSortUniquePorts(t *testing.T) { - ports := []Port{ - Port("6379/tcp"), - Port("22/tcp"), - } - - sortPorts(ports, func(ip, jp Port) bool { - return ip.Int() < jp.Int() || (ip.Int() == jp.Int() && ip.Proto() == "tcp") - }) - - first := ports[0] - if fmt.Sprint(first) != "22/tcp" { - t.Log(fmt.Sprint(first)) - t.Fail() - } -} - -func TestSortSamePortWithDifferentProto(t *testing.T) { - ports := []Port{ - Port("8888/tcp"), - Port("8888/udp"), - Port("6379/tcp"), - Port("6379/udp"), - } - - sortPorts(ports, func(ip, jp Port) bool { - return ip.Int() < jp.Int() || (ip.Int() == jp.Int() && ip.Proto() == "tcp") - }) - - first := ports[0] - if fmt.Sprint(first) != "6379/tcp" { - t.Fail() - } -} diff --git a/sorter_unit_test.go b/sorter_unit_test.go new file mode 100644 index 0000000000..0669feedb3 --- /dev/null +++ b/sorter_unit_test.go @@ -0,0 +1,41 @@ +package docker + +import ( + "fmt" + "testing" +) + +func TestSortUniquePorts(t *testing.T) { + ports := []Port{ + Port("6379/tcp"), + Port("22/tcp"), + } + + sortPorts(ports, func(ip, jp Port) bool { + return ip.Int() < jp.Int() || (ip.Int() == jp.Int() && ip.Proto() == "tcp") + }) + + first := ports[0] + if fmt.Sprint(first) != "22/tcp" { + t.Log(fmt.Sprint(first)) + t.Fail() + } +} + +func TestSortSamePortWithDifferentProto(t *testing.T) { + ports := []Port{ + Port("8888/tcp"), + Port("8888/udp"), + Port("6379/tcp"), + Port("6379/udp"), + } + + sortPorts(ports, func(ip, jp Port) bool { + return ip.Int() < jp.Int() || (ip.Int() == jp.Int() && ip.Proto() == "tcp") + }) + + first := ports[0] + if fmt.Sprint(first) != "6379/tcp" { + t.Fail() + } +} diff --git a/tags_test.go b/tags_test.go deleted file mode 100644 index d920943795..0000000000 --- a/tags_test.go +++ /dev/null @@ -1,46 +0,0 @@ -package docker - -import ( - "testing" -) - -func TestLookupImage(t *testing.T) { - runtime := mkRuntime(t) - defer nuke(runtime) - - if img, err := runtime.repositories.LookupImage(unitTestImageName); err != nil { - t.Fatal(err) - } else if img == nil { - t.Errorf("Expected 1 image, none 
found") - } - - if img, err := runtime.repositories.LookupImage(unitTestImageName + ":" + DEFAULTTAG); err != nil { - t.Fatal(err) - } else if img == nil { - t.Errorf("Expected 1 image, none found") - } - - if img, err := runtime.repositories.LookupImage(unitTestImageName + ":" + "fail"); err == nil { - t.Errorf("Expected error, none found") - } else if img != nil { - t.Errorf("Expected 0 image, 1 found") - } - - if img, err := runtime.repositories.LookupImage("fail:fail"); err == nil { - t.Errorf("Expected error, none found") - } else if img != nil { - t.Errorf("Expected 0 image, 1 found") - } - - if img, err := runtime.repositories.LookupImage(unitTestImageID); err != nil { - t.Fatal(err) - } else if img == nil { - t.Errorf("Expected 1 image, none found") - } - - if img, err := runtime.repositories.LookupImage(unitTestImageName + ":" + unitTestImageID); err != nil { - t.Fatal(err) - } else if img == nil { - t.Errorf("Expected 1 image, none found") - } -} diff --git a/tags_unit_test.go b/tags_unit_test.go new file mode 100644 index 0000000000..0f72e30200 --- /dev/null +++ b/tags_unit_test.go @@ -0,0 +1,80 @@ +package docker + +import ( + "github.com/dotcloud/docker/utils" + "os" + "path" + "testing" +) + +const ( + testImageName string = "myapp" + testImageID string = "foo" +) + +func mkTestTagStore(root string, t *testing.T) *TagStore { + graph, err := NewGraph(root) + if err != nil { + t.Fatal(err) + } + store, err := NewTagStore(path.Join(root, "tags"), graph) + if err != nil { + t.Fatal(err) + } + archive, err := fakeTar() + if err != nil { + t.Fatal(err) + } + img := &Image{ID: testImageID} + if err := graph.Register(nil, archive, img); err != nil { + t.Fatal(err) + } + if err := store.Set(testImageName, "", testImageID, false); err != nil { + t.Fatal(err) + } + return store +} + +func TestLookupImage(t *testing.T) { + tmp, err := utils.TestDirectory("") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(tmp) + store := mkTestTagStore(tmp, t) + + if img, err := store.LookupImage(testImageName); err != nil { + t.Fatal(err) + } else if img == nil { + t.Errorf("Expected 1 image, none found") + } + if img, err := store.LookupImage(testImageName + ":" + DEFAULTTAG); err != nil { + t.Fatal(err) + } else if img == nil { + t.Errorf("Expected 1 image, none found") + } + + if img, err := store.LookupImage(testImageName + ":" + "fail"); err == nil { + t.Errorf("Expected error, none found") + } else if img != nil { + t.Errorf("Expected 0 image, 1 found") + } + + if img, err := store.LookupImage("fail:fail"); err == nil { + t.Errorf("Expected error, none found") + } else if img != nil { + t.Errorf("Expected 0 image, 1 found") + } + + if img, err := store.LookupImage(testImageID); err != nil { + t.Fatal(err) + } else if img == nil { + t.Errorf("Expected 1 image, none found") + } + + if img, err := store.LookupImage(testImageName + ":" + testImageID); err != nil { + t.Fatal(err) + } else if img == nil { + t.Errorf("Expected 1 image, none found") + } +} diff --git a/utils/utils.go b/utils/utils.go index d16ffe3171..5a021d99e3 100644 --- a/utils/utils.go +++ b/utils/utils.go @@ -1242,3 +1242,40 @@ func PartParser(template, data string) (map[string]string, error) { } return out, nil } + +var globalTestID string + +// TestDirectory creates a new temporary directory and returns its path. +// The contents of directory at path `templateDir` is copied into the +// new directory. 
+func TestDirectory(templateDir string) (dir string, err error) { + if globalTestID == "" { + globalTestID = RandomString()[:4] + } + prefix := fmt.Sprintf("docker-test%s-%s-", globalTestID, GetCallerName(2)) + if prefix == "" { + prefix = "docker-test-" + } + dir, err = ioutil.TempDir("", prefix) + if err = os.Remove(dir); err != nil { + return + } + if templateDir != "" { + if err = CopyDirectory(templateDir, dir); err != nil { + return + } + } + return +} + +// GetCallerName introspects the call stack and returns the name of the +// function `depth` levels down in the stack. +func GetCallerName(depth int) string { + // Use the caller function name as a prefix. + // This helps trace temp directories back to their test. + pc, _, _, _ := runtime.Caller(depth + 1) + callerLongName := runtime.FuncForPC(pc).Name() + parts := strings.Split(callerLongName, ".") + callerShortName := parts[len(parts)-1] + return callerShortName +} diff --git a/utils_test.go b/utils_test.go deleted file mode 100644 index a9678a9bbd..0000000000 --- a/utils_test.go +++ /dev/null @@ -1,493 +0,0 @@ -package docker - -import ( - "fmt" - "github.com/dotcloud/docker/engine" - "github.com/dotcloud/docker/utils" - "io" - "io/ioutil" - "os" - "path" - "runtime" - "strings" - "testing" -) - -// This file contains utility functions for docker's unit test suite. -// It has to be named XXX_test.go, apparently, in other to access private functions -// from other XXX_test.go functions. - -var globalTestID string - -// Create a temporary runtime suitable for unit testing. -// Call t.Fatal() at the first error. -func mkRuntime(f utils.Fataler) *Runtime { - root, err := newTestDirectory(unitTestStoreBase) - if err != nil { - f.Fatal(err) - } - config := &DaemonConfig{ - Root: root, - AutoRestart: false, - } - r, err := NewRuntimeFromDirectory(config) - if err != nil { - f.Fatal(err) - } - r.UpdateCapabilities(true) - return r -} - -func createNamedTestContainer(eng *engine.Engine, config *Config, f utils.Fataler, name string) (shortId string) { - job := eng.Job("create", name) - if err := job.ImportEnv(config); err != nil { - f.Fatal(err) - } - job.StdoutParseString(&shortId) - if err := job.Run(); err != nil { - f.Fatal(err) - } - return -} - -func createTestContainer(eng *engine.Engine, config *Config, f utils.Fataler) (shortId string) { - return createNamedTestContainer(eng, config, f, "") -} - -func mkServerFromEngine(eng *engine.Engine, t utils.Fataler) *Server { - iSrv := eng.Hack_GetGlobalVar("httpapi.server") - if iSrv == nil { - panic("Legacy server field not set in engine") - } - srv, ok := iSrv.(*Server) - if !ok { - panic("Legacy server field in engine does not cast to *Server") - } - return srv -} - -func NewTestEngine(t utils.Fataler) *engine.Engine { - root, err := newTestDirectory(unitTestStoreBase) - if err != nil { - t.Fatal(err) - } - eng, err := engine.New(root) - if err != nil { - t.Fatal(err) - } - // Load default plugins - // (This is manually copied and modified from main() until we have a more generic plugin system) - job := eng.Job("initapi") - job.Setenv("Root", root) - job.SetenvBool("AutoRestart", false) - if err := job.Run(); err != nil { - t.Fatal(err) - } - return eng -} - -func newTestDirectory(templateDir string) (dir string, err error) { - if globalTestID == "" { - globalTestID = GenerateID()[:4] - } - prefix := fmt.Sprintf("docker-test%s-%s-", globalTestID, getCallerName(2)) - if prefix == "" { - prefix = "docker-test-" - } - dir, err = ioutil.TempDir("", prefix) - if err = os.Remove(dir); err != 
nil { - return - } - if err = utils.CopyDirectory(templateDir, dir); err != nil { - return - } - return -} - -func getCallerName(depth int) string { - // Use the caller function name as a prefix. - // This helps trace temp directories back to their test. - pc, _, _, _ := runtime.Caller(depth + 1) - callerLongName := runtime.FuncForPC(pc).Name() - parts := strings.Split(callerLongName, ".") - callerShortName := parts[len(parts)-1] - return callerShortName -} - -// Write `content` to the file at path `dst`, creating it if necessary, -// as well as any missing directories. -// The file is truncated if it already exists. -// Call t.Fatal() at the first error. -func writeFile(dst, content string, t *testing.T) { - // Create subdirectories if necessary - if err := os.MkdirAll(path.Dir(dst), 0700); err != nil && !os.IsExist(err) { - t.Fatal(err) - } - f, err := os.OpenFile(dst, os.O_CREATE|os.O_RDWR|os.O_TRUNC, 0700) - if err != nil { - t.Fatal(err) - } - // Write content (truncate if it exists) - if _, err := io.Copy(f, strings.NewReader(content)); err != nil { - t.Fatal(err) - } -} - -// Return the contents of file at path `src`. -// Call t.Fatal() at the first error (including if the file doesn't exist) -func readFile(src string, t *testing.T) (content string) { - f, err := os.Open(src) - if err != nil { - t.Fatal(err) - } - data, err := ioutil.ReadAll(f) - if err != nil { - t.Fatal(err) - } - return string(data) -} - -// Create a test container from the given runtime `r` and run arguments `args`. -// If the image name is "_", (eg. []string{"-i", "-t", "_", "bash"}, it is -// dynamically replaced by the current test image. -// The caller is responsible for destroying the container. -// Call t.Fatal() at the first error. -func mkContainer(r *Runtime, args []string, t *testing.T) (*Container, error) { - config, hostConfig, _, err := ParseRun(args, nil) - defer func() { - if err != nil && t != nil { - t.Fatal(err) - } - }() - if err != nil { - return nil, err - } - if config.Image == "_" { - config.Image = GetTestImage(r).ID - } - c, _, err := r.Create(config, "") - if err != nil { - return nil, err - } - c.hostConfig = hostConfig - return c, nil -} - -// Create a test container, start it, wait for it to complete, destroy it, -// and return its standard output as a string. -// The image name (eg. the XXX in []string{"-i", "-t", "XXX", "bash"}, is dynamically replaced by the current test image. -// If t is not nil, call t.Fatal() at the first error. Otherwise return errors normally. 
-func runContainer(r *Runtime, args []string, t *testing.T) (output string, err error) { - defer func() { - if err != nil && t != nil { - t.Fatal(err) - } - }() - container, err := mkContainer(r, args, t) - if err != nil { - return "", err - } - defer r.Destroy(container) - stdout, err := container.StdoutPipe() - if err != nil { - return "", err - } - defer stdout.Close() - if err := container.Start(); err != nil { - return "", err - } - container.Wait() - data, err := ioutil.ReadAll(stdout) - if err != nil { - return "", err - } - output = string(data) - return -} - -func TestCompareConfig(t *testing.T) { - volumes1 := make(map[string]struct{}) - volumes1["/test1"] = struct{}{} - config1 := Config{ - Dns: []string{"1.1.1.1", "2.2.2.2"}, - PortSpecs: []string{"1111:1111", "2222:2222"}, - Env: []string{"VAR1=1", "VAR2=2"}, - VolumesFrom: "11111111", - Volumes: volumes1, - } - config2 := Config{ - Dns: []string{"0.0.0.0", "2.2.2.2"}, - PortSpecs: []string{"1111:1111", "2222:2222"}, - Env: []string{"VAR1=1", "VAR2=2"}, - VolumesFrom: "11111111", - Volumes: volumes1, - } - config3 := Config{ - Dns: []string{"1.1.1.1", "2.2.2.2"}, - PortSpecs: []string{"0000:0000", "2222:2222"}, - Env: []string{"VAR1=1", "VAR2=2"}, - VolumesFrom: "11111111", - Volumes: volumes1, - } - config4 := Config{ - Dns: []string{"1.1.1.1", "2.2.2.2"}, - PortSpecs: []string{"0000:0000", "2222:2222"}, - Env: []string{"VAR1=1", "VAR2=2"}, - VolumesFrom: "22222222", - Volumes: volumes1, - } - volumes2 := make(map[string]struct{}) - volumes2["/test2"] = struct{}{} - config5 := Config{ - Dns: []string{"1.1.1.1", "2.2.2.2"}, - PortSpecs: []string{"0000:0000", "2222:2222"}, - Env: []string{"VAR1=1", "VAR2=2"}, - VolumesFrom: "11111111", - Volumes: volumes2, - } - if CompareConfig(&config1, &config2) { - t.Fatalf("CompareConfig should return false, Dns are different") - } - if CompareConfig(&config1, &config3) { - t.Fatalf("CompareConfig should return false, PortSpecs are different") - } - if CompareConfig(&config1, &config4) { - t.Fatalf("CompareConfig should return false, VolumesFrom are different") - } - if CompareConfig(&config1, &config5) { - t.Fatalf("CompareConfig should return false, Volumes are different") - } - if !CompareConfig(&config1, &config1) { - t.Fatalf("CompareConfig should return true") - } -} - -func TestMergeConfig(t *testing.T) { - volumesImage := make(map[string]struct{}) - volumesImage["/test1"] = struct{}{} - volumesImage["/test2"] = struct{}{} - configImage := &Config{ - Dns: []string{"1.1.1.1", "2.2.2.2"}, - PortSpecs: []string{"1111:1111", "2222:2222"}, - Env: []string{"VAR1=1", "VAR2=2"}, - VolumesFrom: "1111", - Volumes: volumesImage, - } - - volumesUser := make(map[string]struct{}) - volumesUser["/test3"] = struct{}{} - configUser := &Config{ - Dns: []string{"3.3.3.3"}, - PortSpecs: []string{"3333:2222", "3333:3333"}, - Env: []string{"VAR2=3", "VAR3=3"}, - Volumes: volumesUser, - } - - if err := MergeConfig(configUser, configImage); err != nil { - t.Error(err) - } - - if len(configUser.Dns) != 3 { - t.Fatalf("Expected 3 dns, 1.1.1.1, 2.2.2.2 and 3.3.3.3, found %d", len(configUser.Dns)) - } - for _, dns := range configUser.Dns { - if dns != "1.1.1.1" && dns != "2.2.2.2" && dns != "3.3.3.3" { - t.Fatalf("Expected 1.1.1.1 or 2.2.2.2 or 3.3.3.3, found %s", dns) - } - } - - if len(configUser.ExposedPorts) != 3 { - t.Fatalf("Expected 3 ExposedPorts, 1111, 2222 and 3333, found %d", len(configUser.ExposedPorts)) - } - for portSpecs := range configUser.ExposedPorts { - if portSpecs.Port() != "1111" && 
portSpecs.Port() != "2222" && portSpecs.Port() != "3333" { - t.Fatalf("Expected 1111 or 2222 or 3333, found %s", portSpecs) - } - } - if len(configUser.Env) != 3 { - t.Fatalf("Expected 3 env var, VAR1=1, VAR2=3 and VAR3=3, found %d", len(configUser.Env)) - } - for _, env := range configUser.Env { - if env != "VAR1=1" && env != "VAR2=3" && env != "VAR3=3" { - t.Fatalf("Expected VAR1=1 or VAR2=3 or VAR3=3, found %s", env) - } - } - - if len(configUser.Volumes) != 3 { - t.Fatalf("Expected 3 volumes, /test1, /test2 and /test3, found %d", len(configUser.Volumes)) - } - for v := range configUser.Volumes { - if v != "/test1" && v != "/test2" && v != "/test3" { - t.Fatalf("Expected /test1 or /test2 or /test3, found %s", v) - } - } - - if configUser.VolumesFrom != "1111" { - t.Fatalf("Expected VolumesFrom to be 1111, found %s", configUser.VolumesFrom) - } - - ports, _, err := parsePortSpecs([]string{"0000"}) - if err != nil { - t.Error(err) - } - configImage2 := &Config{ - ExposedPorts: ports, - } - - if err := MergeConfig(configUser, configImage2); err != nil { - t.Error(err) - } - - if len(configUser.ExposedPorts) != 4 { - t.Fatalf("Expected 4 ExposedPorts, 0000, 1111, 2222 and 3333, found %d", len(configUser.ExposedPorts)) - } - for portSpecs := range configUser.ExposedPorts { - if portSpecs.Port() != "0000" && portSpecs.Port() != "1111" && portSpecs.Port() != "2222" && portSpecs.Port() != "3333" { - t.Fatalf("Expected 0000 or 1111 or 2222 or 3333, found %s", portSpecs) - } - } - -} - -func TestParseLxcConfOpt(t *testing.T) { - opts := []string{"lxc.utsname=docker", "lxc.utsname = docker "} - - for _, o := range opts { - k, v, err := parseLxcOpt(o) - if err != nil { - t.FailNow() - } - if k != "lxc.utsname" { - t.Fail() - } - if v != "docker" { - t.Fail() - } - } -} - -func TestParseNetworkOptsPrivateOnly(t *testing.T) { - ports, bindings, err := parsePortSpecs([]string{"192.168.1.100::80"}) - if err != nil { - t.Fatal(err) - } - if len(ports) != 1 { - t.Logf("Expected 1 got %d", len(ports)) - t.FailNow() - } - if len(bindings) != 1 { - t.Logf("Expected 1 got %d", len(bindings)) - t.FailNow() - } - for k := range ports { - if k.Proto() != "tcp" { - t.Logf("Expected tcp got %s", k.Proto()) - t.Fail() - } - if k.Port() != "80" { - t.Logf("Expected 80 got %s", k.Port()) - t.Fail() - } - b, exists := bindings[k] - if !exists { - t.Log("Binding does not exist") - t.FailNow() - } - if len(b) != 1 { - t.Logf("Expected 1 got %d", len(b)) - t.FailNow() - } - s := b[0] - if s.HostPort != "" { - t.Logf("Expected \"\" got %s", s.HostPort) - t.Fail() - } - if s.HostIp != "192.168.1.100" { - t.Fail() - } - } -} - -func TestParseNetworkOptsPublic(t *testing.T) { - ports, bindings, err := parsePortSpecs([]string{"192.168.1.100:8080:80"}) - if err != nil { - t.Fatal(err) - } - if len(ports) != 1 { - t.Logf("Expected 1 got %d", len(ports)) - t.FailNow() - } - if len(bindings) != 1 { - t.Logf("Expected 1 got %d", len(bindings)) - t.FailNow() - } - for k := range ports { - if k.Proto() != "tcp" { - t.Logf("Expected tcp got %s", k.Proto()) - t.Fail() - } - if k.Port() != "80" { - t.Logf("Expected 80 got %s", k.Port()) - t.Fail() - } - b, exists := bindings[k] - if !exists { - t.Log("Binding does not exist") - t.FailNow() - } - if len(b) != 1 { - t.Logf("Expected 1 got %d", len(b)) - t.FailNow() - } - s := b[0] - if s.HostPort != "8080" { - t.Logf("Expected 8080 got %s", s.HostPort) - t.Fail() - } - if s.HostIp != "192.168.1.100" { - t.Fail() - } - } -} - -func TestParseNetworkOptsUdp(t *testing.T) { - ports, 
bindings, err := parsePortSpecs([]string{"192.168.1.100::6000/udp"}) - if err != nil { - t.Fatal(err) - } - if len(ports) != 1 { - t.Logf("Expected 1 got %d", len(ports)) - t.FailNow() - } - if len(bindings) != 1 { - t.Logf("Expected 1 got %d", len(bindings)) - t.FailNow() - } - for k := range ports { - if k.Proto() != "udp" { - t.Logf("Expected udp got %s", k.Proto()) - t.Fail() - } - if k.Port() != "6000" { - t.Logf("Expected 6000 got %s", k.Port()) - t.Fail() - } - b, exists := bindings[k] - if !exists { - t.Log("Binding does not exist") - t.FailNow() - } - if len(b) != 1 { - t.Logf("Expected 1 got %d", len(b)) - t.FailNow() - } - s := b[0] - if s.HostPort != "" { - t.Logf("Expected \"\" got %s", s.HostPort) - t.Fail() - } - if s.HostIp != "192.168.1.100" { - t.Fail() - } - } -}
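
A minimal sketch of the engine-job test pattern this patch converges on, assembled only from helpers visible in the diff above (NewTestEngine, mkRuntimeFromEngine, createTestContainer, startContainer, containerWait, docker.ParseRun, unitTestImageID). The test name is hypothetical and the snippet is illustrative rather than part of the commit: it builds a throwaway engine, drives the daemon through the "create" and "start" jobs instead of touching Runtime/Server internals, and cleans up with Runtime.Nuke().

package docker

import (
	"testing"

	"github.com/dotcloud/docker"
)

// TestEngineJobPattern is an illustrative sketch, not part of this commit.
func TestEngineJobPattern(t *testing.T) {
	eng := NewTestEngine(t)                  // throwaway engine with initapi loaded
	defer mkRuntimeFromEngine(eng, t).Nuke() // kill containers and wipe the test root

	config, _, _, err := docker.ParseRun([]string{unitTestImageID, "echo test"}, nil)
	if err != nil {
		t.Fatal(err)
	}

	id := createTestContainer(eng, config, t) // drives the "create" job
	startContainer(eng, id, t)                // drives the "start" job
	if status := containerWait(eng, id, t); status != 0 {
		t.Fatalf("Expected exit status 0, got %d", status)
	}
}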