Merge pull request #2694 from shykes/separate-integration-tests

Separate integration tests
Michael Crosby committed 2013-11-15 18:21:34 -08:00
commit 51576069ad
32 changed files with 2152 additions and 1934 deletions

api.go (16 changes)

@ -930,7 +930,7 @@ func postBuild(srv *Server, version float64, w http.ResponseWriter, r *http.Requ
    if err != nil {
        return err
    }
-   c, err := mkBuildContext(string(dockerFile), nil)
+   c, err := MkBuildContext(string(dockerFile), nil)
    if err != nil {
        return err
    }
@ -1108,6 +1108,20 @@ func createRouter(srv *Server, logging bool) (*mux.Router, error) {
    return r, nil
}
+// ServeRequest processes a single http request to the docker remote api.
+// FIXME: refactor this to be part of Server and not require re-creating a new
+// router each time. This requires first moving ListenAndServe into Server.
+func ServeRequest(srv *Server, apiversion float64, w http.ResponseWriter, req *http.Request) error {
+   router, err := createRouter(srv, false)
+   if err != nil {
+       return err
+   }
+   // Insert APIVERSION into the request as a convenience
+   req.URL.Path = fmt.Sprintf("/v%g%s", apiversion, req.URL.Path)
+   router.ServeHTTP(w, req)
+   return nil
+}
func ListenAndServe(proto, addr string, srv *Server, logging bool) error {
    log.Printf("Listening for HTTP on %s (%s)\n", addr, proto)
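The new ServeRequest helper is what lets the relocated integration tests exercise the remote API in-process instead of binding a real socket. A minimal sketch of how a test might drive it, assuming an already-initialized *Server named srv plus the usual net/http, net/http/httptest and testing imports; the endpoint and API version below are illustrative, not taken from this commit:

req, err := http.NewRequest("GET", "/version", nil)
if err != nil {
    t.Fatal(err)
}
rec := httptest.NewRecorder()
// Route the request through the same mux the daemon uses, without a listener.
if err := ServeRequest(srv, 1.7, rec, req); err != nil {
    t.Fatal(err)
}
if rec.Code != http.StatusOK {
    t.Fatalf("expected 200, got %d", rec.Code)
}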

api_unit_tests.go (new file, 20 lines)

@ -0,0 +1,20 @@
package docker
import (
"testing"
)
func TestJsonContentType(t *testing.T) {
if !matchesContentType("application/json", "application/json") {
t.Fail()
}
if !matchesContentType("application/json; charset=utf-8", "application/json") {
t.Fail()
}
if matchesContentType("dockerapplication/json", "application/json") {
t.Fail()
}
}


@ -1,11 +1,8 @@
package auth

import (
-   "crypto/rand"
-   "encoding/hex"
    "io/ioutil"
    "os"
-   "strings"
    "testing"
)
@ -29,52 +26,6 @@ func TestEncodeAuth(t *testing.T) {
    }
}
func TestLogin(t *testing.T) {
os.Setenv("DOCKER_INDEX_URL", "https://indexstaging-docker.dotcloud.com")
defer os.Setenv("DOCKER_INDEX_URL", "")
authConfig := &AuthConfig{Username: "unittester", Password: "surlautrerivejetattendrai", Email: "noise+unittester@dotcloud.com"}
status, err := Login(authConfig, nil)
if err != nil {
t.Fatal(err)
}
if status != "Login Succeeded" {
t.Fatalf("Expected status \"Login Succeeded\", found \"%s\" instead", status)
}
}
func TestCreateAccount(t *testing.T) {
os.Setenv("DOCKER_INDEX_URL", "https://indexstaging-docker.dotcloud.com")
defer os.Setenv("DOCKER_INDEX_URL", "")
tokenBuffer := make([]byte, 16)
_, err := rand.Read(tokenBuffer)
if err != nil {
t.Fatal(err)
}
token := hex.EncodeToString(tokenBuffer)[:12]
username := "ut" + token
authConfig := &AuthConfig{Username: username, Password: "test42", Email: "docker-ut+" + token + "@example.com"}
status, err := Login(authConfig, nil)
if err != nil {
t.Fatal(err)
}
expectedStatus := "Account created. Please use the confirmation link we sent" +
" to your e-mail to activate it."
if status != expectedStatus {
t.Fatalf("Expected status: \"%s\", found \"%s\" instead.", expectedStatus, status)
}
status, err = Login(authConfig, nil)
if err == nil {
t.Fatalf("Expected error but found nil instead")
}
expectedError := "Login: Account is not Active"
if !strings.Contains(err.Error(), expectedError) {
t.Fatalf("Expected message \"%s\" but found \"%s\" instead", expectedError, err)
}
}
func setupTempConfigFile() (*ConfigFile, error) {
    root, err := ioutil.TempDir("", "docker-test-auth")
    if err != nil {


@ -135,7 +135,7 @@ func (cli *DockerCli) CmdInsert(args ...string) error {
// mkBuildContext returns an archive of an empty context with the contents
// of `dockerfile` at the path ./Dockerfile
-func mkBuildContext(dockerfile string, files [][2]string) (archive.Archive, error) {
+func MkBuildContext(dockerfile string, files [][2]string) (archive.Archive, error) {
    buf := new(bytes.Buffer)
    tw := tar.NewWriter(buf)
    files = append(files, [2]string{"Dockerfile", dockerfile})
@ -185,7 +185,7 @@ func (cli *DockerCli) CmdBuild(args ...string) error {
    if err != nil {
        return err
    }
-   context, err = mkBuildContext(string(dockerfile), nil)
+   context, err = MkBuildContext(string(dockerfile), nil)
    } else if utils.IsURL(cmd.Arg(0)) || utils.IsGIT(cmd.Arg(0)) {
        isRemote = true
    } else {

config_test.go (new file, 149 lines)

@ -0,0 +1,149 @@
package docker
import (
"testing"
)
func TestCompareConfig(t *testing.T) {
volumes1 := make(map[string]struct{})
volumes1["/test1"] = struct{}{}
config1 := Config{
Dns: []string{"1.1.1.1", "2.2.2.2"},
PortSpecs: []string{"1111:1111", "2222:2222"},
Env: []string{"VAR1=1", "VAR2=2"},
VolumesFrom: "11111111",
Volumes: volumes1,
}
config2 := Config{
Dns: []string{"0.0.0.0", "2.2.2.2"},
PortSpecs: []string{"1111:1111", "2222:2222"},
Env: []string{"VAR1=1", "VAR2=2"},
VolumesFrom: "11111111",
Volumes: volumes1,
}
config3 := Config{
Dns: []string{"1.1.1.1", "2.2.2.2"},
PortSpecs: []string{"0000:0000", "2222:2222"},
Env: []string{"VAR1=1", "VAR2=2"},
VolumesFrom: "11111111",
Volumes: volumes1,
}
config4 := Config{
Dns: []string{"1.1.1.1", "2.2.2.2"},
PortSpecs: []string{"0000:0000", "2222:2222"},
Env: []string{"VAR1=1", "VAR2=2"},
VolumesFrom: "22222222",
Volumes: volumes1,
}
volumes2 := make(map[string]struct{})
volumes2["/test2"] = struct{}{}
config5 := Config{
Dns: []string{"1.1.1.1", "2.2.2.2"},
PortSpecs: []string{"0000:0000", "2222:2222"},
Env: []string{"VAR1=1", "VAR2=2"},
VolumesFrom: "11111111",
Volumes: volumes2,
}
if CompareConfig(&config1, &config2) {
t.Fatalf("CompareConfig should return false, Dns are different")
}
if CompareConfig(&config1, &config3) {
t.Fatalf("CompareConfig should return false, PortSpecs are different")
}
if CompareConfig(&config1, &config4) {
t.Fatalf("CompareConfig should return false, VolumesFrom are different")
}
if CompareConfig(&config1, &config5) {
t.Fatalf("CompareConfig should return false, Volumes are different")
}
if !CompareConfig(&config1, &config1) {
t.Fatalf("CompareConfig should return true")
}
}
func TestMergeConfig(t *testing.T) {
volumesImage := make(map[string]struct{})
volumesImage["/test1"] = struct{}{}
volumesImage["/test2"] = struct{}{}
configImage := &Config{
Dns: []string{"1.1.1.1", "2.2.2.2"},
PortSpecs: []string{"1111:1111", "2222:2222"},
Env: []string{"VAR1=1", "VAR2=2"},
VolumesFrom: "1111",
Volumes: volumesImage,
}
volumesUser := make(map[string]struct{})
volumesUser["/test3"] = struct{}{}
configUser := &Config{
Dns: []string{"3.3.3.3"},
PortSpecs: []string{"3333:2222", "3333:3333"},
Env: []string{"VAR2=3", "VAR3=3"},
Volumes: volumesUser,
}
if err := MergeConfig(configUser, configImage); err != nil {
t.Error(err)
}
if len(configUser.Dns) != 3 {
t.Fatalf("Expected 3 dns, 1.1.1.1, 2.2.2.2 and 3.3.3.3, found %d", len(configUser.Dns))
}
for _, dns := range configUser.Dns {
if dns != "1.1.1.1" && dns != "2.2.2.2" && dns != "3.3.3.3" {
t.Fatalf("Expected 1.1.1.1 or 2.2.2.2 or 3.3.3.3, found %s", dns)
}
}
if len(configUser.ExposedPorts) != 3 {
t.Fatalf("Expected 3 ExposedPorts, 1111, 2222 and 3333, found %d", len(configUser.ExposedPorts))
}
for portSpecs := range configUser.ExposedPorts {
if portSpecs.Port() != "1111" && portSpecs.Port() != "2222" && portSpecs.Port() != "3333" {
t.Fatalf("Expected 1111 or 2222 or 3333, found %s", portSpecs)
}
}
if len(configUser.Env) != 3 {
t.Fatalf("Expected 3 env var, VAR1=1, VAR2=3 and VAR3=3, found %d", len(configUser.Env))
}
for _, env := range configUser.Env {
if env != "VAR1=1" && env != "VAR2=3" && env != "VAR3=3" {
t.Fatalf("Expected VAR1=1 or VAR2=3 or VAR3=3, found %s", env)
}
}
if len(configUser.Volumes) != 3 {
t.Fatalf("Expected 3 volumes, /test1, /test2 and /test3, found %d", len(configUser.Volumes))
}
for v := range configUser.Volumes {
if v != "/test1" && v != "/test2" && v != "/test3" {
t.Fatalf("Expected /test1 or /test2 or /test3, found %s", v)
}
}
if configUser.VolumesFrom != "1111" {
t.Fatalf("Expected VolumesFrom to be 1111, found %s", configUser.VolumesFrom)
}
ports, _, err := parsePortSpecs([]string{"0000"})
if err != nil {
t.Error(err)
}
configImage2 := &Config{
ExposedPorts: ports,
}
if err := MergeConfig(configUser, configImage2); err != nil {
t.Error(err)
}
if len(configUser.ExposedPorts) != 4 {
t.Fatalf("Expected 4 ExposedPorts, 0000, 1111, 2222 and 3333, found %d", len(configUser.ExposedPorts))
}
for portSpecs := range configUser.ExposedPorts {
if portSpecs.Port() != "0000" && portSpecs.Port() != "1111" && portSpecs.Port() != "2222" && portSpecs.Port() != "3333" {
t.Fatalf("Expected 0000 or 1111 or 2222 or 3333, found %s", portSpecs)
}
}
}

container_unit_test.go (new file, 161 lines)

@ -0,0 +1,161 @@
package docker
import (
"testing"
)
func TestParseLxcConfOpt(t *testing.T) {
opts := []string{"lxc.utsname=docker", "lxc.utsname = docker "}
for _, o := range opts {
k, v, err := parseLxcOpt(o)
if err != nil {
t.FailNow()
}
if k != "lxc.utsname" {
t.Fail()
}
if v != "docker" {
t.Fail()
}
}
}
func TestParseNetworkOptsPrivateOnly(t *testing.T) {
ports, bindings, err := parsePortSpecs([]string{"192.168.1.100::80"})
if err != nil {
t.Fatal(err)
}
if len(ports) != 1 {
t.Logf("Expected 1 got %d", len(ports))
t.FailNow()
}
if len(bindings) != 1 {
t.Logf("Expected 1 got %d", len(bindings))
t.FailNow()
}
for k := range ports {
if k.Proto() != "tcp" {
t.Logf("Expected tcp got %s", k.Proto())
t.Fail()
}
if k.Port() != "80" {
t.Logf("Expected 80 got %s", k.Port())
t.Fail()
}
b, exists := bindings[k]
if !exists {
t.Log("Binding does not exist")
t.FailNow()
}
if len(b) != 1 {
t.Logf("Expected 1 got %d", len(b))
t.FailNow()
}
s := b[0]
if s.HostPort != "" {
t.Logf("Expected \"\" got %s", s.HostPort)
t.Fail()
}
if s.HostIp != "192.168.1.100" {
t.Fail()
}
}
}
func TestParseNetworkOptsPublic(t *testing.T) {
ports, bindings, err := parsePortSpecs([]string{"192.168.1.100:8080:80"})
if err != nil {
t.Fatal(err)
}
if len(ports) != 1 {
t.Logf("Expected 1 got %d", len(ports))
t.FailNow()
}
if len(bindings) != 1 {
t.Logf("Expected 1 got %d", len(bindings))
t.FailNow()
}
for k := range ports {
if k.Proto() != "tcp" {
t.Logf("Expected tcp got %s", k.Proto())
t.Fail()
}
if k.Port() != "80" {
t.Logf("Expected 80 got %s", k.Port())
t.Fail()
}
b, exists := bindings[k]
if !exists {
t.Log("Binding does not exist")
t.FailNow()
}
if len(b) != 1 {
t.Logf("Expected 1 got %d", len(b))
t.FailNow()
}
s := b[0]
if s.HostPort != "8080" {
t.Logf("Expected 8080 got %s", s.HostPort)
t.Fail()
}
if s.HostIp != "192.168.1.100" {
t.Fail()
}
}
}
func TestParseNetworkOptsUdp(t *testing.T) {
ports, bindings, err := parsePortSpecs([]string{"192.168.1.100::6000/udp"})
if err != nil {
t.Fatal(err)
}
if len(ports) != 1 {
t.Logf("Expected 1 got %d", len(ports))
t.FailNow()
}
if len(bindings) != 1 {
t.Logf("Expected 1 got %d", len(bindings))
t.FailNow()
}
for k := range ports {
if k.Proto() != "udp" {
t.Logf("Expected udp got %s", k.Proto())
t.Fail()
}
if k.Port() != "6000" {
t.Logf("Expected 6000 got %s", k.Port())
t.Fail()
}
b, exists := bindings[k]
if !exists {
t.Log("Binding does not exist")
t.FailNow()
}
if len(b) != 1 {
t.Logf("Expected 1 got %d", len(b))
t.FailNow()
}
s := b[0]
if s.HostPort != "" {
t.Logf("Expected \"\" got %s", s.HostPort)
t.Fail()
}
if s.HostIp != "192.168.1.100" {
t.Fail()
}
}
}
func TestGetFullName(t *testing.T) {
name, err := getFullName("testing")
if err != nil {
t.Fatal(err)
}
if name != "/testing" {
t.Fatalf("Expected /testing got %s", name)
}
if _, err := getFullName(""); err == nil {
t.Fatal("Error should not be nil")
}
}


@ -214,7 +214,7 @@ func (job *Job) GetenvList(key string) []string {
    return l
}
-func (job *Job) SetenvList(key string, value []string) error {
+func (job *Job) SetenvJson(key string, value interface{}) error {
    sval, err := json.Marshal(value)
    if err != nil {
        return err
@ -223,6 +223,10 @@ func (job *Job) SetenvList(key string, value []string) error {
    return nil
}
+func (job *Job) SetenvList(key string, value []string) error {
+   return job.SetenvJson(key, value)
+}
+
func (job *Job) Setenv(key, value string) {
    job.env = append(job.env, key+"="+value)
}
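Splitting SetenvJson out of SetenvList means any JSON-marshalable value can ride through a job's environment, which is what the relocated integration tests rely on when they feed a *Config into the new "create" job via ImportEnv. A rough usage sketch, assuming an initialized *engine.Engine named eng; the key names here are illustrative only:

job := eng.Job("create")
// Structured value: serialized with encoding/json under the hood.
if err := job.SetenvJson("Config", map[string]interface{}{"Image": "base", "Cmd": []string{"true"}}); err != nil {
    return err
}
// Plain string and string-slice setters keep working; SetenvList is now a thin wrapper.
job.Setenv("Hostname", "test")
if err := job.SetenvList("Dns", []string{"8.8.8.8"}); err != nil {
    return err
}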


@ -9,7 +9,6 @@ import (
    "io"
    "io/ioutil"
    "os"
-   "path"
    "testing"
    "time"
)
@ -121,41 +120,6 @@ func TestRegister(t *testing.T) {
    }
}
func TestMount(t *testing.T) {
graph := tempGraph(t)
defer os.RemoveAll(graph.Root)
archive, err := fakeTar()
if err != nil {
t.Fatal(err)
}
image, err := graph.Create(archive, nil, "Testing", "", nil)
if err != nil {
t.Fatal(err)
}
tmp, err := ioutil.TempDir("", "docker-test-graph-mount-")
if err != nil {
t.Fatal(err)
}
defer os.RemoveAll(tmp)
rootfs := path.Join(tmp, "rootfs")
if err := os.MkdirAll(rootfs, 0700); err != nil {
t.Fatal(err)
}
rw := path.Join(tmp, "rw")
if err := os.MkdirAll(rw, 0700); err != nil {
t.Fatal(err)
}
if err := image.Mount(rootfs, rw); err != nil {
t.Fatal(err)
}
// FIXME: test for mount contents
defer func() {
if err := Unmount(rootfs); err != nil {
t.Error(err)
}
}()
}
// Test that an image can be deleted by its shorthand prefix
func TestDeletePrefix(t *testing.T) {
    graph := tempGraph(t)

http_test.go (new file, 51 lines)

@ -0,0 +1,51 @@
package docker
import (
"fmt"
"net/http"
"net/http/httptest"
"testing"
)
func TestGetBoolParam(t *testing.T) {
if ret, err := getBoolParam("true"); err != nil || !ret {
t.Fatalf("true -> true, nil | got %t %s", ret, err)
}
if ret, err := getBoolParam("True"); err != nil || !ret {
t.Fatalf("True -> true, nil | got %t %s", ret, err)
}
if ret, err := getBoolParam("1"); err != nil || !ret {
t.Fatalf("1 -> true, nil | got %t %s", ret, err)
}
if ret, err := getBoolParam(""); err != nil || ret {
t.Fatalf("\"\" -> false, nil | got %t %s", ret, err)
}
if ret, err := getBoolParam("false"); err != nil || ret {
t.Fatalf("false -> false, nil | got %t %s", ret, err)
}
if ret, err := getBoolParam("0"); err != nil || ret {
t.Fatalf("0 -> false, nil | got %t %s", ret, err)
}
if ret, err := getBoolParam("faux"); err == nil || ret {
t.Fatalf("faux -> false, err | got %t %s", ret, err)
}
}
func TesthttpError(t *testing.T) {
r := httptest.NewRecorder()
httpError(r, fmt.Errorf("No such method"))
if r.Code != http.StatusNotFound {
t.Fatalf("Expected %d, got %d", http.StatusNotFound, r.Code)
}
httpError(r, fmt.Errorf("This accound hasn't been activated"))
if r.Code != http.StatusForbidden {
t.Fatalf("Expected %d, got %d", http.StatusForbidden, r.Code)
}
httpError(r, fmt.Errorf("Some error"))
if r.Code != http.StatusInternalServerError {
t.Fatalf("Expected %d, got %d", http.StatusInternalServerError, r.Code)
}
}

File diff suppressed because it is too large.

integration/auth_test.go (new file, 63 lines)

@ -0,0 +1,63 @@
package docker
import (
"github.com/dotcloud/docker/auth"
"crypto/rand"
"encoding/hex"
"os"
"strings"
"testing"
)
// FIXME: these tests have an external dependency on a staging index hosted
// on the docker.io infrastructure. That dependency should be removed.
// - Unit tests should have no side-effect dependencies.
// - Integration tests should have side-effects limited to the host environment being tested.
func TestLogin(t *testing.T) {
os.Setenv("DOCKER_INDEX_URL", "https://indexstaging-docker.dotcloud.com")
defer os.Setenv("DOCKER_INDEX_URL", "")
authConfig := &auth.AuthConfig{Username: "unittester", Password: "surlautrerivejetattendrai", Email: "noise+unittester@dotcloud.com"}
status, err := auth.Login(authConfig, nil)
if err != nil {
t.Fatal(err)
}
if status != "Login Succeeded" {
t.Fatalf("Expected status \"Login Succeeded\", found \"%s\" instead", status)
}
}
func TestCreateAccount(t *testing.T) {
os.Setenv("DOCKER_INDEX_URL", "https://indexstaging-docker.dotcloud.com")
defer os.Setenv("DOCKER_INDEX_URL", "")
tokenBuffer := make([]byte, 16)
_, err := rand.Read(tokenBuffer)
if err != nil {
t.Fatal(err)
}
token := hex.EncodeToString(tokenBuffer)[:12]
username := "ut" + token
authConfig := &auth.AuthConfig{Username: username, Password: "test42", Email: "docker-ut+" + token + "@example.com"}
status, err := auth.Login(authConfig, nil)
if err != nil {
t.Fatal(err)
}
expectedStatus := "Account created. Please use the confirmation link we sent" +
" to your e-mail to activate it."
if status != expectedStatus {
t.Fatalf("Expected status: \"%s\", found \"%s\" instead.", expectedStatus, status)
}
status, err = auth.Login(authConfig, nil)
if err == nil {
t.Fatalf("Expected error but found nil instead")
}
expectedError := "Login: Account is not Active"
if !strings.Contains(err.Error(), expectedError) {
t.Fatalf("Expected message \"%s\" but found \"%s\" instead", expectedError, err)
}
}
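The FIXME above is the reason these two tests now live under integration/: they hit a staging index over the network. One possible way to drop that dependency later, sketched here and not part of this commit, is to point DOCKER_INDEX_URL at a local httptest server; the handler paths and status codes are assumptions about what the index client expects, and net/http plus net/http/httptest would need to be imported:

ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
    if r.Method == "POST" && strings.HasSuffix(r.URL.Path, "/users/") {
        w.WriteHeader(http.StatusCreated) // pretend the account was created
        return
    }
    w.WriteHeader(http.StatusOK) // pretend the login succeeded
}))
defer ts.Close()
os.Setenv("DOCKER_INDEX_URL", ts.URL)
defer os.Setenv("DOCKER_INDEX_URL", "")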


@ -2,7 +2,9 @@ package docker
import (
    "fmt"
+   "github.com/dotcloud/docker"
    "github.com/dotcloud/docker/archive"
+   "github.com/dotcloud/docker/engine"
    "io/ioutil"
    "net"
    "net/http"
@ -14,7 +16,7 @@ import (
// mkTestContext generates a build context from the contents of the provided dockerfile.
// This context is suitable for use as an argument to BuildFile.Build()
func mkTestContext(dockerfile string, files [][2]string, t *testing.T) archive.Archive {
-   context, err := mkBuildContext(dockerfile, files)
+   context, err := docker.MkBuildContext(dockerfile, files)
    if err != nil {
        t.Fatal(err)
    }
@ -228,17 +230,15 @@ func TestBuild(t *testing.T) {
    }
}
-func buildImage(context testContextTemplate, t *testing.T, srv *Server, useCache bool) *Image {
+func buildImage(context testContextTemplate, t *testing.T, eng *engine.Engine, useCache bool) *docker.Image {
-   if srv == nil {
+   if eng == nil {
-       runtime := mkRuntime(t)
+       eng = NewTestEngine(t)
+       runtime := mkRuntimeFromEngine(eng, t)
+       // FIXME: we might not need runtime, why not simply nuke
+       // the engine?
        defer nuke(runtime)
-       srv = &Server{
-           runtime:     runtime,
-           pullingPool: make(map[string]struct{}),
-           pushingPool: make(map[string]struct{}),
-       }
    }
+   srv := mkServerFromEngine(eng, t)
    httpServer, err := mkTestingFileServer(context.remoteFiles)
    if err != nil {
@ -252,10 +252,17 @@ func buildImage(context testContextTemplate, t *testing.T, srv *Server, useCache
    }
    port := httpServer.URL[idx+1:]
-   ip := srv.runtime.networkManager.bridgeNetwork.IP
+   iIP := eng.Hack_GetGlobalVar("httpapi.bridgeIP")
+   if iIP == nil {
+       t.Fatal("Legacy bridgeIP field not set in engine")
+   }
+   ip, ok := iIP.(net.IP)
+   if !ok {
+       panic("Legacy bridgeIP field in engine does not cast to net.IP")
+   }
    dockerfile := constructDockerfile(context.dockerfile, ip, port)
-   buildfile := NewBuildFile(srv, ioutil.Discard, false, useCache, false)
+   buildfile := docker.NewBuildFile(srv, ioutil.Discard, false, useCache, false)
    id, err := buildfile.Build(mkTestContext(dockerfile, context.files, t))
    if err != nil {
        t.Fatal(err)
@ -368,20 +375,14 @@ func TestBuildEntrypoint(t *testing.T) {
// testing #1405 - config.Cmd does not get cleaned up if
// utilizing cache
func TestBuildEntrypointRunCleanup(t *testing.T) {
-   runtime := mkRuntime(t)
+   eng := NewTestEngine(t)
-   defer nuke(runtime)
+   defer nuke(mkRuntimeFromEngine(eng, t))
-   srv := &Server{
-       runtime:     runtime,
-       pullingPool: make(map[string]struct{}),
-       pushingPool: make(map[string]struct{}),
-   }
    img := buildImage(testContextTemplate{`
from {IMAGE}
run echo "hello"
`,
-       nil, nil}, t, srv, true)
+       nil, nil}, t, eng, true)
    img = buildImage(testContextTemplate{`
from {IMAGE}
@ -389,7 +390,7 @@ func TestBuildEntrypointRunCleanup(t *testing.T) {
add foo /foo
entrypoint ["/bin/echo"]
`,
-       [][2]string{{"foo", "HEYO"}}, nil}, t, srv, true)
+       [][2]string{{"foo", "HEYO"}}, nil}, t, eng, true)
    if len(img.Config.Cmd) != 0 {
        t.Fail()
@ -397,14 +398,8 @@ func TestBuildEntrypointRunCleanup(t *testing.T) {
}
func TestBuildImageWithCache(t *testing.T) {
-   runtime := mkRuntime(t)
+   eng := NewTestEngine(t)
-   defer nuke(runtime)
+   defer nuke(mkRuntimeFromEngine(eng, t))
-   srv := &Server{
-       runtime:     runtime,
-       pullingPool: make(map[string]struct{}),
-       pushingPool: make(map[string]struct{}),
-   }
    template := testContextTemplate{`
from {IMAGE}
@ -412,11 +407,11 @@ func TestBuildImageWithCache(t *testing.T) {
`,
        nil, nil}
-   img := buildImage(template, t, srv, true)
+   img := buildImage(template, t, eng, true)
    imageId := img.ID
    img = nil
-   img = buildImage(template, t, srv, true)
+   img = buildImage(template, t, eng, true)
    if imageId != img.ID {
        t.Logf("Image ids should match: %s != %s", imageId, img.ID)
@ -425,14 +420,8 @@ func TestBuildImageWithCache(t *testing.T) {
}
func TestBuildImageWithoutCache(t *testing.T) {
-   runtime := mkRuntime(t)
+   eng := NewTestEngine(t)
-   defer nuke(runtime)
+   defer nuke(mkRuntimeFromEngine(eng, t))
-   srv := &Server{
-       runtime:     runtime,
-       pullingPool: make(map[string]struct{}),
-       pushingPool: make(map[string]struct{}),
-   }
    template := testContextTemplate{`
from {IMAGE}
@ -440,11 +429,11 @@ func TestBuildImageWithoutCache(t *testing.T) {
`,
        nil, nil}
-   img := buildImage(template, t, srv, true)
+   img := buildImage(template, t, eng, true)
    imageId := img.ID
    img = nil
-   img = buildImage(template, t, srv, false)
+   img = buildImage(template, t, eng, false)
    if imageId == img.ID {
        t.Logf("Image ids should not match: %s == %s", imageId, img.ID)
@ -453,14 +442,9 @@ func TestBuildImageWithoutCache(t *testing.T) {
}
func TestForbiddenContextPath(t *testing.T) {
-   runtime := mkRuntime(t)
+   eng := NewTestEngine(t)
-   defer nuke(runtime)
+   defer nuke(mkRuntimeFromEngine(eng, t))
+   srv := mkServerFromEngine(eng, t)
-   srv := &Server{
-       runtime:     runtime,
-       pullingPool: make(map[string]struct{}),
-       pushingPool: make(map[string]struct{}),
-   }
    context := testContextTemplate{`
from {IMAGE}
@ -481,10 +465,17 @@ func TestForbiddenContextPath(t *testing.T) {
    }
    port := httpServer.URL[idx+1:]
-   ip := srv.runtime.networkManager.bridgeNetwork.IP
+   iIP := eng.Hack_GetGlobalVar("httpapi.bridgeIP")
+   if iIP == nil {
+       t.Fatal("Legacy bridgeIP field not set in engine")
+   }
+   ip, ok := iIP.(net.IP)
+   if !ok {
+       panic("Legacy bridgeIP field in engine does not cast to net.IP")
+   }
    dockerfile := constructDockerfile(context.dockerfile, ip, port)
-   buildfile := NewBuildFile(srv, ioutil.Discard, false, true, false)
+   buildfile := docker.NewBuildFile(srv, ioutil.Discard, false, true, false)
    _, err = buildfile.Build(mkTestContext(dockerfile, context.files, t))
    if err == nil {
@ -499,14 +490,8 @@ func TestForbiddenContextPath(t *testing.T) {
}
func TestBuildADDFileNotFound(t *testing.T) {
-   runtime := mkRuntime(t)
+   eng := NewTestEngine(t)
-   defer nuke(runtime)
+   defer nuke(mkRuntimeFromEngine(eng, t))
-   srv := &Server{
-       runtime:     runtime,
-       pullingPool: make(map[string]struct{}),
-       pushingPool: make(map[string]struct{}),
-   }
    context := testContextTemplate{`
from {IMAGE}
@ -526,10 +511,17 @@ func TestBuildADDFileNotFound(t *testing.T) {
    }
    port := httpServer.URL[idx+1:]
-   ip := srv.runtime.networkManager.bridgeNetwork.IP
+   iIP := eng.Hack_GetGlobalVar("httpapi.bridgeIP")
+   if iIP == nil {
+       t.Fatal("Legacy bridgeIP field not set in engine")
+   }
+   ip, ok := iIP.(net.IP)
+   if !ok {
+       panic("Legacy bridgeIP field in engine does not cast to net.IP")
+   }
    dockerfile := constructDockerfile(context.dockerfile, ip, port)
-   buildfile := NewBuildFile(srv, ioutil.Discard, false, true, false)
+   buildfile := docker.NewBuildFile(mkServerFromEngine(eng, t), ioutil.Discard, false, true, false)
    _, err = buildfile.Build(mkTestContext(dockerfile, context.files, t))
    if err == nil {
@ -544,26 +536,20 @@ func TestBuildADDFileNotFound(t *testing.T) {
}
func TestBuildInheritance(t *testing.T) {
-   runtime := mkRuntime(t)
+   eng := NewTestEngine(t)
-   defer nuke(runtime)
+   defer nuke(mkRuntimeFromEngine(eng, t))
-   srv := &Server{
-       runtime:     runtime,
-       pullingPool: make(map[string]struct{}),
-       pushingPool: make(map[string]struct{}),
-   }
    img := buildImage(testContextTemplate{`
from {IMAGE}
expose 4243
`,
-       nil, nil}, t, srv, true)
+       nil, nil}, t, eng, true)
    img2 := buildImage(testContextTemplate{fmt.Sprintf(`
from %s
entrypoint ["/bin/echo"]
`, img.ID),
-       nil, nil}, t, srv, true)
+       nil, nil}, t, eng, true)
    // from child
    if img2.Config.Entrypoint[0] != "/bin/echo" {


@ -3,6 +3,8 @@ package docker
import (
    "bufio"
    "fmt"
+   "github.com/dotcloud/docker"
+   "github.com/dotcloud/docker/engine"
    "github.com/dotcloud/docker/utils"
    "io"
    "io/ioutil"
@ -66,8 +68,8 @@ func assertPipe(input, output string, r io.Reader, w io.Writer, count int) error
func TestRunHostname(t *testing.T) {
    stdout, stdoutPipe := io.Pipe()
-   cli := NewDockerCli(nil, stdoutPipe, ioutil.Discard, testDaemonProto, testDaemonAddr)
+   cli := docker.NewDockerCli(nil, stdoutPipe, ioutil.Discard, testDaemonProto, testDaemonAddr)
-   defer cleanup(globalRuntime)
+   defer cleanup(globalEngine, t)
    c := make(chan struct{})
    go func() {
@ -111,8 +113,8 @@ func TestRunHostname(t *testing.T) {
func TestRunWorkdir(t *testing.T) {
    stdout, stdoutPipe := io.Pipe()
-   cli := NewDockerCli(nil, stdoutPipe, ioutil.Discard, testDaemonProto, testDaemonAddr)
+   cli := docker.NewDockerCli(nil, stdoutPipe, ioutil.Discard, testDaemonProto, testDaemonAddr)
-   defer cleanup(globalRuntime)
+   defer cleanup(globalEngine, t)
    c := make(chan struct{})
    go func() {
@ -156,8 +158,8 @@ func TestRunWorkdir(t *testing.T) {
func TestRunWorkdirExists(t *testing.T) {
    stdout, stdoutPipe := io.Pipe()
-   cli := NewDockerCli(nil, stdoutPipe, ioutil.Discard, testDaemonProto, testDaemonAddr)
+   cli := docker.NewDockerCli(nil, stdoutPipe, ioutil.Discard, testDaemonProto, testDaemonAddr)
-   defer cleanup(globalRuntime)
+   defer cleanup(globalEngine, t)
    c := make(chan struct{})
    go func() {
@ -201,8 +203,8 @@ func TestRunExit(t *testing.T) {
    stdin, stdinPipe := io.Pipe()
    stdout, stdoutPipe := io.Pipe()
-   cli := NewDockerCli(stdin, stdoutPipe, ioutil.Discard, testDaemonProto, testDaemonAddr)
+   cli := docker.NewDockerCli(stdin, stdoutPipe, ioutil.Discard, testDaemonProto, testDaemonAddr)
-   defer cleanup(globalRuntime)
+   defer cleanup(globalEngine, t)
    c1 := make(chan struct{})
    go func() {
@ -254,8 +256,8 @@ func TestRunDisconnect(t *testing.T) {
    stdin, stdinPipe := io.Pipe()
    stdout, stdoutPipe := io.Pipe()
-   cli := NewDockerCli(stdin, stdoutPipe, ioutil.Discard, testDaemonProto, testDaemonAddr)
+   cli := docker.NewDockerCli(stdin, stdoutPipe, ioutil.Discard, testDaemonProto, testDaemonAddr)
-   defer cleanup(globalRuntime)
+   defer cleanup(globalEngine, t)
    c1 := make(chan struct{})
    go func() {
@ -299,8 +301,8 @@ func TestRunDisconnectTty(t *testing.T) {
    stdin, stdinPipe := io.Pipe()
    stdout, stdoutPipe := io.Pipe()
-   cli := NewDockerCli(stdin, stdoutPipe, ioutil.Discard, testDaemonProto, testDaemonAddr)
+   cli := docker.NewDockerCli(stdin, stdoutPipe, ioutil.Discard, testDaemonProto, testDaemonAddr)
-   defer cleanup(globalRuntime)
+   defer cleanup(globalEngine, t)
    c1 := make(chan struct{})
    go func() {
@ -356,8 +358,8 @@ func TestRunAttachStdin(t *testing.T) {
    stdin, stdinPipe := io.Pipe()
    stdout, stdoutPipe := io.Pipe()
-   cli := NewDockerCli(stdin, stdoutPipe, ioutil.Discard, testDaemonProto, testDaemonAddr)
+   cli := docker.NewDockerCli(stdin, stdoutPipe, ioutil.Discard, testDaemonProto, testDaemonAddr)
-   defer cleanup(globalRuntime)
+   defer cleanup(globalEngine, t)
    ch := make(chan struct{})
    go func() {
@ -420,8 +422,8 @@ func TestRunDetach(t *testing.T) {
    stdin, stdinPipe := io.Pipe()
    stdout, stdoutPipe := io.Pipe()
-   cli := NewDockerCli(stdin, stdoutPipe, ioutil.Discard, testDaemonProto, testDaemonAddr)
+   cli := docker.NewDockerCli(stdin, stdoutPipe, ioutil.Discard, testDaemonProto, testDaemonAddr)
-   defer cleanup(globalRuntime)
+   defer cleanup(globalEngine, t)
    ch := make(chan struct{})
    go func() {
@ -466,8 +468,8 @@ func TestAttachDetach(t *testing.T) {
    stdin, stdinPipe := io.Pipe()
    stdout, stdoutPipe := io.Pipe()
-   cli := NewDockerCli(stdin, stdoutPipe, ioutil.Discard, testDaemonProto, testDaemonAddr)
+   cli := docker.NewDockerCli(stdin, stdoutPipe, ioutil.Discard, testDaemonProto, testDaemonAddr)
-   defer cleanup(globalRuntime)
+   defer cleanup(globalEngine, t)
    ch := make(chan struct{})
    go func() {
@ -477,7 +479,7 @@ func TestAttachDetach(t *testing.T) {
        }
    }()
-   var container *Container
+   var container *docker.Container
    setTimeout(t, "Reading container's id timed out", 10*time.Second, func() {
        buf := make([]byte, 1024)
@ -498,7 +500,7 @@ func TestAttachDetach(t *testing.T) {
    stdin, stdinPipe = io.Pipe()
    stdout, stdoutPipe = io.Pipe()
-   cli = NewDockerCli(stdin, stdoutPipe, ioutil.Discard, testDaemonProto, testDaemonAddr)
+   cli = docker.NewDockerCli(stdin, stdoutPipe, ioutil.Discard, testDaemonProto, testDaemonAddr)
    ch = make(chan struct{})
    go func() {
@ -546,8 +548,8 @@ func TestAttachDetachTruncatedID(t *testing.T) {
    stdin, stdinPipe := io.Pipe()
    stdout, stdoutPipe := io.Pipe()
-   cli := NewDockerCli(stdin, stdoutPipe, ioutil.Discard, testDaemonProto, testDaemonAddr)
+   cli := docker.NewDockerCli(stdin, stdoutPipe, ioutil.Discard, testDaemonProto, testDaemonAddr)
-   defer cleanup(globalRuntime)
+   defer cleanup(globalEngine, t)
    go stdout.Read(make([]byte, 1024))
    setTimeout(t, "Starting container timed out", 2*time.Second, func() {
@ -560,7 +562,7 @@ func TestAttachDetachTruncatedID(t *testing.T) {
    stdin, stdinPipe = io.Pipe()
    stdout, stdoutPipe = io.Pipe()
-   cli = NewDockerCli(stdin, stdoutPipe, ioutil.Discard, testDaemonProto, testDaemonAddr)
+   cli = docker.NewDockerCli(stdin, stdoutPipe, ioutil.Discard, testDaemonProto, testDaemonAddr)
    ch := make(chan struct{})
    go func() {
@ -608,8 +610,8 @@ func TestAttachDisconnect(t *testing.T) {
    stdin, stdinPipe := io.Pipe()
    stdout, stdoutPipe := io.Pipe()
-   cli := NewDockerCli(stdin, stdoutPipe, ioutil.Discard, testDaemonProto, testDaemonAddr)
+   cli := docker.NewDockerCli(stdin, stdoutPipe, ioutil.Discard, testDaemonProto, testDaemonAddr)
-   defer cleanup(globalRuntime)
+   defer cleanup(globalEngine, t)
    go func() {
        // Start a process in daemon mode
@ -677,8 +679,8 @@ func TestAttachDisconnect(t *testing.T) {
func TestRunAutoRemove(t *testing.T) {
    t.Skip("Fixme. Skipping test for now, race condition")
    stdout, stdoutPipe := io.Pipe()
-   cli := NewDockerCli(nil, stdoutPipe, ioutil.Discard, testDaemonProto, testDaemonAddr)
+   cli := docker.NewDockerCli(nil, stdoutPipe, ioutil.Discard, testDaemonProto, testDaemonAddr)
-   defer cleanup(globalRuntime)
+   defer cleanup(globalEngine, t)
    c := make(chan struct{})
    go func() {
@ -712,8 +714,8 @@ func TestRunAutoRemove(t *testing.T) {
}
func TestCmdLogs(t *testing.T) {
-   cli := NewDockerCli(nil, ioutil.Discard, ioutil.Discard, testDaemonProto, testDaemonAddr)
+   cli := docker.NewDockerCli(nil, ioutil.Discard, ioutil.Discard, testDaemonProto, testDaemonAddr)
-   defer cleanup(globalRuntime)
+   defer cleanup(globalEngine, t)
    if err := cli.CmdRun(unitTestImageID, "sh", "-c", "ls -l"); err != nil {
        t.Fatal(err)
@ -730,8 +732,8 @@ func TestCmdLogs(t *testing.T) {
// Expected behaviour: using / as a bind mount source should throw an error
func TestRunErrorBindMountRootSource(t *testing.T) {
-   cli := NewDockerCli(nil, nil, ioutil.Discard, testDaemonProto, testDaemonAddr)
+   cli := docker.NewDockerCli(nil, nil, ioutil.Discard, testDaemonProto, testDaemonAddr)
-   defer cleanup(globalRuntime)
+   defer cleanup(globalEngine, t)
    c := make(chan struct{})
    go func() {
@ -749,8 +751,8 @@ func TestRunErrorBindMountRootSource(t *testing.T) {
// Expected behaviour: error out when attempting to bind mount non-existing source paths
func TestRunErrorBindNonExistingSource(t *testing.T) {
-   cli := NewDockerCli(nil, nil, ioutil.Discard, testDaemonProto, testDaemonAddr)
+   cli := docker.NewDockerCli(nil, nil, ioutil.Discard, testDaemonProto, testDaemonAddr)
-   defer cleanup(globalRuntime)
+   defer cleanup(globalEngine, t)
    c := make(chan struct{})
    go func() {
@ -768,11 +770,10 @@ func TestRunErrorBindNonExistingSource(t *testing.T) {
func TestImagesViz(t *testing.T) {
    stdout, stdoutPipe := io.Pipe()
-   cli := NewDockerCli(nil, stdoutPipe, ioutil.Discard, testDaemonProto, testDaemonAddr)
+   cli := docker.NewDockerCli(nil, stdoutPipe, ioutil.Discard, testDaemonProto, testDaemonAddr)
-   defer cleanup(globalRuntime)
+   defer cleanup(globalEngine, t)
-   srv := &Server{runtime: globalRuntime}
+   image := buildTestImages(t, globalEngine)
-   image := buildTestImages(t, srv)
    c := make(chan struct{})
    go func() {
@ -819,11 +820,10 @@ func TestImagesViz(t *testing.T) {
func TestImagesTree(t *testing.T) {
    stdout, stdoutPipe := io.Pipe()
-   cli := NewDockerCli(nil, stdoutPipe, ioutil.Discard, testDaemonProto, testDaemonAddr)
+   cli := docker.NewDockerCli(nil, stdoutPipe, ioutil.Discard, testDaemonProto, testDaemonAddr)
-   defer cleanup(globalRuntime)
+   defer cleanup(globalEngine, t)
-   srv := &Server{runtime: globalRuntime}
+   image := buildTestImages(t, globalEngine)
-   image := buildTestImages(t, srv)
    c := make(chan struct{})
    go func() {
@ -867,7 +867,7 @@ func TestImagesTree(t *testing.T) {
    })
}
-func buildTestImages(t *testing.T, srv *Server) *Image {
+func buildTestImages(t *testing.T, eng *engine.Engine) *docker.Image {
    var testBuilder = testContextTemplate{
        `
@ -880,9 +880,9 @@ run [ "$(ls -d /var/run/sshd)" = "/var/run/sshd" ]
        nil,
        nil,
    }
-   image := buildImage(testBuilder, t, srv, true)
+   image := buildImage(testBuilder, t, eng, true)
-   err := srv.ContainerTag(image.ID, "test", "latest", false)
+   err := mkServerFromEngine(eng, t).ContainerTag(image.ID, "test", "latest", false)
    if err != nil {
        t.Fatal(err)
    }
@ -902,8 +902,8 @@ func TestRunCidFile(t *testing.T) {
    }
    tmpCidFile := path.Join(tmpDir, "cid")
-   cli := NewDockerCli(nil, stdoutPipe, ioutil.Discard, testDaemonProto, testDaemonAddr)
+   cli := docker.NewDockerCli(nil, stdoutPipe, ioutil.Discard, testDaemonProto, testDaemonAddr)
-   defer cleanup(globalRuntime)
+   defer cleanup(globalEngine, t)
    c := make(chan struct{})
    go func() {


@ -3,10 +3,10 @@ package docker
import (
    "bufio"
    "fmt"
+   "github.com/dotcloud/docker"
    "github.com/dotcloud/docker/utils"
    "io"
    "io/ioutil"
-   "math/rand"
    "os"
    "path"
    "regexp"
@ -20,7 +20,7 @@ func TestIDFormat(t *testing.T) {
runtime := mkRuntime(t) runtime := mkRuntime(t)
defer nuke(runtime) defer nuke(runtime)
container1, _, err := runtime.Create( container1, _, err := runtime.Create(
&Config{ &docker.Config{
Image: GetTestImage(runtime).ID, Image: GetTestImage(runtime).ID,
Cmd: []string{"/bin/sh", "-c", "echo hello world"}, Cmd: []string{"/bin/sh", "-c", "echo hello world"},
}, },
@ -41,7 +41,7 @@ func TestIDFormat(t *testing.T) {
func TestMultipleAttachRestart(t *testing.T) { func TestMultipleAttachRestart(t *testing.T) {
runtime := mkRuntime(t) runtime := mkRuntime(t)
defer nuke(runtime) defer nuke(runtime)
container, _ := mkContainer( container, _, _ := mkContainer(
runtime, runtime,
[]string{"_", "/bin/sh", "-c", "i=1; while [ $i -le 5 ]; do i=`expr $i + 1`; echo hello; done"}, []string{"_", "/bin/sh", "-c", "i=1; while [ $i -le 5 ]; do i=`expr $i + 1`; echo hello; done"},
t, t,
@ -134,10 +134,11 @@ func TestMultipleAttachRestart(t *testing.T) {
} }
func TestDiff(t *testing.T) { func TestDiff(t *testing.T) {
runtime := mkRuntime(t) eng := NewTestEngine(t)
runtime := mkRuntimeFromEngine(eng, t)
defer nuke(runtime) defer nuke(runtime)
// Create a container and remove a file // Create a container and remove a file
container1, _ := mkContainer(runtime, []string{"_", "/bin/rm", "/etc/passwd"}, t) container1, _, _ := mkContainer(runtime, []string{"_", "/bin/rm", "/etc/passwd"}, t)
defer runtime.Destroy(container1) defer runtime.Destroy(container1)
// The changelog should be empty and not fail before run. See #1705 // The changelog should be empty and not fail before run. See #1705
@ -169,17 +170,13 @@ func TestDiff(t *testing.T) {
} }
// Commit the container // Commit the container
rwTar, err := container1.ExportRw() img, err := runtime.Commit(container1, "", "", "unit test commited image - diff", "", nil)
if err != nil {
t.Error(err)
}
img, err := runtime.graph.Create(rwTar, container1, "unit test commited image - diff", "", nil)
if err != nil { if err != nil {
t.Error(err) t.Error(err)
} }
// Create a new container from the commited image // Create a new container from the commited image
container2, _ := mkContainer(runtime, []string{img.ID, "cat", "/etc/passwd"}, t) container2, _, _ := mkContainer(runtime, []string{img.ID, "cat", "/etc/passwd"}, t)
defer runtime.Destroy(container2) defer runtime.Destroy(container2)
if err := container2.Run(); err != nil { if err := container2.Run(); err != nil {
@ -198,7 +195,7 @@ func TestDiff(t *testing.T) {
} }
// Create a new container // Create a new container
container3, _ := mkContainer(runtime, []string{"_", "rm", "/bin/httpd"}, t) container3, _, _ := mkContainer(runtime, []string{"_", "rm", "/bin/httpd"}, t)
defer runtime.Destroy(container3) defer runtime.Destroy(container3)
if err := container3.Run(); err != nil { if err := container3.Run(); err != nil {
@ -224,7 +221,7 @@ func TestDiff(t *testing.T) {
func TestCommitAutoRun(t *testing.T) { func TestCommitAutoRun(t *testing.T) {
runtime := mkRuntime(t) runtime := mkRuntime(t)
defer nuke(runtime) defer nuke(runtime)
container1, _ := mkContainer(runtime, []string{"_", "/bin/sh", "-c", "echo hello > /world"}, t) container1, _, _ := mkContainer(runtime, []string{"_", "/bin/sh", "-c", "echo hello > /world"}, t)
defer runtime.Destroy(container1) defer runtime.Destroy(container1)
if container1.State.Running { if container1.State.Running {
@ -237,17 +234,13 @@ func TestCommitAutoRun(t *testing.T) {
t.Errorf("Container shouldn't be running") t.Errorf("Container shouldn't be running")
} }
rwTar, err := container1.ExportRw() img, err := runtime.Commit(container1, "", "", "unit test commited image", "", &docker.Config{Cmd: []string{"cat", "/world"}})
if err != nil {
t.Error(err)
}
img, err := runtime.graph.Create(rwTar, container1, "unit test commited image", "", &Config{Cmd: []string{"cat", "/world"}})
if err != nil { if err != nil {
t.Error(err) t.Error(err)
} }
// FIXME: Make a TestCommit that stops here and check docker.root/layers/img.id/world // FIXME: Make a TestCommit that stops here and check docker.root/layers/img.id/world
container2, _ := mkContainer(runtime, []string{img.ID}, t) container2, _, _ := mkContainer(runtime, []string{img.ID}, t)
defer runtime.Destroy(container2) defer runtime.Destroy(container2)
stdout, err := container2.StdoutPipe() stdout, err := container2.StdoutPipe()
if err != nil { if err != nil {
@ -284,7 +277,7 @@ func TestCommitRun(t *testing.T) {
runtime := mkRuntime(t) runtime := mkRuntime(t)
defer nuke(runtime) defer nuke(runtime)
container1, _ := mkContainer(runtime, []string{"_", "/bin/sh", "-c", "echo hello > /world"}, t) container1, _, _ := mkContainer(runtime, []string{"_", "/bin/sh", "-c", "echo hello > /world"}, t)
defer runtime.Destroy(container1) defer runtime.Destroy(container1)
if container1.State.Running { if container1.State.Running {
@ -297,17 +290,13 @@ func TestCommitRun(t *testing.T) {
t.Errorf("Container shouldn't be running") t.Errorf("Container shouldn't be running")
} }
rwTar, err := container1.ExportRw() img, err := runtime.Commit(container1, "", "", "unit test commited image", "", nil)
if err != nil {
t.Error(err)
}
img, err := runtime.graph.Create(rwTar, container1, "unit test commited image", "", nil)
if err != nil { if err != nil {
t.Error(err) t.Error(err)
} }
// FIXME: Make a TestCommit that stops here and check docker.root/layers/img.id/world // FIXME: Make a TestCommit that stops here and check docker.root/layers/img.id/world
container2, _ := mkContainer(runtime, []string{img.ID, "cat", "/world"}, t) container2, _, _ := mkContainer(runtime, []string{img.ID, "cat", "/world"}, t)
defer runtime.Destroy(container2) defer runtime.Destroy(container2)
stdout, err := container2.StdoutPipe() stdout, err := container2.StdoutPipe()
if err != nil { if err != nil {
@ -343,7 +332,7 @@ func TestCommitRun(t *testing.T) {
func TestStart(t *testing.T) { func TestStart(t *testing.T) {
runtime := mkRuntime(t) runtime := mkRuntime(t)
defer nuke(runtime) defer nuke(runtime)
container, _ := mkContainer(runtime, []string{"-m", "33554432", "-c", "1000", "-i", "_", "/bin/cat"}, t) container, _, _ := mkContainer(runtime, []string{"-m", "33554432", "-c", "1000", "-i", "_", "/bin/cat"}, t)
defer runtime.Destroy(container) defer runtime.Destroy(container)
cStdin, err := container.StdinPipe() cStdin, err := container.StdinPipe()
@ -373,7 +362,7 @@ func TestStart(t *testing.T) {
func TestRun(t *testing.T) { func TestRun(t *testing.T) {
runtime := mkRuntime(t) runtime := mkRuntime(t)
defer nuke(runtime) defer nuke(runtime)
container, _ := mkContainer(runtime, []string{"_", "ls", "-al"}, t) container, _, _ := mkContainer(runtime, []string{"_", "ls", "-al"}, t)
defer runtime.Destroy(container) defer runtime.Destroy(container)
if container.State.Running { if container.State.Running {
@ -391,7 +380,7 @@ func TestOutput(t *testing.T) {
runtime := mkRuntime(t) runtime := mkRuntime(t)
defer nuke(runtime) defer nuke(runtime)
container, _, err := runtime.Create( container, _, err := runtime.Create(
&Config{ &docker.Config{
Image: GetTestImage(runtime).ID, Image: GetTestImage(runtime).ID,
Cmd: []string{"echo", "-n", "foobar"}, Cmd: []string{"echo", "-n", "foobar"},
}, },
@ -414,7 +403,7 @@ func TestContainerNetwork(t *testing.T) {
runtime := mkRuntime(t) runtime := mkRuntime(t)
defer nuke(runtime) defer nuke(runtime)
container, _, err := runtime.Create( container, _, err := runtime.Create(
&Config{ &docker.Config{
Image: GetTestImage(runtime).ID, Image: GetTestImage(runtime).ID,
Cmd: []string{"ping", "-c", "1", "127.0.0.1"}, Cmd: []string{"ping", "-c", "1", "127.0.0.1"},
}, },
@ -436,7 +425,7 @@ func TestKillDifferentUser(t *testing.T) {
runtime := mkRuntime(t) runtime := mkRuntime(t)
defer nuke(runtime) defer nuke(runtime)
container, _, err := runtime.Create(&Config{ container, _, err := runtime.Create(&docker.Config{
Image: GetTestImage(runtime).ID, Image: GetTestImage(runtime).ID,
Cmd: []string{"cat"}, Cmd: []string{"cat"},
OpenStdin: true, OpenStdin: true,
@ -448,7 +437,9 @@ func TestKillDifferentUser(t *testing.T) {
t.Fatal(err) t.Fatal(err)
} }
defer runtime.Destroy(container) defer runtime.Destroy(container)
defer container.stdin.Close() // FIXME @shykes: this seems redundant, but is very old, I'm leaving it in case
// there is a side effect I'm not seeing.
// defer container.stdin.Close()
if container.State.Running { if container.State.Running {
t.Errorf("Container shouldn't be running") t.Errorf("Container shouldn't be running")
@ -490,22 +481,35 @@ func TestKillDifferentUser(t *testing.T) {
// Test that creating a container with a volume doesn't crash. Regression test for #995. // Test that creating a container with a volume doesn't crash. Regression test for #995.
func TestCreateVolume(t *testing.T) { func TestCreateVolume(t *testing.T) {
runtime := mkRuntime(t) eng := NewTestEngine(t)
runtime := mkRuntimeFromEngine(eng, t)
defer nuke(runtime) defer nuke(runtime)
config, hc, _, err := ParseRun([]string{"-v", "/var/lib/data", GetTestImage(runtime).ID, "echo", "hello", "world"}, nil) config, hc, _, err := docker.ParseRun([]string{"-v", "/var/lib/data", unitTestImageID, "echo", "hello", "world"}, nil)
if err != nil { if err != nil {
t.Fatal(err) t.Fatal(err)
} }
c, _, err := runtime.Create(config, "") jobCreate := eng.Job("create")
if err != nil { if err := jobCreate.ImportEnv(config); err != nil {
t.Fatal(err) t.Fatal(err)
} }
defer runtime.Destroy(c) var id string
c.hostConfig = hc jobCreate.StdoutParseString(&id)
if err := c.Start(); err != nil { if err := jobCreate.Run(); err != nil {
t.Fatal(err) t.Fatal(err)
} }
jobStart := eng.Job("start", id)
if err := jobStart.ImportEnv(hc); err != nil {
t.Fatal(err)
}
if err := jobStart.Run(); err != nil {
t.Fatal(err)
}
// FIXME: this hack can be removed once Wait is a job
c := runtime.Get(id)
if c == nil {
t.Fatalf("Couldn't retrieve container %s from runtime", id)
}
c.WaitTimeout(500 * time.Millisecond) c.WaitTimeout(500 * time.Millisecond)
c.Wait() c.Wait()
} }
@ -513,7 +517,7 @@ func TestCreateVolume(t *testing.T) {
func TestKill(t *testing.T) { func TestKill(t *testing.T) {
runtime := mkRuntime(t) runtime := mkRuntime(t)
defer nuke(runtime) defer nuke(runtime)
container, _, err := runtime.Create(&Config{ container, _, err := runtime.Create(&docker.Config{
Image: GetTestImage(runtime).ID, Image: GetTestImage(runtime).ID,
Cmd: []string{"sleep", "2"}, Cmd: []string{"sleep", "2"},
}, },
@ -557,7 +561,7 @@ func TestExitCode(t *testing.T) {
runtime := mkRuntime(t) runtime := mkRuntime(t)
defer nuke(runtime) defer nuke(runtime)
trueContainer, _, err := runtime.Create(&Config{ trueContainer, _, err := runtime.Create(&docker.Config{
Image: GetTestImage(runtime).ID, Image: GetTestImage(runtime).ID,
Cmd: []string{"/bin/true", ""}, Cmd: []string{"/bin/true", ""},
}, "") }, "")
@ -572,7 +576,7 @@ func TestExitCode(t *testing.T) {
t.Errorf("Unexpected exit code %d (expected 0)", trueContainer.State.ExitCode) t.Errorf("Unexpected exit code %d (expected 0)", trueContainer.State.ExitCode)
} }
falseContainer, _, err := runtime.Create(&Config{ falseContainer, _, err := runtime.Create(&docker.Config{
Image: GetTestImage(runtime).ID, Image: GetTestImage(runtime).ID,
Cmd: []string{"/bin/false", ""}, Cmd: []string{"/bin/false", ""},
}, "") }, "")
@ -591,7 +595,7 @@ func TestExitCode(t *testing.T) {
func TestRestart(t *testing.T) { func TestRestart(t *testing.T) {
runtime := mkRuntime(t) runtime := mkRuntime(t)
defer nuke(runtime) defer nuke(runtime)
container, _, err := runtime.Create(&Config{ container, _, err := runtime.Create(&docker.Config{
Image: GetTestImage(runtime).ID, Image: GetTestImage(runtime).ID,
Cmd: []string{"echo", "-n", "foobar"}, Cmd: []string{"echo", "-n", "foobar"},
}, },
@ -622,7 +626,7 @@ func TestRestart(t *testing.T) {
func TestRestartStdin(t *testing.T) { func TestRestartStdin(t *testing.T) {
runtime := mkRuntime(t) runtime := mkRuntime(t)
defer nuke(runtime) defer nuke(runtime)
container, _, err := runtime.Create(&Config{ container, _, err := runtime.Create(&docker.Config{
Image: GetTestImage(runtime).ID, Image: GetTestImage(runtime).ID,
Cmd: []string{"cat"}, Cmd: []string{"cat"},
@ -700,7 +704,7 @@ func TestUser(t *testing.T) {
defer nuke(runtime) defer nuke(runtime)
// Default user must be root // Default user must be root
container, _, err := runtime.Create(&Config{ container, _, err := runtime.Create(&docker.Config{
Image: GetTestImage(runtime).ID, Image: GetTestImage(runtime).ID,
Cmd: []string{"id"}, Cmd: []string{"id"},
}, },
@ -719,7 +723,7 @@ func TestUser(t *testing.T) {
} }
// Set a username // Set a username
container, _, err = runtime.Create(&Config{ container, _, err = runtime.Create(&docker.Config{
Image: GetTestImage(runtime).ID, Image: GetTestImage(runtime).ID,
Cmd: []string{"id"}, Cmd: []string{"id"},
@ -740,7 +744,7 @@ func TestUser(t *testing.T) {
} }
// Set a UID // Set a UID
container, _, err = runtime.Create(&Config{ container, _, err = runtime.Create(&docker.Config{
Image: GetTestImage(runtime).ID, Image: GetTestImage(runtime).ID,
Cmd: []string{"id"}, Cmd: []string{"id"},
@ -761,7 +765,7 @@ func TestUser(t *testing.T) {
} }
// Set a different user by uid // Set a different user by uid
container, _, err = runtime.Create(&Config{ container, _, err = runtime.Create(&docker.Config{
Image: GetTestImage(runtime).ID, Image: GetTestImage(runtime).ID,
Cmd: []string{"id"}, Cmd: []string{"id"},
@ -784,7 +788,7 @@ func TestUser(t *testing.T) {
} }
// Set a different user by username // Set a different user by username
container, _, err = runtime.Create(&Config{ container, _, err = runtime.Create(&docker.Config{
Image: GetTestImage(runtime).ID, Image: GetTestImage(runtime).ID,
Cmd: []string{"id"}, Cmd: []string{"id"},
@ -805,7 +809,7 @@ func TestUser(t *testing.T) {
} }
// Test an wrong username // Test an wrong username
container, _, err = runtime.Create(&Config{ container, _, err = runtime.Create(&docker.Config{
Image: GetTestImage(runtime).ID, Image: GetTestImage(runtime).ID,
Cmd: []string{"id"}, Cmd: []string{"id"},
@ -827,7 +831,7 @@ func TestMultipleContainers(t *testing.T) {
runtime := mkRuntime(t) runtime := mkRuntime(t)
defer nuke(runtime) defer nuke(runtime)
container1, _, err := runtime.Create(&Config{ container1, _, err := runtime.Create(&docker.Config{
Image: GetTestImage(runtime).ID, Image: GetTestImage(runtime).ID,
Cmd: []string{"sleep", "2"}, Cmd: []string{"sleep", "2"},
}, },
@ -838,7 +842,7 @@ func TestMultipleContainers(t *testing.T) {
} }
defer runtime.Destroy(container1) defer runtime.Destroy(container1)
container2, _, err := runtime.Create(&Config{ container2, _, err := runtime.Create(&docker.Config{
Image: GetTestImage(runtime).ID, Image: GetTestImage(runtime).ID,
Cmd: []string{"sleep", "2"}, Cmd: []string{"sleep", "2"},
}, },
@ -882,7 +886,7 @@ func TestMultipleContainers(t *testing.T) {
func TestStdin(t *testing.T) { func TestStdin(t *testing.T) {
runtime := mkRuntime(t) runtime := mkRuntime(t)
defer nuke(runtime) defer nuke(runtime)
container, _, err := runtime.Create(&Config{ container, _, err := runtime.Create(&docker.Config{
Image: GetTestImage(runtime).ID, Image: GetTestImage(runtime).ID,
Cmd: []string{"cat"}, Cmd: []string{"cat"},
@ -927,7 +931,7 @@ func TestStdin(t *testing.T) {
func TestTty(t *testing.T) { func TestTty(t *testing.T) {
runtime := mkRuntime(t) runtime := mkRuntime(t)
defer nuke(runtime) defer nuke(runtime)
container, _, err := runtime.Create(&Config{ container, _, err := runtime.Create(&docker.Config{
Image: GetTestImage(runtime).ID, Image: GetTestImage(runtime).ID,
Cmd: []string{"cat"}, Cmd: []string{"cat"},
@ -974,7 +978,7 @@ func TestEnv(t *testing.T) {
os.Setenv("TRICKY", "tri\ncky\n") os.Setenv("TRICKY", "tri\ncky\n")
runtime := mkRuntime(t) runtime := mkRuntime(t)
defer nuke(runtime) defer nuke(runtime)
config, _, _, err := ParseRun([]string{"-e=FALSE=true", "-e=TRUE", "-e=TRICKY", GetTestImage(runtime).ID, "env"}, nil) config, _, _, err := docker.ParseRun([]string{"-e=FALSE=true", "-e=TRUE", "-e=TRICKY", GetTestImage(runtime).ID, "env"}, nil)
if err != nil { if err != nil {
t.Fatal(err) t.Fatal(err)
} }
@ -1028,7 +1032,7 @@ func TestEntrypoint(t *testing.T) {
runtime := mkRuntime(t) runtime := mkRuntime(t)
defer nuke(runtime) defer nuke(runtime)
container, _, err := runtime.Create( container, _, err := runtime.Create(
&Config{ &docker.Config{
Image: GetTestImage(runtime).ID, Image: GetTestImage(runtime).ID,
Entrypoint: []string{"/bin/echo"}, Entrypoint: []string{"/bin/echo"},
Cmd: []string{"-n", "foobar"}, Cmd: []string{"-n", "foobar"},
@ -1052,7 +1056,7 @@ func TestEntrypointNoCmd(t *testing.T) {
runtime := mkRuntime(t) runtime := mkRuntime(t)
defer nuke(runtime) defer nuke(runtime)
container, _, err := runtime.Create( container, _, err := runtime.Create(
&Config{ &docker.Config{
Image: GetTestImage(runtime).ID, Image: GetTestImage(runtime).ID,
Entrypoint: []string{"/bin/echo", "foobar"}, Entrypoint: []string{"/bin/echo", "foobar"},
}, },
@ -1071,96 +1075,11 @@ func TestEntrypointNoCmd(t *testing.T) {
} }
} }
func grepFile(t *testing.T, path string, pattern string) {
f, err := os.Open(path)
if err != nil {
t.Fatal(err)
}
defer f.Close()
r := bufio.NewReader(f)
var (
line string
)
err = nil
for err == nil {
line, err = r.ReadString('\n')
if strings.Contains(line, pattern) == true {
return
}
}
t.Fatalf("grepFile: pattern \"%s\" not found in \"%s\"", pattern, path)
}
func TestLXCConfig(t *testing.T) {
runtime := mkRuntime(t)
defer nuke(runtime)
// Memory is allocated randomly for testing
rand.Seed(time.Now().UTC().UnixNano())
memMin := 33554432
memMax := 536870912
mem := memMin + rand.Intn(memMax-memMin)
// CPU shares as well
cpuMin := 100
cpuMax := 10000
cpu := cpuMin + rand.Intn(cpuMax-cpuMin)
container, _, err := runtime.Create(&Config{
Image: GetTestImage(runtime).ID,
Cmd: []string{"/bin/true"},
Hostname: "foobar",
Memory: int64(mem),
CpuShares: int64(cpu),
},
"",
)
if err != nil {
t.Fatal(err)
}
defer runtime.Destroy(container)
container.generateLXCConfig()
grepFile(t, container.lxcConfigPath(), "lxc.utsname = foobar")
grepFile(t, container.lxcConfigPath(),
fmt.Sprintf("lxc.cgroup.memory.limit_in_bytes = %d", mem))
grepFile(t, container.lxcConfigPath(),
fmt.Sprintf("lxc.cgroup.memory.memsw.limit_in_bytes = %d", mem*2))
}
func TestCustomLxcConfig(t *testing.T) {
runtime := mkRuntime(t)
defer nuke(runtime)
container, _, err := runtime.Create(&Config{
Image: GetTestImage(runtime).ID,
Cmd: []string{"/bin/true"},
Hostname: "foobar",
},
"",
)
if err != nil {
t.Fatal(err)
}
defer runtime.Destroy(container)
container.hostConfig = &HostConfig{LxcConf: []KeyValuePair{
{
Key: "lxc.utsname",
Value: "docker",
},
{
Key: "lxc.cgroup.cpuset.cpus",
Value: "0,1",
},
}}
container.generateLXCConfig()
grepFile(t, container.lxcConfigPath(), "lxc.utsname = docker")
grepFile(t, container.lxcConfigPath(), "lxc.cgroup.cpuset.cpus = 0,1")
}
func BenchmarkRunSequencial(b *testing.B) { func BenchmarkRunSequencial(b *testing.B) {
runtime := mkRuntime(b) runtime := mkRuntime(b)
defer nuke(runtime) defer nuke(runtime)
for i := 0; i < b.N; i++ { for i := 0; i < b.N; i++ {
container, _, err := runtime.Create(&Config{ container, _, err := runtime.Create(&docker.Config{
Image: GetTestImage(runtime).ID, Image: GetTestImage(runtime).ID,
Cmd: []string{"echo", "-n", "foo"}, Cmd: []string{"echo", "-n", "foo"},
}, },
@ -1193,7 +1112,7 @@ func BenchmarkRunParallel(b *testing.B) {
complete := make(chan error) complete := make(chan error)
tasks = append(tasks, complete) tasks = append(tasks, complete)
go func(i int, complete chan error) { go func(i int, complete chan error) {
container, _, err := runtime.Create(&Config{ container, _, err := runtime.Create(&docker.Config{
Image: GetTestImage(runtime).ID, Image: GetTestImage(runtime).ID,
Cmd: []string{"echo", "-n", "foo"}, Cmd: []string{"echo", "-n", "foo"},
}, },
@ -1244,11 +1163,12 @@ func tempDir(t *testing.T) string {
// Test for #1737 // Test for #1737
func TestCopyVolumeUidGid(t *testing.T) { func TestCopyVolumeUidGid(t *testing.T) {
r := mkRuntime(t) eng := NewTestEngine(t)
defer nuke(r) r := mkRuntimeFromEngine(eng, t)
defer r.Nuke()
// Add directory not owned by root // Add directory not owned by root
container1, _ := mkContainer(r, []string{"_", "/bin/sh", "-c", "mkdir -p /hello && touch /hello/test.txt && chown daemon.daemon /hello"}, t) container1, _, _ := mkContainer(r, []string{"_", "/bin/sh", "-c", "mkdir -p /hello && touch /hello/test.txt && chown daemon.daemon /hello"}, t)
defer r.Destroy(container1) defer r.Destroy(container1)
if container1.State.Running { if container1.State.Running {
@ -1261,11 +1181,7 @@ func TestCopyVolumeUidGid(t *testing.T) {
t.Errorf("Container shouldn't be running") t.Errorf("Container shouldn't be running")
} }
rwTar, err := container1.ExportRw() img, err := r.Commit(container1, "", "", "unit test commited image", "", nil)
if err != nil {
t.Error(err)
}
img, err := r.graph.Create(rwTar, container1, "unit test commited image", "", nil)
if err != nil { if err != nil {
t.Error(err) t.Error(err)
} }
@ -1273,7 +1189,7 @@ func TestCopyVolumeUidGid(t *testing.T) {
// Test that the uid and gid is copied from the image to the volume // Test that the uid and gid is copied from the image to the volume
tmpDir1 := tempDir(t) tmpDir1 := tempDir(t)
defer os.RemoveAll(tmpDir1) defer os.RemoveAll(tmpDir1)
stdout1, _ := runContainer(r, []string{"-v", "/hello", img.ID, "stat", "-c", "%U %G", "/hello"}, t) stdout1, _ := runContainer(eng, r, []string{"-v", "/hello", img.ID, "stat", "-c", "%U %G", "/hello"}, t)
if !strings.Contains(stdout1, "daemon daemon") { if !strings.Contains(stdout1, "daemon daemon") {
t.Fatal("Container failed to transfer uid and gid to volume") t.Fatal("Container failed to transfer uid and gid to volume")
} }
@ -1281,11 +1197,12 @@ func TestCopyVolumeUidGid(t *testing.T) {
// Test for #1582 // Test for #1582
func TestCopyVolumeContent(t *testing.T) { func TestCopyVolumeContent(t *testing.T) {
r := mkRuntime(t) eng := NewTestEngine(t)
defer nuke(r) r := mkRuntimeFromEngine(eng, t)
defer r.Nuke()
// Put some content in a directory of a container and commit it // Put some content in a directory of a container and commit it
container1, _ := mkContainer(r, []string{"_", "/bin/sh", "-c", "mkdir -p /hello/local && echo hello > /hello/local/world"}, t) container1, _, _ := mkContainer(r, []string{"_", "/bin/sh", "-c", "mkdir -p /hello/local && echo hello > /hello/local/world"}, t)
defer r.Destroy(container1) defer r.Destroy(container1)
if container1.State.Running { if container1.State.Running {
@ -1298,11 +1215,7 @@ func TestCopyVolumeContent(t *testing.T) {
t.Errorf("Container shouldn't be running") t.Errorf("Container shouldn't be running")
} }
rwTar, err := container1.ExportRw() img, err := r.Commit(container1, "", "", "unit test commited image", "", nil)
if err != nil {
t.Error(err)
}
img, err := r.graph.Create(rwTar, container1, "unit test commited image", "", nil)
if err != nil { if err != nil {
t.Error(err) t.Error(err)
} }
@ -1310,31 +1223,33 @@ func TestCopyVolumeContent(t *testing.T) {
// Test that the content is copied from the image to the volume // Test that the content is copied from the image to the volume
tmpDir1 := tempDir(t) tmpDir1 := tempDir(t)
defer os.RemoveAll(tmpDir1) defer os.RemoveAll(tmpDir1)
stdout1, _ := runContainer(r, []string{"-v", "/hello", img.ID, "find", "/hello"}, t) stdout1, _ := runContainer(eng, r, []string{"-v", "/hello", img.ID, "find", "/hello"}, t)
if !(strings.Contains(stdout1, "/hello/local/world") && strings.Contains(stdout1, "/hello/local")) { if !(strings.Contains(stdout1, "/hello/local/world") && strings.Contains(stdout1, "/hello/local")) {
t.Fatal("Container failed to transfer content to volume") t.Fatal("Container failed to transfer content to volume")
} }
} }
func TestBindMounts(t *testing.T) { func TestBindMounts(t *testing.T) {
r := mkRuntime(t) eng := NewTestEngine(t)
defer nuke(r) r := mkRuntimeFromEngine(eng, t)
defer r.Nuke()
tmpDir := tempDir(t) tmpDir := tempDir(t)
defer os.RemoveAll(tmpDir) defer os.RemoveAll(tmpDir)
writeFile(path.Join(tmpDir, "touch-me"), "", t) writeFile(path.Join(tmpDir, "touch-me"), "", t)
// Test reading from a read-only bind mount // Test reading from a read-only bind mount
stdout, _ := runContainer(r, []string{"-v", fmt.Sprintf("%s:/tmp:ro", tmpDir), "_", "ls", "/tmp"}, t) stdout, _ := runContainer(eng, r, []string{"-v", fmt.Sprintf("%s:/tmp:ro", tmpDir), "_", "ls", "/tmp"}, t)
if !strings.Contains(stdout, "touch-me") { if !strings.Contains(stdout, "touch-me") {
t.Fatal("Container failed to read from bind mount") t.Fatal("Container failed to read from bind mount")
} }
// test writing to bind mount // test writing to bind mount
runContainer(r, []string{"-v", fmt.Sprintf("%s:/tmp:rw", tmpDir), "_", "touch", "/tmp/holla"}, t) runContainer(eng, r, []string{"-v", fmt.Sprintf("%s:/tmp:rw", tmpDir), "_", "touch", "/tmp/holla"}, t)
readFile(path.Join(tmpDir, "holla"), t) // Will fail if the file doesn't exist readFile(path.Join(tmpDir, "holla"), t) // Will fail if the file doesn't exist
// test mounting to an illegal destination directory // test mounting to an illegal destination directory
if _, err := runContainer(r, []string{"-v", fmt.Sprintf("%s:.", tmpDir), "_", "ls", "."}, nil); err == nil { if _, err := runContainer(eng, r, []string{"-v", fmt.Sprintf("%s:.", tmpDir), "_", "ls", "."}, nil); err == nil {
t.Fatal("Container bind mounted illegal directory") t.Fatal("Container bind mounted illegal directory")
} }
} }
@ -1344,7 +1259,7 @@ func TestFromVolumesInReadonlyMode(t *testing.T) {
runtime := mkRuntime(t) runtime := mkRuntime(t)
defer nuke(runtime) defer nuke(runtime)
container, _, err := runtime.Create( container, _, err := runtime.Create(
&Config{ &docker.Config{
Image: GetTestImage(runtime).ID, Image: GetTestImage(runtime).ID,
Cmd: []string{"/bin/echo", "-n", "foobar"}, Cmd: []string{"/bin/echo", "-n", "foobar"},
Volumes: map[string]struct{}{"/test": {}}, Volumes: map[string]struct{}{"/test": {}},
@ -1364,7 +1279,7 @@ func TestFromVolumesInReadonlyMode(t *testing.T) {
} }
container2, _, err := runtime.Create( container2, _, err := runtime.Create(
&Config{ &docker.Config{
Image: GetTestImage(runtime).ID, Image: GetTestImage(runtime).ID,
Cmd: []string{"/bin/echo", "-n", "foobar"}, Cmd: []string{"/bin/echo", "-n", "foobar"},
VolumesFrom: container.ID + ":ro", VolumesFrom: container.ID + ":ro",
@ -1405,7 +1320,7 @@ func TestVolumesFromReadonlyMount(t *testing.T) {
runtime := mkRuntime(t) runtime := mkRuntime(t)
defer nuke(runtime) defer nuke(runtime)
container, _, err := runtime.Create( container, _, err := runtime.Create(
&Config{ &docker.Config{
Image: GetTestImage(runtime).ID, Image: GetTestImage(runtime).ID,
Cmd: []string{"/bin/echo", "-n", "foobar"}, Cmd: []string{"/bin/echo", "-n", "foobar"},
Volumes: map[string]struct{}{"/test": {}}, Volumes: map[string]struct{}{"/test": {}},
@ -1425,7 +1340,7 @@ func TestVolumesFromReadonlyMount(t *testing.T) {
} }
container2, _, err := runtime.Create( container2, _, err := runtime.Create(
&Config{ &docker.Config{
Image: GetTestImage(runtime).ID, Image: GetTestImage(runtime).ID,
Cmd: []string{"/bin/echo", "-n", "foobar"}, Cmd: []string{"/bin/echo", "-n", "foobar"},
VolumesFrom: container.ID, VolumesFrom: container.ID,
@ -1461,7 +1376,7 @@ func TestRestartWithVolumes(t *testing.T) {
runtime := mkRuntime(t) runtime := mkRuntime(t)
defer nuke(runtime) defer nuke(runtime)
container, _, err := runtime.Create(&Config{ container, _, err := runtime.Create(&docker.Config{
Image: GetTestImage(runtime).ID, Image: GetTestImage(runtime).ID,
Cmd: []string{"echo", "-n", "foobar"}, Cmd: []string{"echo", "-n", "foobar"},
Volumes: map[string]struct{}{"/test": {}}, Volumes: map[string]struct{}{"/test": {}},
@ -1505,7 +1420,7 @@ func TestVolumesFromWithVolumes(t *testing.T) {
runtime := mkRuntime(t) runtime := mkRuntime(t)
defer nuke(runtime) defer nuke(runtime)
container, _, err := runtime.Create(&Config{ container, _, err := runtime.Create(&docker.Config{
Image: GetTestImage(runtime).ID, Image: GetTestImage(runtime).ID,
Cmd: []string{"sh", "-c", "echo -n bar > /test/foo"}, Cmd: []string{"sh", "-c", "echo -n bar > /test/foo"},
Volumes: map[string]struct{}{"/test": {}}, Volumes: map[string]struct{}{"/test": {}},
@ -1534,7 +1449,7 @@ func TestVolumesFromWithVolumes(t *testing.T) {
} }
container2, _, err := runtime.Create( container2, _, err := runtime.Create(
&Config{ &docker.Config{
Image: GetTestImage(runtime).ID, Image: GetTestImage(runtime).ID,
Cmd: []string{"cat", "/test/foo"}, Cmd: []string{"cat", "/test/foo"},
VolumesFrom: container.ID, VolumesFrom: container.ID,
@ -1568,26 +1483,42 @@ func TestVolumesFromWithVolumes(t *testing.T) {
} }
func TestOnlyLoopbackExistsWhenUsingDisableNetworkOption(t *testing.T) { func TestOnlyLoopbackExistsWhenUsingDisableNetworkOption(t *testing.T) {
runtime := mkRuntime(t) eng := NewTestEngine(t)
runtime := mkRuntimeFromEngine(eng, t)
defer nuke(runtime) defer nuke(runtime)
config, hc, _, err := ParseRun([]string{"-n=false", GetTestImage(runtime).ID, "ip", "addr", "show"}, nil) config, hc, _, err := docker.ParseRun([]string{"-n=false", GetTestImage(runtime).ID, "ip", "addr", "show"}, nil)
if err != nil { if err != nil {
t.Fatal(err) t.Fatal(err)
} }
c, _, err := runtime.Create(config, "")
if err != nil { jobCreate := eng.Job("create")
if err := jobCreate.ImportEnv(config); err != nil {
t.Fatal(err) t.Fatal(err)
} }
var id string
jobCreate.StdoutParseString(&id)
if err := jobCreate.Run(); err != nil {
t.Fatal(err)
}
// FIXME: this hack can be removed once Wait is a job
c := runtime.Get(id)
if c == nil {
t.Fatalf("Couldn't retrieve container %s from runtime", id)
}
stdout, err := c.StdoutPipe() stdout, err := c.StdoutPipe()
if err != nil { if err != nil {
t.Fatal(err) t.Fatal(err)
} }
defer runtime.Destroy(c)
c.hostConfig = hc jobStart := eng.Job("start", id)
if err := c.Start(); err != nil { if err := jobStart.ImportEnv(hc); err != nil {
t.Fatal(err) t.Fatal(err)
} }
if err := jobStart.Run(); err != nil {
t.Fatal(err)
}
c.WaitTimeout(500 * time.Millisecond) c.WaitTimeout(500 * time.Millisecond)
c.Wait() c.Wait()
output, err := ioutil.ReadAll(stdout) output, err := ioutil.ReadAll(stdout)
@ -1602,37 +1533,40 @@ func TestOnlyLoopbackExistsWhenUsingDisableNetworkOption(t *testing.T) {
if !strings.HasSuffix(interfaces[0], ": lo") { if !strings.HasSuffix(interfaces[0], ": lo") {
t.Fatalf("Wrong interface in test container: expected [*: lo], got %s", interfaces) t.Fatalf("Wrong interface in test container: expected [*: lo], got %s", interfaces)
} }
} }
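For reference, the converted tests above all follow the same create-then-start flow through the engine: a "create" job built from a Config, the container ID captured from the job's stdout, then a "start" job carrying the host configuration. The sketch below condenses that pattern for illustration only; it is not code from this commit, and it assumes the test helpers added in integration/utils_test.go further down (NewTestEngine, mkRuntimeFromEngine).

// Illustrative sketch of the engine job pattern used in the tests above.
func createAndStartViaJobs(eng *engine.Engine, config *docker.Config, hc *docker.HostConfig, t *testing.T) *docker.Container {
	jobCreate := eng.Job("create")
	if err := jobCreate.ImportEnv(config); err != nil {
		t.Fatal(err)
	}
	var id string
	jobCreate.StdoutParseString(&id) // the create job writes the new container ID to stdout
	if err := jobCreate.Run(); err != nil {
		t.Fatal(err)
	}
	jobStart := eng.Job("start", id)
	// Individual fields can also be set with Setenv/SetenvList/SetenvJson instead of ImportEnv.
	if err := jobStart.ImportEnv(hc); err != nil {
		t.Fatal(err)
	}
	if err := jobStart.Run(); err != nil {
		t.Fatal(err)
	}
	// Until Wait is exposed as a job, the container is fetched back from the runtime.
	c := mkRuntimeFromEngine(eng, t).Get(id)
	if c == nil {
		t.Fatalf("Couldn't retrieve container %s from runtime", id)
	}
	return c
}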
func TestPrivilegedCanMknod(t *testing.T) { func TestPrivilegedCanMknod(t *testing.T) {
runtime := mkRuntime(t) eng := NewTestEngine(t)
defer nuke(runtime) runtime := mkRuntimeFromEngine(eng, t)
if output, _ := runContainer(runtime, []string{"-privileged", "_", "sh", "-c", "mknod /tmp/sda b 8 0 && echo ok"}, t); output != "ok\n" { defer runtime.Nuke()
if output, _ := runContainer(eng, runtime, []string{"-privileged", "_", "sh", "-c", "mknod /tmp/sda b 8 0 && echo ok"}, t); output != "ok\n" {
t.Fatal("Could not mknod into privileged container") t.Fatal("Could not mknod into privileged container")
} }
} }
func TestPrivilegedCanMount(t *testing.T) { func TestPrivilegedCanMount(t *testing.T) {
runtime := mkRuntime(t) eng := NewTestEngine(t)
defer nuke(runtime) runtime := mkRuntimeFromEngine(eng, t)
if output, _ := runContainer(runtime, []string{"-privileged", "_", "sh", "-c", "mount -t tmpfs none /tmp && echo ok"}, t); output != "ok\n" { defer runtime.Nuke()
if output, _ := runContainer(eng, runtime, []string{"-privileged", "_", "sh", "-c", "mount -t tmpfs none /tmp && echo ok"}, t); output != "ok\n" {
t.Fatal("Could not mount into privileged container") t.Fatal("Could not mount into privileged container")
} }
} }
func TestPrivilegedCannotMknod(t *testing.T) { func TestPrivilegedCannotMknod(t *testing.T) {
runtime := mkRuntime(t) eng := NewTestEngine(t)
defer nuke(runtime) runtime := mkRuntimeFromEngine(eng, t)
if output, _ := runContainer(runtime, []string{"_", "sh", "-c", "mknod /tmp/sda b 8 0 || echo ok"}, t); output != "ok\n" { defer runtime.Nuke()
if output, _ := runContainer(eng, runtime, []string{"_", "sh", "-c", "mknod /tmp/sda b 8 0 || echo ok"}, t); output != "ok\n" {
t.Fatal("Could mknod into secure container") t.Fatal("Could mknod into secure container")
} }
} }
func TestPrivilegedCannotMount(t *testing.T) { func TestPrivilegedCannotMount(t *testing.T) {
runtime := mkRuntime(t) eng := NewTestEngine(t)
defer nuke(runtime) runtime := mkRuntimeFromEngine(eng, t)
if output, _ := runContainer(runtime, []string{"_", "sh", "-c", "mount -t tmpfs none /tmp || echo ok"}, t); output != "ok\n" { defer runtime.Nuke()
if output, _ := runContainer(eng, runtime, []string{"_", "sh", "-c", "mount -t tmpfs none /tmp || echo ok"}, t); output != "ok\n" {
t.Fatal("Could mount into secure container") t.Fatal("Could mount into secure container")
} }
} }
@ -1641,7 +1575,7 @@ func TestMultipleVolumesFrom(t *testing.T) {
runtime := mkRuntime(t) runtime := mkRuntime(t)
defer nuke(runtime) defer nuke(runtime)
container, _, err := runtime.Create(&Config{ container, _, err := runtime.Create(&docker.Config{
Image: GetTestImage(runtime).ID, Image: GetTestImage(runtime).ID,
Cmd: []string{"sh", "-c", "echo -n bar > /test/foo"}, Cmd: []string{"sh", "-c", "echo -n bar > /test/foo"},
Volumes: map[string]struct{}{"/test": {}}, Volumes: map[string]struct{}{"/test": {}},
@ -1670,7 +1604,7 @@ func TestMultipleVolumesFrom(t *testing.T) {
} }
container2, _, err := runtime.Create( container2, _, err := runtime.Create(
&Config{ &docker.Config{
Image: GetTestImage(runtime).ID, Image: GetTestImage(runtime).ID,
Cmd: []string{"sh", "-c", "echo -n bar > /other/foo"}, Cmd: []string{"sh", "-c", "echo -n bar > /other/foo"},
Volumes: map[string]struct{}{"/other": {}}, Volumes: map[string]struct{}{"/other": {}},
@ -1692,7 +1626,7 @@ func TestMultipleVolumesFrom(t *testing.T) {
} }
container3, _, err := runtime.Create( container3, _, err := runtime.Create(
&Config{ &docker.Config{
Image: GetTestImage(runtime).ID, Image: GetTestImage(runtime).ID,
Cmd: []string{"/bin/echo", "-n", "foobar"}, Cmd: []string{"/bin/echo", "-n", "foobar"},
VolumesFrom: strings.Join([]string{container.ID, container2.ID}, ","), VolumesFrom: strings.Join([]string{container.ID, container2.ID}, ","),
@ -1720,7 +1654,7 @@ func TestRestartGhost(t *testing.T) {
defer nuke(runtime) defer nuke(runtime)
container, _, err := runtime.Create( container, _, err := runtime.Create(
&Config{ &docker.Config{
Image: GetTestImage(runtime).ID, Image: GetTestImage(runtime).ID,
Cmd: []string{"sh", "-c", "echo -n bar > /test/foo"}, Cmd: []string{"sh", "-c", "echo -n bar > /test/foo"},
Volumes: map[string]struct{}{"/test": {}}, Volumes: map[string]struct{}{"/test": {}},

57
integration/graph_test.go Normal file

@ -0,0 +1,57 @@
package docker
import (
"github.com/dotcloud/docker"
"io/ioutil"
"os"
"path"
"testing"
)
func TestMount(t *testing.T) {
graph := tempGraph(t)
defer os.RemoveAll(graph.Root)
archive, err := fakeTar()
if err != nil {
t.Fatal(err)
}
image, err := graph.Create(archive, nil, "Testing", "", nil)
if err != nil {
t.Fatal(err)
}
tmp, err := ioutil.TempDir("", "docker-test-graph-mount-")
if err != nil {
t.Fatal(err)
}
defer os.RemoveAll(tmp)
rootfs := path.Join(tmp, "rootfs")
if err := os.MkdirAll(rootfs, 0700); err != nil {
t.Fatal(err)
}
rw := path.Join(tmp, "rw")
if err := os.MkdirAll(rw, 0700); err != nil {
t.Fatal(err)
}
if err := image.Mount(rootfs, rw); err != nil {
t.Fatal(err)
}
// FIXME: test for mount contents
defer func() {
if err := docker.Unmount(rootfs); err != nil {
t.Error(err)
}
}()
}
//FIXME: duplicate
func tempGraph(t *testing.T) *docker.Graph {
tmp, err := ioutil.TempDir("", "docker-graph-")
if err != nil {
t.Fatal(err)
}
graph, err := docker.NewGraph(tmp)
if err != nil {
t.Fatal(err)
}
return graph
}


@ -0,0 +1,22 @@
package docker
import (
"github.com/dotcloud/docker/iptables"
"os"
"testing"
)
// FIXME: this test should be a unit test.
// For example by mocking os/exec to make sure iptables is not actually called.
func TestIptables(t *testing.T) {
if _, err := iptables.Raw("-L"); err != nil {
t.Fatal(err)
}
path := os.Getenv("PATH")
os.Setenv("PATH", "")
defer os.Setenv("PATH", path)
if _, err := iptables.Raw("-L"); err == nil {
t.Fatal("Not finding iptables in the PATH should cause an error")
}
}
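As the FIXME above suggests, this could become a real unit test if the call to the iptables binary were injectable. The sketch below is illustration only and does not reflect the actual iptables package in this commit, which has no such hook; the package name and seam variable are hypothetical.

package iptablesmock // hypothetical example package, not part of this commit

import (
	"fmt"
	"os/exec"
)

// execCommand is a package-level seam: tests can replace it so the real
// iptables binary is never executed.
var execCommand = exec.Command

// Raw runs iptables with the given arguments through the injectable seam.
func Raw(args ...string) ([]byte, error) {
	output, err := execCommand("iptables", args...).CombinedOutput()
	if err != nil {
		return nil, fmt.Errorf("iptables failed: %s (%s)", err, output)
	}
	return output, nil
}

A test could then assign execCommand = func(name string, arg ...string) *exec.Cmd { return exec.Command("true") } and exercise Raw without touching the host firewall.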


@ -3,6 +3,7 @@ package docker
import ( import (
"bytes" "bytes"
"fmt" "fmt"
"github.com/dotcloud/docker"
"github.com/dotcloud/docker/engine" "github.com/dotcloud/docker/engine"
"github.com/dotcloud/docker/sysinit" "github.com/dotcloud/docker/sysinit"
"github.com/dotcloud/docker/utils" "github.com/dotcloud/docker/utils"
@ -15,7 +16,6 @@ import (
"runtime" "runtime"
"strconv" "strconv"
"strings" "strings"
"sync"
"syscall" "syscall"
"testing" "testing"
"time" "time"
@ -32,39 +32,33 @@ const (
) )
var ( var (
globalRuntime *Runtime // FIXME: globalRuntime is deprecated by globalEngine. All tests should be converted.
globalRuntime *docker.Runtime
globalEngine *engine.Engine
startFds int startFds int
startGoroutines int startGoroutines int
) )
func nuke(runtime *Runtime) error { // FIXME: nuke() is deprecated by Runtime.Nuke()
var wg sync.WaitGroup func nuke(runtime *docker.Runtime) error {
for _, container := range runtime.List() { return runtime.Nuke()
wg.Add(1)
go func(c *Container) {
c.Kill()
wg.Done()
}(container)
}
wg.Wait()
runtime.Close()
os.Remove(filepath.Join(runtime.config.Root, "linkgraph.db"))
return os.RemoveAll(runtime.config.Root)
} }
func cleanup(runtime *Runtime) error { // FIXME: cleanup and nuke are redundant.
func cleanup(eng *engine.Engine, t *testing.T) error {
runtime := mkRuntimeFromEngine(eng, t)
for _, container := range runtime.List() { for _, container := range runtime.List() {
container.Kill() container.Kill()
runtime.Destroy(container) runtime.Destroy(container)
} }
images, err := runtime.graph.Map() srv := mkServerFromEngine(eng, t)
images, err := srv.Images(true, "")
if err != nil { if err != nil {
return err return err
} }
for _, image := range images { for _, image := range images {
if image.ID != unitTestImageID { if image.ID != unitTestImageID {
runtime.graph.Delete(image.ID) srv.ImageDelete(image.ID, false)
} }
} }
return nil return nil
@ -133,10 +127,9 @@ func setupBaseImage() {
log.Fatalf("Unable to create a runtime for tests:", err) log.Fatalf("Unable to create a runtime for tests:", err)
} }
srv := mkServerFromEngine(eng, log.New(os.Stderr, "", 0)) srv := mkServerFromEngine(eng, log.New(os.Stderr, "", 0))
runtime := srv.runtime
// If the unit test is not found, try to download it. // If the unit test is not found, try to download it.
if img, err := runtime.repositories.LookupImage(unitTestImageName); err != nil || img.ID != unitTestImageID { if img, err := srv.ImageInspect(unitTestImageName); err != nil || img.ID != unitTestImageID {
// Retrieve the Image // Retrieve the Image
if err := srv.ImagePull(unitTestImageName, "", os.Stdout, utils.NewStreamFormatter(false), nil, nil, true); err != nil { if err := srv.ImagePull(unitTestImageName, "", os.Stdout, utils.NewStreamFormatter(false), nil, nil, true); err != nil {
log.Fatalf("Unable to pull the test image: %s", err) log.Fatalf("Unable to pull the test image: %s", err)
@ -151,8 +144,8 @@ func spawnGlobalDaemon() {
} }
t := log.New(os.Stderr, "", 0) t := log.New(os.Stderr, "", 0)
eng := NewTestEngine(t) eng := NewTestEngine(t)
srv := mkServerFromEngine(eng, t) globalEngine = eng
globalRuntime = srv.runtime globalRuntime = mkRuntimeFromEngine(eng, t)
// Spawn a Daemon // Spawn a Daemon
go func() { go func() {
@ -174,8 +167,8 @@ func spawnGlobalDaemon() {
// FIXME: test that ImagePull(json=true) send correct json output // FIXME: test that ImagePull(json=true) send correct json output
func GetTestImage(runtime *Runtime) *Image { func GetTestImage(runtime *docker.Runtime) *docker.Image {
imgs, err := runtime.graph.Map() imgs, err := runtime.Graph().Map()
if err != nil { if err != nil {
log.Fatalf("Unable to get the test image:", err) log.Fatalf("Unable to get the test image:", err)
} }
@ -184,7 +177,7 @@ func GetTestImage(runtime *Runtime) *Image {
return image return image
} }
} }
log.Fatalf("Test image %v not found in %s: %s", unitTestImageID, runtime.graph.Root, imgs) log.Fatalf("Test image %v not found in %s: %s", unitTestImageID, runtime.Graph().Root, imgs)
return nil return nil
} }
@ -197,7 +190,7 @@ func TestRuntimeCreate(t *testing.T) {
t.Errorf("Expected 0 containers, %v found", len(runtime.List())) t.Errorf("Expected 0 containers, %v found", len(runtime.List()))
} }
container, _, err := runtime.Create(&Config{ container, _, err := runtime.Create(&docker.Config{
Image: GetTestImage(runtime).ID, Image: GetTestImage(runtime).ID,
Cmd: []string{"ls", "-al"}, Cmd: []string{"ls", "-al"},
}, },
@ -239,12 +232,12 @@ func TestRuntimeCreate(t *testing.T) {
} }
// Make sure create with bad parameters returns an error // Make sure create with bad parameters returns an error
if _, _, err = runtime.Create(&Config{Image: GetTestImage(runtime).ID}, ""); err == nil { if _, _, err = runtime.Create(&docker.Config{Image: GetTestImage(runtime).ID}, ""); err == nil {
t.Fatal("Builder.Create should throw an error when Cmd is missing") t.Fatal("Builder.Create should throw an error when Cmd is missing")
} }
if _, _, err := runtime.Create( if _, _, err := runtime.Create(
&Config{ &docker.Config{
Image: GetTestImage(runtime).ID, Image: GetTestImage(runtime).ID,
Cmd: []string{}, Cmd: []string{},
}, },
@ -253,7 +246,7 @@ func TestRuntimeCreate(t *testing.T) {
t.Fatal("Builder.Create should throw an error when Cmd is empty") t.Fatal("Builder.Create should throw an error when Cmd is empty")
} }
config := &Config{ config := &docker.Config{
Image: GetTestImage(runtime).ID, Image: GetTestImage(runtime).ID,
Cmd: []string{"/bin/ls"}, Cmd: []string{"/bin/ls"},
PortSpecs: []string{"80"}, PortSpecs: []string{"80"},
@ -266,7 +259,7 @@ func TestRuntimeCreate(t *testing.T) {
} }
// test expose 80:8000 // test expose 80:8000
container, warnings, err := runtime.Create(&Config{ container, warnings, err := runtime.Create(&docker.Config{
Image: GetTestImage(runtime).ID, Image: GetTestImage(runtime).ID,
Cmd: []string{"ls", "-al"}, Cmd: []string{"ls", "-al"},
PortSpecs: []string{"80:8000"}, PortSpecs: []string{"80:8000"},
@ -285,7 +278,7 @@ func TestDestroy(t *testing.T) {
runtime := mkRuntime(t) runtime := mkRuntime(t)
defer nuke(runtime) defer nuke(runtime)
container, _, err := runtime.Create(&Config{ container, _, err := runtime.Create(&docker.Config{
Image: GetTestImage(runtime).ID, Image: GetTestImage(runtime).ID,
Cmd: []string{"ls", "-al"}, Cmd: []string{"ls", "-al"},
}, "") }, "")
@ -312,12 +305,6 @@ func TestDestroy(t *testing.T) {
t.Errorf("Unable to get newly created container") t.Errorf("Unable to get newly created container")
} }
// Make sure the container root directory does not exist anymore
_, err = os.Stat(container.root)
if err == nil || !os.IsNotExist(err) {
t.Errorf("Container root directory still exists after destroy")
}
// Test double destroy // Test double destroy
if err := runtime.Destroy(container); err == nil { if err := runtime.Destroy(container); err == nil {
// It should have failed // It should have failed
@ -329,13 +316,13 @@ func TestGet(t *testing.T) {
runtime := mkRuntime(t) runtime := mkRuntime(t)
defer nuke(runtime) defer nuke(runtime)
container1, _ := mkContainer(runtime, []string{"_", "ls", "-al"}, t) container1, _, _ := mkContainer(runtime, []string{"_", "ls", "-al"}, t)
defer runtime.Destroy(container1) defer runtime.Destroy(container1)
container2, _ := mkContainer(runtime, []string{"_", "ls", "-al"}, t) container2, _, _ := mkContainer(runtime, []string{"_", "ls", "-al"}, t)
defer runtime.Destroy(container2) defer runtime.Destroy(container2)
container3, _ := mkContainer(runtime, []string{"_", "ls", "-al"}, t) container3, _, _ := mkContainer(runtime, []string{"_", "ls", "-al"}, t)
defer runtime.Destroy(container3) defer runtime.Destroy(container3)
if runtime.Get(container1.ID) != container1 { if runtime.Get(container1.ID) != container1 {
@ -352,15 +339,21 @@ func TestGet(t *testing.T) {
} }
func startEchoServerContainer(t *testing.T, proto string) (*Runtime, *Container, string) { func startEchoServerContainer(t *testing.T, proto string) (*docker.Runtime, *docker.Container, string) {
var ( var (
err error err error
container *Container id string
strPort string strPort string
runtime = mkRuntime(t) eng = NewTestEngine(t)
port = 5554 runtime = mkRuntimeFromEngine(eng, t)
p Port port = 5554
p docker.Port
) )
defer func() {
if err != nil {
runtime.Nuke()
}
}()
for { for {
port += 1 port += 1
@ -373,37 +366,45 @@ func startEchoServerContainer(t *testing.T, proto string) (*Runtime, *Container,
} else { } else {
t.Fatal(fmt.Errorf("Unknown protocol %v", proto)) t.Fatal(fmt.Errorf("Unknown protocol %v", proto))
} }
ep := make(map[Port]struct{}, 1) ep := make(map[docker.Port]struct{}, 1)
p = Port(fmt.Sprintf("%s/%s", strPort, proto)) p = docker.Port(fmt.Sprintf("%s/%s", strPort, proto))
ep[p] = struct{}{} ep[p] = struct{}{}
container, _, err = runtime.Create(&Config{ jobCreate := eng.Job("create")
Image: GetTestImage(runtime).ID, jobCreate.Setenv("Image", unitTestImageID)
Cmd: []string{"sh", "-c", cmd}, jobCreate.SetenvList("Cmd", []string{"sh", "-c", cmd})
PortSpecs: []string{fmt.Sprintf("%s/%s", strPort, proto)}, jobCreate.SetenvList("PortSpecs", []string{fmt.Sprintf("%s/%s", strPort, proto)})
ExposedPorts: ep, jobCreate.SetenvJson("ExposedPorts", ep)
}, "") jobCreate.StdoutParseString(&id)
if err != nil { if err := jobCreate.Run(); err != nil {
nuke(runtime)
t.Fatal(err) t.Fatal(err)
} }
// FIXME: this relies on the undocumented behavior of runtime.Create
if container != nil { // which will return a nil error AND container if the exposed ports
// are invalid. That behavior should be fixed!
if id != "" {
break break
} }
t.Logf("Port %v already in use, trying another one", strPort) t.Logf("Port %v already in use, trying another one", strPort)
} }
container.hostConfig = &HostConfig{ jobStart := eng.Job("start", id)
PortBindings: make(map[Port][]PortBinding), portBindings := make(map[docker.Port][]docker.PortBinding)
} portBindings[p] = []docker.PortBinding{
container.hostConfig.PortBindings[p] = []PortBinding{
{}, {},
} }
if err := container.Start(); err != nil { if err := jobStart.SetenvJson("PortsBindings", portBindings); err != nil {
nuke(runtime)
t.Fatal(err) t.Fatal(err)
} }
if err := jobStart.Run(); err != nil {
t.Fatal(err)
}
container := runtime.Get(id)
if container == nil {
t.Fatalf("Couldn't fetch test container %s", id)
}
setTimeout(t, "Waiting for the container to be started timed out", 2*time.Second, func() { setTimeout(t, "Waiting for the container to be started timed out", 2*time.Second, func() {
for !container.State.Running { for !container.State.Running {
@ -504,14 +505,15 @@ func TestAllocateUDPPortLocalhost(t *testing.T) {
} }
func TestRestore(t *testing.T) { func TestRestore(t *testing.T) {
runtime1 := mkRuntime(t) eng := NewTestEngine(t)
defer nuke(runtime1) runtime1 := mkRuntimeFromEngine(eng, t)
defer runtime1.Nuke()
// Create a container with one instance of docker // Create a container with one instance of docker
container1, _ := mkContainer(runtime1, []string{"_", "ls", "-al"}, t) container1, _, _ := mkContainer(runtime1, []string{"_", "ls", "-al"}, t)
defer runtime1.Destroy(container1) defer runtime1.Destroy(container1)
// Create a second container meant to be killed // Create a second container meant to be killed
container2, _ := mkContainer(runtime1, []string{"-i", "_", "/bin/cat"}, t) container2, _, _ := mkContainer(runtime1, []string{"-i", "_", "/bin/cat"}, t)
defer runtime1.Destroy(container2) defer runtime1.Destroy(container2)
// Start the container non blocking // Start the container non blocking
@ -545,12 +547,19 @@ func TestRestore(t *testing.T) {
// Here we are simulating a docker restart - that is, reloading all containers // Here we are simulating a docker restart - that is, reloading all containers
// from scratch // from scratch
runtime1.config.AutoRestart = false root := eng.Root()
runtime2, err := NewRuntimeFromDirectory(runtime1.config) eng, err := engine.New(root)
if err != nil { if err != nil {
t.Fatal(err) t.Fatal(err)
} }
defer nuke(runtime2) job := eng.Job("initapi")
job.Setenv("Root", eng.Root())
job.SetenvBool("Autorestart", false)
if err := job.Run(); err != nil {
t.Fatal(err)
}
runtime2 := mkRuntimeFromEngine(eng, t)
if len(runtime2.List()) != 2 { if len(runtime2.List()) != 2 {
t.Errorf("Expected 2 container, %v found", len(runtime2.List())) t.Errorf("Expected 2 container, %v found", len(runtime2.List()))
} }
@ -575,14 +584,31 @@ func TestRestore(t *testing.T) {
} }
func TestReloadContainerLinks(t *testing.T) { func TestReloadContainerLinks(t *testing.T) {
runtime1 := mkRuntime(t) // FIXME: here we don't use NewTestEngine because it calls initapi with Autorestart=false,
// and we want to set it to true.
root, err := newTestDirectory(unitTestStoreBase)
if err != nil {
t.Fatal(err)
}
eng, err := engine.New(root)
if err != nil {
t.Fatal(err)
}
job := eng.Job("initapi")
job.Setenv("Root", eng.Root())
job.SetenvBool("Autorestart", true)
if err := job.Run(); err != nil {
t.Fatal(err)
}
runtime1 := mkRuntimeFromEngine(eng, t)
defer nuke(runtime1) defer nuke(runtime1)
// Create a container with one instance of docker // Create a container with one instance of docker
container1, _ := mkContainer(runtime1, []string{"-i", "_", "/bin/sh"}, t) container1, _, _ := mkContainer(runtime1, []string{"-i", "_", "/bin/sh"}, t)
defer runtime1.Destroy(container1) defer runtime1.Destroy(container1)
// Create a second container meant to be killed // Create a second container meant to be killed
container2, _ := mkContainer(runtime1, []string{"-i", "_", "/bin/cat"}, t) container2, _, _ := mkContainer(runtime1, []string{"-i", "_", "/bin/cat"}, t)
defer runtime1.Destroy(container2) defer runtime1.Destroy(container2)
// Start the container non blocking // Start the container non blocking
@ -590,7 +616,9 @@ func TestReloadContainerLinks(t *testing.T) {
t.Fatal(err) t.Fatal(err)
} }
// Add a link to container 2 // Add a link to container 2
container1.hostConfig.Links = []string{"/" + container2.ID + ":first"} // FIXME @shykes: setting hostConfig.Links seems redundant with calling RegisterLink().
// Why do we need it @crosbymichael?
// container1.hostConfig.Links = []string{"/" + container2.ID + ":first"}
if err := runtime1.RegisterLink(container1, container2, "first"); err != nil { if err := runtime1.RegisterLink(container1, container2, "first"); err != nil {
t.Fatal(err) t.Fatal(err)
} }
@ -612,12 +640,18 @@ func TestReloadContainerLinks(t *testing.T) {
// Here we are simulating a docker restart - that is, reloading all containers // Here we are simulating a docker restart - that is, reloading all containers
// from scratch // from scratch
runtime1.config.AutoRestart = true eng, err = engine.New(root)
runtime2, err := NewRuntimeFromDirectory(runtime1.config)
if err != nil { if err != nil {
t.Fatal(err) t.Fatal(err)
} }
defer nuke(runtime2) job = eng.Job("initapi")
job.Setenv("Root", eng.Root())
job.SetenvBool("Autorestart", false)
if err := job.Run(); err != nil {
t.Fatal(err)
}
runtime2 := mkRuntimeFromEngine(eng, t)
if len(runtime2.List()) != 2 { if len(runtime2.List()) != 2 {
t.Errorf("Expected 2 container, %v found", len(runtime2.List())) t.Errorf("Expected 2 container, %v found", len(runtime2.List()))
} }
@ -631,27 +665,32 @@ func TestReloadContainerLinks(t *testing.T) {
t.Fatalf("Expected 2 container alive, %d found", runningCount) t.Fatalf("Expected 2 container alive, %d found", runningCount)
} }
// FIXME: we no longer test if containers were registered in the right order,
// because there is no public way to inspect the registration order.
// Make sure container 2 ( the child of container 1 ) was registered and started first // Make sure container 2 ( the child of container 1 ) was registered and started first
// with the runtime // with the runtime
first := runtime2.containers.Front() //
if first.Value.(*Container).ID != container2.ID { containers := runtime2.List()
if len(containers) == 0 {
t.Fatalf("Runtime has no containers")
}
first := containers[0]
if first.ID != container2.ID {
t.Fatalf("Container 2 %s should be registered first in the runtime", container2.ID) t.Fatalf("Container 2 %s should be registered first in the runtime", container2.ID)
} }
// Verify that the link is still registered in the runtime // Verify that the link is still registered in the runtime
entity := runtime2.containerGraph.Get(container1.Name) if c := runtime2.Get(container1.Name); c == nil {
if entity == nil { t.Fatal("Named container is no longer registered after restart")
t.Fatal("Entity should not be nil")
} }
} }
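The restart simulation used in TestRestore and TestReloadContainerLinks reduces to one step: build a fresh engine over the same root and re-run initapi. A condensed sketch of that step, for illustration only (not code from this commit), using the same package-level helpers:

// Sketch: simulate a daemon restart by re-initializing an engine on an existing root.
func restartEngine(root string, autorestart bool, t *testing.T) *docker.Runtime {
	eng, err := engine.New(root)
	if err != nil {
		t.Fatal(err)
	}
	job := eng.Job("initapi")
	job.Setenv("Root", root)
	job.SetenvBool("Autorestart", autorestart)
	if err := job.Run(); err != nil {
		t.Fatal(err)
	}
	return mkRuntimeFromEngine(eng, t)
}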
func TestDefaultContainerName(t *testing.T) { func TestDefaultContainerName(t *testing.T) {
eng := NewTestEngine(t) eng := NewTestEngine(t)
srv := mkServerFromEngine(eng, t) runtime := mkRuntimeFromEngine(eng, t)
runtime := srv.runtime
defer nuke(runtime) defer nuke(runtime)
config, _, _, err := ParseRun([]string{GetTestImage(runtime).ID, "echo test"}, nil) config, _, _, err := docker.ParseRun([]string{unitTestImageID, "echo test"}, nil)
if err != nil { if err != nil {
t.Fatal(err) t.Fatal(err)
} }
@ -663,29 +702,19 @@ func TestDefaultContainerName(t *testing.T) {
t.Fatalf("Expect /some_name got %s", container.Name) t.Fatalf("Expect /some_name got %s", container.Name)
} }
paths := runtime.containerGraph.RefPaths(containerID) if c := runtime.Get("/some_name"); c == nil {
if paths == nil || len(paths) == 0 { t.Fatalf("Couldn't retrieve test container as /some_name")
t.Fatalf("Could not find edges for %s", containerID) } else if c.ID != containerID {
} t.Fatalf("Container /some_name has ID %s instead of %s", c.ID, containerID)
edge := paths[0]
if edge.ParentID != "0" {
t.Fatalf("Expected engine got %s", edge.ParentID)
}
if edge.EntityID != containerID {
t.Fatalf("Expected %s got %s", containerID, edge.EntityID)
}
if edge.Name != "some_name" {
t.Fatalf("Expected some_name got %s", edge.Name)
} }
} }
func TestRandomContainerName(t *testing.T) { func TestRandomContainerName(t *testing.T) {
eng := NewTestEngine(t) eng := NewTestEngine(t)
srv := mkServerFromEngine(eng, t) runtime := mkRuntimeFromEngine(eng, t)
runtime := srv.runtime
defer nuke(runtime) defer nuke(runtime)
config, _, _, err := ParseRun([]string{GetTestImage(runtime).ID, "echo test"}, nil) config, _, _, err := docker.ParseRun([]string{GetTestImage(runtime).ID, "echo test"}, nil)
if err != nil { if err != nil {
t.Fatal(err) t.Fatal(err)
} }
@ -697,29 +726,19 @@ func TestRandomContainerName(t *testing.T) {
t.Fatalf("Expected not empty container name") t.Fatalf("Expected not empty container name")
} }
paths := runtime.containerGraph.RefPaths(containerID) if c := runtime.Get(container.Name); c == nil {
if paths == nil || len(paths) == 0 { log.Fatalf("Could not lookup container %s by its name", container.Name)
t.Fatalf("Could not find edges for %s", containerID) } else if c.ID != containerID {
} log.Fatalf("Looking up container name %s returned id %s instead of %s", container.Name, c.ID, containerID)
edge := paths[0]
if edge.ParentID != "0" {
t.Fatalf("Expected engine got %s", edge.ParentID)
}
if edge.EntityID != containerID {
t.Fatalf("Expected %s got %s", containerID, edge.EntityID)
}
if edge.Name == "" {
t.Fatalf("Expected not empty container name")
} }
} }
func TestLinkChildContainer(t *testing.T) { func TestLinkChildContainer(t *testing.T) {
eng := NewTestEngine(t) eng := NewTestEngine(t)
srv := mkServerFromEngine(eng, t) runtime := mkRuntimeFromEngine(eng, t)
runtime := srv.runtime
defer nuke(runtime) defer nuke(runtime)
config, _, _, err := ParseRun([]string{GetTestImage(runtime).ID, "echo test"}, nil) config, _, _, err := docker.ParseRun([]string{unitTestImageID, "echo test"}, nil)
if err != nil { if err != nil {
t.Fatal(err) t.Fatal(err)
} }
@ -735,7 +754,7 @@ func TestLinkChildContainer(t *testing.T) {
t.Fatalf("Expect webapp id to match container id: %s != %s", webapp.ID, container.ID) t.Fatalf("Expect webapp id to match container id: %s != %s", webapp.ID, container.ID)
} }
config, _, _, err = ParseRun([]string{GetTestImage(runtime).ID, "echo test"}, nil) config, _, _, err = docker.ParseRun([]string{GetTestImage(runtime).ID, "echo test"}, nil)
if err != nil { if err != nil {
t.Fatal(err) t.Fatal(err)
} }
@ -758,11 +777,10 @@ func TestLinkChildContainer(t *testing.T) {
func TestGetAllChildren(t *testing.T) { func TestGetAllChildren(t *testing.T) {
eng := NewTestEngine(t) eng := NewTestEngine(t)
srv := mkServerFromEngine(eng, t) runtime := mkRuntimeFromEngine(eng, t)
runtime := srv.runtime
defer nuke(runtime) defer nuke(runtime)
config, _, _, err := ParseRun([]string{GetTestImage(runtime).ID, "echo test"}, nil) config, _, _, err := docker.ParseRun([]string{unitTestImageID, "echo test"}, nil)
if err != nil { if err != nil {
t.Fatal(err) t.Fatal(err)
} }
@ -778,7 +796,7 @@ func TestGetAllChildren(t *testing.T) {
t.Fatalf("Expect webapp id to match container id: %s != %s", webapp.ID, container.ID) t.Fatalf("Expect webapp id to match container id: %s != %s", webapp.ID, container.ID)
} }
config, _, _, err = ParseRun([]string{GetTestImage(runtime).ID, "echo test"}, nil) config, _, _, err = docker.ParseRun([]string{unitTestImageID, "echo test"}, nil)
if err != nil { if err != nil {
t.Fatal(err) t.Fatal(err)
} }
@ -810,19 +828,3 @@ func TestGetAllChildren(t *testing.T) {
} }
} }
} }
func TestGetFullName(t *testing.T) {
runtime := mkRuntime(t)
defer nuke(runtime)
name, err := runtime.getFullName("testing")
if err != nil {
t.Fatal(err)
}
if name != "/testing" {
t.Fatalf("Expected /testing got %s", name)
}
if _, err := runtime.getFullName(""); err == nil {
t.Fatal("Error should not be nil")
}
}


@ -1,32 +1,31 @@
package docker package docker
import ( import (
"github.com/dotcloud/docker"
"github.com/dotcloud/docker/utils" "github.com/dotcloud/docker/utils"
"io/ioutil" "io/ioutil"
"strings" "strings"
"testing" "testing"
"time"
) )
func TestContainerTagImageDelete(t *testing.T) { func TestContainerTagImageDelete(t *testing.T) {
runtime := mkRuntime(t) eng := NewTestEngine(t)
defer nuke(runtime) defer mkRuntimeFromEngine(eng, t).Nuke()
srv := &Server{runtime: runtime} srv := mkServerFromEngine(eng, t)
initialImages, err := srv.Images(false, "") initialImages, err := srv.Images(false, "")
if err != nil { if err != nil {
t.Fatal(err) t.Fatal(err)
} }
if err := srv.ContainerTag(unitTestImageName, "utest", "tag1", false); err != nil {
if err := srv.runtime.repositories.Set("utest", "tag1", unitTestImageName, false); err != nil {
t.Fatal(err) t.Fatal(err)
} }
if err := srv.runtime.repositories.Set("utest/docker", "tag2", unitTestImageName, false); err != nil { if err := srv.ContainerTag(unitTestImageName, "utest/docker", "tag2", false); err != nil {
t.Fatal(err) t.Fatal(err)
} }
if err := srv.runtime.repositories.Set("utest:5000/docker", "tag3", unitTestImageName, false); err != nil { if err := srv.ContainerTag(unitTestImageName, "utest:5000/docker", "tag3", false); err != nil {
t.Fatal(err) t.Fatal(err)
} }
@ -82,46 +81,43 @@ func TestContainerTagImageDelete(t *testing.T) {
func TestCreateRm(t *testing.T) { func TestCreateRm(t *testing.T) {
eng := NewTestEngine(t) eng := NewTestEngine(t)
srv := mkServerFromEngine(eng, t) srv := mkServerFromEngine(eng, t)
runtime := srv.runtime defer mkRuntimeFromEngine(eng, t).Nuke()
defer nuke(runtime)
config, _, _, err := ParseRun([]string{GetTestImage(runtime).ID, "echo test"}, nil) config, _, _, err := docker.ParseRun([]string{unitTestImageID, "echo test"}, nil)
if err != nil { if err != nil {
t.Fatal(err) t.Fatal(err)
} }
id := createTestContainer(eng, config, t) id := createTestContainer(eng, config, t)
if len(runtime.List()) != 1 { if c := srv.Containers(true, false, -1, "", ""); len(c) != 1 {
t.Errorf("Expected 1 container, %v found", len(runtime.List())) t.Errorf("Expected 1 container, %v found", len(c))
} }
if err = srv.ContainerDestroy(id, true, false); err != nil { if err = srv.ContainerDestroy(id, true, false); err != nil {
t.Fatal(err) t.Fatal(err)
} }
if len(runtime.List()) != 0 { if c := srv.Containers(true, false, -1, "", ""); len(c) != 0 {
t.Errorf("Expected 0 container, %v found", len(runtime.List())) t.Errorf("Expected 0 container, %v found", len(c))
} }
} }
func TestCreateRmVolumes(t *testing.T) { func TestCreateRmVolumes(t *testing.T) {
eng := NewTestEngine(t) eng := NewTestEngine(t)
srv := mkServerFromEngine(eng, t) srv := mkServerFromEngine(eng, t)
runtime := srv.runtime defer mkRuntimeFromEngine(eng, t).Nuke()
defer nuke(runtime)
config, hostConfig, _, err := ParseRun([]string{"-v", "/srv", GetTestImage(runtime).ID, "echo test"}, nil) config, hostConfig, _, err := docker.ParseRun([]string{"-v", "/srv", unitTestImageID, "echo test"}, nil)
if err != nil { if err != nil {
t.Fatal(err) t.Fatal(err)
} }
id := createTestContainer(eng, config, t) id := createTestContainer(eng, config, t)
if len(runtime.List()) != 1 { if c := srv.Containers(true, false, -1, "", ""); len(c) != 1 {
t.Errorf("Expected 1 container, %v found", len(runtime.List())) t.Errorf("Expected 1 container, %v found", len(c))
} }
job := eng.Job("start", id) job := eng.Job("start", id)
@ -141,18 +137,17 @@ func TestCreateRmVolumes(t *testing.T) {
t.Fatal(err) t.Fatal(err)
} }
if len(runtime.List()) != 0 { if c := srv.Containers(true, false, -1, "", ""); len(c) != 0 {
t.Errorf("Expected 0 container, %v found", len(runtime.List())) t.Errorf("Expected 0 container, %v found", len(c))
} }
} }
func TestCommit(t *testing.T) { func TestCommit(t *testing.T) {
eng := NewTestEngine(t) eng := NewTestEngine(t)
srv := mkServerFromEngine(eng, t) srv := mkServerFromEngine(eng, t)
runtime := srv.runtime defer mkRuntimeFromEngine(eng, t).Nuke()
defer nuke(runtime)
config, _, _, err := ParseRun([]string{GetTestImage(runtime).ID, "/bin/cat"}, nil) config, _, _, err := docker.ParseRun([]string{unitTestImageID, "/bin/cat"}, nil)
if err != nil { if err != nil {
t.Fatal(err) t.Fatal(err)
} }
@ -167,18 +162,17 @@ func TestCommit(t *testing.T) {
func TestCreateStartRestartStopStartKillRm(t *testing.T) { func TestCreateStartRestartStopStartKillRm(t *testing.T) {
eng := NewTestEngine(t) eng := NewTestEngine(t)
srv := mkServerFromEngine(eng, t) srv := mkServerFromEngine(eng, t)
runtime := srv.runtime defer mkRuntimeFromEngine(eng, t).Nuke()
defer nuke(runtime)
config, hostConfig, _, err := ParseRun([]string{GetTestImage(runtime).ID, "/bin/cat"}, nil) config, hostConfig, _, err := docker.ParseRun([]string{unitTestImageID, "/bin/cat"}, nil)
if err != nil { if err != nil {
t.Fatal(err) t.Fatal(err)
} }
id := createTestContainer(eng, config, t) id := createTestContainer(eng, config, t)
if len(runtime.List()) != 1 { if c := srv.Containers(true, false, -1, "", ""); len(c) != 1 {
t.Errorf("Expected 1 container, %v found", len(runtime.List())) t.Errorf("Expected 1 container, %v found", len(c))
} }
job := eng.Job("start", id) job := eng.Job("start", id)
@ -214,21 +208,18 @@ func TestCreateStartRestartStopStartKillRm(t *testing.T) {
t.Fatal(err) t.Fatal(err)
} }
if len(runtime.List()) != 0 { if c := srv.Containers(true, false, -1, "", ""); len(c) != 0 {
t.Errorf("Expected 0 container, %v found", len(runtime.List())) t.Errorf("Expected 0 container, %v found", len(c))
} }
} }
func TestRunWithTooLowMemoryLimit(t *testing.T) { func TestRunWithTooLowMemoryLimit(t *testing.T) {
eng := NewTestEngine(t) eng := NewTestEngine(t)
srv := mkServerFromEngine(eng, t) defer mkRuntimeFromEngine(eng, t).Nuke()
runtime := srv.runtime
defer nuke(runtime)
// Try to create a container with a memory limit of 1 byte less than the minimum allowed limit. // Try to create a container with a memory limit of 1 byte less than the minimum allowed limit.
job := eng.Job("create") job := eng.Job("create")
job.Setenv("Image", GetTestImage(runtime).ID) job.Setenv("Image", unitTestImageID)
job.Setenv("Memory", "524287") job.Setenv("Memory", "524287")
job.Setenv("CpuShares", "1000") job.Setenv("CpuShares", "1000")
job.SetenvList("Cmd", []string{"/bin/cat"}) job.SetenvList("Cmd", []string{"/bin/cat"})
@ -239,163 +230,17 @@ func TestRunWithTooLowMemoryLimit(t *testing.T) {
} }
} }
func TestContainerTop(t *testing.T) {
t.Skip("Fixme. Skipping test for now. Reported error: 'server_test.go:236: Expected 2 processes, found 1.'")
runtime := mkRuntime(t)
defer nuke(runtime)
srv := &Server{runtime: runtime}
c, _ := mkContainer(runtime, []string{"_", "/bin/sh", "-c", "sleep 2"}, t)
c, err := mkContainer(runtime, []string{"_", "/bin/sh", "-c", "sleep 2"}, t)
if err != nil {
t.Fatal(err)
}
defer runtime.Destroy(c)
if err := c.Start(); err != nil {
t.Fatal(err)
}
// Give some time to the process to start
c.WaitTimeout(500 * time.Millisecond)
if !c.State.Running {
t.Errorf("Container should be running")
}
procs, err := srv.ContainerTop(c.ID, "")
if err != nil {
t.Fatal(err)
}
if len(procs.Processes) != 2 {
t.Fatalf("Expected 2 processes, found %d.", len(procs.Processes))
}
pos := -1
for i := 0; i < len(procs.Titles); i++ {
if procs.Titles[i] == "CMD" {
pos = i
break
}
}
if pos == -1 {
t.Fatalf("Expected CMD, not found.")
}
if procs.Processes[0][pos] != "sh" && procs.Processes[0][pos] != "busybox" {
t.Fatalf("Expected `busybox` or `sh`, found %s.", procs.Processes[0][pos])
}
if procs.Processes[1][pos] != "sh" && procs.Processes[1][pos] != "busybox" {
t.Fatalf("Expected `busybox` or `sh`, found %s.", procs.Processes[1][pos])
}
}
func TestPools(t *testing.T) {
runtime := mkRuntime(t)
srv := &Server{
runtime: runtime,
pullingPool: make(map[string]struct{}),
pushingPool: make(map[string]struct{}),
}
defer nuke(runtime)
err := srv.poolAdd("pull", "test1")
if err != nil {
t.Fatal(err)
}
err = srv.poolAdd("pull", "test2")
if err != nil {
t.Fatal(err)
}
err = srv.poolAdd("push", "test1")
if err == nil || err.Error() != "pull test1 is already in progress" {
t.Fatalf("Expected `pull test1 is already in progress`")
}
err = srv.poolAdd("pull", "test1")
if err == nil || err.Error() != "pull test1 is already in progress" {
t.Fatalf("Expected `pull test1 is already in progress`")
}
err = srv.poolAdd("wait", "test3")
if err == nil || err.Error() != "Unknown pool type" {
t.Fatalf("Expected `Unknown pool type`")
}
err = srv.poolRemove("pull", "test2")
if err != nil {
t.Fatal(err)
}
err = srv.poolRemove("pull", "test2")
if err != nil {
t.Fatal(err)
}
err = srv.poolRemove("pull", "test1")
if err != nil {
t.Fatal(err)
}
err = srv.poolRemove("push", "test1")
if err != nil {
t.Fatal(err)
}
err = srv.poolRemove("wait", "test3")
if err == nil || err.Error() != "Unknown pool type" {
t.Fatalf("Expected `Unknown pool type`")
}
}
func TestLogEvent(t *testing.T) {
runtime := mkRuntime(t)
defer nuke(runtime)
srv := &Server{
runtime: runtime,
events: make([]utils.JSONMessage, 0, 64),
listeners: make(map[string]chan utils.JSONMessage),
}
srv.LogEvent("fakeaction", "fakeid", "fakeimage")
listener := make(chan utils.JSONMessage)
srv.Lock()
srv.listeners["test"] = listener
srv.Unlock()
srv.LogEvent("fakeaction2", "fakeid", "fakeimage")
if len(srv.events) != 2 {
t.Fatalf("Expected 2 events, found %d", len(srv.events))
}
go func() {
time.Sleep(200 * time.Millisecond)
srv.LogEvent("fakeaction3", "fakeid", "fakeimage")
time.Sleep(200 * time.Millisecond)
srv.LogEvent("fakeaction4", "fakeid", "fakeimage")
}()
setTimeout(t, "Listening for events timed out", 2*time.Second, func() {
for i := 2; i < 4; i++ {
event := <-listener
if event != srv.events[i] {
t.Fatalf("Event received it different than expected")
}
}
})
}
func TestRmi(t *testing.T) { func TestRmi(t *testing.T) {
eng := NewTestEngine(t) eng := NewTestEngine(t)
srv := mkServerFromEngine(eng, t) srv := mkServerFromEngine(eng, t)
runtime := srv.runtime defer mkRuntimeFromEngine(eng, t).Nuke()
defer nuke(runtime)
initialImages, err := srv.Images(false, "") initialImages, err := srv.Images(false, "")
if err != nil { if err != nil {
t.Fatal(err) t.Fatal(err)
} }
config, hostConfig, _, err := ParseRun([]string{GetTestImage(runtime).ID, "echo test"}, nil) config, hostConfig, _, err := docker.ParseRun([]string{unitTestImageID, "echo test"}, nil)
if err != nil { if err != nil {
t.Fatal(err) t.Fatal(err)
} }
@ -471,19 +316,19 @@ func TestRmi(t *testing.T) {
} }
func TestImagesFilter(t *testing.T) { func TestImagesFilter(t *testing.T) {
runtime := mkRuntime(t) eng := NewTestEngine(t)
defer nuke(runtime) defer nuke(mkRuntimeFromEngine(eng, t))
srv := &Server{runtime: runtime} srv := mkServerFromEngine(eng, t)
if err := srv.runtime.repositories.Set("utest", "tag1", unitTestImageName, false); err != nil { if err := srv.ContainerTag(unitTestImageName, "utest", "tag1", false); err != nil {
t.Fatal(err) t.Fatal(err)
} }
if err := srv.runtime.repositories.Set("utest/docker", "tag2", unitTestImageName, false); err != nil { if err := srv.ContainerTag(unitTestImageName, "utest/docker", "tag2", false); err != nil {
t.Fatal(err) t.Fatal(err)
} }
if err := srv.runtime.repositories.Set("utest:5000/docker", "tag3", unitTestImageName, false); err != nil { if err := srv.ContainerTag(unitTestImageName, "utest:5000/docker", "tag3", false); err != nil {
t.Fatal(err) t.Fatal(err)
} }
@ -525,9 +370,9 @@ func TestImagesFilter(t *testing.T) {
} }
func TestImageInsert(t *testing.T) { func TestImageInsert(t *testing.T) {
runtime := mkRuntime(t) eng := NewTestEngine(t)
defer nuke(runtime) defer mkRuntimeFromEngine(eng, t).Nuke()
srv := &Server{runtime: runtime} srv := mkServerFromEngine(eng, t)
sf := utils.NewStreamFormatter(true) sf := utils.NewStreamFormatter(true)
// bad image name fails // bad image name fails
@ -536,12 +381,12 @@ func TestImageInsert(t *testing.T) {
} }
// bad url fails // bad url fails
if err := srv.ImageInsert(GetTestImage(runtime).ID, "http://bad_host_name_that_will_totally_fail.com/", "/foo", ioutil.Discard, sf); err == nil { if err := srv.ImageInsert(unitTestImageID, "http://bad_host_name_that_will_totally_fail.com/", "/foo", ioutil.Discard, sf); err == nil {
t.Fatal("expected an error and got none") t.Fatal("expected an error and got none")
} }
// success returns nil // success returns nil
if err := srv.ImageInsert(GetTestImage(runtime).ID, "https://www.docker.io/static/img/docker-top-logo.png", "/foo", ioutil.Discard, sf); err != nil { if err := srv.ImageInsert(unitTestImageID, "https://www.docker.io/static/img/docker-top-logo.png", "/foo", ioutil.Discard, sf); err != nil {
t.Fatalf("expected no error, but got %v", err) t.Fatalf("expected no error, but got %v", err)
} }
} }


@ -0,0 +1,63 @@
package docker
import (
"github.com/dotcloud/docker"
"github.com/dotcloud/docker/utils"
"io/ioutil"
"testing"
"time"
)
func TestServerListOrderedImagesByCreationDate(t *testing.T) {
eng := NewTestEngine(t)
defer mkRuntimeFromEngine(eng, t).Nuke()
srv := mkServerFromEngine(eng, t)
if err := generateImage("", srv); err != nil {
t.Fatal(err)
}
images, err := srv.Images(true, "")
if err != nil {
t.Fatal(err)
}
if images[0].Created < images[1].Created {
t.Error("Expected []APIImges to be ordered by most recent creation date.")
}
}
func TestServerListOrderedImagesByCreationDateAndTag(t *testing.T) {
eng := NewTestEngine(t)
defer mkRuntimeFromEngine(eng, t).Nuke()
srv := mkServerFromEngine(eng, t)
err := generateImage("bar", srv)
if err != nil {
t.Fatal(err)
}
time.Sleep(time.Second)
err = generateImage("zed", srv)
if err != nil {
t.Fatal(err)
}
images, err := srv.Images(true, "")
if err != nil {
t.Fatal(err)
}
if images[0].RepoTags[0] != "repo:zed" && images[0].RepoTags[0] != "repo:bar" {
t.Errorf("Expected []APIImges to be ordered by most recent creation date. %s", images)
}
}
func generateImage(name string, srv *docker.Server) error {
archive, err := fakeTar()
if err != nil {
return err
}
return srv.ImageImport("-", "repo", name, archive, ioutil.Discard, utils.NewStreamFormatter(true))
}

332
integration/utils_test.go Normal file

@ -0,0 +1,332 @@
package docker
import (
"archive/tar"
"bytes"
"github.com/dotcloud/docker"
"github.com/dotcloud/docker/engine"
"github.com/dotcloud/docker/utils"
"fmt"
"io"
"io/ioutil"
"net/http"
"net/http/httptest"
"os"
"path"
"strings"
"testing"
"time"
)
// This file contains utility functions for docker's unit test suite.
// It has to be named XXX_test.go, apparently, in order to access private functions
// from other XXX_test.go functions.
// Create a temporary runtime suitable for unit testing.
// Call t.Fatal() at the first error.
func mkRuntime(f utils.Fataler) *docker.Runtime {
root, err := newTestDirectory(unitTestStoreBase)
if err != nil {
f.Fatal(err)
}
config := &docker.DaemonConfig{
Root: root,
AutoRestart: false,
}
r, err := docker.NewRuntimeFromDirectory(config)
if err != nil {
f.Fatal(err)
}
r.UpdateCapabilities(true)
return r
}
func createNamedTestContainer(eng *engine.Engine, config *docker.Config, f utils.Fataler, name string) (shortId string) {
job := eng.Job("create", name)
if err := job.ImportEnv(config); err != nil {
f.Fatal(err)
}
job.StdoutParseString(&shortId)
if err := job.Run(); err != nil {
f.Fatal(err)
}
return
}
func createTestContainer(eng *engine.Engine, config *docker.Config, f utils.Fataler) (shortId string) {
return createNamedTestContainer(eng, config, f, "")
}
func startContainer(eng *engine.Engine, id string, t utils.Fataler) {
job := eng.Job("start", id)
if err := job.Run(); err != nil {
t.Fatal(err)
}
}
func containerRun(eng *engine.Engine, id string, t utils.Fataler) {
startContainer(eng, id, t)
containerWait(eng, id, t)
}
func containerFileExists(eng *engine.Engine, id, dir string, t utils.Fataler) bool {
c := getContainer(eng, id, t)
if err := c.EnsureMounted(); err != nil {
t.Fatal(err)
}
if _, err := os.Stat(path.Join(c.RootfsPath(), dir)); err != nil {
if os.IsNotExist(err) {
return false
}
t.Fatal(err)
}
return true
}
func containerAttach(eng *engine.Engine, id string, t utils.Fataler) (io.WriteCloser, io.ReadCloser) {
c := getContainer(eng, id, t)
i, err := c.StdinPipe()
if err != nil {
t.Fatal(err)
}
o, err := c.StdoutPipe()
if err != nil {
t.Fatal(err)
}
return i, o
}
func containerWait(eng *engine.Engine, id string, t utils.Fataler) int {
return getContainer(eng, id, t).Wait()
}
func containerWaitTimeout(eng *engine.Engine, id string, t utils.Fataler) error {
return getContainer(eng, id, t).WaitTimeout(500 * time.Millisecond)
}
func containerKill(eng *engine.Engine, id string, t utils.Fataler) {
if err := getContainer(eng, id, t).Kill(); err != nil {
t.Fatal(err)
}
}
func containerRunning(eng *engine.Engine, id string, t utils.Fataler) bool {
return getContainer(eng, id, t).State.Running
}
func containerAssertExists(eng *engine.Engine, id string, t utils.Fataler) {
getContainer(eng, id, t)
}
func containerAssertNotExists(eng *engine.Engine, id string, t utils.Fataler) {
runtime := mkRuntimeFromEngine(eng, t)
if c := runtime.Get(id); c != nil {
t.Fatal(fmt.Errorf("Container %s should not exist", id))
}
}
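A minimal sketch of how the container lifecycle helpers above fit together; it assumes the usual unitTestImageID constant from this test package, and the command is illustrative:
// Example (illustrative, not part of this commit): create a container through
// the engine jobs, run it to completion, and check that it has exited but is
// still registered with the runtime.
func exampleContainerLifecycle(eng *engine.Engine, t *testing.T) {
    config := &docker.Config{
        Image: unitTestImageID, // assumed test image constant
        Cmd:   []string{"/bin/true"},
    }
    id := createTestContainer(eng, config, t) // "create" job
    containerRun(eng, id, t)                  // "start" job, then wait for exit
    if containerRunning(eng, id, t) {
        t.Fatalf("container %s should have exited", id)
    }
    containerAssertExists(eng, id, t) // still known to the runtime
}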
// assertHttpNotError expects the given response to not have an error.
// Otherwise it causes the test to fail.
func assertHttpNotError(r *httptest.ResponseRecorder, t utils.Fataler) {
// Non-error http status are [200, 400)
if r.Code < http.StatusOK || r.Code >= http.StatusBadRequest {
t.Fatal(fmt.Errorf("Unexpected http error: %v", r.Code))
}
}
// assertHttpError expects the given response to have an error.
// Otherwise it causes the test to fail.
func assertHttpError(r *httptest.ResponseRecorder, t utils.Fataler) {
// Non-error http status are [200, 400)
if !(r.Code < http.StatusOK || r.Code >= http.StatusBadRequest) {
t.Fatal(fmt.Errorf("Unexpected http success code: %v", r.Code))
}
}
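A hedged usage sketch for the two assert helpers, using only the standard httptest recorder:
// Example (illustrative): the helpers treat [200, 400) as success and
// everything else as an error.
func exampleHttpAsserts(t *testing.T) {
    ok := httptest.NewRecorder()
    ok.WriteHeader(http.StatusOK)
    assertHttpNotError(ok, t) // 200 is inside the non-error range

    notFound := httptest.NewRecorder()
    notFound.WriteHeader(http.StatusNotFound)
    assertHttpError(notFound, t) // 404 counts as an error status
}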
func getContainer(eng *engine.Engine, id string, t utils.Fataler) *docker.Container {
runtime := mkRuntimeFromEngine(eng, t)
c := runtime.Get(id)
if c == nil {
t.Fatal(fmt.Errorf("No such container: %s", id))
}
return c
}
func mkServerFromEngine(eng *engine.Engine, t utils.Fataler) *docker.Server {
iSrv := eng.Hack_GetGlobalVar("httpapi.server")
if iSrv == nil {
panic("Legacy server field not set in engine")
}
srv, ok := iSrv.(*docker.Server)
if !ok {
panic("Legacy server field in engine does not cast to *docker.Server")
}
return srv
}
func mkRuntimeFromEngine(eng *engine.Engine, t utils.Fataler) *docker.Runtime {
iRuntime := eng.Hack_GetGlobalVar("httpapi.runtime")
if iRuntime == nil {
panic("Legacy runtime field not set in engine")
}
runtime, ok := iRuntime.(*docker.Runtime)
if !ok {
panic("Legacy runtime field in engine does not cast to *docker.Runtime")
}
return runtime
}
func NewTestEngine(t utils.Fataler) *engine.Engine {
root, err := newTestDirectory(unitTestStoreBase)
if err != nil {
t.Fatal(err)
}
eng, err := engine.New(root)
if err != nil {
t.Fatal(err)
}
// Load default plugins
// (This is manually copied and modified from main() until we have a more generic plugin system)
job := eng.Job("initapi")
job.Setenv("Root", root)
job.SetenvBool("AutoRestart", false)
// TestGetEnabledCors and TestOptionsRoute require EnableCors=true
job.SetenvBool("EnableCors", true)
if err := job.Run(); err != nil {
t.Fatal(err)
}
return eng
}
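The typical setup built on NewTestEngine, mirroring the tests earlier in this commit (sketch only):
// Example (illustrative): standard boilerplate around NewTestEngine.
func exampleEngineSetup(t *testing.T) {
    eng := NewTestEngine(t)
    defer mkRuntimeFromEngine(eng, t).Nuke() // wipe all test state on exit
    srv := mkServerFromEngine(eng, t)
    if _, err := srv.Images(true, ""); err != nil { // any server call works here
        t.Fatal(err)
    }
}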
func newTestDirectory(templateDir string) (dir string, err error) {
return utils.TestDirectory(templateDir)
}
func getCallerName(depth int) string {
return utils.GetCallerName(depth)
}
// Write `content` to the file at path `dst`, creating it if necessary,
// as well as any missing directories.
// The file is truncated if it already exists.
// Call t.Fatal() at the first error.
func writeFile(dst, content string, t *testing.T) {
// Create subdirectories if necessary
if err := os.MkdirAll(path.Dir(dst), 0700); err != nil && !os.IsExist(err) {
t.Fatal(err)
}
f, err := os.OpenFile(dst, os.O_CREATE|os.O_RDWR|os.O_TRUNC, 0700)
if err != nil {
t.Fatal(err)
}
// Write content (truncate if it exists)
if _, err := io.Copy(f, strings.NewReader(content)); err != nil {
t.Fatal(err)
}
}
// Return the contents of file at path `src`.
// Call t.Fatal() at the first error (including if the file doesn't exist)
func readFile(src string, t *testing.T) (content string) {
f, err := os.Open(src)
if err != nil {
t.Fatal(err)
}
data, err := ioutil.ReadAll(f)
if err != nil {
t.Fatal(err)
}
return string(data)
}
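A small round-trip sketch for writeFile and readFile; the temporary path is illustrative:
// Example (illustrative): write a file under a nested path, then read it back.
func exampleFileHelpers(t *testing.T) {
    tmp, err := ioutil.TempDir("", "docker-test-files")
    if err != nil {
        t.Fatal(err)
    }
    defer os.RemoveAll(tmp)
    p := path.Join(tmp, "sub", "dir", "hello.txt")
    writeFile(p, "hello world\n", t) // creates missing directories, truncates
    if got := readFile(p, t); got != "hello world\n" {
        t.Fatalf("unexpected file content: %q", got)
    }
}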
// Create a test container from the given runtime `r` and run arguments `args`.
// If the image name is "_", (eg. []string{"-i", "-t", "_", "bash"}, it is
// dynamically replaced by the current test image.
// The caller is responsible for destroying the container.
// Call t.Fatal() at the first error.
func mkContainer(r *docker.Runtime, args []string, t *testing.T) (*docker.Container, *docker.HostConfig, error) {
config, hc, _, err := docker.ParseRun(args, nil)
defer func() {
if err != nil && t != nil {
t.Fatal(err)
}
}()
if err != nil {
return nil, nil, err
}
if config.Image == "_" {
config.Image = GetTestImage(r).ID
}
c, _, err := r.Create(config, "")
if err != nil {
return nil, nil, err
}
// NOTE: hostConfig is ignored.
// If `args` specify privileged mode, custom lxc conf, external mount binds,
// port redirects etc. they will be ignored.
// This is because the correct way to set these things is to pass environment
// to the `start` job.
// FIXME: this helper function should be deprecated in favor of calling
// `create` and `start` jobs directly.
return c, hc, nil
}
// Create a test container, start it, wait for it to complete, destroy it,
// and return its standard output as a string.
// The image name (e.g. the XXX in []string{"-i", "-t", "XXX", "bash"}) is dynamically replaced by the current test image.
// If t is not nil, call t.Fatal() at the first error. Otherwise return errors normally.
func runContainer(eng *engine.Engine, r *docker.Runtime, args []string, t *testing.T) (output string, err error) {
defer func() {
if err != nil && t != nil {
t.Fatal(err)
}
}()
container, hc, err := mkContainer(r, args, t)
if err != nil {
return "", err
}
defer r.Destroy(container)
stdout, err := container.StdoutPipe()
if err != nil {
return "", err
}
defer stdout.Close()
job := eng.Job("start", container.ID)
if err := job.ImportEnv(hc); err != nil {
return "", err
}
if err := job.Run(); err != nil {
return "", err
}
container.Wait()
data, err := ioutil.ReadAll(stdout)
if err != nil {
return "", err
}
output = string(data)
return
}
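A usage sketch for runContainer; the "_" placeholder comes from mkContainer above and the command itself is illustrative:
// Example (illustrative): run a one-shot command in the current test image
// and capture its stdout.
func exampleRunContainer(eng *engine.Engine, t *testing.T) {
    r := mkRuntimeFromEngine(eng, t)
    out, err := runContainer(eng, r, []string{"_", "ls", "-al", "/"}, t)
    if err != nil {
        t.Fatal(err)
    }
    if out == "" {
        t.Fatal("expected some output from ls")
    }
}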
// FIXME: this is duplicated from graph_test.go in the docker package.
func fakeTar() (io.Reader, error) {
content := []byte("Hello world!\n")
buf := new(bytes.Buffer)
tw := tar.NewWriter(buf)
for _, name := range []string{"/etc/postgres/postgres.conf", "/etc/passwd", "/var/log/postgres/postgres.conf"} {
hdr := new(tar.Header)
hdr.Size = int64(len(content))
hdr.Name = name
if err := tw.WriteHeader(hdr); err != nil {
return nil, err
}
tw.Write([]byte(content))
}
tw.Close()
return buf, nil
}
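fakeTar can also feed an image straight into a graph, as tags_unit_test.go does further down; the image ID below is a made-up placeholder:
// Example (illustrative): register a minimal image built from fakeTar.
func exampleRegisterFakeImage(graph *docker.Graph) error {
    archive, err := fakeTar()
    if err != nil {
        return err
    }
    // "fakeimageid" is a placeholder, not a real image ID.
    return graph.Register(nil, archive, &docker.Image{ID: "fakeimageid"})
}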

View File

@ -1,18 +0,0 @@
package iptables
import (
"os"
"testing"
)
func TestIptables(t *testing.T) {
if _, err := Raw("-L"); err != nil {
t.Fatal(err)
}
path := os.Getenv("PATH")
os.Setenv("PATH", "")
defer os.Setenv("PATH", path)
if _, err := Raw("-L"); err == nil {
t.Fatal("Not finding iptables in the PATH should cause an error")
}
}

102
lxc_template_unit_test.go Normal file
View File

@ -0,0 +1,102 @@
package docker
import (
"bufio"
"fmt"
"io/ioutil"
"math/rand"
"os"
"strings"
"testing"
"time"
)
func TestLXCConfig(t *testing.T) {
root, err := ioutil.TempDir("", "TestLXCConfig")
if err != nil {
t.Fatal(err)
}
defer os.RemoveAll(root)
// Memory is allocated randomly for testing
rand.Seed(time.Now().UTC().UnixNano())
memMin := 33554432
memMax := 536870912
mem := memMin + rand.Intn(memMax-memMin)
// CPU shares as well
cpuMin := 100
cpuMax := 10000
cpu := cpuMin + rand.Intn(cpuMax-cpuMin)
container := &Container{
root: root,
Config: &Config{
Hostname: "foobar",
Memory: int64(mem),
CpuShares: int64(cpu),
NetworkDisabled: true,
},
hostConfig: &HostConfig{
Privileged: false,
},
}
if err := container.generateLXCConfig(); err != nil {
t.Fatal(err)
}
grepFile(t, container.lxcConfigPath(), "lxc.utsname = foobar")
grepFile(t, container.lxcConfigPath(),
fmt.Sprintf("lxc.cgroup.memory.limit_in_bytes = %d", mem))
grepFile(t, container.lxcConfigPath(),
fmt.Sprintf("lxc.cgroup.memory.memsw.limit_in_bytes = %d", mem*2))
}
func TestCustomLxcConfig(t *testing.T) {
root, err := ioutil.TempDir("", "TestCustomLxcConfig")
if err != nil {
t.Fatal(err)
}
defer os.RemoveAll(root)
container := &Container{
root: root,
Config: &Config{
Hostname: "foobar",
NetworkDisabled: true,
},
hostConfig: &HostConfig{
Privileged: false,
LxcConf: []KeyValuePair{
{
Key: "lxc.utsname",
Value: "docker",
},
{
Key: "lxc.cgroup.cpuset.cpus",
Value: "0,1",
},
},
},
}
if err := container.generateLXCConfig(); err != nil {
t.Fatal(err)
}
grepFile(t, container.lxcConfigPath(), "lxc.utsname = docker")
grepFile(t, container.lxcConfigPath(), "lxc.cgroup.cpuset.cpus = 0,1")
}
func grepFile(t *testing.T, path string, pattern string) {
f, err := os.Open(path)
if err != nil {
t.Fatal(err)
}
defer f.Close()
r := bufio.NewReader(f)
var (
line string
)
err = nil
for err == nil {
line, err = r.ReadString('\n')
if strings.Contains(line, pattern) {
return
}
}
t.Fatalf("grepFile: pattern \"%s\" not found in \"%s\"", pattern, path)
}

View File

@ -15,6 +15,7 @@ import (
"path" "path"
"sort" "sort"
"strings" "strings"
"sync"
"time" "time"
) )
@ -516,7 +517,12 @@ func (runtime *Runtime) Commit(container *Container, repository, tag, comment, a
return img, nil
}
// FIXME: this is deprecated by the getFullName *function*
func (runtime *Runtime) getFullName(name string) (string, error) {
return getFullName(name)
}
func getFullName(name string) (string, error) {
if name == "" { if name == "" {
return "", fmt.Errorf("Container name cannot be empty") return "", fmt.Errorf("Container name cannot be empty")
} }
@ -655,6 +661,25 @@ func (runtime *Runtime) Close() error {
return runtime.containerGraph.Close()
}
// Nuke kills all containers then removes all content
// from the content root, including images, volumes and
// container filesystems.
// Again: this will remove your entire docker runtime!
func (runtime *Runtime) Nuke() error {
var wg sync.WaitGroup
for _, container := range runtime.List() {
wg.Add(1)
go func(c *Container) {
c.Kill()
wg.Done()
}(container)
}
wg.Wait()
runtime.Close()
return os.RemoveAll(runtime.config.Root)
}
func linkLxcStart(root string) error {
sourcePath, err := exec.LookPath("lxc-start")
if err != nil {
@ -672,6 +697,14 @@ func linkLxcStart(root string) error {
return os.Symlink(sourcePath, targetPath)
}
// FIXME: this is a convenience function for integration tests
// which need direct access to runtime.graph.
// Once the tests switch to using engine and jobs, this method
// can go away.
func (runtime *Runtime) Graph() *Graph {
return runtime.graph
}
// History is a convenience type for storing a list of containers,
// ordered by creation date.
type History []*Container
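For context, a sketch of how the new Nuke method is driven from the integration helpers added above (see mkRuntimeFromEngine(...).Nuke() in integration/utils_test.go); the test body is illustrative:
// Example (illustrative, lives in the integration test package):
func exampleNukeUsage(t *testing.T) {
    r := mkRuntime(t) // helper from integration/utils_test.go
    defer r.Nuke()    // kill all containers and remove the runtime root
    // ... exercise r ...
}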

View File

@ -62,6 +62,8 @@ func jobInitApi(job *engine.Job) string {
os.Exit(0)
}()
job.Eng.Hack_SetGlobalVar("httpapi.server", srv)
job.Eng.Hack_SetGlobalVar("httpapi.runtime", srv.runtime)
job.Eng.Hack_SetGlobalVar("httpapi.bridgeIP", srv.runtime.networkManager.bridgeNetwork.IP)
if err := job.Eng.Register("create", srv.ContainerCreate); err != nil {
return err.Error()
}
@ -530,6 +532,7 @@ func (srv *Server) ContainerCommit(name, repo, tag, author, comment string, conf
return img.ID, err
}
// FIXME: this should be called ImageTag
func (srv *Server) ContainerTag(name, repo, tag string, force bool) error {
if err := srv.runtime.repositories.Set(repo, tag, name, force); err != nil {
return err
@ -1062,7 +1065,12 @@ func (srv *Server) ContainerCreate(job *engine.Job) string {
return err.Error()
}
srv.LogEvent("create", container.ID, srv.runtime.repositories.ImageName(container.Image))
job.Printf("%s\n", container.ID) // FIXME: this is necessary because runtime.Create might return a nil container
// with a non-nil error. This should not happen! Once it's fixed we
// can remove this workaround.
if container != nil {
job.Printf("%s\n", container.ID)
}
for _, warning := range buildWarnings {
job.Errorf("%s\n", warning)
}
@ -1600,7 +1608,7 @@ func (srv *Server) HTTPRequestFactory(metaHeaders map[string][]string) *utils.HT
return srv.reqFactory
}
func (srv *Server) LogEvent(action, id, from string) { func (srv *Server) LogEvent(action, id, from string) *utils.JSONMessage {
now := time.Now().Unix()
jm := utils.JSONMessage{Status: action, ID: id, From: from, Time: now}
srv.events = append(srv.events, jm)
@ -1610,6 +1618,7 @@ func (srv *Server) LogEvent(action, id, from string) {
default:
}
}
return &jm
} }
type Server struct {
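The LogEvent signature change above lets callers inspect the recorded message; a hedged sketch, with a placeholder image name:
// Example (illustrative): the returned *utils.JSONMessage carries the same
// fields that are broadcast to event listeners (Status, ID, From, Time).
func exampleLogEvent(srv *Server, containerID string) {
    jm := srv.LogEvent("create", containerID, "base:latest")
    fmt.Printf("%s %s from %s at %d\n", jm.Status, jm.ID, jm.From, jm.Time)
}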

109
server_unit_test.go Normal file
View File

@ -0,0 +1,109 @@
package docker
import (
"github.com/dotcloud/docker/utils"
"testing"
"time"
)
func TestPools(t *testing.T) {
srv := &Server{
pullingPool: make(map[string]struct{}),
pushingPool: make(map[string]struct{}),
}
err := srv.poolAdd("pull", "test1")
if err != nil {
t.Fatal(err)
}
err = srv.poolAdd("pull", "test2")
if err != nil {
t.Fatal(err)
}
err = srv.poolAdd("push", "test1")
if err == nil || err.Error() != "pull test1 is already in progress" {
t.Fatalf("Expected `pull test1 is already in progress`")
}
err = srv.poolAdd("pull", "test1")
if err == nil || err.Error() != "pull test1 is already in progress" {
t.Fatalf("Expected `pull test1 is already in progress`")
}
err = srv.poolAdd("wait", "test3")
if err == nil || err.Error() != "Unknown pool type" {
t.Fatalf("Expected `Unknown pool type`")
}
err = srv.poolRemove("pull", "test2")
if err != nil {
t.Fatal(err)
}
err = srv.poolRemove("pull", "test2")
if err != nil {
t.Fatal(err)
}
err = srv.poolRemove("pull", "test1")
if err != nil {
t.Fatal(err)
}
err = srv.poolRemove("push", "test1")
if err != nil {
t.Fatal(err)
}
err = srv.poolRemove("wait", "test3")
if err == nil || err.Error() != "Unknown pool type" {
t.Fatalf("Expected `Unknown pool type`")
}
}
func TestLogEvent(t *testing.T) {
srv := &Server{
events: make([]utils.JSONMessage, 0, 64),
listeners: make(map[string]chan utils.JSONMessage),
}
srv.LogEvent("fakeaction", "fakeid", "fakeimage")
listener := make(chan utils.JSONMessage)
srv.Lock()
srv.listeners["test"] = listener
srv.Unlock()
srv.LogEvent("fakeaction2", "fakeid", "fakeimage")
if len(srv.events) != 2 {
t.Fatalf("Expected 2 events, found %d", len(srv.events))
}
go func() {
time.Sleep(200 * time.Millisecond)
srv.LogEvent("fakeaction3", "fakeid", "fakeimage")
time.Sleep(200 * time.Millisecond)
srv.LogEvent("fakeaction4", "fakeid", "fakeimage")
}()
setTimeout(t, "Listening for events timed out", 2*time.Second, func() {
for i := 2; i < 4; i++ {
event := <-listener
if event != srv.events[i] {
t.Fatalf("Event received it different than expected")
}
}
})
}
// FIXME: this is duplicated from integration/commands_test.go
func setTimeout(t *testing.T, msg string, d time.Duration, f func()) {
c := make(chan bool)
// Make sure we are not too long
go func() {
time.Sleep(d)
c <- true
}()
go func() {
f()
c <- false
}()
if <-c && msg != "" {
t.Fatal(msg)
}
}

View File

@ -1,111 +0,0 @@
package docker
import (
"fmt"
"testing"
"time"
)
func TestServerListOrderedImagesByCreationDate(t *testing.T) {
runtime := mkRuntime(t)
defer nuke(runtime)
archive, err := fakeTar()
if err != nil {
t.Fatal(err)
}
_, err = runtime.graph.Create(archive, nil, "Testing", "", nil)
if err != nil {
t.Fatal(err)
}
srv := &Server{runtime: runtime}
images, err := srv.Images(true, "")
if err != nil {
t.Fatal(err)
}
if images[0].Created < images[1].Created {
t.Error("Expected []APIImges to be ordered by most recent creation date.")
}
}
func TestServerListOrderedImagesByCreationDateAndTag(t *testing.T) {
runtime := mkRuntime(t)
defer nuke(runtime)
err := generateImage("bar", runtime)
if err != nil {
t.Fatal(err)
}
time.Sleep(time.Second)
err = generateImage("zed", runtime)
if err != nil {
t.Fatal(err)
}
srv := &Server{runtime: runtime}
images, err := srv.Images(true, "")
if err != nil {
t.Fatal(err)
}
if images[0].RepoTags[0] != "repo:zed" && images[0].RepoTags[0] != "repo:bar" {
t.Errorf("Expected []APIImges to be ordered by most recent creation date. %s", images)
}
}
func generateImage(name string, runtime *Runtime) error {
archive, err := fakeTar()
if err != nil {
return err
}
image, err := runtime.graph.Create(archive, nil, "Testing", "", nil)
if err != nil {
return err
}
srv := &Server{runtime: runtime}
srv.ContainerTag(image.ID, "repo", name, false)
return nil
}
func TestSortUniquePorts(t *testing.T) {
ports := []Port{
Port("6379/tcp"),
Port("22/tcp"),
}
sortPorts(ports, func(ip, jp Port) bool {
return ip.Int() < jp.Int() || (ip.Int() == jp.Int() && ip.Proto() == "tcp")
})
first := ports[0]
if fmt.Sprint(first) != "22/tcp" {
t.Log(fmt.Sprint(first))
t.Fail()
}
}
func TestSortSamePortWithDifferentProto(t *testing.T) {
ports := []Port{
Port("8888/tcp"),
Port("8888/udp"),
Port("6379/tcp"),
Port("6379/udp"),
}
sortPorts(ports, func(ip, jp Port) bool {
return ip.Int() < jp.Int() || (ip.Int() == jp.Int() && ip.Proto() == "tcp")
})
first := ports[0]
if fmt.Sprint(first) != "6379/tcp" {
t.Fail()
}
}

41
sorter_unit_test.go Normal file
View File

@ -0,0 +1,41 @@
package docker
import (
"fmt"
"testing"
)
func TestSortUniquePorts(t *testing.T) {
ports := []Port{
Port("6379/tcp"),
Port("22/tcp"),
}
sortPorts(ports, func(ip, jp Port) bool {
return ip.Int() < jp.Int() || (ip.Int() == jp.Int() && ip.Proto() == "tcp")
})
first := ports[0]
if fmt.Sprint(first) != "22/tcp" {
t.Log(fmt.Sprint(first))
t.Fail()
}
}
func TestSortSamePortWithDifferentProto(t *testing.T) {
ports := []Port{
Port("8888/tcp"),
Port("8888/udp"),
Port("6379/tcp"),
Port("6379/udp"),
}
sortPorts(ports, func(ip, jp Port) bool {
return ip.Int() < jp.Int() || (ip.Int() == jp.Int() && ip.Proto() == "tcp")
})
first := ports[0]
if fmt.Sprint(first) != "6379/tcp" {
t.Fail()
}
}

View File

@ -1,46 +0,0 @@
package docker
import (
"testing"
)
func TestLookupImage(t *testing.T) {
runtime := mkRuntime(t)
defer nuke(runtime)
if img, err := runtime.repositories.LookupImage(unitTestImageName); err != nil {
t.Fatal(err)
} else if img == nil {
t.Errorf("Expected 1 image, none found")
}
if img, err := runtime.repositories.LookupImage(unitTestImageName + ":" + DEFAULTTAG); err != nil {
t.Fatal(err)
} else if img == nil {
t.Errorf("Expected 1 image, none found")
}
if img, err := runtime.repositories.LookupImage(unitTestImageName + ":" + "fail"); err == nil {
t.Errorf("Expected error, none found")
} else if img != nil {
t.Errorf("Expected 0 image, 1 found")
}
if img, err := runtime.repositories.LookupImage("fail:fail"); err == nil {
t.Errorf("Expected error, none found")
} else if img != nil {
t.Errorf("Expected 0 image, 1 found")
}
if img, err := runtime.repositories.LookupImage(unitTestImageID); err != nil {
t.Fatal(err)
} else if img == nil {
t.Errorf("Expected 1 image, none found")
}
if img, err := runtime.repositories.LookupImage(unitTestImageName + ":" + unitTestImageID); err != nil {
t.Fatal(err)
} else if img == nil {
t.Errorf("Expected 1 image, none found")
}
}

80
tags_unit_test.go Normal file
View File

@ -0,0 +1,80 @@
package docker
import (
"github.com/dotcloud/docker/utils"
"os"
"path"
"testing"
)
const (
testImageName string = "myapp"
testImageID string = "foo"
)
func mkTestTagStore(root string, t *testing.T) *TagStore {
graph, err := NewGraph(root)
if err != nil {
t.Fatal(err)
}
store, err := NewTagStore(path.Join(root, "tags"), graph)
if err != nil {
t.Fatal(err)
}
archive, err := fakeTar()
if err != nil {
t.Fatal(err)
}
img := &Image{ID: testImageID}
if err := graph.Register(nil, archive, img); err != nil {
t.Fatal(err)
}
if err := store.Set(testImageName, "", testImageID, false); err != nil {
t.Fatal(err)
}
return store
}
func TestLookupImage(t *testing.T) {
tmp, err := utils.TestDirectory("")
if err != nil {
t.Fatal(err)
}
defer os.RemoveAll(tmp)
store := mkTestTagStore(tmp, t)
if img, err := store.LookupImage(testImageName); err != nil {
t.Fatal(err)
} else if img == nil {
t.Errorf("Expected 1 image, none found")
}
if img, err := store.LookupImage(testImageName + ":" + DEFAULTTAG); err != nil {
t.Fatal(err)
} else if img == nil {
t.Errorf("Expected 1 image, none found")
}
if img, err := store.LookupImage(testImageName + ":" + "fail"); err == nil {
t.Errorf("Expected error, none found")
} else if img != nil {
t.Errorf("Expected 0 image, 1 found")
}
if img, err := store.LookupImage("fail:fail"); err == nil {
t.Errorf("Expected error, none found")
} else if img != nil {
t.Errorf("Expected 0 image, 1 found")
}
if img, err := store.LookupImage(testImageID); err != nil {
t.Fatal(err)
} else if img == nil {
t.Errorf("Expected 1 image, none found")
}
if img, err := store.LookupImage(testImageName + ":" + testImageID); err != nil {
t.Fatal(err)
} else if img == nil {
t.Errorf("Expected 1 image, none found")
}
}

View File

@ -1242,3 +1242,40 @@ func PartParser(template, data string) (map[string]string, error) {
}
return out, nil
}
var globalTestID string
// TestDirectory creates a new temporary directory and returns its path.
// The contents of directory at path `templateDir` is copied into the
// new directory.
func TestDirectory(templateDir string) (dir string, err error) {
if globalTestID == "" {
globalTestID = RandomString()[:4]
}
prefix := fmt.Sprintf("docker-test%s-%s-", globalTestID, GetCallerName(2))
if prefix == "" {
prefix = "docker-test-"
}
dir, err = ioutil.TempDir("", prefix)
if err = os.Remove(dir); err != nil {
return
}
if templateDir != "" {
if err = CopyDirectory(templateDir, dir); err != nil {
return
}
}
return
}
// GetCallerName introspects the call stack and returns the name of the
// function `depth` levels down in the stack.
func GetCallerName(depth int) string {
// Use the caller function name as a prefix.
// This helps trace temp directories back to their test.
pc, _, _, _ := runtime.Caller(depth + 1)
callerLongName := runtime.FuncForPC(pc).Name()
parts := strings.Split(callerLongName, ".")
callerShortName := parts[len(parts)-1]
return callerShortName
}
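A usage sketch for TestDirectory, matching how tags_unit_test.go calls it; the printing and cleanup are illustrative:
// Example (illustrative): create a throwaway test directory and clean it up.
func exampleTestDirectory() error {
    dir, err := TestDirectory("") // empty template: nothing is copied in
    if err != nil {
        return err
    }
    defer os.RemoveAll(dir)
    // The directory name embeds GetCallerName(...), so stray temp directories
    // can be traced back to the test that created them.
    fmt.Println(dir)
    return nil
}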

View File

@ -1,493 +0,0 @@
package docker
import (
"fmt"
"github.com/dotcloud/docker/engine"
"github.com/dotcloud/docker/utils"
"io"
"io/ioutil"
"os"
"path"
"runtime"
"strings"
"testing"
)
// This file contains utility functions for docker's unit test suite.
// It has to be named XXX_test.go, apparently, in other to access private functions
// from other XXX_test.go functions.
var globalTestID string
// Create a temporary runtime suitable for unit testing.
// Call t.Fatal() at the first error.
func mkRuntime(f utils.Fataler) *Runtime {
root, err := newTestDirectory(unitTestStoreBase)
if err != nil {
f.Fatal(err)
}
config := &DaemonConfig{
Root: root,
AutoRestart: false,
}
r, err := NewRuntimeFromDirectory(config)
if err != nil {
f.Fatal(err)
}
r.UpdateCapabilities(true)
return r
}
func createNamedTestContainer(eng *engine.Engine, config *Config, f utils.Fataler, name string) (shortId string) {
job := eng.Job("create", name)
if err := job.ImportEnv(config); err != nil {
f.Fatal(err)
}
job.StdoutParseString(&shortId)
if err := job.Run(); err != nil {
f.Fatal(err)
}
return
}
func createTestContainer(eng *engine.Engine, config *Config, f utils.Fataler) (shortId string) {
return createNamedTestContainer(eng, config, f, "")
}
func mkServerFromEngine(eng *engine.Engine, t utils.Fataler) *Server {
iSrv := eng.Hack_GetGlobalVar("httpapi.server")
if iSrv == nil {
panic("Legacy server field not set in engine")
}
srv, ok := iSrv.(*Server)
if !ok {
panic("Legacy server field in engine does not cast to *Server")
}
return srv
}
func NewTestEngine(t utils.Fataler) *engine.Engine {
root, err := newTestDirectory(unitTestStoreBase)
if err != nil {
t.Fatal(err)
}
eng, err := engine.New(root)
if err != nil {
t.Fatal(err)
}
// Load default plugins
// (This is manually copied and modified from main() until we have a more generic plugin system)
job := eng.Job("initapi")
job.Setenv("Root", root)
job.SetenvBool("AutoRestart", false)
if err := job.Run(); err != nil {
t.Fatal(err)
}
return eng
}
func newTestDirectory(templateDir string) (dir string, err error) {
if globalTestID == "" {
globalTestID = GenerateID()[:4]
}
prefix := fmt.Sprintf("docker-test%s-%s-", globalTestID, getCallerName(2))
if prefix == "" {
prefix = "docker-test-"
}
dir, err = ioutil.TempDir("", prefix)
if err = os.Remove(dir); err != nil {
return
}
if err = utils.CopyDirectory(templateDir, dir); err != nil {
return
}
return
}
func getCallerName(depth int) string {
// Use the caller function name as a prefix.
// This helps trace temp directories back to their test.
pc, _, _, _ := runtime.Caller(depth + 1)
callerLongName := runtime.FuncForPC(pc).Name()
parts := strings.Split(callerLongName, ".")
callerShortName := parts[len(parts)-1]
return callerShortName
}
// Write `content` to the file at path `dst`, creating it if necessary,
// as well as any missing directories.
// The file is truncated if it already exists.
// Call t.Fatal() at the first error.
func writeFile(dst, content string, t *testing.T) {
// Create subdirectories if necessary
if err := os.MkdirAll(path.Dir(dst), 0700); err != nil && !os.IsExist(err) {
t.Fatal(err)
}
f, err := os.OpenFile(dst, os.O_CREATE|os.O_RDWR|os.O_TRUNC, 0700)
if err != nil {
t.Fatal(err)
}
// Write content (truncate if it exists)
if _, err := io.Copy(f, strings.NewReader(content)); err != nil {
t.Fatal(err)
}
}
// Return the contents of file at path `src`.
// Call t.Fatal() at the first error (including if the file doesn't exist)
func readFile(src string, t *testing.T) (content string) {
f, err := os.Open(src)
if err != nil {
t.Fatal(err)
}
data, err := ioutil.ReadAll(f)
if err != nil {
t.Fatal(err)
}
return string(data)
}
// Create a test container from the given runtime `r` and run arguments `args`.
// If the image name is "_", (eg. []string{"-i", "-t", "_", "bash"}, it is
// dynamically replaced by the current test image.
// The caller is responsible for destroying the container.
// Call t.Fatal() at the first error.
func mkContainer(r *Runtime, args []string, t *testing.T) (*Container, error) {
config, hostConfig, _, err := ParseRun(args, nil)
defer func() {
if err != nil && t != nil {
t.Fatal(err)
}
}()
if err != nil {
return nil, err
}
if config.Image == "_" {
config.Image = GetTestImage(r).ID
}
c, _, err := r.Create(config, "")
if err != nil {
return nil, err
}
c.hostConfig = hostConfig
return c, nil
}
// Create a test container, start it, wait for it to complete, destroy it,
// and return its standard output as a string.
// The image name (eg. the XXX in []string{"-i", "-t", "XXX", "bash"}, is dynamically replaced by the current test image.
// If t is not nil, call t.Fatal() at the first error. Otherwise return errors normally.
func runContainer(r *Runtime, args []string, t *testing.T) (output string, err error) {
defer func() {
if err != nil && t != nil {
t.Fatal(err)
}
}()
container, err := mkContainer(r, args, t)
if err != nil {
return "", err
}
defer r.Destroy(container)
stdout, err := container.StdoutPipe()
if err != nil {
return "", err
}
defer stdout.Close()
if err := container.Start(); err != nil {
return "", err
}
container.Wait()
data, err := ioutil.ReadAll(stdout)
if err != nil {
return "", err
}
output = string(data)
return
}
func TestCompareConfig(t *testing.T) {
volumes1 := make(map[string]struct{})
volumes1["/test1"] = struct{}{}
config1 := Config{
Dns: []string{"1.1.1.1", "2.2.2.2"},
PortSpecs: []string{"1111:1111", "2222:2222"},
Env: []string{"VAR1=1", "VAR2=2"},
VolumesFrom: "11111111",
Volumes: volumes1,
}
config2 := Config{
Dns: []string{"0.0.0.0", "2.2.2.2"},
PortSpecs: []string{"1111:1111", "2222:2222"},
Env: []string{"VAR1=1", "VAR2=2"},
VolumesFrom: "11111111",
Volumes: volumes1,
}
config3 := Config{
Dns: []string{"1.1.1.1", "2.2.2.2"},
PortSpecs: []string{"0000:0000", "2222:2222"},
Env: []string{"VAR1=1", "VAR2=2"},
VolumesFrom: "11111111",
Volumes: volumes1,
}
config4 := Config{
Dns: []string{"1.1.1.1", "2.2.2.2"},
PortSpecs: []string{"0000:0000", "2222:2222"},
Env: []string{"VAR1=1", "VAR2=2"},
VolumesFrom: "22222222",
Volumes: volumes1,
}
volumes2 := make(map[string]struct{})
volumes2["/test2"] = struct{}{}
config5 := Config{
Dns: []string{"1.1.1.1", "2.2.2.2"},
PortSpecs: []string{"0000:0000", "2222:2222"},
Env: []string{"VAR1=1", "VAR2=2"},
VolumesFrom: "11111111",
Volumes: volumes2,
}
if CompareConfig(&config1, &config2) {
t.Fatalf("CompareConfig should return false, Dns are different")
}
if CompareConfig(&config1, &config3) {
t.Fatalf("CompareConfig should return false, PortSpecs are different")
}
if CompareConfig(&config1, &config4) {
t.Fatalf("CompareConfig should return false, VolumesFrom are different")
}
if CompareConfig(&config1, &config5) {
t.Fatalf("CompareConfig should return false, Volumes are different")
}
if !CompareConfig(&config1, &config1) {
t.Fatalf("CompareConfig should return true")
}
}
func TestMergeConfig(t *testing.T) {
volumesImage := make(map[string]struct{})
volumesImage["/test1"] = struct{}{}
volumesImage["/test2"] = struct{}{}
configImage := &Config{
Dns: []string{"1.1.1.1", "2.2.2.2"},
PortSpecs: []string{"1111:1111", "2222:2222"},
Env: []string{"VAR1=1", "VAR2=2"},
VolumesFrom: "1111",
Volumes: volumesImage,
}
volumesUser := make(map[string]struct{})
volumesUser["/test3"] = struct{}{}
configUser := &Config{
Dns: []string{"3.3.3.3"},
PortSpecs: []string{"3333:2222", "3333:3333"},
Env: []string{"VAR2=3", "VAR3=3"},
Volumes: volumesUser,
}
if err := MergeConfig(configUser, configImage); err != nil {
t.Error(err)
}
if len(configUser.Dns) != 3 {
t.Fatalf("Expected 3 dns, 1.1.1.1, 2.2.2.2 and 3.3.3.3, found %d", len(configUser.Dns))
}
for _, dns := range configUser.Dns {
if dns != "1.1.1.1" && dns != "2.2.2.2" && dns != "3.3.3.3" {
t.Fatalf("Expected 1.1.1.1 or 2.2.2.2 or 3.3.3.3, found %s", dns)
}
}
if len(configUser.ExposedPorts) != 3 {
t.Fatalf("Expected 3 ExposedPorts, 1111, 2222 and 3333, found %d", len(configUser.ExposedPorts))
}
for portSpecs := range configUser.ExposedPorts {
if portSpecs.Port() != "1111" && portSpecs.Port() != "2222" && portSpecs.Port() != "3333" {
t.Fatalf("Expected 1111 or 2222 or 3333, found %s", portSpecs)
}
}
if len(configUser.Env) != 3 {
t.Fatalf("Expected 3 env var, VAR1=1, VAR2=3 and VAR3=3, found %d", len(configUser.Env))
}
for _, env := range configUser.Env {
if env != "VAR1=1" && env != "VAR2=3" && env != "VAR3=3" {
t.Fatalf("Expected VAR1=1 or VAR2=3 or VAR3=3, found %s", env)
}
}
if len(configUser.Volumes) != 3 {
t.Fatalf("Expected 3 volumes, /test1, /test2 and /test3, found %d", len(configUser.Volumes))
}
for v := range configUser.Volumes {
if v != "/test1" && v != "/test2" && v != "/test3" {
t.Fatalf("Expected /test1 or /test2 or /test3, found %s", v)
}
}
if configUser.VolumesFrom != "1111" {
t.Fatalf("Expected VolumesFrom to be 1111, found %s", configUser.VolumesFrom)
}
ports, _, err := parsePortSpecs([]string{"0000"})
if err != nil {
t.Error(err)
}
configImage2 := &Config{
ExposedPorts: ports,
}
if err := MergeConfig(configUser, configImage2); err != nil {
t.Error(err)
}
if len(configUser.ExposedPorts) != 4 {
t.Fatalf("Expected 4 ExposedPorts, 0000, 1111, 2222 and 3333, found %d", len(configUser.ExposedPorts))
}
for portSpecs := range configUser.ExposedPorts {
if portSpecs.Port() != "0000" && portSpecs.Port() != "1111" && portSpecs.Port() != "2222" && portSpecs.Port() != "3333" {
t.Fatalf("Expected 0000 or 1111 or 2222 or 3333, found %s", portSpecs)
}
}
}
func TestParseLxcConfOpt(t *testing.T) {
opts := []string{"lxc.utsname=docker", "lxc.utsname = docker "}
for _, o := range opts {
k, v, err := parseLxcOpt(o)
if err != nil {
t.FailNow()
}
if k != "lxc.utsname" {
t.Fail()
}
if v != "docker" {
t.Fail()
}
}
}
func TestParseNetworkOptsPrivateOnly(t *testing.T) {
ports, bindings, err := parsePortSpecs([]string{"192.168.1.100::80"})
if err != nil {
t.Fatal(err)
}
if len(ports) != 1 {
t.Logf("Expected 1 got %d", len(ports))
t.FailNow()
}
if len(bindings) != 1 {
t.Logf("Expected 1 got %d", len(bindings))
t.FailNow()
}
for k := range ports {
if k.Proto() != "tcp" {
t.Logf("Expected tcp got %s", k.Proto())
t.Fail()
}
if k.Port() != "80" {
t.Logf("Expected 80 got %s", k.Port())
t.Fail()
}
b, exists := bindings[k]
if !exists {
t.Log("Binding does not exist")
t.FailNow()
}
if len(b) != 1 {
t.Logf("Expected 1 got %d", len(b))
t.FailNow()
}
s := b[0]
if s.HostPort != "" {
t.Logf("Expected \"\" got %s", s.HostPort)
t.Fail()
}
if s.HostIp != "192.168.1.100" {
t.Fail()
}
}
}
func TestParseNetworkOptsPublic(t *testing.T) {
ports, bindings, err := parsePortSpecs([]string{"192.168.1.100:8080:80"})
if err != nil {
t.Fatal(err)
}
if len(ports) != 1 {
t.Logf("Expected 1 got %d", len(ports))
t.FailNow()
}
if len(bindings) != 1 {
t.Logf("Expected 1 got %d", len(bindings))
t.FailNow()
}
for k := range ports {
if k.Proto() != "tcp" {
t.Logf("Expected tcp got %s", k.Proto())
t.Fail()
}
if k.Port() != "80" {
t.Logf("Expected 80 got %s", k.Port())
t.Fail()
}
b, exists := bindings[k]
if !exists {
t.Log("Binding does not exist")
t.FailNow()
}
if len(b) != 1 {
t.Logf("Expected 1 got %d", len(b))
t.FailNow()
}
s := b[0]
if s.HostPort != "8080" {
t.Logf("Expected 8080 got %s", s.HostPort)
t.Fail()
}
if s.HostIp != "192.168.1.100" {
t.Fail()
}
}
}
func TestParseNetworkOptsUdp(t *testing.T) {
ports, bindings, err := parsePortSpecs([]string{"192.168.1.100::6000/udp"})
if err != nil {
t.Fatal(err)
}
if len(ports) != 1 {
t.Logf("Expected 1 got %d", len(ports))
t.FailNow()
}
if len(bindings) != 1 {
t.Logf("Expected 1 got %d", len(bindings))
t.FailNow()
}
for k := range ports {
if k.Proto() != "udp" {
t.Logf("Expected udp got %s", k.Proto())
t.Fail()
}
if k.Port() != "6000" {
t.Logf("Expected 6000 got %s", k.Port())
t.Fail()
}
b, exists := bindings[k]
if !exists {
t.Log("Binding does not exist")
t.FailNow()
}
if len(b) != 1 {
t.Logf("Expected 1 got %d", len(b))
t.FailNow()
}
s := b[0]
if s.HostPort != "" {
t.Logf("Expected \"\" got %s", s.HostPort)
t.Fail()
}
if s.HostIp != "192.168.1.100" {
t.Fail()
}
}
}