Merge pull request #886 from vieux/add_resources_test

fix resources in docker 1.7
This commit is contained in:
Victor Vieux 2015-06-02 16:15:12 -07:00
commit 83f97f73e8
11 changed files with 323 additions and 18 deletions

2
Godeps/Godeps.json generated
View File

@ -106,7 +106,7 @@
},
{
"ImportPath": "github.com/samalba/dockerclient",
"Rev": "4e6f4a21c07510dd6446d28053351a275591f08d"
"Rev": "142d8fe0150952d52867ac222e3a02eb17916f01"
},
{
"ImportPath": "github.com/samuel/go-zookeeper/zk",

View File

@ -231,6 +231,30 @@ func (client *DockerClient) ContainerChanges(id string) ([]*ContainerChanges, er
return changes, nil
}
// readJSONStream decodes a stream of JSON values from stream in a background
// goroutine, delivering one decodingResult per decoded value on the returned
// channel. The stream and the channel are closed when decode returns an error
// (including io.EOF) or when stopChan is closed. A nil stopChan means the
// stream is consumed until the first decode error.
//
// NOTE(review): closing stopChan does not interrupt a decode blocked on a
// read from stream; it only stops the loop between values.
func (client *DockerClient) readJSONStream(stream io.ReadCloser, decode func(*json.Decoder) decodingResult, stopChan <-chan struct{}) <-chan decodingResult {
	resultChan := make(chan decodingResult)
	go func() {
		decoder := json.NewDecoder(stream)
		defer stream.Close()
		defer close(resultChan)
		for {
			decodeResult := decode(decoder)
			select {
			case <-stopChan:
				return
			// Selecting on the send itself (instead of a non-blocking
			// `default:` followed by an unguarded send) ensures this
			// goroutine cannot block forever on resultChan when the
			// consumer closes stopChan and stops reading — the original
			// form leaked the goroutine and never closed the stream.
			case resultChan <- decodeResult:
				if decodeResult.err != nil {
					return
				}
			}
		}
	}()
	return resultChan
}
func (client *DockerClient) StartContainer(id string, config *HostConfig) error {
data, err := json.Marshal(config)
if err != nil {
@ -271,6 +295,64 @@ func (client *DockerClient) KillContainer(id, signal string) error {
return nil
}
// MonitorEvents subscribes to the daemon's /events endpoint and returns a
// channel of EventOrError values. If an error is ever delivered, no further
// events follow. Closing stopChan (may be nil) ends the subscription.
func (client *DockerClient) MonitorEvents(options *MonitorEventsOptions, stopChan <-chan struct{}) (<-chan EventOrError, error) {
	params := url.Values{}
	if options != nil {
		if options.Since != 0 {
			params.Add("since", strconv.Itoa(options.Since))
		}
		if options.Until != 0 {
			params.Add("until", strconv.Itoa(options.Until))
		}
		if f := options.Filters; f != nil {
			// Non-empty filters are sent as a JSON-encoded map of lists,
			// matching the remote API's "filters" query parameter.
			filters := map[string][]string{}
			if f.Event != "" {
				filters["event"] = []string{f.Event}
			}
			if f.Image != "" {
				filters["image"] = []string{f.Image}
			}
			if f.Container != "" {
				filters["container"] = []string{f.Container}
			}
			if len(filters) > 0 {
				encoded, err := json.Marshal(filters)
				if err != nil {
					return nil, err
				}
				params.Add("filters", string(encoded))
			}
		}
	}
	uri := fmt.Sprintf("%s/%s/events?%s", client.URL.String(), APIVersion, params.Encode())
	resp, err := client.HTTPClient.Get(uri)
	if err != nil {
		return nil, err
	}
	// Decode one Event per JSON value in the response stream.
	decode := func(decoder *json.Decoder) decodingResult {
		var event Event
		if decodeErr := decoder.Decode(&event); decodeErr != nil {
			return decodingResult{err: decodeErr}
		}
		return decodingResult{result: event}
	}
	results := client.readJSONStream(resp.Body, decode, stopChan)
	out := make(chan EventOrError)
	go func() {
		// Forward every decoding result as an EventOrError, then close.
		defer close(out)
		for r := range results {
			event, _ := r.result.(Event)
			out <- EventOrError{
				Event: event,
				Error: r.err,
			}
		}
	}()
	return out, nil
}
func (client *DockerClient) StartMonitorEvents(cb Callback, ec chan error, args ...interface{}) {
atomic.StoreInt32(&client.monitorEvents, 1)
go client.getEvents(cb, ec, args...)
@ -382,6 +464,20 @@ func (client *DockerClient) PullImage(name string, auth *AuthConfig) error {
return nil
}
// InspectImage fetches image metadata from GET /images/{id}/json and decodes
// it into an ImageInfo.
func (client *DockerClient) InspectImage(id string) (*ImageInfo, error) {
	uri := fmt.Sprintf("/%s/images/%s/json", APIVersion, id)
	data, err := client.doRequest("GET", uri, nil, nil)
	if err != nil {
		return nil, err
	}
	var info ImageInfo
	if err := json.Unmarshal(data, &info); err != nil {
		return nil, err
	}
	return &info, nil
}
func (client *DockerClient) LoadImage(reader io.Reader) error {
data, err := ioutil.ReadAll(reader)
if err != nil {

View File

@ -2,7 +2,9 @@ package dockerclient
import (
"bytes"
"encoding/json"
"fmt"
"io"
"reflect"
"strings"
"testing"
@ -155,6 +157,58 @@ func TestContainerLogs(t *testing.T) {
}
}
// TestMonitorEvents exercises DockerClient.MonitorEvents against the test
// server's canned /events response (eventsResp), both with and without a
// stop channel.
func TestMonitorEvents(t *testing.T) {
client := testDockerClient(t)
decoder := json.NewDecoder(bytes.NewBufferString(eventsResp))
// Decode eventsResp ourselves to build the list of events the client is
// expected to deliver, in order.
var expectedEvents []Event
for {
var event Event
if err := decoder.Decode(&event); err != nil {
if err == io.EOF {
break
} else {
t.Fatalf("cannot parse expected resp: %s", err.Error())
}
} else {
expectedEvents = append(expectedEvents, event)
}
}
// test passing stop chan
stopChan := make(chan struct{})
eventInfoChan, err := client.MonitorEvents(nil, stopChan)
if err != nil {
t.Fatalf("cannot get events from server: %s", err.Error())
}
eventInfo := <-eventInfoChan
if eventInfo.Error != nil || eventInfo.Event != expectedEvents[0] {
t.Fatalf("got:\n%#v\nexpected:\n%#v", eventInfo, expectedEvents[0])
}
close(stopChan)
// After the stop channel closes, at most a couple of in-flight events may
// still arrive before the channel is closed; a third successful receive
// (ok == true on i == 2) means monitoring failed to stop.
for i := 0; i < 3; i++ {
_, ok := <-eventInfoChan
if i == 2 && ok {
t.Fatalf("read more than 2 events successfully after closing stopChan")
}
}
// test when you don't pass stop chan
eventInfoChan, err = client.MonitorEvents(nil, nil)
if err != nil {
t.Fatalf("cannot get events from server: %s", err.Error())
}
// With no stop channel every canned event should be delivered, in order.
for i, expectedEvent := range expectedEvents {
t.Logf("on iter %d\n", i)
eventInfo := <-eventInfoChan
if eventInfo.Error != nil || eventInfo.Event != expectedEvent {
t.Fatalf("index %d, got:\n%#v\nexpected:\n%#v", i, eventInfo, expectedEvent)
}
t.Logf("done with iter %d\n", i)
}
}
func TestDockerClientInterface(t *testing.T) {
iface := reflect.TypeOf((*Client)(nil)).Elem()
test := testDockerClient(t)

View File

@ -10,10 +10,10 @@ import (
"strconv"
"time"
"github.com/docker/docker/pkg/ioutils"
"github.com/docker/docker/pkg/jsonlog"
"github.com/docker/docker/pkg/stdcopy"
"github.com/docker/docker/pkg/timeutils"
"github.com/docker/docker/utils"
"github.com/gorilla/mux"
)
@ -30,6 +30,7 @@ func init() {
r.HandleFunc(baseURL+"/containers/{id}/changes", handleContainerChanges).Methods("GET")
r.HandleFunc(baseURL+"/containers/{id}/kill", handleContainerKill).Methods("POST")
r.HandleFunc(baseURL+"/images/create", handleImagePull).Methods("POST")
r.HandleFunc(baseURL+"/events", handleEvents).Methods("GET")
testHTTPServer = httptest.NewServer(handlerAccessLog(r))
}
@ -74,7 +75,7 @@ func handleImagePull(w http.ResponseWriter, r *http.Request) {
func handleContainerLogs(w http.ResponseWriter, r *http.Request) {
var outStream, errStream io.Writer
outStream = utils.NewWriteFlusher(w)
outStream = ioutils.NewWriteFlusher(w)
// not sure how to test follow
if err := r.ParseForm(); err != nil {
@ -228,3 +229,7 @@ func handlerGetContainers(w http.ResponseWriter, r *http.Request) {
}
w.Write([]byte(body))
}
// handleEvents serves the canned event stream (eventsResp) used by the
// /events tests.
func handleEvents(w http.ResponseWriter, r *http.Request) {
	io.WriteString(w, eventsResp)
}

View File

@ -9,3 +9,5 @@ var haproxyPullOutput = `{"status":"The image you are pulling has been verified"
{"status":"Already exists","progressDetail":{},"id":"511136ea3c5a"}{"status":"Already exists","progressDetail":{},"id":"1aeada447715"}{"status":"Already exists","progressDetail":{},"id":"479215127fa7"}{"status":"Already exists","progressDetail":{},"id":"66301eb54a7d"}{"status":"Already exists","progressDetail":{},"id":"e3990b07573f"}{"status":"Already exists","progressDetail":{},"id":"3d894e6f7e63"}{"status":"Already exists","progressDetail":{},"id":"4d949c40bc77"}{"status":"Already exists","progressDetail":{},"id":"55e031889365"}{"status":"Already exists","progressDetail":{},"id":"c7aa675e1876"}{"status":"The image you are pulling has been verified","id":"haproxy:latest"}
{"status":"Already exists","progressDetail":{},"id":"511136ea3c5a"}{"status":"Already exists","progressDetail":{},"id":"1aeada447715"}{"status":"Already exists","progressDetail":{},"id":"479215127fa7"}{"status":"Already exists","progressDetail":{},"id":"66301eb54a7d"}{"status":"Already exists","progressDetail":{},"id":"e3990b07573f"}{"status":"Already exists","progressDetail":{},"id":"ecb4b23ca7ce"}{"status":"Already exists","progressDetail":{},"id":"f453e940c177"}{"status":"Already exists","progressDetail":{},"id":"fc5ea1bc05ab"}{"status":"Already exists","progressDetail":{},"id":"380557f8f7b3"}{"status":"Status: Image is up to date for haproxy"}
`
var eventsResp = `{"status":"pull","id":"nginx:latest","time":1428620433}{"status":"create","id":"9b818c3b8291708fdcecd7c4086b75c222cb503be10a93d9c11040886032a48b","from":"nginx:latest","time":1428620433}{"status":"start","id":"9b818c3b8291708fdcecd7c4086b75c222cb503be10a93d9c11040886032a48b","from":"nginx:latest","time":1428620433}{"status":"die","id":"9b818c3b8291708fdcecd7c4086b75c222cb503be10a93d9c11040886032a48b","from":"nginx:latest","time":1428620442}{"status":"create","id":"352d0b412aae5a5d2b14ae9d88be59dc276602d9edb9dcc33e138e475b3e4720","from":"52.11.96.81/foobar/ubuntu:latest","time":1428620444}{"status":"start","id":"352d0b412aae5a5d2b14ae9d88be59dc276602d9edb9dcc33e138e475b3e4720","from":"52.11.96.81/foobar/ubuntu:latest","time":1428620444}{"status":"die","id":"352d0b412aae5a5d2b14ae9d88be59dc276602d9edb9dcc33e138e475b3e4720","from":"52.11.96.81/foobar/ubuntu:latest","time":1428620444}{"status":"pull","id":"debian:latest","time":1428620453}{"status":"create","id":"668887b5729946546b3072655dc6da08f0e3210111b68b704eb842adfce53f6c","from":"debian:latest","time":1428620453}{"status":"start","id":"668887b5729946546b3072655dc6da08f0e3210111b68b704eb842adfce53f6c","from":"debian:latest","time":1428620453}{"status":"die","id":"668887b5729946546b3072655dc6da08f0e3210111b68b704eb842adfce53f6c","from":"debian:latest","time":1428620453}{"status":"create","id":"eb4a19ec21ab29bbbffbf3ee2e2df9d99cb749780e1eff06a591cee5ba505180","from":"nginx:latest","time":1428620458}{"status":"start","id":"eb4a19ec21ab29bbbffbf3ee2e2df9d99cb749780e1eff06a591cee5ba505180","from":"nginx:latest","time":1428620458}{"status":"pause","id":"eb4a19ec21ab29bbbffbf3ee2e2df9d99cb749780e1eff06a591cee5ba505180","from":"nginx:latest","time":1428620462}{"status":"unpause","id":"eb4a19ec21ab29bbbffbf3ee2e2df9d99cb749780e1eff06a591cee5ba505180","from":"nginx:latest","time":1428620466}{"status":"die","id":"eb4a19ec21ab29bbbffbf3ee2e2df9d99cb749780e1eff06a591cee5ba505180","from":"nginx:latest","time":1428620469}`

View File

@ -12,6 +12,7 @@ type Client interface {
Info() (*Info, error)
ListContainers(all, size bool, filters string) ([]Container, error)
InspectContainer(id string) (*ContainerInfo, error)
InspectImage(id string) (*ImageInfo, error)
CreateContainer(config *ContainerConfig, name string) (string, error)
ContainerLogs(id string, options *LogOptions) (io.ReadCloser, error)
ContainerChanges(id string) ([]*ContainerChanges, error)
@ -20,6 +21,11 @@ type Client interface {
StopContainer(id string, timeout int) error
RestartContainer(id string, timeout int) error
KillContainer(id, signal string) error
// MonitorEvents takes options and an optional stop channel, and returns
// an EventOrError channel. If an error is ever sent, then no more
// events will be sent. If a stop channel is provided, events will stop
// being monitored after the stop channel is closed.
MonitorEvents(options *MonitorEventsOptions, stopChan <-chan struct{}) (<-chan EventOrError, error)
StartMonitorEvents(cb Callback, ec chan error, args ...interface{})
StopAllMonitorEvents()
StartMonitorStats(id string, cb StatCallback, ec chan error, args ...interface{})

View File

@ -30,6 +30,11 @@ func (client *MockClient) InspectContainer(id string) (*dockerclient.ContainerIn
return args.Get(0).(*dockerclient.ContainerInfo), args.Error(1)
}
// InspectImage is the mock implementation of Client.InspectImage: it records
// the call with testify's mock and returns the configured ImageInfo and error.
func (client *MockClient) InspectImage(id string) (*dockerclient.ImageInfo, error) {
args := client.Mock.Called(id)
return args.Get(0).(*dockerclient.ImageInfo), args.Error(1)
}
func (client *MockClient) CreateContainer(config *dockerclient.ContainerConfig, name string) (string, error) {
args := client.Mock.Called(config, name)
return args.String(0), args.Error(1)
@ -65,6 +70,11 @@ func (client *MockClient) KillContainer(id, signal string) error {
return args.Error(0)
}
// MonitorEvents is the mock implementation of Client.MonitorEvents: it records
// the call and returns the configured event channel and error.
func (client *MockClient) MonitorEvents(options *dockerclient.MonitorEventsOptions, stopChan <-chan struct{}) (<-chan dockerclient.EventOrError, error) {
args := client.Mock.Called(options, stopChan)
return args.Get(0).(<-chan dockerclient.EventOrError), args.Error(1)
}
func (client *MockClient) StartMonitorEvents(cb dockerclient.Callback, ec chan error, args ...interface{}) {
client.Mock.Called(cb, ec, args)
}

View File

@ -20,6 +20,7 @@ type ContainerConfig struct {
AttachStderr bool
PortSpecs []string
ExposedPorts map[string]struct{}
MacAddress string
Tty bool
OpenStdin bool
StdinOnce bool
@ -41,6 +42,10 @@ type HostConfig struct {
Binds []string
ContainerIDFile string
LxcConf []map[string]string
Memory int64
MemorySwap int64
CpuShares int64
CpusetCpus string
Privileged bool
PortBindings map[string][]PortBinding
Links []string
@ -73,6 +78,18 @@ type LogOptions struct {
Tail int64
}
// MonitorEventsFilters restricts which daemon events MonitorEvents reports.
// Empty fields are omitted from the request's JSON "filters" parameter.
type MonitorEventsFilters struct {
Event string `json:",omitempty"`
Image string `json:",omitempty"`
Container string `json:",omitempty"`
}

// MonitorEventsOptions carries the query parameters for the /events request.
// Since and Until are passed through verbatim as the "since"/"until"
// parameters (zero means "not set"); presumably Unix timestamps — TODO confirm
// against the remote API version in use.
type MonitorEventsOptions struct {
Since int
Until int
Filters *MonitorEventsFilters `json:",omitempty"`
}
type RestartPolicy struct {
Name string
MaximumRetryCount int64
@ -142,6 +159,22 @@ func (s *State) StateString() string {
return "exited"
}
// ImageInfo is the decoded response of the image-inspect endpoint
// (GET /images/{id}/json), as returned by InspectImage.
type ImageInfo struct {
Architecture string
Author string
Comment string
Config *ContainerConfig
Container string
ContainerConfig *ContainerConfig
Created time.Time
DockerVersion string
Id string
Os string
Parent string
Size int64
VirtualSize int64
}
type ContainerInfo struct {
Id string
Created string
@ -198,9 +231,13 @@ type Event struct {
}
type Version struct {
Version string
GitCommit string
GoVersion string
ApiVersion string
Arch string
GitCommit string
GoVersion string
KernelVersion string
Os string
Version string
}
type RespContainersCreate struct {
@ -217,19 +254,38 @@ type Image struct {
VirtualSize int64
}
// Info is the struct returned by /info
// The API is currently in flux, so Debug, MemoryLimit, SwapLimit, and
// IPv4Forwarding are interfaces because in docker 1.6.1 they are 0 or 1 but in
// master they are bools.
type Info struct {
ID string
Containers int64
Driver string
DriverStatus [][]string
ExecutionDriver string
Images int64
KernelVersion string
OperatingSystem string
NCPU int64
MemTotal int64
Name string
Labels []string
ID string
Containers int64
Driver string
DriverStatus [][]string
ExecutionDriver string
Images int64
KernelVersion string
OperatingSystem string
NCPU int64
MemTotal int64
Name string
Labels []string
Debug interface{}
NFd int64
NGoroutines int64
SystemTime time.Time
NEventsListener int64
InitPath string
InitSha1 string
IndexServerAddress string
MemoryLimit interface{}
SwapLimit interface{}
IPv4Forwarding interface{}
DockerRootDir string
HttpProxy string
HttpsProxy string
NoProxy string
}
type ImageDelete struct {
@ -237,6 +293,16 @@ type ImageDelete struct {
Untagged string
}
// EventOrError is the value MonitorEvents delivers on its channel: either a
// decoded Event (embedded) or the error that ended the stream. Once Error is
// non-nil, no further values are sent.
type EventOrError struct {
Event
Error error
}

// decodingResult pairs one value decoded from a JSON stream (see
// readJSONStream) with the decoder error, if any.
type decodingResult struct {
result interface{}
err error
}
// The following are types for the API stats endpoint
type ThrottlingData struct {
// Number of periods with throttling active

View File

@ -24,6 +24,42 @@ func parseEnv(e string) (bool, string, string) {
return false, "", ""
}
// FIXME: Temporary fix to handle forward/backward compatibility between Docker <1.6 and >=1.7
// ContainerConfig should be handling converting to/from different docker versions
//
// consolidateResourceFields mirrors the resource limits between the legacy
// top-level ContainerConfig fields (Docker <1.7) and the nested HostConfig
// fields (Docker >=1.7) so both views agree. When the two differ, the
// non-zero (non-empty) value wins; if both are set and differ, the top-level
// field takes precedence.
func consolidateResourceFields(c *dockerclient.ContainerConfig) {
	mirrorInt64(&c.Memory, &c.HostConfig.Memory)
	mirrorInt64(&c.MemorySwap, &c.HostConfig.MemorySwap)
	mirrorInt64(&c.CpuShares, &c.HostConfig.CpuShares)
	mirrorString(&c.Cpuset, &c.HostConfig.CpusetCpus)
}

// mirrorInt64 copies the non-zero value over its zero counterpart so that
// *a == *b afterwards; *a wins when both are non-zero and differ.
func mirrorInt64(a, b *int64) {
	if *a == *b {
		return
	}
	if *a != 0 {
		*b = *a
	} else {
		*a = *b
	}
}

// mirrorString is mirrorInt64 for string-valued resource fields ("" is the
// zero value).
func mirrorString(a, b *string) {
	if *a == *b {
		return
	}
	if *a != "" {
		*b = *a
	} else {
		*a = *b
	}
}
// BuildContainerConfig creates a cluster.ContainerConfig from a dockerclient.ContainerConfig
func BuildContainerConfig(c dockerclient.ContainerConfig) *ContainerConfig {
var (
@ -75,6 +111,8 @@ func BuildContainerConfig(c dockerclient.ContainerConfig) *ContainerConfig {
}
}
consolidateResourceFields(&c)
return &ContainerConfig{c}
}

View File

@ -64,3 +64,20 @@ func TestAffinities(t *testing.T) {
assert.Len(t, config.Affinities(), 1)
assert.Equal(t, len(config.Affinities()), 1)
}
// TestConsolidateResourceFields verifies that resource limits set on either
// the legacy top-level fields or the new HostConfig fields end up mirrored to
// the other location by BuildContainerConfig.
func TestConsolidateResourceFields(t *testing.T) {
	for _, config := range []*ContainerConfig{
		BuildContainerConfig(dockerclient.ContainerConfig{Memory: 4242, MemorySwap: 4343, CpuShares: 4444, Cpuset: "1-2"}),
		BuildContainerConfig(dockerclient.ContainerConfig{HostConfig: dockerclient.HostConfig{Memory: 4242, MemorySwap: 4343, CpuShares: 4444, CpusetCpus: "1-2"}}),
	} {
		// testify's assert.Equal signature is (t, expected, actual); the
		// original had the arguments reversed, which inverts failure messages.
		assert.Equal(t, int64(4242), config.Memory)
		assert.Equal(t, int64(4343), config.MemorySwap)
		assert.Equal(t, int64(4444), config.CpuShares)
		assert.Equal(t, "1-2", config.Cpuset)
		assert.Equal(t, int64(4242), config.HostConfig.Memory)
		assert.Equal(t, int64(4343), config.HostConfig.MemorySwap)
		assert.Equal(t, int64(4444), config.HostConfig.CpuShares)
		assert.Equal(t, "1-2", config.HostConfig.CpusetCpus)
	}
}

View File

@ -21,3 +21,14 @@ function teardown() {
# verify, container is running
[ -n $(docker_swarm ps -q --filter=name=test_container --filter=status=running) ]
}
# Verify the scheduler rejects containers whose resource requests cannot be
# satisfied by the cluster (exercises the docker 1.7 resource fields).
@test "docker run not enough resources" {
start_docker_with_busybox 1
swarm_manage
# -m 1000g: memory request far beyond the single test node's capacity,
# so the run must fail.
run docker_swarm run -d --name test_container -m 1000g busybox ls
[ "$status" -ne 0 ]
# -c 1000: CPU request that should likewise be unschedulable
# (NOTE(review): confirm whether -c means CPU shares or CPU count here).
run docker_swarm run -d --name test_container -c 1000 busybox ls
[ "$status" -ne 0 ]
}