mirror of https://github.com/docker/docs.git

Merge pull request #738 from aluzzardi/ps-filters

docker ps: Support for filters and improve state management.

Commit: 0317189ef2
@@ -32,18 +32,18 @@
 		},
 		{
 			"ImportPath": "github.com/docker/docker/pkg/ioutils",
-			"Comment": "v1.4.1-216-g12fef2d",
-			"Rev": "12fef2d8df4e5da024418e324f7e3c3e82220a27"
+			"Comment": "v1.4.1-3245-g443437f",
+			"Rev": "443437f5ea04da9d62bf3e05d7951f7d30e77d96"
 		},
 		{
 			"ImportPath": "github.com/docker/docker/pkg/parsers/filters",
-			"Comment": "v1.4.1-216-g12fef2d",
-			"Rev": "12fef2d8df4e5da024418e324f7e3c3e82220a27"
+			"Comment": "v1.4.1-3245-g443437f",
+			"Rev": "443437f5ea04da9d62bf3e05d7951f7d30e77d96"
 		},
 		{
 			"ImportPath": "github.com/docker/docker/pkg/units",
-			"Comment": "v1.4.1-216-g12fef2d",
-			"Rev": "12fef2d8df4e5da024418e324f7e3c3e82220a27"
+			"Comment": "v1.4.1-3245-g443437f",
+			"Rev": "443437f5ea04da9d62bf3e05d7951f7d30e77d96"
 		},
 		{
 			"ImportPath": "github.com/gorilla/context",
@@ -60,7 +60,7 @@
 		},
 		{
 			"ImportPath": "github.com/samalba/dockerclient",
-			"Rev": "8b9427265e82fddde577648b986fbe78d816b333"
+			"Rev": "5e5020b90dd4657c33d446356556481182d2d66b"
 		},
 		{
 			"ImportPath": "github.com/samuel/go-zookeeper/zk",
@@ -2,8 +2,13 @@ package ioutils

 import (
 	"bytes"
+	"crypto/rand"
+	"crypto/sha256"
+	"encoding/hex"
 	"io"
+	"math/big"
 	"sync"
+	"time"
 )

 type readCloserWrapper struct {
@@ -42,20 +47,40 @@ func NewReaderErrWrapper(r io.Reader, closer func()) io.Reader {
 	}
 }

+// bufReader allows the underlying reader to continue to produce
+// output by pre-emptively reading from the wrapped reader.
+// This is achieved by buffering this data in bufReader's
+// expanding buffer.
 type bufReader struct {
 	sync.Mutex
-	buf      *bytes.Buffer
-	reader   io.Reader
-	err      error
-	wait     sync.Cond
-	drainBuf []byte
+	buf                  *bytes.Buffer
+	reader               io.Reader
+	err                  error
+	wait                 sync.Cond
+	drainBuf             []byte
+	reuseBuf             []byte
+	maxReuse             int64
+	resetTimeout         time.Duration
+	bufLenResetThreshold int64
+	maxReadDataReset     int64
 }

 func NewBufReader(r io.Reader) *bufReader {
+	var timeout int
+	if randVal, err := rand.Int(rand.Reader, big.NewInt(120)); err == nil {
+		timeout = int(randVal.Int64()) + 180
+	} else {
+		timeout = 300
+	}
 	reader := &bufReader{
-		buf:      &bytes.Buffer{},
-		drainBuf: make([]byte, 1024),
-		reader:   r,
+		buf:                  &bytes.Buffer{},
+		drainBuf:             make([]byte, 1024),
+		reuseBuf:             make([]byte, 4096),
+		maxReuse:             1000,
+		resetTimeout:         time.Second * time.Duration(timeout),
+		bufLenResetThreshold: 100 * 1024,
+		maxReadDataReset:     10 * 1024 * 1024,
+		reader:               r,
 	}
 	reader.wait.L = &reader.Mutex
 	go reader.drain()
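Worth noting in the hunk above: NewBufReader picks its resetTimeout at random between 180 and 300 seconds. With many long-lived containers, a fixed timeout would make every bufReader become eligible to reset its buffer in lockstep; the jitter spreads that work out. A minimal standalone sketch of the same computation (the package wrapper and print are illustrative, not part of the patch):

```go
package main

import (
	"crypto/rand"
	"fmt"
	"math/big"
	"time"
)

// jitteredTimeout mirrors the logic in NewBufReader: a random value in
// [0, 120) seconds on top of a 180-second floor gives a timeout in
// [180s, 300s); if the system RNG fails, it falls back to a flat 300s.
func jitteredTimeout() time.Duration {
	timeout := 300
	if randVal, err := rand.Int(rand.Reader, big.NewInt(120)); err == nil {
		timeout = int(randVal.Int64()) + 180
	}
	return time.Second * time.Duration(timeout)
}

func main() {
	fmt.Println(jitteredTimeout()) // e.g. 4m12s
}
```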
@@ -74,14 +99,94 @@ func NewBufReaderWithDrainbufAndBuffer(r io.Reader, drainBuffer []byte, buffer *
 	}
 }

 func (r *bufReader) drain() {
+	var (
+		duration       time.Duration
+		lastReset      time.Time
+		now            time.Time
+		reset          bool
+		bufLen         int64
+		dataSinceReset int64
+		maxBufLen      int64
+		reuseBufLen    int64
+		reuseCount     int64
+	)
+	reuseBufLen = int64(len(r.reuseBuf))
+	lastReset = time.Now()
 	for {
 		n, err := r.reader.Read(r.drainBuf)
+		dataSinceReset += int64(n)
 		r.Lock()
+		bufLen = int64(r.buf.Len())
+		if bufLen > maxBufLen {
+			maxBufLen = bufLen
+		}
+
+		// Avoid unbounded growth of the buffer over time.
+		// This has been discovered to be the only non-intrusive
+		// solution to the unbounded growth of the buffer.
+		// Alternative solutions such as compression, multiple
+		// buffers, channels and other similar pieces of code
+		// were reducing throughput, overall Docker performance
+		// or simply crashed Docker.
+		// This solution releases the buffer when specific
+		// conditions are met to avoid the continuous resizing
+		// of the buffer for long lived containers.
+		//
+		// Move data to the front of the buffer if it's
+		// smaller than what reuseBuf can store
+		if bufLen > 0 && reuseBufLen >= bufLen {
+			n, _ := r.buf.Read(r.reuseBuf)
+			r.buf.Write(r.reuseBuf[0:n])
+			// Take action if the buffer has been reused too many
+			// times and if there's data in the buffer.
+			// The timeout is also used as means to avoid doing
+			// these operations more often or less often than
+			// required.
+			// The various conditions try to detect heavy activity
+			// in the buffer which might be indicators of heavy
+			// growth of the buffer.
+		} else if reuseCount >= r.maxReuse && bufLen > 0 {
+			now = time.Now()
+			duration = now.Sub(lastReset)
+			timeoutReached := duration >= r.resetTimeout
+
+			// The timeout has been reached and the
+			// buffered data couldn't be moved to the front
+			// of the buffer, so the buffer gets reset.
+			if timeoutReached && bufLen > reuseBufLen {
+				reset = true
+			}
+			// The amount of buffered data is too high now,
+			// reset the buffer.
+			if timeoutReached && maxBufLen >= r.bufLenResetThreshold {
+				reset = true
+			}
+			// Reset the buffer if a certain amount of
+			// data has gone through the buffer since the
+			// last reset.
+			if timeoutReached && dataSinceReset >= r.maxReadDataReset {
+				reset = true
+			}
+			// The buffered data is moved to a fresh buffer,
+			// swap the old buffer with the new one and
+			// reset all counters.
+			if reset {
+				newbuf := &bytes.Buffer{}
+				newbuf.ReadFrom(r.buf)
+				r.buf = newbuf
+				lastReset = now
+				reset = false
+				dataSinceReset = 0
+				maxBufLen = 0
+				reuseCount = 0
+			}
+		}
 		if err != nil {
 			r.err = err
 		} else {
 			r.buf.Write(r.drainBuf[0:n])
 		}
+		reuseCount++
 		r.wait.Signal()
 		r.Unlock()
 		if err != nil {
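The first branch of the logic above compacts rather than resets: when everything buffered fits into reuseBuf, draining the bytes.Buffer through the scratch slice and writing it straight back moves the data to offset zero, so the buffer's existing capacity can be reused instead of grown. A standalone sketch of that move, assuming nothing beyond the standard library (the sample string is arbitrary):

```go
package main

import (
	"bytes"
	"fmt"
)

func main() {
	buf := &bytes.Buffer{}
	buf.WriteString("hello world")
	buf.Next(6) // consume "hello ", leaving "world" stranded mid-buffer

	// The same move drain() performs with reuseBuf: Read empties the
	// buffer into the scratch slice, and the following Write starts
	// again at offset zero, reusing the already-allocated space.
	scratch := make([]byte, 4096)
	n, _ := buf.Read(scratch)
	buf.Write(scratch[:n])

	fmt.Printf("%q, len=%d\n", buf.String(), buf.Len()) // "world", len=5
}
```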
@@ -112,3 +217,11 @@ func (r *bufReader) Close() error {
 	}
 	return closer.Close()
 }
+
+func HashData(src io.Reader) (string, error) {
+	h := sha256.New()
+	if _, err := io.Copy(h, src); err != nil {
+		return "", err
+	}
+	return "sha256:" + hex.EncodeToString(h.Sum(nil)), nil
+}
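HashData is a small convenience: it streams any reader through SHA-256 and returns the digest with a `sha256:` prefix. A usage sketch (the input matches TestHashData further down, so the printed digest is the one asserted there):

```go
package main

import (
	"fmt"
	"strings"

	"github.com/docker/docker/pkg/ioutils"
)

func main() {
	digest, err := ioutils.HashData(strings.NewReader("hash-me"))
	if err != nil {
		panic(err)
	}
	// sha256:4d11186aed035cc624d553e10db358492c84a7cd6b9670d92123c144930450aa
	fmt.Println(digest)
}
```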
@@ -2,11 +2,92 @@ package ioutils

 import (
+	"bytes"
 	"fmt"
 	"io"
 	"io/ioutil"
 	"strings"
 	"testing"
 )

 // Implement io.Reader
 type errorReader struct{}

 func (r *errorReader) Read(p []byte) (int, error) {
 	return 0, fmt.Errorf("Error reader always fail.")
 }

 func TestReadCloserWrapperClose(t *testing.T) {
 	reader := strings.NewReader("A string reader")
 	wrapper := NewReadCloserWrapper(reader, func() error {
 		return fmt.Errorf("This will be called when closing")
 	})
 	err := wrapper.Close()
 	if err == nil || !strings.Contains(err.Error(), "This will be called when closing") {
 		t.Fatalf("readCloserWrapper should have call the anonymous func and thus, fail.")
 	}
 }

 func TestReaderErrWrapperReadOnError(t *testing.T) {
 	called := false
 	reader := &errorReader{}
 	wrapper := NewReaderErrWrapper(reader, func() {
 		called = true
 	})
 	_, err := wrapper.Read([]byte{})
 	if err == nil || !strings.Contains(err.Error(), "Error reader always fail.") {
 		t.Fatalf("readErrWrapper should returned an error")
 	}
 	if !called {
 		t.Fatalf("readErrWrapper should have call the anonymous function on failure")
 	}
 }

 func TestReaderErrWrapperRead(t *testing.T) {
 	called := false
 	reader := strings.NewReader("a string reader.")
 	wrapper := NewReaderErrWrapper(reader, func() {
 		called = true // Should not be called
 	})
 	// Read 20 byte (should be ok with the string above)
 	num, err := wrapper.Read(make([]byte, 20))
 	if err != nil {
 		t.Fatal(err)
 	}
 	if num != 16 {
 		t.Fatalf("readerErrWrapper should have read 16 byte, but read %d", num)
 	}
 }

+func TestNewBufReaderWithDrainbufAndBuffer(t *testing.T) {
+	reader, writer := io.Pipe()
+
+	drainBuffer := make([]byte, 1024)
+	buffer := bytes.Buffer{}
+	bufreader := NewBufReaderWithDrainbufAndBuffer(reader, drainBuffer, &buffer)
+
+	// Write everything down to a Pipe
+	// Usually, a pipe should block but because of the buffered reader,
+	// the writes will go through
+	done := make(chan bool)
+	go func() {
+		writer.Write([]byte("hello world"))
+		writer.Close()
+		done <- true
+	}()
+
+	// Drain the reader *after* everything has been written, just to verify
+	// it is indeed buffering
+	<-done
+
+	output, err := ioutil.ReadAll(bufreader)
+	if err != nil {
+		t.Fatal(err)
+	}
+	if !bytes.Equal(output, []byte("hello world")) {
+		t.Error(string(output))
+	}
+}
+
 func TestBufReader(t *testing.T) {
 	reader, writer := io.Pipe()
 	bufreader := NewBufReader(reader)
@@ -32,3 +113,105 @@ func TestBufReader(t *testing.T) {
 		t.Error(string(output))
 	}
 }
+
+func TestBufReaderCloseWithNonReaderCloser(t *testing.T) {
+	reader := strings.NewReader("buffer")
+	bufreader := NewBufReader(reader)
+
+	if err := bufreader.Close(); err != nil {
+		t.Fatal(err)
+	}
+
+}
+
+// implements io.ReadCloser
+type simpleReaderCloser struct{}
+
+func (r *simpleReaderCloser) Read(p []byte) (n int, err error) {
+	return 0, nil
+}
+
+func (r *simpleReaderCloser) Close() error {
+	return nil
+}
+
+func TestBufReaderCloseWithReaderCloser(t *testing.T) {
+	reader := &simpleReaderCloser{}
+	bufreader := NewBufReader(reader)
+
+	err := bufreader.Close()
+	if err != nil {
+		t.Fatal(err)
+	}
+
+}
+
+func TestHashData(t *testing.T) {
+	reader := strings.NewReader("hash-me")
+	actual, err := HashData(reader)
+	if err != nil {
+		t.Fatal(err)
+	}
+	expected := "sha256:4d11186aed035cc624d553e10db358492c84a7cd6b9670d92123c144930450aa"
+	if actual != expected {
+		t.Fatalf("Expecting %s, got %s", expected, actual)
+	}
+}
+
+type repeatedReader struct {
+	readCount int
+	maxReads  int
+	data      []byte
+}
+
+func newRepeatedReader(max int, data []byte) *repeatedReader {
+	return &repeatedReader{0, max, data}
+}
+
+func (r *repeatedReader) Read(p []byte) (int, error) {
+	if r.readCount >= r.maxReads {
+		return 0, io.EOF
+	}
+	r.readCount++
+	n := copy(p, r.data)
+	return n, nil
+}
+
+func testWithData(data []byte, reads int) {
+	reader := newRepeatedReader(reads, data)
+	bufReader := NewBufReader(reader)
+	io.Copy(ioutil.Discard, bufReader)
+}
+
+func Benchmark1M10BytesReads(b *testing.B) {
+	reads := 1000000
+	readSize := int64(10)
+	data := make([]byte, readSize)
+	b.SetBytes(readSize * int64(reads))
+	b.ResetTimer()
+	for i := 0; i < b.N; i++ {
+		testWithData(data, reads)
+	}
+}
+
+func Benchmark1M1024BytesReads(b *testing.B) {
+	reads := 1000000
+	readSize := int64(1024)
+	data := make([]byte, readSize)
+	b.SetBytes(readSize * int64(reads))
+	b.ResetTimer()
+	for i := 0; i < b.N; i++ {
+		testWithData(data, reads)
+	}
+}
+
+func Benchmark10k32KBytesReads(b *testing.B) {
+	reads := 10000
+	readSize := int64(32 * 1024)
+	data := make([]byte, readSize)
+	b.SetBytes(readSize * int64(reads))
+	b.ResetTimer()
+	for i := 0; i < b.N; i++ {
+		testWithData(data, reads)
+	}
+}
@@ -37,3 +37,24 @@ func NewWriteCloserWrapper(r io.Writer, closer func() error) io.WriteCloser {
 		closer: closer,
 	}
 }
+
+// Wrap a concrete io.Writer and hold a count of the number
+// of bytes written to the writer during a "session".
+// This can be convenient when write return is masked
+// (e.g., json.Encoder.Encode())
+type WriteCounter struct {
+	Count  int64
+	Writer io.Writer
+}
+
+func NewWriteCounter(w io.Writer) *WriteCounter {
+	return &WriteCounter{
+		Writer: w,
+	}
+}
+
+func (wc *WriteCounter) Write(p []byte) (count int, err error) {
+	count, err = wc.Writer.Write(p)
+	wc.Count += int64(count)
+	return
+}
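The comment above names the motivating case: json.Encoder.Encode returns only an error, so the byte count of each write is masked. Wrapping the destination in a WriteCounter recovers it. A short sketch (the encoded map is arbitrary):

```go
package main

import (
	"bytes"
	"encoding/json"
	"fmt"

	"github.com/docker/docker/pkg/ioutils"
)

func main() {
	var buf bytes.Buffer
	wc := ioutils.NewWriteCounter(&buf)

	// Encode reports no byte count; the counter accumulates it instead,
	// across as many writes as happen during the "session".
	if err := json.NewEncoder(wc).Encode(map[string]string{"hello": "world"}); err != nil {
		panic(err)
	}
	fmt.Printf("wrote %d bytes: %s", wc.Count, buf.String())
}
```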
Godeps/_workspace/src/github.com/docker/docker/pkg/ioutils/writers_test.go (generated, vendored, new file, 65 lines)

@@ -0,0 +1,65 @@
+package ioutils
+
+import (
+	"bytes"
+	"strings"
+	"testing"
+)
+
+func TestWriteCloserWrapperClose(t *testing.T) {
+	called := false
+	writer := bytes.NewBuffer([]byte{})
+	wrapper := NewWriteCloserWrapper(writer, func() error {
+		called = true
+		return nil
+	})
+	if err := wrapper.Close(); err != nil {
+		t.Fatal(err)
+	}
+	if !called {
+		t.Fatalf("writeCloserWrapper should have call the anonymous function.")
+	}
+}
+
+func TestNopWriteCloser(t *testing.T) {
+	writer := bytes.NewBuffer([]byte{})
+	wrapper := NopWriteCloser(writer)
+	if err := wrapper.Close(); err != nil {
+		t.Fatal("NopWriteCloser always return nil on Close.")
+	}
+
+}
+
+func TestNopWriter(t *testing.T) {
+	nw := &NopWriter{}
+	l, err := nw.Write([]byte{'c'})
+	if err != nil {
+		t.Fatal(err)
+	}
+	if l != 1 {
+		t.Fatalf("Expected 1 got %d", l)
+	}
+}
+
+func TestWriteCounter(t *testing.T) {
+	dummy1 := "This is a dummy string."
+	dummy2 := "This is another dummy string."
+	totalLength := int64(len(dummy1) + len(dummy2))
+
+	reader1 := strings.NewReader(dummy1)
+	reader2 := strings.NewReader(dummy2)
+
+	var buffer bytes.Buffer
+	wc := NewWriteCounter(&buffer)
+
+	reader1.WriteTo(wc)
+	reader2.WriteTo(wc)
+
+	if wc.Count != totalLength {
+		t.Errorf("Wrong count: %d vs. %d", wc.Count, totalLength)
+	}
+
+	if buffer.String() != dummy1+dummy2 {
+		t.Error("Wrong message written")
+	}
+}
@@ -58,13 +58,44 @@ func FromParam(p string) (Args, error) {
 	if len(p) == 0 {
 		return args, nil
 	}
-	err := json.Unmarshal([]byte(p), &args)
-	if err != nil {
+	if err := json.NewDecoder(strings.NewReader(p)).Decode(&args); err != nil {
 		return nil, err
 	}
 	return args, nil
 }

+func (filters Args) MatchKVList(field string, sources map[string]string) bool {
+	fieldValues := filters[field]
+
+	//do not filter if there is no filter set or cannot determine filter
+	if len(fieldValues) == 0 {
+		return true
+	}
+
+	if sources == nil || len(sources) == 0 {
+		return false
+	}
+
+outer:
+	for _, name2match := range fieldValues {
+		testKV := strings.SplitN(name2match, "=", 2)
+
+		for k, v := range sources {
+			if len(testKV) == 1 {
+				if k == testKV[0] {
+					continue outer
+				}
+			} else if k == testKV[0] && v == testKV[1] {
+				continue outer
+			}
+		}
+
+		return false
+	}
+
+	return true
+}
+
 func (filters Args) Match(field, source string) bool {
 	fieldValues := filters[field]
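FromParam decodes the JSON object the Docker CLI sends for --filter, each field mapped to a list of values; Match tests a single string against one field, and the new MatchKVList handles key=value fields such as labels, where every requested pair (or bare key) must be present. A hedged usage sketch; the filter string mirrors what `docker ps --filter status=running --filter label=match=me` would produce:

```go
package main

import (
	"fmt"

	"github.com/docker/docker/pkg/parsers/filters"
)

func main() {
	// Wire format: {"field": ["value", ...], ...}
	args, err := filters.FromParam(`{"status":["running"],"label":["match=me"]}`)
	if err != nil {
		panic(err)
	}

	// One field, one candidate value.
	fmt.Println(args.Match("status", "running")) // true

	// Key=value matching against a label map: every requested
	// pair (or bare key) must be found in sources.
	sources := map[string]string{"match": "me", "second": "tag"}
	fmt.Println(args.MatchKVList("label", sources)) // true
	fmt.Println(args.MatchKVList("label", nil))     // false
}
```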
@@ -1,2 +0,0 @@
-Victor Vieux <vieux@docker.com> (@vieux)
-Jessie Frazelle <jess@docker.com> (@jfrazelle)
@@ -26,6 +26,7 @@ func HumanDuration(d time.Duration) string {
 		return fmt.Sprintf("%d weeks", hours/24/7)
 	} else if hours < 24*365*2 {
 		return fmt.Sprintf("%d months", hours/24/30)
-	} else {
-		return fmt.Sprintf("%d years", hours/24/365)
 	}
+	return fmt.Sprintf("%f years", d.Hours()/24/365)
 }
@@ -41,6 +41,6 @@ func TestHumanDuration(t *testing.T) {
 	assertEquals(t, "13 months", HumanDuration(13*month))
 	assertEquals(t, "23 months", HumanDuration(23*month))
 	assertEquals(t, "24 months", HumanDuration(24*month))
-	assertEquals(t, "2 years", HumanDuration(24*month+2*week))
-	assertEquals(t, "3 years", HumanDuration(3*year+2*month))
+	assertEquals(t, "2.010959 years", HumanDuration(24*month+2*week))
+	assertEquals(t, "3.164384 years", HumanDuration(3*year+2*month))
 }
@@ -37,23 +37,25 @@ var (
 var decimapAbbrs = []string{"B", "kB", "MB", "GB", "TB", "PB", "EB", "ZB", "YB"}
 var binaryAbbrs = []string{"B", "KiB", "MiB", "GiB", "TiB", "PiB", "EiB", "ZiB", "YiB"}

+// CustomSize returns a human-readable approximation of a size
+// using custom format
+func CustomSize(format string, size float64, base float64, _map []string) string {
+	i := 0
+	for size >= base {
+		size = size / base
+		i++
+	}
+	return fmt.Sprintf(format, size, _map[i])
+}
+
 // HumanSize returns a human-readable approximation of a size
 // using SI standard (eg. "44kB", "17MB")
 func HumanSize(size float64) string {
-	return intToString(float64(size), 1000.0, decimapAbbrs)
+	return CustomSize("%.4g %s", float64(size), 1000.0, decimapAbbrs)
 }

 func BytesSize(size float64) string {
-	return intToString(size, 1024.0, binaryAbbrs)
-}
-
-func intToString(size, unit float64, _map []string) string {
-	i := 0
-	for size >= unit {
-		size = size / unit
-		i++
-	}
-	return fmt.Sprintf("%.4g %s", size, _map[i])
+	return CustomSize("%.4g %s", size, 1024.0, binaryAbbrs)
 }

 // FromHumanSize returns an integer from a human-readable specification of a
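The refactor above replaces the private intToString with an exported CustomSize, so callers can choose format, base, and unit table, while HumanSize and BytesSize become one-liners. A sketch of the resulting behavior (the values are arbitrary; the first two outputs follow the %.4g format shown in the hunk):

```go
package main

import (
	"fmt"

	"github.com/docker/docker/pkg/units"
)

func main() {
	fmt.Println(units.HumanSize(2048)) // "2.048 kB" (SI, base 1000)
	fmt.Println(units.BytesSize(2048)) // "2 KiB"    (binary, base 1024)

	// CustomSize exposes the shared loop with a caller-chosen format.
	decimal := []string{"B", "kB", "MB", "GB"}
	fmt.Println(units.CustomSize("%.2f %s", 123456789, 1000.0, decimal)) // "123.46 MB"
}
```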
@@ -4,7 +4,9 @@ Docker client library in Go

 Well maintained docker client library.

-Example:
+# How to use it?
+
+Here is an example showing how to use it:

 ```go
 package main
@@ -25,7 +27,7 @@ func main() {
 	docker, _ := dockerclient.NewDockerClient("unix:///var/run/docker.sock", nil)

 	// Get only running containers
-	containers, err := docker.ListContainers(false)
+	containers, err := docker.ListContainers(false, false, "")
 	if err != nil {
 		log.Fatal(err)
 	}
@@ -41,14 +43,19 @@ func main() {
 	}

 	// Create a container
-	containerConfig := &dockerclient.ContainerConfig{Image: "ubuntu:12.04", Cmd: []string{"bash"}}
-	containerId, err := docker.CreateContainer(containerConfig)
+	containerConfig := &dockerclient.ContainerConfig{
+		Image:       "ubuntu:14.04",
+		Cmd:         []string{"bash"},
+		AttachStdin: true,
+		Tty:         true}
+	containerId, err := docker.CreateContainer(containerConfig, "foobar")
 	if err != nil {
 		log.Fatal(err)
 	}

 	// Start the container
-	err = docker.StartContainer(containerId)
+	hostConfig := &dockerclient.HostConfig{}
+	err = docker.StartContainer(containerId, hostConfig)
 	if err != nil {
 		log.Fatal(err)
 	}
@@ -58,6 +65,19 @@ func main() {

 	// Listen to events
 	docker.StartMonitorEvents(eventCallback, nil)
+
+	// Hold the execution to look at the events coming
+	time.Sleep(3600 * time.Second)
 }
 ```
+
+# Maintainers
+
+List of people you can ping for feedback on Pull Requests or any questions.
+
+- [Sam Alba](https://github.com/samalba)
+- [Michael Crosby](https://github.com/crosbymichael)
+- [Andrea Luzzardi](https://github.com/aluzzardi)
+- [Victor Vieux](https://github.com/vieux)
+- [Evan Hazlett](https://github.com/ehazlett)
+- [Donald Huang](https://github.com/donhcd)
|
|||
|
||||
func (client *DockerClient) doRequest(method string, path string, body []byte, headers map[string]string) ([]byte, error) {
|
||||
b := bytes.NewBuffer(body)
|
||||
req, err := http.NewRequest(method, client.URL.String()+path, b)
|
||||
|
||||
reader, err := client.doStreamRequest(method, path, b, headers)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
defer reader.Close()
|
||||
data, err := ioutil.ReadAll(reader)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return data, nil
|
||||
}
|
||||
|
||||
func (client *DockerClient) doStreamRequest(method string, path string, in io.Reader, headers map[string]string) (io.ReadCloser, error) {
|
||||
if (method == "POST" || method == "PUT") && in == nil {
|
||||
in = bytes.NewReader(nil)
|
||||
}
|
||||
req, err := http.NewRequest(method, client.URL.String()+path, in)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
@ -83,18 +101,19 @@ func (client *DockerClient) doRequest(method string, path string, body []byte, h
|
|||
}
|
||||
return nil, err
|
||||
}
|
||||
defer resp.Body.Close()
|
||||
data, err := ioutil.ReadAll(resp.Body)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if resp.StatusCode == 404 {
|
||||
return nil, ErrNotFound
|
||||
}
|
||||
if resp.StatusCode >= 400 {
|
||||
defer resp.Body.Close()
|
||||
data, err := ioutil.ReadAll(resp.Body)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return nil, Error{StatusCode: resp.StatusCode, Status: resp.Status, msg: string(data)}
|
||||
}
|
||||
return data, nil
|
||||
|
||||
return resp.Body, nil
|
||||
}
|
||||
|
||||
func (client *DockerClient) Info() (*Info, error) {
|
||||
|
@ -464,3 +483,25 @@ func (client *DockerClient) RenameContainer(oldName string, newName string) erro
|
|||
_, err := client.doRequest("POST", uri, nil, nil)
|
||||
return err
|
||||
}
|
||||
|
||||
func (client *DockerClient) ImportImage(source string, repository string, tag string, tar io.Reader) (io.ReadCloser, error) {
|
||||
var fromSrc string
|
||||
v := &url.Values{}
|
||||
if source == "" {
|
||||
fromSrc = "-"
|
||||
} else {
|
||||
fromSrc = source
|
||||
}
|
||||
|
||||
v.Set("fromSrc", fromSrc)
|
||||
v.Set("repo", repository)
|
||||
if tag != "" {
|
||||
v.Set("tag", tag)
|
||||
}
|
||||
|
||||
var in io.Reader
|
||||
if fromSrc == "-" {
|
||||
in = tar
|
||||
}
|
||||
return client.doStreamRequest("POST", "/images/create?"+v.Encode(), in, nil)
|
||||
}
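ImportImage is why doRequest was split: progress output from POST /images/create is a stream, so the new doStreamRequest hands the raw response body back as an io.ReadCloser instead of buffering it. A hedged usage sketch (the socket path follows the README example; the tarball name is a placeholder):

```go
package main

import (
	"io"
	"log"
	"os"

	"github.com/samalba/dockerclient"
)

func main() {
	docker, _ := dockerclient.NewDockerClient("unix:///var/run/docker.sock", nil)

	// An empty source becomes fromSrc=-, which tells the daemon to read
	// the image from the tar stream sent as the request body.
	tar, err := os.Open("rootfs.tar") // placeholder local tarball
	if err != nil {
		log.Fatal(err)
	}
	defer tar.Close()

	stream, err := docker.ImportImage("", "example/imported", "latest", tar)
	if err != nil {
		log.Fatal(err)
	}
	defer stream.Close()
	io.Copy(os.Stdout, stream) // JSON progress messages from the daemon
}
```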
@@ -34,4 +34,5 @@ type Client interface {
 	PauseContainer(name string) error
 	UnpauseContainer(name string) error
 	RenameContainer(oldName string, newName string) error
+	ImportImage(source string, repository string, tag string, tar io.Reader) (io.ReadCloser, error)
 }
@@ -135,3 +135,8 @@ func (client *MockClient) RenameContainer(oldName string, newName string) error
 	args := client.Mock.Called(oldName, newName)
 	return args.Error(0)
 }
+
+func (client *MockClient) ImportImage(source string, repository string, tag string, tar io.Reader) (io.ReadCloser, error) {
+	args := client.Mock.Called(source, repository, tag, tar)
+	return args.Get(0).(io.ReadCloser), args.Error(1)
+}
@@ -46,6 +46,8 @@ type HostConfig struct {
 	SecurityOpt   []string
 	NetworkMode   string
 	RestartPolicy RestartPolicy
+	Ulimits       []Ulimit
+	LogConfig     LogConfig
 }

 type ExecConfig struct {
@@ -88,8 +90,11 @@ type ContainerInfo struct {
 	Running    bool
 	Paused     bool
 	Restarting bool
+	OOMKilled  bool
+	Dead       bool
 	Pid        int
 	ExitCode   int
 	Error      string // contains last known error when starting the container
 	StartedAt  time.Time
 	FinishedAt time.Time
 	Ghost      bool
@@ -256,3 +261,14 @@ type Stats struct {
 	MemoryStats MemoryStats `json:"memory_stats,omitempty"`
 	BlkioStats  BlkioStats  `json:"blkio_stats,omitempty"`
 }
+
+type Ulimit struct {
+	Name string `json:"name"`
+	Soft uint64 `json:"soft"`
+	Hard uint64 `json:"hard"`
+}
+
+type LogConfig struct {
+	Type   string            `json:"type"`
+	Config map[string]string `json:"config"`
+}
@@ -106,28 +106,104 @@ func getContainersJSON(c *context, w http.ResponseWriter, r *http.Request) {
 		return
 	}

 	// Parse flags.
 	all := r.Form.Get("all") == "1"
 	limit, _ := strconv.Atoi(r.Form.Get("limit"))

-	out := []*dockerclient.Container{}
+	// Parse filters.
+	filters, err := dockerfilters.FromParam(r.Form.Get("filters"))
+	if err != nil {
+		httpError(w, err.Error(), http.StatusInternalServerError)
+		return
+	}
+	filtExited := []int{}
+	if i, ok := filters["exited"]; ok {
+		for _, value := range i {
+			code, err := strconv.Atoi(value)
+			if err != nil {
+				httpError(w, err.Error(), http.StatusInternalServerError)
+				return
+			}
+			filtExited = append(filtExited, code)
+		}
+	}
+	if i, ok := filters["status"]; ok {
+		for _, value := range i {
+			if value == "exited" {
+				all = true
+			}
+		}
+	}
+
+	// Filtering: select the containers we want to return.
+	candidates := []*cluster.Container{}
 	for _, container := range c.cluster.Containers() {
-		tmp := (*container).Container
 		// Skip stopped containers unless -a was specified.
-		if !strings.Contains(tmp.Status, "Up") && !all && limit <= 0 {
+		if !container.Info.State.Running && !all && limit <= 0 {
 			continue
 		}

 		// Skip swarm containers unless -a was specified.
-		if strings.Split(tmp.Image, ":")[0] == "swarm" && !all {
+		if strings.Split(container.Image, ":")[0] == "swarm" && !all {
 			continue
 		}
+
+		// Apply filters.
+		if !filters.Match("name", strings.TrimPrefix(container.Names[0], "/")) {
+			continue
+		}
+		if !filters.Match("id", container.Id) {
+			continue
+		}
+		if !filters.MatchKVList("label", container.Config.Labels) {
+			continue
+		}
+		if !filters.Match("status", container.StateString()) {
+			continue
+		}
+
+		if len(filtExited) > 0 {
+			shouldSkip := true
+			for _, code := range filtExited {
+				if code == container.Info.State.ExitCode && !container.Info.State.Running {
+					shouldSkip = false
+					break
+				}
+			}
+			if shouldSkip {
+				continue
+			}
+		}
+
+		candidates = append(candidates, container)
+	}
+
+	// Sort the candidates and apply limits.
+	sort.Sort(sort.Reverse(ContainerSorter(candidates)))
+	if limit > 0 && limit < len(candidates) {
+		candidates = candidates[:limit]
+	}
+
+	// Convert cluster.Container back into dockerclient.Container.
+	out := []*dockerclient.Container{}
+	for _, container := range candidates {
+		// Create a copy of the underlying dockerclient.Container so we can
+		// make changes without messing with cluster.Container.
+		tmp := (*container).Container
+
+		// Update the Status. The one we have is stale from the last `docker ps` the engine sent.
+		// `Status()` will generate a new one
+		tmp.Status = container.Status()
+		if !container.Engine.IsHealthy() {
+			tmp.Status = "Pending"
+		}

 		// TODO remove the Node Name in the name when we have a good solution
 		tmp.Names = make([]string, len(container.Names))
 		for i, name := range container.Names {
 			tmp.Names[i] = "/" + container.Engine.Name + name
 		}

 		// insert node IP
 		tmp.Ports = make([]dockerclient.Port, len(container.Ports))
 		for i, port := range container.Ports {
@@ -139,14 +215,9 @@ func getContainersJSON(c *context, w http.ResponseWriter, r *http.Request) {
 		out = append(out, &tmp)
 	}

-	sort.Sort(sort.Reverse(ContainerSorter(out)))
-
 	// Finally, send them back to the CLI.
 	w.Header().Set("Content-Type", "application/json")
-	if limit > 0 && limit < len(out) {
-		json.NewEncoder(w).Encode(out[:limit])
-	} else {
-		json.NewEncoder(w).Encode(out)
-	}
+	json.NewEncoder(w).Encode(out)
 }

 // GET /containers/{name:.*}/json
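The reshuffled handler above filters first, then sorts newest-first, then truncates, so the limit now applies to the filtered, sorted candidate set before any conversion work, rather than at encode time. A minimal standalone sketch of that sort-then-limit step, with int64 creation timestamps standing in for containers:

```go
package main

import (
	"fmt"
	"sort"
)

// byCreated plays the role of ContainerSorter: ascending by creation time.
type byCreated []int64

func (s byCreated) Len() int           { return len(s) }
func (s byCreated) Swap(i, j int)      { s[i], s[j] = s[j], s[i] }
func (s byCreated) Less(i, j int) bool { return s[i] < s[j] }

func main() {
	candidates := []int64{1420070400, 1422748800, 1421280000}

	// Newest first, then cut down to the requested limit.
	sort.Sort(sort.Reverse(byCreated(candidates)))
	if limit := 2; limit > 0 && limit < len(candidates) {
		candidates = candidates[:limit]
	}
	fmt.Println(candidates) // [1422748800 1421280000]
}
```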
@@ -1,12 +1,10 @@
 package api

-import (
-	"github.com/samalba/dockerclient"
-)
+import "github.com/docker/swarm/cluster"

 // ContainerSorter implements the Sort interface to sort Docker containers.
 // It is not guaranteed to be a stable sort.
-type ContainerSorter []*dockerclient.Container
+type ContainerSorter []*cluster.Container

 // Len returns the number of containers to be sorted.
 func (s ContainerSorter) Len() int {
@@ -21,5 +19,5 @@ func (s ContainerSorter) Swap(i, j int) {
 // Less reports whether the container with index i should sort before the container with index j.
 // Containers are sorted chronologically by when they were created.
 func (s ContainerSorter) Less(i, j int) bool {
-	return s[i].Created < s[j].Created
+	return s[i].Info.Created < s[j].Info.Created
 }
@@ -1,6 +1,12 @@
 package cluster

-import "github.com/samalba/dockerclient"
+import (
+	"fmt"
+	"time"
+
+	"github.com/docker/docker/pkg/units"
+	"github.com/samalba/dockerclient"
+)

 // Container is exported
 type Container struct {
@@ -10,3 +16,50 @@ type Container struct {
 	Info   dockerclient.ContainerInfo
 	Engine *Engine
 }
+
+// Status returns a human-readable description of the state
+// Stolen from docker/docker/daemon/state.go
+func (c *Container) Status() string {
+	s := c.Info.State
+	if s.Running {
+		if s.Paused {
+			return fmt.Sprintf("Up %s (Paused)", units.HumanDuration(time.Now().UTC().Sub(s.StartedAt)))
+		}
+		if s.Restarting {
+			return fmt.Sprintf("Restarting (%d) %s ago", s.ExitCode, units.HumanDuration(time.Now().UTC().Sub(s.FinishedAt)))
+		}
+
+		return fmt.Sprintf("Up %s", units.HumanDuration(time.Now().UTC().Sub(s.StartedAt)))
+	}
+
+	if s.Dead {
+		return "Dead"
+	}
+
+	if s.FinishedAt.IsZero() {
+		return ""
+	}
+
+	return fmt.Sprintf("Exited (%d) %s ago", s.ExitCode, units.HumanDuration(time.Now().UTC().Sub(s.FinishedAt)))
+}
+
+// StateString returns a single string to describe state
+// Stolen from docker/docker/daemon/state.go
+func (c *Container) StateString() string {
+	s := c.Info.State
+	if s.Running {
+		if s.Paused {
+			return "paused"
+		}
+		if s.Restarting {
+			return "restarting"
+		}
+		return "running"
+	}
+
+	if s.Dead {
+		return "dead"
+	}
+
+	return "exited"
+}
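StateString collapses the inspected state into the same vocabulary `docker ps --filter status=...` uses (paused, restarting, running, dead, exited), which is what the handler's filters.Match("status", container.StateString()) relies on. A standalone sketch of the mapping with a stand-in struct (the real code reads these fields from Info.State):

```go
package main

import "fmt"

// state is a stand-in for the fields StateString consults.
type state struct {
	Running, Paused, Restarting, Dead bool
}

// stateString reproduces the mapping from the hunk above.
func stateString(s state) string {
	if s.Running {
		if s.Paused {
			return "paused"
		}
		if s.Restarting {
			return "restarting"
		}
		return "running"
	}
	if s.Dead {
		return "dead"
	}
	return "exited"
}

func main() {
	fmt.Println(stateString(state{Running: true, Paused: true})) // paused
	fmt.Println(stateString(state{}))                            // exited
}
```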
@@ -515,9 +515,9 @@ func (e *Engine) handler(ev *dockerclient.Event, _ chan error, args ...interface
 		// These events refer to images so there's no need to update
 		// containers.
 		e.RefreshImages()
-	case "start", "die":
-		// If the container is started or stopped, we have to do an inspect in
-		// order to get the new NetworkSettings.
+	case "die", "kill", "oom", "pause", "start", "stop", "unpause":
+		// If the container state changes, we have to do an inspect in
+		// order to update container.Info and get the new NetworkSettings.
 		e.refreshContainer(ev.Id, true)
 	default:
 		// Otherwise, do a "soft" refresh of the container.
@@ -44,3 +44,61 @@ function teardown() {
 	[ "${#lines[@]}" -eq 2 ]
 	[[ "${lines[1]}" == *"false"* ]]
 }
+
+@test "docker ps --filter" {
+	start_docker_with_busybox 2
+	swarm_manage
+
+	# Running
+	firstID=$(docker_swarm run -d --name name1 --label "match=me" --label "second=tag" busybox sleep 10000)
+	# Exited - successful
+	secondID=$(docker_swarm run -d --name name2 --label "match=me too" busybox true)
+	docker_swarm wait "$secondID"
+	# Exited - error
+	thirdID=$(docker_swarm run -d --name name3 --label "nomatch=me" busybox false)
+	docker_swarm wait "$thirdID"
+
+	# status
+	run docker_swarm ps -q --no-trunc --filter=status=exited
+	echo $output
+	[ "${#lines[@]}" -eq 2 ]
+	[[ "$output" != *"$firstID"* ]]
+	[[ "$output" == *"$secondID"* ]]
+	[[ "$output" == *"$thirdID"* ]]
+	run docker_swarm ps -q -a --no-trunc --filter=status=running
+	[[ "$output" == "$firstID" ]]
+
+	# id
+	run docker_swarm ps -a -q --no-trunc --filter=id="$secondID"
+	[[ "$output" == "$secondID" ]]
+	run docker_swarm ps -a -q --no-trunc --filter=id="bogusID"
+	[ "${#lines[@]}" -eq 0 ]
+
+	# name
+	run docker_swarm ps -a -q --no-trunc --filter=name=name3
+	[[ "$output" == "$thirdID" ]]
+	run docker_swarm ps -a -q --no-trunc --filter=name=badname
+	[ "${#lines[@]}" -eq 0 ]
+
+	# exit code
+	run docker_swarm ps -a -q --no-trunc --filter=exited=0
+	[[ "$output" == "$secondID" ]]
+	run docker_swarm ps -a -q --no-trunc --filter=exited=1
+	[[ "$output" == "$thirdID" ]]
+	run docker_swarm ps -a -q --no-trunc --filter=exited=99
+	[ "${#lines[@]}" -eq 0 ]
+
+	# labels
+	run docker_swarm ps -a -q --no-trunc --filter=label=match=me
+	[[ "$output" == "$firstID" ]]
+	run docker_swarm ps -a -q --no-trunc --filter=label=match=me --filter=label=second=tag
+	[[ "$output" == "$firstID" ]]
+	run docker_swarm ps -a -q --no-trunc --filter=label=match=me --filter=label=second=tag-no
+	[ "${#lines[@]}" -eq 0 ]
+	run docker_swarm ps -a -q --no-trunc --filter=label=match
+	[ "${#lines[@]}" -eq 2 ]
+	[[ "$output" == *"$firstID"* ]]
+	[[ "$output" == *"$secondID"* ]]
+	[[ "$output" != *"$thirdID"* ]]
+}