add support for docker volume ls & inspect

Signed-off-by: Victor Vieux <vieux@docker.com>
This commit is contained in:
Victor Vieux 2015-09-04 16:03:42 -07:00
parent 6787aa8619
commit 77c96908f0
16 changed files with 204 additions and 1 deletion

2
Godeps/Godeps.json generated
View File

@ -104,7 +104,7 @@
},
{
"ImportPath": "github.com/samalba/dockerclient",
"Rev": "a87700686584dc383ddb7e2bfb042e0f7a8c371b"
"Rev": "77b723e2c0d07a21cc9483dc6108f95bd403b576"
},
{
"ImportPath": "github.com/samuel/go-zookeeper/zk",

View File

@ -744,3 +744,16 @@ func (client *DockerClient) BuildImage(image *BuildImage) (io.ReadCloser, error)
uri := fmt.Sprintf("/%s/build?%s", APIVersion, v.Encode())
return client.doStreamRequest("POST", uri, image.Context, headers)
}
// ListVolumes fetches the volumes known to the daemon via GET /volumes
// and returns the decoded list.
func (client *DockerClient) ListVolumes() ([]*Volume, error) {
	data, err := client.doRequest("GET", fmt.Sprintf("/%s/volumes", APIVersion), nil, nil)
	if err != nil {
		return nil, err
	}
	var resp VolumesListResponse
	if err := json.Unmarshal(data, &resp); err != nil {
		return nil, err
	}
	return resp.Volumes, nil
}

View File

@ -45,4 +45,5 @@ type Client interface {
RenameContainer(oldName string, newName string) error
ImportImage(source string, repository string, tag string, tar io.Reader) (io.ReadCloser, error)
BuildImage(image *BuildImage) (io.ReadCloser, error)
ListVolumes() ([]*Volume, error)
}

View File

@ -170,3 +170,8 @@ func (client *MockClient) BuildImage(image *dockerclient.BuildImage) (io.ReadClo
args := client.Mock.Called(image)
return args.Get(0).(io.ReadCloser), args.Error(1)
}
// ListVolumes records the call on the mock and returns the values
// configured via client.On("ListVolumes", ...).
func (client *MockClient) ListVolumes() ([]*dockerclient.Volume, error) {
	res := client.Mock.Called()
	return res.Get(0).([]*dockerclient.Volume), res.Error(1)
}

View File

@ -145,3 +145,7 @@ func (client *NopClient) ImportImage(source string, repository string, tag strin
func (client *NopClient) BuildImage(image *dockerclient.BuildImage) (io.ReadCloser, error) {
return nil, ErrNoEngine
}
// ListVolumes always fails with ErrNoEngine: the NopClient is a placeholder
// used when no engine is reachable, so every API call is rejected.
func (client *NopClient) ListVolumes() ([]*dockerclient.Volume, error) {
	return nil, ErrNoEngine
}

View File

@ -442,3 +442,13 @@ type BuildImage struct {
CpuSetMems string
CgroupParent string
}
// Volume represents a single volume as reported by the Docker remote API.
type Volume struct {
	Name       string // Name is the name of the volume
	Driver     string // Driver is the Driver name used to create the volume
	Mountpoint string // Mountpoint is the location on disk of the volume
}

// VolumesListResponse is the wire format of the daemon's volume-list
// response (see ListVolumes), which wraps the volumes in an object.
type VolumesListResponse struct {
	Volumes []*Volume // Volumes is the list of volumes being returned
}

View File

@ -145,6 +145,16 @@ func getImagesJSON(c *context, w http.ResponseWriter, r *http.Request) {
json.NewEncoder(w).Encode(images)
}
// GET /volumes
func getVolumes(c *context, w http.ResponseWriter, r *http.Request) {
	// Wrap the list in an object so the body matches the daemon's
	// {"Volumes": [...]} response shape.
	resp := struct {
		Volumes []*cluster.Volume
	}{
		Volumes: c.cluster.Volumes(),
	}
	w.Header().Set("Content-Type", "application/json")
	json.NewEncoder(w).Encode(resp)
}
// GET /containers/ps
// GET /containers/json
func getContainersJSON(c *context, w http.ResponseWriter, r *http.Request) {
@ -532,6 +542,16 @@ func ping(c *context, w http.ResponseWriter, r *http.Request) {
w.Write([]byte{'O', 'K'})
}
// Proxy a request to the right node
func proxyVolume(c *context, w http.ResponseWriter, r *http.Request) {
	name := mux.Vars(r)["volumename"]
	volume := c.cluster.Volume(name)
	if volume == nil {
		httpError(w, fmt.Sprintf("No such volume: %s", name), http.StatusNotFound)
		return
	}
	// Forward the request to the engine that owns the volume.
	proxy(c.tlsConfig, volume.Engine.Addr, w, r)
}
// Proxy a request to the right node
func proxyContainer(c *context, w http.ResponseWriter, r *http.Request) {
name, container, err := getContainerFromVars(c, mux.Vars(r))

View File

@ -47,6 +47,8 @@ var routes = map[string]map[string]handler{
"/containers/{name:.*}/stats": proxyContainer,
"/containers/{name:.*}/attach/ws": proxyHijack,
"/exec/{execid:.*}/json": proxyContainer,
"/volumes": getVolumes,
"/volumes/{volumename:.*}": proxyVolume,
},
"POST": {
"/auth": proxyRandom,

View File

@ -31,6 +31,12 @@ type Cluster interface {
// cluster.Containers().Get(IDOrName)
Container(IDOrName string) *Container
// Return all volumes
Volumes() []*Volume
// Return one volume from the cluster
Volume(name string) *Volume
// Pull images
// `callback` can be called multiple time
// `where` is where it is being pulled

View File

@ -57,6 +57,7 @@ type Engine struct {
stopCh chan struct{}
containers map[string]*Container
images []*Image
volumes []*Volume
client dockerclient.Client
eventHandler EventHandler
healthy bool
@ -103,6 +104,9 @@ func (e *Engine) ConnectWithClient(client dockerclient.Client) error {
return err
}
// Do not check error as older daemon don't support this call
e.RefreshVolumes()
// Start the update loop.
go e.refreshLoop()
@ -198,6 +202,21 @@ func (e *Engine) RefreshImages() error {
return nil
}
// RefreshVolumes refreshes the list of volumes on the engine by asking the
// daemon for its current volumes and rebuilding e.volumes under the engine
// lock. Returns the client error unchanged if the daemon call fails (older
// daemons do not support this endpoint; callers may ignore the error).
func (e *Engine) RefreshVolumes() error {
	volumes, err := e.client.ListVolumes()
	if err != nil {
		return err
	}
	e.Lock()
	// defer guarantees the lock is released even if wrapping a volume
	// panics; the original inline Unlock would leave the engine locked.
	defer e.Unlock()
	// Pre-size the slice: the final length is known, so avoid repeated
	// append growth.
	e.volumes = make([]*Volume, 0, len(volumes))
	for _, volume := range volumes {
		e.volumes = append(e.volumes, &Volume{Volume: *volume, Engine: e})
	}
	return nil
}
// RefreshContainers will refresh the list and status of containers running on the engine. If `full` is
// true, each container will be inspected.
// FIXME: unexport this method after mesos scheduler stops using it directly
@ -312,6 +331,8 @@ func (e *Engine) refreshLoop() {
err = e.RefreshContainers(false)
if err == nil {
// Do not check error as older daemon don't support this call
e.RefreshVolumes()
err = e.RefreshImages()
}
@ -521,6 +542,18 @@ func (e *Engine) Images(all bool) []*Image {
return images
}
// Volumes returns a snapshot copy of all the volumes in the engine.
func (e *Engine) Volumes() []*Volume {
	e.RLock()
	defer e.RUnlock()
	// Hand back a copy so callers cannot mutate the engine's own slice.
	snapshot := make([]*Volume, len(e.volumes))
	copy(snapshot, e.volumes)
	return snapshot
}
// Image returns the image with IDOrName in the engine
func (e *Engine) Image(IDOrName string) *Image {
e.RLock()
@ -549,9 +582,11 @@ func (e *Engine) handler(ev *dockerclient.Event, _ chan error, args ...interface
// If the container state changes, we have to do an inspect in
// order to update container.Info and get the new NetworkSettings.
e.refreshContainer(ev.Id, true)
e.RefreshVolumes()
default:
// Otherwise, do a "soft" refresh of the container.
e.refreshContainer(ev.Id, false)
e.RefreshVolumes()
}
// If there is no event handler registered, abort right now.

View File

@ -73,6 +73,7 @@ func TestEngineCpusMemory(t *testing.T) {
client.On("Version").Return(mockVersion, nil)
client.On("ListContainers", true, false, "").Return([]dockerclient.Container{}, nil)
client.On("ListImages", mock.Anything).Return([]*dockerclient.Image{}, nil)
client.On("ListVolumes", mock.Anything).Return([]*dockerclient.Volume{}, nil)
client.On("StartMonitorEvents", mock.Anything, mock.Anything, mock.Anything).Return()
assert.NoError(t, engine.ConnectWithClient(client))
@ -94,6 +95,7 @@ func TestEngineSpecs(t *testing.T) {
client.On("Version").Return(mockVersion, nil)
client.On("ListContainers", true, false, "").Return([]dockerclient.Container{}, nil)
client.On("ListImages", mock.Anything).Return([]*dockerclient.Image{}, nil)
client.On("ListVolumes", mock.Anything).Return([]*dockerclient.Volume{}, nil)
client.On("StartMonitorEvents", mock.Anything, mock.Anything, mock.Anything).Return()
assert.NoError(t, engine.ConnectWithClient(client))
@ -123,6 +125,7 @@ func TestEngineState(t *testing.T) {
// The client will return one container at first, then a second one will appear.
client.On("ListContainers", true, false, "").Return([]dockerclient.Container{{Id: "one"}}, nil).Once()
client.On("ListImages", mock.Anything).Return([]*dockerclient.Image{}, nil).Once()
client.On("ListVolumes", mock.Anything).Return([]*dockerclient.Volume{}, nil)
client.On("InspectContainer", "one").Return(&dockerclient.ContainerInfo{Config: &dockerclient.ContainerConfig{CpuShares: 100}}, nil).Once()
client.On("ListContainers", true, false, fmt.Sprintf("{%q:[%q]}", "id", "two")).Return([]dockerclient.Container{{Id: "two"}}, nil).Once()
client.On("InspectContainer", "two").Return(&dockerclient.ContainerInfo{Config: &dockerclient.ContainerConfig{CpuShares: 100}}, nil).Once()
@ -168,6 +171,7 @@ func TestCreateContainer(t *testing.T) {
client.On("StartMonitorEvents", mock.Anything, mock.Anything, mock.Anything).Return()
client.On("ListContainers", true, false, "").Return([]dockerclient.Container{}, nil).Once()
client.On("ListImages", mock.Anything).Return([]*dockerclient.Image{}, nil).Once()
client.On("ListVolumes", mock.Anything).Return([]*dockerclient.Volume{}, nil)
assert.NoError(t, engine.ConnectWithClient(client))
assert.True(t, engine.isConnected())
@ -181,6 +185,7 @@ func TestCreateContainer(t *testing.T) {
client.On("CreateContainer", &mockConfig, name).Return(id, nil).Once()
client.On("ListContainers", true, false, fmt.Sprintf(`{"id":[%q]}`, id)).Return([]dockerclient.Container{{Id: id}}, nil).Once()
client.On("ListImages", mock.Anything).Return([]*dockerclient.Image{}, nil).Once()
client.On("ListVolumes", mock.Anything).Return([]*dockerclient.Volume{}, nil)
client.On("InspectContainer", id).Return(&dockerclient.ContainerInfo{Config: &config.ContainerConfig}, nil).Once()
container, err := engine.Create(config, name, false)
assert.Nil(t, err)
@ -204,6 +209,7 @@ func TestCreateContainer(t *testing.T) {
client.On("CreateContainer", &mockConfig, name).Return(id, nil).Once()
client.On("ListContainers", true, false, fmt.Sprintf(`{"id":[%q]}`, id)).Return([]dockerclient.Container{{Id: id}}, nil).Once()
client.On("ListImages", mock.Anything).Return([]*dockerclient.Image{}, nil).Once()
client.On("ListVolumes", mock.Anything).Return([]*dockerclient.Volume{}, nil)
client.On("InspectContainer", id).Return(&dockerclient.ContainerInfo{Config: &config.ContainerConfig}, nil).Once()
container, err = engine.Create(config, name, true)
assert.Nil(t, err)
@ -251,6 +257,7 @@ func TestUsedCpus(t *testing.T) {
client.On("StartMonitorEvents", mock.Anything, mock.Anything, mock.Anything).Return()
client.On("ListImages", mock.Anything).Return([]*dockerclient.Image{}, nil).Once()
client.On("ListContainers", true, false, "").Return([]dockerclient.Container{{Id: "test"}}, nil).Once()
client.On("ListVolumes", mock.Anything).Return([]*dockerclient.Volume{}, nil)
client.On("InspectContainer", "test").Return(&dockerclient.ContainerInfo{Config: &dockerclient.ContainerConfig{CpuShares: cpuShares}}, nil).Once()
engine.ConnectWithClient(client)
@ -279,6 +286,7 @@ func TestContainerRemovedDuringRefresh(t *testing.T) {
client.On("ListImages", mock.Anything).Return([]*dockerclient.Image{}, nil)
client.On("StartMonitorEvents", mock.Anything, mock.Anything, mock.Anything).Return()
client.On("ListContainers", true, false, "").Return([]dockerclient.Container{container1, container2}, nil)
client.On("ListVolumes", mock.Anything).Return([]*dockerclient.Volume{}, nil)
client.On("InspectContainer", "c1").Return(info1, errors.New("Not found"))
client.On("InspectContainer", "c2").Return(info2, nil)

View File

@ -292,6 +292,16 @@ func (c *Cluster) RenameContainer(container *cluster.Container, newName string)
return nil
}
// Volumes returns all the volumes in the cluster.
// This backend always returns nil: volume listing is not implemented here.
func (c *Cluster) Volumes() []*cluster.Volume {
	return nil
}

// Volume returns the volume with the given name from the cluster.
// This backend always returns nil: volume lookup is not implemented here.
func (c *Cluster) Volume(name string) *cluster.Volume {
	return nil
}
// listNodes returns all the nodes in the cluster.
func (c *Cluster) listNodes() []*node.Node {
c.RLock()

View File

@ -470,6 +470,39 @@ func (c *Cluster) Container(IDOrName string) *cluster.Container {
}
// Volumes returns all the volumes in the cluster.
func (c *Cluster) Volumes() []*cluster.Volume {
	c.RLock()
	defer c.RUnlock()
	// Start from a non-nil empty slice so an empty cluster encodes to
	// JSON as [] rather than null.
	all := []*cluster.Volume{}
	for _, engine := range c.engines {
		all = append(all, engine.Volumes()...)
	}
	return all
}
// Volume returns the volume with the given name from the cluster, or nil
// if no engine reports a volume by that name.
func (c *Cluster) Volume(name string) *cluster.Volume {
	// Abort immediately if the name is empty.
	if len(name) == 0 {
		return nil
	}
	c.RLock()
	defer c.RUnlock()
	for _, engine := range c.engines {
		for _, volume := range engine.Volumes() {
			if volume.Name == name {
				return volume
			}
		}
	}
	return nil
}
// listNodes returns all the engines in the cluster.
func (c *Cluster) listNodes() []*node.Node {
c.RLock()

View File

@ -130,6 +130,7 @@ func TestImportImage(t *testing.T) {
client.On("StartMonitorEvents", mock.Anything, mock.Anything, mock.Anything).Return()
client.On("ListContainers", true, false, "").Return([]dockerclient.Container{}, nil).Once()
client.On("ListImages", mock.Anything).Return([]*dockerclient.Image{}, nil)
client.On("ListVolumes", mock.Anything).Return([]*dockerclient.Volume{}, nil)
// connect client
engine.ConnectWithClient(client)
@ -178,6 +179,7 @@ func TestLoadImage(t *testing.T) {
client.On("StartMonitorEvents", mock.Anything, mock.Anything, mock.Anything).Return()
client.On("ListContainers", true, false, "").Return([]dockerclient.Container{}, nil).Once()
client.On("ListImages", mock.Anything).Return([]*dockerclient.Image{}, nil)
client.On("ListVolumes", mock.Anything).Return([]*dockerclient.Volume{}, nil)
// connect client
engine.ConnectWithClient(client)
@ -229,6 +231,7 @@ func TestTagImage(t *testing.T) {
client.On("StartMonitorEvents", mock.Anything, mock.Anything, mock.Anything).Return()
client.On("ListContainers", true, false, "").Return([]dockerclient.Container{}, nil).Once()
client.On("ListImages", mock.Anything).Return(images, nil)
client.On("ListVolumes", mock.Anything).Return([]*dockerclient.Volume{}, nil)
// connect client
engine.ConnectWithClient(client)

10
cluster/volume.go Normal file
View File

@ -0,0 +1,10 @@
package cluster

import "github.com/samalba/dockerclient"

// Volume is a cluster-level view of a volume: it embeds the raw
// dockerclient.Volume and records which Engine the volume lives on.
type Volume struct {
	dockerclient.Volume

	// Engine is the engine hosting this volume.
	Engine *Engine
}

View File

@ -0,0 +1,43 @@
#!/usr/bin/env bats

load ../helpers

# Tear down the swarm manager and all test daemons after each test.
function teardown() {
	swarm_manage_cleanup
	stop_docker
}

# Verify that `docker volume` through swarm lists one line per volume
# (plus the header line).
@test "docker volume" {
	start_docker_with_busybox 2
	swarm_manage

	# make sure no volume exist
	run docker_swarm volume
	[ "${#lines[@]}" -eq 1 ]

	# run
	docker_swarm run -d -v=/tmp busybox true

	run docker_swarm volume
	[ "${#lines[@]}" -eq 2 ]

	docker_swarm run -d -v=/tmp busybox true

	run docker_swarm volume
	[ "${#lines[@]}" -eq 3 ]
}

# Verify that `docker volume inspect` through swarm proxies to the owning
# engine and returns the volume's JSON (driver "local").
@test "docker volume inspect" {
	start_docker_with_busybox 2
	swarm_manage

	# run
	docker_swarm run -d -v=/tmp busybox true

	run docker_swarm volume ls -q
	[ "${#lines[@]}" -eq 1 ]

	run docker_swarm volume inspect ${output}
	[ "${#lines[@]}" -eq 7 ]
	[[ "${output}" == *"\"Driver\": \"local\""* ]]
}