mirror of https://github.com/docker/docs.git
store swarm config in driver; vendor Godeps; generate swarm token
Signed-off-by: Evan Hazlett <ejhazlett@gmail.com>
parent 16e8159503
commit a949e12b6e
Godeps/Godeps.json
@@ -115,6 +115,11 @@
    "ImportPath": "github.com/docker/libtrust",
    "Rev": "6b7834910dcbb3021adc193411d01f65595445fb"
},
{
    "ImportPath": "github.com/docker/swarm/discovery",
    "Comment": "v0.1.0-rc1-75-g86a028d",
    "Rev": "86a028d7c1cb593744dfc8d9e3c1b6b22fbc18b8"
},
{
    "ImportPath": "github.com/google/go-querystring/query",
    "Rev": "30f7a39f4a218feb5325f3aebc60c32a572a8274"
}
172 Godeps/_workspace/src/github.com/docker/swarm/discovery/README.md (generated, vendored, new file)
@@ -0,0 +1,172 @@
Discovery
=========

`Docker Swarm` comes with multiple Discovery backends.

## Examples

##### Using the hosted discovery service

```bash
# create a cluster
$ swarm create
6856663cdefdec325839a4b7e1de38e8 # <- this is your unique <cluster_id>

# on each of your nodes, start the swarm agent
# <node_ip> doesn't have to be public (e.g. 192.168.0.X),
# as long as the other nodes can reach it, it is fine.
$ swarm join --addr=<node_ip:2375> token://<cluster_id>

# start the manager on any machine or your laptop
$ swarm manage -H tcp://<swarm_ip:swarm_port> token://<cluster_id>

# use the regular docker cli
$ docker -H tcp://<swarm_ip:swarm_port> info
$ docker -H tcp://<swarm_ip:swarm_port> run ...
$ docker -H tcp://<swarm_ip:swarm_port> ps
$ docker -H tcp://<swarm_ip:swarm_port> logs ...
...

# list nodes in your cluster
$ swarm list token://<cluster_id>
<node_ip:2375>
```

##### Using a static file describing the cluster

```bash
# for each of your nodes, add a line to a file
# <node_ip> doesn't have to be public (e.g. 192.168.0.X),
# as long as the other nodes can reach it, it is fine.
$ echo <node_ip1:2375> >> /tmp/my_cluster
$ echo <node_ip2:2375> >> /tmp/my_cluster
$ echo <node_ip3:2375> >> /tmp/my_cluster

# start the manager on any machine or your laptop
$ swarm manage -H tcp://<swarm_ip:swarm_port> file:///tmp/my_cluster

# use the regular docker cli
$ docker -H tcp://<swarm_ip:swarm_port> info
$ docker -H tcp://<swarm_ip:swarm_port> run ...
$ docker -H tcp://<swarm_ip:swarm_port> ps
$ docker -H tcp://<swarm_ip:swarm_port> logs ...
...

# list nodes in your cluster
$ swarm list file:///tmp/my_cluster
<node_ip1:2375>
<node_ip2:2375>
<node_ip3:2375>
```

##### Using etcd

```bash
# on each of your nodes, start the swarm agent
# <node_ip> doesn't have to be public (e.g. 192.168.0.X),
# as long as the other nodes can reach it, it is fine.
$ swarm join --addr=<node_ip:2375> etcd://<etcd_ip>/<path>

# start the manager on any machine or your laptop
$ swarm manage -H tcp://<swarm_ip:swarm_port> etcd://<etcd_ip>/<path>

# use the regular docker cli
$ docker -H tcp://<swarm_ip:swarm_port> info
$ docker -H tcp://<swarm_ip:swarm_port> run ...
$ docker -H tcp://<swarm_ip:swarm_port> ps
$ docker -H tcp://<swarm_ip:swarm_port> logs ...
...

# list nodes in your cluster
$ swarm list etcd://<etcd_ip>/<path>
<node_ip:2375>
```

##### Using consul

```bash
# on each of your nodes, start the swarm agent
# <node_ip> doesn't have to be public (e.g. 192.168.0.X),
# as long as the other nodes can reach it, it is fine.
$ swarm join --addr=<node_ip:2375> consul://<consul_addr>/<path>

# start the manager on any machine or your laptop
$ swarm manage -H tcp://<swarm_ip:swarm_port> consul://<consul_addr>/<path>

# use the regular docker cli
$ docker -H tcp://<swarm_ip:swarm_port> info
$ docker -H tcp://<swarm_ip:swarm_port> run ...
$ docker -H tcp://<swarm_ip:swarm_port> ps
$ docker -H tcp://<swarm_ip:swarm_port> logs ...
...

# list nodes in your cluster
$ swarm list consul://<consul_addr>/<path>
<node_ip:2375>
```

##### Using zookeeper

```bash
# on each of your nodes, start the swarm agent
# <node_ip> doesn't have to be public (e.g. 192.168.0.X),
# as long as the other nodes can reach it, it is fine.
$ swarm join --addr=<node_ip:2375> zk://<zookeeper_addr1>,<zookeeper_addr2>/<path>

# start the manager on any machine or your laptop
$ swarm manage -H tcp://<swarm_ip:swarm_port> zk://<zookeeper_addr1>,<zookeeper_addr2>/<path>

# use the regular docker cli
$ docker -H tcp://<swarm_ip:swarm_port> info
$ docker -H tcp://<swarm_ip:swarm_port> run ...
$ docker -H tcp://<swarm_ip:swarm_port> ps
$ docker -H tcp://<swarm_ip:swarm_port> logs ...
...

# list nodes in your cluster
$ swarm list zk://<zookeeper_addr1>,<zookeeper_addr2>/<path>
<node_ip:2375>
```

##### Using a static list of IPs

```bash
# start the manager on any machine or your laptop
$ swarm manage -H <swarm_ip:swarm_port> nodes://<node_ip1:2375>,<node_ip2:2375>
# or
$ swarm manage -H <swarm_ip:swarm_port> <node_ip1:2375>,<node_ip2:2375>

# use the regular docker cli
$ docker -H <swarm_ip:swarm_port> info
$ docker -H <swarm_ip:swarm_port> run ...
$ docker -H <swarm_ip:swarm_port> ps
$ docker -H <swarm_ip:swarm_port> logs ...
...
```

## Contributing

Contributing a new discovery backend is easy: simply implement this interface:

```go
type DiscoveryService interface {
    Initialize(string, int) error
    Fetch() ([]*Node, error)
    Watch(WatchCallback)
    Register(string) error
}
```

###### Initialize
takes the discovery location without the scheme and a heartbeat (in seconds)

###### Fetch
returns the list of all the nodes from the discovery

###### Watch
triggers an update (`Fetch`); it can happen either via a timer (like `token`) or by using backend-specific features (like `etcd`)

###### Register
adds a new node to the discovery
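For a concrete picture, here is a minimal sketch of a custom backend (hypothetical: the `static` scheme and package name are illustrative, not part of this commit). It follows the same shape as the `nodes` backend vendored below:

```go
package static

import "github.com/docker/swarm/discovery"

// StaticDiscoveryService is a hypothetical backend serving a fixed
// set of nodes parsed once at initialization time.
type StaticDiscoveryService struct {
    nodes []*discovery.Node
}

func init() {
    // Register under the (hypothetical) "static" scheme.
    discovery.Register("static", &StaticDiscoveryService{})
}

// Initialize receives the URI without its scheme plus the heartbeat.
func (s *StaticDiscoveryService) Initialize(uris string, heartbeat int) error {
    node, err := discovery.NewNode(uris)
    if err != nil {
        return err
    }
    s.nodes = append(s.nodes, node)
    return nil
}

// Fetch returns the current set of nodes.
func (s *StaticDiscoveryService) Fetch() ([]*discovery.Node, error) {
    return s.nodes, nil
}

// Watch is a no-op: a static list never changes.
func (s *StaticDiscoveryService) Watch(callback discovery.WatchCallback) {}

// Register is not supported for a static list.
func (s *StaticDiscoveryService) Register(addr string) error {
    return discovery.ErrNotImplemented
}
```

Registering from `init` is what lets `discovery.New` resolve the scheme without the caller importing the backend explicitly.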
113 Godeps/_workspace/src/github.com/docker/swarm/discovery/consul/consul.go (generated, vendored, new file)
@@ -0,0 +1,113 @@
package consul

import (
    "fmt"
    "path"
    "strings"
    "time"

    log "github.com/Sirupsen/logrus"
    consul "github.com/armon/consul-api"
    "github.com/docker/swarm/discovery"
)

type ConsulDiscoveryService struct {
    heartbeat time.Duration
    client    *consul.Client
    prefix    string
    lastIndex uint64
}

func init() {
    discovery.Register("consul", &ConsulDiscoveryService{})
}

func (s *ConsulDiscoveryService) Initialize(uris string, heartbeat int) error {
    parts := strings.SplitN(uris, "/", 2)
    if len(parts) < 2 {
        return fmt.Errorf("invalid format %q, missing <path>", uris)
    }
    addr := parts[0]
    path := parts[1]

    config := consul.DefaultConfig()
    config.Address = addr

    client, err := consul.NewClient(config)
    if err != nil {
        return err
    }
    s.client = client
    s.heartbeat = time.Duration(heartbeat) * time.Second
    s.prefix = path + "/"
    kv := s.client.KV()
    p := &consul.KVPair{Key: s.prefix, Value: nil}
    if _, err = kv.Put(p, nil); err != nil {
        return err
    }
    _, meta, err := kv.Get(s.prefix, nil)
    if err != nil {
        return err
    }
    s.lastIndex = meta.LastIndex
    return nil
}

func (s *ConsulDiscoveryService) Fetch() ([]*discovery.Node, error) {
    kv := s.client.KV()
    pairs, _, err := kv.List(s.prefix, nil)
    if err != nil {
        return nil, err
    }

    var nodes []*discovery.Node

    for _, pair := range pairs {
        if pair.Key == s.prefix {
            continue
        }
        node, err := discovery.NewNode(string(pair.Value))
        if err != nil {
            return nil, err
        }
        nodes = append(nodes, node)
    }
    return nodes, nil
}

func (s *ConsulDiscoveryService) Watch(callback discovery.WatchCallback) {
    for _ = range s.waitForChange() {
        log.WithField("name", "consul").Debug("Discovery watch triggered")
        nodes, err := s.Fetch()
        if err == nil {
            callback(nodes)
        }
    }
}

func (s *ConsulDiscoveryService) Register(addr string) error {
    kv := s.client.KV()
    p := &consul.KVPair{Key: path.Join(s.prefix, addr), Value: []byte(addr)}
    _, err := kv.Put(p, nil)
    return err
}

func (s *ConsulDiscoveryService) waitForChange() <-chan uint64 {
    c := make(chan uint64)
    go func() {
        for {
            kv := s.client.KV()
            option := &consul.QueryOptions{
                WaitIndex: s.lastIndex,
                WaitTime:  s.heartbeat}
            _, meta, err := kv.List(s.prefix, option)
            if err != nil {
                log.WithField("name", "consul").Errorf("Discovery error: %v", err)
                break
            }
            s.lastIndex = meta.LastIndex
            c <- s.lastIndex
        }
        close(c)
    }()
    return c
}
20 Godeps/_workspace/src/github.com/docker/swarm/discovery/consul/consul_test.go (generated, vendored, new file)
@@ -0,0 +1,20 @@
package consul

import (
    "testing"

    "github.com/stretchr/testify/assert"
)

func TestInitialize(t *testing.T) {
    discovery := &ConsulDiscoveryService{}

    assert.Equal(t, discovery.Initialize("127.0.0.1", 0).Error(), "invalid format \"127.0.0.1\", missing <path>")

    assert.Error(t, discovery.Initialize("127.0.0.1/path", 0))
    assert.Equal(t, discovery.prefix, "path/")

    assert.Error(t, discovery.Initialize("127.0.0.1,127.0.0.2,127.0.0.3/path", 0))
    assert.Equal(t, discovery.prefix, "path/")
}
78 Godeps/_workspace/src/github.com/docker/swarm/discovery/discovery.go (generated, vendored, new file)
@@ -0,0 +1,78 @@
package discovery

import (
    "errors"
    "fmt"
    "net"
    "strings"

    log "github.com/Sirupsen/logrus"
)

type Node struct {
    Host string
    Port string
}

func NewNode(url string) (*Node, error) {
    host, port, err := net.SplitHostPort(url)
    if err != nil {
        return nil, err
    }
    return &Node{host, port}, nil
}

func (n Node) String() string {
    return fmt.Sprintf("%s:%s", n.Host, n.Port)
}

type WatchCallback func(nodes []*Node)

type DiscoveryService interface {
    Initialize(string, int) error
    Fetch() ([]*Node, error)
    Watch(WatchCallback)
    Register(string) error
}

var (
    discoveries       map[string]DiscoveryService
    ErrNotSupported   = errors.New("discovery service not supported")
    ErrNotImplemented = errors.New("not implemented in this discovery service")
)

func init() {
    discoveries = make(map[string]DiscoveryService)
}

func Register(scheme string, d DiscoveryService) error {
    if _, exists := discoveries[scheme]; exists {
        return fmt.Errorf("scheme already registered %s", scheme)
    }
    log.WithField("name", scheme).Debug("Registering discovery service")
    discoveries[scheme] = d

    return nil
}

func parse(rawurl string) (string, string) {
    parts := strings.SplitN(rawurl, "://", 2)

    // node1:port,node2:port => nodes://node1:port,node2:port
    if len(parts) == 1 {
        return "nodes", parts[0]
    }
    return parts[0], parts[1]
}

func New(rawurl string, heartbeat int) (DiscoveryService, error) {
    scheme, uri := parse(rawurl)

    if discovery, exists := discoveries[scheme]; exists {
        log.WithFields(log.Fields{"name": scheme, "uri": uri}).Debug("Initializing discovery service")
        err := discovery.Initialize(uri, heartbeat)
        return discovery, err
    }

    return nil, ErrNotSupported
}
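As a usage sketch of the registry above (hypothetical caller code, not part of this commit): backends register themselves from their `init` functions, so a blank import is enough before calling `New`. The heartbeat value of 25 is an arbitrary example:

```go
package main

import (
    "fmt"
    "log"

    "github.com/docker/swarm/discovery"
    // Imported for its side effect: the file backend registers itself
    // under the "file" scheme in its init function.
    _ "github.com/docker/swarm/discovery/file"
)

func main() {
    // parse() maps "file:///tmp/my_cluster" to scheme "file" and
    // uri "/tmp/my_cluster"; New then calls Initialize on the backend.
    d, err := discovery.New("file:///tmp/my_cluster", 25)
    if err != nil {
        log.Fatal(err)
    }
    nodes, err := d.Fetch()
    if err != nil {
        log.Fatal(err)
    }
    for _, node := range nodes {
        fmt.Println(node) // Node's String method prints host:port
    }
}
```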
39 Godeps/_workspace/src/github.com/docker/swarm/discovery/discovery_test.go (generated, vendored, new file)
@@ -0,0 +1,39 @@
package discovery

import (
    "testing"

    "github.com/stretchr/testify/assert"
)

func TestNewNode(t *testing.T) {
    node, err := NewNode("127.0.0.1:2375")
    assert.Equal(t, node.Host, "127.0.0.1")
    assert.Equal(t, node.Port, "2375")
    assert.NoError(t, err)

    _, err = NewNode("127.0.0.1")
    assert.Error(t, err)
}

func TestParse(t *testing.T) {
    scheme, uri := parse("127.0.0.1:2375")
    assert.Equal(t, scheme, "nodes")
    assert.Equal(t, uri, "127.0.0.1:2375")

    scheme, uri = parse("localhost:2375")
    assert.Equal(t, scheme, "nodes")
    assert.Equal(t, uri, "localhost:2375")

    scheme, uri = parse("scheme://127.0.0.1:2375")
    assert.Equal(t, scheme, "scheme")
    assert.Equal(t, uri, "127.0.0.1:2375")

    scheme, uri = parse("scheme://localhost:2375")
    assert.Equal(t, scheme, "scheme")
    assert.Equal(t, uri, "localhost:2375")

    scheme, uri = parse("")
    assert.Equal(t, scheme, "nodes")
    assert.Equal(t, uri, "")
}
87 Godeps/_workspace/src/github.com/docker/swarm/discovery/etcd/etcd.go (generated, vendored, new file)
@@ -0,0 +1,87 @@
package etcd

import (
    "fmt"
    "path"
    "strings"

    log "github.com/Sirupsen/logrus"
    "github.com/coreos/go-etcd/etcd"
    "github.com/docker/swarm/discovery"
)

type EtcdDiscoveryService struct {
    ttl    uint64
    client *etcd.Client
    path   string
}

func init() {
    discovery.Register("etcd", &EtcdDiscoveryService{})
}

func (s *EtcdDiscoveryService) Initialize(uris string, heartbeat int) error {
    var (
        // split here because uris can contain multiple IPs
        // like `etcd://192.168.0.1,192.168.0.2,192.168.0.3/path`
        parts    = strings.SplitN(uris, "/", 2)
        ips      = strings.Split(parts[0], ",")
        machines []string
    )

    if len(parts) != 2 {
        return fmt.Errorf("invalid format %q, missing <path>", uris)
    }

    for _, ip := range ips {
        machines = append(machines, "http://"+ip)
    }

    s.client = etcd.NewClient(machines)
    s.ttl = uint64(heartbeat * 3 / 2)
    s.path = "/" + parts[1] + "/"
    if _, err := s.client.CreateDir(s.path, s.ttl); err != nil {
        if etcdError, ok := err.(*etcd.EtcdError); ok {
            if etcdError.ErrorCode != 105 { // skip key already exists
                return err
            }
        } else {
            return err
        }
    }
    return nil
}

func (s *EtcdDiscoveryService) Fetch() ([]*discovery.Node, error) {
    resp, err := s.client.Get(s.path, true, true)
    if err != nil {
        return nil, err
    }

    var nodes []*discovery.Node

    for _, n := range resp.Node.Nodes {
        node, err := discovery.NewNode(n.Value)
        if err != nil {
            return nil, err
        }
        nodes = append(nodes, node)
    }
    return nodes, nil
}

func (s *EtcdDiscoveryService) Watch(callback discovery.WatchCallback) {
    watchChan := make(chan *etcd.Response)
    go s.client.Watch(s.path, 0, true, watchChan, nil)
    for _ = range watchChan {
        log.WithField("name", "etcd").Debug("Discovery watch triggered")
        nodes, err := s.Fetch()
        if err == nil {
            callback(nodes)
        }
    }
}

func (s *EtcdDiscoveryService) Register(addr string) error {
    _, err := s.client.Set(path.Join(s.path, addr), addr, s.ttl)
    return err
}
19 Godeps/_workspace/src/github.com/docker/swarm/discovery/etcd/etcd_test.go (generated, vendored, new file)
@@ -0,0 +1,19 @@
package etcd

import (
    "testing"

    "github.com/stretchr/testify/assert"
)

func TestInitialize(t *testing.T) {
    discovery := &EtcdDiscoveryService{}

    assert.Equal(t, discovery.Initialize("127.0.0.1", 0).Error(), "invalid format \"127.0.0.1\", missing <path>")

    assert.Error(t, discovery.Initialize("127.0.0.1/path", 0))
    assert.Equal(t, discovery.path, "/path/")

    assert.Error(t, discovery.Initialize("127.0.0.1,127.0.0.2,127.0.0.3/path", 0))
    assert.Equal(t, discovery.path, "/path/")
}
57 Godeps/_workspace/src/github.com/docker/swarm/discovery/file/file.go (generated, vendored, new file)
@@ -0,0 +1,57 @@
package file

import (
    "io/ioutil"
    "strings"
    "time"

    "github.com/docker/swarm/discovery"
)

type FileDiscoveryService struct {
    heartbeat int
    path      string
}

func init() {
    discovery.Register("file", &FileDiscoveryService{})
}

func (s *FileDiscoveryService) Initialize(path string, heartbeat int) error {
    s.path = path
    s.heartbeat = heartbeat
    return nil
}

func (s *FileDiscoveryService) Fetch() ([]*discovery.Node, error) {
    data, err := ioutil.ReadFile(s.path)
    if err != nil {
        return nil, err
    }

    var nodes []*discovery.Node

    for _, line := range strings.Split(string(data), "\n") {
        if line != "" {
            node, err := discovery.NewNode(line)
            if err != nil {
                return nil, err
            }
            nodes = append(nodes, node)
        }
    }
    return nodes, nil
}

func (s *FileDiscoveryService) Watch(callback discovery.WatchCallback) {
    for _ = range time.Tick(time.Duration(s.heartbeat) * time.Second) {
        nodes, err := s.Fetch()
        if err == nil {
            callback(nodes)
        }
    }
}

func (s *FileDiscoveryService) Register(addr string) error {
    return discovery.ErrNotImplemented
}
18 Godeps/_workspace/src/github.com/docker/swarm/discovery/file/file_test.go (generated, vendored, new file)
@@ -0,0 +1,18 @@
package file

import (
    "testing"

    "github.com/stretchr/testify/assert"
)

func TestInitialize(t *testing.T) {
    discovery := &FileDiscoveryService{}
    discovery.Initialize("/path/to/file", 0)
    assert.Equal(t, discovery.path, "/path/to/file")
}

func TestRegister(t *testing.T) {
    discovery := &FileDiscoveryService{path: "/path/to/file"}
    assert.Error(t, discovery.Register("0.0.0.0"))
}
37 Godeps/_workspace/src/github.com/docker/swarm/discovery/nodes/nodes.go (generated, vendored, new file)
@@ -0,0 +1,37 @@
package nodes

import (
    "strings"

    "github.com/docker/swarm/discovery"
)

type NodesDiscoveryService struct {
    nodes []*discovery.Node
}

func init() {
    discovery.Register("nodes", &NodesDiscoveryService{})
}

func (s *NodesDiscoveryService) Initialize(uris string, _ int) error {
    for _, ip := range strings.Split(uris, ",") {
        node, err := discovery.NewNode(ip)
        if err != nil {
            return err
        }
        s.nodes = append(s.nodes, node)
    }

    return nil
}

func (s *NodesDiscoveryService) Fetch() ([]*discovery.Node, error) {
    return s.nodes, nil
}

func (s *NodesDiscoveryService) Watch(callback discovery.WatchCallback) {
}

func (s *NodesDiscoveryService) Register(addr string) error {
    return discovery.ErrNotImplemented
}
20 Godeps/_workspace/src/github.com/docker/swarm/discovery/nodes/nodes_test.go (generated, vendored, new file)
@@ -0,0 +1,20 @@
package nodes

import (
    "testing"

    "github.com/stretchr/testify/assert"
)

func TestInitialise(t *testing.T) {
    discovery := &NodesDiscoveryService{}
    discovery.Initialize("1.1.1.1:1111,2.2.2.2:2222", 0)
    assert.Equal(t, len(discovery.nodes), 2)
    assert.Equal(t, discovery.nodes[0].String(), "1.1.1.1:1111")
    assert.Equal(t, discovery.nodes[1].String(), "2.2.2.2:2222")
}

func TestRegister(t *testing.T) {
    discovery := &NodesDiscoveryService{}
    assert.Error(t, discovery.Register("0.0.0.0"))
}
31 Godeps/_workspace/src/github.com/docker/swarm/discovery/token/README.md (generated, vendored, new file)
@@ -0,0 +1,31 @@
# discovery.hub.docker.com

Docker Swarm comes with a simple discovery service built into the [Docker Hub](http://hub.docker.com).

The discovery service is still in alpha stage and currently hosted at `http://discovery-stage.hub.docker.com`.

##### Create a new cluster
`-> POST http://discovery.hub.docker.com/v1/clusters (data="")`

`<- <token>`

##### Add new nodes to a cluster
`-> POST http://discovery.hub.docker.com/v1/clusters/<token> (data="<ip:port1>")`

`<- OK`

`-> POST http://discovery.hub.docker.com/v1/clusters/<token> (data="<ip:port2>")`

`<- OK`

##### List nodes in a cluster
`-> GET http://discovery.hub.docker.com/v1/clusters/<token>`

`<- ["<ip:port1>", "<ip:port2>"]`

##### Delete a cluster (all the nodes in a cluster)
`-> DELETE http://discovery.hub.docker.com/v1/clusters/<token>`

`<- OK`
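A sketch of the same flow in Go (assuming the staging endpoint noted above is reachable; this mirrors the calls made by the vendored `token` backend below and is illustrative, not part of the commit):

```go
package main

import (
    "fmt"
    "io/ioutil"
    "log"
    "net/http"
    "strings"
)

const discoveryURL = "https://discovery-stage.hub.docker.com/v1"

func main() {
    // Create a cluster: the service answers with a fresh token.
    resp, err := http.Post(discoveryURL+"/clusters", "", nil)
    if err != nil {
        log.Fatal(err)
    }
    token, err := ioutil.ReadAll(resp.Body)
    resp.Body.Close()
    if err != nil {
        log.Fatal(err)
    }

    // Register a node by POSTing its address to the cluster.
    addr := strings.NewReader("127.0.0.1:2375")
    if _, err := http.Post(discoveryURL+"/clusters/"+string(token), "application/json", addr); err != nil {
        log.Fatal(err)
    }

    // List the nodes back (a JSON array of "<ip:port>" strings).
    list, err := http.Get(discoveryURL + "/clusters/" + string(token))
    if err != nil {
        log.Fatal(err)
    }
    defer list.Body.Close()
    body, _ := ioutil.ReadAll(list.Body)
    fmt.Println(string(body))
}
```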
98 Godeps/_workspace/src/github.com/docker/swarm/discovery/token/token.go (generated, vendored, new file)
@@ -0,0 +1,98 @@
package token

import (
    "encoding/json"
    "fmt"
    "io/ioutil"
    "net/http"
    "strings"
    "time"

    "github.com/docker/swarm/discovery"
)

const DISCOVERY_URL = "https://discovery-stage.hub.docker.com/v1"

type TokenDiscoveryService struct {
    heartbeat int
    url       string
    token     string
}

func init() {
    discovery.Register("token", &TokenDiscoveryService{})
}

func (s *TokenDiscoveryService) Initialize(urltoken string, heartbeat int) error {
    if i := strings.LastIndex(urltoken, "/"); i != -1 {
        s.url = "https://" + urltoken[:i]
        s.token = urltoken[i+1:]
    } else {
        s.url = DISCOVERY_URL
        s.token = urltoken
    }
    s.heartbeat = heartbeat

    return nil
}

// Fetch returns the list of nodes for the discovery service at the specified endpoint
func (s *TokenDiscoveryService) Fetch() ([]*discovery.Node, error) {
    resp, err := http.Get(fmt.Sprintf("%s/%s/%s", s.url, "clusters", s.token))
    if err != nil {
        return nil, err
    }

    if resp.Body != nil {
        defer resp.Body.Close()
    }

    var addrs []string
    if resp.StatusCode == http.StatusOK {
        if err := json.NewDecoder(resp.Body).Decode(&addrs); err != nil {
            return nil, err
        }
    } else {
        return nil, fmt.Errorf("Failed to fetch nodes, Discovery service returned %d HTTP status code", resp.StatusCode)
    }

    var nodes []*discovery.Node
    for _, addr := range addrs {
        node, err := discovery.NewNode(addr)
        if err != nil {
            return nil, err
        }
        nodes = append(nodes, node)
    }

    return nodes, nil
}

func (s *TokenDiscoveryService) Watch(callback discovery.WatchCallback) {
    for _ = range time.Tick(time.Duration(s.heartbeat) * time.Second) {
        nodes, err := s.Fetch()
        if err == nil {
            callback(nodes)
        }
    }
}

// Register adds a new node, identified by addr, to the discovery service
func (s *TokenDiscoveryService) Register(addr string) error {
    buf := strings.NewReader(addr)

    _, err := http.Post(fmt.Sprintf("%s/%s/%s", s.url,
        "clusters", s.token), "application/json", buf)
    return err
}

// CreateCluster returns a unique cluster token
func (s *TokenDiscoveryService) CreateCluster() (string, error) {
    resp, err := http.Post(fmt.Sprintf("%s/%s", s.url, "clusters"), "", nil)
    if err != nil {
        return "", err
    }
    token, err := ioutil.ReadAll(resp.Body)
    return string(token), err
}
31 Godeps/_workspace/src/github.com/docker/swarm/discovery/token/token_test.go (generated, vendored, new file)
@@ -0,0 +1,31 @@
package token

import (
    "testing"

    "github.com/stretchr/testify/assert"
)

func TestInitialize(t *testing.T) {
    discovery := &TokenDiscoveryService{}
    discovery.Initialize("token", 0)
    assert.Equal(t, discovery.token, "token")
    assert.Equal(t, discovery.url, DISCOVERY_URL)

    discovery.Initialize("custom/path/token", 0)
    assert.Equal(t, discovery.token, "token")
    assert.Equal(t, discovery.url, "https://custom/path")
}

func TestRegister(t *testing.T) {
    discovery := &TokenDiscoveryService{token: "TEST_TOKEN", url: DISCOVERY_URL}
    expected := "127.0.0.1:2675"
    assert.NoError(t, discovery.Register(expected))

    addrs, err := discovery.Fetch()
    assert.NoError(t, err)
    assert.Equal(t, len(addrs), 1)
    assert.Equal(t, addrs[0].String(), expected)

    assert.NoError(t, discovery.Register(expected))
}
160 Godeps/_workspace/src/github.com/docker/swarm/discovery/zookeeper/zookeeper.go (generated, vendored, new file)
@@ -0,0 +1,160 @@
package zookeeper

import (
    "fmt"
    "path"
    "strings"
    "time"

    log "github.com/Sirupsen/logrus"
    "github.com/docker/swarm/discovery"
    "github.com/samuel/go-zookeeper/zk"
)

type ZkDiscoveryService struct {
    conn      *zk.Conn
    path      []string
    heartbeat int
}

func init() {
    discovery.Register("zk", &ZkDiscoveryService{})
}

func (s *ZkDiscoveryService) fullpath() string {
    return "/" + strings.Join(s.path, "/")
}

func (s *ZkDiscoveryService) createFullpath() error {
    for i := 1; i <= len(s.path); i++ {
        newpath := "/" + strings.Join(s.path[:i], "/")
        _, err := s.conn.Create(newpath, []byte{1}, 0, zk.WorldACL(zk.PermAll))
        if err != nil {
            // It's OK if the key already existed. Just skip.
            if err != zk.ErrNodeExists {
                return err
            }
        }
    }
    return nil
}

func (s *ZkDiscoveryService) Initialize(uris string, heartbeat int) error {
    var (
        // split here because uris can contain multiple IPs
        // like `zk://192.168.0.1,192.168.0.2,192.168.0.3/path`
        parts = strings.SplitN(uris, "/", 2)
        ips   = strings.Split(parts[0], ",")
    )

    if len(parts) != 2 {
        return fmt.Errorf("invalid format %q, missing <path>", uris)
    }

    if strings.Contains(parts[1], "/") {
        s.path = strings.Split(parts[1], "/")
    } else {
        s.path = []string{parts[1]}
    }

    conn, _, err := zk.Connect(ips, time.Second)
    if err != nil {
        return err
    }

    s.conn = conn
    s.heartbeat = heartbeat
    err = s.createFullpath()
    if err != nil {
        return err
    }

    return nil
}

func (s *ZkDiscoveryService) Fetch() ([]*discovery.Node, error) {
    addrs, _, err := s.conn.Children(s.fullpath())
    if err != nil {
        return nil, err
    }

    return s.createNodes(addrs)
}

func (s *ZkDiscoveryService) createNodes(addrs []string) ([]*discovery.Node, error) {
    nodes := make([]*discovery.Node, 0)
    if addrs == nil {
        return nodes, nil
    }

    for _, addr := range addrs {
        node, err := discovery.NewNode(addr)
        if err != nil {
            return nil, err
        }
        nodes = append(nodes, node)
    }
    return nodes, nil
}

func (s *ZkDiscoveryService) Watch(callback discovery.WatchCallback) {
    addrs, _, eventChan, err := s.conn.ChildrenW(s.fullpath())
    if err != nil {
        log.WithField("name", "zk").Debug("Discovery watch aborted")
        return
    }
    nodes, err := s.createNodes(addrs)
    if err == nil {
        callback(nodes)
    }

    for e := range eventChan {
        if e.Type == zk.EventNodeChildrenChanged {
            log.WithField("name", "zk").Debug("Discovery watch triggered")
            nodes, err := s.Fetch()
            if err == nil {
                callback(nodes)
            }
        }
    }
}

func (s *ZkDiscoveryService) Register(addr string) error {
    nodePath := path.Join(s.fullpath(), addr)

    // check whether the parent path exists first
    exist, _, err := s.conn.Exists(s.fullpath())
    if err != nil {
        return err
    }

    // if the parent path does not exist yet
    if exist == false {
        // create the parent first
        err = s.createFullpath()
        if err != nil {
            return err
        }
    } else {
        // if the node path already exists
        exist, _, err = s.conn.Exists(nodePath)
        if err != nil {
            return err
        }
        // delete it first
        if exist {
            err = s.conn.Delete(nodePath, -1)
            if err != nil {
                return err
            }
        }
    }

    // create the node path to store address information
    _, err = s.conn.Create(nodePath, []byte(addr), 0, zk.WorldACL(zk.PermAll))
    return err
}
39 Godeps/_workspace/src/github.com/docker/swarm/discovery/zookeeper/zookeeper_test.go (generated, vendored, new file)
@@ -0,0 +1,39 @@
package zookeeper

import (
    "testing"

    "github.com/docker/swarm/discovery"
    "github.com/stretchr/testify/assert"
)

func TestInitialize(t *testing.T) {
    service := &ZkDiscoveryService{}

    assert.Equal(t, service.Initialize("127.0.0.1", 0).Error(), "invalid format \"127.0.0.1\", missing <path>")

    assert.Error(t, service.Initialize("127.0.0.1/path", 0))
    assert.Equal(t, service.fullpath(), "/path")

    assert.Error(t, service.Initialize("127.0.0.1,127.0.0.2,127.0.0.3/path", 0))
    assert.Equal(t, service.fullpath(), "/path")

    assert.Error(t, service.Initialize("127.0.0.1,127.0.0.2,127.0.0.3/path/sub1/sub2", 0))
    assert.Equal(t, service.fullpath(), "/path/sub1/sub2")
}

func TestCreateNodes(t *testing.T) {
    service := &ZkDiscoveryService{}

    nodes, err := service.createNodes(nil)
    assert.Equal(t, nodes, []*discovery.Node{})
    assert.NoError(t, err)

    nodes, err = service.createNodes([]string{"127.0.0.1:2375", "127.0.0.2:2375"})
    assert.Equal(t, nodes[0].String(), "127.0.0.1:2375")
    assert.Equal(t, nodes[1].String(), "127.0.0.2:2375")
    assert.NoError(t, err)

    _, err = service.createNodes([]string{"127.0.0.1", "127.0.0.2"})
    assert.Error(t, err)
}
@@ -1,432 +0,0 @@
// Copyright 2014 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// Package context defines the Context type, which carries deadlines,
// cancelation signals, and other request-scoped values across API boundaries
// and between processes.
//
// Incoming requests to a server should create a Context, and outgoing calls to
// servers should accept a Context. The chain of function calls between must
// propagate the Context, optionally replacing it with a modified copy created
// using WithDeadline, WithTimeout, WithCancel, or WithValue.
//
// Programs that use Contexts should follow these rules to keep interfaces
// consistent across packages and enable static analysis tools to check context
// propagation:
//
// Do not store Contexts inside a struct type; instead, pass a Context
// explicitly to each function that needs it. The Context should be the first
// parameter, typically named ctx:
//
//     func DoSomething(ctx context.Context, arg Arg) error {
//         // ... use ctx ...
//     }
//
// Do not pass a nil Context, even if a function permits it. Pass context.TODO
// if you are unsure about which Context to use.
//
// Use context Values only for request-scoped data that transits processes and
// APIs, not for passing optional parameters to functions.
//
// The same Context may be passed to functions running in different goroutines;
// Contexts are safe for simultaneous use by multiple goroutines.
//
// See http://blog.golang.org/context for example code for a server that uses
// Contexts.
package context

import (
    "errors"
    "fmt"
    "sync"
    "time"
)

// A Context carries a deadline, a cancelation signal, and other values across
// API boundaries.
//
// Context's methods may be called by multiple goroutines simultaneously.
type Context interface {
    // Deadline returns the time when work done on behalf of this context
    // should be canceled. Deadline returns ok==false when no deadline is
    // set. Successive calls to Deadline return the same results.
    Deadline() (deadline time.Time, ok bool)

    // Done returns a channel that's closed when work done on behalf of this
    // context should be canceled. Done may return nil if this context can
    // never be canceled. Successive calls to Done return the same value.
    //
    // WithCancel arranges for Done to be closed when cancel is called;
    // WithDeadline arranges for Done to be closed when the deadline
    // expires; WithTimeout arranges for Done to be closed when the timeout
    // elapses.
    //
    // Done is provided for use in select statements:
    //
    //     // DoSomething calls DoSomethingSlow and returns as soon as
    //     // it returns or ctx.Done is closed.
    //     func DoSomething(ctx context.Context) (Result, error) {
    //         c := make(chan Result, 1)
    //         go func() { c <- DoSomethingSlow(ctx) }()
    //         select {
    //         case res := <-c:
    //             return res, nil
    //         case <-ctx.Done():
    //             return nil, ctx.Err()
    //         }
    //     }
    //
    // See http://blog.golang.org/pipelines for more examples of how to use
    // a Done channel for cancelation.
    Done() <-chan struct{}

    // Err returns a non-nil error value after Done is closed. Err returns
    // Canceled if the context was canceled or DeadlineExceeded if the
    // context's deadline passed. No other values for Err are defined.
    // After Done is closed, successive calls to Err return the same value.
    Err() error

    // Value returns the value associated with this context for key, or nil
    // if no value is associated with key. Successive calls to Value with
    // the same key returns the same result.
    //
    // Use context values only for request-scoped data that transits
    // processes and API boundaries, not for passing optional parameters to
    // functions.
    //
    // A key identifies a specific value in a Context. Functions that wish
    // to store values in Context typically allocate a key in a global
    // variable then use that key as the argument to context.WithValue and
    // Context.Value. A key can be any type that supports equality;
    // packages should define keys as an unexported type to avoid
    // collisions.
    //
    // Packages that define a Context key should provide type-safe accessors
    // for the values stored using that key:
    //
    //     // Package user defines a User type that's stored in Contexts.
    //     package user
    //
    //     import "golang.org/x/net/context"
    //
    //     // User is the type of value stored in the Contexts.
    //     type User struct {...}
    //
    //     // key is an unexported type for keys defined in this package.
    //     // This prevents collisions with keys defined in other packages.
    //     type key int
    //
    //     // userKey is the key for user.User values in Contexts. It is
    //     // unexported; clients use user.NewContext and user.FromContext
    //     // instead of using this key directly.
    //     var userKey key = 0
    //
    //     // NewContext returns a new Context that carries value u.
    //     func NewContext(ctx context.Context, u *User) context.Context {
    //         return context.WithValue(ctx, userKey, u)
    //     }
    //
    //     // FromContext returns the User value stored in ctx, if any.
    //     func FromContext(ctx context.Context) (*User, bool) {
    //         u, ok := ctx.Value(userKey).(*User)
    //         return u, ok
    //     }
    Value(key interface{}) interface{}
}

// Canceled is the error returned by Context.Err when the context is canceled.
var Canceled = errors.New("context canceled")

// DeadlineExceeded is the error returned by Context.Err when the context's
// deadline passes.
var DeadlineExceeded = errors.New("context deadline exceeded")

// An emptyCtx is never canceled, has no values, and has no deadline. It is not
// struct{}, since vars of this type must have distinct addresses.
type emptyCtx int

func (*emptyCtx) Deadline() (deadline time.Time, ok bool) {
    return
}

func (*emptyCtx) Done() <-chan struct{} {
    return nil
}

func (*emptyCtx) Err() error {
    return nil
}

func (*emptyCtx) Value(key interface{}) interface{} {
    return nil
}

func (e *emptyCtx) String() string {
    switch e {
    case background:
        return "context.Background"
    case todo:
        return "context.TODO"
    }
    return "unknown empty Context"
}

var (
    background = new(emptyCtx)
    todo       = new(emptyCtx)
)

// Background returns a non-nil, empty Context. It is never canceled, has no
// values, and has no deadline. It is typically used by the main function,
// initialization, and tests, and as the top-level Context for incoming
// requests.
func Background() Context {
    return background
}

// TODO returns a non-nil, empty Context. Code should use context.TODO when
// it's unclear which Context to use or it is not yet available (because the
// surrounding function has not yet been extended to accept a Context
// parameter). TODO is recognized by static analysis tools that determine
// whether Contexts are propagated correctly in a program.
func TODO() Context {
    return todo
}

// A CancelFunc tells an operation to abandon its work.
// A CancelFunc does not wait for the work to stop.
// After the first call, subsequent calls to a CancelFunc do nothing.
type CancelFunc func()

// WithCancel returns a copy of parent with a new Done channel. The returned
// context's Done channel is closed when the returned cancel function is called
// or when the parent context's Done channel is closed, whichever happens first.
func WithCancel(parent Context) (ctx Context, cancel CancelFunc) {
    c := newCancelCtx(parent)
    propagateCancel(parent, &c)
    return &c, func() { c.cancel(true, Canceled) }
}

// newCancelCtx returns an initialized cancelCtx.
func newCancelCtx(parent Context) cancelCtx {
    return cancelCtx{
        Context: parent,
        done:    make(chan struct{}),
    }
}

// propagateCancel arranges for child to be canceled when parent is.
func propagateCancel(parent Context, child canceler) {
    if parent.Done() == nil {
        return // parent is never canceled
    }
    if p, ok := parentCancelCtx(parent); ok {
        p.mu.Lock()
        if p.err != nil {
            // parent has already been canceled
            child.cancel(false, p.err)
        } else {
            if p.children == nil {
                p.children = make(map[canceler]bool)
            }
            p.children[child] = true
        }
        p.mu.Unlock()
    } else {
        go func() {
            select {
            case <-parent.Done():
                child.cancel(false, parent.Err())
            case <-child.Done():
            }
        }()
    }
}

// parentCancelCtx follows a chain of parent references until it finds a
// *cancelCtx. This function understands how each of the concrete types in this
// package represents its parent.
func parentCancelCtx(parent Context) (*cancelCtx, bool) {
    for {
        switch c := parent.(type) {
        case *cancelCtx:
            return c, true
        case *timerCtx:
            return &c.cancelCtx, true
        case *valueCtx:
            parent = c.Context
        default:
            return nil, false
        }
    }
}

// A canceler is a context type that can be canceled directly. The
// implementations are *cancelCtx and *timerCtx.
type canceler interface {
    cancel(removeFromParent bool, err error)
    Done() <-chan struct{}
}

// A cancelCtx can be canceled. When canceled, it also cancels any children
// that implement canceler.
type cancelCtx struct {
    Context

    done chan struct{} // closed by the first cancel call.

    mu       sync.Mutex
    children map[canceler]bool // set to nil by the first cancel call
    err      error             // set to non-nil by the first cancel call
}

func (c *cancelCtx) Done() <-chan struct{} {
    return c.done
}

func (c *cancelCtx) Err() error {
    c.mu.Lock()
    defer c.mu.Unlock()
    return c.err
}

func (c *cancelCtx) String() string {
    return fmt.Sprintf("%v.WithCancel", c.Context)
}

// cancel closes c.done, cancels each of c's children, and, if
// removeFromParent is true, removes c from its parent's children.
func (c *cancelCtx) cancel(removeFromParent bool, err error) {
    if err == nil {
        panic("context: internal error: missing cancel error")
    }
    c.mu.Lock()
    if c.err != nil {
        c.mu.Unlock()
        return // already canceled
    }
    c.err = err
    close(c.done)
    for child := range c.children {
        // NOTE: acquiring the child's lock while holding parent's lock.
        child.cancel(false, err)
    }
    c.children = nil
    c.mu.Unlock()

    if removeFromParent {
        if p, ok := parentCancelCtx(c.Context); ok {
            p.mu.Lock()
            if p.children != nil {
                delete(p.children, c)
            }
            p.mu.Unlock()
        }
    }
}

// WithDeadline returns a copy of the parent context with the deadline adjusted
// to be no later than d. If the parent's deadline is already earlier than d,
// WithDeadline(parent, d) is semantically equivalent to parent. The returned
// context's Done channel is closed when the deadline expires, when the returned
// cancel function is called, or when the parent context's Done channel is
// closed, whichever happens first.
//
// Canceling this context releases resources associated with the deadline
// timer, so code should call cancel as soon as the operations running in this
// Context complete.
func WithDeadline(parent Context, deadline time.Time) (Context, CancelFunc) {
    if cur, ok := parent.Deadline(); ok && cur.Before(deadline) {
        // The current deadline is already sooner than the new one.
        return WithCancel(parent)
    }
    c := &timerCtx{
        cancelCtx: newCancelCtx(parent),
        deadline:  deadline,
    }
    propagateCancel(parent, c)
    d := deadline.Sub(time.Now())
    if d <= 0 {
        c.cancel(true, DeadlineExceeded) // deadline has already passed
        return c, func() { c.cancel(true, Canceled) }
    }
    c.mu.Lock()
    defer c.mu.Unlock()
    if c.err == nil {
        c.timer = time.AfterFunc(d, func() {
            c.cancel(true, DeadlineExceeded)
        })
    }
    return c, func() { c.cancel(true, Canceled) }
}

// A timerCtx carries a timer and a deadline. It embeds a cancelCtx to
// implement Done and Err. It implements cancel by stopping its timer then
// delegating to cancelCtx.cancel.
type timerCtx struct {
    cancelCtx
    timer *time.Timer // Under cancelCtx.mu.

    deadline time.Time
}

func (c *timerCtx) Deadline() (deadline time.Time, ok bool) {
    return c.deadline, true
}

func (c *timerCtx) String() string {
    return fmt.Sprintf("%v.WithDeadline(%s [%s])", c.cancelCtx.Context, c.deadline, c.deadline.Sub(time.Now()))
}

func (c *timerCtx) cancel(removeFromParent bool, err error) {
    c.cancelCtx.cancel(removeFromParent, err)
    c.mu.Lock()
    if c.timer != nil {
        c.timer.Stop()
        c.timer = nil
    }
    c.mu.Unlock()
}

// WithTimeout returns WithDeadline(parent, time.Now().Add(timeout)).
//
// Canceling this context releases resources associated with the deadline
// timer, so code should call cancel as soon as the operations running in this
// Context complete:
//
//     func slowOperationWithTimeout(ctx context.Context) (Result, error) {
//         ctx, cancel := context.WithTimeout(ctx, 100*time.Millisecond)
//         defer cancel() // releases resources if slowOperation completes before timeout elapses
//         return slowOperation(ctx)
//     }
func WithTimeout(parent Context, timeout time.Duration) (Context, CancelFunc) {
    return WithDeadline(parent, time.Now().Add(timeout))
}

// WithValue returns a copy of parent in which the value associated with key is
// val.
//
// Use context Values only for request-scoped data that transits processes and
// APIs, not for passing optional parameters to functions.
func WithValue(parent Context, key interface{}, val interface{}) Context {
    return &valueCtx{parent, key, val}
}

// A valueCtx carries a key-value pair. It implements Value for that key and
// delegates all other calls to the embedded Context.
type valueCtx struct {
    Context
    key, val interface{}
}

func (c *valueCtx) String() string {
    return fmt.Sprintf("%v.WithValue(%#v, %#v)", c.Context, c.key, c.val)
}

func (c *valueCtx) Value(key interface{}) interface{} {
    if c.key == key {
        return c.val
    }
    return c.Context.Value(key)
}
@@ -1,553 +0,0 @@
// Copyright 2014 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package context

import (
    "fmt"
    "math/rand"
    "runtime"
    "strings"
    "sync"
    "testing"
    "time"
)

// otherContext is a Context that's not one of the types defined in context.go.
// This lets us test code paths that differ based on the underlying type of the
// Context.
type otherContext struct {
    Context
}

func TestBackground(t *testing.T) {
    c := Background()
    if c == nil {
        t.Fatalf("Background returned nil")
    }
    select {
    case x := <-c.Done():
        t.Errorf("<-c.Done() == %v want nothing (it should block)", x)
    default:
    }
    if got, want := fmt.Sprint(c), "context.Background"; got != want {
        t.Errorf("Background().String() = %q want %q", got, want)
    }
}

func TestTODO(t *testing.T) {
    c := TODO()
    if c == nil {
        t.Fatalf("TODO returned nil")
    }
    select {
    case x := <-c.Done():
        t.Errorf("<-c.Done() == %v want nothing (it should block)", x)
    default:
    }
    if got, want := fmt.Sprint(c), "context.TODO"; got != want {
        t.Errorf("TODO().String() = %q want %q", got, want)
    }
}

func TestWithCancel(t *testing.T) {
    c1, cancel := WithCancel(Background())

    if got, want := fmt.Sprint(c1), "context.Background.WithCancel"; got != want {
        t.Errorf("c1.String() = %q want %q", got, want)
    }

    o := otherContext{c1}
    c2, _ := WithCancel(o)
    contexts := []Context{c1, o, c2}

    for i, c := range contexts {
        if d := c.Done(); d == nil {
            t.Errorf("c[%d].Done() == %v want non-nil", i, d)
        }
        if e := c.Err(); e != nil {
            t.Errorf("c[%d].Err() == %v want nil", i, e)
        }

        select {
        case x := <-c.Done():
            t.Errorf("<-c.Done() == %v want nothing (it should block)", x)
        default:
        }
    }

    cancel()
    time.Sleep(100 * time.Millisecond) // let cancelation propagate

    for i, c := range contexts {
        select {
        case <-c.Done():
        default:
            t.Errorf("<-c[%d].Done() blocked, but shouldn't have", i)
        }
        if e := c.Err(); e != Canceled {
            t.Errorf("c[%d].Err() == %v want %v", i, e, Canceled)
        }
    }
}

func TestParentFinishesChild(t *testing.T) {
    // Context tree:
    // parent -> cancelChild
    // parent -> valueChild -> timerChild
    parent, cancel := WithCancel(Background())
    cancelChild, stop := WithCancel(parent)
    defer stop()
    valueChild := WithValue(parent, "key", "value")
    timerChild, stop := WithTimeout(valueChild, 10000*time.Hour)
    defer stop()

    select {
    case x := <-parent.Done():
        t.Errorf("<-parent.Done() == %v want nothing (it should block)", x)
    case x := <-cancelChild.Done():
        t.Errorf("<-cancelChild.Done() == %v want nothing (it should block)", x)
    case x := <-timerChild.Done():
        t.Errorf("<-timerChild.Done() == %v want nothing (it should block)", x)
    case x := <-valueChild.Done():
        t.Errorf("<-valueChild.Done() == %v want nothing (it should block)", x)
    default:
    }

    // The parent's children should contain the two cancelable children.
    pc := parent.(*cancelCtx)
    cc := cancelChild.(*cancelCtx)
    tc := timerChild.(*timerCtx)
    pc.mu.Lock()
    if len(pc.children) != 2 || !pc.children[cc] || !pc.children[tc] {
        t.Errorf("bad linkage: pc.children = %v, want %v and %v",
            pc.children, cc, tc)
    }
    pc.mu.Unlock()

    if p, ok := parentCancelCtx(cc.Context); !ok || p != pc {
        t.Errorf("bad linkage: parentCancelCtx(cancelChild.Context) = %v, %v want %v, true", p, ok, pc)
    }
    if p, ok := parentCancelCtx(tc.Context); !ok || p != pc {
        t.Errorf("bad linkage: parentCancelCtx(timerChild.Context) = %v, %v want %v, true", p, ok, pc)
    }

    cancel()

    pc.mu.Lock()
    if len(pc.children) != 0 {
        t.Errorf("pc.cancel didn't clear pc.children = %v", pc.children)
    }
    pc.mu.Unlock()

    // parent and children should all be finished.
    check := func(ctx Context, name string) {
        select {
        case <-ctx.Done():
        default:
            t.Errorf("<-%s.Done() blocked, but shouldn't have", name)
        }
        if e := ctx.Err(); e != Canceled {
            t.Errorf("%s.Err() == %v want %v", name, e, Canceled)
        }
    }
    check(parent, "parent")
    check(cancelChild, "cancelChild")
    check(valueChild, "valueChild")
    check(timerChild, "timerChild")

    // WithCancel should return a canceled context on a canceled parent.
    precanceledChild := WithValue(parent, "key", "value")
    select {
    case <-precanceledChild.Done():
    default:
        t.Errorf("<-precanceledChild.Done() blocked, but shouldn't have")
    }
    if e := precanceledChild.Err(); e != Canceled {
        t.Errorf("precanceledChild.Err() == %v want %v", e, Canceled)
    }
}

func TestChildFinishesFirst(t *testing.T) {
    cancelable, stop := WithCancel(Background())
    defer stop()
    for _, parent := range []Context{Background(), cancelable} {
        child, cancel := WithCancel(parent)

        select {
        case x := <-parent.Done():
            t.Errorf("<-parent.Done() == %v want nothing (it should block)", x)
        case x := <-child.Done():
            t.Errorf("<-child.Done() == %v want nothing (it should block)", x)
        default:
        }

        cc := child.(*cancelCtx)
        pc, pcok := parent.(*cancelCtx) // pcok == false when parent == Background()
        if p, ok := parentCancelCtx(cc.Context); ok != pcok || (ok && pc != p) {
            t.Errorf("bad linkage: parentCancelCtx(cc.Context) = %v, %v want %v, %v", p, ok, pc, pcok)
        }

        if pcok {
            pc.mu.Lock()
            if len(pc.children) != 1 || !pc.children[cc] {
                t.Errorf("bad linkage: pc.children = %v, cc = %v", pc.children, cc)
            }
            pc.mu.Unlock()
        }

        cancel()

        if pcok {
            pc.mu.Lock()
            if len(pc.children) != 0 {
                t.Errorf("child's cancel didn't remove self from pc.children = %v", pc.children)
            }
            pc.mu.Unlock()
        }

        // child should be finished.
        select {
        case <-child.Done():
        default:
            t.Errorf("<-child.Done() blocked, but shouldn't have")
        }
        if e := child.Err(); e != Canceled {
            t.Errorf("child.Err() == %v want %v", e, Canceled)
        }

        // parent should not be finished.
        select {
        case x := <-parent.Done():
            t.Errorf("<-parent.Done() == %v want nothing (it should block)", x)
        default:
        }
        if e := parent.Err(); e != nil {
            t.Errorf("parent.Err() == %v want nil", e)
        }
    }
}

func testDeadline(c Context, wait time.Duration, t *testing.T) {
    select {
    case <-time.After(wait):
        t.Fatalf("context should have timed out")
    case <-c.Done():
    }
    if e := c.Err(); e != DeadlineExceeded {
        t.Errorf("c.Err() == %v want %v", e, DeadlineExceeded)
    }
}

func TestDeadline(t *testing.T) {
    c, _ := WithDeadline(Background(), time.Now().Add(100*time.Millisecond))
    if got, prefix := fmt.Sprint(c), "context.Background.WithDeadline("; !strings.HasPrefix(got, prefix) {
        t.Errorf("c.String() = %q want prefix %q", got, prefix)
    }
    testDeadline(c, 200*time.Millisecond, t)

    c, _ = WithDeadline(Background(), time.Now().Add(100*time.Millisecond))
    o := otherContext{c}
    testDeadline(o, 200*time.Millisecond, t)

    c, _ = WithDeadline(Background(), time.Now().Add(100*time.Millisecond))
    o = otherContext{c}
    c, _ = WithDeadline(o, time.Now().Add(300*time.Millisecond))
    testDeadline(c, 200*time.Millisecond, t)
}

func TestTimeout(t *testing.T) {
    c, _ := WithTimeout(Background(), 100*time.Millisecond)
    if got, prefix := fmt.Sprint(c), "context.Background.WithDeadline("; !strings.HasPrefix(got, prefix) {
        t.Errorf("c.String() = %q want prefix %q", got, prefix)
    }
    testDeadline(c, 200*time.Millisecond, t)

    c, _ = WithTimeout(Background(), 100*time.Millisecond)
    o := otherContext{c}
    testDeadline(o, 200*time.Millisecond, t)

    c, _ = WithTimeout(Background(), 100*time.Millisecond)
    o = otherContext{c}
    c, _ = WithTimeout(o, 300*time.Millisecond)
    testDeadline(c, 200*time.Millisecond, t)
}

func TestCanceledTimeout(t *testing.T) {
    c, _ := WithTimeout(Background(), 200*time.Millisecond)
    o := otherContext{c}
    c, cancel := WithTimeout(o, 400*time.Millisecond)
    cancel()
    time.Sleep(100 * time.Millisecond) // let cancelation propagate
    select {
    case <-c.Done():
    default:
        t.Errorf("<-c.Done() blocked, but shouldn't have")
    }
    if e := c.Err(); e != Canceled {
        t.Errorf("c.Err() == %v want %v", e, Canceled)
    }
}

type key1 int
|
||||
type key2 int
|
||||
|
||||
var k1 = key1(1)
|
||||
var k2 = key2(1) // same int as k1, different type
|
||||
var k3 = key2(3) // same type as k2, different int
|
||||
|
||||
func TestValues(t *testing.T) {
|
||||
check := func(c Context, nm, v1, v2, v3 string) {
|
||||
if v, ok := c.Value(k1).(string); ok == (len(v1) == 0) || v != v1 {
|
||||
t.Errorf(`%s.Value(k1).(string) = %q, %t want %q, %t`, nm, v, ok, v1, len(v1) != 0)
|
||||
}
|
||||
if v, ok := c.Value(k2).(string); ok == (len(v2) == 0) || v != v2 {
|
||||
t.Errorf(`%s.Value(k2).(string) = %q, %t want %q, %t`, nm, v, ok, v2, len(v2) != 0)
|
||||
}
|
||||
if v, ok := c.Value(k3).(string); ok == (len(v3) == 0) || v != v3 {
|
||||
t.Errorf(`%s.Value(k3).(string) = %q, %t want %q, %t`, nm, v, ok, v3, len(v3) != 0)
|
||||
}
|
||||
}
|
||||
|
||||
c0 := Background()
|
||||
check(c0, "c0", "", "", "")
|
||||
|
||||
c1 := WithValue(Background(), k1, "c1k1")
|
||||
check(c1, "c1", "c1k1", "", "")
|
||||
|
||||
if got, want := fmt.Sprint(c1), `context.Background.WithValue(1, "c1k1")`; got != want {
|
||||
t.Errorf("c.String() = %q want %q", got, want)
|
||||
}
|
||||
|
||||
c2 := WithValue(c1, k2, "c2k2")
|
||||
check(c2, "c2", "c1k1", "c2k2", "")
|
||||
|
||||
c3 := WithValue(c2, k3, "c3k3")
|
||||
check(c3, "c2", "c1k1", "c2k2", "c3k3")
|
||||
|
||||
c4 := WithValue(c3, k1, nil)
|
||||
check(c4, "c4", "", "c2k2", "c3k3")
|
||||
|
||||
o0 := otherContext{Background()}
|
||||
check(o0, "o0", "", "", "")
|
||||
|
||||
o1 := otherContext{WithValue(Background(), k1, "c1k1")}
|
||||
check(o1, "o1", "c1k1", "", "")
|
||||
|
||||
o2 := WithValue(o1, k2, "o2k2")
|
||||
check(o2, "o2", "c1k1", "o2k2", "")
|
||||
|
||||
o3 := otherContext{c4}
|
||||
check(o3, "o3", "", "c2k2", "c3k3")
|
||||
|
||||
o4 := WithValue(o3, k3, nil)
|
||||
check(o4, "o4", "", "c2k2", "")
|
||||
}
|
||||
|
||||
func TestAllocs(t *testing.T) {
|
||||
bg := Background()
|
||||
for _, test := range []struct {
|
||||
desc string
|
||||
f func()
|
||||
limit float64
|
||||
gccgoLimit float64
|
||||
}{
|
||||
{
|
||||
desc: "Background()",
|
||||
f: func() { Background() },
|
||||
limit: 0,
|
||||
gccgoLimit: 0,
|
||||
},
|
||||
{
|
||||
desc: fmt.Sprintf("WithValue(bg, %v, nil)", k1),
|
||||
f: func() {
|
||||
c := WithValue(bg, k1, nil)
|
||||
c.Value(k1)
|
||||
},
|
||||
limit: 3,
|
||||
gccgoLimit: 3,
|
||||
},
|
||||
{
|
||||
desc: "WithTimeout(bg, 15*time.Millisecond)",
|
||||
f: func() {
|
||||
c, _ := WithTimeout(bg, 15*time.Millisecond)
|
||||
<-c.Done()
|
||||
},
|
||||
limit: 8,
|
||||
gccgoLimit: 13,
|
||||
},
|
||||
{
|
||||
desc: "WithCancel(bg)",
|
||||
f: func() {
|
||||
c, cancel := WithCancel(bg)
|
||||
cancel()
|
||||
<-c.Done()
|
||||
},
|
||||
limit: 5,
|
||||
gccgoLimit: 8,
|
||||
},
|
||||
{
|
||||
desc: "WithTimeout(bg, 100*time.Millisecond)",
|
||||
f: func() {
|
||||
c, cancel := WithTimeout(bg, 100*time.Millisecond)
|
||||
cancel()
|
||||
<-c.Done()
|
||||
},
|
||||
limit: 8,
|
||||
gccgoLimit: 25,
|
||||
},
|
||||
} {
|
||||
limit := test.limit
|
||||
if runtime.Compiler == "gccgo" {
|
||||
// gccgo does not yet do escape analysis.
|
||||
// TOOD(iant): Remove this when gccgo does do escape analysis.
|
||||
limit = test.gccgoLimit
|
||||
}
|
||||
if n := testing.AllocsPerRun(100, test.f); n > limit {
|
||||
t.Errorf("%s allocs = %f want %d", test.desc, n, int(limit))
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestSimultaneousCancels(t *testing.T) {
|
||||
root, cancel := WithCancel(Background())
|
||||
m := map[Context]CancelFunc{root: cancel}
|
||||
q := []Context{root}
|
||||
// Create a tree of contexts.
|
||||
for len(q) != 0 && len(m) < 100 {
|
||||
parent := q[0]
|
||||
q = q[1:]
|
||||
for i := 0; i < 4; i++ {
|
||||
ctx, cancel := WithCancel(parent)
|
||||
m[ctx] = cancel
|
||||
q = append(q, ctx)
|
||||
}
|
||||
}
|
||||
// Start all the cancels in a random order.
|
||||
var wg sync.WaitGroup
|
||||
wg.Add(len(m))
|
||||
for _, cancel := range m {
|
||||
go func(cancel CancelFunc) {
|
||||
cancel()
|
||||
wg.Done()
|
||||
}(cancel)
|
||||
}
|
||||
// Wait on all the contexts in a random order.
|
||||
for ctx := range m {
|
||||
select {
|
||||
case <-ctx.Done():
|
||||
case <-time.After(1 * time.Second):
|
||||
buf := make([]byte, 10<<10)
|
||||
n := runtime.Stack(buf, true)
|
||||
t.Fatalf("timed out waiting for <-ctx.Done(); stacks:\n%s", buf[:n])
|
||||
}
|
||||
}
|
||||
// Wait for all the cancel functions to return.
|
||||
done := make(chan struct{})
|
||||
go func() {
|
||||
wg.Wait()
|
||||
close(done)
|
||||
}()
|
||||
select {
|
||||
case <-done:
|
||||
case <-time.After(1 * time.Second):
|
||||
buf := make([]byte, 10<<10)
|
||||
n := runtime.Stack(buf, true)
|
||||
t.Fatalf("timed out waiting for cancel functions; stacks:\n%s", buf[:n])
|
||||
}
|
||||
}
|
||||
|
||||
func TestInterlockedCancels(t *testing.T) {
|
||||
parent, cancelParent := WithCancel(Background())
|
||||
child, cancelChild := WithCancel(parent)
|
||||
go func() {
|
||||
        <-parent.Done()
        cancelChild()
    }()
    cancelParent()
    select {
    case <-child.Done():
    case <-time.After(1 * time.Second):
        buf := make([]byte, 10<<10)
        n := runtime.Stack(buf, true)
        t.Fatalf("timed out waiting for child.Done(); stacks:\n%s", buf[:n])
    }
}

func TestLayersCancel(t *testing.T) {
    testLayers(t, time.Now().UnixNano(), false)
}

func TestLayersTimeout(t *testing.T) {
    testLayers(t, time.Now().UnixNano(), true)
}

func testLayers(t *testing.T, seed int64, testTimeout bool) {
    rand.Seed(seed)
    errorf := func(format string, a ...interface{}) {
        t.Errorf(fmt.Sprintf("seed=%d: %s", seed, format), a...)
    }
    const (
        timeout   = 200 * time.Millisecond
        minLayers = 30
    )
    type value int
    var (
        vals      []*value
        cancels   []CancelFunc
        numTimers int
        ctx       = Background()
    )
    for i := 0; i < minLayers || numTimers == 0 || len(cancels) == 0 || len(vals) == 0; i++ {
        switch rand.Intn(3) {
        case 0:
            v := new(value)
            ctx = WithValue(ctx, v, v)
            vals = append(vals, v)
        case 1:
            var cancel CancelFunc
            ctx, cancel = WithCancel(ctx)
            cancels = append(cancels, cancel)
        case 2:
            var cancel CancelFunc
            ctx, cancel = WithTimeout(ctx, timeout)
            cancels = append(cancels, cancel)
            numTimers++
        }
    }
    checkValues := func(when string) {
        for _, key := range vals {
            if val := ctx.Value(key).(*value); key != val {
                errorf("%s: ctx.Value(%p) = %p want %p", when, key, val, key)
            }
        }
    }
    select {
    case <-ctx.Done():
        errorf("ctx should not be canceled yet")
    default:
    }
    if s, prefix := fmt.Sprint(ctx), "context.Background."; !strings.HasPrefix(s, prefix) {
        t.Errorf("ctx.String() = %q want prefix %q", s, prefix)
    }
    t.Log(ctx)
    checkValues("before cancel")
    if testTimeout {
        select {
        case <-ctx.Done():
        case <-time.After(timeout + timeout/10):
            errorf("ctx should have timed out")
        }
        checkValues("after timeout")
    } else {
        cancel := cancels[rand.Intn(len(cancels))]
        cancel()
        select {
        case <-ctx.Done():
        default:
            errorf("ctx should be canceled")
        }
        checkValues("after cancel")
    }
}
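The tests above close out the vendored `golang.org/x/net/context` test file. For orientation, here is a minimal sketch of the cancelation pattern they verify, using only the public x/net/context API (timing value illustrative):

```go
package main

import (
	"fmt"
	"time"

	"golang.org/x/net/context"
)

func main() {
	ctx, cancel := context.WithCancel(context.Background())
	go func() {
		time.Sleep(50 * time.Millisecond)
		cancel() // closes ctx.Done() for every listener, child contexts included
	}()
	<-ctx.Done()
	fmt.Println(ctx.Err()) // "context canceled"
}
```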
@@ -1,26 +0,0 @@
// Copyright 2014 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package context_test

import (
    "fmt"
    "time"

    "golang.org/x/net/context"
)

func ExampleWithTimeout() {
    // Pass a context with a timeout to tell a blocking function that it
    // should abandon its work after the timeout elapses.
    ctx, _ := context.WithTimeout(context.Background(), 100*time.Millisecond)
    select {
    case <-time.After(200 * time.Millisecond):
        fmt.Println("overslept")
    case <-ctx.Done():
        fmt.Println(ctx.Err()) // prints "context deadline exceeded"
    }
    // Output:
    // context deadline exceeded
}
44 README.md
@@ -46,6 +46,50 @@ You can also get the commands to export environment variables to use with the Do

Machine is still in its early stages. If you'd like to try out a preview build, [download it here](https://github.com/docker/machine/releases/latest).

## Swarm

Machine can create [Docker Swarm](https://github.com/docker/swarm) clusters.

First, create a Swarm token with `docker-machine create-swarm-token`. Optionally, you can use another discovery service; see the Swarm docs for details.

Once you have the token, you can create the cluster.

### Swarm Master

Create the Swarm master:

`docker-machine create -d virtualbox --swarm --swarm-master --swarm-discovery token://<TOKEN-FROM-ABOVE> swarm-master`

Replace `<TOKEN-FROM-ABOVE>` with your random token. This will create the Swarm master.

### Swarm Nodes

Now, create one or more Swarm nodes:

`docker-machine create -d virtualbox --swarm --swarm-discovery token://<TOKEN-FROM-ABOVE> swarm-node-00`

You now have a Swarm cluster. To connect to the Swarm master, use `docker-machine env --swarm swarm-master`. For example:

```
$ docker-machine env --swarm swarm-master
export DOCKER_TLS_VERIFY=yes
export DOCKER_CERT_PATH=/home/ehazlett/.docker/machines/.client
export DOCKER_HOST=tcp://192.168.99.100:3376
```

You can load this into your environment using `$(docker-machine env --swarm swarm-master)`.

Now you can use the Docker CLI to query the cluster:

```
$ docker info
Containers: 1
Nodes: 1
 swarm-node-00: 192.168.99.101:2376
```

## Drivers

### VirtualBox
58 commands.go
@@ -3,6 +3,7 @@ package main
import (
    "encoding/json"
    "fmt"
    "net/url"
    "os"
    "os/exec"
    "path/filepath"

@@ -29,13 +30,17 @@ import (
    _ "github.com/docker/machine/drivers/vmwarevsphere"
    "github.com/docker/machine/state"
    "github.com/docker/machine/utils"
    "github.com/docker/swarm/discovery/token"
)

type machineConfig struct {
    machineName    string
    caCertPath     string
    clientCertPath string
    clientKeyPath  string
    machineUrl     string
    swarmMaster    bool
    swarmHost      string
}

type hostListItem struct {

@@ -163,6 +168,11 @@ var Commands = []cli.Command{
        Usage:  "Create a machine",
        Action: cmdCreate,
    },
    {
        Name:   "create-swarm-token",
        Usage:  "Generate a Swarm Cluster Token",
        Action: cmdCreateSwarmToken,
    },
    {
        Name:  "config",
        Usage: "Print the connection config for machine",

@@ -214,6 +224,12 @@ var Commands = []cli.Command{
        Name:   "env",
        Usage:  "Display the commands to set up the environment for the Docker client",
        Action: cmdEnv,
        Flags: []cli.Flag{
            cli.BoolFlag{
                Name:  "swarm",
                Usage: "Display the Swarm config instead of the Docker daemon",
            },
        },
    },
    {
        Name: "ssh",

@@ -298,6 +314,17 @@ func cmdCreate(c *cli.Context) {
    log.Infof("To point your Docker client at it, run this in your shell: $(%s env %s)", c.App.Name, name)
}

func cmdCreateSwarmToken(c *cli.Context) {
    discovery := &token.TokenDiscoveryService{}
    discovery.Initialize("", 0)
    token, err := discovery.CreateCluster()
    if err != nil {
        log.Fatal(err)
    }
    fmt.Println(token)
}
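For reference, the token-generation call above can be exercised standalone. A minimal sketch, assuming the vendored `token.TokenDiscoveryService` behaves exactly as `cmdCreateSwarmToken` uses it:

```go
package main

import (
	"fmt"
	"log"

	"github.com/docker/swarm/discovery/token"
)

func main() {
	// Same calls as cmdCreateSwarmToken: no existing URI, heartbeat unused.
	discovery := &token.TokenDiscoveryService{}
	discovery.Initialize("", 0)

	// CreateCluster asks the hosted discovery endpoint for a fresh cluster ID.
	tok, err := discovery.CreateCluster()
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(tok) // usable as token://<cluster_id>
}
```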
func cmdConfig(c *cli.Context) {
    cfg, err := getMachineConfig(c)
    if err != nil {

@@ -421,13 +448,37 @@ func cmdEnv(c *cli.Context) {
    if err != nil {
        log.Fatal(err)
    }

    dockerHost := cfg.machineUrl
    if c.Bool("swarm") {
        if !cfg.swarmMaster {
            log.Fatalf("%s is not a swarm master", cfg.machineName)
        }
        u, err := url.Parse(cfg.swarmHost)
        if err != nil {
            log.Fatal(err)
        }
        parts := strings.Split(u.Host, ":")
        swarmPort := parts[1]

        // get IP of machine to replace in case swarm host is 0.0.0.0
        mUrl, err := url.Parse(cfg.machineUrl)
        if err != nil {
            log.Fatal(err)
        }
        mParts := strings.Split(mUrl.Host, ":")
        machineIp := mParts[0]

        dockerHost = fmt.Sprintf("tcp://%s:%s", machineIp, swarmPort)
    }

    switch filepath.Base(os.Getenv("SHELL")) {
    case "fish":
        fmt.Printf("set -x DOCKER_TLS_VERIFY yes\nset -x DOCKER_CERT_PATH %s\nset -x DOCKER_HOST %s\n",
            utils.GetMachineClientCertDir(), cfg.machineUrl)
            utils.GetMachineClientCertDir(), dockerHost)
    default:
        fmt.Printf("export DOCKER_TLS_VERIFY=yes\nexport DOCKER_CERT_PATH=%s\nexport DOCKER_HOST=%s\n",
            utils.GetMachineClientCertDir(), cfg.machineUrl)
            utils.GetMachineClientCertDir(), dockerHost)
    }
}
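The `--swarm` branch above rewrites the advertised address: Swarm managers often bind to `0.0.0.0`, which a remote client cannot dial, so the machine's own IP is substituted while the Swarm port is kept. A standalone sketch of that rewrite (addresses illustrative; `net.SplitHostPort` used here in place of the `strings.Split` calls above):

```go
package main

import (
	"fmt"
	"log"
	"net"
	"net/url"
)

func main() {
	// cfg.swarmHost may bind to all interfaces, e.g. tcp://0.0.0.0:3376.
	swarmURL, err := url.Parse("tcp://0.0.0.0:3376")
	if err != nil {
		log.Fatal(err)
	}
	// cfg.machineUrl carries the machine's reachable address (value assumed).
	machineURL, err := url.Parse("tcp://192.168.99.100:2376")
	if err != nil {
		log.Fatal(err)
	}

	_, swarmPort, err := net.SplitHostPort(swarmURL.Host) // keep the Swarm port
	if err != nil {
		log.Fatal(err)
	}
	machineIP, _, err := net.SplitHostPort(machineURL.Host) // keep the machine IP
	if err != nil {
		log.Fatal(err)
	}

	fmt.Printf("tcp://%s:%s\n", machineIP, swarmPort) // tcp://192.168.99.100:3376
}
```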
@@ -594,9 +645,12 @@ func getMachineConfig(c *cli.Context) (*machineConfig, error) {
        }
    }
    return &machineConfig{
        machineName:    name,
        caCertPath:     caCert,
        clientCertPath: clientCert,
        clientKeyPath:  clientKey,
        machineUrl:     machineUrl,
        swarmMaster:    machine.SwarmMaster,
        swarmHost:      machine.SwarmHost,
    }, nil
}
@@ -54,10 +54,10 @@ type Driver struct {
    Zone           string
    CaCertPath     string
    PrivateKeyPath string
    SwarmMaster    bool
    SwarmHost      string
    storePath      string
    keyPath        string
    swarmMaster    bool
    swarmHost      string
}

type CreateFlags struct {

@@ -175,8 +175,8 @@ func (d *Driver) SetConfigFromFlags(flags drivers.DriverOptions) error {
    zone := flags.String("amazonec2-zone")
    d.Zone = zone[:]
    d.RootSize = int64(flags.Int("amazonec2-root-size"))
    d.swarmMaster = flags.Bool("swarm-master")
    d.swarmHost = flags.String("swarm-host")
    d.SwarmMaster = flags.Bool("swarm-master")
    d.SwarmHost = flags.String("swarm-host")

    if d.AccessKey == "" {
        return fmt.Errorf("amazonec2 driver requires the --amazonec2-access-key option")

@@ -564,7 +564,7 @@ func (d *Driver) terminate() error {
}

func (d *Driver) isSwarmMaster() bool {
    return d.swarmMaster
    return d.SwarmMaster
}

func (d *Driver) configureSecurityGroup(groupName string) error {

@@ -611,7 +611,7 @@ func (d *Driver) configureSecurityGroup(groupName string) error {

    // configure swarm permission if needed
    if d.isSwarmMaster() {
        u, err := url.Parse(d.swarmHost)
        u, err := url.Parse(d.SwarmHost)
        if err != nil {
            return fmt.Errorf("error authorizing port for swarm: %s", err)
        }
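The pattern repeated across the driver diffs here is one rename: `swarmMaster`/`swarmHost` become exported `SwarmMaster`/`SwarmHost`. That is what lets the swarm config be stored in the driver: machine persists each host's driver state as JSON, and `encoding/json` silently skips unexported fields. A minimal illustration (hypothetical struct, not from this repo):

```go
package main

import (
	"encoding/json"
	"fmt"
)

type driverState struct {
	SwarmMaster bool   // exported: survives a marshal/unmarshal round trip
	swarmHost   string // unexported: silently dropped by encoding/json
}

func main() {
	b, _ := json.Marshal(driverState{SwarmMaster: true, swarmHost: "tcp://0.0.0.0:3376"})
	fmt.Println(string(b)) // {"SwarmMaster":true}; swarmHost is lost
}
```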
@@ -38,6 +38,8 @@ type Driver struct {
    DockerPort     int
    CaCertPath     string
    PrivateKeyPath string
    SwarmMaster    bool
    SwarmHost      string
    storePath      string
}

@@ -159,6 +161,8 @@ func (driver *Driver) SetConfigFromFlags(flags drivers.DriverOptions) error {
    driver.UserPassword = flags.String("azure-password")
    driver.DockerPort = flags.Int("azure-docker-port")
    driver.SSHPort = flags.Int("azure-ssh-port")
    driver.SwarmMaster = flags.Bool("swarm-master")
    driver.SwarmHost = flags.String("swarm-host")

    return nil
}
@@ -33,6 +33,8 @@ type Driver struct {
    CaCertPath     string
    PrivateKeyPath string
    DriverKeyPath  string
    SwarmMaster    bool
    SwarmHost      string
    storePath      string
}

@@ -86,6 +88,8 @@ func (d *Driver) SetConfigFromFlags(flags drivers.DriverOptions) error {
    d.Image = flags.String("digitalocean-image")
    d.Region = flags.String("digitalocean-region")
    d.Size = flags.String("digitalocean-size")
    d.SwarmMaster = flags.Bool("swarm-master")
    d.SwarmHost = flags.String("swarm-host")

    if d.AccessToken == "" {
        return fmt.Errorf("digitalocean driver requires the --digitalocean-access-token option")
@@ -22,8 +22,8 @@ type ComputeUtil struct {
    zoneURL     string
    globalURL   string
    ipAddress   string
    swarmMaster bool
    swarmHost   string
    SwarmMaster bool
    SwarmHost   string
}

const (

@@ -53,8 +53,8 @@ func newComputeUtil(driver *Driver) (*ComputeUtil, error) {
        service:     service,
        zoneURL:     apiURL + driver.Project + "/zones/" + driver.Zone,
        globalURL:   apiURL + driver.Project + "/global",
        swarmMaster: driver.swarmMaster,
        swarmHost:   driver.swarmHost,
        SwarmMaster: driver.SwarmMaster,
        SwarmHost:   driver.SwarmHost,
    }
    return &c, nil
}

@@ -95,8 +95,8 @@ func (c *ComputeUtil) createFirewallRule() error {
        },
    }

    if c.swarmMaster {
        u, err := url.Parse(c.swarmHost)
    if c.SwarmMaster {
        u, err := url.Parse(c.SwarmHost)
        if err != nil {
            return fmt.Errorf("error authorizing port for swarm: %s", err)
        }

@@ -31,8 +31,8 @@ type Driver struct {
    PrivateKeyPath   string
    sshKeyPath       string
    publicSSHKeyPath string
    swarmMaster      bool
    swarmHost        string
    SwarmMaster      bool
    SwarmHost        string
}

// CreateFlags are the command line flags used to create a driver.

@@ -111,8 +111,8 @@ func (driver *Driver) SetConfigFromFlags(flags drivers.DriverOptions) error {
    driver.DiskSize = flags.Int("google-disk-size")
    driver.UserName = flags.String("google-username")
    driver.Project = flags.String("google-project")
    driver.swarmMaster = flags.Bool("swarm-master")
    driver.swarmHost = flags.String("swarm-host")
    driver.SwarmMaster = flags.Bool("swarm-master")
    driver.SwarmHost = flags.String("swarm-host")
    if driver.Project == "" {
        return fmt.Errorf("Please specify the Google Cloud Project name using the option --google-project.")
    }
@@ -48,6 +48,8 @@ type Driver struct {
    CaCertPath     string
    PrivateKeyPath string
    storePath      string
    SwarmMaster    bool
    SwarmHost      string
    client         Client
}

@@ -227,6 +229,8 @@ func (d *Driver) SetConfigFromFlags(flags drivers.DriverOptions) error {
    d.FloatingIpPool = flags.String("openstack-floatingip-pool")
    d.SSHUser = flags.String("openstack-ssh-user")
    d.SSHPort = flags.Int("openstack-ssh-port")
    d.SwarmMaster = flags.Bool("swarm-master")
    d.SwarmHost = flags.String("swarm-host")

    installDocker, err := strconv.ParseBool(flags.String("openstack-docker-install"))
    if err != nil {
@@ -33,6 +33,8 @@ type CreateFlags struct {
    SSHPort        *int
    CaCertPath     string
    PrivateKeyPath string
    SwarmMaster    bool
    SwarmHost      string
}

func init() {

@@ -147,6 +149,8 @@ func (d *Driver) SetConfigFromFlags(flags drivers.DriverOptions) error {
    d.SSHUser = flags.String("rackspace-ssh-user")
    d.SSHPort = flags.Int("rackspace-ssh-port")
    d.EnableDockerInstall = flags.String("rackspace-docker-install") == "true"
    d.SwarmMaster = flags.Bool("swarm-master")
    d.SwarmHost = flags.String("swarm-host")

    if d.Region == "" {
        return missingEnvOrOption("Region", "OS_REGION_NAME", "--rackspace-region")
@@ -31,6 +31,8 @@ type Driver struct {
    MachineName    string
    CaCertPath     string
    PrivateKeyPath string
    SwarmMaster    bool
    SwarmHost      string
}

type deviceConfig struct {

@@ -190,6 +192,9 @@ func (d *Driver) SetConfigFromFlags(flags drivers.DriverOptions) error {
        ApiKey: flags.String("softlayer-api-key"),
    }

    d.SwarmMaster = flags.Bool("swarm-master")
    d.SwarmHost = flags.String("swarm-host")

    if err := validateClientConfig(d.Client); err != nil {
        return err
    }
@@ -36,6 +36,8 @@ type Driver struct {
    Boot2DockerURL string
    CaCertPath     string
    PrivateKeyPath string
    SwarmMaster    bool
    SwarmHost      string
    storePath      string
}

@@ -98,6 +100,9 @@ func (d *Driver) SetConfigFromFlags(flags drivers.DriverOptions) error {
    d.Memory = flags.Int("virtualbox-memory")
    d.DiskSize = flags.Int("virtualbox-disk-size")
    d.Boot2DockerURL = flags.String("virtualbox-boot2docker-url")
    d.SwarmMaster = flags.Bool("swarm-master")
    d.SwarmHost = flags.String("swarm-host")

    return nil
}
@@ -44,6 +44,8 @@ type Driver struct {
    Boot2DockerURL string
    CaCertPath     string
    PrivateKeyPath string
    SwarmMaster    bool
    SwarmHost      string

    storePath string
}

@@ -98,6 +100,8 @@ func (d *Driver) SetConfigFromFlags(flags drivers.DriverOptions) error {
    d.DiskSize = flags.Int("vmwarefusion-disk-size")
    d.Boot2DockerURL = flags.String("vmwarefusion-boot2docker-url")
    d.ISO = path.Join(d.storePath, isoFilename)
    d.SwarmMaster = flags.Bool("swarm-master")
    d.SwarmHost = flags.String("swarm-host")

    return nil
}
@@ -43,9 +43,10 @@ type Driver struct {
    MemorySize     int
    CaCertPath     string
    PrivateKeyPath string

    VAppID    string
    storePath string
    SwarmMaster bool
    SwarmHost   string
    VAppID      string
    storePath   string
}

type CreateFlags struct {

@@ -175,6 +176,8 @@ func (driver *Driver) SetConfigFromFlags(flags drivers.DriverOptions) error {
    driver.UserPassword = flags.String("vmwarevcloudair-password")
    driver.VDCID = flags.String("vmwarevcloudair-vdcid")
    driver.PublicIP = flags.String("vmwarevcloudair-publicip")
    driver.SwarmMaster = flags.Bool("swarm-master")
    driver.SwarmHost = flags.String("swarm-host")

    // Check for required Params
    if driver.UserName == "" || driver.UserPassword == "" || driver.VDCID == "" || driver.PublicIP == "" {
@@ -54,6 +54,8 @@ type Driver struct {
    ISO            string
    CaCertPath     string
    PrivateKeyPath string
    SwarmMaster    bool
    SwarmHost      string

    storePath string
}

@@ -172,6 +174,8 @@ func (d *Driver) SetConfigFromFlags(flags drivers.DriverOptions) error {
    d.Datacenter = flags.String("vmwarevsphere-datacenter")
    d.Pool = flags.String("vmwarevsphere-pool")
    d.HostIP = flags.String("vmwarevsphere-compute-ip")
    d.SwarmMaster = flags.Bool("swarm-master")
    d.SwarmHost = flags.String("swarm-host")

    d.ISO = path.Join(d.storePath, "boot2docker.iso")
24 host.go
@@ -5,6 +5,7 @@ import (
    "errors"
    "fmt"
    "io/ioutil"
    "net"
    "net/url"
    "os"
    "path"

@@ -12,6 +13,7 @@ import (
    "regexp"
    "strconv"
    "strings"
    "time"

    log "github.com/Sirupsen/logrus"
    "github.com/docker/machine/drivers"

@@ -25,7 +27,8 @@ var (
)

const (
    swarmDockerImage = "ehazlett/swarm:latest"
    swarmDockerImage              = "ehazlett/swarm:latest"
    swarmDiscoveryServiceEndpoint = "https://discovery-stage.hub.docker.com/v1"
)

type Host struct {

@@ -37,6 +40,8 @@ type Host struct {
    ServerKeyPath  string
    PrivateKeyPath string
    ClientCertPath string
    SwarmMaster    bool
    SwarmHost      string
    storePath      string
}

@@ -49,7 +54,20 @@ type hostConfig struct {
    DriverName string
}

func NewHost(name, driverName, storePath, caCert, privateKey string) (*Host, error) {
func waitForDocker(addr string) error {
    for {
        conn, err := net.DialTimeout("tcp", addr, time.Second*5)
        if err != nil {
            time.Sleep(time.Second * 5)
            continue
        }
        conn.Close()
        break
    }
    return nil
}

func NewHost(name, driverName, storePath, caCert, privateKey string, swarmMaster bool, swarmHost string) (*Host, error) {
    driver, err := drivers.NewDriver(driverName, name, storePath, caCert, privateKey)
    if err != nil {
        return nil, err

@@ -60,6 +78,8 @@ func NewHost(name, driverName, storePath, caCert, privateKey string) (*Host, err
        Driver:         driver,
        CaCertPath:     caCert,
        PrivateKeyPath: privateKey,
        SwarmMaster:    swarmMaster,
        SwarmHost:      swarmHost,
        storePath:      storePath,
    }, nil
}
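`waitForDocker` above retries every five seconds and never gives up, so a host whose daemon never starts would block its caller forever. A bounded variant (hypothetical helper, not part of this commit) could look like:

```go
package main

import (
	"fmt"
	"net"
	"time"
)

// waitForDockerTimeout is a hypothetical bounded version of waitForDocker:
// it polls the daemon's TCP port but returns an error once the deadline passes.
func waitForDockerTimeout(addr string, timeout time.Duration) error {
	deadline := time.Now().Add(timeout)
	for time.Now().Before(deadline) {
		conn, err := net.DialTimeout("tcp", addr, 5*time.Second)
		if err == nil {
			conn.Close()
			return nil
		}
		time.Sleep(5 * time.Second)
	}
	return fmt.Errorf("docker did not come up on %s within %s", addr, timeout)
}

func main() {
	// Address illustrative only.
	if err := waitForDockerTimeout("192.168.99.100:2376", 30*time.Second); err != nil {
		fmt.Println(err)
	}
}
```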
11 host_test.go
@@ -36,15 +36,18 @@ func getTestDriverFlags() *DriverOptionsMock {
    name := hostTestName
    flags := &DriverOptionsMock{
        Data: map[string]interface{}{
            "name": name,
            "url":  "unix:///var/run/docker.sock",
            "name":         name,
            "url":          "unix:///var/run/docker.sock",
            "swarm":        false,
            "swarm-host":   "",
            "swarm-master": false,
        },
    }
    return flags
}

func getDefaultTestHost() (*Host, error) {
    host, err := NewHost(hostTestName, hostTestDriverName, hostTestStorePath, hostTestCaCert, hostTestPrivateKey)
    host, err := NewHost(hostTestName, hostTestDriverName, hostTestStorePath, hostTestCaCert, hostTestPrivateKey, false, "")
    if err != nil {
        return nil, err
    }

@@ -147,7 +150,7 @@ func TestGenerateClientCertificate(t *testing.T) {
}

func TestGenerateDockerConfigNonLocal(t *testing.T) {
    host, err := NewHost(hostTestName, hostTestDriverName, hostTestStorePath, hostTestCaCert, hostTestPrivateKey)
    host, err := NewHost(hostTestName, hostTestDriverName, hostTestStorePath, hostTestCaCert, hostTestPrivateKey, false, "")
    if err != nil {
        t.Fatal(err)
    }
2 store.go
@@ -38,7 +38,7 @@ func (s *Store) Create(name string, driverName string, flags drivers.DriverOptio

    hostPath := filepath.Join(s.Path, name)

    host, err := NewHost(name, driverName, hostPath, s.CaCertPath, s.PrivateKeyPath)
    host, err := NewHost(name, driverName, hostPath, s.CaCertPath, s.PrivateKeyPath, flags.Bool("swarm-master"), flags.String("swarm-host"))
    if err != nil {
        return host, err
    }
@@ -37,8 +37,10 @@ func TestStoreCreate(t *testing.T) {
    flags := &DriverOptionsMock{
        Data: map[string]interface{}{
            "url":             "unix:///var/run/docker.sock",
            "swarm-master-ip": "",
            "swarm":           false,
            "swarm-host":      "",
            "swarm-master":    false,
            "swarm-master-ip": "",
        },
    }

@@ -65,8 +67,10 @@ func TestStoreRemove(t *testing.T) {
    flags := &DriverOptionsMock{
        Data: map[string]interface{}{
            "url":             "unix:///var/run/docker.sock",
            "swarm-master-ip": "",
            "swarm":           false,
            "swarm-host":      "",
            "swarm-master":    false,
            "swarm-master-ip": "",
        },
    }

@@ -96,8 +100,10 @@ func TestStoreList(t *testing.T) {
    flags := &DriverOptionsMock{
        Data: map[string]interface{}{
            "url":             "unix:///var/run/docker.sock",
            "swarm-master-ip": "",
            "swarm":           false,
            "swarm-host":      "",
            "swarm-master":    false,
            "swarm-master-ip": "",
        },
    }

@@ -123,8 +129,10 @@ func TestStoreExists(t *testing.T) {
    flags := &DriverOptionsMock{
        Data: map[string]interface{}{
            "url":             "unix:///var/run/docker.sock",
            "swarm-master-ip": "",
            "swarm":           false,
            "swarm-host":      "",
            "swarm-master":    false,
            "swarm-master-ip": "",
        },
    }

@@ -155,8 +163,10 @@ func TestStoreLoad(t *testing.T) {
    flags := &DriverOptionsMock{
        Data: map[string]interface{}{
            "url":             expectedURL,
            "swarm-master-ip": "",
            "swarm":           false,
            "swarm-host":      "",
            "swarm-master":    false,
            "swarm-master-ip": "",
        },
    }

@@ -188,8 +198,10 @@ func TestStoreGetSetActive(t *testing.T) {
    flags := &DriverOptionsMock{
        Data: map[string]interface{}{
            "url":             "unix:///var/run/docker.sock",
            "swarm-master-ip": "",
            "swarm":           false,
            "swarm-host":      "",
            "swarm-master":    false,
            "swarm-master-ip": "",
        },
    }