mirror of https://github.com/docker/docs.git
Merge pull request #17278 from mavenugo/etchosts
Vendoring libnetwork & libkv with fixes
This commit is contained in commit 37da495d4c.
@@ -21,12 +21,12 @@ clone git github.com/vdemeester/shakers 3c10293ce22b900c27acad7b28656196fcc2f73b
 clone git golang.org/x/net 3cffabab72adf04f8e3b01c5baf775361837b5fe https://github.com/golang/net.git
 
 #get libnetwork packages
-clone git github.com/docker/libnetwork f3c8ebf46b890d4612c5d98e792280d13abdb761
+clone git github.com/docker/libnetwork bf041154d27ed34ed39722328c8f1b0144a56fe2
 clone git github.com/armon/go-metrics eb0af217e5e9747e41dd5303755356b62d28e3ec
 clone git github.com/hashicorp/go-msgpack 71c2886f5a673a35f909803f38ece5810165097b
 clone git github.com/hashicorp/memberlist 9a1e242e454d2443df330bdd51a436d5a9058fc4
 clone git github.com/hashicorp/serf 7151adcef72687bf95f451a2e0ba15cb19412bf2
-clone git github.com/docker/libkv 749af6c5b3fb755bec1738cc5e0d3a6f1574d730
+clone git github.com/docker/libkv c2aac5dbbaa5c872211edea7c0f32b3bd67e7410
 clone git github.com/vishvananda/netns 604eaf189ee867d8c147fafc28def2394e878d25
 clone git github.com/vishvananda/netlink 4b5dce31de6d42af5bb9811c6d265472199e0fec
 clone git github.com/BurntSushi/toml f706d00e3de6abe700c994cdd545a1a4915af060
@@ -794,6 +794,26 @@ func (s *DockerDaemonSuite) TestDaemonBridgeFixedCidr(c *check.C) {
 	}
 }
 
+func (s *DockerDaemonSuite) TestDaemonBridgeFixedCidrFixedCIDREqualBridgeNetwork(c *check.C) {
+	d := s.d
+
+	bridgeName := "external-bridge"
+	bridgeIP := "172.27.42.1/16"
+
+	out, err := createInterface(c, "bridge", bridgeName, bridgeIP)
+	c.Assert(err, check.IsNil, check.Commentf(out))
+	defer deleteInterface(c, bridgeName)
+
+	err = d.StartWithBusybox("--bridge", bridgeName, "--fixed-cidr", bridgeIP)
+	c.Assert(err, check.IsNil)
+	defer s.d.Restart()
+
+	out, err = d.Cmd("run", "-d", "busybox", "top")
+	c.Assert(err, check.IsNil, check.Commentf(out))
+	cid1 := strings.TrimSpace(out)
+	defer d.Cmd("stop", cid1)
+}
+
 func (s *DockerDaemonSuite) TestDaemonDefaultGatewayIPv4Implicit(c *check.C) {
 	defaultNetworkBridge := "docker0"
 	deleteInterface(c, defaultNetworkBridge)
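Reviewer note: the new test asserts that the daemon starts cleanly when --fixed-cidr is the very network the bridge itself owns. As a side illustration of the premise (a sketch, not part of the commit), Go's net.ParseCIDR shows the configured value masking down to the bridge network:

package main

import (
	"fmt"
	"net"
)

func main() {
	// The test starts the daemon with --bridge external-bridge
	// --fixed-cidr 172.27.42.1/16; both values describe the same network.
	ip, ipnet, err := net.ParseCIDR("172.27.42.1/16")
	if err != nil {
		panic(err)
	}
	fmt.Println(ip)    // 172.27.42.1
	fmt.Println(ipnet) // 172.27.0.0/16
}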
@@ -330,6 +330,9 @@ func (b *BoltDB) AtomicDelete(key string, previous *store.KVPair) (bool, error)
 		}
 
 		val = bucket.Get([]byte(key))
+		if val == nil {
+			return store.ErrKeyNotFound
+		}
 		dbIndex := binary.LittleEndian.Uint64(val[:libkvmetadatalen])
 		if dbIndex != previous.LastIndex {
 			return store.ErrKeyModified
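Reviewer note: the guard matters because Bolt's bucket.Get returns nil for a missing key rather than an error; without the check, slicing val[:libkvmetadatalen] would panic. A minimal standalone sketch of that Get semantics (using github.com/boltdb/bolt; the bucket name is illustrative):

package main

import (
	"fmt"

	"github.com/boltdb/bolt"
)

func main() {
	db, err := bolt.Open("demo.db", 0600, nil)
	if err != nil {
		panic(err)
	}
	defer db.Close()

	err = db.Update(func(tx *bolt.Tx) error {
		b, err := tx.CreateBucketIfNotExists([]byte("libkv")) // illustrative bucket name
		if err != nil {
			return err
		}
		// Get returns nil (not an error) when the key is absent,
		// so callers must check before indexing into the value.
		if val := b.Get([]byte("missing")); val == nil {
			fmt.Println("key not found")
		}
		return nil
	})
	if err != nil {
		panic(err)
	}
}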
@@ -467,6 +467,13 @@ func (s *Consul) AtomicDelete(key string, previous *store.KVPair) (bool, error)
 	}
 
 	p := &api.KVPair{Key: s.normalize(key), ModifyIndex: previous.LastIndex}
+
+	// Extra Get operation to check on the key
+	_, err := s.Get(key)
+	if err != nil && err == store.ErrKeyNotFound {
+		return false, err
+	}
+
 	if work, _, err := s.client.KV().DeleteCAS(p, nil); err != nil {
 		return false, err
 	} else if !work {
@@ -368,6 +368,10 @@ func (s *Etcd) AtomicDelete(key string, previous *store.KVPair) (bool, error) {
 	_, err := s.client.Delete(context.Background(), s.normalize(key), delOpts)
 	if err != nil {
 		if etcdError, ok := err.(etcd.Error); ok {
+			// Key Not Found
+			if etcdError.Code == etcd.ErrorCodeKeyNotFound {
+				return false, store.ErrKeyNotFound
+			}
 			// Compare failed
 			if etcdError.Code == etcd.ErrorCodeTestFailed {
 				return false, store.ErrKeyModified
@@ -347,9 +347,15 @@ func (s *Zookeeper) AtomicDelete(key string, previous *store.KVPair) (bool, error)
 
 	err := s.client.Delete(s.normalize(key), int32(previous.LastIndex))
 	if err != nil {
+		// Key not found
+		if err == zk.ErrNoNode {
+			return false, store.ErrKeyNotFound
+		}
+		// Compare failed
 		if err == zk.ErrBadVersion {
 			return false, store.ErrKeyModified
 		}
+		// General store error
 		return false, err
 	}
 	return true, nil
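Reviewer note: across all four backends (BoltDB, Consul, Etcd, Zookeeper) the fix gives AtomicDelete a uniform error contract: store.ErrKeyNotFound when the key is already gone and store.ErrKeyModified when the CAS index no longer matches. A sketch of how a libkv caller can lean on that contract, assuming a libkv revision with explicit backend registration (the endpoint address is illustrative):

package main

import (
	"fmt"
	"log"

	"github.com/docker/libkv"
	"github.com/docker/libkv/store"
	"github.com/docker/libkv/store/consul"
)

func main() {
	consul.Register() // register the Consul backend with libkv

	kv, err := libkv.NewStore(store.CONSUL, []string{"127.0.0.1:8500"}, &store.Config{})
	if err != nil {
		log.Fatal(err)
	}

	pair, err := kv.Get("some/key")
	if err != nil {
		log.Fatal(err)
	}

	// With the fixes above, the three outcomes are distinguishable
	// regardless of which backend is in use.
	ok, err := kv.AtomicDelete("some/key", pair)
	switch {
	case err == store.ErrKeyNotFound:
		fmt.Println("someone else already deleted the key")
	case err == store.ErrKeyModified || (err == nil && !ok):
		fmt.Println("the key changed since we read it; retry")
	case err == nil:
		fmt.Println("deleted")
	default:
		log.Fatal(err)
	}
}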
@@ -218,7 +218,14 @@ func (c *controller) initDiscovery(watcher discovery.Watcher) error {
 	}
 
 	c.discovery = hostdiscovery.NewHostDiscovery(watcher)
-	return c.discovery.Watch(c.hostJoinCallback, c.hostLeaveCallback)
+	return c.discovery.Watch(c.activeCallback, c.hostJoinCallback, c.hostLeaveCallback)
+}
+
+func (c *controller) activeCallback() {
+	ds := c.getStore(datastore.GlobalScope)
+	if ds != nil && !ds.Active() {
+		ds.RestartWatch()
+	}
 }
 
 func (c *controller) hostJoinCallback(nodes []net.IP) {
@@ -34,6 +34,10 @@ type DataStore interface {
 	Watchable() bool
 	// Watch for changes on a KVObject
 	Watch(kvObject KVObject, stopCh <-chan struct{}) (<-chan KVObject, error)
+	// RestartWatch retriggers stopped Watches
+	RestartWatch()
+	// Active returns if the store is active
+	Active() bool
 	// List returns of a list of KVObjects belonging to the parent
 	// key. The caller must pass a KVObject of the same type as
 	// the objects that need to be listed
@@ -56,6 +60,8 @@ type datastore struct {
 	scope   string
 	store   store.Store
 	cache   *cache
+	watchCh chan struct{}
+	active  bool
 	sync.Mutex
 }
 
@@ -204,7 +210,7 @@ func newClient(scope string, kv string, addr string, config *store.Config, cached bool) (DataStore, error) {
 		return nil, err
 	}
 
-	ds := &datastore{scope: scope, store: store}
+	ds := &datastore{scope: scope, store: store, active: true, watchCh: make(chan struct{})}
 	if cached {
 		ds.cache = newCache(ds)
 	}
@@ -239,6 +245,10 @@ func (ds *datastore) Scope() string {
 	return ds.scope
 }
 
+func (ds *datastore) Active() bool {
+	return ds.active
+}
+
 func (ds *datastore) Watchable() bool {
 	return ds.scope != LocalScope
 }
@@ -259,6 +269,15 @@ func (ds *datastore) Watch(kvObject KVObject, stopCh <-chan struct{}) (<-chan KVObject, error) {
 	kvoCh := make(chan KVObject)
 
 	go func() {
+	retry_watch:
+		var err error
+
+		// Make sure to get a new instance of watch channel
+		ds.Lock()
+		watchCh := ds.watchCh
+		ds.Unlock()
+
+	loop:
 		for {
 			select {
 			case <-stopCh:
@@ -269,12 +288,15 @@ func (ds *datastore) Watch(kvObject KVObject, stopCh <-chan struct{}) (<-chan KVObject, error) {
 			// for the watch can exit resulting in a nil value in
 			// channel.
 			if kvPair == nil {
-				close(sCh)
-				return
+				ds.Lock()
+				ds.active = false
+				ds.Unlock()
+				break loop
 			}
 
 			dstO := ctor.New()
 
-			if err := dstO.SetValue(kvPair.Value); err != nil {
+			if err = dstO.SetValue(kvPair.Value); err != nil {
 				log.Printf("Could not unmarshal kvpair value = %s", string(kvPair.Value))
 				break
 			}
@@ -283,11 +305,31 @@ func (ds *datastore) Watch(kvObject KVObject, stopCh <-chan struct{}) (<-chan KVObject, error) {
 				kvoCh <- dstO
 			}
 		}
+
+		// Wait on watch channel for a re-trigger when datastore becomes active
+		<-watchCh
+
+		kvpCh, err = ds.store.Watch(Key(kvObject.Key()...), sCh)
+		if err != nil {
+			log.Printf("Could not watch the key %s in store: %v", Key(kvObject.Key()...), err)
+		}
+
+		goto retry_watch
 	}()
 
 	return kvoCh, nil
 }
 
+func (ds *datastore) RestartWatch() {
+	ds.Lock()
+	defer ds.Unlock()
+
+	ds.active = true
+	watchCh := ds.watchCh
+	ds.watchCh = make(chan struct{})
+	close(watchCh)
+}
+
 func (ds *datastore) KVStore() store.Store {
 	return ds.store
 }
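Reviewer note: the restart mechanism is a channel swap. Each watch goroutine blocks on the current watchCh, and RestartWatch wakes every waiter at once by closing that channel while installing a fresh one for the next cycle. A self-contained sketch of the pattern outside libnetwork (all names here are illustrative):

package main

import (
	"fmt"
	"sync"
	"time"
)

type restarter struct {
	mu      sync.Mutex
	watchCh chan struct{}
}

func newRestarter() *restarter {
	return &restarter{watchCh: make(chan struct{})}
}

// wait blocks until the next restart is signalled.
func (r *restarter) wait() {
	r.mu.Lock()
	ch := r.watchCh // grab the current generation's channel
	r.mu.Unlock()
	<-ch
}

// restart wakes all current waiters and arms a fresh channel,
// mirroring datastore.RestartWatch above.
func (r *restarter) restart() {
	r.mu.Lock()
	defer r.mu.Unlock()
	old := r.watchCh
	r.watchCh = make(chan struct{})
	close(old) // a closed channel unblocks every receiver
}

func main() {
	r := newRestarter()
	var wg sync.WaitGroup
	for i := 0; i < 3; i++ {
		wg.Add(1)
		go func(id int) {
			defer wg.Done()
			r.wait()
			fmt.Println("watcher", id, "re-armed")
		}(i)
	}
	time.Sleep(100 * time.Millisecond) // let the watchers block first
	r.restart()
	wg.Wait()
}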
@@ -7,6 +7,7 @@ import (
 	"io/ioutil"
 	"os"
 	"regexp"
+	"sync"
 )
 
 // Record Structure for a single host record
@@ -21,8 +22,9 @@ func (r Record) WriteTo(w io.Writer) (int64, error) {
 	return int64(n), err
 }
 
+var (
 	// Default hosts config records slice
-var defaultContent = []Record{
+	defaultContent = []Record{
 	{Hosts: "localhost", IP: "127.0.0.1"},
 	{Hosts: "localhost ip6-localhost ip6-loopback", IP: "::1"},
 	{Hosts: "ip6-localnet", IP: "fe00::0"},
@@ -31,11 +33,45 @@ var defaultContent = []Record{
 	{Hosts: "ip6-allrouters", IP: "ff02::2"},
 }
 
+	// A cache of path level locks for synchronizing /etc/hosts
+	// updates on a file level
+	pathMap = make(map[string]*sync.Mutex)
+
+	// A package level mutex to synchronize the cache itself
+	pathMutex sync.Mutex
+)
+
+func pathLock(path string) func() {
+	pathMutex.Lock()
+	defer pathMutex.Unlock()
+
+	pl, ok := pathMap[path]
+	if !ok {
+		pl = &sync.Mutex{}
+		pathMap[path] = pl
+	}
+
+	pl.Lock()
+	return func() {
+		pl.Unlock()
+	}
+}
+
+// Drop drops the path string from the path cache
+func Drop(path string) {
+	pathMutex.Lock()
+	defer pathMutex.Unlock()
+
+	delete(pathMap, path)
+}
+
 // Build function
 // path is path to host file string required
 // IP, hostname, and domainname set main record leave empty for no master record
 // extraContent is an array of extra host records.
 func Build(path, IP, hostname, domainname string, extraContent []Record) error {
+	defer pathLock(path)()
+
 	content := bytes.NewBuffer(nil)
 	if IP != "" {
 		//set main record
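Reviewer note: each writer now takes defer pathLock(path)(), and the idiom is worth unpacking: pathLock(path) runs immediately and acquires the per-path mutex, while the function it returns is what defer schedules, so the lock is released on every return path. A standalone sketch of the idiom (the lock map mirrors the one added above; names are illustrative):

package main

import (
	"fmt"
	"sync"
)

var (
	locks   = make(map[string]*sync.Mutex)
	locksMu sync.Mutex
)

// lockPath acquires a mutex dedicated to path and returns its unlock func.
func lockPath(path string) func() {
	locksMu.Lock()
	l, ok := locks[path]
	if !ok {
		l = &sync.Mutex{}
		locks[path] = l
	}
	locksMu.Unlock()

	l.Lock()
	return l.Unlock
}

func update(path string) {
	// lockPath(path) executes now; defer runs the returned unlock later.
	defer lockPath(path)()
	fmt.Println("writing", path, "exclusively")
}

func main() {
	var wg sync.WaitGroup
	for i := 0; i < 4; i++ {
		wg.Add(1)
		go func() { defer wg.Done(); update("/etc/hosts") }()
	}
	wg.Wait()
}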
@@ -68,6 +104,8 @@ func Build(path, IP, hostname, domainname string, extraContent []Record) error {
 
 // Add adds an arbitrary number of Records to an already existing /etc/hosts file
 func Add(path string, recs []Record) error {
+	defer pathLock(path)()
+
 	if len(recs) == 0 {
 		return nil
 	}
@@ -95,6 +133,8 @@ func Add(path string, recs []Record) error {
 
 // Delete deletes an arbitrary number of Records already existing in /etc/hosts file
 func Delete(path string, recs []Record) error {
+	defer pathLock(path)()
+
 	if len(recs) == 0 {
 		return nil
 	}
@@ -118,6 +158,8 @@ func Delete(path string, recs []Record) error {
 // IP is new IP address
 // hostname is hostname to search for to replace IP
 func Update(path, IP, hostname string) error {
+	defer pathLock(path)()
+
 	old, err := ioutil.ReadFile(path)
 	if err != nil {
 		return err
@@ -34,7 +34,7 @@ func NewHostDiscovery(watcher discovery.Watcher) HostDiscovery {
 	return &hostDiscovery{watcher: watcher, nodes: mapset.NewSet(), stopChan: make(chan struct{})}
 }
 
-func (h *hostDiscovery) Watch(joinCallback JoinCallback, leaveCallback LeaveCallback) error {
+func (h *hostDiscovery) Watch(activeCallback ActiveCallback, joinCallback JoinCallback, leaveCallback LeaveCallback) error {
 	h.Lock()
 	d := h.watcher
 	h.Unlock()
@@ -42,15 +42,16 @@ func (h *hostDiscovery) Watch(joinCallback JoinCallback, leaveCallback LeaveCallback) error {
 		return types.BadRequestErrorf("invalid discovery watcher")
 	}
 	discoveryCh, errCh := d.Watch(h.stopChan)
-	go h.monitorDiscovery(discoveryCh, errCh, joinCallback, leaveCallback)
+	go h.monitorDiscovery(discoveryCh, errCh, activeCallback, joinCallback, leaveCallback)
 	return nil
 }
 
-func (h *hostDiscovery) monitorDiscovery(ch <-chan discovery.Entries, errCh <-chan error, joinCallback JoinCallback, leaveCallback LeaveCallback) {
+func (h *hostDiscovery) monitorDiscovery(ch <-chan discovery.Entries, errCh <-chan error,
+	activeCallback ActiveCallback, joinCallback JoinCallback, leaveCallback LeaveCallback) {
 	for {
 		select {
 		case entries := <-ch:
-			h.processCallback(entries, joinCallback, leaveCallback)
+			h.processCallback(entries, activeCallback, joinCallback, leaveCallback)
 		case err := <-errCh:
 			if err != nil {
 				log.Errorf("discovery error: %v", err)
@@ -71,7 +72,8 @@ func (h *hostDiscovery) StopDiscovery() error {
 	return nil
 }
 
-func (h *hostDiscovery) processCallback(entries discovery.Entries, joinCallback JoinCallback, leaveCallback LeaveCallback) {
+func (h *hostDiscovery) processCallback(entries discovery.Entries,
+	activeCallback ActiveCallback, joinCallback JoinCallback, leaveCallback LeaveCallback) {
 	updated := hosts(entries)
 	h.Lock()
 	existing := h.nodes
@@ -79,6 +81,7 @@ func (h *hostDiscovery) processCallback(entries discovery.Entries, joinCallback
 	h.nodes = updated
 	h.Unlock()
 
+	activeCallback()
 	if len(added) > 0 {
 		joinCallback(added)
 	}
@@ -5,13 +5,16 @@ import "net"
 // JoinCallback provides a callback event for new node joining the cluster
 type JoinCallback func(entries []net.IP)
 
+// ActiveCallback provides a callback event for active discovery event
+type ActiveCallback func()
+
 // LeaveCallback provides a callback event for node leaving the cluster
 type LeaveCallback func(entries []net.IP)
 
 // HostDiscovery primary interface
 type HostDiscovery interface {
 	//Watch Node join and leave cluster events
-	Watch(joinCallback JoinCallback, leaveCallback LeaveCallback) error
+	Watch(activeCallback ActiveCallback, joinCallback JoinCallback, leaveCallback LeaveCallback) error
 	// StopDiscovery stops the discovery perocess
 	StopDiscovery() error
 	// Fetch returns a list of host IPs that are currently discovered
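Reviewer note: ActiveCallback fires on every discovery update, before the join/leave fan-out, which is what lets the controller re-arm stalled datastore watches as soon as discovery shows signs of life. A toy wiring of the three callbacks in the order processCallback invokes them (entries are faked; all names are illustrative):

package main

import (
	"fmt"
	"net"
)

type (
	activeCallback func()
	joinCallback   func(entries []net.IP)
	leaveCallback  func(entries []net.IP)
)

// process mimics hostDiscovery.processCallback: signal liveness first,
// then report membership deltas.
func process(added, removed []net.IP, active activeCallback, join joinCallback, leave leaveCallback) {
	active()
	if len(added) > 0 {
		join(added)
	}
	if len(removed) > 0 {
		leave(removed)
	}
}

func main() {
	process(
		[]net.IP{net.ParseIP("10.0.0.2")},
		nil,
		func() { fmt.Println("discovery active: restart stopped watches") },
		func(e []net.IP) { fmt.Println("joined:", e) },
		func(e []net.IP) { fmt.Println("left:", e) },
	)
}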
@@ -250,11 +250,6 @@ func (a *Allocator) insertBitMask(key SubnetKey, pool *net.IPNet) error {
 	ones, bits := pool.Mask.Size()
 	numAddresses := uint64(1 << uint(bits-ones))
 
-	if ipVer == v4 {
-		// Do not let broadcast address be reserved
-		numAddresses--
-	}
-
 	// Allow /64 subnet
 	if ipVer == v6 && numAddresses == 0 {
 		numAddresses--
@@ -270,6 +265,11 @@ func (a *Allocator) insertBitMask(key SubnetKey, pool *net.IPNet) error {
 	// Do the same for IPv6 so that bridge ip starts with XXXX...::1
 	h.Set(0)
 
+	// Do not let broadcast address be reserved
+	if ipVer == v4 {
+		h.Set(numAddresses - 1)
+	}
+
 	a.Lock()
 	a.addresses[key] = h
 	a.Unlock()
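Reviewer note: the change keeps the bitmask sized to the full address count and instead marks the broadcast ordinal as taken. Rather than shrinking numAddresses by one (which shifted every index), bit numAddresses-1 is now set alongside bit 0. Worked numbers for an IPv4 /24 (a sketch; the real code drives libnetwork's bitseq handle):

package main

import (
	"fmt"
	"net"
)

func main() {
	_, pool, _ := net.ParseCIDR("172.27.42.0/24")
	ones, bits := pool.Mask.Size()               // 24, 32
	numAddresses := uint64(1) << uint(bits-ones) // 256 addresses in the pool

	// Index 0 (172.27.42.0, the network/gateway side) and index
	// numAddresses-1 (172.27.42.255, the broadcast) are pre-reserved;
	// the 254 host addresses in between stay allocatable.
	fmt.Println("ones:", ones, "bits:", bits)
	fmt.Println("total:", numAddresses)
	fmt.Println("broadcast ordinal:", numAddresses-1)
}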
@@ -78,7 +78,11 @@ func (a *allocator) ReleasePool(poolID string) error {
 
 // RequestAddress requests an address from the address pool
 func (a *allocator) RequestAddress(poolID string, address net.IP, options map[string]string) (*net.IPNet, map[string]string, error) {
-	var prefAddress string
+	var (
+		prefAddress string
+		retAddress  *net.IPNet
+		err         error
+	)
 	if address != nil {
 		prefAddress = address.String()
 	}
@@ -87,7 +91,9 @@ func (a *allocator) RequestAddress(poolID string, address net.IP, options map[string]string) (*net.IPNet, map[string]string, error) {
 	if err := a.call("RequestAddress", req, res); err != nil {
 		return nil, nil, err
 	}
-	retAddress, err := types.ParseCIDR(res.Address)
+	if res.Address != "" {
+		retAddress, err = types.ParseCIDR(res.Address)
+	}
 	return retAddress, res.Data, err
 }
 
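Reviewer note: the guard exists because a remote IPAM driver may legitimately return an empty Address, and parsing the empty string always fails. A quick demonstration with the standard library's parser (libnetwork's types.ParseCIDR applies the same validation):

package main

import (
	"fmt"
	"net"
)

func main() {
	// An empty response address must not be fed to the parser.
	if _, _, err := net.ParseCIDR(""); err != nil {
		fmt.Println("empty address:", err) // reports an invalid CIDR address
	}

	// With the guard, retAddress simply stays nil when the driver
	// returns no address, and no spurious error is reported.
	var retAddress *net.IPNet
	resAddress := "" // hypothetical empty driver response
	if resAddress != "" {
		_, retAddress, _ = net.ParseCIDR(resAddress)
	}
	fmt.Println("retAddress:", retAddress) // <nil>
}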
@@ -182,6 +182,10 @@ func (sb *sandbox) Delete() error {
 		}
 	}
 
+	// Container is going away. Path cache in etchosts is most
+	// likely not required any more. Drop it.
+	etchosts.Drop(sb.config.hostsPath)
+
 	if sb.osSbox != nil {
 		sb.osSbox.Destroy()
 	}
@@ -308,6 +308,11 @@ func (c *controller) processEndpointCreate(nmap map[string]*netWatch, ep *endpoint) {
 
 	c.Lock()
 	nw.localEps[ep.ID()] = ep
+
+	// If we had learned that from the kv store remove it
+	// from remote ep list now that we know that this is
+	// indeed a local endpoint
+	delete(nw.remoteEps, ep.ID())
 	c.Unlock()
 	return
 }