mirror of https://github.com/docker/docs.git

commit c86de629a8 (parent 5fe196d61f)

    godep update

    Signed-off-by: Isabel Jimenez <contact@isabeljimenez.com>
@@ -86,31 +86,38 @@
 		},
 		{
 			"ImportPath": "github.com/mesos/mesos-go/auth",
-			"Rev": "d23a18b51b1ddf4072a223b38cc8d05cffb1ea42"
+			"Comment": "v0.0.2-27-g657252e",
+			"Rev": "657252ec6fc27975cd86da9f708acd6f4bb1f9ac"
 		},
 		{
 			"ImportPath": "github.com/mesos/mesos-go/detector",
-			"Rev": "d23a18b51b1ddf4072a223b38cc8d05cffb1ea42"
+			"Comment": "v0.0.2-27-g657252e",
+			"Rev": "657252ec6fc27975cd86da9f708acd6f4bb1f9ac"
 		},
 		{
 			"ImportPath": "github.com/mesos/mesos-go/mesosproto",
-			"Rev": "d23a18b51b1ddf4072a223b38cc8d05cffb1ea42"
+			"Comment": "v0.0.2-27-g657252e",
+			"Rev": "657252ec6fc27975cd86da9f708acd6f4bb1f9ac"
 		},
 		{
 			"ImportPath": "github.com/mesos/mesos-go/mesosutil",
-			"Rev": "d23a18b51b1ddf4072a223b38cc8d05cffb1ea42"
+			"Comment": "v0.0.2-27-g657252e",
+			"Rev": "657252ec6fc27975cd86da9f708acd6f4bb1f9ac"
 		},
 		{
 			"ImportPath": "github.com/mesos/mesos-go/messenger",
-			"Rev": "d23a18b51b1ddf4072a223b38cc8d05cffb1ea42"
+			"Comment": "v0.0.2-27-g657252e",
+			"Rev": "657252ec6fc27975cd86da9f708acd6f4bb1f9ac"
 		},
 		{
 			"ImportPath": "github.com/mesos/mesos-go/scheduler",
-			"Rev": "d23a18b51b1ddf4072a223b38cc8d05cffb1ea42"
+			"Comment": "v0.0.2-27-g657252e",
+			"Rev": "657252ec6fc27975cd86da9f708acd6f4bb1f9ac"
 		},
 		{
 			"ImportPath": "github.com/mesos/mesos-go/upid",
-			"Rev": "d23a18b51b1ddf4072a223b38cc8d05cffb1ea42"
+			"Comment": "v0.0.2-27-g657252e",
+			"Rev": "657252ec6fc27975cd86da9f708acd6f4bb1f9ac"
 		},
 		{
 			"ImportPath": "github.com/pborman/uuid",
@@ -77,7 +77,7 @@ func init() {
 		log.Fatal("expected to have a parent UPID in context")
 	}
 	process := process.New("sasl_authenticatee")
-	tpid := &upid.UPID{
+	tpid := upid.UPID{
 		ID:   process.Label(),
 		Host: parent.Host,
 	}
Godeps/_workspace/src/github.com/mesos/mesos-go/auth/sasl/authenticatee_test.go (generated, vendored): 10 changed lines
@@ -60,12 +60,17 @@ func TestAuthticatee_validLogin(t *testing.T) {
 	factory := transportFactoryFunc(func() messenger.Messenger {
 		transport = &MockTransport{messenger.NewMockedMessenger()}
 		transport.On("Install").Return(nil)
-		transport.On("UPID").Return(&tpid)
+		transport.On("UPID").Return(tpid)
 		transport.On("Start").Return(nil)
 		transport.On("Stop").Return(nil)
 
+		mechMsg := make(chan struct{})
+		stepMsg := make(chan struct{})
+
 		transport.On("Send", mock.Anything, &server, &mesos.AuthenticateMessage{
 			Pid: proto.String(client.String()),
 		}).Return(nil).Run(func(_ mock.Arguments) {
+			defer close(mechMsg)
 			transport.Recv(&server, &mesos.AuthenticationMechanismsMessage{
 				Mechanisms: []string{crammd5.Name},
 			})
@@ -74,6 +79,8 @@ func TestAuthticatee_validLogin(t *testing.T) {
 	transport.On("Send", mock.Anything, &server, &mesos.AuthenticationStartMessage{
 		Mechanism: proto.String(crammd5.Name),
 	}).Return(nil).Run(func(_ mock.Arguments) {
+		defer close(stepMsg)
+		<-mechMsg
 		transport.Recv(&server, &mesos.AuthenticationStepMessage{
 			Data: []byte(`lsd;lfkgjs;dlfkgjs;dfklg`),
 		})
@@ -82,6 +89,7 @@ func TestAuthticatee_validLogin(t *testing.T) {
 	transport.On("Send", mock.Anything, &server, &mesos.AuthenticationStepMessage{
 		Data: []byte(`foo cc7fd96cd80123ea844a7dba29a594ed`),
 	}).Return(nil).Run(func(_ mock.Arguments) {
+		<-stepMsg
 		transport.Recv(&server, &mesos.AuthenticationCompletedMessage{})
 	}).Once()
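Aside: the additions above serialize the mocked message handlers with signal channels, so the "step" reply can only fire after the "mechanisms" reply has completed; closing a channel releases every receiver. A standalone sketch of that close/receive handshake, with hypothetical stage names (not part of the vendored code):

package main

import "fmt"

func main() {
	mechMsg := make(chan struct{})
	stepMsg := make(chan struct{})

	go func() {
		// stage one: handle the "mechanisms" message, then signal completion
		defer close(mechMsg)
		fmt.Println("mechanisms handled")
	}()

	go func() {
		// stage two: wait for stage one before handling the "step" message
		defer close(stepMsg)
		<-mechMsg
		fmt.Println("step handled after mechanisms")
	}()

	<-stepMsg // wait until both stages are done
}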
@@ -1,444 +0,0 @@
package zoo

import (
	"errors"
	"fmt"
	"sync"
	"sync/atomic"
	"time"

	log "github.com/golang/glog"
	"github.com/samuel/go-zookeeper/zk"
)

const (
	defaultSessionTimeout   = 60 * time.Second
	defaultReconnectTimeout = 5 * time.Second
	currentPath             = "."
	defaultRewatchDelay     = 200 * time.Millisecond
)

type stateType int32

const (
	disconnectedState stateType = iota
	connectionRequestedState
	connectionAttemptState
	connectedState
)

func (s stateType) String() string {
	switch s {
	case disconnectedState:
		return "DISCONNECTED"
	case connectionRequestedState:
		return "REQUESTED"
	case connectionAttemptState:
		return "ATTEMPT"
	case connectedState:
		return "CONNECTED"
	default:
		panic(fmt.Sprintf("unrecognized state: %d", int32(s)))
	}
}

type Client struct {
	conn           Connector
	defaultFactory Factory
	factory        Factory // must never be nil, use setFactory to update
	state          stateType
	reconnCount    uint64
	reconnDelay    time.Duration
	rootPath       string
	errorHandler   ErrorHandler // must never be nil
	connectOnce    sync.Once
	stopOnce       sync.Once
	shouldStop     chan struct{} // signal chan
	shouldReconn   chan struct{} // message chan
	connLock       sync.Mutex
	hasConnected   chan struct{} // message chan
	rewatchDelay   time.Duration
}

func newClient(hosts []string, path string) (*Client, error) {
	zkc := &Client{
		reconnDelay:  defaultReconnectTimeout,
		rewatchDelay: defaultRewatchDelay,
		rootPath:     path,
		shouldStop:   make(chan struct{}),
		shouldReconn: make(chan struct{}, 1),
		hasConnected: make(chan struct{}, 1),
		errorHandler: ErrorHandler(func(*Client, error) {}),
		defaultFactory: asFactory(func() (Connector, <-chan zk.Event, error) {
			return zk.Connect(hosts, defaultSessionTimeout)
		}),
	}
	zkc.setFactory(zkc.defaultFactory)
	// TODO(vlad): validate URIs
	return zkc, nil
}

func (zkc *Client) setFactory(f Factory) {
	if f == nil {
		f = zkc.defaultFactory
	}
	zkc.factory = asFactory(func() (c Connector, ch <-chan zk.Event, err error) {
		select {
		case <-zkc.shouldStop:
			err = errors.New("client stopping")
		default:
			zkc.connLock.Lock()
			defer zkc.connLock.Unlock()
			if zkc.conn != nil {
				zkc.conn.Close()
			}
			c, ch, err = f.create()
			zkc.conn = c
		}
		return
	})
}

// return true only if the client's state was changed from `from` to `to`
func (zkc *Client) stateChange(from, to stateType) (result bool) {
	defer func() {
		log.V(3).Infof("stateChange: from=%v to=%v result=%v", from, to, result)
	}()
	result = atomic.CompareAndSwapInt32((*int32)(&zkc.state), int32(from), int32(to))
	return
}

// connect to zookeeper, blocks on the initial call to doConnect()
func (zkc *Client) connect() {
	select {
	case <-zkc.shouldStop:
		return
	default:
		zkc.connectOnce.Do(func() {
			if zkc.stateChange(disconnectedState, connectionRequestedState) {
				if err := zkc.doConnect(); err != nil {
					log.Error(err)
					zkc.errorHandler(zkc, err)
				}
			}
			go func() {
				for {
					select {
					case <-zkc.shouldStop:
						zkc.connLock.Lock()
						defer zkc.connLock.Unlock()
						if zkc.conn != nil {
							zkc.conn.Close()
						}
						return
					case <-zkc.shouldReconn:
						if err := zkc.reconnect(); err != nil {
							log.Error(err)
							zkc.errorHandler(zkc, err)
						}
					}
				}
			}()
		})
	}
	return
}

// attempt to reconnect to zookeeper. will ignore attempts to reconnect
// if not disconnected. if reconnection is attempted then this func will block
// for at least reconnDelay before actually attempting to connect to zookeeper.
func (zkc *Client) reconnect() error {
	if !zkc.stateChange(disconnectedState, connectionRequestedState) {
		log.V(4).Infoln("Ignoring reconnect, currently connected/connecting.")
		return nil
	}

	defer func() { zkc.reconnCount++ }()

	log.V(4).Infoln("Delaying reconnection for ", zkc.reconnDelay)
	<-time.After(zkc.reconnDelay)

	return zkc.doConnect()
}

func (zkc *Client) doConnect() error {
	if !zkc.stateChange(connectionRequestedState, connectionAttemptState) {
		log.V(4).Infoln("aborting doConnect, connection attempt already in progress or else disconnected")
		return nil
	}

	// if we're not connected by the time we return then we failed.
	defer func() {
		zkc.stateChange(connectionAttemptState, disconnectedState)
	}()

	// create Connector instance
	conn, sessionEvents, err := zkc.factory.create()
	if err != nil {
		// once the factory stops producing connectors, it's time to stop
		zkc.stop()
		return err
	}

	zkc.connLock.Lock()
	zkc.conn = conn
	zkc.connLock.Unlock()

	log.V(4).Infof("Created connection object of type %T\n", conn)
	connected := make(chan struct{})
	sessionExpired := make(chan struct{})
	go func() {
		defer close(sessionExpired)
		zkc.monitorSession(sessionEvents, connected)
	}()

	// wait for connected confirmation
	select {
	case <-connected:
		if !zkc.stateChange(connectionAttemptState, connectedState) {
			log.V(4).Infoln("failed to transition to connected state")
			// we could be:
			// - disconnected ... reconnect() will try to connect again, otherwise;
			// - connected ... another goroutine already established a connection
			// - connectionRequested ... another goroutine is already trying to connect
			zkc.requestReconnect()
		}
		log.Infoln("zookeeper client connected")
	case <-sessionExpired:
		// connection was disconnected before it was ever really 'connected'
		if !zkc.stateChange(connectionAttemptState, disconnectedState) {
			// programming error
			panic("failed to transition from connection-attempt to disconnected state")
		}
		zkc.requestReconnect()
	case <-zkc.shouldStop:
		// noop
	}
	return nil
}

// signal for reconnect unless we're shutting down
func (zkc *Client) requestReconnect() {
	select {
	case <-zkc.shouldStop:
		// abort reconnect request, client is shutting down
	default:
		select {
		case zkc.shouldReconn <- struct{}{}:
			// reconnect request successful
		default:
			// reconnect chan is full: reconnect has already
			// been requested. move on.
		}
	}
}

// monitor a zookeeper session event channel, closes the 'connected' channel once
// a zookeeper connection has been established. errors are forwarded to the client's
// errorHandler. the closing of the sessionEvents chan triggers a call to client.onDisconnected.
// this func blocks until either the client's shouldStop or sessionEvents chan are closed.
func (zkc *Client) monitorSession(sessionEvents <-chan zk.Event, connected chan struct{}) {
	firstConnected := true
	for {
		select {
		case <-zkc.shouldStop:
			return
		case e, ok := <-sessionEvents:
			if !ok {
				// once sessionEvents is closed, the embedded ZK client will
				// no longer attempt to reconnect.
				zkc.onDisconnected()
				return
			} else if e.Err != nil {
				log.Errorf("received state error: %s", e.Err.Error())
				zkc.errorHandler(zkc, e.Err)
			}
			switch e.State {
			case zk.StateConnecting:
				log.Infoln("connecting to zookeeper..")

			case zk.StateConnected:
				log.V(2).Infoln("received StateConnected")
				if firstConnected {
					close(connected) // signal session listener
					firstConnected = false
				}
				// let any listeners know about the change
				select {
				case <-zkc.shouldStop: // noop
				case zkc.hasConnected <- struct{}{}: // noop
				default: // message buf full, this becomes a non-blocking noop
				}

			case zk.StateDisconnected:
				log.Infoln("zookeeper client disconnected")

			case zk.StateExpired:
				log.Infoln("zookeeper client session expired")
			}
		}
	}
}

// watch the child nodes for changes, at the specified path.
// callers that specify a path of `currentPath` will watch the currently set rootPath,
// otherwise the watchedPath is calculated as rootPath+path.
// this func spawns a go routine to actually do the watching, and so returns immediately.
// in the absence of errors a signalling channel is returned that will close
// upon the termination of the watch (e.g. due to disconnection).
func (zkc *Client) watchChildren(path string, watcher ChildWatcher) (<-chan struct{}, error) {
	watchPath := zkc.rootPath
	if path != "" && path != currentPath {
		watchPath = watchPath + path
	}

	log.V(2).Infoln("Watching children for path", watchPath)
	watchEnded := make(chan struct{})
	go func() {
		defer close(watchEnded)
		zkc._watchChildren(watchPath, watcher)
	}()
	return watchEnded, nil
}

// continuation of watchChildren. blocks until either underlying zk connector terminates, or else this
// client is shut down. continuously renews child watches.
func (zkc *Client) _watchChildren(watchPath string, watcher ChildWatcher) {
	watcher(zkc, watchPath) // prime the listener
	var zkevents <-chan zk.Event
	var err error
	first := true
	for {
		// we really only expect this to happen when zk session has expired,
		// give the connection a little time to re-establish itself
		for {
			//TODO(jdef) it would be better if we could listen for broadcast Connection/Disconnection events,
			//emitted whenever the embedded client cycles (read: when the connection state of this client changes).
			//As it currently stands, if the embedded client cycles fast enough, we may actually not notice it here
			//and keep on watching like nothing bad happened.
			if !zkc.isConnected() {
				log.Warningf("no longer connected to server, exiting child watch")
				return
			}
			if first {
				first = false
			} else {
				select {
				case <-zkc.shouldStop:
					return
				case <-time.After(zkc.rewatchDelay):
				}
			}
			_, _, zkevents, err = zkc.conn.ChildrenW(watchPath)
			if err == nil {
				log.V(2).Infoln("rewatching children for path", watchPath)
				break
			}
			log.V(1).Infof("unable to watch children for path %s: %s", watchPath, err.Error())
			zkc.errorHandler(zkc, err)
		}
		// zkevents is (at most) a one-trick channel
		// (a) a child event happens (no error)
		// (b) the embedded client is shutting down (zk.ErrClosing)
		// (c) the zk session expires (zk.ErrSessionExpired)
		select {
		case <-zkc.shouldStop:
			return
		case e, ok := <-zkevents:
			if !ok {
				log.Warningf("expected a single zk event before channel close")
				break // the select
			}
			switch e.Type {
			//TODO(jdef) should we not also watch for EventNode{Created,Deleted,DataChanged}?
			case zk.EventNodeChildrenChanged:
				log.V(2).Infoln("Handling: zk.EventNodeChildrenChanged")
				watcher(zkc, e.Path)
				continue
			default:
				if e.Err != nil {
					zkc.errorHandler(zkc, e.Err)
					if e.Type == zk.EventNotWatching && e.State == zk.StateDisconnected {
						if e.Err == zk.ErrClosing {
							log.V(1).Infof("watch invalidated, embedded client terminating")
							return
						}
						log.V(1).Infof("watch invalidated, attempting to watch again: %v", e.Err)
					} else {
						log.Warningf("received error while watching path %s: %s", watchPath, e.Err.Error())
					}
				}
			}
		}
	}
}

func (zkc *Client) onDisconnected() {
	if st := zkc.getState(); st == connectedState && zkc.stateChange(st, disconnectedState) {
		log.Infoln("disconnected from the server, reconnecting...")
		zkc.requestReconnect()
		return
	}
}

// return a channel that gets an empty struct every time a connection happens
func (zkc *Client) connections() <-chan struct{} {
	return zkc.hasConnected
}

func (zkc *Client) getState() stateType {
	return stateType(atomic.LoadInt32((*int32)(&zkc.state)))
}

// convenience function
func (zkc *Client) isConnected() bool {
	return zkc.getState() == connectedState
}

// convenience function
func (zkc *Client) isConnecting() bool {
	state := zkc.getState()
	return state == connectionRequestedState || state == connectionAttemptState
}

// convenience function
func (zkc *Client) isDisconnected() bool {
	return zkc.getState() == disconnectedState
}

func (zkc *Client) list(path string) ([]string, error) {
	if !zkc.isConnected() {
		return nil, errors.New("Unable to list children, client not connected.")
	}

	children, _, err := zkc.conn.Children(path)
	if err != nil {
		return nil, err
	}

	return children, nil
}

func (zkc *Client) data(path string) ([]byte, error) {
	if !zkc.isConnected() {
		return nil, errors.New("Unable to retrieve node data, client not connected.")
	}

	data, _, err := zkc.conn.Get(path)
	if err != nil {
		return nil, err
	}

	return data, nil
}

func (zkc *Client) stop() {
	zkc.stopOnce.Do(func() {
		close(zkc.shouldStop)
	})
}

// when this channel is closed the client is either stopping, or has stopped
func (zkc *Client) stopped() <-chan struct{} {
	return zkc.shouldStop
}
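For reference, the deleted client guarded its connection lifecycle with a small state machine driven by atomic compare-and-swap, exactly as in stateChange above. A standalone sketch of that pattern (simplified; the state names mirror the deleted file, the client type is hypothetical):

package main

import (
	"fmt"
	"sync/atomic"
)

type stateType int32

const (
	disconnectedState stateType = iota
	connectionRequestedState
	connectionAttemptState
	connectedState
)

type client struct{ state int32 }

// stateChange succeeds only for the single goroutine that wins the CAS,
// so concurrent connect attempts cannot both enter the same transition.
func (c *client) stateChange(from, to stateType) bool {
	return atomic.CompareAndSwapInt32(&c.state, int32(from), int32(to))
}

func main() {
	c := &client{}
	fmt.Println(c.stateChange(disconnectedState, connectionRequestedState)) // true
	fmt.Println(c.stateChange(disconnectedState, connectionRequestedState)) // false: already requested
	fmt.Println(c.stateChange(connectionRequestedState, connectionAttemptState)) // true
	fmt.Println(c.stateChange(connectionAttemptState, connectedState))           // true
}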
Godeps/_workspace/src/github.com/mesos/mesos-go/detector/zoo/client2.go (generated, vendored, new file): 88 lines
@@ -0,0 +1,88 @@
package zoo

import (
	"sync"
	"time"

	"github.com/samuel/go-zookeeper/zk"
)

const (
	defaultSessionTimeout = 60 * time.Second
	currentPath           = "."
)

var zkSessionTimeout = defaultSessionTimeout

type client2 struct {
	*zk.Conn
	path     string
	done     chan struct{} // signal chan, closes when the underlying connection terminates
	stopOnce sync.Once
}

func connect2(hosts []string, path string) (*client2, error) {
	c, ev, err := zk.Connect(hosts, zkSessionTimeout)
	if err != nil {
		return nil, err
	}
	done := make(chan struct{})
	go func() {
		// close the 'done' chan when the zk event chan closes (signals termination of zk connection)
		defer close(done)
		for {
			if _, ok := <-ev; !ok {
				return
			}
		}
	}()
	return &client2{
		Conn: c,
		path: path,
		done: done,
	}, nil
}

func (c *client2) stopped() <-chan struct{} {
	return c.done
}

func (c *client2) stop() {
	c.stopOnce.Do(c.Close)
}

func (c *client2) data(path string) (data []byte, err error) {
	data, _, err = c.Get(path)
	return
}

func (c *client2) watchChildren(path string) (string, <-chan []string, <-chan error) {
	errCh := make(chan error, 1)
	snap := make(chan []string)

	watchPath := c.path
	if path != "" && path != currentPath {
		watchPath = watchPath + path
	}
	go func() {
		defer close(errCh)
		for {
			children, _, ev, err := c.ChildrenW(watchPath)
			if err != nil {
				errCh <- err
				return
			}
			select {
			case snap <- children:
			case <-c.done:
				return
			}
			e := <-ev // wait for the next watch-related event
			if e.Err != nil {
				errCh <- e.Err
				return
			}
		}
	}()
	return watchPath, snap, errCh
}
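A hypothetical consumer of the new client2.watchChildren API, sketched as if it lived in the same zoo package (the host list, path, and fmt import are assumptions for illustration, not part of the commit):

// watchLoop drains child-list snapshots until the watch dies or the client stops.
func watchLoop() error {
	c, err := connect2([]string{"localhost:2181"}, "/mesos") // placeholder hosts/path
	if err != nil {
		return err
	}
	defer c.stop()

	path, snaps, errs := c.watchChildren(currentPath)
	for {
		select {
		case children := <-snaps:
			fmt.Println("children of", path, ":", children) // fresh snapshot on every receive
		case err, ok := <-errs:
			if !ok {
				return nil // errs closed: the watch goroutine exited
			}
			return err // watch failed, e.g. session expired
		case <-c.stopped():
			return nil
		}
	}
}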
@@ -1,342 +0,0 @@
package zoo

import (
	"errors"
	"fmt"
	"os"
	"strings"
	"sync/atomic"
	"testing"
	"time"

	"github.com/gogo/protobuf/proto"
	log "github.com/golang/glog"
	util "github.com/mesos/mesos-go/mesosutil"
	"github.com/samuel/go-zookeeper/zk"
	"github.com/stretchr/testify/assert"
)

var test_zk_hosts = []string{"localhost:2181"}

const (
	test_zk_path = "/test"
)

func TestClientNew(t *testing.T) {
	path := "/mesos"
	chEvent := make(chan zk.Event)
	connector := makeMockConnector(path, chEvent)

	c, err := newClient(test_zk_hosts, path)
	assert.NoError(t, err)
	assert.NotNil(t, c)
	assert.False(t, c.isConnected())
	c.conn = connector
}

// This test requires zookeeper to be running.
// You must also set env variable ZK_HOSTS to point to zk hosts.
// The zk package does not offer a way to mock its connection function.
func TestClientConnectIntegration(t *testing.T) {
	if os.Getenv("ZK_HOSTS") == "" {
		t.Skip("Skipping zk-server connection test: missing env ZK_HOSTS.")
	}
	hosts := strings.Split(os.Getenv("ZK_HOSTS"), ",")
	c, err := newClient(hosts, "/mesos")
	assert.NoError(t, err)
	c.errorHandler = ErrorHandler(func(c *Client, e error) {
		err = e
	})
	c.connect()
	assert.NoError(t, err)

	c.connect()
	assert.NoError(t, err)
	assert.True(t, c.isConnected())
}

func TestClientConnect(t *testing.T) {
	c, err := makeClient()
	assert.NoError(t, err)
	assert.False(t, c.isConnected())
	c.connect()
	assert.True(t, c.isConnected())
	assert.False(t, c.isConnecting())
}

func TestClient_FlappingConnection(t *testing.T) {
	c, err := newClient(test_zk_hosts, test_zk_path)
	c.reconnDelay = 10 * time.Millisecond // we don't want this test to take forever
	defer c.stop()

	assert.NoError(t, err)

	attempts := 0
	c.setFactory(asFactory(func() (Connector, <-chan zk.Event, error) {
		log.V(2).Infof("**** Using zk.Conn adapter ****")
		ch0 := make(chan zk.Event, 10) // session chan
		ch1 := make(chan zk.Event)     // watch chan
		go func() {
			if attempts > 1 {
				t.Fatalf("only one connector instance is expected")
			}
			attempts++
			for i := 0; i < 4; i++ {
				ch0 <- zk.Event{
					Type:  zk.EventSession,
					State: zk.StateConnecting,
					Path:  test_zk_path,
				}
				ch0 <- zk.Event{
					Type:  zk.EventSession,
					State: zk.StateConnected,
					Path:  test_zk_path,
				}
				time.Sleep(200 * time.Millisecond)
				ch0 <- zk.Event{
					Type:  zk.EventSession,
					State: zk.StateDisconnected,
					Path:  test_zk_path,
				}
			}
		}()
		connector := makeMockConnector(test_zk_path, ch1)
		return connector, ch0, nil
	}))

	go c.connect()
	time.Sleep(2 * time.Second)
	assert.True(t, c.isConnected())
	assert.Equal(t, 1, attempts)
}

func TestClientWatchChildren(t *testing.T) {
	c, err := makeClient()
	assert.NoError(t, err)
	c.errorHandler = ErrorHandler(func(c *Client, e error) {
		err = e
	})
	c.connect()
	assert.NoError(t, err)
	wCh := make(chan struct{}, 1)
	childrenWatcher := ChildWatcher(func(zkc *Client, path string) {
		log.V(4).Infoln("Path", path, "changed!")
		children, err := c.list(path)
		assert.NoError(t, err)
		assert.Equal(t, 3, len(children))
		assert.Equal(t, "info_0", children[0])
		assert.Equal(t, "info_5", children[1])
		assert.Equal(t, "info_10", children[2])
		wCh <- struct{}{}
	})

	_, err = c.watchChildren(currentPath, childrenWatcher)
	assert.NoError(t, err)

	select {
	case <-wCh:
	case <-time.After(time.Millisecond * 700):
		panic("Waited too long...")
	}
}

func TestClientWatchErrors(t *testing.T) {
	path := "/test"
	ch := make(chan zk.Event, 1)
	ch <- zk.Event{
		Type: zk.EventNotWatching,
		Err:  errors.New("Event Error"),
	}

	c, err := makeClient()
	c.state = connectedState

	assert.NoError(t, err)
	c.conn = makeMockConnector(path, (<-chan zk.Event)(ch))
	wCh := make(chan struct{}, 1)
	c.errorHandler = ErrorHandler(func(zkc *Client, err error) {
		assert.Error(t, err)
		wCh <- struct{}{}
	})

	c.watchChildren(currentPath, ChildWatcher(func(*Client, string) {}))

	select {
	case <-wCh:
	case <-time.After(time.Millisecond * 700):
		t.Fatalf("timed out waiting for error message")
	}
}

func TestWatchChildren_flappy(t *testing.T) {
	c, err := newClient(test_zk_hosts, test_zk_path)
	c.reconnDelay = 10 * time.Millisecond // we don't want this test to take forever

	assert.NoError(t, err)

	attempts := 0
	conn := NewMockConnector()
	defer func() {
		if !t.Failed() {
			conn.AssertExpectations(t)
		}
	}()
	defer func() {
		// stop client and give it time to shut down the connector
		c.stop()
		time.Sleep(100 * time.Millisecond)
	}()
	c.setFactory(asFactory(func() (Connector, <-chan zk.Event, error) {
		log.V(2).Infof("**** Using zk.Conn adapter ****")
		ch0 := make(chan zk.Event, 10) // session chan
		ch1 := make(chan zk.Event)     // watch chan
		go func() {
			if attempts > 1 {
				t.Fatalf("only one connector instance is expected")
			}
			attempts++
			for i := 0; i < 4; i++ {
				ch0 <- zk.Event{
					Type:  zk.EventSession,
					State: zk.StateConnecting,
					Path:  test_zk_path,
				}
				ch0 <- zk.Event{
					Type:  zk.EventSession,
					State: zk.StateConnected,
					Path:  test_zk_path,
				}
				time.Sleep(200 * time.Millisecond)
				ch0 <- zk.Event{
					Type:  zk.EventSession,
					State: zk.StateDisconnected,
					Path:  test_zk_path,
				}
			}
			ch0 <- zk.Event{
				Type:  zk.EventSession,
				State: zk.StateConnecting,
				Path:  test_zk_path,
			}
			ch0 <- zk.Event{
				Type:  zk.EventSession,
				State: zk.StateConnected,
				Path:  test_zk_path,
			}
			ch1 <- zk.Event{
				Type: zk.EventNodeChildrenChanged,
				Path: test_zk_path,
			}
		}()
		simulatedErr := errors.New("simulated watch error")
		conn.On("ChildrenW", test_zk_path).Return(nil, nil, nil, simulatedErr).Times(4)
		conn.On("ChildrenW", test_zk_path).Return([]string{test_zk_path}, &zk.Stat{}, (<-chan zk.Event)(ch1), nil)
		conn.On("Close").Return(nil)
		return conn, ch0, nil
	}))

	go c.connect()
	var watchChildrenCount uint64
	watcherFunc := ChildWatcher(func(zkc *Client, path string) {
		log.V(1).Infof("ChildWatcher invoked %d", atomic.LoadUint64(&watchChildrenCount))
	})
	startTime := time.Now()
	endTime := startTime.Add(2 * time.Second)
watcherLoop:
	for time.Now().Before(endTime) {
		log.V(1).Infof("entered watcherLoop")
		select {
		case <-c.connections():
			log.V(1).Infof("invoking watchChildren")
			if _, err := c.watchChildren(currentPath, watcherFunc); err == nil {
				// watching children succeeded!!
				t.Logf("child watch success")
				atomic.AddUint64(&watchChildrenCount, 1)
			} else {
				// setting the watch failed
				t.Logf("setting child watch failed: %v", err)
				continue watcherLoop
			}
		case <-c.stopped():
			t.Logf("detected client termination")
			break watcherLoop
		case <-time.After(endTime.Sub(time.Now())):
		}
	}

	wantChildrenCount := atomic.LoadUint64(&watchChildrenCount)
	assert.Equal(t, uint64(5), wantChildrenCount, "expected watchChildrenCount = 5 instead of %d, should be reinvoked upon initial ChildrenW failures", wantChildrenCount)
}

func makeClient() (*Client, error) {
	ch0 := make(chan zk.Event, 2)
	ch1 := make(chan zk.Event, 1)

	ch0 <- zk.Event{
		State: zk.StateConnected,
		Path:  test_zk_path,
	}
	ch1 <- zk.Event{
		Type: zk.EventNodeChildrenChanged,
		Path: test_zk_path,
	}
	go func() {
		time.Sleep(1 * time.Second)
		ch0 <- zk.Event{
			State: zk.StateDisconnected,
		}
		close(ch0)
		close(ch1)
	}()

	c, err := newClient(test_zk_hosts, test_zk_path)
	if err != nil {
		return nil, err
	}

	// only allow a single connection
	first := true
	c.setFactory(asFactory(func() (Connector, <-chan zk.Event, error) {
		if !first {
			return nil, nil, errors.New("only a single connection attempt allowed for mock connector")
		} else {
			first = false
		}
		log.V(2).Infof("**** Using zk.Conn adapter ****")
		connector := makeMockConnector(test_zk_path, ch1)
		return connector, ch0, nil
	}))

	return c, nil
}

func makeMockConnector(path string, chEvent <-chan zk.Event) *MockConnector {
	log.V(2).Infoln("Making Connector mock.")
	conn := NewMockConnector()
	conn.On("Close").Return(nil)
	conn.On("ChildrenW", path).Return([]string{path}, &zk.Stat{}, chEvent, nil)
	conn.On("Children", path).Return([]string{"info_0", "info_5", "info_10"}, &zk.Stat{}, nil)
	conn.On("Get", fmt.Sprintf("%s/info_0", path)).Return(makeTestMasterInfo(), &zk.Stat{}, nil)

	return conn
}

func newTestMasterInfo(id int) []byte {
	miPb := util.NewMasterInfo(fmt.Sprintf("master(%d)@localhost:5050", id), 123456789, 400)
	data, err := proto.Marshal(miPb)
	if err != nil {
		panic(err)
	}
	return data
}

func makeTestMasterInfo() []byte {
	miPb := util.NewMasterInfo("master@localhost:5050", 123456789, 400)
	data, err := proto.Marshal(miPb)
	if err != nil {
		panic(err)
	}
	return data
}
@@ -19,6 +19,7 @@
 package zoo
 
 import (
+	"encoding/json"
 	"fmt"
 	"math"
 	"net/url"
@@ -37,25 +38,37 @@ import (
 const (
 	// prefix for nodes listed at the ZK URL path
 	nodePrefix = "info_"
+	nodeJSONPrefix = "json.info_"
 	defaultMinDetectorCyclePeriod = 1 * time.Second
 )
 
 // reasonable default for a noop change listener
 var ignoreChanged = detector.OnMasterChanged(func(*mesos.MasterInfo) {})
 
+type zkInterface interface {
+	stopped() <-chan struct{}
+	stop()
+	data(string) ([]byte, error)
+	watchChildren(string) (string, <-chan []string, <-chan error)
+}
+
+type infoCodec func(path, node string) (*mesos.MasterInfo, error)
+
 // Detector uses ZooKeeper to detect new leading master.
 type MasterDetector struct {
-	client     *Client
+	client     zkInterface
 	leaderNode string
 
-	// for one-time zk client initiation
-	bootstrap sync.Once
+	bootstrapLock sync.RWMutex // guard against concurrent invocations of bootstrapFunc
+	bootstrapFunc func() error // for one-time zk client initiation
 
 	// latch: only install, at most, one ignoreChanged listener; see MasterDetector.Detect
 	ignoreInstalled int32
 
 	// detection should not signal master change listeners more frequently than this
 	minDetectorCyclePeriod time.Duration
+	done                   chan struct{}
+	cancel                 func()
 }
 
 // Internal constructor function
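zkInterface is the seam that lets the detector run against either the real client (connect2) or a test double: anything satisfying these four methods can be assigned to MasterDetector.client. A minimal illustration of the idea (hypothetical stub written as if in the same package, assuming an fmt import; the tests further below use a testify-based mock instead):

// fakeZk satisfies zkInterface with canned channels, enough to drive a detector in isolation.
type fakeZk struct {
	done  chan struct{}
	snaps chan []string
	errs  chan error
}

func (f *fakeZk) stopped() <-chan struct{} { return f.done }
func (f *fakeZk) stop()                    { close(f.done) }
func (f *fakeZk) data(path string) ([]byte, error) {
	return nil, fmt.Errorf("no data at %s", path) // canned failure for brevity
}
func (f *fakeZk) watchChildren(path string) (string, <-chan []string, <-chan error) {
	return path, f.snaps, f.errs
}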
@@ -66,17 +79,20 @@ func NewMasterDetector(zkurls string) (*MasterDetector, error) {
 		return nil, err
 	}
 
-	client, err := newClient(zkHosts, zkPath)
-	if err != nil {
-		return nil, err
-	}
-
 	detector := &MasterDetector{
-		client: client,
 		minDetectorCyclePeriod: defaultMinDetectorCyclePeriod,
+		done:   make(chan struct{}),
+		cancel: func() {},
 	}
 
-	log.V(2).Infoln("Created new detector, watching ", zkHosts, zkPath)
+	detector.bootstrapFunc = func() (err error) {
+		if detector.client == nil {
+			detector.client, err = connect2(zkHosts, zkPath)
+		}
+		return
+	}
+
+	log.V(2).Infoln("Created new detector to watch", zkHosts, zkPath)
 	return detector, nil
 }
 
@@ -94,42 +110,35 @@ func parseZk(zkurls string) ([]string, string, error) {
 
 // returns a chan that, when closed, indicates termination of the detector
 func (md *MasterDetector) Done() <-chan struct{} {
-	return md.client.stopped()
+	return md.done
 }
 
 func (md *MasterDetector) Cancel() {
-	md.client.stop()
+	md.bootstrapLock.RLock()
+	defer md.bootstrapLock.RUnlock()
+	md.cancel()
 }
 
 //TODO(jdef) execute async because we don't want to stall our client's event loop? if so
 //then we also probably want serial event delivery (aka. delivery via a chan) but then we
 //have to deal with chan buffer sizes .. ugh. This is probably the least painful for now.
-func (md *MasterDetector) childrenChanged(zkc *Client, path string, obs detector.MasterChanged) {
-	log.V(2).Infof("fetching children at path '%v'", path)
-	list, err := zkc.list(path)
-	if err != nil {
-		log.Warning(err)
-		return
-	}
-
+func (md *MasterDetector) childrenChanged(path string, list []string, obs detector.MasterChanged) {
 	md.notifyMasterChanged(path, list, obs)
 	md.notifyAllMasters(path, list, obs)
 }
 
 func (md *MasterDetector) notifyMasterChanged(path string, list []string, obs detector.MasterChanged) {
-	topNode := selectTopNode(list)
+	// mesos v0.24 writes JSON only, v0.23 writes json and protobuf, v0.22 and prior only write protobuf
+	topNode, codec := md.selectTopNode(list)
 	if md.leaderNode == topNode {
 		log.V(2).Infof("ignoring children-changed event, leader has not changed: %v", path)
 		return
 	}
 
-	log.V(2).Infof("changing leader node from %s -> %s", md.leaderNode, topNode)
+	log.V(2).Infof("changing leader node from %q -> %q", md.leaderNode, topNode)
 	md.leaderNode = topNode
 
 	var masterInfo *mesos.MasterInfo
 	if md.leaderNode != "" {
 		var err error
-		if masterInfo, err = md.pullMasterInfo(path, topNode); err != nil {
+		if masterInfo, err = codec(path, topNode); err != nil {
 			log.Errorln(err.Error())
 		}
 	}
@@ -156,7 +165,21 @@ func (md *MasterDetector) pullMasterInfo(path, node string) (*mesos.MasterInfo, error) {
 	masterInfo := &mesos.MasterInfo{}
 	err = proto.Unmarshal(data, masterInfo)
 	if err != nil {
-		return nil, fmt.Errorf("failed to unmarshall MasterInfo data from zookeeper: %v", err)
+		return nil, fmt.Errorf("failed to unmarshal protobuf MasterInfo data from zookeeper: %v", err)
 	}
 	return masterInfo, nil
 }
+
+func (md *MasterDetector) pullMasterJsonInfo(path, node string) (*mesos.MasterInfo, error) {
+	data, err := md.client.data(fmt.Sprintf("%s/%s", path, node))
+	if err != nil {
+		return nil, fmt.Errorf("failed to retrieve leader data: %v", err)
+	}
+
+	masterInfo := &mesos.MasterInfo{}
+	err = json.Unmarshal(data, masterInfo)
+	if err != nil {
+		return nil, fmt.Errorf("failed to unmarshal json MasterInfo data from zookeeper: %v", err)
+	}
+	return masterInfo, nil
+}
@@ -167,18 +190,52 @@ func (md *MasterDetector) notifyAllMasters(path string, list []string, obs detector.MasterChanged) {
 		// not interested in entire master list
 		return
 	}
-	masters := []*mesos.MasterInfo{}
-	for _, node := range list {
-		info, err := md.pullMasterInfo(path, node)
+
+	// mesos v0.24 writes JSON only, v0.23 writes json and protobuf, v0.22 and prior only write protobuf
+	masters := map[string]*mesos.MasterInfo{}
+	tryStore := func(node string, codec infoCodec) {
+		info, err := codec(path, node)
 		if err != nil {
 			log.Errorln(err.Error())
 		} else {
-			masters = append(masters, info)
+			masters[info.GetId()] = info
 		}
 	}
+	for _, node := range list {
+		// compare https://github.com/apache/mesos/blob/0.23.0/src/master/detector.cpp#L437
+		if strings.HasPrefix(node, nodePrefix) {
+			tryStore(node, md.pullMasterInfo)
+		} else if strings.HasPrefix(node, nodeJSONPrefix) {
+			tryStore(node, md.pullMasterJsonInfo)
+		} else {
+			continue
+		}
+	}
+	masterList := make([]*mesos.MasterInfo, 0, len(masters))
+	for _, v := range masters {
+		masterList = append(masterList, v)
+	}
 
-	log.V(2).Infof("notifying of master membership change: %+v", masters)
-	logPanic(func() { all.UpdatedMasters(masters) })
+	log.V(2).Infof("notifying of master membership change: %+v", masterList)
+	logPanic(func() { all.UpdatedMasters(masterList) })
 }
 
+func (md *MasterDetector) callBootstrap() (e error) {
+	log.V(2).Infoln("invoking detector bootstrap")
+	md.bootstrapLock.Lock()
+	defer md.bootstrapLock.Unlock()
+
+	clientConfigured := md.client != nil
+	if e = md.bootstrapFunc(); e == nil && !clientConfigured && md.client != nil {
+		// chain the lifetime of this detector to that of the newly created client impl
+		client := md.client
+		md.cancel = client.stop
+		go func() {
+			defer close(md.done)
+			<-client.stopped()
+		}()
+	}
+	return
+}
+
 // the first call to Detect will kickstart a connection to zookeeper. a nil change listener may
@@ -187,70 +244,111 @@ func (md *MasterDetector) notifyAllMasters(path string, list []string, obs detector.MasterChanged) {
 // once, and each time the spec'd listener will be added to the list of those receiving notifications.
 func (md *MasterDetector) Detect(f detector.MasterChanged) (err error) {
-	// kickstart zk client connectivity
-	md.bootstrap.Do(func() { go md.client.connect() })
+	if err := md.callBootstrap(); err != nil {
+		log.V(3).Infoln("failed to execute bootstrap function", err.Error())
+		return err
+	}
 
 	if f == nil {
 		// only ever install, at most, one ignoreChanged listener. multiple instances of it
 		// just consume resources and generate misleading log messages.
 		if !atomic.CompareAndSwapInt32(&md.ignoreInstalled, 0, 1) {
 			log.V(3).Infoln("ignoreChanged listener already installed")
 			return
 		}
 		f = ignoreChanged
 	}
 
+	log.V(3).Infoln("spawning detect()")
 	go md.detect(f)
 	return nil
 }
 
 func (md *MasterDetector) detect(f detector.MasterChanged) {
 	log.V(3).Infoln("detecting children at", currentPath)
 detectLoop:
 	for {
-		started := time.Now()
 		select {
 		case <-md.Done():
 			return
-		case <-md.client.connections():
-			// we let the golang runtime manage our listener list for us, in form of goroutines that
-			// callback to the master change notification listen func's
-			if watchEnded, err := md.client.watchChildren(currentPath, ChildWatcher(func(zkc *Client, path string) {
-				md.childrenChanged(zkc, path, f)
-			})); err == nil {
-				log.V(2).Infoln("detector listener installed")
-				select {
-				case <-watchEnded:
-					if md.leaderNode != "" {
-						log.V(1).Infof("child watch ended, signaling master lost")
-						md.leaderNode = ""
-						f.OnMasterChanged(nil)
-					}
-				case <-md.client.stopped():
-					return
-				}
-			} else {
-				log.V(1).Infof("child watch ended with error: %v", err)
-				continue detectLoop
-			}
-		}
-		// rate-limit master changes
-		if elapsed := time.Now().Sub(started); elapsed > 0 {
-			log.V(2).Infoln("resting before next detection cycle")
-			select {
-			case <-md.Done():
-				return
-			case <-time.After(md.minDetectorCyclePeriod - elapsed): // noop
+		default:
+		}
+		log.V(3).Infoln("watching children at", currentPath)
+		path, childrenCh, errCh := md.client.watchChildren(currentPath)
+		rewatch := false
+		for {
+			started := time.Now()
+			select {
+			case children := <-childrenCh:
+				md.childrenChanged(path, children, f)
+			case err, ok := <-errCh:
+				// check for a tie first (required for predictability (tests)); the downside of
+				// doing this is that a listener might get two callbacks back-to-back ("new leader",
+				// followed by "no leader").
+				select {
+				case children := <-childrenCh:
+					md.childrenChanged(path, children, f)
+				default:
+				}
+				if ok {
+					log.V(1).Infoln("child watch ended with error, master lost; error was:", err.Error())
+				} else {
+					// detector shutdown likely...
+					log.V(1).Infoln("child watch ended, master lost")
+				}
+				select {
+				case <-md.Done():
+					return
+				default:
+					if md.leaderNode != "" {
+						log.V(2).Infof("changing leader node from %q -> \"\"", md.leaderNode)
+						md.leaderNode = ""
+						f.OnMasterChanged(nil)
+					}
+				}
+				rewatch = true
+			}
+			// rate-limit master changes
+			if elapsed := time.Now().Sub(started); elapsed > 0 {
+				log.V(2).Infoln("resting before next detection cycle")
+				select {
+				case <-md.Done():
+					return
+				case <-time.After(md.minDetectorCyclePeriod - elapsed): // noop
+				}
+			}
+			if rewatch {
+				continue detectLoop
 			}
 		}
 	}
 }
 
-func selectTopNode(list []string) (node string) {
+func (md *MasterDetector) selectTopNode(list []string) (topNode string, codec infoCodec) {
+	// mesos v0.24 writes JSON only, v0.23 writes json and protobuf, v0.22 and prior only write protobuf
+	topNode = selectTopNodePrefix(list, nodeJSONPrefix)
+	codec = md.pullMasterJsonInfo
+	if topNode == "" {
+		topNode = selectTopNodePrefix(list, nodePrefix)
+		codec = md.pullMasterInfo
+
+		if topNode != "" {
+			log.Warningf("Leading master is using a Protobuf binary format when registering "+
+				"with Zookeeper (%s): this will be deprecated as of Mesos 0.24 (see MESOS-2340).",
+				topNode)
+		}
+	}
+	return
+}
+
+func selectTopNodePrefix(list []string, pre string) (node string) {
 	var leaderSeq uint64 = math.MaxUint64
 
 	for _, v := range list {
-		if !strings.HasPrefix(v, nodePrefix) {
+		if !strings.HasPrefix(v, pre) {
 			continue // only care about participants
 		}
-		seqStr := strings.TrimPrefix(v, nodePrefix)
+		seqStr := strings.TrimPrefix(v, pre)
 		seq, err := strconv.ParseUint(seqStr, 10, 64)
 		if err != nil {
 			log.Warningf("unexpected zk node format '%s': %v", seqStr, err)
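To make the node-selection rule above concrete: candidates are filtered by prefix and the lowest sequence number wins, with json.info_ nodes taking priority over protobuf info_ nodes. A standalone sketch of that selection (a simplified re-implementation for illustration, not the vendored code itself; the full vendored function is truncated in the hunk above):

package main

import (
	"fmt"
	"math"
	"strconv"
	"strings"
)

// selectTopNodePrefix picks the candidate with the lowest sequence number
// among nodes carrying the given prefix, mirroring the function above.
func selectTopNodePrefix(list []string, pre string) (node string) {
	var leaderSeq uint64 = math.MaxUint64
	for _, v := range list {
		if !strings.HasPrefix(v, pre) {
			continue // only care about participants
		}
		seq, err := strconv.ParseUint(strings.TrimPrefix(v, pre), 10, 64)
		if err != nil {
			continue // skip malformed node names
		}
		if seq < leaderSeq {
			leaderSeq = seq
			node = v
		}
	}
	return node
}

func main() {
	nodes := []string{"info_0000000010", "json.info_0000000007", "info_0000000005"}
	// JSON-format nodes are preferred; fall back to protobuf-format nodes.
	if top := selectTopNodePrefix(nodes, "json.info_"); top != "" {
		fmt.Println("leader (json):", top) // json.info_0000000007
	} else {
		fmt.Println("leader (protobuf):", selectTopNodePrefix(nodes, "info_"))
	}
}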
@@ -1,9 +1,7 @@
 package zoo
 
 import (
-	"errors"
 	"fmt"
-	"sort"
 	"sync"
 	"testing"
 	"time"
@@ -12,14 +10,16 @@ import (
 	log "github.com/golang/glog"
 	"github.com/mesos/mesos-go/detector"
 	mesos "github.com/mesos/mesos-go/mesosproto"
+	util "github.com/mesos/mesos-go/mesosutil"
 	"github.com/samuel/go-zookeeper/zk"
 	"github.com/stretchr/testify/assert"
 	"github.com/stretchr/testify/mock"
 )
 
 const (
-	zkurl     = "zk://127.0.0.1:2181/mesos"
-	zkurl_bad = "zk://127.0.0.1:2181"
+	zkurl        = "zk://127.0.0.1:2181/mesos"
+	zkurl_bad    = "zk://127.0.0.1:2181"
+	test_zk_path = "/test"
 )
 
 func TestParseZk_single(t *testing.T) {
@ -43,361 +43,299 @@ func TestParseZk_multiIP(t *testing.T) {
|
|||
assert.Equal(t, "/mesos", path)
|
||||
}
|
||||
|
||||
func TestMasterDetectorStart(t *testing.T) {
|
||||
c, err := makeClient()
|
||||
assert.False(t, c.isConnected())
|
||||
md, err := NewMasterDetector(zkurl)
|
||||
defer md.Cancel()
|
||||
assert.NoError(t, err)
|
||||
c.errorHandler = ErrorHandler(func(c *Client, e error) {
|
||||
err = e
|
||||
})
|
||||
md.client = c // override zk.Conn with our own.
|
||||
md.client.connect()
|
||||
assert.NoError(t, err)
|
||||
assert.True(t, c.isConnected())
|
||||
type mockZkClient struct {
|
||||
mock.Mock
|
||||
}
|
||||
|
||||
func (m *mockZkClient) stopped() (a <-chan struct{}) {
|
||||
args := m.Called()
|
||||
if x := args.Get(0); x != nil {
|
||||
a = x.(<-chan struct{})
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func (m *mockZkClient) stop() {
|
||||
m.Called()
|
||||
}
|
||||
|
||||
func (m *mockZkClient) data(path string) (a []byte, b error) {
|
||||
args := m.Called(path)
|
||||
if x := args.Get(0); x != nil {
|
||||
a = x.([]byte)
|
||||
}
|
||||
b = args.Error(1)
|
||||
return
|
||||
}
|
||||
|
||||
func (m *mockZkClient) watchChildren(path string) (a string, b <-chan []string, c <-chan error) {
|
||||
args := m.Called(path)
|
||||
a = args.String(0)
|
||||
if x := args.Get(1); x != nil {
|
||||
b = x.(<-chan []string)
|
||||
}
|
||||
if x := args.Get(2); x != nil {
|
||||
c = x.(<-chan error)
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// newMockZkClient returns a mocked implementation of zkInterface that implements expectations
|
||||
// for stop() and stopped(); multiple calls to stop() are safe.
|
||||
func newMockZkClient(initialChildren ...string) (mocked *mockZkClient, snaps chan []string, errs chan error) {
|
||||
var doneOnce sync.Once
|
||||
done := make(chan struct{})
|
||||
|
||||
mocked = &mockZkClient{}
|
||||
mocked.On("stop").Return().Run(func(_ mock.Arguments) { doneOnce.Do(func() { close(done) }) })
|
||||
mocked.On("stopped").Return((<-chan struct{})(done))
|
||||
|
||||
if initialChildren != nil {
|
||||
errs = make(chan error) // this is purposefully unbuffered (some tests depend on this)
|
||||
snaps = make(chan []string, 1)
|
||||
snaps <- initialChildren[:]
|
||||
mocked.On("watchChildren", currentPath).Return(
|
||||
test_zk_path, (<-chan []string)(snaps), (<-chan error)(errs)).Run(
|
||||
func(_ mock.Arguments) { log.V(1).Infoln("watchChildren invoked") })
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func newTestMasterInfo(id int) []byte {
|
||||
miPb := util.NewMasterInfo(fmt.Sprintf("master(%d)@localhost:5050", id), 123456789, 400)
|
||||
data, err := proto.Marshal(miPb)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
return data
|
||||
}
|
||||
|
||||
func TestMasterDetectorChildrenChanged(t *testing.T) {
|
||||
wCh := make(chan struct{}, 1)
|
||||
|
||||
c, err := makeClient()
|
||||
assert.NoError(t, err)
|
||||
assert.False(t, c.isConnected())
|
||||
|
||||
md, err := NewMasterDetector(zkurl)
|
||||
defer md.Cancel()
|
||||
assert.NoError(t, err)
|
||||
// override zk.Conn with our own.
|
||||
c.errorHandler = ErrorHandler(func(c *Client, e error) {
|
||||
err = e
|
||||
})
|
||||
md.client = c
|
||||
md.client.connect()
|
||||
assert.NoError(t, err)
|
||||
assert.True(t, c.isConnected())
|
||||
|
||||
path := test_zk_path
|
||||
snapDetected := make(chan struct{})
|
||||
md.bootstrapFunc = func() error {
|
||||
if md.client != nil {
|
||||
return nil
|
||||
}
|
||||
log.V(1).Infoln("bootstrapping detector")
|
||||
defer log.V(1).Infoln("bootstrapping detector ..finished")
|
||||
|
||||
mocked, _, errs := newMockZkClient("info_0", "info_5", "info_10")
|
||||
md.client = mocked
|
||||
md.minDetectorCyclePeriod = 10 * time.Millisecond // we don't have all day!
|
||||
|
||||
mocked.On("data", fmt.Sprintf("%s/info_0", path)).Return(newTestMasterInfo(0), nil)
|
||||
|
||||
// wait for the first child snapshot to be processed before signaling end-of-watch
|
||||
// (which is signalled by closing errs).
|
||||
go func() {
|
||||
defer close(errs)
|
||||
select {
|
||||
case <-snapDetected:
|
||||
case <-md.Done():
|
||||
t.Errorf("detector died before child snapshot")
|
||||
}
|
||||
}()
|
||||
return nil
|
||||
}
|
||||
|
||||
called := 0
|
||||
md.Detect(detector.OnMasterChanged(func(master *mesos.MasterInfo) {
|
||||
lostMaster := make(chan struct{})
|
||||
const expectedLeader = "master(0)@localhost:5050"
|
||||
err = md.Detect(detector.OnMasterChanged(func(master *mesos.MasterInfo) {
|
||||
//expect 2 calls in sequence: the first setting a master
|
||||
//and the second clearing it
|
||||
switch called++; called {
|
||||
case 1:
|
||||
defer close(snapDetected)
|
||||
assert.NotNil(t, master)
|
||||
assert.Equal(t, master.GetId(), "master@localhost:5050")
|
||||
wCh <- struct{}{}
|
||||
assert.Equal(t, expectedLeader, master.GetId())
|
||||
case 2:
|
||||
md.Cancel()
|
||||
defer close(lostMaster)
|
||||
assert.Nil(t, master)
|
||||
wCh <- struct{}{}
|
||||
default:
|
||||
t.Fatalf("unexpected notification call attempt %d", called)
|
||||
t.Errorf("unexpected notification call attempt %d", called)
|
||||
}
|
||||
}))
|
||||
|
||||
startWait := time.Now()
|
||||
select {
|
||||
case <-wCh:
|
||||
case <-time.After(time.Second * 3):
|
||||
panic("Waited too long...")
|
||||
}
|
||||
|
||||
// wait for the disconnect event, should be triggered
|
||||
// 1s after the connected event
|
||||
waited := time.Now().Sub(startWait)
|
||||
time.Sleep((2 * time.Second) - waited)
|
||||
assert.False(t, c.isConnected())
|
||||
}
|
||||
|
||||
// single connector instance, session does not expire, but it's internal connection to zk is flappy
|
||||
func TestMasterDetectFlappingConnectionState(t *testing.T) {
|
||||
c, err := newClient(test_zk_hosts, test_zk_path)
|
||||
assert.NoError(t, err)
|
||||
|
||||
initialChildren := []string{"info_005", "info_010", "info_022"}
|
||||
connector := NewMockConnector()
|
||||
connector.On("Close").Return(nil)
|
||||
connector.On("Children", test_zk_path).Return(initialChildren, &zk.Stat{}, nil)
|
||||
fatalOn(t, 10*time.Second, lostMaster, "Waited too long for lost master")
|
||||
|
||||
select {
|
||||
case <-md.Done():
|
||||
assert.Equal(t, 2, called, "expected 2 detection callbacks instead of %d", called)
|
||||
case <-time.After(time.Second * 10):
|
||||
panic("Waited too long for detector shutdown...")
|
||||
}
|
||||
}
|
||||
|
||||
// single connector instance, it's internal connection to zk is flappy
|
||||
func TestMasterDetectorFlappyConnectionState(t *testing.T) {
|
||||
md, err := NewMasterDetector(zkurl)
|
||||
defer md.Cancel()
|
||||
assert.NoError(t, err)
|
||||
|
||||
const ITERATIONS = 3
|
||||
var wg sync.WaitGroup
|
||||
wg.Add(2) // async flapping, master change detection
|
||||
wg.Add(1 + ITERATIONS) // +1 for the initial snapshot that's sent for the first watch
|
||||
path := test_zk_path
|
||||
|
||||
first := true
|
||||
c.setFactory(asFactory(func() (Connector, <-chan zk.Event, error) {
|
||||
if !first {
|
||||
t.Fatalf("only one connector instance expected")
|
||||
return nil, nil, errors.New("ran out of connectors")
|
||||
} else {
|
||||
first = false
|
||||
md.bootstrapFunc = func() error {
|
||||
if md.client != nil {
|
||||
return nil
|
||||
}
|
||||
sessionEvents := make(chan zk.Event, 10)
|
||||
watchEvents := make(chan zk.Event, 10)
|
||||
log.V(1).Infoln("bootstrapping detector")
|
||||
defer log.V(1).Infoln("bootstrapping detector ..finished")
|
||||
|
||||
connector.On("Get", fmt.Sprintf("%s/info_005", test_zk_path)).Return(newTestMasterInfo(1), &zk.Stat{}, nil).Once()
|
||||
connector.On("ChildrenW", test_zk_path).Return([]string{test_zk_path}, &zk.Stat{}, (<-chan zk.Event)(watchEvents), nil)
|
||||
children := []string{"info_0", "info_5", "info_10"}
|
||||
mocked, snaps, errs := newMockZkClient(children...)
|
||||
md.client = mocked
|
||||
md.minDetectorCyclePeriod = 10 * time.Millisecond // we don't have all day!
|
||||
|
||||
mocked.On("data", fmt.Sprintf("%s/info_0", path)).Return(newTestMasterInfo(0), nil)
|
||||
|
||||
// the first snapshot will be sent immediately and the detector will be awaiting en event.
|
||||
// cycle through some connected/disconnected events but maintain the same snapshot
|
||||
go func() {
|
||||
defer wg.Done()
|
||||
time.Sleep(100 * time.Millisecond)
|
||||
for attempt := 0; attempt < 5; attempt++ {
|
||||
sessionEvents <- zk.Event{
|
||||
Type: zk.EventSession,
|
||||
State: zk.StateConnected,
|
||||
}
|
||||
time.Sleep(500 * time.Millisecond)
|
||||
sessionEvents <- zk.Event{
|
||||
Type: zk.EventSession,
|
||||
State: zk.StateDisconnected,
|
||||
}
|
||||
}
|
||||
sessionEvents <- zk.Event{
|
||||
Type: zk.EventSession,
|
||||
State: zk.StateConnected,
|
||||
defer close(errs)
|
||||
for attempt := 0; attempt < ITERATIONS; attempt++ {
|
||||
// send an error, should cause the detector to re-issue a watch
|
||||
errs <- zk.ErrSessionExpired
|
||||
// the detection loop issues another watch, so send it a snapshot..
|
||||
// send another snapshot
|
||||
snaps <- children
|
||||
}
|
||||
}()
|
||||
return connector, sessionEvents, nil
|
||||
}))
|
||||
c.reconnDelay = 0 // there should be no reconnect, but just in case don't drag the test out
|
||||
return nil
|
||||
}

md, err := NewMasterDetector(zkurl)
defer md.Cancel()
assert.NoError(t, err)

c.errorHandler = ErrorHandler(func(c *Client, e error) {
	t.Logf("zk client error: %v", e)
})
md.client = c

startTime := time.Now()
detected := false
md.Detect(detector.OnMasterChanged(func(master *mesos.MasterInfo) {
	if detected {
		t.Fatalf("already detected master, was not expecting another change: %v", master)
	} else {
		detected = true
		assert.NotNil(t, master, fmt.Sprintf("on-master-changed %v", detected))
		t.Logf("Leader change detected at %v: '%+v'", time.Now().Sub(startTime), master)
		wg.Done()
	}
}))

called := 0
lostMaster := make(chan struct{})
const EXPECTED_CALLS = (ITERATIONS * 2) + 2 // +1 for initial snapshot, +1 for final lost-leader (close(errs))
err = md.Detect(detector.OnMasterChanged(func(master *mesos.MasterInfo) {
	called++
	log.V(3).Infof("detector invoked: called %d", called)
	switch {
	case called < EXPECTED_CALLS:
		if master != nil {
			wg.Done()
			assert.Equal(t, master.GetId(), "master(0)@localhost:5050")
		}
	case called == EXPECTED_CALLS:
		md.Cancel()
		defer close(lostMaster)
		assert.Nil(t, master)
	default:
		t.Errorf("unexpected notification call attempt %d", called)
	}
}))
assert.NoError(t, err)

completed := make(chan struct{})
go func() {
	defer close(completed)
	wg.Wait()
}()
fatalAfter(t, 10*time.Second, wg.Wait, "Waited too long for new-master alerts")
fatalOn(t, 3*time.Second, lostMaster, "Waited too long for lost master")

select {
case <-completed: // expected
case <-time.After(3 * time.Second):
	t.Fatalf("failed to detect master change")
case <-md.Done():
	assert.Equal(t, EXPECTED_CALLS, called, "expected %d detection callbacks instead of %d", EXPECTED_CALLS, called)
case <-time.After(time.Second * 10):
	panic("Waited too long for detector shutdown...")
}
}

func TestMasterDetectFlappingConnector(t *testing.T) {
	c, err := newClient(test_zk_hosts, test_zk_path)
	assert.NoError(t, err)

	initialChildren := []string{"info_005", "info_010", "info_022"}
	connector := NewMockConnector()
	connector.On("Close").Return(nil)
	connector.On("Children", test_zk_path).Return(initialChildren, &zk.Stat{}, nil)

	// timing
	// t=0 t=400ms t=800ms t=1200ms t=1600ms t=2000ms t=2400ms
	// |--=--=--=--|--=--=--=--|--=--=--=--|--=--=--=--|--=--=--=--|--=--=--=--|--=--=--=--
	// c1 d1 c3 d3 c5 d5 d6 ...
	// c2 d2 c4 d4 c6 c7 ...
	// M M' M M' M M'

	attempt := 0
	c.setFactory(asFactory(func() (Connector, <-chan zk.Event, error) {
		attempt++
		sessionEvents := make(chan zk.Event, 5)
		watchEvents := make(chan zk.Event, 5)

		sessionEvents <- zk.Event{
			Type:  zk.EventSession,
			State: zk.StateConnected,
		}
		connector.On("Get", fmt.Sprintf("%s/info_005", test_zk_path)).Return(newTestMasterInfo(attempt), &zk.Stat{}, nil).Once()
		connector.On("ChildrenW", test_zk_path).Return([]string{test_zk_path}, &zk.Stat{}, (<-chan zk.Event)(watchEvents), nil)
		go func(attempt int) {
			defer close(sessionEvents)
			defer close(watchEvents)
			time.Sleep(400 * time.Millisecond)
			// this is the order in which the embedded zk implementation does it
			sessionEvents <- zk.Event{
				Type:  zk.EventSession,
				State: zk.StateDisconnected,
			}
			connector.On("ChildrenW", test_zk_path).Return(nil, nil, nil, zk.ErrSessionExpired).Once()
			watchEvents <- zk.Event{
				Type:  zk.EventNotWatching,
				State: zk.StateDisconnected,
				Path:  test_zk_path,
				Err:   zk.ErrSessionExpired,
			}
		}(attempt)
		return connector, sessionEvents, nil
	}))
	c.reconnDelay = 100 * time.Millisecond
	c.rewatchDelay = c.reconnDelay / 2

	md, err := NewMasterDetector(zkurl)
	md.minDetectorCyclePeriod = 600 * time.Millisecond

	defer md.Cancel()
	assert.NoError(t, err)

	c.errorHandler = ErrorHandler(func(c *Client, e error) {
		t.Logf("zk client error: %v", e)
	})
	md.client = c

	var wg sync.WaitGroup
	wg.Add(6) // 3 x (connected, disconnected)
	detected := 0
	startTime := time.Now()
	md.Detect(detector.OnMasterChanged(func(master *mesos.MasterInfo) {
		if detected > 5 {
			// ignore
			return
		}
		if (detected & 1) == 0 {
			assert.NotNil(t, master, fmt.Sprintf("on-master-changed-%d", detected))
		} else {
			assert.Nil(t, master, fmt.Sprintf("on-master-changed-%d", detected))
		}
		t.Logf("Leader change detected at %v: '%+v'", time.Now().Sub(startTime), master)
		detected++
		wg.Done()
	}))

	completed := make(chan struct{})
	go func() {
		defer close(completed)
		wg.Wait()
	}()

	select {
	case <-completed: // expected
	case <-time.After(3 * time.Second):
		t.Fatalf("failed to detect flapping master changes")
	}
}
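
The flapping test above leans on `minDetectorCyclePeriod` to keep a noisy ZooKeeper session from spamming the listener. As a rough illustration only (a hypothetical helper, not the mesos-go implementation), the debouncing idea amounts to:

package zoo

import (
	"time"

	mesos "github.com/mesos/mesos-go/mesosproto"
)

// notifyLoop sketches the rate limiting exercised by the test: deliver
// leadership changes, but never more often than once per minCycle.
// The channel-based shape is an assumption made for this example.
func notifyLoop(changes <-chan *mesos.MasterInfo, notify func(*mesos.MasterInfo), minCycle time.Duration) {
	var last time.Time
	for m := range changes {
		if wait := minCycle - time.Since(last); wait > 0 {
			time.Sleep(wait)
		}
		last = time.Now()
		notify(m)
	}
}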

func TestMasterDetectMultiple(t *testing.T) {
	ch0 := make(chan zk.Event, 5)
	ch1 := make(chan zk.Event, 5)

	ch0 <- zk.Event{
		Type:  zk.EventSession,
		State: zk.StateConnected,
	}

	c, err := newClient(test_zk_hosts, test_zk_path)
	assert.NoError(t, err)

	initialChildren := []string{"info_005", "info_010", "info_022"}
	connector := NewMockConnector()
	connector.On("Close").Return(nil)
	connector.On("Children", test_zk_path).Return(initialChildren, &zk.Stat{}, nil).Once()
	connector.On("ChildrenW", test_zk_path).Return([]string{test_zk_path}, &zk.Stat{}, (<-chan zk.Event)(ch1), nil)

	first := true
	c.setFactory(asFactory(func() (Connector, <-chan zk.Event, error) {
		log.V(2).Infof("**** Using zk.Conn adapter ****")
		if !first {
			return nil, nil, errors.New("only 1 connector allowed")
		} else {
			first = false
		}
		return connector, ch0, nil
	}))

func TestMasterDetector_multipleLeadershipChanges(t *testing.T) {
	md, err := NewMasterDetector(zkurl)
	defer md.Cancel()
	assert.NoError(t, err)

	c.errorHandler = ErrorHandler(func(c *Client, e error) {
		err = e
	})
	md.client = c

	// **** Test 4 consecutive ChildrenChangedEvents ******
	// setup event changes
	sequences := [][]string{
	leadershipChanges := [][]string{
		{"info_014", "info_010", "info_005"},
		{"info_005", "info_004", "info_022"},
		{}, // indicates no master
		{"info_017", "info_099", "info_200"},
	}

	ITERATIONS := len(leadershipChanges)

	// +1 for initial snapshot, +1 for final lost-leader (close(errs))
	EXPECTED_CALLS := (ITERATIONS + 2)

	var wg sync.WaitGroup
	startTime := time.Now()
	detected := 0
	md.Detect(detector.OnMasterChanged(func(master *mesos.MasterInfo) {
		if detected == 2 {
			assert.Nil(t, master, fmt.Sprintf("on-master-changed-%d", detected))
		} else {
			assert.NotNil(t, master, fmt.Sprintf("on-master-changed-%d", detected))
	wg.Add(ITERATIONS) // +1 for the initial snapshot that's sent for the first watch, -1 because set 3 is empty
	path := test_zk_path

	md.bootstrapFunc = func() error {
		if md.client != nil {
			return nil
		}
		log.V(1).Infoln("bootstrapping detector")
		defer log.V(1).Infoln("bootstrapping detector ..finished")

		children := []string{"info_0", "info_5", "info_10"}
		mocked, snaps, errs := newMockZkClient(children...)
		md.client = mocked
		md.minDetectorCyclePeriod = 10 * time.Millisecond // we don't have all day!

		mocked.On("data", fmt.Sprintf("%s/info_0", path)).Return(newTestMasterInfo(0), nil)
		mocked.On("data", fmt.Sprintf("%s/info_005", path)).Return(newTestMasterInfo(5), nil)
		mocked.On("data", fmt.Sprintf("%s/info_004", path)).Return(newTestMasterInfo(4), nil)
		mocked.On("data", fmt.Sprintf("%s/info_017", path)).Return(newTestMasterInfo(17), nil)

		// the first snapshot will be sent immediately and the detector will be awaiting an event.
		// cycle through some connected/disconnected events but maintain the same snapshot
		go func() {
			defer close(errs)
			for attempt := 0; attempt < ITERATIONS; attempt++ {
				snaps <- leadershipChanges[attempt]
			}
		}()
		return nil
	}

	called := 0
	lostMaster := make(chan struct{})
	expectedLeaders := []int{0, 5, 4, 17}
	leaderIdx := 0
	err = md.Detect(detector.OnMasterChanged(func(master *mesos.MasterInfo) {
		called++
		log.V(3).Infof("detector invoked: called %d", called)
		switch {
		case called < EXPECTED_CALLS:
			if master != nil {
				expectedLeader := fmt.Sprintf("master(%d)@localhost:5050", expectedLeaders[leaderIdx])
				assert.Equal(t, expectedLeader, master.GetId())
				leaderIdx++
				wg.Done()
			}
		case called == EXPECTED_CALLS:
			md.Cancel()
			defer close(lostMaster)
			assert.Nil(t, master)
		default:
			t.Errorf("unexpected notification call attempt %d", called)
		}
		t.Logf("Leader change detected at %v: '%+v'", time.Now().Sub(startTime), master)
		detected++
		wg.Done()
	}))
	assert.NoError(t, err)

	// 3 leadership changes + disconnect (leader change to '')
	wg.Add(4)

	go func() {
		for i := range sequences {
			sorted := make([]string, len(sequences[i]))
			copy(sorted, sequences[i])
			sort.Strings(sorted)
			t.Logf("testing master change sequence %d, path '%v'", i, test_zk_path)
			connector.On("Children", test_zk_path).Return(sequences[i], &zk.Stat{}, nil).Once()
			if len(sequences[i]) > 0 {
				connector.On("Get", fmt.Sprintf("%s/%s", test_zk_path, sorted[0])).Return(newTestMasterInfo(i), &zk.Stat{}, nil).Once()
			}
			ch1 <- zk.Event{
				Type: zk.EventNodeChildrenChanged,
				Path: test_zk_path,
			}
			time.Sleep(100 * time.Millisecond) // give async routines time to catch up
		}
		time.Sleep(1 * time.Second) // give async routines time to catch up
		t.Logf("disconnecting...")
		ch0 <- zk.Event{
			State: zk.StateDisconnected,
		}
		//TODO(jdef) does order of close matter here? probably, meaning client code is weak
		close(ch0)
		time.Sleep(500 * time.Millisecond) // give async routines time to catch up
		close(ch1)
	}()
	completed := make(chan struct{})
	go func() {
		defer close(completed)
		wg.Wait()
	}()

	defer func() {
		if r := recover(); r != nil {
			t.Fatal(r)
		}
	}()
	fatalAfter(t, 10*time.Second, wg.Wait, "Waited too long for new-master alerts")
	fatalOn(t, 3*time.Second, lostMaster, "Waited too long for lost master")

	select {
	case <-time.After(2 * time.Second):
		panic("timed out waiting for master changes to propagate")
	case <-completed:
	case <-md.Done():
		assert.Equal(t, EXPECTED_CALLS, called, "expected %d detection callbacks instead of %d", EXPECTED_CALLS, called)
	case <-time.After(time.Second * 10):
		panic("Waited too long for detector shutdown...")
	}
}

func TestMasterDetect_selectTopNode_none(t *testing.T) {
	assert := assert.New(t)
	nodeList := []string{}
	node := selectTopNode(nodeList)
	node := selectTopNodePrefix(nodeList, "foo")
	assert.Equal("", node)
}

@ -410,10 +348,25 @@ func TestMasterDetect_selectTopNode_0000x(t *testing.T) {
	"info_0000000061",
	"info_0000000008",
	}
	node := selectTopNode(nodeList)
	node := selectTopNodePrefix(nodeList, nodePrefix)
	assert.Equal("info_0000000008", node)
}
func TestMasterDetect_selectTopNode_mixJson(t *testing.T) {
	assert := assert.New(t)
	nodeList := []string{
		nodePrefix + "0000000046",
		nodePrefix + "0000000032",
		nodeJSONPrefix + "0000000046",
		nodeJSONPrefix + "0000000032",
	}
	node := selectTopNodePrefix(nodeList, nodeJSONPrefix)
	assert.Equal(nodeJSONPrefix+"0000000032", node)

	node = selectTopNodePrefix(nodeList, nodePrefix)
	assert.Equal(nodePrefix+"0000000032", node)
}

func TestMasterDetect_selectTopNode_mixedEntries(t *testing.T) {
	assert := assert.New(t)
	nodeList := []string{
@ -424,7 +377,7 @@ func TestMasterDetect_selectTopNode_mixedEntries(t *testing.T) {
	"log_replicas_fdgwsdfgsdf",
	"bar",
	}
	node := selectTopNode(nodeList)
	node := selectTopNodePrefix(nodeList, nodePrefix)
	assert.Equal("info_0000000032", node)
}

@ -451,15 +404,25 @@ func afterFunc(f func()) <-chan struct{} {
}

func fatalAfter(t *testing.T, d time.Duration, f func(), msg string, args ...interface{}) {
	ch := afterFunc(f)
	fatalOn(t, d, afterFunc(f), msg, args...)
}

func fatalOn(t *testing.T, d time.Duration, ch <-chan struct{}, msg string, args ...interface{}) {
	select {
	case <-ch:
		return
	case <-time.After(d):
		t.Fatalf(msg, args...)
		// check for a tie
		select {
		case <-ch:
			return
		default:
			t.Fatalf(msg, args...)
		}
	}
}
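
Note the nested select: when the timeout fires, the channel is polled one more time so that a completion racing the timer is not reported as a spurious failure. Usage mirrors the calls already present in this file:

// fail the test if wg.Wait does not return within 10s:
fatalAfter(t, 10*time.Second, wg.Wait, "Waited too long for new-master alerts")

// or wait on a completion channel directly:
fatalOn(t, 3*time.Second, lostMaster, "Waited too long for lost master")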

/* TODO(jdef) refactor this to work with the new zkInterface
func TestNotifyAllMasters(t *testing.T) {
	c, err := newClient(test_zk_hosts, test_zk_path)
	assert.NoError(t, err)

@ -482,7 +445,7 @@ func TestNotifyAllMasters(t *testing.T) {
	assert.NoError(t, err)

	c.errorHandler = ErrorHandler(func(c *Client, e error) {
		t.Fatalf("unexpected error: %v", e)
		t.Errorf("unexpected error: %v", e)
	})
	md.client = c

@ -562,3 +525,4 @@ func TestNotifyAllMasters(t *testing.T) {

	connector.On("Close").Return(nil)
}
*/

@ -1,88 +0,0 @@
package zoo

import (
	"errors"
	"fmt"
	"net/url"

	"github.com/gogo/protobuf/proto"
	log "github.com/golang/glog"
	util "github.com/mesos/mesos-go/mesosutil"
	"github.com/samuel/go-zookeeper/zk"
)

type MockMasterDetector struct {
	*MasterDetector
	zkPath string
	conCh  chan zk.Event
	sesCh  chan zk.Event
}

func NewMockMasterDetector(zkurls string) (*MockMasterDetector, error) {
	log.V(4).Infoln("Creating mock zk master detector")
	md, err := NewMasterDetector(zkurls)
	if err != nil {
		return nil, err
	}

	u, _ := url.Parse(zkurls)
	m := &MockMasterDetector{
		MasterDetector: md,
		zkPath:         u.Path,
		conCh:          make(chan zk.Event, 5),
		sesCh:          make(chan zk.Event, 5),
	}

	path := m.zkPath
	connector := NewMockConnector()
	connector.On("Children", path).Return([]string{"info_0", "info_5", "info_10"}, &zk.Stat{}, nil)
	connector.On("Get", fmt.Sprintf("%s/info_0", path)).Return(m.makeMasterInfo(), &zk.Stat{}, nil)
	connector.On("Close").Return(nil)
	connector.On("ChildrenW", m.zkPath).Return([]string{m.zkPath}, &zk.Stat{}, (<-chan zk.Event)(m.sesCh), nil)

	first := true
	m.client.setFactory(asFactory(func() (Connector, <-chan zk.Event, error) {
		if !first {
			return nil, nil, errors.New("only 1 connector allowed")
		} else {
			first = false
		}
		return connector, m.conCh, nil
	}))

	return m, nil
}

func (m *MockMasterDetector) Start() {
	m.client.connect()
}

func (m *MockMasterDetector) ScheduleConnEvent(s zk.State) {
	log.V(4).Infof("Scheduling zk connection event with state: %v\n", s)
	go func() {
		m.conCh <- zk.Event{
			State: s,
			Path:  m.zkPath,
		}
	}()
}

func (m *MockMasterDetector) ScheduleSessEvent(t zk.EventType) {
	log.V(4).Infof("Scheduling zk session event with state: %v\n", t)
	go func() {
		m.sesCh <- zk.Event{
			Type: t,
			Path: m.zkPath,
		}
	}()
}

func (m *MockMasterDetector) makeMasterInfo() []byte {
	miPb := util.NewMasterInfo("master", 123456789, 400)
	miPb.Pid = proto.String("master@127.0.0.1:5050")
	data, err := proto.Marshal(miPb)
	if err != nil {
		panic(err)
	}
	return data
}
@ -14,12 +14,6 @@ type Connector interface {
	Get(string) ([]byte, *zk.Stat, error)
}

// interface for handling watcher event when zk.EventNodeChildrenChanged.
type ChildWatcher func(*Client, string)

// interface for handling errors (session and watch related).
type ErrorHandler func(*Client, error)

// Factory is an adapter to trap the creation of zk.Conn instances
// since the official zk API does not expose an interface for zk.Conn.
type Factory interface {
1915 Godeps/_workspace/src/github.com/mesos/mesos-go/mesosproto/authentication.pb.go generated vendored
File diff suppressed because it is too large

13 Godeps/_workspace/src/github.com/mesos/mesos-go/mesosproto/authentication.proto generated vendored

@ -18,8 +18,19 @@
package mesosproto;

import "mesos.proto";
import "github.com/gogo/protobuf/gogoproto/gogo.proto";

option (gogoproto.gostring_all) = true;
option (gogoproto.equal_all) = true;
option (gogoproto.verbose_equal_all) = true;
option (gogoproto.goproto_stringer_all) = false;
option (gogoproto.stringer_all) = true;
option (gogoproto.populate_all) = true;
option (gogoproto.testgen_all) = true;
option (gogoproto.benchgen_all) = true;
option (gogoproto.marshaler_all) = true;
option (gogoproto.sizer_all) = true;
option (gogoproto.unmarshaler_all) = true;

message AuthenticateMessage {
	required string pid = 1; // PID that needs to be authenticated.
1777 Godeps/_workspace/src/github.com/mesos/mesos-go/mesosproto/authenticationpb_test.go generated vendored Normal file
File diff suppressed because it is too large

2183 Godeps/_workspace/src/github.com/mesos/mesos-go/mesosproto/authorizer.pb.go generated vendored Normal file
File diff suppressed because it is too large

108 Godeps/_workspace/src/github.com/mesos/mesos-go/mesosproto/authorizer.proto generated vendored Normal file

@ -0,0 +1,108 @@
/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package mesosproto;

import "github.com/gogo/protobuf/gogoproto/gogo.proto";

option (gogoproto.gostring_all) = true;
option (gogoproto.equal_all) = true;
option (gogoproto.verbose_equal_all) = true;
option (gogoproto.goproto_stringer_all) = false;
option (gogoproto.stringer_all) = true;
option (gogoproto.populate_all) = true;
option (gogoproto.testgen_all) = true;
option (gogoproto.benchgen_all) = true;
option (gogoproto.marshaler_all) = true;
option (gogoproto.sizer_all) = true;
option (gogoproto.unmarshaler_all) = true;

/**
 * ACLs used for local authorization (See authorization.md file in the
 * docs).
 */
message ACL {

	// Entity is used to describe a subject(s) or an object(s) of an ACL.
	// NOTE:
	// To allow everyone access to an Entity set its type to 'ANY'.
	// To deny access to an Entity set its type to 'NONE'.
	message Entity {
		enum Type {
			SOME = 0;
			ANY = 1;
			NONE = 2;
		}
		optional Type type = 1 [default = SOME];
		repeated string values = 2; // Ignored for ANY/NONE.
	}

	// ACLs.
	message RegisterFramework {
		// Subjects.
		required Entity principals = 1; // Framework principals.

		// Objects.
		required Entity roles = 2; // Roles for resource offers.
	}

	message RunTask {
		// Subjects.
		required Entity principals = 1; // Framework principals.

		// Objects.
		required Entity users = 2; // Users to run the tasks/executors as.
	}

	// Which principals are authorized to shutdown frameworks of other
	// principals.
	message ShutdownFramework {
		// Subjects.
		required Entity principals = 1;

		// Objects.
		required Entity framework_principals = 2;
	}
}


/**
 * Collection of ACL.
 *
 * Each authorization request is evaluated against the ACLs in the order
 * they are defined.
 *
 * For simplicity, the ACLs for a given action are not aggregated even
 * when they have the same subjects or objects. The first ACL that
 * matches the request determines whether that request should be
 * permitted or not. An ACL matches iff both the subjects
 * (e.g., clients, principals) and the objects (e.g., urls, users,
 * roles) of the ACL match the request.
 *
 * If none of the ACLs match the request, the 'permissive' field
 * determines whether the request should be permitted or not.
 *
 * TODO(vinod): Do aggregation of ACLs when possible.
 *
 */
message ACLs {
	optional bool permissive = 1 [default = true];
	repeated ACL.RegisterFramework register_frameworks = 2;
	repeated ACL.RunTask run_tasks = 3;
	repeated ACL.ShutdownFramework shutdown_frameworks = 4;
}
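
To make the first-match semantics concrete, here is a minimal, self-contained Go sketch of the evaluation order described in the comment above; the acl/authorize names are hypothetical, and the real evaluation happens inside the Mesos master, not in this package:

package main

import "fmt"

type acl struct {
	matches func(principal, object string) bool // subjects AND objects must match
	allow   bool
}

// authorize returns the decision of the first matching ACL; if none
// match, the 'permissive' default decides.
func authorize(acls []acl, permissive bool, principal, object string) bool {
	for _, a := range acls {
		if a.matches(principal, object) {
			return a.allow
		}
	}
	return permissive
}

func main() {
	acls := []acl{{
		matches: func(p, o string) bool { return p == "framework-1" },
		allow:   false,
	}}
	fmt.Println(authorize(acls, true, "framework-1", "role-x")) // false: first ACL matched
	fmt.Println(authorize(acls, true, "framework-2", "role-x")) // true: fell through to permissive
}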
1405 Godeps/_workspace/src/github.com/mesos/mesos-go/mesosproto/authorizerpb_test.go generated vendored Normal file
File diff suppressed because it is too large

2454 Godeps/_workspace/src/github.com/mesos/mesos-go/mesosproto/containerizer.pb.go generated vendored
File diff suppressed because it is too large
11 Godeps/_workspace/src/github.com/mesos/mesos-go/mesosproto/containerizer.proto generated vendored

@ -21,6 +21,17 @@ package mesosproto;
import "mesos.proto";
import "github.com/gogo/protobuf/gogoproto/gogo.proto";

option (gogoproto.gostring_all) = true;
option (gogoproto.equal_all) = true;
option (gogoproto.verbose_equal_all) = true;
option (gogoproto.goproto_stringer_all) = false;
option (gogoproto.stringer_all) = true;
option (gogoproto.populate_all) = true;
option (gogoproto.testgen_all) = true;
option (gogoproto.benchgen_all) = true;
option (gogoproto.marshaler_all) = true;
option (gogoproto.sizer_all) = true;
option (gogoproto.unmarshaler_all) = true;

/**
 * Encodes the launch command sent to the external containerizer
1635 Godeps/_workspace/src/github.com/mesos/mesos-go/mesosproto/containerizerpb_test.go generated vendored Normal file
File diff suppressed because it is too large

@ -5,12 +5,14 @@
package mesosproto

import proto "github.com/gogo/protobuf/proto"
import fmt "fmt"
import math "math"

// discarding unused import gogoproto "github.com/gogo/protobuf/gogoproto"

// Reference imports to suppress errors if they are not otherwise used.
var _ = proto.Marshal
var _ = fmt.Errorf
var _ = math.Inf

// For use with detector callbacks

File diff suppressed because it is too large
File diff suppressed because it is too large
File diff suppressed because it is too large

@ -32,7 +32,6 @@ option (gogoproto.marshaler_all) = true;
option (gogoproto.sizer_all) = true;
option (gogoproto.unmarshaler_all) = true;


/**
 * Status is used to indicate the state of the scheduler and executor
 * driver after function calls.

@ -104,6 +103,32 @@ message ContainerID {
}


/**
 * A network address.
 *
 * TODO(bmahler): Use this more widely.
 */
message Address {
	// May contain a hostname, IP address, or both.
	optional string hostname = 1;
	optional string ip = 2;

	required int32 port = 3;
}


/**
 * Represents a URL.
 */
message URL {
	required string scheme = 1;
	required Address address = 2;
	optional string path = 3;
	repeated Parameter query = 4;
	optional string fragment = 5;
}


/**
 * Describes a framework.
 */

@ -141,7 +166,8 @@ message FrameworkInfo {

	// Used to indicate the current host from which the scheduler is
	// registered in the Mesos Web UI. If set to an empty string Mesos
	// will automagically set it to the current hostname.
	// will automagically set it to the current hostname if one is
	// available.
	optional string hostname = 7;

	// This field should match the credential's principal the framework

@ -170,6 +196,11 @@ message FrameworkInfo {
	// capabilities (e.g., ability to receive offers for revocable
	// resources).
	repeated Capability capabilities = 10;

	// Labels are free-form key value pairs supplied by the framework
	// scheduler (e.g., to describe additional functionality offered by
	// the framework). These labels are not interpreted by Mesos itself.
	optional Labels labels = 11;
}


@ -353,11 +384,35 @@ message ExecutorInfo {
 */
message MasterInfo {
	required string id = 1;

	// The IP address (only IPv4) as a packed 4-bytes integer,
	// stored in network order. Deprecated, use `address.ip` instead.
	required uint32 ip = 2;

	// The TCP port the Master is listening on for incoming
	// HTTP requests; deprecated, use `address.port` instead.
	required uint32 port = 3 [default = 5050];

	// In the default implementation, this will contain information
	// about both the IP address, port and Master name; it should really
	// not be relied upon by external tooling/frameworks and be
	// considered an "internal" implementation field.
	optional string pid = 4;

	// The server's hostname, if available; it may be unreliable
	// in environments where the DNS configuration does not resolve
	// internal hostnames (eg, some public cloud providers).
	// Deprecated, use `address.hostname` instead.
	optional string hostname = 5;

	// The running Master version, as a string; taken from the
	// generated "master/version.hpp".
	optional string version = 6;

	// The full IP address (supports both IPv4 and IPv6 formats)
	// and supersedes the use of `ip`, `port` and `hostname`.
	// Since Mesos 0.24.
	optional Address address = 7;
}
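
Since `address` supersedes the deprecated `ip`, `port` and `hostname` fields, client code can prefer it and fall back only when it is unset. A hedged sketch (a hypothetical helper, not part of mesos-go; the getters are the standard ones gogo-protobuf generates):

package example

import (
	"fmt"

	mesos "github.com/mesos/mesos-go/mesosproto"
)

// masterEndpoint prefers the new address field (Mesos 0.24+) and falls
// back to the deprecated hostname/port pair otherwise.
func masterEndpoint(mi *mesos.MasterInfo) string {
	if addr := mi.GetAddress(); addr != nil {
		host := addr.GetHostname()
		if host == "" {
			host = addr.GetIp()
		}
		return fmt.Sprintf("%s:%d", host, addr.GetPort())
	}
	// hostname may be unreliable (see above); the packed-IPv4 'ip' field
	// is deliberately left undecoded in this sketch.
	return fmt.Sprintf("%s:%d", mi.GetHostname(), mi.GetPort())
}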


@ -618,6 +673,7 @@ message ResourceStatistics {
	optional uint64 mem_mapped_file_bytes = 12;
	// This is only set if swap is enabled.
	optional uint64 mem_swap_bytes = 40;
	optional uint64 mem_unevictable_bytes = 41;

	// Number of occurrences of different levels of memory pressure
	// events reported by memory cgroup. Pressure listening (re)starts

@ -680,7 +736,9 @@ message ResourceUsage {

	repeated Executor executors = 1;

	// TODO(jieyu): Include slave's total resources here.
	// Slave's total resources including checkpointed dynamic
	// reservations and persistent volumes.
	repeated Resource total = 2;
}


@ -764,8 +822,6 @@ message PerfStatistics {
 * to proactively influence the allocator. If 'slave_id' is provided
 * then this request is assumed to only apply to resources on that
 * slave.
 *
 * TODO(vinod): Remove this once the old driver is removed.
 */
message Request {
	optional SlaveID slave_id = 1;

@ -782,6 +838,10 @@ message Offer {
	required FrameworkID framework_id = 2;
	required SlaveID slave_id = 3;
	required string hostname = 4;

	// URL for reaching the slave running on the host.
	optional URL url = 8;

	repeated Resource resources = 5;
	repeated Attribute attributes = 7;
	repeated ExecutorID executor_ids = 6;

@ -944,6 +1004,14 @@ message TaskStatus {
	// (true) or unhealthy (false) according to the HealthCheck field in
	// the command info.
	optional bool healthy = 8;

	// Labels are free-form key value pairs which are exposed through
	// master and slave endpoints. Labels will not be interpreted or
	// acted upon by Mesos itself. As opposed to the data field, labels
	// will be kept in memory on master and slave processes. Therefore,
	// labels should be used to tag TaskStatus message with light-weight
	// meta-data.
	optional Labels labels = 12;
}


@ -1019,81 +1087,6 @@ message Credentials {
}


/**
 * ACLs used for authorization.
 */
message ACL {

	// Entity is used to describe a subject(s) or an object(s) of an ACL.
	// NOTE:
	// To allow everyone access to an Entity set its type to 'ANY'.
	// To deny access to an Entity set its type to 'NONE'.
	message Entity {
		enum Type {
			SOME = 0;
			ANY = 1;
			NONE = 2;
		}
		optional Type type = 1 [default = SOME];
		repeated string values = 2; // Ignored for ANY/NONE.
	}

	// ACLs.
	message RegisterFramework {
		// Subjects.
		required Entity principals = 1; // Framework principals.

		// Objects.
		required Entity roles = 2; // Roles for resource offers.
	}

	message RunTask {
		// Subjects.
		required Entity principals = 1; // Framework principals.

		// Objects.
		required Entity users = 2; // Users to run the tasks/executors as.
	}

	// Which principals are authorized to shutdown frameworks of other
	// principals.
	message ShutdownFramework {
		// Subjects.
		required Entity principals = 1;

		// Objects.
		required Entity framework_principals = 2;
	}
}


/**
 * Collection of ACL.
 *
 * Each authorization request is evaluated against the ACLs in the order
 * they are defined.
 *
 * For simplicity, the ACLs for a given action are not aggregated even
 * when they have the same subjects or objects. The first ACL that
 * matches the request determines whether that request should be
 * permitted or not. An ACL matches iff both the subjects
 * (e.g., clients, principals) and the objects (e.g., urls, users,
 * roles) of the ACL match the request.
 *
 * If none of the ACLs match the request, the 'permissive' field
 * determines whether the request should be permitted or not.
 *
 * TODO(vinod): Do aggregation of ACLs when possible.
 *
 */
message ACLs {
	optional bool permissive = 1 [default = true];
	repeated ACL.RegisterFramework register_frameworks = 2;
	repeated ACL.RunTask run_tasks = 3;
	repeated ACL.ShutdownFramework shutdown_frameworks = 4;
}


/**
 * Rate (queries per second, QPS) limit for messages from a framework to master.
 * Strictly speaking they are the combined rate from all frameworks of the same

@ -1138,24 +1131,76 @@ message RateLimits {
}


/**
 * Describe an image used by tasks or executors. Note that it's only
 * for tasks or executors launched by MesosContainerizer currently.
 * TODO(jieyu): This feature not fully supported in 0.24.0. Please do
 * not use it until this feature is announced.
 */
message Image {
	enum Type {
		APPC = 1;
		DOCKER = 2;
	}

	// Protobuf for specifying an Appc container image. See:
	// https://github.com/appc/spec/blob/master/spec/aci.md
	message AppC {
		// The name of the image.
		required string name = 1;

		// An image ID is a string of the format "hash-value", where
		// "hash" is the hash algorithm used and "value" is the hex
		// encoded string of the digest. Currently the only permitted
		// hash algorithm is sha512.
		optional string id = 2;

		// Optional labels. Suggested labels: "version", "os", and "arch".
		optional Labels labels = 3;
	}

	message Docker {
		// The name of the image. Expected in format repository[:tag].
		required string name = 1;
	}

	required Type type = 1;

	// Only one of the following image messages should be set to match
	// the type.
	optional AppC appc = 2;
	optional Docker docker = 3;
}


/**
 * Describes a volume mapping either from host to container or vice
 * versa. Both paths can either refer to a directory or a file.
 */
message Volume {
	// Absolute path pointing to a directory or file in the container.
	required string container_path = 1;

	// Absolute path pointing to a directory or file on the host or a path
	// relative to the container work directory.
	optional string host_path = 2;

	enum Mode {
		RW = 1; // read-write.
		RO = 2; // read-only.
	}

	required Mode mode = 3;

	// Path pointing to a directory or file in the container. If the
	// path is a relative path, it is relative to the container work
	// directory. If the path is an absolute path, that path must
	// already exist.
	required string container_path = 1;

	// The following specifies the source of this volume. At most one of
	// the following should be set.

	// Absolute path pointing to a directory or file on the host or a
	// path relative to the container work directory.
	optional string host_path = 2;

	// The source of the volume is an Image which describes a root
	// filesystem which will be provisioned by Mesos.
	optional Image image = 4;
}
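
For illustration, constructing a read-write volume per the message above might look like this in Go (a sketch; the field and enum names are taken from the generated mesosproto bindings, and proto.String/Enum are the usual gogo helpers):

package main

import (
	"fmt"

	"github.com/gogo/protobuf/proto"
	mesos "github.com/mesos/mesos-go/mesosproto"
)

func main() {
	// host path "data" is resolved relative to the container work directory
	vol := &mesos.Volume{
		ContainerPath: proto.String("/var/lib/data"),
		HostPath:      proto.String("data"),
		Mode:          mesos.Volume_RW.Enum(),
	}
	fmt.Println(vol.String())
}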


@ -1206,11 +1251,18 @@ message ContainerInfo {
	optional bool force_pull_image = 6;
	}

	message MesosInfo {
		optional Image image = 1;
	}

	required Type type = 1;
	repeated Volume volumes = 2;
	optional string hostname = 4;

	// Only one of the following *Info messages should be set to match
	// the type.
	optional DockerInfo docker = 3;
	optional MesosInfo mesos = 5;
}


@ -1277,3 +1329,36 @@ message DiscoveryInfo {
	optional Ports ports = 6;
	optional Labels labels = 7;
}


/**
 * Protobuf for the Appc image manifest JSON schema:
 * https://github.com/appc/spec/blob/master/spec/aci.md#image-manifest-schema
 * Where possible, any field required in the schema is required in the protobuf
 * but some cannot be expressed, e.g., a repeated string that has at least one
 * element. Further validation should be performed after parsing the JSON into
 * the protobuf.
 * This version of Appc protobuf is based on Appc spec version 0.6.1.
 * TODO(xujyan): This protobuf currently defines a subset of fields in the spec
 * that Mesos makes use of to avoid confusion. New fields are going to be added
 * when Mesos starts to support them.
 */
message AppcImageManifest {
	required string acKind = 1;
	required string acVersion = 2;
	required string name = 3;

	message Label {
		required string name = 1;
		required string value = 2;
	}

	repeated Label labels = 4;

	message Annotation {
		required string name = 1;
		required string value = 2;
	}

	repeated Annotation annotations = 5;
}
File diff suppressed because it is too large
File diff suppressed because it is too large

@ -21,10 +21,22 @@ package mesosproto;
import "mesos.proto";
import "github.com/gogo/protobuf/gogoproto/gogo.proto";

option (gogoproto.gostring_all) = true;
option (gogoproto.equal_all) = true;
option (gogoproto.verbose_equal_all) = true;
option (gogoproto.goproto_stringer_all) = false;
option (gogoproto.stringer_all) = true;
option (gogoproto.populate_all) = true;
option (gogoproto.testgen_all) = true;
option (gogoproto.benchgen_all) = true;
option (gogoproto.marshaler_all) = true;
option (gogoproto.sizer_all) = true;
option (gogoproto.unmarshaler_all) = true;

// TODO(benh): Provide comments for each of these messages. Also,
// consider splitting these messages into different "packages" which
// represent which messages get handled by which components (e.g., the
// "mesos.internal.executor" package includes messages that the
// "mesos.executor" package includes messages that the
// executor handles).


@ -194,8 +206,15 @@ message RunTaskMessage {
	// TODO(karya): Remove framework_id after MESOS-2559 has shipped.
	optional FrameworkID framework_id = 1 [deprecated = true];
	required FrameworkInfo framework = 2;
	required string pid = 3;
	required TaskInfo task = 4;

	// The pid of the framework. This was moved to 'optional' in
	// 0.24.0 to support schedulers using the HTTP API. For now, we
	// continue to always set pid since it was required in 0.23.x.
	// When 'pid' is unset, or set to empty string, the slave will
	// forward executor messages through the master. For schedulers
	// still using the driver, this will remain set.
	optional string pid = 3;
}


@ -336,7 +355,9 @@ message ShutdownExecutorMessage {

message UpdateFrameworkMessage {
	required FrameworkID framework_id = 1;
	required string pid = 2;

	// See the comment on RunTaskMessage.pid.
	optional string pid = 2;
}

11295 Godeps/_workspace/src/github.com/mesos/mesos-go/mesosproto/messagespb_test.go generated vendored Normal file
File diff suppressed because it is too large
File diff suppressed because it is too large

@ -21,6 +21,18 @@ package mesosproto;
import "mesos.proto";
import "github.com/gogo/protobuf/gogoproto/gogo.proto";

option (gogoproto.gostring_all) = true;
option (gogoproto.equal_all) = true;
option (gogoproto.verbose_equal_all) = true;
option (gogoproto.goproto_stringer_all) = false;
option (gogoproto.stringer_all) = true;
option (gogoproto.populate_all) = true;
option (gogoproto.testgen_all) = true;
option (gogoproto.benchgen_all) = true;
option (gogoproto.marshaler_all) = true;
option (gogoproto.sizer_all) = true;
option (gogoproto.unmarshaler_all) = true;

message Registry {
	message Master {
		required MasterInfo info = 1;
945 Godeps/_workspace/src/github.com/mesos/mesos-go/mesosproto/registrypb_test.go generated vendored Normal file
@ -0,0 +1,945 @@
// Code generated by protoc-gen-gogo.
// source: registry.proto
// DO NOT EDIT!

package mesosproto

import testing "testing"
import math_rand "math/rand"
import time "time"
import github_com_gogo_protobuf_proto "github.com/gogo/protobuf/proto"
import github_com_gogo_protobuf_jsonpb "github.com/gogo/protobuf/jsonpb"
import fmt "fmt"
import go_parser "go/parser"
import proto "github.com/gogo/protobuf/proto"
import math "math"

// discarding unused import gogoproto "github.com/gogo/protobuf/gogoproto"

// Reference imports to suppress errors if they are not otherwise used.
var _ = proto.Marshal
var _ = fmt.Errorf
var _ = math.Inf

func TestRegistryProto(t *testing.T) {
	seed := time.Now().UnixNano()
	popr := math_rand.New(math_rand.NewSource(seed))
	p := NewPopulatedRegistry(popr, false)
	data, err := github_com_gogo_protobuf_proto.Marshal(p)
	if err != nil {
		t.Fatalf("seed = %d, err = %v", seed, err)
	}
	msg := &Registry{}
	if err := github_com_gogo_protobuf_proto.Unmarshal(data, msg); err != nil {
		t.Fatalf("seed = %d, err = %v", seed, err)
	}
	littlefuzz := make([]byte, len(data))
	copy(littlefuzz, data)
	for i := range data {
		data[i] = byte(popr.Intn(256))
	}
	if err := p.VerboseEqual(msg); err != nil {
		t.Fatalf("seed = %d, %#v !VerboseProto %#v, since %v", seed, msg, p, err)
	}
	if !p.Equal(msg) {
		t.Fatalf("seed = %d, %#v !Proto %#v", seed, msg, p)
	}
	if len(littlefuzz) > 0 {
		fuzzamount := 100
		for i := 0; i < fuzzamount; i++ {
			littlefuzz[popr.Intn(len(littlefuzz))] = byte(popr.Intn(256))
			littlefuzz = append(littlefuzz, byte(popr.Intn(256)))
		}
		// shouldn't panic
		_ = github_com_gogo_protobuf_proto.Unmarshal(littlefuzz, msg)
	}
}
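
Every generated test below repeats the same property: marshal a randomly populated message, unmarshal into a fresh value, require equality, then fuzz the bytes and require only that Unmarshal does not panic. Condensed into one hypothetical helper (a sketch, not part of the generated file):

// roundtrip checks the core property exercised by the generated tests:
// Marshal followed by Unmarshal must reproduce an equal message.
func roundtrip(t *testing.T, seed int64, p, fresh proto.Message) {
	data, err := proto.Marshal(p)
	if err != nil {
		t.Fatalf("seed = %d, err = %v", seed, err)
	}
	if err := proto.Unmarshal(data, fresh); err != nil {
		t.Fatalf("seed = %d, err = %v", seed, err)
	}
	if !proto.Equal(p, fresh) {
		t.Fatalf("seed = %d, %#v != %#v", seed, fresh, p)
	}
}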

func TestRegistryMarshalTo(t *testing.T) {
	seed := time.Now().UnixNano()
	popr := math_rand.New(math_rand.NewSource(seed))
	p := NewPopulatedRegistry(popr, false)
	size := p.Size()
	data := make([]byte, size)
	for i := range data {
		data[i] = byte(popr.Intn(256))
	}
	_, err := p.MarshalTo(data)
	if err != nil {
		t.Fatalf("seed = %d, err = %v", seed, err)
	}
	msg := &Registry{}
	if err := github_com_gogo_protobuf_proto.Unmarshal(data, msg); err != nil {
		t.Fatalf("seed = %d, err = %v", seed, err)
	}
	for i := range data {
		data[i] = byte(popr.Intn(256))
	}
	if err := p.VerboseEqual(msg); err != nil {
		t.Fatalf("seed = %d, %#v !VerboseProto %#v, since %v", seed, msg, p, err)
	}
	if !p.Equal(msg) {
		t.Fatalf("seed = %d, %#v !Proto %#v", seed, msg, p)
	}
}

func BenchmarkRegistryProtoMarshal(b *testing.B) {
	popr := math_rand.New(math_rand.NewSource(616))
	total := 0
	pops := make([]*Registry, 10000)
	for i := 0; i < 10000; i++ {
		pops[i] = NewPopulatedRegistry(popr, false)
	}
	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		data, err := github_com_gogo_protobuf_proto.Marshal(pops[i%10000])
		if err != nil {
			panic(err)
		}
		total += len(data)
	}
	b.SetBytes(int64(total / b.N))
}

func BenchmarkRegistryProtoUnmarshal(b *testing.B) {
	popr := math_rand.New(math_rand.NewSource(616))
	total := 0
	datas := make([][]byte, 10000)
	for i := 0; i < 10000; i++ {
		data, err := github_com_gogo_protobuf_proto.Marshal(NewPopulatedRegistry(popr, false))
		if err != nil {
			panic(err)
		}
		datas[i] = data
	}
	msg := &Registry{}
	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		total += len(datas[i%10000])
		if err := github_com_gogo_protobuf_proto.Unmarshal(datas[i%10000], msg); err != nil {
			panic(err)
		}
	}
	b.SetBytes(int64(total / b.N))
}

func TestRegistry_MasterProto(t *testing.T) {
	seed := time.Now().UnixNano()
	popr := math_rand.New(math_rand.NewSource(seed))
	p := NewPopulatedRegistry_Master(popr, false)
	data, err := github_com_gogo_protobuf_proto.Marshal(p)
	if err != nil {
		t.Fatalf("seed = %d, err = %v", seed, err)
	}
	msg := &Registry_Master{}
	if err := github_com_gogo_protobuf_proto.Unmarshal(data, msg); err != nil {
		t.Fatalf("seed = %d, err = %v", seed, err)
	}
	littlefuzz := make([]byte, len(data))
	copy(littlefuzz, data)
	for i := range data {
		data[i] = byte(popr.Intn(256))
	}
	if err := p.VerboseEqual(msg); err != nil {
		t.Fatalf("seed = %d, %#v !VerboseProto %#v, since %v", seed, msg, p, err)
	}
	if !p.Equal(msg) {
		t.Fatalf("seed = %d, %#v !Proto %#v", seed, msg, p)
	}
	if len(littlefuzz) > 0 {
		fuzzamount := 100
		for i := 0; i < fuzzamount; i++ {
			littlefuzz[popr.Intn(len(littlefuzz))] = byte(popr.Intn(256))
			littlefuzz = append(littlefuzz, byte(popr.Intn(256)))
		}
		// shouldn't panic
		_ = github_com_gogo_protobuf_proto.Unmarshal(littlefuzz, msg)
	}
}

func TestRegistry_MasterMarshalTo(t *testing.T) {
	seed := time.Now().UnixNano()
	popr := math_rand.New(math_rand.NewSource(seed))
	p := NewPopulatedRegistry_Master(popr, false)
	size := p.Size()
	data := make([]byte, size)
	for i := range data {
		data[i] = byte(popr.Intn(256))
	}
	_, err := p.MarshalTo(data)
	if err != nil {
		t.Fatalf("seed = %d, err = %v", seed, err)
	}
	msg := &Registry_Master{}
	if err := github_com_gogo_protobuf_proto.Unmarshal(data, msg); err != nil {
		t.Fatalf("seed = %d, err = %v", seed, err)
	}
	for i := range data {
		data[i] = byte(popr.Intn(256))
	}
	if err := p.VerboseEqual(msg); err != nil {
		t.Fatalf("seed = %d, %#v !VerboseProto %#v, since %v", seed, msg, p, err)
	}
	if !p.Equal(msg) {
		t.Fatalf("seed = %d, %#v !Proto %#v", seed, msg, p)
	}
}

func BenchmarkRegistry_MasterProtoMarshal(b *testing.B) {
	popr := math_rand.New(math_rand.NewSource(616))
	total := 0
	pops := make([]*Registry_Master, 10000)
	for i := 0; i < 10000; i++ {
		pops[i] = NewPopulatedRegistry_Master(popr, false)
	}
	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		data, err := github_com_gogo_protobuf_proto.Marshal(pops[i%10000])
		if err != nil {
			panic(err)
		}
		total += len(data)
	}
	b.SetBytes(int64(total / b.N))
}

func BenchmarkRegistry_MasterProtoUnmarshal(b *testing.B) {
	popr := math_rand.New(math_rand.NewSource(616))
	total := 0
	datas := make([][]byte, 10000)
	for i := 0; i < 10000; i++ {
		data, err := github_com_gogo_protobuf_proto.Marshal(NewPopulatedRegistry_Master(popr, false))
		if err != nil {
			panic(err)
		}
		datas[i] = data
	}
	msg := &Registry_Master{}
	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		total += len(datas[i%10000])
		if err := github_com_gogo_protobuf_proto.Unmarshal(datas[i%10000], msg); err != nil {
			panic(err)
		}
	}
	b.SetBytes(int64(total / b.N))
}

func TestRegistry_SlaveProto(t *testing.T) {
	seed := time.Now().UnixNano()
	popr := math_rand.New(math_rand.NewSource(seed))
	p := NewPopulatedRegistry_Slave(popr, false)
	data, err := github_com_gogo_protobuf_proto.Marshal(p)
	if err != nil {
		t.Fatalf("seed = %d, err = %v", seed, err)
	}
	msg := &Registry_Slave{}
	if err := github_com_gogo_protobuf_proto.Unmarshal(data, msg); err != nil {
		t.Fatalf("seed = %d, err = %v", seed, err)
	}
	littlefuzz := make([]byte, len(data))
	copy(littlefuzz, data)
	for i := range data {
		data[i] = byte(popr.Intn(256))
	}
	if err := p.VerboseEqual(msg); err != nil {
		t.Fatalf("seed = %d, %#v !VerboseProto %#v, since %v", seed, msg, p, err)
	}
	if !p.Equal(msg) {
		t.Fatalf("seed = %d, %#v !Proto %#v", seed, msg, p)
	}
	if len(littlefuzz) > 0 {
		fuzzamount := 100
		for i := 0; i < fuzzamount; i++ {
			littlefuzz[popr.Intn(len(littlefuzz))] = byte(popr.Intn(256))
			littlefuzz = append(littlefuzz, byte(popr.Intn(256)))
		}
		// shouldn't panic
		_ = github_com_gogo_protobuf_proto.Unmarshal(littlefuzz, msg)
	}
}

func TestRegistry_SlaveMarshalTo(t *testing.T) {
	seed := time.Now().UnixNano()
	popr := math_rand.New(math_rand.NewSource(seed))
	p := NewPopulatedRegistry_Slave(popr, false)
	size := p.Size()
	data := make([]byte, size)
	for i := range data {
		data[i] = byte(popr.Intn(256))
	}
	_, err := p.MarshalTo(data)
	if err != nil {
		t.Fatalf("seed = %d, err = %v", seed, err)
	}
	msg := &Registry_Slave{}
	if err := github_com_gogo_protobuf_proto.Unmarshal(data, msg); err != nil {
		t.Fatalf("seed = %d, err = %v", seed, err)
	}
	for i := range data {
		data[i] = byte(popr.Intn(256))
	}
	if err := p.VerboseEqual(msg); err != nil {
		t.Fatalf("seed = %d, %#v !VerboseProto %#v, since %v", seed, msg, p, err)
	}
	if !p.Equal(msg) {
		t.Fatalf("seed = %d, %#v !Proto %#v", seed, msg, p)
	}
}

func BenchmarkRegistry_SlaveProtoMarshal(b *testing.B) {
	popr := math_rand.New(math_rand.NewSource(616))
	total := 0
	pops := make([]*Registry_Slave, 10000)
	for i := 0; i < 10000; i++ {
		pops[i] = NewPopulatedRegistry_Slave(popr, false)
	}
	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		data, err := github_com_gogo_protobuf_proto.Marshal(pops[i%10000])
		if err != nil {
			panic(err)
		}
		total += len(data)
	}
	b.SetBytes(int64(total / b.N))
}

func BenchmarkRegistry_SlaveProtoUnmarshal(b *testing.B) {
	popr := math_rand.New(math_rand.NewSource(616))
	total := 0
	datas := make([][]byte, 10000)
	for i := 0; i < 10000; i++ {
		data, err := github_com_gogo_protobuf_proto.Marshal(NewPopulatedRegistry_Slave(popr, false))
		if err != nil {
			panic(err)
		}
		datas[i] = data
	}
	msg := &Registry_Slave{}
	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		total += len(datas[i%10000])
		if err := github_com_gogo_protobuf_proto.Unmarshal(datas[i%10000], msg); err != nil {
			panic(err)
		}
	}
	b.SetBytes(int64(total / b.N))
}

func TestRegistry_SlavesProto(t *testing.T) {
	seed := time.Now().UnixNano()
	popr := math_rand.New(math_rand.NewSource(seed))
	p := NewPopulatedRegistry_Slaves(popr, false)
	data, err := github_com_gogo_protobuf_proto.Marshal(p)
	if err != nil {
		t.Fatalf("seed = %d, err = %v", seed, err)
	}
	msg := &Registry_Slaves{}
	if err := github_com_gogo_protobuf_proto.Unmarshal(data, msg); err != nil {
		t.Fatalf("seed = %d, err = %v", seed, err)
	}
	littlefuzz := make([]byte, len(data))
	copy(littlefuzz, data)
	for i := range data {
		data[i] = byte(popr.Intn(256))
	}
	if err := p.VerboseEqual(msg); err != nil {
		t.Fatalf("seed = %d, %#v !VerboseProto %#v, since %v", seed, msg, p, err)
	}
	if !p.Equal(msg) {
		t.Fatalf("seed = %d, %#v !Proto %#v", seed, msg, p)
	}
	if len(littlefuzz) > 0 {
		fuzzamount := 100
		for i := 0; i < fuzzamount; i++ {
			littlefuzz[popr.Intn(len(littlefuzz))] = byte(popr.Intn(256))
			littlefuzz = append(littlefuzz, byte(popr.Intn(256)))
		}
		// shouldn't panic
		_ = github_com_gogo_protobuf_proto.Unmarshal(littlefuzz, msg)
	}
}

func TestRegistry_SlavesMarshalTo(t *testing.T) {
	seed := time.Now().UnixNano()
	popr := math_rand.New(math_rand.NewSource(seed))
	p := NewPopulatedRegistry_Slaves(popr, false)
	size := p.Size()
	data := make([]byte, size)
	for i := range data {
		data[i] = byte(popr.Intn(256))
	}
	_, err := p.MarshalTo(data)
	if err != nil {
		t.Fatalf("seed = %d, err = %v", seed, err)
	}
	msg := &Registry_Slaves{}
	if err := github_com_gogo_protobuf_proto.Unmarshal(data, msg); err != nil {
		t.Fatalf("seed = %d, err = %v", seed, err)
	}
	for i := range data {
		data[i] = byte(popr.Intn(256))
	}
	if err := p.VerboseEqual(msg); err != nil {
		t.Fatalf("seed = %d, %#v !VerboseProto %#v, since %v", seed, msg, p, err)
	}
	if !p.Equal(msg) {
		t.Fatalf("seed = %d, %#v !Proto %#v", seed, msg, p)
	}
}

func BenchmarkRegistry_SlavesProtoMarshal(b *testing.B) {
	popr := math_rand.New(math_rand.NewSource(616))
	total := 0
	pops := make([]*Registry_Slaves, 10000)
	for i := 0; i < 10000; i++ {
		pops[i] = NewPopulatedRegistry_Slaves(popr, false)
	}
	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		data, err := github_com_gogo_protobuf_proto.Marshal(pops[i%10000])
		if err != nil {
			panic(err)
		}
		total += len(data)
	}
	b.SetBytes(int64(total / b.N))
}

func BenchmarkRegistry_SlavesProtoUnmarshal(b *testing.B) {
	popr := math_rand.New(math_rand.NewSource(616))
	total := 0
	datas := make([][]byte, 10000)
	for i := 0; i < 10000; i++ {
		data, err := github_com_gogo_protobuf_proto.Marshal(NewPopulatedRegistry_Slaves(popr, false))
		if err != nil {
			panic(err)
		}
		datas[i] = data
	}
	msg := &Registry_Slaves{}
	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		total += len(datas[i%10000])
		if err := github_com_gogo_protobuf_proto.Unmarshal(datas[i%10000], msg); err != nil {
			panic(err)
		}
	}
	b.SetBytes(int64(total / b.N))
}

func TestRegistryJSON(t *testing.T) {
	seed := time.Now().UnixNano()
	popr := math_rand.New(math_rand.NewSource(seed))
	p := NewPopulatedRegistry(popr, true)
	marshaler := github_com_gogo_protobuf_jsonpb.Marshaler{}
	jsondata, err := marshaler.MarshalToString(p)
	if err != nil {
		t.Fatalf("seed = %d, err = %v", seed, err)
	}
	msg := &Registry{}
	err = github_com_gogo_protobuf_jsonpb.UnmarshalString(jsondata, msg)
	if err != nil {
		t.Fatalf("seed = %d, err = %v", seed, err)
	}
	if err := p.VerboseEqual(msg); err != nil {
		t.Fatalf("seed = %d, %#v !VerboseProto %#v, since %v", seed, msg, p, err)
	}
	if !p.Equal(msg) {
		t.Fatalf("seed = %d, %#v !Json Equal %#v", seed, msg, p)
	}
}
func TestRegistry_MasterJSON(t *testing.T) {
	seed := time.Now().UnixNano()
	popr := math_rand.New(math_rand.NewSource(seed))
	p := NewPopulatedRegistry_Master(popr, true)
	marshaler := github_com_gogo_protobuf_jsonpb.Marshaler{}
	jsondata, err := marshaler.MarshalToString(p)
	if err != nil {
		t.Fatalf("seed = %d, err = %v", seed, err)
	}
	msg := &Registry_Master{}
	err = github_com_gogo_protobuf_jsonpb.UnmarshalString(jsondata, msg)
	if err != nil {
		t.Fatalf("seed = %d, err = %v", seed, err)
	}
	if err := p.VerboseEqual(msg); err != nil {
		t.Fatalf("seed = %d, %#v !VerboseProto %#v, since %v", seed, msg, p, err)
	}
	if !p.Equal(msg) {
		t.Fatalf("seed = %d, %#v !Json Equal %#v", seed, msg, p)
	}
}
func TestRegistry_SlaveJSON(t *testing.T) {
	seed := time.Now().UnixNano()
	popr := math_rand.New(math_rand.NewSource(seed))
	p := NewPopulatedRegistry_Slave(popr, true)
	marshaler := github_com_gogo_protobuf_jsonpb.Marshaler{}
	jsondata, err := marshaler.MarshalToString(p)
	if err != nil {
		t.Fatalf("seed = %d, err = %v", seed, err)
	}
	msg := &Registry_Slave{}
	err = github_com_gogo_protobuf_jsonpb.UnmarshalString(jsondata, msg)
	if err != nil {
		t.Fatalf("seed = %d, err = %v", seed, err)
	}
	if err := p.VerboseEqual(msg); err != nil {
		t.Fatalf("seed = %d, %#v !VerboseProto %#v, since %v", seed, msg, p, err)
	}
	if !p.Equal(msg) {
		t.Fatalf("seed = %d, %#v !Json Equal %#v", seed, msg, p)
	}
}
func TestRegistry_SlavesJSON(t *testing.T) {
	seed := time.Now().UnixNano()
	popr := math_rand.New(math_rand.NewSource(seed))
	p := NewPopulatedRegistry_Slaves(popr, true)
	marshaler := github_com_gogo_protobuf_jsonpb.Marshaler{}
	jsondata, err := marshaler.MarshalToString(p)
	if err != nil {
		t.Fatalf("seed = %d, err = %v", seed, err)
	}
	msg := &Registry_Slaves{}
	err = github_com_gogo_protobuf_jsonpb.UnmarshalString(jsondata, msg)
	if err != nil {
		t.Fatalf("seed = %d, err = %v", seed, err)
	}
	if err := p.VerboseEqual(msg); err != nil {
		t.Fatalf("seed = %d, %#v !VerboseProto %#v, since %v", seed, msg, p, err)
	}
	if !p.Equal(msg) {
		t.Fatalf("seed = %d, %#v !Json Equal %#v", seed, msg, p)
	}
}
func TestRegistryProtoText(t *testing.T) {
	seed := time.Now().UnixNano()
	popr := math_rand.New(math_rand.NewSource(seed))
	p := NewPopulatedRegistry(popr, true)
	data := github_com_gogo_protobuf_proto.MarshalTextString(p)
	msg := &Registry{}
	if err := github_com_gogo_protobuf_proto.UnmarshalText(data, msg); err != nil {
		t.Fatalf("seed = %d, err = %v", seed, err)
	}
	if err := p.VerboseEqual(msg); err != nil {
		t.Fatalf("seed = %d, %#v !VerboseProto %#v, since %v", seed, msg, p, err)
	}
	if !p.Equal(msg) {
		t.Fatalf("seed = %d, %#v !Proto %#v", seed, msg, p)
	}
}

func TestRegistryProtoCompactText(t *testing.T) {
	seed := time.Now().UnixNano()
	popr := math_rand.New(math_rand.NewSource(seed))
	p := NewPopulatedRegistry(popr, true)
	data := github_com_gogo_protobuf_proto.CompactTextString(p)
	msg := &Registry{}
	if err := github_com_gogo_protobuf_proto.UnmarshalText(data, msg); err != nil {
		t.Fatalf("seed = %d, err = %v", seed, err)
	}
	if err := p.VerboseEqual(msg); err != nil {
		t.Fatalf("seed = %d, %#v !VerboseProto %#v, since %v", seed, msg, p, err)
	}
	if !p.Equal(msg) {
		t.Fatalf("seed = %d, %#v !Proto %#v", seed, msg, p)
	}
}

func TestRegistry_MasterProtoText(t *testing.T) {
	seed := time.Now().UnixNano()
	popr := math_rand.New(math_rand.NewSource(seed))
	p := NewPopulatedRegistry_Master(popr, true)
	data := github_com_gogo_protobuf_proto.MarshalTextString(p)
msg := &Registry_Master{}
|
||||
if err := github_com_gogo_protobuf_proto.UnmarshalText(data, msg); err != nil {
|
||||
t.Fatalf("seed = %d, err = %v", seed, err)
|
||||
}
|
||||
if err := p.VerboseEqual(msg); err != nil {
|
||||
t.Fatalf("seed = %d, %#v !VerboseProto %#v, since %v", seed, msg, p, err)
|
||||
}
|
||||
if !p.Equal(msg) {
|
||||
t.Fatalf("seed = %d, %#v !Proto %#v", seed, msg, p)
|
||||
}
|
||||
}
|
||||
|
||||
func TestRegistry_MasterProtoCompactText(t *testing.T) {
|
||||
seed := time.Now().UnixNano()
|
||||
popr := math_rand.New(math_rand.NewSource(seed))
|
||||
p := NewPopulatedRegistry_Master(popr, true)
|
||||
data := github_com_gogo_protobuf_proto.CompactTextString(p)
|
||||
msg := &Registry_Master{}
|
||||
if err := github_com_gogo_protobuf_proto.UnmarshalText(data, msg); err != nil {
|
||||
t.Fatalf("seed = %d, err = %v", seed, err)
|
||||
}
|
||||
if err := p.VerboseEqual(msg); err != nil {
|
||||
t.Fatalf("seed = %d, %#v !VerboseProto %#v, since %v", seed, msg, p, err)
|
||||
}
|
||||
if !p.Equal(msg) {
|
||||
t.Fatalf("seed = %d, %#v !Proto %#v", seed, msg, p)
|
||||
}
|
||||
}
|
||||
|
||||
func TestRegistry_SlaveProtoText(t *testing.T) {
|
||||
seed := time.Now().UnixNano()
|
||||
popr := math_rand.New(math_rand.NewSource(seed))
|
||||
p := NewPopulatedRegistry_Slave(popr, true)
|
||||
data := github_com_gogo_protobuf_proto.MarshalTextString(p)
|
||||
msg := &Registry_Slave{}
|
||||
if err := github_com_gogo_protobuf_proto.UnmarshalText(data, msg); err != nil {
|
||||
t.Fatalf("seed = %d, err = %v", seed, err)
|
||||
}
|
||||
if err := p.VerboseEqual(msg); err != nil {
|
||||
t.Fatalf("seed = %d, %#v !VerboseProto %#v, since %v", seed, msg, p, err)
|
||||
}
|
||||
if !p.Equal(msg) {
|
||||
t.Fatalf("seed = %d, %#v !Proto %#v", seed, msg, p)
|
||||
}
|
||||
}
|
||||
|
||||
func TestRegistry_SlaveProtoCompactText(t *testing.T) {
|
||||
seed := time.Now().UnixNano()
|
||||
popr := math_rand.New(math_rand.NewSource(seed))
|
||||
p := NewPopulatedRegistry_Slave(popr, true)
|
||||
data := github_com_gogo_protobuf_proto.CompactTextString(p)
|
||||
msg := &Registry_Slave{}
|
||||
if err := github_com_gogo_protobuf_proto.UnmarshalText(data, msg); err != nil {
|
||||
t.Fatalf("seed = %d, err = %v", seed, err)
|
||||
}
|
||||
if err := p.VerboseEqual(msg); err != nil {
|
||||
t.Fatalf("seed = %d, %#v !VerboseProto %#v, since %v", seed, msg, p, err)
|
||||
}
|
||||
if !p.Equal(msg) {
|
||||
t.Fatalf("seed = %d, %#v !Proto %#v", seed, msg, p)
|
||||
}
|
||||
}
|
||||
|
||||
func TestRegistry_SlavesProtoText(t *testing.T) {
|
||||
seed := time.Now().UnixNano()
|
||||
popr := math_rand.New(math_rand.NewSource(seed))
|
||||
p := NewPopulatedRegistry_Slaves(popr, true)
|
||||
data := github_com_gogo_protobuf_proto.MarshalTextString(p)
|
||||
msg := &Registry_Slaves{}
|
||||
if err := github_com_gogo_protobuf_proto.UnmarshalText(data, msg); err != nil {
|
||||
t.Fatalf("seed = %d, err = %v", seed, err)
|
||||
}
|
||||
if err := p.VerboseEqual(msg); err != nil {
|
||||
t.Fatalf("seed = %d, %#v !VerboseProto %#v, since %v", seed, msg, p, err)
|
||||
}
|
||||
if !p.Equal(msg) {
|
||||
t.Fatalf("seed = %d, %#v !Proto %#v", seed, msg, p)
|
||||
}
|
||||
}
|
||||
|
||||
func TestRegistry_SlavesProtoCompactText(t *testing.T) {
|
||||
seed := time.Now().UnixNano()
|
||||
popr := math_rand.New(math_rand.NewSource(seed))
|
||||
p := NewPopulatedRegistry_Slaves(popr, true)
|
||||
data := github_com_gogo_protobuf_proto.CompactTextString(p)
|
||||
msg := &Registry_Slaves{}
|
||||
if err := github_com_gogo_protobuf_proto.UnmarshalText(data, msg); err != nil {
|
||||
t.Fatalf("seed = %d, err = %v", seed, err)
|
||||
}
|
||||
if err := p.VerboseEqual(msg); err != nil {
|
||||
t.Fatalf("seed = %d, %#v !VerboseProto %#v, since %v", seed, msg, p, err)
|
||||
}
|
||||
if !p.Equal(msg) {
|
||||
t.Fatalf("seed = %d, %#v !Proto %#v", seed, msg, p)
|
||||
}
|
||||
}
|
||||
|
||||
func TestRegistryVerboseEqual(t *testing.T) {
|
||||
popr := math_rand.New(math_rand.NewSource(time.Now().UnixNano()))
|
||||
p := NewPopulatedRegistry(popr, false)
|
||||
data, err := github_com_gogo_protobuf_proto.Marshal(p)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
msg := &Registry{}
|
||||
if err := github_com_gogo_protobuf_proto.Unmarshal(data, msg); err != nil {
|
||||
panic(err)
|
||||
}
|
||||
if err := p.VerboseEqual(msg); err != nil {
|
||||
t.Fatalf("%#v !VerboseEqual %#v, since %v", msg, p, err)
|
||||
}
|
||||
}
|
||||
func TestRegistry_MasterVerboseEqual(t *testing.T) {
|
||||
popr := math_rand.New(math_rand.NewSource(time.Now().UnixNano()))
|
||||
p := NewPopulatedRegistry_Master(popr, false)
|
||||
data, err := github_com_gogo_protobuf_proto.Marshal(p)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
msg := &Registry_Master{}
|
||||
if err := github_com_gogo_protobuf_proto.Unmarshal(data, msg); err != nil {
|
||||
panic(err)
|
||||
}
|
||||
if err := p.VerboseEqual(msg); err != nil {
|
||||
t.Fatalf("%#v !VerboseEqual %#v, since %v", msg, p, err)
|
||||
}
|
||||
}
|
||||
func TestRegistry_SlaveVerboseEqual(t *testing.T) {
|
||||
popr := math_rand.New(math_rand.NewSource(time.Now().UnixNano()))
|
||||
p := NewPopulatedRegistry_Slave(popr, false)
|
||||
data, err := github_com_gogo_protobuf_proto.Marshal(p)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
msg := &Registry_Slave{}
|
||||
if err := github_com_gogo_protobuf_proto.Unmarshal(data, msg); err != nil {
|
||||
panic(err)
|
||||
}
|
||||
if err := p.VerboseEqual(msg); err != nil {
|
||||
t.Fatalf("%#v !VerboseEqual %#v, since %v", msg, p, err)
|
||||
}
|
||||
}
|
||||
func TestRegistry_SlavesVerboseEqual(t *testing.T) {
|
||||
popr := math_rand.New(math_rand.NewSource(time.Now().UnixNano()))
|
||||
p := NewPopulatedRegistry_Slaves(popr, false)
|
||||
data, err := github_com_gogo_protobuf_proto.Marshal(p)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
msg := &Registry_Slaves{}
|
||||
if err := github_com_gogo_protobuf_proto.Unmarshal(data, msg); err != nil {
|
||||
panic(err)
|
||||
}
|
||||
if err := p.VerboseEqual(msg); err != nil {
|
||||
t.Fatalf("%#v !VerboseEqual %#v, since %v", msg, p, err)
|
||||
}
|
||||
}
|
||||
func TestRegistryGoString(t *testing.T) {
|
||||
popr := math_rand.New(math_rand.NewSource(time.Now().UnixNano()))
|
||||
p := NewPopulatedRegistry(popr, false)
|
||||
s1 := p.GoString()
|
||||
s2 := fmt.Sprintf("%#v", p)
|
||||
if s1 != s2 {
|
||||
t.Fatalf("GoString want %v got %v", s1, s2)
|
||||
}
|
||||
_, err := go_parser.ParseExpr(s1)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
}
|
||||
func TestRegistry_MasterGoString(t *testing.T) {
|
||||
popr := math_rand.New(math_rand.NewSource(time.Now().UnixNano()))
|
||||
p := NewPopulatedRegistry_Master(popr, false)
|
||||
s1 := p.GoString()
|
||||
s2 := fmt.Sprintf("%#v", p)
|
||||
if s1 != s2 {
|
||||
t.Fatalf("GoString want %v got %v", s1, s2)
|
||||
}
|
||||
_, err := go_parser.ParseExpr(s1)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
}
|
||||
func TestRegistry_SlaveGoString(t *testing.T) {
|
||||
popr := math_rand.New(math_rand.NewSource(time.Now().UnixNano()))
|
||||
p := NewPopulatedRegistry_Slave(popr, false)
|
||||
s1 := p.GoString()
|
||||
s2 := fmt.Sprintf("%#v", p)
|
||||
if s1 != s2 {
|
||||
t.Fatalf("GoString want %v got %v", s1, s2)
|
||||
}
|
||||
_, err := go_parser.ParseExpr(s1)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
}
|
||||
func TestRegistry_SlavesGoString(t *testing.T) {
|
||||
popr := math_rand.New(math_rand.NewSource(time.Now().UnixNano()))
|
||||
p := NewPopulatedRegistry_Slaves(popr, false)
|
||||
s1 := p.GoString()
|
||||
s2 := fmt.Sprintf("%#v", p)
|
||||
if s1 != s2 {
|
||||
t.Fatalf("GoString want %v got %v", s1, s2)
|
||||
}
|
||||
_, err := go_parser.ParseExpr(s1)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
}
|
||||
func TestRegistrySize(t *testing.T) {
|
||||
seed := time.Now().UnixNano()
|
||||
popr := math_rand.New(math_rand.NewSource(seed))
|
||||
p := NewPopulatedRegistry(popr, true)
|
||||
size2 := github_com_gogo_protobuf_proto.Size(p)
|
||||
data, err := github_com_gogo_protobuf_proto.Marshal(p)
|
||||
if err != nil {
|
||||
t.Fatalf("seed = %d, err = %v", seed, err)
|
||||
}
|
||||
size := p.Size()
|
||||
if len(data) != size {
|
||||
t.Errorf("seed = %d, size %v != marshalled size %v", seed, size, len(data))
|
||||
}
|
||||
if size2 != size {
|
||||
t.Errorf("seed = %d, size %v != before marshal proto.Size %v", seed, size, size2)
|
||||
}
|
||||
size3 := github_com_gogo_protobuf_proto.Size(p)
|
||||
if size3 != size {
|
||||
t.Errorf("seed = %d, size %v != after marshal proto.Size %v", seed, size, size3)
|
||||
}
|
||||
}
|
||||
|
||||
func BenchmarkRegistrySize(b *testing.B) {
|
||||
popr := math_rand.New(math_rand.NewSource(616))
|
||||
total := 0
|
||||
pops := make([]*Registry, 1000)
|
||||
for i := 0; i < 1000; i++ {
|
||||
pops[i] = NewPopulatedRegistry(popr, false)
|
||||
}
|
||||
b.ResetTimer()
|
||||
for i := 0; i < b.N; i++ {
|
||||
total += pops[i%1000].Size()
|
||||
}
|
||||
b.SetBytes(int64(total / b.N))
|
||||
}
|
||||
|
||||
func TestRegistry_MasterSize(t *testing.T) {
|
||||
seed := time.Now().UnixNano()
|
||||
popr := math_rand.New(math_rand.NewSource(seed))
|
||||
p := NewPopulatedRegistry_Master(popr, true)
|
||||
size2 := github_com_gogo_protobuf_proto.Size(p)
|
||||
data, err := github_com_gogo_protobuf_proto.Marshal(p)
|
||||
if err != nil {
|
||||
t.Fatalf("seed = %d, err = %v", seed, err)
|
||||
}
|
||||
size := p.Size()
|
||||
if len(data) != size {
|
||||
t.Errorf("seed = %d, size %v != marshalled size %v", seed, size, len(data))
|
||||
}
|
||||
if size2 != size {
|
||||
t.Errorf("seed = %d, size %v != before marshal proto.Size %v", seed, size, size2)
|
||||
}
|
||||
size3 := github_com_gogo_protobuf_proto.Size(p)
|
||||
if size3 != size {
|
||||
t.Errorf("seed = %d, size %v != after marshal proto.Size %v", seed, size, size3)
|
||||
}
|
||||
}
|
||||
|
||||
func BenchmarkRegistry_MasterSize(b *testing.B) {
|
||||
popr := math_rand.New(math_rand.NewSource(616))
|
||||
total := 0
|
||||
pops := make([]*Registry_Master, 1000)
|
||||
for i := 0; i < 1000; i++ {
|
||||
pops[i] = NewPopulatedRegistry_Master(popr, false)
|
||||
}
|
||||
b.ResetTimer()
|
||||
for i := 0; i < b.N; i++ {
|
||||
total += pops[i%1000].Size()
|
||||
}
|
||||
b.SetBytes(int64(total / b.N))
|
||||
}
|
||||
|
||||
func TestRegistry_SlaveSize(t *testing.T) {
|
||||
seed := time.Now().UnixNano()
|
||||
popr := math_rand.New(math_rand.NewSource(seed))
|
||||
p := NewPopulatedRegistry_Slave(popr, true)
|
||||
size2 := github_com_gogo_protobuf_proto.Size(p)
|
||||
data, err := github_com_gogo_protobuf_proto.Marshal(p)
|
||||
if err != nil {
|
||||
t.Fatalf("seed = %d, err = %v", seed, err)
|
||||
}
|
||||
size := p.Size()
|
||||
if len(data) != size {
|
||||
t.Errorf("seed = %d, size %v != marshalled size %v", seed, size, len(data))
|
||||
}
|
||||
if size2 != size {
|
||||
t.Errorf("seed = %d, size %v != before marshal proto.Size %v", seed, size, size2)
|
||||
}
|
||||
size3 := github_com_gogo_protobuf_proto.Size(p)
|
||||
if size3 != size {
|
||||
t.Errorf("seed = %d, size %v != after marshal proto.Size %v", seed, size, size3)
|
||||
}
|
||||
}
|
||||
|
||||
func BenchmarkRegistry_SlaveSize(b *testing.B) {
|
||||
popr := math_rand.New(math_rand.NewSource(616))
|
||||
total := 0
|
||||
pops := make([]*Registry_Slave, 1000)
|
||||
for i := 0; i < 1000; i++ {
|
||||
pops[i] = NewPopulatedRegistry_Slave(popr, false)
|
||||
}
|
||||
b.ResetTimer()
|
||||
for i := 0; i < b.N; i++ {
|
||||
total += pops[i%1000].Size()
|
||||
}
|
||||
b.SetBytes(int64(total / b.N))
|
||||
}
|
||||
|
||||
func TestRegistry_SlavesSize(t *testing.T) {
|
||||
seed := time.Now().UnixNano()
|
||||
popr := math_rand.New(math_rand.NewSource(seed))
|
||||
p := NewPopulatedRegistry_Slaves(popr, true)
|
||||
size2 := github_com_gogo_protobuf_proto.Size(p)
|
||||
data, err := github_com_gogo_protobuf_proto.Marshal(p)
|
||||
if err != nil {
|
||||
t.Fatalf("seed = %d, err = %v", seed, err)
|
||||
}
|
||||
size := p.Size()
|
||||
if len(data) != size {
|
||||
t.Errorf("seed = %d, size %v != marshalled size %v", seed, size, len(data))
|
||||
}
|
||||
if size2 != size {
|
||||
t.Errorf("seed = %d, size %v != before marshal proto.Size %v", seed, size, size2)
|
||||
}
|
||||
size3 := github_com_gogo_protobuf_proto.Size(p)
|
||||
if size3 != size {
|
||||
t.Errorf("seed = %d, size %v != after marshal proto.Size %v", seed, size, size3)
|
||||
}
|
||||
}
|
||||
|
||||
func BenchmarkRegistry_SlavesSize(b *testing.B) {
|
||||
popr := math_rand.New(math_rand.NewSource(616))
|
||||
total := 0
|
||||
pops := make([]*Registry_Slaves, 1000)
|
||||
for i := 0; i < 1000; i++ {
|
||||
pops[i] = NewPopulatedRegistry_Slaves(popr, false)
|
||||
}
|
||||
b.ResetTimer()
|
||||
for i := 0; i < b.N; i++ {
|
||||
total += pops[i%1000].Size()
|
||||
}
|
||||
b.SetBytes(int64(total / b.N))
|
||||
}
|
||||
|
||||
func TestRegistryStringer(t *testing.T) {
|
||||
popr := math_rand.New(math_rand.NewSource(time.Now().UnixNano()))
|
||||
p := NewPopulatedRegistry(popr, false)
|
||||
s1 := p.String()
|
||||
s2 := fmt.Sprintf("%v", p)
|
||||
if s1 != s2 {
|
||||
t.Fatalf("String want %v got %v", s1, s2)
|
||||
}
|
||||
}
|
||||
func TestRegistry_MasterStringer(t *testing.T) {
|
||||
popr := math_rand.New(math_rand.NewSource(time.Now().UnixNano()))
|
||||
p := NewPopulatedRegistry_Master(popr, false)
|
||||
s1 := p.String()
|
||||
s2 := fmt.Sprintf("%v", p)
|
||||
if s1 != s2 {
|
||||
t.Fatalf("String want %v got %v", s1, s2)
|
||||
}
|
||||
}
|
||||
func TestRegistry_SlaveStringer(t *testing.T) {
|
||||
popr := math_rand.New(math_rand.NewSource(time.Now().UnixNano()))
|
||||
p := NewPopulatedRegistry_Slave(popr, false)
|
||||
s1 := p.String()
|
||||
s2 := fmt.Sprintf("%v", p)
|
||||
if s1 != s2 {
|
||||
t.Fatalf("String want %v got %v", s1, s2)
|
||||
}
|
||||
}
|
||||
func TestRegistry_SlavesStringer(t *testing.T) {
|
||||
popr := math_rand.New(math_rand.NewSource(time.Now().UnixNano()))
|
||||
p := NewPopulatedRegistry_Slaves(popr, false)
|
||||
s1 := p.String()
|
||||
s2 := fmt.Sprintf("%v", p)
|
||||
if s1 != s2 {
|
||||
t.Fatalf("String want %v got %v", s1, s2)
|
||||
}
|
||||
}
|
||||
|
||||
//These tests are generated by github.com/gogo/protobuf/plugin/testgen
|
File diff suppressed because it is too large
Load Diff
|
@ -21,6 +21,17 @@ package mesosproto;

import "mesos.proto";
import "github.com/gogo/protobuf/gogoproto/gogo.proto";

option (gogoproto.gostring_all) = true;
option (gogoproto.equal_all) = true;
option (gogoproto.verbose_equal_all) = true;
option (gogoproto.goproto_stringer_all) = false;
option (gogoproto.stringer_all) = true;
option (gogoproto.populate_all) = true;
option (gogoproto.testgen_all) = true;
option (gogoproto.benchgen_all) = true;
option (gogoproto.marshaler_all) = true;
option (gogoproto.sizer_all) = true;
option (gogoproto.unmarshaler_all) = true;

/**
 * Scheduler event API.

@ -40,11 +51,26 @@ message Event {
    MESSAGE = 5;  // See 'Message' below.
    FAILURE = 6;  // See 'Failure' below.
    ERROR = 7;    // See 'Error' below.

    // Periodic message sent by the Mesos master according to
    // 'Subscribed.heartbeat_interval_seconds'. If the scheduler does
    // not receive any events (including heartbeats) for an extended
    // period of time (e.g., 5 x heartbeat_interval_seconds), there is
    // likely a network partition. In such a case the scheduler should
    // close the existing subscription connection and resubscribe
    // using a backoff strategy.
    HEARTBEAT = 8;
  }
  // First event received when the scheduler subscribes.
  message Subscribed {
    required FrameworkID framework_id = 1;

    // This value will be set if the master is sending heartbeats. See
    // the comment above on 'HEARTBEAT' for more details.
    // TODO(vinod): Implement heartbeats in the master once the master
    // can send HTTP events.
    optional double heartbeat_interval_seconds = 2;
  }

  // Received whenever there are new resources that are offered to the

@ -152,6 +178,7 @@ message Call {
    ACKNOWLEDGE = 8;  // See 'Acknowledge' below.
    RECONCILE = 9;    // See 'Reconcile' below.
    MESSAGE = 10;     // See 'Message' below.
    REQUEST = 11;     // See 'Request' below.

    // TODO(benh): Consider adding an 'ACTIVATE' and 'DEACTIVATE' for
    // already subscribed frameworks as a way of stopping offers from

@ -282,6 +309,16 @@ message Call {
    required bytes data = 3;
  }

  // Requests a specific set of resources from Mesos's allocator. If
  // the allocator has support for this, corresponding offers will be
  // sent asynchronously via the OFFERS event(s).
  //
  // NOTE: The built-in hierarchical allocator doesn't have support
  // for this call and hence simply ignores it.
  message Request {
    repeated mesosproto.Request requests = 1;
  }

  // Identifies who generated this call. Master assigns a framework id
  // when a new scheduler subscribes for the first time. Once assigned,
  // the scheduler must set the 'framework_id' here and within its

@ -302,4 +339,5 @@ message Call {
    optional Acknowledge acknowledge = 8;
    optional Reconcile reconcile = 9;
    optional Message message = 10;
    optional Request request = 11;
  }
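The HEARTBEAT comment above implies a client-side watchdog. As a minimal sketch only (this helper is not part of the patch; the event channel and resubscribe callback are hypothetical), a Go scheduler might detect a silent subscription like this:

// watchHeartbeats assumes the caller feeds every decoded scheduler event
// into events, and supplies a resubscribe callback that applies its own
// backoff. If no event (heartbeats included) arrives within 5x the
// advertised heartbeat interval, the subscription is presumed dead.
// Assumes "time" is imported.
func watchHeartbeats(events <-chan struct{}, interval time.Duration, resubscribe func()) {
	timeout := 5 * interval
	timer := time.NewTimer(timeout)
	defer timer.Stop()
	for {
		select {
		case _, ok := <-events:
			if !ok {
				return // subscription closed normally
			}
			if !timer.Stop() {
				<-timer.C
			}
			timer.Reset(timeout) // any event counts as liveness
		case <-timer.C:
			resubscribe() // likely network partition; reconnect with backoff
			return
		}
	}
}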
4395 Godeps/_workspace/src/github.com/mesos/mesos-go/mesosproto/schedulerpb_test.go generated vendored Normal file
File diff suppressed because it is too large
@ -32,7 +32,6 @@ option (gogoproto.marshaler_all) = true;
option (gogoproto.sizer_all) = true;
option (gogoproto.unmarshaler_all) = true;


// Describes a state entry, a versioned (via a UUID) key/value pair.
message Entry {
  required string name = 1;

File diff suppressed because it is too large
@ -2,5 +2,5 @@ package mesosutil

const (
	// MesosVersion indicates the supported mesos version.
	MesosVersion = "0.20.0"
	MesosVersion = "0.24.0"
)
@ -36,6 +36,28 @@ func FilterResources(resources []*mesos.Resource, filter func(*mesos.Resource) b
	return result
}

func AddResourceReservation(resource *mesos.Resource, principal string, role string) *mesos.Resource {
	resource.Reservation = &mesos.Resource_ReservationInfo{Principal: proto.String(principal)}
	resource.Role = proto.String(role)
	return resource
}

func NewScalarResourceWithReservation(name string, value float64, principal string, role string) *mesos.Resource {
	return AddResourceReservation(NewScalarResource(name, value), principal, role)
}

func NewRangesResourceWithReservation(name string, ranges []*mesos.Value_Range, principal string, role string) *mesos.Resource {
	return AddResourceReservation(NewRangesResource(name, ranges), principal, role)
}

func NewSetResourceWithReservation(name string, items []string, principal string, role string) *mesos.Resource {
	return AddResourceReservation(NewSetResource(name, items), principal, role)
}

func NewVolumeResourceWithReservation(val float64, containerPath string, persistenceId string, mode *mesos.Volume_Mode, principal string, role string) *mesos.Resource {
	return AddResourceReservation(NewVolumeResource(val, containerPath, persistenceId, mode), principal, role)
}

func NewScalarResource(name string, val float64) *mesos.Resource {
	return &mesos.Resource{
		Name: proto.String(name),
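For illustration only (this helper is not in the patch; the role and principal values are hypothetical), the new `*WithReservation` constructors compose with the existing ones like so:

func exampleReservedResources() []*mesos.Resource {
	// Reserve 2 CPUs and 512MB of memory for role "ads" on behalf of
	// principal "ops"; equivalent to AddResourceReservation applied to
	// the output of the plain constructors.
	return []*mesos.Resource{
		NewScalarResourceWithReservation("cpus", 2, "ops", "ads"),
		NewScalarResourceWithReservation("mem", 512, "ops", "ads"),
	}
}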
@ -58,7 +80,15 @@ func NewSetResource(name string, items []string) *mesos.Resource {
		Type: mesos.Value_SET.Enum(),
		Set:  &mesos.Value_Set{Item: items},
	}
}

func NewVolumeResource(val float64, containerPath string, persistenceId string, mode *mesos.Volume_Mode) *mesos.Resource {
	resource := NewScalarResource("disk", val)
	resource.Disk = &mesos.Resource_DiskInfo{
		Persistence: &mesos.Resource_DiskInfo_Persistence{Id: proto.String(persistenceId)},
		Volume:      &mesos.Volume{ContainerPath: proto.String(containerPath), Mode: mode},
	}
	return resource
}

func NewFrameworkID(id string) *mesos.FrameworkID {
@ -153,3 +183,38 @@ func NewExecutorInfo(execId *mesos.ExecutorID, command *mesos.CommandInfo) *meso
		Command: command,
	}
}

func NewCreateOperation(volumes []*mesos.Resource) *mesos.Offer_Operation {
	return &mesos.Offer_Operation{
		Type:   mesos.Offer_Operation_CREATE.Enum(),
		Create: &mesos.Offer_Operation_Create{Volumes: volumes},
	}
}

func NewDestroyOperation(volumes []*mesos.Resource) *mesos.Offer_Operation {
	return &mesos.Offer_Operation{
		Type:    mesos.Offer_Operation_DESTROY.Enum(),
		Destroy: &mesos.Offer_Operation_Destroy{Volumes: volumes},
	}
}

func NewReserveOperation(resources []*mesos.Resource) *mesos.Offer_Operation {
	return &mesos.Offer_Operation{
		Type:    mesos.Offer_Operation_RESERVE.Enum(),
		Reserve: &mesos.Offer_Operation_Reserve{Resources: resources},
	}
}

func NewUnreserveOperation(resources []*mesos.Resource) *mesos.Offer_Operation {
	return &mesos.Offer_Operation{
		Type:      mesos.Offer_Operation_UNRESERVE.Enum(),
		Unreserve: &mesos.Offer_Operation_Unreserve{Resources: resources},
	}
}

func NewLaunchOperation(tasks []*mesos.TaskInfo) *mesos.Offer_Operation {
	return &mesos.Offer_Operation{
		Type:   mesos.Offer_Operation_LAUNCH.Enum(),
		Launch: &mesos.Offer_Operation_Launch{TaskInfos: tasks},
	}
}
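As a sketch of how these operation constructors compose (hypothetical persistence ID, path, role, and principal; offer-acceptance wiring omitted), a framework can carve a persistent volume out of reserved disk and launch onto it:

func exampleVolumeOperations(tasks []*mesos.TaskInfo) []*mesos.Offer_Operation {
	// 1024MB persistent volume, mounted at "data" inside the container,
	// reserved for role "ads" on behalf of principal "ops".
	volume := NewVolumeResourceWithReservation(1024, "data", "persistence-id-0", mesos.Volume_RW.Enum(), "ops", "ads")
	return []*mesos.Offer_Operation{
		NewCreateOperation([]*mesos.Resource{volume}),
		NewLaunchOperation(tasks),
		// NewDestroyOperation([]*mesos.Resource{volume}) would tear it down again.
	}
}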
641 Godeps/_workspace/src/github.com/mesos/mesos-go/messenger/decoder.go generated vendored Normal file

@ -0,0 +1,641 @@
package messenger

import (
	"bufio"
	"bytes"
	"errors"
	"io"
	"net"
	"net/http"
	"net/textproto"
	"net/url"
	"strconv"
	"strings"
	"sync"
	"sync/atomic"
	"syscall"
	"time"

	log "github.com/golang/glog"
)

const (
	DefaultReadTimeout  = 5 * time.Second
	DefaultWriteTimeout = 5 * time.Second

	// writeFlushPeriod is the amount of time we're willing to wait for a single
	// response buffer to be fully written to the underlying TCP connection; after
	// this amount of time the remaining bytes of the response are discarded. see
	// responseWriter().
	writeFlushPeriod = 30 * time.Second
)

type decoderID int32

func (did decoderID) String() string {
	return "[" + strconv.Itoa(int(did)) + "]"
}

func (did *decoderID) next() decoderID {
	return decoderID(atomic.AddInt32((*int32)(did), 1))
}

var (
	errHijackFailed = errors.New("failed to hijack http connection")
	did             decoderID // decoder ID counter
	closedChan      = make(chan struct{})
)

func init() {
	close(closedChan)
}

type Decoder interface {
	Requests() <-chan *Request
	Err() <-chan error
	Cancel(bool)
}

type Request struct {
	*http.Request
	response chan<- Response // callers that are finished with a Request should ensure that response is *always* closed, regardless of whether a Response has been written.
}

type Response struct {
	code   int
	reason string
}

type httpDecoder struct {
	req          *http.Request // original request
	kalive       bool          // keepalive
	chunked      bool          // chunked
	msg          chan *Request
	con          net.Conn
	rw           *bufio.ReadWriter
	errCh        chan error
	buf          *bytes.Buffer
	lrc          *io.LimitedReader
	shouldQuit   chan struct{} // signal chan, closes upon calls to Cancel(...)
	forceQuit    chan struct{} // signal chan, indicates that quit is NOT graceful; closes upon Cancel(false)
	cancelGuard  sync.Mutex
	readTimeout  time.Duration
	writeTimeout time.Duration
	idtag        string             // useful for debugging
	sendError    func(err error)    // abstraction for error handling
	outCh        chan *bytes.Buffer // chan of responses to be written to the connection
}

// DecodeHTTP hijacks an HTTP server connection and generates mesos libprocess HTTP
// requests via the returned chan. Upon generation of an error in the error chan the
// decoder's internal goroutine will terminate. This func returns immediately.
// The caller should immediately *stop* using the ResponseWriter and Request that were
// passed as parameters; the decoder assumes full control of the HTTP transport.
func DecodeHTTP(w http.ResponseWriter, r *http.Request) Decoder {
	id := did.next()
	d := &httpDecoder{
		msg:          make(chan *Request),
		errCh:        make(chan error, 1),
		req:          r,
		shouldQuit:   make(chan struct{}),
		forceQuit:    make(chan struct{}),
		readTimeout:  DefaultReadTimeout,
		writeTimeout: DefaultWriteTimeout,
		idtag:        id.String(),
		outCh:        make(chan *bytes.Buffer),
	}
	d.sendError = d.defaultSendError
	go d.run(w)
	return d
}
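// exampleDecoderLoop illustrates (hypothetically; this helper is not part of
// the original file) how a package-internal caller would drive the decoder:
// drain Requests() and Err() until either channel closes, then cancel
// gracefully so pending responses are flushed.
func exampleDecoderLoop(w http.ResponseWriter, r *http.Request, handle func(*Request)) {
	d := DecodeHTTP(w, r)
	defer d.Cancel(true) // graceful: flush pending responses first
	for {
		select {
		case req, ok := <-d.Requests():
			if !ok {
				return
			}
			handle(req) // the handler must eventually close req.response
		case err, ok := <-d.Err():
			if ok {
				log.Errorln(err.Error())
			}
			return
		}
	}
}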
func (d *httpDecoder) Requests() <-chan *Request {
	return d.msg
}

func (d *httpDecoder) Err() <-chan error {
	return d.errCh
}

// Cancel the decoding process; if graceful then process pending responses before terminating
func (d *httpDecoder) Cancel(graceful bool) {
	log.V(2).Infof("%scancel:%t", d.idtag, graceful)
	d.cancelGuard.Lock()
	defer d.cancelGuard.Unlock()
	select {
	case <-d.shouldQuit:
		// already quitting, but perhaps gracefully?
	default:
		close(d.shouldQuit)
	}
	// allow caller to "upgrade" from a graceful cancel to a forced one
	if !graceful {
		select {
		case <-d.forceQuit:
			// already forcefully quitting
		default:
			close(d.forceQuit) // push it!
		}
	}
}

func (d *httpDecoder) run(res http.ResponseWriter) {
	defer func() {
		close(d.outCh) // we're finished generating response objects
		log.V(2).Infoln(d.idtag + "run: terminating")
	}()

	for state := d.bootstrapState(res); state != nil; {
		next := state(d)
		state = next
	}
}

// tryFlushResponse flushes the response buffer (if not empty); on a write
// timeout the remaining bytes stay in the buffer so the caller may retry.
func (d *httpDecoder) tryFlushResponse(out *bytes.Buffer) {
	log.V(2).Infof(d.idtag+"try-flush-responses: %d bytes to flush", out.Len())
	// set a write deadline here so that we don't block for very long.
	err := d.setWriteTimeout()
	if err != nil {
		// this is a problem because if we can't set the timeout then we can't guarantee
		// how long a write op might block for. Log the error and skip this response.
		log.Errorln("failed to set write deadline, aborting response:", err.Error())
	} else {
		_, err = out.WriteTo(d.rw.Writer)
		if err != nil {
			if neterr, ok := err.(net.Error); ok && neterr.Timeout() && out.Len() > 0 {
				// we couldn't fully write before timing out; return and hope that
				// we have better luck next time.
				return
			}
			// we don't really know how to deal with other kinds of errors, so
			// log it and skip the rest of the response.
			log.Errorln("failed to write response buffer:", err.Error())
		}
		err = d.rw.Flush()
		if err != nil {
			if neterr, ok := err.(net.Error); ok && neterr.Timeout() && out.Len() > 0 {
				return
			}
			log.Errorln("failed to flush response buffer:", err.Error())
		}
	}
}

// TODO(jdef) make this a func on Response, to write its contents to a *bytes.Buffer
func (d *httpDecoder) buildResponseEntity(resp *Response) *bytes.Buffer {
	log.V(2).Infoln(d.idtag + "build-response-entity")

	out := &bytes.Buffer{}

	// generate new response buffer content and continue; buffer should have
	// at least a response status-line w/ Content-Length: 0
	out.WriteString("HTTP/1.1 ")
	out.WriteString(strconv.Itoa(resp.code))
	out.WriteString(" ")
	out.WriteString(resp.reason)
	out.WriteString(crlf + "Content-Length: 0" + crlf)

	select {
	case <-d.shouldQuit:
		// this is the last request in the pipeline and we've been told to quit, so
		// indicate that the server will close the connection.
		out.WriteString("Connection: Close" + crlf)
	default:
	}
	out.WriteString(crlf) // this ends the HTTP response entity
	return out
}

// updateForRequest updates the chunked and kalive fields of the decoder to align
// with the header values of the request
func (d *httpDecoder) updateForRequest() {
	// check "Transfer-Encoding" for "chunked"
	d.chunked = false
	for _, v := range d.req.Header["Transfer-Encoding"] {
		if v == "chunked" {
			d.chunked = true
			break
		}
	}
	if !d.chunked && d.req.ContentLength < 0 {
		// strongly suspect that Go's internal net/http lib is stripping
		// the Transfer-Encoding header from the initial request, so this
		// workaround makes a very mesos-specific assumption: an unknown
		// Content-Length indicates a chunked stream.
		d.chunked = true
	}

	// check "Connection" for "Keep-Alive"
	d.kalive = d.req.Header.Get("Connection") == "Keep-Alive"

	log.V(2).Infof(d.idtag+"update-for-request: chunked %v keep-alive %v", d.chunked, d.kalive)
}

func (d *httpDecoder) readBodyContent() httpState {
	log.V(2).Info(d.idtag + "read-body-content")
	if d.chunked {
		d.buf = &bytes.Buffer{}
		return readChunkHeaderState
	} else {
		d.lrc = limit(d.rw.Reader, d.req.ContentLength)
		d.buf = &bytes.Buffer{}
		return readBodyState
	}
}

const http202response = "HTTP/1.1 202 OK\r\nContent-Length: 0\r\n\r\n"

func (d *httpDecoder) generateRequest() httpState {
	log.V(2).Infof(d.idtag + "generate-request")
	// send a Request to msg
	b := d.buf.Bytes()
	rch := make(chan Response, 1)
	r := &Request{
		Request: &http.Request{
			Method:        d.req.Method,
			URL:           d.req.URL,
			Proto:         d.req.Proto,
			ProtoMajor:    d.req.ProtoMajor,
			ProtoMinor:    d.req.ProtoMinor,
			Header:        d.req.Header,
			Close:         !d.kalive,
			Host:          d.req.Host,
			RequestURI:    d.req.RequestURI,
			Body:          &body{bytes.NewBuffer(b)},
			ContentLength: int64(len(b)),
		},
		response: rch,
	}

	select {
	case d.msg <- r:
	case <-d.forceQuit:
		return terminateState
	}

	select {
	case <-d.forceQuit:
		return terminateState
	case resp, ok := <-rch:
		if ok {
			// response required, so build it and ship it
			out := d.buildResponseEntity(&resp)
			select {
			case <-d.forceQuit:
				return terminateState
			case d.outCh <- out:
			}
		}
	}

	if d.kalive {
		d.req = &http.Request{
			ContentLength: -1,
			Header:        make(http.Header),
		}
		return awaitRequestState
	} else {
		return gracefulTerminateState
	}
}

func (d *httpDecoder) defaultSendError(err error) {
	d.errCh <- err
}

type httpState func(d *httpDecoder) httpState

// terminateState forcefully shuts down the state machine
func terminateState(d *httpDecoder) httpState {
	log.V(2).Infoln(d.idtag + "terminate-state")
	// closing these chans tells Decoder users that it's wrapping up
	close(d.msg)
	close(d.errCh)

	// attempt to forcefully close the connection and signal response handlers that
	// no further responses should be written
	d.Cancel(false)

	if d.con != nil {
		d.con.Close()
	}

	// there is no spoon
	return nil
}

func gracefulTerminateState(d *httpDecoder) httpState {
	log.V(2).Infoln(d.idtag + "gracefully-terminate-state")
	// closing these chans tells Decoder users that it's wrapping up
	close(d.msg)
	close(d.errCh)

	// gracefully terminate the connection; signal that we should flush pending
	// responses before closing the connection.
	d.Cancel(true)

	return nil
}

func limit(r *bufio.Reader, limit int64) *io.LimitedReader {
	return &io.LimitedReader{
		R: r,
		N: limit,
	}
}

// bootstrapState expects to be called when the standard net/http lib has already
// read the initial request query line + headers from a connection. the request
// is ready to be hijacked at this point.
func (d *httpDecoder) bootstrapState(res http.ResponseWriter) httpState {
	log.V(2).Infoln(d.idtag + "bootstrap-state")

	d.updateForRequest()

	// hijack
	hj, ok := res.(http.Hijacker)
	if !ok {
		http.Error(res, "server does not support hijack", http.StatusInternalServerError)
		d.sendError(errHijackFailed)
		return terminateState
	}
	c, rw, err := hj.Hijack()
	if err != nil {
		http.Error(res, "failed to hijack the connection", http.StatusInternalServerError)
		d.sendError(errHijackFailed)
		return terminateState
	}

	d.rw = rw
	d.con = c

	go d.responseWriter()
	return d.readBodyContent()
}

func (d *httpDecoder) responseWriter() {
	defer func() {
		log.V(3).Infoln(d.idtag + "response-writer: closing connection")
		d.con.Close()
	}()
	for buf := range d.outCh {
		//TODO(jdef) I worry about this busy-looping

		// write & flush the buffer until there's nothing left in it, or else
		// we exceed the write/flush period.
		now := time.Now()
		for buf.Len() > 0 && time.Since(now) < writeFlushPeriod {
			select {
			case <-d.forceQuit:
				return
			default:
			}
			d.tryFlushResponse(buf)
		}
		if buf.Len() > 0 {
			//TODO(jdef) should we abort the entire connection instead? a partially written
			// response doesn't do anyone any good. That said, real libprocess agents don't
			// really care about the response channel anyway - the entire system is fire and
			// forget. So I've decided to err on the side that we might lose response bytes
			// in favor of completely reading the connection request stream before we terminate.
			log.Errorln(d.idtag + "failed to fully flush output buffer within write-flush period")
		}
	}
}

type body struct {
	*bytes.Buffer
}

func (b *body) Close() error { return nil }

// checkTimeoutOrFail tests whether the given error is related to a timeout condition.
// returns true if the caller should advance to the returned state.
func (d *httpDecoder) checkTimeoutOrFail(err error, stateContinue httpState) (httpState, bool) {
	if err != nil {
		if neterr, ok := err.(net.Error); ok && neterr.Timeout() {
			select {
			case <-d.forceQuit:
				return terminateState, true
			case <-d.shouldQuit:
				return gracefulTerminateState, true
			default:
				return stateContinue, true
			}
		}
		d.sendError(err)
		return terminateState, true
	}
	return nil, false
}

func (d *httpDecoder) setReadTimeoutOrFail() bool {
	if d.readTimeout > 0 {
		err := d.con.SetReadDeadline(time.Now().Add(d.readTimeout))
		if err != nil {
			d.sendError(err)
			return false
		}
	}
	return true
}

func (d *httpDecoder) setWriteTimeout() error {
	if d.writeTimeout > 0 {
		return d.con.SetWriteDeadline(time.Now().Add(d.writeTimeout))
	}
	return nil
}

func readChunkHeaderState(d *httpDecoder) httpState {
	log.V(2).Infoln(d.idtag + "read-chunk-header-state")
	tr := textproto.NewReader(d.rw.Reader)
	if !d.setReadTimeoutOrFail() {
		return terminateState
	}
	hexlen, err := tr.ReadLine()
	if next, ok := d.checkTimeoutOrFail(err, readChunkHeaderState); ok {
		return next
	}

	clen, err := strconv.ParseInt(hexlen, 16, 64)
	if err != nil {
		d.sendError(err)
		return terminateState
	}

	if clen == 0 {
		return readEndOfChunkStreamState
	}

	d.lrc = limit(d.rw.Reader, clen)
	return readChunkState
}

func readChunkState(d *httpDecoder) httpState {
	log.V(2).Infoln(d.idtag+"read-chunk-state, bytes remaining:", d.lrc.N)
	if !d.setReadTimeoutOrFail() {
		return terminateState
	}
	_, err := d.buf.ReadFrom(d.lrc)
	if next, ok := d.checkTimeoutOrFail(err, readChunkState); ok {
		return next
	}
	return readEndOfChunkState
}

const crlf = "\r\n"

func readEndOfChunkState(d *httpDecoder) httpState {
	log.V(2).Infoln(d.idtag + "read-end-of-chunk-state")
	if !d.setReadTimeoutOrFail() {
		return terminateState
	}
	b, err := d.rw.Reader.Peek(2)
	if len(b) == 2 {
		if string(b) == crlf {
			d.rw.ReadByte()
			d.rw.ReadByte()
			return readChunkHeaderState
		}
		d.sendError(errors.New(d.idtag + "unexpected data at end-of-chunk marker"))
		return terminateState
	}
	// less than two bytes avail
	if next, ok := d.checkTimeoutOrFail(err, readEndOfChunkState); ok {
		return next
	}
	panic("couldn't peek 2 bytes, but didn't get an error?!")
}

func readEndOfChunkStreamState(d *httpDecoder) httpState {
	log.V(2).Infoln(d.idtag + "read-end-of-chunk-stream-state")
	if !d.setReadTimeoutOrFail() {
		return terminateState
	}
	b, err := d.rw.Reader.Peek(2)
	if len(b) == 2 {
		if string(b) == crlf {
			d.rw.ReadByte()
			d.rw.ReadByte()
			return d.generateRequest()
		}
		d.sendError(errors.New(d.idtag + "unexpected data at end-of-chunk marker"))
		return terminateState
	}
	// less than 2 bytes avail
	if next, ok := d.checkTimeoutOrFail(err, readEndOfChunkStreamState); ok {
		return next
	}
	panic("couldn't peek 2 bytes, but didn't get an error?!")
}
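// For reference (annotation, not in the original source): the chunk stream
// that the three states above cooperate to parse looks like
//
//	a\r\n              <- readChunkHeaderState: hex-encoded chunk length
//	libprocess\r\n     <- readChunkState reads 0xa bytes; readEndOfChunkState eats the CRLF
//	0\r\n              <- a zero-length chunk ends the stream
//	\r\n               <- readEndOfChunkStreamState, then generateRequest()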
func readBodyState(d *httpDecoder) httpState {
	log.V(2).Infof(d.idtag+"read-body-state: %d bytes remaining", d.lrc.N)
	// read remaining bytes into the buffer
	var err error
	if d.lrc.N > 0 {
		if !d.setReadTimeoutOrFail() {
			return terminateState
		}
		_, err = d.buf.ReadFrom(d.lrc)
	}
	if d.lrc.N <= 0 {
		return d.generateRequest()
	}
	if next, ok := d.checkTimeoutOrFail(err, readBodyState); ok {
		return next
	}
	return readBodyState
}

func isGracefulTermSignal(err error) bool {
	if err == io.EOF {
		return true
	}
	if operr, ok := err.(*net.OpError); ok {
		return operr.Op == "read" && err == syscall.ECONNRESET
	}
	return false
}

func awaitRequestState(d *httpDecoder) httpState {
	log.V(2).Infoln(d.idtag + "await-request-state")
	tr := textproto.NewReader(d.rw.Reader)
	if !d.setReadTimeoutOrFail() {
		return terminateState
	}
	requestLine, err := tr.ReadLine()
	if requestLine == "" && isGracefulTermSignal(err) {
		// we're actually expecting this at some point, so don't react poorly
		return gracefulTerminateState
	}
	if next, ok := d.checkTimeoutOrFail(err, awaitRequestState); ok {
		return next
	}
	ss := strings.SplitN(requestLine, " ", 3)
	if len(ss) < 3 {
		if err == io.EOF {
			return gracefulTerminateState
		}
		d.sendError(errors.New(d.idtag + "illegal request line"))
		return terminateState
	}
	r := d.req
	r.Method = ss[0]
	r.RequestURI = ss[1]
	r.URL, err = url.ParseRequestURI(ss[1])
	if err != nil {
		d.sendError(err)
		return terminateState
	}
	major, minor, ok := http.ParseHTTPVersion(ss[2])
	if !ok {
		d.sendError(errors.New(d.idtag + "malformed HTTP version"))
		return terminateState
	}
	r.ProtoMajor = major
	r.ProtoMinor = minor
	r.Proto = ss[2]
	return readHeaderState
}

func readHeaderState(d *httpDecoder) httpState {
	log.V(2).Infoln(d.idtag + "read-header-state")
	if !d.setReadTimeoutOrFail() {
		return terminateState
	}
	r := d.req
	tr := textproto.NewReader(d.rw.Reader)
	h, err := tr.ReadMIMEHeader()
	// merge any headers that were read successfully (before a possible error)
	for k, v := range h {
		if rh, exists := r.Header[k]; exists {
			r.Header[k] = append(rh, v...)
		} else {
			r.Header[k] = v
		}
		log.V(2).Infoln(d.idtag+"request header", k, v)
	}
	if next, ok := d.checkTimeoutOrFail(err, readHeaderState); ok {
		return next
	}

	// special headers: Host, Content-Length, Transfer-Encoding
	r.Host = r.Header.Get("Host")
	r.TransferEncoding = r.Header["Transfer-Encoding"]
	if cl := r.Header.Get("Content-Length"); cl != "" {
		l, err := strconv.ParseInt(cl, 10, 64)
		if err != nil {
			d.sendError(err)
			return terminateState
		}
		if l > -1 {
			r.ContentLength = l
			log.V(2).Infoln(d.idtag+"set content length", r.ContentLength)
		}
	}
	d.updateForRequest()
	return d.readBodyContent()
}
397 Godeps/_workspace/src/github.com/mesos/mesos-go/messenger/http_transporter.go generated vendored
@ -20,14 +20,17 @@ package messenger

import (
	"bytes"
	"encoding/hex"
	"errors"
	"fmt"
	"io"
	"io/ioutil"
	"net"
	"net/http"
	"net/url"
	"os"
	"strings"
	"sync"
	"sync/atomic"
	"syscall"
	"time"

@ -38,42 +41,120 @@ import (

var (
	discardOnStopError = fmt.Errorf("discarding message because transport is shutting down")
	errNotStarted      = errors.New("HTTP transport has not been started")
	errTerminal        = errors.New("HTTP transport is terminated")
	errAlreadyRunning  = errors.New("HTTP transport is already running")

	httpTransport, httpClient = &http.Transport{
		Dial: (&net.Dialer{
			Timeout:   10 * time.Second,
			KeepAlive: 30 * time.Second,
		}).Dial,
		TLSHandshakeTimeout: 10 * time.Second,
	},
		&http.Client{
			Transport: httpTransport,
			Timeout:   DefaultReadTimeout,
		}
)

// httpTransporter is a subset of the Transporter interface
type httpTransporter interface {
	Send(ctx context.Context, msg *Message) error
	Recv() (*Message, error)
	Inject(ctx context.Context, msg *Message) error
	Install(messageName string)
	Start() (upid.UPID, <-chan error)
	Stop(graceful bool) error
}

type notStartedState struct {
	h *HTTPTransporter
}

type stoppedState struct{}

type runningState struct {
	*notStartedState
}

/* -- not-started state */

func (s *notStartedState) Send(ctx context.Context, msg *Message) error   { return errNotStarted }
func (s *notStartedState) Recv() (*Message, error)                        { return nil, errNotStarted }
func (s *notStartedState) Inject(ctx context.Context, msg *Message) error { return errNotStarted }
func (s *notStartedState) Stop(graceful bool) error                       { return errNotStarted }
func (s *notStartedState) Install(messageName string)                     { s.h.install(messageName) }
func (s *notStartedState) Start() (upid.UPID, <-chan error) {
	s.h.state = &runningState{s}
	return s.h.start()
}

/* -- stopped state */

func (s *stoppedState) Send(ctx context.Context, msg *Message) error   { return errTerminal }
func (s *stoppedState) Recv() (*Message, error)                        { return nil, errTerminal }
func (s *stoppedState) Inject(ctx context.Context, msg *Message) error { return errTerminal }
func (s *stoppedState) Stop(graceful bool) error                       { return errTerminal }
func (s *stoppedState) Install(messageName string)                     {}
func (s *stoppedState) Start() (upid.UPID, <-chan error) {
	ch := make(chan error, 1)
	ch <- errTerminal
	return upid.UPID{}, ch
}

/* -- running state */

func (s *runningState) Send(ctx context.Context, msg *Message) error   { return s.h.send(ctx, msg) }
func (s *runningState) Recv() (*Message, error)                        { return s.h.recv() }
func (s *runningState) Inject(ctx context.Context, msg *Message) error { return s.h.inject(ctx, msg) }
func (s *runningState) Stop(graceful bool) error {
	s.h.state = &stoppedState{}
	return s.h.stop(graceful)
}
func (s *runningState) Start() (upid.UPID, <-chan error) {
	ch := make(chan error, 1)
	ch <- errAlreadyRunning
	return upid.UPID{}, ch
}
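// exampleLifecycle sketches (hypothetical helper, not part of the original
// file) the fail-fast behavior the three states above encode: lifecycle
// methods never block, they simply answer according to the current state.
func exampleLifecycle(pid upid.UPID) {
	t := NewHTTPTransporter(pid, nil)
	err := t.Send(context.TODO(), &Message{}) // errNotStarted: nothing running yet
	_, errCh := t.Start()                     // notStartedState -> runningState
	_ = t.Stop(true)                          // runningState -> stoppedState
	_, errCh = t.Start()                      // stoppedState is terminal...
	err = <-errCh                             // ...so this yields errTerminal
	_ = err
}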
|
||||
// HTTPTransporter implements the interfaces of the Transporter.
|
||||
type HTTPTransporter struct {
|
||||
// If the host is empty("") then it will listen on localhost.
|
||||
// If the port is empty("") then it will listen on random port.
|
||||
upid *upid.UPID
|
||||
upid upid.UPID
|
||||
listener net.Listener // TODO(yifan): Change to TCPListener.
|
||||
mux *http.ServeMux
|
||||
tr *http.Transport
|
||||
client *http.Client // TODO(yifan): Set read/write deadline.
|
||||
client *http.Client
|
||||
messageQueue chan *Message
|
||||
address net.IP // optional binding address
|
||||
started chan struct{}
|
||||
stopped chan struct{}
|
||||
stopping int32
|
||||
lifeLock sync.Mutex // protect lifecycle (start/stop) funcs
|
||||
shouldQuit chan struct{}
|
||||
stateLock sync.RWMutex // protect lifecycle (start/stop) funcs
|
||||
state httpTransporter
|
||||
}
|
||||
|
||||
// NewHTTPTransporter creates a new http transporter with an optional binding address.
|
||||
func NewHTTPTransporter(upid *upid.UPID, address net.IP) *HTTPTransporter {
|
||||
tr := &http.Transport{}
|
||||
func NewHTTPTransporter(upid upid.UPID, address net.IP) *HTTPTransporter {
|
||||
result := &HTTPTransporter{
|
||||
upid: upid,
|
||||
messageQueue: make(chan *Message, defaultQueueSize),
|
||||
mux: http.NewServeMux(),
|
||||
client: &http.Client{Transport: tr},
|
||||
tr: tr,
|
||||
client: httpClient,
|
||||
tr: httpTransport,
|
||||
address: address,
|
||||
started: make(chan struct{}),
|
||||
stopped: make(chan struct{}),
|
||||
shouldQuit: make(chan struct{}),
|
||||
}
|
||||
close(result.stopped)
|
||||
result.state = ¬StartedState{result}
|
||||
return result
|
||||
}
|
||||
|
||||
func (t *HTTPTransporter) getState() httpTransporter {
|
||||
t.stateLock.RLock()
|
||||
defer t.stateLock.RUnlock()
|
||||
return t.state
|
||||
}
|
||||
|
||||
// some network errors are probably recoverable, attempt to determine that here.
|
||||
func isRecoverableError(err error) bool {
|
||||
if urlErr, ok := err.(*url.Error); ok {
|
||||
|
@ -85,6 +166,7 @@ func isRecoverableError(err error) bool {
|
|||
return true
|
||||
}
|
||||
//TODO(jdef) this is pretty hackish, there's probably a better way
|
||||
//TODO(jdef) should also check for EHOSTDOWN and EHOSTUNREACH
|
||||
return (netErr.Op == "dial" && netErr.Net == "tcp" && netErr.Err == syscall.ECONNREFUSED)
|
||||
}
|
||||
log.V(2).Infof("unrecoverable error: %#v", err)
|
||||
|
@ -104,6 +186,10 @@ func (e *recoverableError) Error() string {
|
|||
|
||||
// Send sends the message to its specified upid.
|
||||
func (t *HTTPTransporter) Send(ctx context.Context, msg *Message) (sendError error) {
|
||||
return t.getState().Send(ctx, msg)
|
||||
}
|
||||
|
||||
func (t *HTTPTransporter) send(ctx context.Context, msg *Message) (sendError error) {
|
||||
log.V(2).Infof("Sending message to %v via http\n", msg.UPID)
|
||||
req, err := t.makeLibprocessRequest(msg)
|
||||
if err != nil {
|
||||
|
@ -120,7 +206,7 @@ func (t *HTTPTransporter) Send(ctx context.Context, msg *Message) (sendError err
|
|||
return ctx.Err()
|
||||
case <-time.After(duration):
|
||||
// ..retry request, continue
|
||||
case <-t.stopped:
|
||||
case <-t.shouldQuit:
|
||||
return discardOnStopError
|
||||
}
|
||||
}
|
||||
|
@ -164,7 +250,7 @@ func (t *HTTPTransporter) httpDo(ctx context.Context, req *http.Request, f func(
|
|||
select {
|
||||
case <-ctx.Done():
|
||||
return ctx.Err()
|
||||
case <-t.stopped:
|
||||
case <-t.shouldQuit:
|
||||
return discardOnStopError
|
||||
default: // continue
|
||||
}
|
||||
|
@ -178,7 +264,7 @@ func (t *HTTPTransporter) httpDo(ctx context.Context, req *http.Request, f func(
|
|||
return ctx.Err()
|
||||
case err := <-c:
|
||||
return err
|
||||
case <-t.stopped:
|
||||
case <-t.shouldQuit:
|
||||
t.tr.CancelRequest(req)
|
||||
<-c // Wait for f to return.
|
||||
return discardOnStopError
|
||||
|
@ -187,24 +273,32 @@ func (t *HTTPTransporter) httpDo(ctx context.Context, req *http.Request, f func(
|
|||
|
||||
// Recv returns the message, one at a time.
|
||||
func (t *HTTPTransporter) Recv() (*Message, error) {
|
||||
return t.getState().Recv()
|
||||
}
|
||||
|
||||
func (t *HTTPTransporter) recv() (*Message, error) {
|
||||
select {
|
||||
default:
|
||||
select {
|
||||
case msg := <-t.messageQueue:
|
||||
return msg, nil
|
||||
case <-t.stopped:
|
||||
case <-t.shouldQuit:
|
||||
}
|
||||
case <-t.stopped:
|
||||
case <-t.shouldQuit:
|
||||
}
|
||||
return nil, discardOnStopError
|
||||
}
// Inject places a message into the incoming message queue.
func (t *HTTPTransporter) Inject(ctx context.Context, msg *Message) error {
    return t.getState().Inject(ctx, msg)
}

func (t *HTTPTransporter) inject(ctx context.Context, msg *Message) error {
    select {
    case <-ctx.Done():
        return ctx.Err()
    case <-t.stopped:
    case <-t.shouldQuit:
        return discardOnStopError
    default: // continue
    }
@@ -214,15 +308,73 @@ func (t *HTTPTransporter) Inject(ctx context.Context, msg *Message) error {
        return nil
    case <-ctx.Done():
        return ctx.Err()
    case <-t.stopped:
    case <-t.shouldQuit:
        return discardOnStopError
    }
}

// Install the request URI according to the message's name.
func (t *HTTPTransporter) Install(msgName string) {
    t.getState().Install(msgName)
}

func (t *HTTPTransporter) install(msgName string) {
    requestURI := fmt.Sprintf("/%s/%s", t.upid.ID, msgName)
    t.mux.HandleFunc(requestURI, t.messageHandler)
    t.mux.HandleFunc(requestURI, t.messageDecoder)
}

type loggedListener struct {
    delegate net.Listener
    done     <-chan struct{}
}

func (l *loggedListener) Accept() (c net.Conn, err error) {
    c, err = l.delegate.Accept()
    if c != nil {
        log.Infoln("accepted connection from", c.RemoteAddr())
        c = logConnection(c)
    } else if err != nil {
        select {
        case <-l.done:
        default:
            log.Errorln("failed to accept connection:", err.Error())
        }
    }
    return
}

func (l *loggedListener) Close() (err error) {
    err = l.delegate.Close()
    if err != nil {
        select {
        case <-l.done:
        default:
            log.Errorln("error closing listener:", err.Error())
        }
    } else {
        log.Infoln("closed listener")
    }
    return
}

func (l *loggedListener) Addr() net.Addr { return l.delegate.Addr() }

func logConnection(c net.Conn) net.Conn {
    w := hex.Dumper(os.Stdout)
    r := io.TeeReader(c, w)
    return &loggedConnection{
        Conn:   c,
        reader: r,
    }
}

type loggedConnection struct {
    net.Conn
    reader io.Reader
}

func (c *loggedConnection) Read(b []byte) (int, error) {
    return c.reader.Read(b)
}
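// NOTE(editor): illustration only, not part of this commit. hex.Dumper returns
// a WriteCloser that renders anything written to it as a canonical hex dump,
// and io.TeeReader mirrors every successful Read into that writer, which is
// how loggedConnection exposes raw wire traffic at log level V(3):
//
//     w := hex.Dumper(os.Stdout)   // hex-dumps all bytes written to it
//     r := io.TeeReader(conn, w)   // reads from conn, copying into w
//     buf := make([]byte, 512)
//     n, err := r.Read(buf)        // buf[:n] also appears on stdout as hex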
// Listen starts listening on UPID. If UPID is empty, the transporter
@@ -252,6 +404,7 @@ func (t *HTTPTransporter) listen() error {
    }
    // Save the host:port in case they are not specified in upid.
    host, port, _ = net.SplitHostPort(ln.Addr().String())
    log.Infoln("listening on", host, "port", port)

    if len(t.upid.Host) == 0 {
        t.upid.Host = host
@@ -261,72 +414,81 @@ func (t *HTTPTransporter) listen() error {
        t.upid.Port = port
    }

    t.listener = ln
    if log.V(3) {
        t.listener = &loggedListener{delegate: ln, done: t.shouldQuit}
    } else {
        t.listener = ln
    }
    return nil
}

// Start starts the http transporter
func (t *HTTPTransporter) Start() <-chan error {
    t.lifeLock.Lock()
    defer t.lifeLock.Unlock()

    select {
    case <-t.started:
        // already started
        return nil
    case <-t.stopped:
        defer close(t.started)
        t.stopped = make(chan struct{})
        atomic.StoreInt32(&t.stopping, 0)
    default:
        panic("not started, not stopped, what am i? how can i start?")
    }
func (t *HTTPTransporter) Start() (upid.UPID, <-chan error) {
    t.stateLock.Lock()
    defer t.stateLock.Unlock()
    return t.state.Start()
}

// start expects to be guarded by stateLock
func (t *HTTPTransporter) start() (upid.UPID, <-chan error) {
    ch := make(chan error, 1)
    if err := t.listen(); err != nil {
        ch <- err
    } else {
        // TODO(yifan): Set read/write deadline.
        log.Infof("http transport listening on %v", t.listener.Addr())
        go func() {
            err := http.Serve(t.listener, t.mux)
            if atomic.CompareAndSwapInt32(&t.stopping, 1, 0) {
                ch <- nil
            } else {
                ch <- err
            }
        }()
        return upid.UPID{}, ch
    }
    return ch

    // TODO(yifan): Set read/write deadline.
    go func() {
        s := &http.Server{
            ReadTimeout:  DefaultReadTimeout,
            WriteTimeout: DefaultWriteTimeout,
            Handler:      t.mux,
        }
        err := s.Serve(t.listener)
        select {
        case <-t.shouldQuit:
            log.V(1).Infof("HTTP server stopped because of shutdown")
            ch <- nil
        default:
            if err != nil && log.V(1) {
                log.Errorln("HTTP server stopped with error", err.Error())
            } else {
                log.V(1).Infof("HTTP server stopped")
            }
            ch <- err
            t.Stop(false)
        }
    }()
    return t.upid, ch
}

// Stop stops the http transporter by closing the listener.
func (t *HTTPTransporter) Stop(graceful bool) error {
    t.lifeLock.Lock()
    defer t.lifeLock.Unlock()
    t.stateLock.Lock()
    defer t.stateLock.Unlock()
    return t.state.Stop(graceful)
}

// stop expects to be guarded by stateLock
func (t *HTTPTransporter) stop(graceful bool) error {
    close(t.shouldQuit)

    log.Info("stopping HTTP transport")

    select {
    case <-t.stopped:
        // already stopped
        return nil
    case <-t.started:
        defer close(t.stopped)
        t.started = make(chan struct{})
    default:
        panic("not started, not stopped, what am i? how can i stop?")
    }
    //TODO(jdef) if graceful, wait for pending requests to terminate
    atomic.StoreInt32(&t.stopping, 1)

    err := t.listener.Close()
    return err
}

// UPID returns the upid of the transporter.
func (t *HTTPTransporter) UPID() *upid.UPID {
func (t *HTTPTransporter) UPID() upid.UPID {
    t.stateLock.Lock()
    defer t.stateLock.Unlock()
    return t.upid
}

func (t *HTTPTransporter) messageHandler(w http.ResponseWriter, r *http.Request) {
func (t *HTTPTransporter) messageDecoder(w http.ResponseWriter, r *http.Request) {
    // Verify it's a libprocess request.
    from, err := getLibprocessFrom(r)
    if err != nil {
@@ -334,19 +496,86 @@ func (t *HTTPTransporter) messageHandler(w http.ResponseWriter, r *http.Request)
        w.WriteHeader(http.StatusBadRequest)
        return
    }
    data, err := ioutil.ReadAll(r.Body)
    decoder := DecodeHTTP(w, r)
    defer decoder.Cancel(true)

    t.processRequests(from, decoder.Requests())

    // log an error if there's one waiting, otherwise move on
    select {
    case err, ok := <-decoder.Err():
        if ok {
            log.Errorf("failed to decode HTTP message: %v", err)
        }
    default:
    }
}

func (t *HTTPTransporter) processRequests(from *upid.UPID, incoming <-chan *Request) {
    for {
        select {
        case r, ok := <-incoming:
            if !ok || !t.processOneRequest(from, r) {
                return
            }
        case <-t.shouldQuit:
            return
        }
    }
}

func (t *HTTPTransporter) processOneRequest(from *upid.UPID, request *Request) (keepGoing bool) {
    // regardless of whether we write a Response we must close this chan
    defer close(request.response)
    keepGoing = true

    //TODO(jdef) this is probably inefficient given the current implementation of the
    // decoder: no need to make another copy of data that's already completely buffered
    data, err := ioutil.ReadAll(request.Body)
    if err != nil {
        log.Errorf("Failed to read HTTP body: %v\n", err)
        w.WriteHeader(http.StatusBadRequest)
        // this is unlikely given the current implementation of the decoder:
        // the body has been completely buffered in memory already
        log.Errorf("failed to read HTTP body: %v", err)
        return
    }
    log.V(2).Infof("Receiving message from %v, length %v\n", from, len(data))
    w.WriteHeader(http.StatusAccepted)
    t.messageQueue <- &Message{
    log.V(2).Infof("Receiving %q %v from %v, length %v", request.Method, request.URL, from, len(data))
    m := &Message{
        UPID:  from,
        Name:  extractNameFromRequestURI(r.RequestURI),
        Name:  extractNameFromRequestURI(request.RequestURI),
        Bytes: data,
    }

    // deterministic behavior and output..
    select {
    case <-t.shouldQuit:
        keepGoing = false
        select {
        case t.messageQueue <- m:
        default:
        }
    case t.messageQueue <- m:
        select {
        case <-t.shouldQuit:
            keepGoing = false
        default:
        }
    }
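// NOTE(editor): explanatory aside, not part of this commit. The nested selects
// above make the outcome independent of which ready case the runtime happens
// to pick: whichever branch fires first, the other condition is re-checked
// with a non-blocking select, so keepGoing ends up false whenever shouldQuit
// is closed, and the message is still enqueued if the queue has room.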
    // Only send back an HTTP response if this isn't from libprocess
    // (which we determine by looking at the User-Agent). This is
    // necessary because older versions of libprocess would try and
    // recv the data and parse it as an HTTP request which would
    // fail thus causing the socket to get closed (but now
    // libprocess will ignore responses, see ignore_data).
    // see https://github.com/apache/mesos/blob/adecbfa6a216815bd7dc7d26e721c4c87e465c30/3rdparty/libprocess/src/process.cpp#L2192
    if _, ok := parseLibprocessAgent(request.Header); !ok {
        log.V(2).Infof("not libprocess agent, sending a 202")
        request.response <- Response{
            code:   202,
            reason: "Accepted",
        } // should never block
    }
    return
}

func (t *HTTPTransporter) makeLibprocessRequest(msg *Message) (*http.Request, error) {
@@ -361,9 +590,11 @@ func (t *HTTPTransporter) makeLibprocessRequest(msg *Message) (*http.Request, er
        log.Errorf("Failed to create request: %v\n", err)
        return nil, err
    }
    req.Header.Add("Libprocess-From", t.upid.String())
    if !msg.isV1API() {
        req.Header.Add("Libprocess-From", t.upid.String())
        req.Header.Add("Connection", "Keep-Alive")
    }
    req.Header.Add("Content-Type", "application/x-protobuf")
    req.Header.Add("Connection", "Keep-Alive")

    return req, nil
}

@@ -372,10 +603,8 @@ func getLibprocessFrom(r *http.Request) (*upid.UPID, error) {
    if r.Method != "POST" {
        return nil, fmt.Errorf("Not a POST request")
    }
    ua, ok := r.Header["User-Agent"]
    if ok && strings.HasPrefix(ua[0], "libprocess/") {
        // TODO(yifan): Just take the first field for now.
        return upid.Parse(ua[0][len("libprocess/"):])
    if agent, ok := parseLibprocessAgent(r.Header); ok {
        return upid.Parse(agent)
    }
    lf, ok := r.Header["Libprocess-From"]
    if ok {
@@ -384,3 +613,15 @@ func getLibprocessFrom(r *http.Request) (*upid.UPID, error) {
    }
    return nil, fmt.Errorf("Cannot find 'User-Agent' or 'Libprocess-From'")
}

func parseLibprocessAgent(h http.Header) (string, bool) {
    const prefix = "libprocess/"
    if ua, ok := h["User-Agent"]; ok {
        for _, agent := range ua {
            if strings.HasPrefix(agent, prefix) {
                return agent[len(prefix):], true
            }
        }
    }
    return "", false
}
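// NOTE(editor): illustration only, not part of this commit. Given a request
// carrying the header "User-Agent: libprocess/scheduler(1)@10.0.0.2:5050",
// parseLibprocessAgent returns ("scheduler(1)@10.0.0.2:5050", true), which
// getLibprocessFrom hands to upid.Parse; for any other User-Agent it returns
// ("", false) and the Libprocess-From header is consulted instead.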
136  Godeps/_workspace/src/github.com/mesos/mesos-go/messenger/http_transporter_test.go (generated, vendored)
@@ -1,7 +1,9 @@
package messenger

import (
    "bytes"
    "fmt"
    "io/ioutil"
    "net"
    "net/http"
    "net/http/httptest"
@@ -17,11 +19,8 @@ import (
)

func TestTransporterNew(t *testing.T) {
    id, err := upid.Parse(fmt.Sprintf("mesos1@localhost:%d", getNewPort()))
    assert.NoError(t, err)
    trans := NewHTTPTransporter(id, nil)
    trans := NewHTTPTransporter(upid.UPID{ID: "mesos1", Host: "localhost"}, nil)
    assert.NotNil(t, trans)
    assert.NotNil(t, trans.upid)
    assert.NotNil(t, trans.messageQueue)
    assert.NotNil(t, trans.client)
}
@@ -31,9 +30,6 @@ func TestTransporterSend(t *testing.T) {
    serverId := "testserver"

    // setup mesos client-side
    fromUpid, err := upid.Parse(fmt.Sprintf("mesos1@localhost:%d", getNewPort()))
    assert.NoError(t, err)

    protoMsg := testmessage.GenerateSmallMessage()
    msgName := getMessageName(protoMsg)
    msg := &Message{
@@ -55,8 +51,8 @@ func TestTransporterSend(t *testing.T) {
    assert.NoError(t, err)

    // make transport call.
    transport := NewHTTPTransporter(fromUpid, nil)
    errch := transport.Start()
    transport := NewHTTPTransporter(upid.UPID{ID: "mesos1", Host: "localhost"}, nil)
    _, errch := transport.Start()
    defer transport.Stop(false)

    msg.UPID = toUpid
@@ -78,9 +74,6 @@ func TestTransporter_DiscardedSend(t *testing.T) {
    serverId := "testserver"

    // setup mesos client-side
    fromUpid, err := upid.Parse(fmt.Sprintf("mesos1@localhost:%d", getNewPort()))
    assert.NoError(t, err)

    protoMsg := testmessage.GenerateSmallMessage()
    msgName := getMessageName(protoMsg)
    msg := &Message{
@@ -100,8 +93,8 @@ func TestTransporter_DiscardedSend(t *testing.T) {
    assert.NoError(t, err)

    // make transport call.
    transport := NewHTTPTransporter(fromUpid, nil)
    errch := transport.Start()
    transport := NewHTTPTransporter(upid.UPID{ID: "mesos1", Host: "localhost"}, nil)
    _, errch := transport.Start()
    defer transport.Stop(false)

    msg.UPID = toUpid
@@ -138,20 +131,18 @@ func TestTransporter_DiscardedSend(t *testing.T) {

func TestTransporterStartAndRcvd(t *testing.T) {
    serverId := "testserver"
    serverPort := getNewPort()
    serverAddr := "127.0.0.1:" + strconv.Itoa(serverPort)
    serverAddr := "127.0.0.1"
    protoMsg := testmessage.GenerateSmallMessage()
    msgName := getMessageName(protoMsg)
    ctrl := make(chan struct{})

    // setup receiver (server) process
    rcvPid, err := upid.Parse(fmt.Sprintf("%s@%s", serverId, serverAddr))
    assert.NoError(t, err)
    receiver := NewHTTPTransporter(rcvPid, nil)
    receiver := NewHTTPTransporter(upid.UPID{ID: serverId, Host: serverAddr}, nil)
    receiver.Install(msgName)

    go func() {
        defer close(ctrl)
        t.Logf("received something...")
        msg, err := receiver.Recv()
        assert.Nil(t, err)
        assert.NotNil(t, msg)
@@ -160,25 +151,23 @@ func TestTransporterStartAndRcvd(t *testing.T) {
        }
    }()

    errch := receiver.Start()
    rcvPid, errch := receiver.Start()
    defer receiver.Stop(false)
    assert.NotNil(t, errch)

    time.Sleep(time.Millisecond * 7) // time to catchup

    // setup sender (client) process
    sndUpid, err := upid.Parse(fmt.Sprintf("mesos1@localhost:%d", getNewPort()))
    assert.NoError(t, err)

    sender := NewHTTPTransporter(sndUpid, nil)
    sender := NewHTTPTransporter(upid.UPID{ID: "mesos1", Host: "localhost"}, nil)
    msg := &Message{
        UPID:         rcvPid,
        UPID:         &rcvPid,
        Name:         msgName,
        ProtoMessage: protoMsg,
    }
    errch2 := sender.Start()
    _, errch2 := sender.Start()
    defer sender.Stop(false)

    t.Logf("sending test message")
    sender.Send(context.TODO(), msg)

    select {
@@ -198,22 +187,18 @@ func TestTransporterStartAndRcvd(t *testing.T) {

func TestTransporterStartAndInject(t *testing.T) {
    serverId := "testserver"
    serverPort := getNewPort()
    serverAddr := "127.0.0.1:" + strconv.Itoa(serverPort)
    protoMsg := testmessage.GenerateSmallMessage()
    msgName := getMessageName(protoMsg)
    ctrl := make(chan struct{})

    // setup receiver (server) process
    rcvPid, err := upid.Parse(fmt.Sprintf("%s@%s", serverId, serverAddr))
    assert.NoError(t, err)
    receiver := NewHTTPTransporter(rcvPid, nil)
    receiver := NewHTTPTransporter(upid.UPID{ID: serverId, Host: "127.0.0.1"}, nil)
    receiver.Install(msgName)
    errch := receiver.Start()
    rcvPid, errch := receiver.Start()
    defer receiver.Stop(false)

    msg := &Message{
        UPID:         rcvPid,
        UPID:         &rcvPid,
        Name:         msgName,
        ProtoMessage: protoMsg,
    }
@@ -243,15 +228,11 @@ func TestTransporterStartAndInject(t *testing.T) {

func TestTransporterStartAndStop(t *testing.T) {
    serverId := "testserver"
    serverPort := getNewPort()
    serverAddr := "127.0.0.1:" + strconv.Itoa(serverPort)

    // setup receiver (server) process
    rcvPid, err := upid.Parse(fmt.Sprintf("%s@%s", serverId, serverAddr))
    assert.NoError(t, err)
    receiver := NewHTTPTransporter(rcvPid, nil)
    receiver := NewHTTPTransporter(upid.UPID{ID: serverId, Host: "127.0.0.1"}, nil)

    errch := receiver.Start()
    _, errch := receiver.Start()
    assert.NotNil(t, errch)

    time.Sleep(1 * time.Second)
@@ -269,7 +250,10 @@ func TestTransporterStartAndStop(t *testing.T) {

func TestMutatedHostUPid(t *testing.T) {
    serverId := "testserver"
    serverPort := getNewPort()
    // NOTE(tsenart): This static port can cause conflicts if multiple instances
    // of this test run concurrently or else if this port is already bound by
    // another socket.
    serverPort := 12345
    serverHost := "127.0.0.1"
    serverAddr := serverHost + ":" + strconv.Itoa(serverPort)

@@ -279,7 +263,7 @@ func TestMutatedHostUPid(t *testing.T) {
    // setup receiver (server) process
    uPid, err := upid.Parse(fmt.Sprintf("%s@%s", serverId, serverAddr))
    assert.NoError(t, err)
    receiver := NewHTTPTransporter(uPid, addr)
    receiver := NewHTTPTransporter(*uPid, addr)

    err = receiver.listen()
    assert.NoError(t, err)
@@ -294,36 +278,22 @@ func TestMutatedHostUPid(t *testing.T) {
}

func TestEmptyHostPortUPid(t *testing.T) {
    serverId := "testserver"
    serverPort := getNewPort()
    serverHost := "127.0.0.1"
    serverAddr := serverHost + ":" + strconv.Itoa(serverPort)

    // setup receiver (server) process
    uPid, err := upid.Parse(fmt.Sprintf("%s@%s", serverId, serverAddr))
    assert.NoError(t, err)

    // Unset upid host and port
    uPid.Host = ""
    uPid.Port = ""
    uPid := upid.UPID{ID: "testserver"}

    // override the upid.Host with this listener IP
    addr := net.ParseIP("0.0.0.0")

    receiver := NewHTTPTransporter(uPid, addr)

    err = receiver.listen()
    err := receiver.listen()
    assert.NoError(t, err)

    // This should be the host that overrides as uPid.Host is empty
    if receiver.upid.Host != "0.0.0.0" {
        t.Fatalf("reciever.upid.Host was expected to return %s, got %s\n", serverHost, receiver.upid.Host)
        t.Fatalf("reciever.upid.Host was expected to return 0.0.0.0, got %q", receiver.upid.Host)
    }

    // This should end up being a random port, not the server port as uPid
    // port is empty
    if receiver.upid.Port == strconv.Itoa(serverPort) {
        t.Fatalf("receiver.upid.Port was not expected to return %d, got %s\n", serverPort, receiver.upid.Port)
    if receiver.upid.Port == "0" {
        t.Fatalf("receiver.upid.Port was not expected to return 0, got %q", receiver.upid.Port)
    }
}

@@ -332,3 +302,49 @@ func makeMockServer(path string, handler func(rsp http.ResponseWriter, req *http
    mux.HandleFunc(path, handler)
    return httptest.NewServer(mux)
}

func TestProcessOneRequest(t *testing.T) {
    ht := &HTTPTransporter{
        messageQueue: make(chan *Message, 1),
        shouldQuit:   make(chan struct{}),
    }
    testfunc := func(expectProceed bool) {
        rchan := make(chan Response, 1)
        proceed := ht.processOneRequest(&upid.UPID{ID: "james"}, &Request{
            response: rchan,
            Request: &http.Request{
                Method:     "foo",
                RequestURI: "a/z/bar",
                Body:       ioutil.NopCloser(&bytes.Reader{}),
            },
        })
        // expecting to get a 202 response since the request doesn't have libprocess headers
        if proceed != expectProceed {
            t.Fatalf("expected proceed signal %t instead of %t", expectProceed, proceed)
        }
        select {
        case resp := <-rchan:
            if resp.code != 202 {
                t.Fatalf("expected a 202 response for all libprocess requests")
            }
        default:
            t.Fatalf("expected a response since we're not a libprocess agent")
        }
        select {
        case m := <-ht.messageQueue:
            // From, Name, Data
            assert.Equal(t, "james", m.UPID.ID)
            assert.Equal(t, "bar", m.Name)
        default:
            t.Fatalf("expected a message for the request that was processed")
        }
    }
    t.Log("testing w/o shouldQuit signal")
    testfunc(true)

    t.Log("testing w/ shouldQuit signal")
    close(ht.shouldQuit)
    for i := 0; i < 100; i++ {
        testfunc(false) // do this in a loop to test determinism
    }
}

@@ -36,9 +36,17 @@ type Message struct {

// RequestURI returns the request URI of the message.
func (m *Message) RequestURI() string {
    if m.isV1API() {
        return fmt.Sprintf("/api/v1/%s", m.Name)
    }

    return fmt.Sprintf("/%s/%s", m.UPID.ID, m.Name)
}

func (m *Message) isV1API() bool {
    return !strings.HasPrefix(m.Name, "mesos.internal")
}

// NOTE: This should not fail or panic.
func extractNameFromRequestURI(requestURI string) string {
    return strings.Split(requestURI, "/")[2]
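// NOTE(editor): illustration only, not part of this commit. With these rules,
// a v1 message named "scheduler" is addressed to "/api/v1/scheduler", while an
// internal message such as "mesos.internal.StatusUpdateMessage" sent to UPID
// "master(1)" maps to "/master(1)/mesos.internal.StatusUpdateMessage";
// extractNameFromRequestURI recovers the name as the second path segment:
// strings.Split("/master(1)/mesos.internal.StatusUpdateMessage", "/")[2]
// yields "mesos.internal.StatusUpdateMessage".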
@@ -19,12 +19,11 @@
package messenger

import (
    "flag"
    "fmt"
    "net"
    "reflect"
    "strconv"
    "time"
    "sync"

    "github.com/gogo/protobuf/proto"
    log "github.com/golang/glog"
@@ -36,21 +35,8 @@ import (

const (
    defaultQueueSize = 1024
    preparePeriod    = time.Second * 1
)

var (
    sendRoutines   int
    encodeRoutines int
    decodeRoutines int
)

func init() {
    flag.IntVar(&sendRoutines, "send-routines", 1, "Number of network sending routines")
    flag.IntVar(&encodeRoutines, "encode-routines", 1, "Number of encoding routines")
    flag.IntVar(&decodeRoutines, "decode-routines", 1, "Number of decoding routines")
}

// MessageHandler is the callback of the message. When the callback
// is invoked, the sender's upid and the message is passed to the callback.
type MessageHandler func(from *upid.UPID, pbMsg proto.Message)
@@ -62,24 +48,25 @@ type Messenger interface {
    Route(ctx context.Context, from *upid.UPID, msg proto.Message) error
    Start() error
    Stop() error
    UPID() *upid.UPID
    UPID() upid.UPID
}

// MesosMessenger is an implementation of the Messenger interface.
type MesosMessenger struct {
    upid              *upid.UPID
    upid              upid.UPID
    encodingQueue     chan *Message
    sendingQueue      chan *Message
    installedMessages map[string]reflect.Type
    installedHandlers map[string]MessageHandler
    stop              chan struct{}
    stopOnce          sync.Once
    tr                Transporter
}

// ForHostname creates a new default messenger (HTTP), using UPIDBindingAddress to
// determine the binding-address used for both the UPID.Host and Transport binding address.
func ForHostname(proc *process.Process, hostname string, bindingAddress net.IP, port uint16, publishedAddress net.IP) (Messenger, error) {
    upid := &upid.UPID{
    upid := upid.UPID{
        ID:   proc.Label(),
        Port: strconv.Itoa(int(port)),
    }
@@ -149,17 +136,16 @@ func UPIDBindingAddress(hostname string, bindingAddress net.IP) (string, error)
}

// NewMesosMessenger creates a new mesos messenger.
func NewHttp(upid *upid.UPID) *MesosMessenger {
func NewHttp(upid upid.UPID) *MesosMessenger {
    return NewHttpWithBindingAddress(upid, nil)
}

func NewHttpWithBindingAddress(upid *upid.UPID, address net.IP) *MesosMessenger {
func NewHttpWithBindingAddress(upid upid.UPID, address net.IP) *MesosMessenger {
    return New(upid, NewHTTPTransporter(upid, address))
}

func New(upid *upid.UPID, t Transporter) *MesosMessenger {
func New(upid upid.UPID, t Transporter) *MesosMessenger {
    return &MesosMessenger{
        upid:              upid,
        encodingQueue:     make(chan *Message, defaultQueueSize),
        sendingQueue:      make(chan *Message, defaultQueueSize),
        installedMessages: make(map[string]reflect.Type),
@@ -194,7 +180,7 @@ func (m *MesosMessenger) Install(handler MessageHandler, msg proto.Message) erro
func (m *MesosMessenger) Send(ctx context.Context, upid *upid.UPID, msg proto.Message) error {
    if upid == nil {
        panic("cannot sent a message to a nil pid")
    } else if upid.Equal(m.upid) {
    } else if *upid == m.upid {
        return fmt.Errorf("Send the message to self")
    }
    name := getMessageName(msg)
@@ -212,11 +198,15 @@ func (m *MesosMessenger) Send(ctx context.Context, upid *upid.UPID, msg proto.Me
// 1) routing internal error to callback handlers
// 2) testing components without starting remote servers.
func (m *MesosMessenger) Route(ctx context.Context, upid *upid.UPID, msg proto.Message) error {
    // if destination is not self, send to outbound.
    if !upid.Equal(m.upid) {
    if upid == nil {
        panic("cannot route a message to a nil pid")
    } else if *upid != m.upid {
        // if destination is not self, send to outbound.
        return m.Send(ctx, upid, msg)
    }

    // TODO(jdef) this has an unfortunate performance impact for self-messaging. implement
    // something more reasonable here.
    data, err := proto.Marshal(msg)
    if err != nil {
        return err
@@ -225,30 +215,29 @@ func (m *MesosMessenger) Route(ctx context.Context, upid *upid.UPID, msg proto.M
    return m.tr.Inject(ctx, &Message{upid, name, msg, data})
}

// Start starts the messenger.
// Start starts the messenger; expects to be called once and only once.
func (m *MesosMessenger) Start() error {

    m.stop = make(chan struct{})
    errChan := m.tr.Start()

    select {
    case err := <-errChan:
        log.Errorf("failed to start messenger: %v", err)
        return err
    case <-time.After(preparePeriod): // continue
    pid, errChan := m.tr.Start()
    if pid == (upid.UPID{}) {
        err := <-errChan
        return fmt.Errorf("failed to start messenger: %v", err)
    }

    m.upid = m.tr.UPID()
    // the pid that we're actually bound as
    m.upid = pid

    for i := 0; i < sendRoutines; i++ {
        go m.sendLoop()
    }
    for i := 0; i < encodeRoutines; i++ {
        go m.encodeLoop()
    }
    for i := 0; i < decodeRoutines; i++ {
        go m.decodeLoop()
    }
    go m.sendLoop()
    go m.encodeLoop()
    go m.decodeLoop()

    // wait for a listener error or a stop signal; either way stop the messenger

    // TODO(jdef) a better implementation would attempt to re-listen; need to coordinate
    // access to m.upid in that case. probably better off with a state machine instead of
    // what we have now.
    go func() {
        select {
        case err := <-errChan:
@@ -256,7 +245,11 @@ func (m *MesosMessenger) Start() error {
            //TODO(jdef) should the driver abort in this case? probably
            //since this messenger will never attempt to re-establish the
            //transport
            log.Error(err)
            log.Errorln("transport stopped unexpectedly:", err.Error())
            }
            err = m.Stop()
            if err != nil && err != errTerminal {
                log.Errorln("failed to stop messenger cleanly: ", err.Error())
            }
        case <-m.stop:
        }
@@ -265,18 +258,27 @@ func (m *MesosMessenger) Start() error {
}

// Stop stops the messenger and clean up all the goroutines.
func (m *MesosMessenger) Stop() error {
    //TODO(jdef) don't hardcode the graceful flag here
    if err := m.tr.Stop(true); err != nil {
        log.Errorf("Failed to stop the transporter: %v\n", err)
        return err
    }
    close(m.stop)
    return nil
func (m *MesosMessenger) Stop() (err error) {
    m.stopOnce.Do(func() {
        select {
        case <-m.stop:
        default:
            defer close(m.stop)
        }

        log.Info("stopping messenger..")

        //TODO(jdef) don't hardcode the graceful flag here
        if err2 := m.tr.Stop(true); err2 != nil && err2 != errTerminal {
            log.Warningf("failed to stop the transporter: %v\n", err2)
            err = err2
        }
    })
    return
}

// UPID returns the upid of the messenger.
func (m *MesosMessenger) UPID() *upid.UPID {
func (m *MesosMessenger) UPID() upid.UPID {
    return m.upid
}

@@ -317,7 +319,8 @@ func (m *MesosMessenger) reportError(err error) {
    defer cancel()

    c := make(chan error, 1)
    go func() { c <- m.Route(ctx, m.UPID(), &mesos.FrameworkErrorMessage{Message: proto.String(err.Error())}) }()
    pid := m.upid
    go func() { c <- m.Route(ctx, &pid, &mesos.FrameworkErrorMessage{Message: proto.String(err.Error())}) }()
    select {
    case <-ctx.Done():
        <-c // wait for Route to return
@@ -388,5 +391,14 @@ func (m *MesosMessenger) decodeLoop() {

// getMessageName returns the name of the message in the mesos manner.
func getMessageName(msg proto.Message) string {
    return fmt.Sprintf("%v.%v", "mesos.internal", reflect.TypeOf(msg).Elem().Name())
    var msgName string

    switch msg := msg.(type) {
    case *mesos.Call:
        msgName = "scheduler"
    default:
        msgName = fmt.Sprintf("%v.%v", "mesos.internal", reflect.TypeOf(msg).Elem().Name())
    }

    return msgName
}
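// NOTE(editor): explanatory aside, not part of this commit. The type switch
// keys the wire name off the concrete protobuf type: a *mesos.Call (the v1
// scheduler call) is named "scheduler", which Message.RequestURI maps to
// "/api/v1/scheduler"; any other message type, e.g.
// *mesos.FrameworkRegisteredMessage, is named via reflection as
// "mesos.internal.FrameworkRegisteredMessage" and keeps the old
// "/<upid.ID>/<name>" routing.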
@@ -6,7 +6,6 @@ import (
    "net"
    "net/http"
    "net/http/httptest"
    "strconv"
    "sync"
    "testing"
    "time"
@@ -19,19 +18,13 @@ import (
)

var (
    startPort = 10000 + rand.Intn(30000)
    globalWG  = new(sync.WaitGroup)
    globalWG = new(sync.WaitGroup)
)

func noopHandler(*upid.UPID, proto.Message) {
    globalWG.Done()
}

func getNewPort() int {
    startPort++
    return startPort
}

func shuffleMessages(queue *[]proto.Message) {
    for i := range *queue {
        index := rand.Intn(i + 1)
@@ -136,39 +129,25 @@ func runTestServer(b *testing.B, wg *sync.WaitGroup) *httptest.Server {
}

func TestMessengerFailToInstall(t *testing.T) {
    m := NewHttp(&upid.UPID{ID: "mesos"})
    m := NewHttp(upid.UPID{ID: "mesos"})
    handler := func(from *upid.UPID, pbMsg proto.Message) {}
    assert.NotNil(t, m)
    assert.NoError(t, m.Install(handler, &testmessage.SmallMessage{}))
    assert.Error(t, m.Install(handler, &testmessage.SmallMessage{}))
}

func TestMessengerFailToStart(t *testing.T) {
    port := strconv.Itoa(getNewPort())
    m1 := NewHttp(&upid.UPID{ID: "mesos", Host: "localhost", Port: port})
    m2 := NewHttp(&upid.UPID{ID: "mesos", Host: "localhost", Port: port})
    assert.NoError(t, m1.Start())
    assert.Error(t, m2.Start())
}

func TestMessengerFailToSend(t *testing.T) {
    upid, err := upid.Parse(fmt.Sprintf("mesos1@localhost:%d", getNewPort()))
    assert.NoError(t, err)
    m := NewHttp(upid)
    m := NewHttp(upid.UPID{ID: "foo", Host: "localhost"})
    assert.NoError(t, m.Start())
    assert.Error(t, m.Send(context.TODO(), upid, &testmessage.SmallMessage{}))
    self := m.UPID()
    assert.Error(t, m.Send(context.TODO(), &self, &testmessage.SmallMessage{}))
}

func TestMessenger(t *testing.T) {
    messages := generateMixedMessages(1000)

    upid1, err := upid.Parse(fmt.Sprintf("mesos1@localhost:%d", getNewPort()))
    assert.NoError(t, err)
    upid2, err := upid.Parse(fmt.Sprintf("mesos2@localhost:%d", getNewPort()))
    assert.NoError(t, err)

    m1 := NewHttp(upid1)
    m2 := NewHttp(upid2)
    m1 := NewHttp(upid.UPID{ID: "mesos1", Host: "localhost"})
    m2 := NewHttp(upid.UPID{ID: "mesos2", Host: "localhost"})

    done := make(chan struct{})
    counts := make([]int, 4)
@@ -177,10 +156,11 @@ func TestMessenger(t *testing.T) {

    assert.NoError(t, m1.Start())
    assert.NoError(t, m2.Start())
    upid2 := m2.UPID()

    go func() {
        for _, msg := range messages {
            assert.NoError(t, m1.Send(context.TODO(), upid2, msg))
            assert.NoError(t, m1.Send(context.TODO(), &upid2, msg))
        }
    }()

@@ -204,20 +184,20 @@ func BenchmarkMessengerSendSmallMessage(b *testing.B) {
    srv := runTestServer(b, wg)
    defer srv.Close()

    upid1, err := upid.Parse(fmt.Sprintf("mesos1@localhost:%d", getNewPort()))
    assert.NoError(b, err)
    upid2, err := upid.Parse(fmt.Sprintf("testserver@%s", srv.Listener.Addr().String()))

    assert.NoError(b, err)

    m1 := NewHttp(upid1)
    m1 := NewHttp(upid.UPID{ID: "mesos1", Host: "localhost"})
    assert.NoError(b, m1.Start())
    defer m1.Stop()

    b.ResetTimer()
    for i := 0; i < b.N; i++ {
        m1.Send(context.TODO(), upid2, messages[i%1000])
    }
    wg.Wait()
    b.StopTimer()
    time.Sleep(2 * time.Second) // allow time for connection cleanup
}

func BenchmarkMessengerSendMediumMessage(b *testing.B) {
@@ -228,19 +208,20 @@ func BenchmarkMessengerSendMediumMessage(b *testing.B) {
    srv := runTestServer(b, wg)
    defer srv.Close()

    upid1, err := upid.Parse(fmt.Sprintf("mesos1@localhost:%d", getNewPort()))
    assert.NoError(b, err)
    upid2, err := upid.Parse(fmt.Sprintf("testserver@%s", srv.Listener.Addr().String()))
    assert.NoError(b, err)

    m1 := NewHttp(upid1)
    m1 := NewHttp(upid.UPID{ID: "mesos1", Host: "localhost"})
    assert.NoError(b, m1.Start())
    defer m1.Stop()

    b.ResetTimer()
    for i := 0; i < b.N; i++ {
        m1.Send(context.TODO(), upid2, messages[i%1000])
    }
    wg.Wait()
    b.StopTimer()
    time.Sleep(2 * time.Second) // allow time for connection cleanup
}

func BenchmarkMessengerSendBigMessage(b *testing.B) {
@@ -251,19 +232,20 @@ func BenchmarkMessengerSendBigMessage(b *testing.B) {
    srv := runTestServer(b, wg)
    defer srv.Close()

    upid1, err := upid.Parse(fmt.Sprintf("mesos1@localhost:%d", getNewPort()))
    assert.NoError(b, err)
    upid2, err := upid.Parse(fmt.Sprintf("testserver@%s", srv.Listener.Addr().String()))
    assert.NoError(b, err)

    m1 := NewHttp(upid1)
    m1 := NewHttp(upid.UPID{ID: "mesos1", Host: "localhost"})
    assert.NoError(b, m1.Start())
    defer m1.Stop()

    b.ResetTimer()
    for i := 0; i < b.N; i++ {
        m1.Send(context.TODO(), upid2, messages[i%1000])
    }
    wg.Wait()
    b.StopTimer()
    time.Sleep(2 * time.Second) // allow time for connection cleanup
}

func BenchmarkMessengerSendLargeMessage(b *testing.B) {
@@ -274,19 +256,20 @@ func BenchmarkMessengerSendLargeMessage(b *testing.B) {
    srv := runTestServer(b, wg)
    defer srv.Close()

    upid1, err := upid.Parse(fmt.Sprintf("mesos1@localhost:%d", getNewPort()))
    assert.NoError(b, err)
    upid2, err := upid.Parse(fmt.Sprintf("testserver@%s", srv.Listener.Addr().String()))
    assert.NoError(b, err)

    m1 := NewHttp(upid1)
    m1 := NewHttp(upid.UPID{ID: "mesos1", Host: "localhost"})
    assert.NoError(b, m1.Start())
    defer m1.Stop()

    b.ResetTimer()
    for i := 0; i < b.N; i++ {
        m1.Send(context.TODO(), upid2, messages[i%1000])
    }
    wg.Wait()
    b.StopTimer()
    time.Sleep(2 * time.Second) // allow time for connection cleanup
}

func BenchmarkMessengerSendMixedMessage(b *testing.B) {
@@ -297,19 +280,20 @@ func BenchmarkMessengerSendMixedMessage(b *testing.B) {
    srv := runTestServer(b, wg)
    defer srv.Close()

    upid1, err := upid.Parse(fmt.Sprintf("mesos1@localhost:%d", getNewPort()))
    assert.NoError(b, err)
    upid2, err := upid.Parse(fmt.Sprintf("testserver@%s", srv.Listener.Addr().String()))
    assert.NoError(b, err)

    m1 := NewHttp(upid1)
    m1 := NewHttp(upid.UPID{ID: "mesos1", Host: "localhost"})
    assert.NoError(b, m1.Start())
    defer m1.Stop()

    b.ResetTimer()
    for i := 0; i < b.N; i++ {
        m1.Send(context.TODO(), upid2, messages[i%1000])
    }
    wg.Wait()
    b.StopTimer()
    time.Sleep(2 * time.Second) // allow time for connection cleanup
}

func BenchmarkMessengerSendRecvSmallMessage(b *testing.B) {
@@ -317,23 +301,25 @@ func BenchmarkMessengerSendRecvSmallMessage(b *testing.B) {

    messages := generateSmallMessages(1000)

    upid1, err := upid.Parse(fmt.Sprintf("mesos1@localhost:%d", getNewPort()))
    assert.NoError(b, err)
    upid2, err := upid.Parse(fmt.Sprintf("mesos2@localhost:%d", getNewPort()))
    assert.NoError(b, err)

    m1 := NewHttp(upid1)
    m2 := NewHttp(upid2)
    m1 := NewHttp(upid.UPID{ID: "foo1", Host: "localhost"})
    m2 := NewHttp(upid.UPID{ID: "foo2", Host: "localhost"})
    assert.NoError(b, m1.Start())
    defer m1.Stop()

    assert.NoError(b, m2.Start())
    defer m2.Stop()

    assert.NoError(b, m2.Install(noopHandler, &testmessage.SmallMessage{}))

    time.Sleep(time.Second) // Avoid race on upid.
    upid2 := m2.UPID()

    b.ResetTimer()
    for i := 0; i < b.N; i++ {
        m1.Send(context.TODO(), upid2, messages[i%1000])
        m1.Send(context.TODO(), &upid2, messages[i%1000])
    }
    globalWG.Wait()
    b.StopTimer()
    time.Sleep(2 * time.Second) // allow time for connection cleanup
}

func BenchmarkMessengerSendRecvMediumMessage(b *testing.B) {
@@ -341,23 +327,25 @@ func BenchmarkMessengerSendRecvMediumMessage(b *testing.B) {

    messages := generateMediumMessages(1000)

    upid1, err := upid.Parse(fmt.Sprintf("mesos1@localhost:%d", getNewPort()))
    assert.NoError(b, err)
    upid2, err := upid.Parse(fmt.Sprintf("mesos2@localhost:%d", getNewPort()))
    assert.NoError(b, err)

    m1 := NewHttp(upid1)
    m2 := NewHttp(upid2)
    m1 := NewHttp(upid.UPID{ID: "foo1", Host: "localhost"})
    m2 := NewHttp(upid.UPID{ID: "foo2", Host: "localhost"})
    assert.NoError(b, m1.Start())
    defer m1.Stop()

    assert.NoError(b, m2.Start())
    defer m2.Stop()

    assert.NoError(b, m2.Install(noopHandler, &testmessage.MediumMessage{}))

    time.Sleep(time.Second) // Avoid race on upid.
    upid2 := m2.UPID()

    b.ResetTimer()
    for i := 0; i < b.N; i++ {
        m1.Send(context.TODO(), upid2, messages[i%1000])
        m1.Send(context.TODO(), &upid2, messages[i%1000])
    }
    globalWG.Wait()
    b.StopTimer()
    time.Sleep(2 * time.Second) // allow time for connection cleanup
}

func BenchmarkMessengerSendRecvBigMessage(b *testing.B) {
@@ -365,72 +353,78 @@ func BenchmarkMessengerSendRecvBigMessage(b *testing.B) {

    messages := generateBigMessages(1000)

    upid1, err := upid.Parse(fmt.Sprintf("mesos1@localhost:%d", getNewPort()))
    assert.NoError(b, err)
    upid2, err := upid.Parse(fmt.Sprintf("mesos2@localhost:%d", getNewPort()))
    assert.NoError(b, err)

    m1 := NewHttp(upid1)
    m2 := NewHttp(upid2)
    m1 := NewHttp(upid.UPID{ID: "foo1", Host: "localhost"})
    m2 := NewHttp(upid.UPID{ID: "foo2", Host: "localhost"})
    assert.NoError(b, m1.Start())
    defer m1.Stop()

    assert.NoError(b, m2.Start())
    defer m2.Stop()

    assert.NoError(b, m2.Install(noopHandler, &testmessage.BigMessage{}))

    time.Sleep(time.Second) // Avoid race on upid.
    upid2 := m2.UPID()

    b.ResetTimer()
    for i := 0; i < b.N; i++ {
        m1.Send(context.TODO(), upid2, messages[i%1000])
        m1.Send(context.TODO(), &upid2, messages[i%1000])
    }
    globalWG.Wait()
    b.StopTimer()
    time.Sleep(2 * time.Second) // allow time for connection cleanup
}

func BenchmarkMessengerSendRecvLargeMessage(b *testing.B) {
    globalWG.Add(b.N)
    messages := generateLargeMessages(1000)

    upid1, err := upid.Parse(fmt.Sprintf("mesos1@localhost:%d", getNewPort()))
    assert.NoError(b, err)
    upid2, err := upid.Parse(fmt.Sprintf("mesos2@localhost:%d", getNewPort()))
    assert.NoError(b, err)

    m1 := NewHttp(upid1)
    m2 := NewHttp(upid2)
    m1 := NewHttp(upid.UPID{ID: "foo1", Host: "localhost"})
    m2 := NewHttp(upid.UPID{ID: "foo2", Host: "localhost"})
    assert.NoError(b, m1.Start())
    defer m1.Stop()

    assert.NoError(b, m2.Start())
    defer m2.Stop()

    assert.NoError(b, m2.Install(noopHandler, &testmessage.LargeMessage{}))

    time.Sleep(time.Second) // Avoid race on upid.
    upid2 := m2.UPID()

    b.ResetTimer()
    for i := 0; i < b.N; i++ {
        m1.Send(context.TODO(), upid2, messages[i%1000])
        m1.Send(context.TODO(), &upid2, messages[i%1000])
    }
    globalWG.Wait()
    b.StopTimer()
    time.Sleep(2 * time.Second) // allow time for connection cleanup
}

func BenchmarkMessengerSendRecvMixedMessage(b *testing.B) {
    globalWG.Add(b.N)
    messages := generateMixedMessages(1000)

    upid1, err := upid.Parse(fmt.Sprintf("mesos1@localhost:%d", getNewPort()))
    assert.NoError(b, err)
    upid2, err := upid.Parse(fmt.Sprintf("mesos2@localhost:%d", getNewPort()))
    assert.NoError(b, err)

    m1 := NewHttp(upid1)
    m2 := NewHttp(upid2)
    m1 := NewHttp(upid.UPID{ID: "foo1", Host: "localhost"})
    m2 := NewHttp(upid.UPID{ID: "foo2", Host: "localhost"})
    assert.NoError(b, m1.Start())
    defer m1.Stop()

    assert.NoError(b, m2.Start())
    defer m2.Stop()

    assert.NoError(b, m2.Install(noopHandler, &testmessage.SmallMessage{}))
    assert.NoError(b, m2.Install(noopHandler, &testmessage.MediumMessage{}))
    assert.NoError(b, m2.Install(noopHandler, &testmessage.BigMessage{}))
    assert.NoError(b, m2.Install(noopHandler, &testmessage.LargeMessage{}))

    time.Sleep(time.Second) // Avoid race on upid.
    upid2 := m2.UPID()

    b.ResetTimer()
    for i := 0; i < b.N; i++ {
        m1.Send(context.TODO(), upid2, messages[i%1000])
        m1.Send(context.TODO(), &upid2, messages[i%1000])
    }
    globalWG.Wait()
    b.StopTimer()
    time.Sleep(2 * time.Second) // allow time for connection cleanup
}

func TestUPIDBindingAddress(t *testing.T) {

@@ -83,8 +83,8 @@ func (m *MockedMessenger) Stop() error {
}

// UPID is a mocked implementation.
func (m *MockedMessenger) UPID() *upid.UPID {
    return m.Called().Get(0).(*upid.UPID)
func (m *MockedMessenger) UPID() upid.UPID {
    return m.Called().Get(0).(upid.UPID)
}

func (m *MockedMessenger) recvLoop() {

@@ -43,11 +43,11 @@ type Transporter interface {

    //Start starts the transporter and returns immediately. The error chan
    //is never nil.
    Start() <-chan error
    Start() (upid.UPID, <-chan error)

    //Stop kills the transporter.
    Stop(graceful bool) error

    //UPID returns the PID for transporter.
    UPID() *upid.UPID
    UPID() upid.UPID
}
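// NOTE(editor): usage sketch under assumptions drawn from the hunks above,
// not part of this commit. A caller of the revised interface might look like:
//
//     pid, errCh := transport.Start() // pid is the UPID actually bound
//     if pid == (upid.UPID{}) {
//         return <-errCh              // a zero UPID signals startup failure
//     }
//     defer transport.Stop(false)
//     err := <-errCh                  // reports when the transport dies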
@@ -66,6 +66,11 @@ type SchedulerDriver interface {
    // framework via Scheduler.ResourceOffers callback, asynchronously.
    RequestResources(requests []*mesos.Request) (mesos.Status, error)

    // AcceptOffers utilizes the new HTTP API to send a Scheduler Call Message
    // to the Mesos Master. Valid operation types are LAUNCH, RESERVE, UNRESERVE,
    // CREATE, DESTROY, and more.
    AcceptOffers(offerIDs []*mesos.OfferID, operations []*mesos.Offer_Operation, filters *mesos.Filters) (mesos.Status, error)

    // Launches the given set of tasks. Any resources remaining (i.e.,
    // not used by the tasks or their executors) will be considered
    // declined. The specified filters are applied on all unused

File diff suppressed because it is too large.

44  Godeps/_workspace/src/github.com/mesos/mesos-go/scheduler/scheduler_intgr_test.go (generated, vendored)
@@ -37,9 +37,10 @@ import (

// testScheduler is used for testing Scheduler callbacks.
type testScheduler struct {
    ch chan bool
    wg *sync.WaitGroup
    s  *SchedulerIntegrationTestSuite
    ch     chan bool
    wg     *sync.WaitGroup
    s      *SchedulerIntegrationTestSuite
    errors chan string // yields errors received by Scheduler.Error
}

// convenience
@@ -83,7 +84,7 @@ func (sched *testScheduler) StatusUpdate(dr SchedulerDriver, stat *mesos.TaskSta
    sched.s.NotNil(stat)
    sched.s.Equal("test-task-001", stat.GetTaskId().GetValue())
    sched.wg.Done()
    log.Infof("Status update done with waitGroup %v \n", sched.wg)
    log.Infof("Status update done with waitGroup")
}

func (sched *testScheduler) SlaveLost(dr SchedulerDriver, slaveId *mesos.SlaveID) {
@@ -109,7 +110,7 @@ func (sched *testScheduler) ExecutorLost(SchedulerDriver, *mesos.ExecutorID, *me

func (sched *testScheduler) Error(dr SchedulerDriver, err string) {
    log.Infoln("Sched.Error() called.")
    sched.s.Equal("test-error-999", err)
    sched.errors <- err
    sched.ch <- true
}

@@ -128,7 +129,7 @@ func (sched *testScheduler) waitForCallback(timeout time.Duration) bool {
}

func newTestScheduler(s *SchedulerIntegrationTestSuite) *testScheduler {
    return &testScheduler{ch: make(chan bool), s: s}
    return &testScheduler{ch: make(chan bool), s: s, errors: make(chan string, 2)}
}

type mockServerConfigurator func(frameworkId *mesos.FrameworkID, suite *SchedulerIntegrationTestSuite)

@@ -168,8 +169,12 @@ func (suite *SchedulerIntegrationTestSuite) configure(frameworkId *mesos.Framewo
    suite.sched = newTestScheduler(suite)
    suite.sched.ch = make(chan bool, 10) // big enough that it doesn't block callback processing

    suite.driver = newTestSchedulerDriver(suite.T(), suite.sched, suite.framework, suite.server.Addr, nil)

    cfg := DriverConfig{
        Scheduler: suite.sched,
        Framework: suite.framework,
        Master:    suite.server.Addr,
    }
    suite.driver = newTestSchedulerDriver(suite.T(), cfg).MesosSchedulerDriver
    suite.config(frameworkId, suite)

    stat, err := suite.driver.Start()

@@ -205,7 +210,9 @@ var defaultMockServerConfigurator = mockServerConfigurator(func(frameworkId *mes
        rsp.WriteHeader(http.StatusAccepted)
    }
    // this is what the mocked scheduler is expecting to receive
    suite.driver.frameworkRegistered(suite.driver.MasterPid, &mesos.FrameworkRegisteredMessage{
    suite.driver.eventLock.Lock()
    defer suite.driver.eventLock.Unlock()
    suite.driver.frameworkRegistered(suite.driver.masterPid, &mesos.FrameworkRegisteredMessage{
        FrameworkId: frameworkId,
        MasterInfo:  masterInfo,
    })
@@ -219,7 +226,9 @@ var defaultMockServerConfigurator = mockServerConfigurator(func(frameworkId *mes
        rsp.WriteHeader(http.StatusAccepted)
    }
    // this is what the mocked scheduler is expecting to receive
    suite.driver.frameworkReregistered(suite.driver.MasterPid, &mesos.FrameworkReregisteredMessage{
    suite.driver.eventLock.Lock()
    defer suite.driver.eventLock.Unlock()
    suite.driver.frameworkReregistered(suite.driver.masterPid, &mesos.FrameworkReregisteredMessage{
        FrameworkId: frameworkId,
        MasterInfo:  masterInfo,
    })
@@ -239,8 +248,12 @@ func (s *SchedulerIntegrationTestSuite) TearDownTest() {
    if s.server != nil {
        s.server.Close()
    }
    if s.driver != nil && s.driver.Status() == mesos.Status_DRIVER_RUNNING {
    if s.driver != nil {
        s.driver.Abort()

        // wait for all events to finish processing, otherwise we can get into a data
        // race when the suite object is reused for the next test.
        <-s.driver.done
    }
}

@@ -353,7 +366,7 @@ func (suite *SchedulerIntegrationTestSuite) TestSchedulerDriverStatusUpdatedEven
        defer req.Body.Close()
        assert.NotNil(t, data)
        wg.Done()
        log.Infof("MockMaster - Done with wait group %v \n", wg)
        log.Infof("MockMaster - Done with wait group")
    })
    suite.sched.wg = &wg
})
@@ -369,7 +382,8 @@ func (suite *SchedulerIntegrationTestSuite) TestSchedulerDriverStatusUpdatedEven
        float64(time.Now().Unix()),
        []byte("test-abcd-ef-3455-454-001"),
    ),
    Pid: proto.String(suite.driver.self.String()),
    // note: cannot use driver's pid here if we want an ACK
    Pid: proto.String("test-slave-001(1)@foo.bar:1234"),
}
pbMsg.Update.SlaveId = &mesos.SlaveID{Value: proto.String("test-slave-001")}

@@ -437,6 +451,8 @@ func (suite *SchedulerIntegrationTestSuite) TestSchedulerDriverFrameworkErrorEve

    c := suite.newMockClient()
    c.SendMessage(suite.driver.self, pbMsg)
    suite.sched.waitForCallback(0)
    message := <-suite.sched.errors
    suite.Equal("test-error-999", message)
    suite.sched.waitForCallback(10 * time.Second)
    suite.Equal(mesos.Status_DRIVER_ABORTED, suite.driver.Status())
}

581  Godeps/_workspace/src/github.com/mesos/mesos-go/scheduler/scheduler_unit_test.go (generated, vendored)
@@ -21,61 +21,22 @@ package scheduler
import (
    "fmt"
    "os/user"
    "sync"
    "testing"
    "time"

    "github.com/gogo/protobuf/proto"
    log "github.com/golang/glog"
    "github.com/mesos/mesos-go/detector"
    "github.com/mesos/mesos-go/detector/zoo"
    _ "github.com/mesos/mesos-go/detector/zoo"
    mesos "github.com/mesos/mesos-go/mesosproto"
    util "github.com/mesos/mesos-go/mesosutil"
    "github.com/mesos/mesos-go/messenger"
    "github.com/mesos/mesos-go/upid"
    "github.com/samuel/go-zookeeper/zk"
    "github.com/stretchr/testify/assert"
    "github.com/stretchr/testify/mock"
    "github.com/stretchr/testify/suite"
    "golang.org/x/net/context"
)

var (
    registerMockDetectorOnce sync.Once
)

func ensureMockDetectorRegistered() {
    registerMockDetectorOnce.Do(func() {
        var s *SchedulerTestSuite
        err := s.registerMockDetector("testing://")
        if err != nil {
            log.Error(err)
        }
    })
}

type MockDetector struct {
    mock.Mock
    address string
}

func (m *MockDetector) Detect(listener detector.MasterChanged) error {
    if listener != nil {
        if pid, err := upid.Parse("master(2)@" + m.address); err != nil {
            return err
        } else {
            go listener.OnMasterChanged(detector.CreateMasterInfo(pid))
        }
    }
    return nil
}

func (m *MockDetector) Done() <-chan struct{} {
    return nil
}

func (m *MockDetector) Cancel() {}

type SchedulerTestSuiteCore struct {
    master     string
    masterUpid string
@@ -89,18 +50,6 @@ type SchedulerTestSuite struct {
    SchedulerTestSuiteCore
}

func (s *SchedulerTestSuite) registerMockDetector(prefix string) error {
    address := ""
    if s != nil {
        address = s.master
    } else {
        address = "127.0.0.1:8080"
    }
    return detector.Register(prefix, detector.PluginFactory(func(spec string) (detector.Master, error) {
        return &MockDetector{address: address}, nil
    }))
}

func (s *SchedulerTestSuiteCore) SetupTest() {
    s.master = "127.0.0.1:8080"
    s.masterUpid = "master(2)@" + s.master
@@ -118,93 +67,89 @@ func TestSchedulerSuite(t *testing.T) {
    suite.Run(t, new(SchedulerTestSuite))
}

func newTestSchedulerDriver(t *testing.T, sched Scheduler, framework *mesos.FrameworkInfo, master string, cred *mesos.Credential) *MesosSchedulerDriver {
    dconfig := DriverConfig{
        Scheduler:  sched,
        Framework:  framework,
        Master:     master,
        Credential: cred,
func driverConfig(sched Scheduler, framework *mesos.FrameworkInfo, master string, cred *mesos.Credential) DriverConfig {
    return driverConfigMessenger(sched, framework, master, cred, nil)
}

func driverConfigMessenger(sched Scheduler, framework *mesos.FrameworkInfo, master string, cred *mesos.Credential, m messenger.Messenger) DriverConfig {
    d := DriverConfig{
        Scheduler:   sched,
        Framework:   framework,
        Master:      master,
        Credential:  cred,
        NewDetector: func() (detector.Master, error) { return nil, nil }, // master detection not needed
    }
    driver, err := NewMesosSchedulerDriver(dconfig)
    if m != nil {
        d.NewMessenger = func() (messenger.Messenger, error) { return m, nil }
    }
    return d
}

func mockedMessenger() *messenger.MockedMessenger {
    m := messenger.NewMockedMessenger()
    m.On("Start").Return(nil)
    m.On("UPID").Return(upid.UPID{})
    m.On("Send").Return(nil)
    m.On("Stop").Return(nil)
    m.On("Route").Return(nil)
    m.On("Install").Return(nil)
    return m
}
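// NOTE(editor): explanatory aside, not part of this commit. mockedMessenger
// relies on testify's mock package: each m.On("Name").Return(value) call
// registers a canned return value for that method, so the driver under test
// can call Start, UPID, Send, Stop, Route, and Install without any real
// transport, and UPID() simply yields the zero upid.UPID{} configured above.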
|
||||
|
||||
type testSchedulerDriver struct {
|
||||
*MesosSchedulerDriver
|
||||
}
|
||||
|
||||
func (t *testSchedulerDriver) setConnected(b bool) {
|
||||
t.eventLock.Lock()
|
||||
defer t.eventLock.Unlock()
|
||||
t.connected = b
|
||||
}
|
||||
|
||||
func newTestSchedulerDriver(t *testing.T, cfg DriverConfig) *testSchedulerDriver {
|
||||
driver, err := NewMesosSchedulerDriver(cfg)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
return driver
|
||||
return &testSchedulerDriver{driver}
|
||||
}
|
||||
|
||||
func TestSchedulerDriverNew(t *testing.T) {
|
||||
masterAddr := "localhost:5050"
|
||||
driver := newTestSchedulerDriver(t, NewMockScheduler(), &mesos.FrameworkInfo{}, masterAddr, nil)
|
||||
driver := newTestSchedulerDriver(t, driverConfig(NewMockScheduler(), &mesos.FrameworkInfo{}, masterAddr, nil))
|
||||
user, _ := user.Current()
|
||||
assert.Equal(t, user.Username, driver.FrameworkInfo.GetUser())
|
||||
assert.Equal(t, user.Username, driver.frameworkInfo.GetUser())
|
||||
host := util.GetHostname("")
|
||||
assert.Equal(t, host, driver.FrameworkInfo.GetHostname())
|
||||
assert.Equal(t, host, driver.frameworkInfo.GetHostname())
|
||||
}
|
||||
|
||||
func TestSchedulerDriverNew_WithPid(t *testing.T) {
|
||||
masterAddr := "master@127.0.0.1:5050"
|
||||
mUpid, err := upid.Parse(masterAddr)
|
||||
assert.NoError(t, err)
|
||||
driver := newTestSchedulerDriver(t, NewMockScheduler(), &mesos.FrameworkInfo{}, masterAddr, nil)
|
||||
driver := newTestSchedulerDriver(t, driverConfig(NewMockScheduler(), &mesos.FrameworkInfo{}, masterAddr, nil))
|
||||
driver.handleMasterChanged(driver.self, &mesos.InternalMasterChangeDetected{Master: &mesos.MasterInfo{Pid: proto.String(mUpid.String())}})
|
||||
assert.True(t, driver.MasterPid.Equal(mUpid), fmt.Sprintf("expected upid %+v instead of %+v", mUpid, driver.MasterPid))
|
||||
assert.True(t, driver.masterPid.Equal(mUpid), fmt.Sprintf("expected upid %+v instead of %+v", mUpid, driver.masterPid))
|
||||
assert.NoError(t, err)
|
||||
}
|
||||
|
||||
func (suite *SchedulerTestSuite) TestSchedulerDriverNew_WithZkUrl() {
	masterAddr := "zk://127.0.0.1:5050/mesos"
	driver := newTestSchedulerDriver(suite.T(), NewMockScheduler(), suite.framework, masterAddr, nil)
	md, err := zoo.NewMockMasterDetector(masterAddr)
	suite.NoError(err)
	suite.NotNil(md)
	driver.masterDetector = md // override internal master detector

	md.ScheduleConnEvent(zk.StateConnected)

	done := make(chan struct{})
	driver.masterDetector.Detect(detector.OnMasterChanged(func(m *mesos.MasterInfo) {
		suite.NotNil(m)
		suite.NotEqual(m.GetPid(), suite.masterUpid)
		close(done)
	}))

	// TODO(vlad): revisit, the detector is not responding.

	// NOTE(jdef): this works for me; I wonder if the timeouts are too short, or
	// if GOMAXPROCS settings are affecting the result?

	// md.ScheduleSessEvent(zk.EventNodeChildrenChanged)
	// select {
	// case <-done:
	// case <-time.After(time.Millisecond * 1000):
	// 	suite.T().Errorf("Timed out waiting for children event.")
	// }
}

func (suite *SchedulerTestSuite) TestSchedulerDriverNew_WithFrameworkInfo_Override() {
	suite.framework.Hostname = proto.String("local-host")
	driver := newTestSchedulerDriver(suite.T(), NewMockScheduler(), suite.framework, "127.0.0.1:5050", nil)
	suite.Equal(driver.FrameworkInfo.GetUser(), "test-user")
	suite.Equal("local-host", driver.FrameworkInfo.GetHostname())
	driver := newTestSchedulerDriver(suite.T(), driverConfig(NewMockScheduler(), suite.framework, "127.0.0.1:5050", nil))
	suite.Equal(driver.frameworkInfo.GetUser(), "test-user")
	suite.Equal("local-host", driver.frameworkInfo.GetHostname())
}

func (suite *SchedulerTestSuite) TestSchedulerDriverStartOK() {
	sched := NewMockScheduler()

	messenger := messenger.NewMockedMessenger()
	messenger.On("Start").Return(nil)
	messenger.On("UPID").Return(&upid.UPID{})
	messenger.On("Send").Return(nil)
	messenger.On("Stop").Return(nil)

	driver := newTestSchedulerDriver(suite.T(), sched, suite.framework, suite.master, nil)
	driver.messenger = messenger
	suite.True(driver.Stopped())
	driver := newTestSchedulerDriver(suite.T(), driverConfigMessenger(sched, suite.framework, suite.master, nil, mockedMessenger()))
	suite.False(driver.Running())

	stat, err := driver.Start()
	suite.NoError(err)
	suite.Equal(mesos.Status_DRIVER_RUNNING, stat)
	suite.False(driver.Stopped())
	suite.True(driver.Running())
	driver.Stop(true)
}

func (suite *SchedulerTestSuite) TestSchedulerDriverStartWithMessengerFailure() {
@@ -213,19 +158,18 @@ func (suite *SchedulerTestSuite) TestSchedulerDriverStartWithMessengerFailure()
	messenger := messenger.NewMockedMessenger()
	messenger.On("Start").Return(fmt.Errorf("Failed to start messenger"))
	messenger.On("Stop").Return()
	messenger.On("Stop").Return(nil)
	messenger.On("Install").Return(nil)

	driver := newTestSchedulerDriver(suite.T(), sched, suite.framework, suite.master, nil)
	driver.messenger = messenger
	suite.True(driver.Stopped())
	driver := newTestSchedulerDriver(suite.T(), driverConfigMessenger(sched, suite.framework, suite.master, nil, messenger))
	suite.False(driver.Running())

	stat, err := driver.Start()
	suite.Error(err)
	suite.True(driver.Stopped())
	suite.True(!driver.Connected())
	suite.False(driver.Running())
	suite.False(driver.Connected())
	suite.Equal(mesos.Status_DRIVER_NOT_STARTED, driver.Status())
	suite.Equal(mesos.Status_DRIVER_NOT_STARTED, stat)

}

func (suite *SchedulerTestSuite) TestSchedulerDriverStartWithRegistrationFailure() {
@@ -235,13 +179,11 @@ func (suite *SchedulerTestSuite) TestSchedulerDriverStartWithRegistrationFailure
	// Set expectations and return values.
	messenger := messenger.NewMockedMessenger()
	messenger.On("Start").Return(nil)
	messenger.On("UPID").Return(&upid.UPID{})
	messenger.On("UPID").Return(upid.UPID{})
	messenger.On("Stop").Return(nil)
	messenger.On("Install").Return(nil)

	driver := newTestSchedulerDriver(suite.T(), sched, suite.framework, suite.master, nil)

	driver.messenger = messenger
	suite.True(driver.Stopped())
	driver := newTestSchedulerDriver(suite.T(), driverConfigMessenger(sched, suite.framework, suite.master, nil, messenger))

	// reliable registration loops until the driver is stopped or connected
	// (see the sketch after this test)
	stat, err := driver.Start()
@@ -250,42 +192,36 @@ func (suite *SchedulerTestSuite) TestSchedulerDriverStartWithRegistrationFailure
	time.Sleep(5 * time.Second) // wait a bit; registration should be looping...

	suite.False(driver.Stopped())
	suite.True(driver.Running())
	suite.Equal(mesos.Status_DRIVER_RUNNING, driver.Status())

	// stop the driver, should not panic!
	driver.Stop(false) // not failing over
	suite.True(driver.Stopped())
	driver.Stop(false) // intentionally not failing over
	suite.False(driver.Running())
	suite.Equal(mesos.Status_DRIVER_STOPPED, driver.Status())

	messenger.AssertExpectations(suite.T())
}

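// The "reliable registration" loop exercised above can be pictured as a
// capped-backoff resend loop. This is an illustrative sketch only, not the
// driver's actual implementation; send, connected, and stopped are
// hypothetical callbacks standing in for the driver's internals.
func sketchReliableRegistration(send func() error, connected, stopped func() bool) {
	backoff := time.Second
	for !stopped() && !connected() {
		_ = send() // re-send the registration message; an error just means retry
		time.Sleep(backoff)
		if backoff < 8*time.Second {
			backoff *= 2 // capped exponential backoff between attempts
		}
	}
}
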
func (suite *SchedulerTestSuite) TestSchedulerDriverJoinUnstarted() {
	driver := newTestSchedulerDriver(suite.T(), NewMockScheduler(), suite.framework, suite.master, nil)
	suite.True(driver.Stopped())
	driver := newTestSchedulerDriver(suite.T(), driverConfig(NewMockScheduler(), suite.framework, suite.master, nil))
	suite.False(driver.Running())

	stat, err := driver.Join()
	suite.Error(err)
	suite.Equal(mesos.Status_DRIVER_NOT_STARTED, stat)
	suite.False(driver.Running())
}

func (suite *SchedulerTestSuite) TestSchedulerDriverJoinOK() {
	// Set expectations and return values.
	messenger := messenger.NewMockedMessenger()
	messenger.On("Start").Return(nil)
	messenger.On("UPID").Return(&upid.UPID{})
	messenger.On("Send").Return(nil)
	messenger.On("Stop").Return(nil)

	driver := newTestSchedulerDriver(suite.T(), NewMockScheduler(), suite.framework, suite.master, nil)
	driver.messenger = messenger
	suite.True(driver.Stopped())
	driver := newTestSchedulerDriver(suite.T(), driverConfigMessenger(NewMockScheduler(), suite.framework, suite.master, nil, mockedMessenger()))
	suite.False(driver.Running())

	stat, err := driver.Start()
	suite.NoError(err)
	suite.Equal(mesos.Status_DRIVER_RUNNING, stat)
	suite.False(driver.Stopped())
	suite.True(driver.Running())

	testCh := make(chan mesos.Status)
	go func() {
@@ -293,45 +229,37 @@ func (suite *SchedulerTestSuite) TestSchedulerDriverJoinOK() {
		testCh <- stat
	}()

	close(driver.stopCh) // manually stopping; see the closed-channel sketch after this test
	stat = <-testCh      // when Stop() is called, stat will be DRIVER_STOPPED.
	driver.Stop(true)
}

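// The old test body above stopped the driver by closing stopCh directly: a
// closed channel acts as a broadcast, because every pending and future receive
// on it returns immediately. The idiom in isolation:
func sketchBroadcastStop() {
	stop := make(chan struct{})
	done := make(chan struct{})
	go func() {
		defer close(done)
		<-stop // blocks until stop is closed; no value is ever sent
	}()
	close(stop) // releases every goroutine blocked on <-stop at once
	<-done
}
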
func (suite *SchedulerTestSuite) TestSchedulerDriverRun() {
	// Set expectations and return values.
	messenger := messenger.NewMockedMessenger()
	messenger.On("Start").Return(nil)
	messenger.On("UPID").Return(&upid.UPID{})
	messenger.On("Send").Return(nil)
	messenger.On("Stop").Return(nil)

	driver := newTestSchedulerDriver(suite.T(), NewMockScheduler(), suite.framework, suite.master, nil)
	driver.messenger = messenger
	suite.True(driver.Stopped())
	driver := newTestSchedulerDriver(suite.T(), driverConfigMessenger(NewMockScheduler(), suite.framework, suite.master, nil, mockedMessenger()))
	suite.False(driver.Running())

	ch := make(chan struct{})
	go func() {
		defer close(ch)
		stat, err := driver.Run()
		suite.NoError(err)
		suite.Equal(mesos.Status_DRIVER_STOPPED, stat)
	}()
	time.Sleep(time.Millisecond * 1)

	suite.False(driver.Stopped())
	<-driver.started
	suite.True(driver.Running())
	suite.Equal(mesos.Status_DRIVER_RUNNING, driver.Status())

	// close it all.
	driver.setStatus(mesos.Status_DRIVER_STOPPED)
	close(driver.stopCh)
	time.Sleep(time.Millisecond * 1)
	driver.Stop(true)
	<-ch
}

func (suite *SchedulerTestSuite) TestSchedulerDriverStopUnstarted() {
	driver := newTestSchedulerDriver(suite.T(), NewMockScheduler(), suite.framework, suite.master, nil)
	suite.True(driver.Stopped())
	driver := newTestSchedulerDriver(suite.T(), driverConfig(NewMockScheduler(), suite.framework, suite.master, nil))
	suite.False(driver.Running())

	stat, err := driver.Stop(true)
	suite.NotNil(err)
	suite.True(driver.Stopped())
	suite.False(driver.Running())
	suite.Equal(mesos.Status_DRIVER_NOT_STARTED, stat)
}

@@ -347,25 +275,19 @@ func (m *msgTracker) Send(ctx context.Context, upid *upid.UPID, msg proto.Message

func (suite *SchedulerTestSuite) TestSchdulerDriverStop_WithoutFailover() {
	// Set expectations and return values.
	messenger := &msgTracker{MockedMessenger: messenger.NewMockedMessenger()}
	messenger.On("Start").Return(nil)
	messenger.On("UPID").Return(&upid.UPID{})
	messenger.On("Send").Return(nil)
	messenger.On("Stop").Return(nil)
	messenger.On("Route").Return(nil)

	driver := newTestSchedulerDriver(suite.T(), NewMockScheduler(), suite.framework, suite.master, nil)
	driver.messenger = messenger
	suite.True(driver.Stopped())
	messenger := &msgTracker{MockedMessenger: mockedMessenger()}
	driver := newTestSchedulerDriver(suite.T(), driverConfigMessenger(NewMockScheduler(), suite.framework, suite.master, nil, messenger))
	suite.False(driver.Running())

	ch := make(chan struct{})
	go func() {
		defer close(ch)
		stat, err := driver.Run()
		suite.NoError(err)
		suite.Equal(mesos.Status_DRIVER_STOPPED, stat)
	}()
	time.Sleep(time.Millisecond * 1)

	suite.False(driver.Stopped())
	<-driver.started
	suite.True(driver.Running())
	suite.Equal(mesos.Status_DRIVER_RUNNING, driver.Status())
	driver.connected = true // pretend that we're already registered

@@ -376,93 +298,218 @@ func (suite *SchedulerTestSuite) TestSchdulerDriverStop_WithoutFailover() {
	_, isUnregMsg := msg.(proto.Message)
	suite.True(isUnregMsg, "expected UnregisterFrameworkMessage instead of %+v", msg)

	suite.True(driver.Stopped())
	suite.False(driver.Running())
	suite.Equal(mesos.Status_DRIVER_STOPPED, driver.Status())
	<-ch
}

func (suite *SchedulerTestSuite) TestSchdulerDriverStop_WithFailover() {
	// Set expectations and return values.
	messenger := &msgTracker{MockedMessenger: messenger.NewMockedMessenger()}
	messenger.On("Start").Return(nil)
	messenger.On("UPID").Return(&upid.UPID{})
	messenger.On("Send").Return(nil)
	messenger.On("Stop").Return(nil)
	messenger.On("Route").Return(nil)

	driver := newTestSchedulerDriver(suite.T(), NewMockScheduler(), suite.framework, suite.master, nil)
	driver.messenger = messenger
	suite.True(driver.Stopped())

	stat, err := driver.Start()
	suite.NoError(err)
	suite.Equal(mesos.Status_DRIVER_RUNNING, stat)
	suite.False(driver.Stopped())
	driver.connected = true // pretend that we're already registered
	mess := &msgTracker{MockedMessenger: mockedMessenger()}
	d := DriverConfig{
		Scheduler:    NewMockScheduler(),
		Framework:    suite.framework,
		Master:       suite.master,
		NewMessenger: func() (messenger.Messenger, error) { return mess, nil },
		NewDetector:  func() (detector.Master, error) { return nil, nil },
	}
	driver := newTestSchedulerDriver(suite.T(), d)
	suite.False(driver.Running())

	ch := make(chan struct{})
	go func() {
		// Run() blocks until the driver is stopped or aborted
		stat, err := driver.Join()
		defer close(ch)
		stat, err := driver.Run()
		suite.NoError(err)
		suite.Equal(mesos.Status_DRIVER_STOPPED, stat)
	}()
	<-driver.started
	driver.setConnected(true) // simulated

	// wait for Join() to begin blocking (so that it has already validated the driver state)
	time.Sleep(200 * time.Millisecond)

	suite.True(driver.Running())
	driver.Stop(true) // true = scheduler failover
	msg := messenger.lastMessage
	msg := mess.lastMessage

	// we expect lastMessage to be nil because, when failing over, the scheduler
	// sends no 'unregister' message (see the sketch after this test)
	suite.Nil(msg)

	suite.True(driver.Stopped())
	suite.False(driver.Running())
	suite.Equal(mesos.Status_DRIVER_STOPPED, driver.Status())
	<-ch
}

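// Stop's failover flag, as the two tests above exercise it: without failover
// the scheduler tells the master to unregister; with failover it disappears
// silently so a successor can reattach under the same framework ID before the
// failover timeout expires. A hedged sketch of that branch, with
// sendUnregister as a hypothetical stand-in for the real message:
func sketchStop(failover bool, sendUnregister func() error) {
	if !failover {
		_ = sendUnregister() // the master forgets the framework
	}
	// with failover == true nothing is sent, which is why the test above
	// expects lastMessage to be nil
}
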
func (suite *SchedulerTestSuite) TestSchdulerDriverAbort() {
	// Set expectations and return values.
	messenger := messenger.NewMockedMessenger()
	messenger.On("Start").Return(nil)
	messenger.On("UPID").Return(&upid.UPID{})
	messenger.On("Send").Return(nil)
	messenger.On("Stop").Return(nil)
	messenger.On("Route").Return(nil)

	driver := newTestSchedulerDriver(suite.T(), NewMockScheduler(), suite.framework, suite.master, nil)
	driver.messenger = messenger
	suite.True(driver.Stopped())
func (suite *SchedulerTestSuite) TestSchedulerDriverAbort() {
	driver := newTestSchedulerDriver(suite.T(), driverConfigMessenger(NewMockScheduler(), suite.framework, suite.master, nil, mockedMessenger()))
	suite.False(driver.Running())

	ch := make(chan struct{})
	go func() {
		defer close(ch)
		stat, err := driver.Run()
		suite.NoError(err)
		suite.Equal(mesos.Status_DRIVER_ABORTED, stat)
	}()
	time.Sleep(time.Millisecond * 1)
	<-driver.started
	driver.setConnected(true) // simulated

	suite.False(driver.Stopped())
	suite.True(driver.Running())
	suite.Equal(mesos.Status_DRIVER_RUNNING, driver.Status())

	stat, err := driver.Abort()
	time.Sleep(time.Millisecond * 1)
	suite.NoError(err)
	suite.True(driver.Stopped())

	<-driver.stopCh
	suite.False(driver.Running())
	suite.Equal(mesos.Status_DRIVER_ABORTED, stat)
	suite.Equal(mesos.Status_DRIVER_ABORTED, driver.Status())
	log.Info("waiting for driver to stop")
	<-ch
}

type fakeErrorScheduler struct {
	Scheduler
	msg    string
	called chan struct{}
}

func (f *fakeErrorScheduler) Error(d SchedulerDriver, msg string) {
	select {
	case <-f.called:
		return
	default:
	}
	defer close(f.called)
	f.msg = msg
}

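// fakeErrorScheduler.Error guards close(f.called) with a non-blocking receive
// so that a second callback cannot close the channel twice and panic. The same
// guard written with sync.Once, as a sketch (assumes "sync" is imported):
func onceOnlySignal() (signal func(), fired chan struct{}) {
	var once sync.Once
	fired = make(chan struct{})
	signal = func() {
		once.Do(func() { close(fired) }) // at most one close, however often signal runs
	}
	return signal, fired
}
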
func (suite *SchedulerTestSuite) TestSchedulerDriverErrorBeforeConnected() {
	sched := NewMockScheduler()
	errorTracker := &fakeErrorScheduler{Scheduler: sched, called: make(chan struct{})}
	driver := newTestSchedulerDriver(suite.T(), driverConfigMessenger(errorTracker, suite.framework, suite.master, nil, mockedMessenger()))

	const msg = "some random error message"
	suite.False(driver.Running())

	func() {
		driver.eventLock.Lock()
		defer driver.eventLock.Unlock()
		driver.error(msg) // the callback eventually invoked upon receiving an error from the master
	}()

	<-errorTracker.called
	suite.Equal(msg, errorTracker.msg)

	<-driver.stopCh
	suite.False(driver.Running())
	suite.Equal(mesos.Status_DRIVER_ABORTED, driver.Status())
}

func (suite *SchedulerTestSuite) TestSchdulerDriverLunchTasksUnstarted() {
func (suite *SchedulerTestSuite) TestSchdulerDriverAcceptOffersUnstarted() {
	sched := NewMockScheduler()
	sched.On("Error").Return()

	// Set expectations and return values.
	messenger := messenger.NewMockedMessenger()
	messenger.On("Route").Return(nil)
	messenger.On("Install").Return(nil)

	driver := newTestSchedulerDriver(suite.T(), sched, suite.framework, suite.master, nil)
	driver.messenger = messenger
	suite.True(driver.Stopped())
	driver := newTestSchedulerDriver(suite.T(), driverConfigMessenger(sched, suite.framework, suite.master, nil, messenger))

	stat, err := driver.AcceptOffers(
		[]*mesos.OfferID{{}},
		[]*mesos.Offer_Operation{},
		&mesos.Filters{},
	)
	suite.Error(err)
	suite.Equal(mesos.Status_DRIVER_NOT_STARTED, stat)
}

func (suite *SchedulerTestSuite) TestSchdulerDriverAcceptOffersWithError() {
	sched := NewMockScheduler()
	sched.On("StatusUpdate").Return(nil)
	sched.On("Error").Return()

	msgr := mockedMessenger()
	driver := newTestSchedulerDriver(suite.T(), driverConfigMessenger(sched, suite.framework, suite.master, nil, msgr))
	driver.dispatch = func(_ context.Context, _ *upid.UPID, _ proto.Message) error {
		return fmt.Errorf("Unable to send message")
	}

	go func() {
		driver.Run()
	}()
	<-driver.started
	driver.setConnected(true) // simulated
	suite.True(driver.Running())

	// set up an offer
	offer := util.NewOffer(
		util.NewOfferID("test-offer-001"),
		suite.framework.Id,
		util.NewSlaveID("test-slave-001"),
		"test-slave(1)@localhost:5050",
	)

	pid, err := upid.Parse("test-slave(1)@localhost:5050")
	suite.NoError(err)
	driver.cache.putOffer(offer, pid)

	// launch a task
	task := util.NewTaskInfo(
		"simple-task",
		util.NewTaskID("simpe-task-1"),
		util.NewSlaveID("test-slave-001"),
		[]*mesos.Resource{util.NewScalarResourceWithReservation("mem", 400, "principal", "role")},
	)
	task.Command = util.NewCommandInfo("pwd")
	task.Executor = util.NewExecutorInfo(util.NewExecutorID("test-exec"), task.Command)
	tasks := []*mesos.TaskInfo{task}

	operations := []*mesos.Offer_Operation{util.NewLaunchOperation(tasks)}

	stat, err := driver.AcceptOffers(
		[]*mesos.OfferID{offer.Id},
		operations,
		&mesos.Filters{},
	)
	suite.Equal(mesos.Status_DRIVER_RUNNING, stat)
	suite.Error(err)
}

func (suite *SchedulerTestSuite) TestSchdulerDriverAcceptOffers() {
	driver := newTestSchedulerDriver(suite.T(), driverConfigMessenger(NewMockScheduler(), suite.framework, suite.master, nil, mockedMessenger()))

	go func() {
		driver.Run()
	}()
	<-driver.started
	driver.setConnected(true) // simulated
	suite.True(driver.Running())

	volumes := []*mesos.Resource{util.NewVolumeResourceWithReservation(400, "containerPath", "persistenceId", mesos.Volume_RW.Enum(), "principal", "role")}

	operations := []*mesos.Offer_Operation{util.NewCreateOperation(volumes)}

	stat, err := driver.AcceptOffers(
		[]*mesos.OfferID{{}},
		operations,
		&mesos.Filters{},
	)
	suite.NoError(err)
	suite.Equal(mesos.Status_DRIVER_RUNNING, stat)
}

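// AcceptOffers answers one or more offers with a list of operations; LAUNCH
// and CREATE, used in the tests above, are just two operation kinds, and a
// single call may carry several, applied in order. A sketch combining them,
// assuming the util constructors already used in this file:
func sketchAcceptCombined(driver *testSchedulerDriver, offerID *mesos.OfferID, tasks []*mesos.TaskInfo, volumes []*mesos.Resource) (mesos.Status, error) {
	operations := []*mesos.Offer_Operation{
		util.NewCreateOperation(volumes), // create persistent volumes first
		util.NewLaunchOperation(tasks),   // then launch tasks that can use them
	}
	return driver.AcceptOffers([]*mesos.OfferID{offerID}, operations, &mesos.Filters{})
}
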
func (suite *SchedulerTestSuite) TestSchdulerDriverLaunchTasksUnstarted() {
	sched := NewMockScheduler()
	sched.On("Error").Return()

	// Set expectations and return values.
	messenger := messenger.NewMockedMessenger()
	messenger.On("Route").Return(nil)
	messenger.On("Install").Return(nil)

	driver := newTestSchedulerDriver(suite.T(), driverConfigMessenger(sched, suite.framework, suite.master, nil, messenger))

	stat, err := driver.LaunchTasks(
		[]*mesos.OfferID{{}},
@@ -478,33 +525,18 @@ func (suite *SchedulerTestSuite) TestSchdulerDriverLaunchTasksWithError() {
	sched.On("StatusUpdate").Return(nil)
	sched.On("Error").Return()

	msgr := messenger.NewMockedMessenger()
	msgr.On("Start").Return(nil)
	msgr.On("Send").Return(nil)
	msgr.On("UPID").Return(&upid.UPID{})
	msgr.On("Stop").Return(nil)
	msgr.On("Route").Return(nil)

	driver := newTestSchedulerDriver(suite.T(), sched, suite.framework, suite.master, nil)
	driver.messenger = msgr
	suite.True(driver.Stopped())
	msgr := mockedMessenger()
	driver := newTestSchedulerDriver(suite.T(), driverConfigMessenger(sched, suite.framework, suite.master, nil, msgr))
	driver.dispatch = func(_ context.Context, _ *upid.UPID, _ proto.Message) error {
		return fmt.Errorf("Unable to send message")
	}

	go func() {
		driver.Run()
	}()
	time.Sleep(time.Millisecond * 1)
	<-driver.started
	driver.setConnected(true) // simulated
	suite.False(driver.Stopped())
	suite.Equal(mesos.Status_DRIVER_RUNNING, driver.Status())

	// to trigger an error
	msgr2 := messenger.NewMockedMessenger()
	msgr2.On("Start").Return(nil)
	msgr2.On("UPID").Return(&upid.UPID{})
	msgr2.On("Send").Return(fmt.Errorf("Unable to send message"))
	msgr2.On("Stop").Return(nil)
	msgr.On("Route").Return(nil)
	driver.messenger = msgr2
	suite.True(driver.Running())

	// set up an offer
	offer := util.NewOffer(
@@ -534,30 +566,19 @@ func (suite *SchedulerTestSuite) TestSchdulerDriverLaunchTasksWithError() {
		tasks,
		&mesos.Filters{},
	)
	suite.Error(err)
	suite.Equal(mesos.Status_DRIVER_RUNNING, stat)

	suite.Error(err)
}

func (suite *SchedulerTestSuite) TestSchdulerDriverLaunchTasks() {
	messenger := messenger.NewMockedMessenger()
	messenger.On("Start").Return(nil)
	messenger.On("UPID").Return(&upid.UPID{})
	messenger.On("Send").Return(nil)
	messenger.On("Stop").Return(nil)
	messenger.On("Route").Return(nil)

	driver := newTestSchedulerDriver(suite.T(), NewMockScheduler(), suite.framework, suite.master, nil)
	driver.messenger = messenger
	suite.True(driver.Stopped())
	driver := newTestSchedulerDriver(suite.T(), driverConfigMessenger(NewMockScheduler(), suite.framework, suite.master, nil, mockedMessenger()))

	go func() {
		driver.Run()
	}()
	time.Sleep(time.Millisecond * 1)
	<-driver.started
	driver.setConnected(true) // simulated
	suite.False(driver.Stopped())
	suite.Equal(mesos.Status_DRIVER_RUNNING, driver.Status())
	suite.True(driver.Running())

	task := util.NewTaskInfo(
		"simple-task",
@@ -578,24 +599,14 @@ func (suite *SchedulerTestSuite) TestSchdulerDriverLaunchTasks() {
}

func (suite *SchedulerTestSuite) TestSchdulerDriverKillTask() {
	messenger := messenger.NewMockedMessenger()
	messenger.On("Start").Return(nil)
	messenger.On("UPID").Return(&upid.UPID{})
	messenger.On("Send").Return(nil)
	messenger.On("Stop").Return(nil)
	messenger.On("Route").Return(nil)

	driver := newTestSchedulerDriver(suite.T(), NewMockScheduler(), suite.framework, suite.master, nil)
	driver.messenger = messenger
	suite.True(driver.Stopped())
	driver := newTestSchedulerDriver(suite.T(), driverConfigMessenger(NewMockScheduler(), suite.framework, suite.master, nil, mockedMessenger()))

	go func() {
		driver.Run()
	}()
	time.Sleep(time.Millisecond * 1)
	<-driver.started
	driver.setConnected(true) // simulated
	suite.False(driver.Stopped())
	suite.Equal(mesos.Status_DRIVER_RUNNING, driver.Status())
	suite.True(driver.Running())

	stat, err := driver.KillTask(util.NewTaskID("test-task-1"))
	suite.NoError(err)
@@ -603,16 +614,7 @@ func (suite *SchedulerTestSuite) TestSchdulerDriverKillTask() {
}

func (suite *SchedulerTestSuite) TestSchdulerDriverRequestResources() {
	messenger := messenger.NewMockedMessenger()
	messenger.On("Start").Return(nil)
	messenger.On("UPID").Return(&upid.UPID{})
	messenger.On("Send").Return(nil)
	messenger.On("Stop").Return(nil)
	messenger.On("Route").Return(nil)

	driver := newTestSchedulerDriver(suite.T(), NewMockScheduler(), suite.framework, suite.master, nil)
	driver.messenger = messenger
	suite.True(driver.Stopped())
	driver := newTestSchedulerDriver(suite.T(), driverConfigMessenger(NewMockScheduler(), suite.framework, suite.master, nil, mockedMessenger()))

	driver.Start()
	driver.setConnected(true) // simulated
@@ -637,16 +639,7 @@ func (suite *SchedulerTestSuite) TestSchdulerDriverDeclineOffers() {
}

func (suite *SchedulerTestSuite) TestSchdulerDriverReviveOffers() {
	messenger := messenger.NewMockedMessenger()
	messenger.On("Start").Return(nil)
	messenger.On("UPID").Return(&upid.UPID{})
	messenger.On("Send").Return(nil)
	messenger.On("Stop").Return(nil)
	messenger.On("Route").Return(nil)

	driver := newTestSchedulerDriver(suite.T(), NewMockScheduler(), suite.framework, suite.master, nil)
	driver.messenger = messenger
	suite.True(driver.Stopped())
	driver := newTestSchedulerDriver(suite.T(), driverConfigMessenger(NewMockScheduler(), suite.framework, suite.master, nil, mockedMessenger()))

	driver.Start()
	driver.setConnected(true) // simulated
@@ -658,16 +651,7 @@ func (suite *SchedulerTestSuite) TestSchdulerDriverReviveOffers() {
}

func (suite *SchedulerTestSuite) TestSchdulerDriverSendFrameworkMessage() {
	messenger := messenger.NewMockedMessenger()
	messenger.On("Start").Return(nil)
	messenger.On("UPID").Return(&upid.UPID{})
	messenger.On("Send").Return(nil)
	messenger.On("Stop").Return(nil)
	messenger.On("Route").Return(nil)

	driver := newTestSchedulerDriver(suite.T(), NewMockScheduler(), suite.framework, suite.master, nil)
	driver.messenger = messenger
	suite.True(driver.Stopped())
	driver := newTestSchedulerDriver(suite.T(), driverConfigMessenger(NewMockScheduler(), suite.framework, suite.master, nil, mockedMessenger()))

	driver.Start()
	driver.setConnected(true) // simulated
@@ -683,16 +667,7 @@ func (suite *SchedulerTestSuite) TestSchdulerDriverSendFrameworkMessage() {
}

func (suite *SchedulerTestSuite) TestSchdulerDriverReconcileTasks() {
	messenger := messenger.NewMockedMessenger()
	messenger.On("Start").Return(nil)
	messenger.On("UPID").Return(&upid.UPID{})
	messenger.On("Send").Return(nil)
	messenger.On("Stop").Return(nil)
	messenger.On("Route").Return(nil)

	driver := newTestSchedulerDriver(suite.T(), NewMockScheduler(), suite.framework, suite.master, nil)
	driver.messenger = messenger
	suite.True(driver.Stopped())
	driver := newTestSchedulerDriver(suite.T(), driverConfigMessenger(NewMockScheduler(), suite.framework, suite.master, nil, mockedMessenger()))

	driver.Start()
	driver.setConnected(true) // simulated

@@ -49,10 +49,7 @@ func Parse(input string) (*UPID, error) {
}

// String returns the string representation.
func (u *UPID) String() string {
	if u == nil {
		return ""
	}
func (u UPID) String() string {
	return fmt.Sprintf("%s@%s:%s", u.ID, u.Host, u.Port)
}
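
// With the value receiver above, a UPID can never be nil, so the old nil guard
// disappears. A small sketch of the resulting behavior, assuming only this
// package's UPID type:
//
//	u := UPID{ID: "master", Host: "127.0.0.1", Port: "5050"}
//	_ = u.String() // "master@127.0.0.1:5050"
//	p := &u
//	_ = p.String() // same result; Go dereferences p for the value-receiver method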