mirror of https://github.com/etcd-io/dbtester.git

commit 2f17ec2ee5
parent 51310653aa

*: clean up, vendor update
@@ -25,38 +25,43 @@
 	},
 	{
 		"ImportPath": "github.com/coreos/etcd/client",
-		"Comment": "v2.3.0-191-g307cb51",
-		"Rev": "307cb5167c936f65714d30da33b22157c4d011e7"
+		"Comment": "v2.3.0-227-ge8a4ed0",
+		"Rev": "e8a4ed01e20fc5fbb09e2a00b9a31ef30c6e25e5"
 	},
 	{
 		"ImportPath": "github.com/coreos/etcd/clientv3",
-		"Comment": "v2.3.0-191-g307cb51",
-		"Rev": "307cb5167c936f65714d30da33b22157c4d011e7"
+		"Comment": "v2.3.0-227-ge8a4ed0",
+		"Rev": "e8a4ed01e20fc5fbb09e2a00b9a31ef30c6e25e5"
 	},
 	{
 		"ImportPath": "github.com/coreos/etcd/etcdserver/api/v3rpc/rpctypes",
-		"Comment": "v2.3.0-191-g307cb51",
-		"Rev": "307cb5167c936f65714d30da33b22157c4d011e7"
+		"Comment": "v2.3.0-227-ge8a4ed0",
+		"Rev": "e8a4ed01e20fc5fbb09e2a00b9a31ef30c6e25e5"
 	},
 	{
 		"ImportPath": "github.com/coreos/etcd/etcdserver/etcdserverpb",
-		"Comment": "v2.3.0-191-g307cb51",
-		"Rev": "307cb5167c936f65714d30da33b22157c4d011e7"
+		"Comment": "v2.3.0-227-ge8a4ed0",
+		"Rev": "e8a4ed01e20fc5fbb09e2a00b9a31ef30c6e25e5"
 	},
 	{
 		"ImportPath": "github.com/coreos/etcd/pkg/pathutil",
-		"Comment": "v2.3.0-191-g307cb51",
-		"Rev": "307cb5167c936f65714d30da33b22157c4d011e7"
+		"Comment": "v2.3.0-227-ge8a4ed0",
+		"Rev": "e8a4ed01e20fc5fbb09e2a00b9a31ef30c6e25e5"
 	},
+	{
+		"ImportPath": "github.com/coreos/etcd/pkg/tlsutil",
+		"Comment": "v2.3.0-227-ge8a4ed0",
+		"Rev": "e8a4ed01e20fc5fbb09e2a00b9a31ef30c6e25e5"
+	},
 	{
 		"ImportPath": "github.com/coreos/etcd/pkg/types",
-		"Comment": "v2.3.0-191-g307cb51",
-		"Rev": "307cb5167c936f65714d30da33b22157c4d011e7"
+		"Comment": "v2.3.0-227-ge8a4ed0",
+		"Rev": "e8a4ed01e20fc5fbb09e2a00b9a31ef30c6e25e5"
 	},
 	{
 		"ImportPath": "github.com/coreos/etcd/storage/storagepb",
-		"Comment": "v2.3.0-191-g307cb51",
-		"Rev": "307cb5167c936f65714d30da33b22157c4d011e7"
+		"Comment": "v2.3.0-227-ge8a4ed0",
+		"Rev": "e8a4ed01e20fc5fbb09e2a00b9a31ef30c6e25e5"
 	},
 	{
 		"ImportPath": "github.com/dustin/go-humanize",
@@ -67,20 +72,24 @@
 		"Comment": "v0.1-17-g533cd7f",
 		"Rev": "533cd7fd8a85905f67a1753afb4deddc85ea174f"
 	},
+	{
+		"ImportPath": "github.com/ghodss/yaml",
+		"Rev": "73d445a93680fa1a78ae23a5839bad48f32ba1ee"
+	},
 	{
 		"ImportPath": "github.com/gogo/protobuf/gogoproto",
-		"Comment": "v0.2",
-		"Rev": "4168943e65a2802828518e95310aeeed6d84c4e5"
+		"Comment": "v0.2-5-g5b7453b",
+		"Rev": "5b7453b208470fdb0141985c2cfdcf1471686693"
 	},
 	{
 		"ImportPath": "github.com/gogo/protobuf/proto",
-		"Comment": "v0.2",
-		"Rev": "4168943e65a2802828518e95310aeeed6d84c4e5"
+		"Comment": "v0.2-5-g5b7453b",
+		"Rev": "5b7453b208470fdb0141985c2cfdcf1471686693"
 	},
 	{
 		"ImportPath": "github.com/gogo/protobuf/protoc-gen-gogo/descriptor",
-		"Comment": "v0.2",
-		"Rev": "4168943e65a2802828518e95310aeeed6d84c4e5"
+		"Comment": "v0.2-5-g5b7453b",
+		"Rev": "5b7453b208470fdb0141985c2cfdcf1471686693"
 	},
 	{
 		"ImportPath": "github.com/golang/freetype",
@@ -107,9 +107,9 @@ maxClientCnxns={{.MaxClientCnxns}}
 		PreAllocSize:   65536 * 1024,
 		MaxClientCnxns: 60,
 		Peers: []ZookeeperPeer{
-			{MyID: 1, IP: "10.240.0.12"},
-			{MyID: 2, IP: "10.240.0.13"},
-			{MyID: 3, IP: "10.240.0.14"},
+			{MyID: 1, IP: ""},
+			{MyID: 2, IP: ""},
+			{MyID: 3, IP: ""},
 		},
 	}
@@ -219,68 +219,7 @@ func (t *transporterServer) Transfer(ctx context.Context, r *Request) (*Response
 	switch r.Operation {
 	case Request_Start:
 		switch t.req.Database {
-		case Request_etcd:
-			_, err := os.Stat(etcdBinaryPath)
-			if err != nil {
-				return nil, err
-			}
-			if err := os.RemoveAll(etcdDataDir); err != nil {
-				return nil, err
-			}
-			f, err := openToAppend(t.req.DatabaseLogPath)
-			if err != nil {
-				return nil, err
-			}
-			t.logfile = f
-
-			clusterN := len(peerIPs)
-			names := make([]string, clusterN)
-			clientURLs := make([]string, clusterN)
-			peerURLs := make([]string, clusterN)
-			members := make([]string, clusterN)
-			for i, u := range peerIPs {
-				names[i] = fmt.Sprintf("etcd-%d", i+1)
-				clientURLs[i] = fmt.Sprintf("http://%s:2379", u)
-				peerURLs[i] = fmt.Sprintf("http://%s:2380", u)
-				members[i] = fmt.Sprintf("%s=%s", names[i], peerURLs[i])
-			}
-			clusterStr := strings.Join(members, ",")
-			flags := []string{
-				"--name", names[t.req.ServerIndex],
-				"--data-dir", etcdDataDir,
-
-				"--listen-client-urls", clientURLs[t.req.ServerIndex],
-				"--advertise-client-urls", clientURLs[t.req.ServerIndex],
-
-				"--listen-peer-urls", peerURLs[t.req.ServerIndex],
-				"--initial-advertise-peer-urls", peerURLs[t.req.ServerIndex],
-
-				"--initial-cluster-token", etcdToken,
-				"--initial-cluster", clusterStr,
-				"--initial-cluster-state", "new",
-			}
-			flagString := strings.Join(flags, " ")
-
-			cmd := exec.Command(etcdBinaryPath, flags...)
-			cmd.Stdout = f
-			cmd.Stderr = f
-			log.Printf("Starting: %s %s", cmd.Path, flagString)
-			if err := cmd.Start(); err != nil {
-				return nil, err
-			}
-			t.cmd = cmd
-			t.pid = cmd.Process.Pid
-			log.Printf("Started: %s [PID: %d]", cmd.Path, t.pid)
-			processPID = t.pid
-			go func() {
-				if err := cmd.Wait(); err != nil {
-					log.Printf("%s cmd.Wait returned %v", cmd.Path, err)
-					return
-				}
-				log.Printf("Exiting %s", cmd.Path)
-			}()
-
-		case Request_etcd2: // TODO: combine with etcd3
+		case Request_etcdv2, Request_etcdv3:
 			_, err := os.Stat(etcdBinaryPath)
 			if err != nil {
 				return nil, err
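For illustration, a minimal sketch of the `--initial-cluster` string that the removed inline start-up code built; the peer IPs are made-up values, and only the naming and URL layout come from the loop above:

```go
package main

import (
	"fmt"
	"strings"
)

func main() {
	// Hypothetical peer IPs; the real values arrive in the agent request.
	peerIPs := []string{"10.240.0.10", "10.240.0.11", "10.240.0.12"}
	members := make([]string, len(peerIPs))
	for i, u := range peerIPs {
		// Mirrors names[i] + "=" + peerURLs[i] from the block above.
		members[i] = fmt.Sprintf("etcd-%d=http://%s:2380", i+1, u)
	}
	// Passed to etcd as: --initial-cluster etcd-1=http://10.240.0.10:2380,...
	fmt.Println(strings.Join(members, ","))
}
```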
@@ -62,21 +62,21 @@ func (Request_Operation) EnumDescriptor() ([]byte, []int) { return fileDescripto
 type Request_Database int32

 const (
-	Request_etcd      Request_Database = 0
-	Request_etcd2     Request_Database = 1
+	Request_etcdv3    Request_Database = 0
+	Request_etcdv2    Request_Database = 1
 	Request_ZooKeeper Request_Database = 2
 	Request_Consul    Request_Database = 3
 )

 var Request_Database_name = map[int32]string{
-	0: "etcd",
-	1: "etcd2",
+	0: "etcdv3",
+	1: "etcdv2",
 	2: "ZooKeeper",
 	3: "Consul",
 }
 var Request_Database_value = map[string]int32{
-	"etcd":      0,
-	"etcd2":     1,
+	"etcdv3":    0,
+	"etcdv2":    1,
 	"ZooKeeper": 2,
 	"Consul":    3,
 }
@ -962,37 +962,37 @@ var (
|
|||
)
|
||||
|
||||
var fileDescriptorMessage = []byte{
|
||||
// 503 bytes of a gzipped FileDescriptorProto
|
||||
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0x74, 0x53, 0xcd, 0x6e, 0xd3, 0x40,
|
||||
0x10, 0x4e, 0xda, 0x26, 0xb1, 0x27, 0xa4, 0x0d, 0x2b, 0x68, 0x57, 0x15, 0x8a, 0xaa, 0xa8, 0x87,
|
||||
0x1e, 0x48, 0x2a, 0xa5, 0x08, 0x21, 0xc4, 0x05, 0xd2, 0x4b, 0x55, 0x7e, 0x22, 0x87, 0x13, 0x37,
|
||||
0xc7, 0x99, 0xb8, 0xa6, 0x8e, 0x27, 0xec, 0xae, 0x51, 0xda, 0x27, 0x81, 0x37, 0xea, 0x91, 0x47,
|
||||
0xe0, 0xe7, 0x45, 0x98, 0x4c, 0xf3, 0xd3, 0x16, 0x7a, 0x58, 0x69, 0xbe, 0xbf, 0x59, 0x69, 0x76,
|
||||
0x16, 0x6a, 0x63, 0xb4, 0x36, 0x8c, 0xb1, 0x3d, 0x31, 0xe4, 0x48, 0x95, 0xb8, 0xcc, 0xdc, 0x6e,
|
||||
0x2b, 0x4e, 0xdc, 0x59, 0x3e, 0x68, 0x47, 0x34, 0x3e, 0x8c, 0x29, 0xa6, 0x43, 0x51, 0x07, 0xf9,
|
||||
0x48, 0x90, 0x00, 0xa9, 0xae, 0x53, 0xcd, 0xef, 0x25, 0xa8, 0x04, 0xf8, 0x25, 0x47, 0xeb, 0xd4,
|
||||
0x73, 0xf0, 0x69, 0x82, 0x26, 0x74, 0x09, 0x65, 0xba, 0xb8, 0x57, 0x3c, 0xd8, 0xec, 0xe8, 0xb6,
|
||||
0x74, 0x6d, 0xcf, 0x2d, 0xed, 0x0f, 0x0b, 0x3d, 0x58, 0x59, 0xd5, 0x11, 0x78, 0xc3, 0xd0, 0x85,
|
||||
0x83, 0xd0, 0xa2, 0x5e, 0x93, 0xd8, 0xce, 0x9d, 0xd8, 0xf1, 0x5c, 0x0e, 0x96, 0x46, 0xa5, 0xa1,
|
||||
0x32, 0x41, 0x34, 0x27, 0x3d, 0xab, 0xd7, 0x39, 0xe3, 0x07, 0x0b, 0xa8, 0xf6, 0xa0, 0x6a, 0xd1,
|
||||
0x7c, 0x65, 0x90, 0x0d, 0x71, 0xaa, 0x37, 0x58, 0xad, 0x05, 0x37, 0x29, 0xf5, 0x04, 0xfc, 0x94,
|
||||
0xe2, 0x9e, 0xc1, 0x51, 0x32, 0xd5, 0x25, 0x49, 0xaf, 0x08, 0x75, 0x00, 0x5b, 0x8b, 0x5b, 0xde,
|
||||
0x32, 0x19, 0xba, 0x33, 0x5d, 0x16, 0xcf, 0x5d, 0x5a, 0x3d, 0x85, 0x87, 0x63, 0xca, 0x12, 0x47,
|
||||
0x26, 0x40, 0x9b, 0xa7, 0x4e, 0xbc, 0x15, 0xf1, 0xfe, 0x2b, 0xf0, 0x78, 0xb6, 0x63, 0xa2, 0x38,
|
||||
0xc5, 0x6e, 0x4a, 0xf9, 0xb0, 0x67, 0xe8, 0x33, 0x46, 0xee, 0x7d, 0x38, 0x46, 0xed, 0x49, 0xe4,
|
||||
0x1e, 0x55, 0x35, 0x00, 0x2c, 0x77, 0xe2, 0x89, 0x9c, 0xe2, 0x85, 0xf6, 0xc5, 0x7b, 0x83, 0x51,
|
||||
0xdb, 0x50, 0x1e, 0xe4, 0xd1, 0x39, 0x3a, 0x0d, 0xa2, 0xcd, 0x91, 0xda, 0x87, 0xda, 0x25, 0xd1,
|
||||
0x39, 0x22, 0x0f, 0xfa, 0xdd, 0xc5, 0xc9, 0xb1, 0xae, 0xca, 0x24, 0x6e, 0x93, 0xea, 0x19, 0x3c,
|
||||
0x5e, 0x12, 0x3c, 0x80, 0xd7, 0x69, 0x4a, 0x51, 0x3f, 0xb9, 0x44, 0xfd, 0x80, 0xdd, 0xeb, 0xc1,
|
||||
0xff, 0x45, 0xf5, 0x02, 0x76, 0x56, 0x6d, 0xc2, 0x69, 0x37, 0x4d, 0xf8, 0xb9, 0xba, 0xd9, 0x34,
|
||||
0xb3, 0xba, 0x26, 0xb9, 0xfb, 0xe4, 0x66, 0x0b, 0xfc, 0xe5, 0x12, 0x28, 0x1f, 0x4a, 0x7d, 0x17,
|
||||
0x1a, 0x57, 0x2f, 0x28, 0x0f, 0x36, 0xfa, 0x8e, 0x26, 0xf5, 0xa2, 0xaa, 0xce, 0x36, 0xca, 0x0a,
|
||||
0xbd, 0xd6, 0x7c, 0x09, 0xde, 0xe2, 0xf1, 0x67, 0x16, 0x74, 0xd1, 0x90, 0xcd, 0x9c, 0x9b, 0x55,
|
||||
0x1d, 0x76, 0xd7, 0xc0, 0xff, 0x44, 0x74, 0x2a, 0x57, 0xd5, 0xd7, 0x14, 0x40, 0xb9, 0x4b, 0x19,
|
||||
0xcf, 0xbc, 0xbe, 0xde, 0xdc, 0x07, 0x8f, 0x1b, 0x4d, 0x18, 0xca, 0xba, 0xd8, 0x3c, 0x8a, 0x78,
|
||||
0xe3, 0x65, 0x33, 0xbd, 0x60, 0x01, 0x3b, 0xaf, 0xa0, 0xfa, 0xd1, 0x84, 0x19, 0xfb, 0x8c, 0x43,
|
||||
0xa3, 0x5a, 0xe0, 0x09, 0x1c, 0x71, 0xbd, 0x79, 0x7b, 0x0d, 0x77, 0xb7, 0x96, 0xf8, 0xba, 0x6b,
|
||||
0xb3, 0xf0, 0xe6, 0xd1, 0xd5, 0xaf, 0x46, 0xe1, 0xea, 0x77, 0xa3, 0xf8, 0x83, 0xcf, 0x4f, 0x3e,
|
||||
0xdf, 0xfe, 0x34, 0x0a, 0x83, 0xb2, 0x7c, 0x8e, 0xa3, 0xbf, 0x01, 0x00, 0x00, 0xff, 0xff, 0x92,
|
||||
0x14, 0xa6, 0x10, 0x63, 0x03, 0x00, 0x00,
|
||||
// 505 bytes of a gzipped FileDescriptorProto
|
||||
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0x74, 0x53, 0xcd, 0x4e, 0xdb, 0x4e,
|
||||
0x10, 0xcf, 0x07, 0x49, 0xec, 0xc9, 0x3f, 0x90, 0xff, 0xaa, 0x85, 0x15, 0xaa, 0x22, 0x14, 0x71,
|
||||
0xe0, 0xd0, 0x04, 0x29, 0xa9, 0xaa, 0x1e, 0xda, 0x43, 0x1b, 0x2e, 0x88, 0x7e, 0x44, 0x4e, 0x4f,
|
||||
0xbd, 0x39, 0xce, 0xc4, 0xb8, 0x38, 0x9e, 0xd4, 0xbb, 0x46, 0x81, 0x27, 0xe9, 0xa1, 0x0f, 0xc4,
|
||||
0xb1, 0x8f, 0xd0, 0x8f, 0x17, 0xe9, 0x30, 0xc4, 0x09, 0xd0, 0x72, 0x58, 0x69, 0x7e, 0x5f, 0xb3,
|
||||
0xd2, 0xec, 0x2c, 0x34, 0x66, 0x68, 0x8c, 0x1f, 0x62, 0x77, 0x9e, 0x92, 0x25, 0x55, 0xe1, 0x32,
|
||||
0xb1, 0xbb, 0x9d, 0x30, 0xb2, 0xa7, 0xd9, 0xb8, 0x1b, 0xd0, 0xec, 0x30, 0xa4, 0x90, 0x0e, 0x45,
|
||||
0x1d, 0x67, 0x53, 0x41, 0x02, 0xa4, 0xba, 0x49, 0xb5, 0xbf, 0x55, 0xa0, 0xe6, 0xe1, 0x97, 0x0c,
|
||||
0x8d, 0x55, 0xcf, 0xc1, 0xa5, 0x39, 0xa6, 0xbe, 0x8d, 0x28, 0xd1, 0xc5, 0xbd, 0xe2, 0xc1, 0x66,
|
||||
0x4f, 0x77, 0xa5, 0x6b, 0x77, 0x69, 0xe9, 0x7e, 0xc8, 0x75, 0x6f, 0x6d, 0x55, 0x7d, 0x70, 0x26,
|
||||
0xbe, 0xf5, 0xc7, 0xbe, 0x41, 0x5d, 0x92, 0xd8, 0xce, 0xbd, 0xd8, 0xd1, 0x52, 0xf6, 0x56, 0x46,
|
||||
0xa5, 0xa1, 0x36, 0x47, 0x4c, 0x8f, 0x87, 0x46, 0x97, 0x39, 0xe3, 0x7a, 0x39, 0x54, 0x7b, 0x50,
|
||||
0x37, 0x98, 0x9e, 0x33, 0x48, 0x26, 0xb8, 0xd0, 0x1b, 0xac, 0x36, 0xbc, 0xdb, 0x94, 0x7a, 0x02,
|
||||
0x6e, 0x4c, 0xe1, 0x30, 0xc5, 0x69, 0xb4, 0xd0, 0x15, 0x49, 0xaf, 0x09, 0x75, 0x00, 0x5b, 0xf9,
|
||||
0x2d, 0x6f, 0x99, 0xf4, 0xed, 0xa9, 0xae, 0x8a, 0xe7, 0x3e, 0xad, 0x9e, 0xc2, 0xff, 0x33, 0x4a,
|
||||
0x22, 0x4b, 0xa9, 0x87, 0x26, 0x8b, 0xad, 0x78, 0x6b, 0xe2, 0xfd, 0x5b, 0xe0, 0xf1, 0x6c, 0x87,
|
||||
0x44, 0x61, 0x8c, 0x83, 0x98, 0xb2, 0xc9, 0x30, 0xa5, 0xcf, 0x18, 0xd8, 0xf7, 0xfe, 0x0c, 0xb5,
|
||||
0x23, 0x91, 0x07, 0x54, 0xd5, 0x02, 0x30, 0xdc, 0x89, 0x27, 0x72, 0x82, 0x17, 0xda, 0x15, 0xef,
|
||||
0x2d, 0x46, 0x6d, 0x43, 0x75, 0x9c, 0x05, 0x67, 0x68, 0x35, 0x88, 0xb6, 0x44, 0x6a, 0x1f, 0x1a,
|
||||
0x97, 0x44, 0x67, 0x88, 0x3c, 0xe8, 0x77, 0x17, 0xc7, 0x47, 0xba, 0x2e, 0x93, 0xb8, 0x4b, 0xaa,
|
||||
0x67, 0xf0, 0x78, 0x45, 0xf0, 0x00, 0x5e, 0xc7, 0x31, 0x05, 0xa3, 0xe8, 0x12, 0xf5, 0x7f, 0xec,
|
||||
0x2e, 0x7b, 0xff, 0x16, 0xd5, 0x0b, 0xd8, 0x59, 0xb7, 0xf1, 0x17, 0x83, 0x38, 0xe2, 0xe7, 0x1a,
|
||||
0x24, 0x8b, 0xc4, 0xe8, 0x86, 0xe4, 0x1e, 0x92, 0xdb, 0x1d, 0x70, 0x57, 0x4b, 0xa0, 0x5c, 0xa8,
|
||||
0x8c, 0xac, 0x9f, 0xda, 0x66, 0x41, 0x39, 0xb0, 0x31, 0xb2, 0x34, 0x6f, 0x16, 0x55, 0xfd, 0x7a,
|
||||
0xa3, 0x8c, 0xd0, 0xa5, 0xf6, 0x2b, 0x70, 0xf2, 0xc7, 0x57, 0x00, 0x55, 0xb4, 0xc1, 0xe4, 0xbc,
|
||||
0xcf, 0xf6, 0xbc, 0xee, 0x71, 0xa0, 0x01, 0xee, 0x27, 0xa2, 0x13, 0xb9, 0xad, 0x59, 0xba, 0x96,
|
||||
0x06, 0x94, 0xf0, 0xd8, 0x9b, 0xe5, 0xf6, 0x3e, 0x38, 0xdc, 0x6b, 0xce, 0x50, 0x36, 0xc6, 0x64,
|
||||
0x41, 0xc0, 0x4b, 0x2f, 0xcb, 0xe9, 0x78, 0x39, 0xec, 0xbd, 0x84, 0xfa, 0xc7, 0xd4, 0x4f, 0xd8,
|
||||
0x97, 0x5a, 0x4c, 0x55, 0x07, 0x1c, 0x81, 0x53, 0xae, 0x37, 0xef, 0x6e, 0xe2, 0xee, 0xd6, 0x0a,
|
||||
0xdf, 0x74, 0x6d, 0x17, 0xde, 0x3c, 0xba, 0xfa, 0xd9, 0x2a, 0x5c, 0xfd, 0x6a, 0x15, 0xbf, 0xf3,
|
||||
0xf9, 0xc1, 0xe7, 0xeb, 0xef, 0x56, 0x61, 0x5c, 0x95, 0xff, 0xd1, 0xff, 0x13, 0x00, 0x00, 0xff,
|
||||
0xff, 0x9b, 0x3c, 0x4e, 0x34, 0x66, 0x03, 0x00, 0x00,
|
||||
}
|
||||
|
|
|
@@ -19,8 +19,8 @@ message Request {
     Restart = 2;
   }
   enum Database {
-    etcd = 0;
-    etcd2 = 1;
+    etcdv3 = 0;
+    etcdv2 = 1;
     ZooKeeper = 2;
     Consul = 3;
   }
@@ -63,7 +63,7 @@ var (
 )

 func init() {
-	StartCommand.PersistentFlags().StringVar(&globalFlags.Database, "database", "", "etcd, etcd2, zookeeper, zk, consul.")
+	StartCommand.PersistentFlags().StringVar(&globalFlags.Database, "database", "", "etcdv2, etcdv3, zookeeper, zk, consul.")
 	StartCommand.PersistentFlags().StringSliceVar(&globalFlags.AgentEndpoints, "agent-endpoints", []string{""}, "Endpoints to send client requests to, then it automatically configures.")
 	StartCommand.PersistentFlags().Int64Var(&globalFlags.ZookeeperPreAllocSize, "zk-pre-alloc-size", 65536*1024, "Disk pre-allocation size in bytes.")
 	StartCommand.PersistentFlags().Int64Var(&globalFlags.ZookeeperMaxClientCnxns, "zk-max-client-conns", 5000, "Maximum number of concurrent Zookeeper connection.")
@@ -98,10 +98,10 @@ func CommandFunc(cmd *cobra.Command, args []string) error {
 	}

 	switch globalFlags.Database {
-	case "etcd":
-		req.Database = agent.Request_etcd
-	case "etcd2":
-		req.Database = agent.Request_etcd2
+	case "etcdv2":
+		req.Database = agent.Request_etcdv2
+	case "etcdv3":
+		req.Database = agent.Request_etcdv3
 	case "zookeeper":
 		req.Database = agent.Request_ZooKeeper
 	case "consul":
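A hedged sketch of building the request with the renamed enum values; the import path of the generated agent package is assumed, everything else appears in this diff:

```go
package main

import (
	"fmt"

	"github.com/coreos/dbtester/agent" // assumed location of the generated package
)

func main() {
	req := &agent.Request{
		Operation: agent.Request_Start,
		Database:  agent.Request_etcdv3, // previously agent.Request_etcd
	}
	fmt.Println(req.Database) // expected to print "etcdv3"
}
```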
main.go (+2)
@@ -41,6 +41,7 @@ import (
 	"github.com/coreos/dbtester/analyze"
 	"github.com/coreos/dbtester/bench"
 	"github.com/coreos/dbtester/control"
+	"github.com/coreos/dbtester/script"
 	"github.com/coreos/dbtester/upload"

 	"github.com/spf13/cobra"
@@ -65,6 +66,7 @@ func init() {
 	rootCommand.AddCommand(control.StartCommand)
 	rootCommand.AddCommand(control.StopCommand)
 	rootCommand.AddCommand(control.RestartCommand)
+	rootCommand.AddCommand(script.Command)
 	rootCommand.AddCommand(upload.Command)
 }
@@ -0,0 +1,16 @@
// Copyright 2016 CoreOS, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

// Package script implements script command.
package script
@@ -0,0 +1,161 @@
package script

import (
	"bytes"
	"os"
	"text/template"

	"github.com/spf13/cobra"
)

type scriptConfig struct {
	DBName      string
	BucketName  string
	LogPrefix   string
	ClientPort  string
	ProjectName string
	KeyPath     string
	Conns       int
	Clients     int
	ValSize     int
	Total       int
}

var (
	Command = &cobra.Command{
		Use:        "script",
		Short:      "Generates cloud provisioning script.",
		SuggestFor: []string{"scrt"},
		RunE:       scriptCommandFunc,
	}

	outputPath string
	cfg        = scriptConfig{}
)

func init() {
	Command.PersistentFlags().StringVarP(&outputPath, "output", "o", "script.sh", "File path to store script.")
	Command.PersistentFlags().StringVarP(&cfg.DBName, "db-name", "d", "etcdv3", "Name of database (etcdv2, etcdv3, zookeeper, zk, consul).")
	Command.PersistentFlags().StringVarP(&cfg.BucketName, "bucket-name", "b", "", "Name of bucket to store results.")
	Command.PersistentFlags().StringVarP(&cfg.LogPrefix, "log-prefix", "p", "bench-01", "Prefix to name instances, logs files.")
	Command.PersistentFlags().StringVarP(&cfg.ClientPort, "client-port", "c", "2379", "2379 for etcd, 2181 for Zookeeper, 8500 for Consul.")
	Command.PersistentFlags().StringVarP(&cfg.ProjectName, "project-name", "n", "etcd-development", "Project name.")
	Command.PersistentFlags().StringVarP(&cfg.KeyPath, "key-path", "k", "$HOME/key.json", "Key path.")
	Command.PersistentFlags().IntVar(&cfg.Conns, "conns", 1, "conns.")
	Command.PersistentFlags().IntVar(&cfg.Clients, "clients", 1, "clients.")
	Command.PersistentFlags().IntVar(&cfg.ValSize, "val-size", 256, "val-size.")
	Command.PersistentFlags().IntVar(&cfg.Total, "total", 3000000, "total.")
}

func scriptCommandFunc(cmd *cobra.Command, args []string) error {
	tpl := template.Must(template.New("scriptTemplate").Parse(scriptTemplate))
	buf := new(bytes.Buffer)
	if err := tpl.Execute(buf, cfg); err != nil {
		return err
	}
	return toFile(buf.String(), outputPath)
}

const scriptTemplate = `
|
||||
#!/usr/bin/env bash
|
||||
set -e
|
||||
|
||||
gcloud compute instances list
|
||||
gcloud compute instances create {{.LogPrefix}}-{{.DBName}}-1 --custom-cpu=8 --custom-memory=16 --image="ubuntu-15-10" --boot-disk-size=50 --boot-disk-type="pd-ssd" --local-ssd interface=SCSI --zone us-central1-a
|
||||
gcloud compute instances create {{.LogPrefix}}-{{.DBName}}-2 --custom-cpu=8 --custom-memory=16 --image="ubuntu-15-10" --boot-disk-size=50 --boot-disk-type="pd-ssd" --local-ssd interface=SCSI --zone us-central1-a
|
||||
gcloud compute instances create {{.LogPrefix}}-{{.DBName}}-3 --custom-cpu=8 --custom-memory=16 --image="ubuntu-15-10" --boot-disk-size=50 --boot-disk-type="pd-ssd" --local-ssd interface=SCSI --zone us-central1-a
|
||||
gcloud compute instances create {{.LogPrefix}}-{{.DBName}}-tester --custom-cpu=16 --custom-memory=30 --image="ubuntu-15-10" --boot-disk-size=50 --boot-disk-type="pd-ssd" --zone us-central1-a
|
||||
gcloud compute instances list
|
||||
|
||||
gcloud compute ssh {{.LogPrefix}}-{{.DBName}}-1
|
||||
gcloud compute ssh {{.LogPrefix}}-{{.DBName}}-2
|
||||
gcloud compute ssh {{.LogPrefix}}-{{.DBName}}-3
|
||||
gcloud compute ssh {{.LogPrefix}}-{{.DBName}}-tester
|
||||
|
||||
|
||||
#########
|
||||
# agent #
|
||||
#########
|
||||
GO_VERSION="1.6" && cd /usr/local && sudo rm -rf ./go && sudo curl -s https://storage.googleapis.com/golang/go$GO_VERSION.linux-amd64.tar.gz | sudo tar -v -C /usr/local/ -xz && cd $HOME;
|
||||
echo "export GOPATH=$(echo $HOME)/go" >> $HOME/.bashrc
|
||||
PATH_VAR=$PATH":/usr/local/go/bin:$(echo $HOME)/go/bin"
|
||||
echo "export PATH=$(echo $PATH_VAR)" >> $HOME/.bashrc
|
||||
export GOPATH=$(echo $HOME)/go
|
||||
PATH_VAR=$PATH":/usr/local/go/bin:$(echo $HOME)/go/bin"
|
||||
export PATH=$(echo $PATH_VAR)
|
||||
go get -v -u -f github.com/coreos/dbtester
|
||||
curl https://storage.googleapis.com/etcd/dbtester_agent.sh | bash -s f /mnt/ssd0
|
||||
|
||||
cd /mnt/ssd0
|
||||
ls /mnt/ssd0
|
||||
cat /mnt/ssd0/agent.log
|
||||
|
||||
|
||||
##########
|
||||
# tester #
|
||||
##########
|
||||
ulimit -n 3000
|
||||
ulimit -n
|
||||
GO_VERSION="1.6" && cd /usr/local && sudo rm -rf ./go && sudo curl -s https://storage.googleapis.com/golang/go$GO_VERSION.linux-amd64.tar.gz | sudo tar -v -C /usr/local/ -xz && cd $HOME;
|
||||
echo "export GOPATH=$(echo $HOME)/go" >> $HOME/.bashrc
|
||||
PATH_VAR=$PATH":/usr/local/go/bin:$(echo $HOME)/go/bin"
|
||||
echo "export PATH=$(echo $PATH_VAR)" >> $HOME/.bashrc
|
||||
export GOPATH=$(echo $HOME)/go
|
||||
PATH_VAR=$PATH":/usr/local/go/bin:$(echo $HOME)/go/bin"
|
||||
export PATH=$(echo $PATH_VAR)
|
||||
psn ps-kill --force -s dbtester
|
||||
go get -v -u -f github.com/coreos/dbtester
|
||||
|
||||
# start test
|
||||
AGENT_ENDPOINTS='___IP_ADDR_1___:3500,___IP_ADDR_2___:3500,___IP_ADDR_3___:3500'
|
||||
DATABASE_ENDPOINTS='___IP_ADDR_1___:{{.ClientPort}},___IP_ADDR_2___:{{.ClientPort}},___IP_ADDR_3___:{{.ClientPort}}'
|
||||
|
||||
# start database
|
||||
dbtester start --agent-endpoints=$(echo $AGENT_ENDPOINTS) --database={{.DBName}} --database-log-path=database.log --log-prefix={{.LogPrefix}}-{{.DBName}} --google-cloud-project-name={{.ProjectName}} --key-path={{.KeyPath}} --bucket={{.BucketName}} --monitor-result-path=monitor.csv;
|
||||
|
||||
cat /mnt/ssd0/agent.log
|
||||
cat /mnt/ssd0/database.log
|
||||
|
||||
# start benchmark
|
||||
nohup dbtester bench --database={{.DBName}} --sample --no-histogram --csv-result-path={{.LogPrefix}}-{{.DBName}}-timeseries.csv --google-cloud-project-name={{.ProjectName}} --key-path={{.KeyPath}} --bucket={{.BucketName}} --endpoints=$DATABASE_ENDPOINTS --conns={{.Conns}} --clients={{.Clients}} put --key-size=64 --val-size={{.ValSize}} --total={{.Total}} > {{.LogPrefix}}-{{.DBName}}-result.txt 2>&1 &
|
||||
|
||||
cat {{.LogPrefix}}-{{.DBName}}-result.txt
|
||||
|
||||
# benchmark done!
|
||||
# stop database to trigger uploading in remote machines
|
||||
dbtester stop --agent-endpoints=$(echo $AGENT_ENDPOINTS)
|
||||
|
||||
dbtester upload --from={{.LogPrefix}}-{{.DBName}}-timeseries.csv --to={{.LogPrefix}}-{{.DBName}}-timeseries.csv --google-cloud-project-name={{.ProjectName}} --key-path={{.KeyPath}} --bucket={{.BucketName}}
|
||||
|
||||
|
||||
####################
|
||||
# in case of panic #
|
||||
####################
|
||||
dbtester upload --from=/mnt/ssd0/agent.log --to={{.LogPrefix}}-{{.DBName}}-1-agent.log --google-cloud-project-name={{.ProjectName}} --key-path={{.KeyPath}} --bucket={{.BucketName}}
|
||||
dbtester upload --from=/mnt/ssd0/database.log --to={{.LogPrefix}}-{{.DBName}}-1-database.log --google-cloud-project-name={{.ProjectName}} --key-path={{.KeyPath}} --bucket={{.BucketName}}
|
||||
dbtester upload --from=/mnt/ssd0/monitor.csv --to={{.LogPrefix}}-{{.DBName}}-1-monitor.csv --google-cloud-project-name={{.ProjectName}} --key-path={{.KeyPath}} --bucket={{.BucketName}}
|
||||
|
||||
dbtester upload --from=/mnt/ssd0/agent.log --to={{.LogPrefix}}-{{.DBName}}-2-agent.log --google-cloud-project-name={{.ProjectName}} --key-path={{.KeyPath}} --bucket={{.BucketName}}
|
||||
dbtester upload --from=/mnt/ssd0/database.log --to={{.LogPrefix}}-{{.DBName}}-2-database.log --google-cloud-project-name={{.ProjectName}} --key-path={{.KeyPath}} --bucket={{.BucketName}}
|
||||
dbtester upload --from=/mnt/ssd0/monitor.csv --to={{.LogPrefix}}-{{.DBName}}-2-monitor.csv --google-cloud-project-name={{.ProjectName}} --key-path={{.KeyPath}} --bucket={{.BucketName}}
|
||||
|
||||
dbtester upload --from=/mnt/ssd0/agent.log --to={{.LogPrefix}}-{{.DBName}}-3-agent.log --google-cloud-project-name={{.ProjectName}} --key-path={{.KeyPath}} --bucket={{.BucketName}}
|
||||
dbtester upload --from=/mnt/ssd0/database.log --to={{.LogPrefix}}-{{.DBName}}-3-database.log --google-cloud-project-name={{.ProjectName}} --key-path={{.KeyPath}} --bucket={{.BucketName}}
|
||||
dbtester upload --from=/mnt/ssd0/monitor.csv --to={{.LogPrefix}}-{{.DBName}}-3-monitor.csv --google-cloud-project-name={{.ProjectName}} --key-path={{.KeyPath}} --bucket={{.BucketName}}
|
||||
|
||||
`

func toFile(txt, fpath string) error {
	f, err := os.OpenFile(fpath, os.O_RDWR|os.O_TRUNC, 0777)
	if err != nil {
		f, err = os.Create(fpath)
		if err != nil {
			return err
		}
	}
	defer f.Close()
	if _, err := f.WriteString(txt); err != nil {
		return err
	}
	return nil
}
@@ -15,7 +15,6 @@
 package clientv3

 import (
-	"crypto/tls"
 	"errors"
 	"io/ioutil"
 	"log"
@@ -53,26 +52,6 @@ type Client struct {
 	cancel context.CancelFunc
 }

-// EndpointDialer is a policy for choosing which endpoint to dial next
-type EndpointDialer func(*Client) (*grpc.ClientConn, error)
-
-type Config struct {
-	// Endpoints is a list of URLs
-	Endpoints []string
-
-	// RetryDialer chooses the next endpoint to use
-	RetryDialer EndpointDialer
-
-	// DialTimeout is the timeout for failing to establish a connection.
-	DialTimeout time.Duration
-
-	// TLS holds the client secure credentials, if any.
-	TLS *tls.Config
-
-	// Logger is the logger used by client library.
-	Logger Logger
-}
-
 // New creates a new etcdv3 client from a given configuration.
 func New(cfg Config) (*Client, error) {
 	if cfg.RetryDialer == nil {
@@ -90,6 +69,15 @@ func NewFromURL(url string) (*Client, error) {
 	return New(Config{Endpoints: []string{url}})
 }

+// NewFromConfigFile creates a new etcdv3 client from a configuration file.
+func NewFromConfigFile(path string) (*Client, error) {
+	cfg, err := configFromFile(path)
+	if err != nil {
+		return nil, err
+	}
+	return New(*cfg)
+}
+
 // Close shuts down the client's etcd connections.
 func (c *Client) Close() error {
 	c.mu.Lock()
@@ -0,0 +1,111 @@
// Copyright 2016 CoreOS, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package clientv3

import (
	"crypto/tls"
	"crypto/x509"
	"io/ioutil"
	"time"

	"github.com/coreos/etcd/pkg/tlsutil"
	"github.com/ghodss/yaml"
	"google.golang.org/grpc"
)

// EndpointDialer is a policy for choosing which endpoint to dial next
type EndpointDialer func(*Client) (*grpc.ClientConn, error)

type Config struct {
	// Endpoints is a list of URLs
	Endpoints []string

	// RetryDialer chooses the next endpoint to use
	RetryDialer EndpointDialer

	// DialTimeout is the timeout for failing to establish a connection.
	DialTimeout time.Duration

	// TLS holds the client secure credentials, if any.
	TLS *tls.Config

	// Logger is the logger used by client library.
	Logger Logger
}

type YamlConfig struct {
	Endpoints             []string      `json:"endpoints"`
	DialTimeout           time.Duration `json:"dial-timeout"`
	InsecureTransport     bool          `json:"insecure-transport"`
	InsecureSkipTLSVerify bool          `json:"insecure-skip-tls-verify"`
	Certfile              string        `json:"cert-file"`
	Keyfile               string        `json:"key-file"`
	CAfile                string        `json:"ca-file"`
}

func configFromFile(fpath string) (*Config, error) {
	b, err := ioutil.ReadFile(fpath)
	if err != nil {
		return nil, err
	}

	yc := &YamlConfig{}

	err = yaml.Unmarshal(b, yc)
	if err != nil {
		return nil, err
	}

	cfg := &Config{
		Endpoints:   yc.Endpoints,
		DialTimeout: yc.DialTimeout,
	}

	if yc.InsecureTransport {
		cfg.TLS = nil
		return cfg, nil
	}

	var (
		cert *tls.Certificate
		cp   *x509.CertPool
	)

	if yc.Certfile != "" && yc.Keyfile != "" {
		cert, err = tlsutil.NewCert(yc.Certfile, yc.Keyfile, nil)
		if err != nil {
			return nil, err
		}
	}

	if yc.CAfile != "" {
		cp, err = tlsutil.NewCertPool([]string{yc.CAfile})
		if err != nil {
			return nil, err
		}
	}

	tlscfg := &tls.Config{
		MinVersion:         tls.VersionTLS10,
		InsecureSkipVerify: yc.InsecureSkipTLSVerify,
		RootCAs:            cp,
	}
	if cert != nil {
		tlscfg.Certificates = []tls.Certificate{*cert}
	}
	cfg.TLS = tlscfg

	return cfg, nil
}
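A hedged usage sketch of the new file-based constructor; the file name is illustrative and the keys mirror the json tags on YamlConfig above:

```go
package main

import (
	"log"

	"github.com/coreos/etcd/clientv3"
)

func main() {
	// client.yaml (illustrative) could contain, per the tags above:
	//   endpoints: ["127.0.0.1:2379"]
	//   dial-timeout: 5000000000   # time.Duration in nanoseconds
	//   insecure-transport: true
	cli, err := clientv3.NewFromConfigFile("client.yaml")
	if err != nil {
		log.Fatal(err)
	}
	defer cli.Close()
	log.Println("client ready")
}
```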
@@ -26,6 +26,7 @@ type (
 	DefragmentResponse pb.DefragmentResponse
 	AlarmResponse      pb.AlarmResponse
 	AlarmMember        pb.AlarmMember
+	StatusResponse     pb.StatusResponse
 )

 type Maintenance interface {
@@ -43,6 +44,9 @@ type Maintenance interface {
 	// To defragment multiple members in the cluster, user need to call defragment multiple
 	// times with different endpoints.
 	Defragment(ctx context.Context, endpoint string) (*DefragmentResponse, error)
+
+	// Status gets the status of the member.
+	Status(ctx context.Context, endpoint string) (*StatusResponse, error)
 }

 type maintenance struct {
@@ -128,6 +132,19 @@ func (m *maintenance) Defragment(ctx context.Context, endpoint string) (*Defragm
 	return (*DefragmentResponse)(resp), nil
 }

+func (m *maintenance) Status(ctx context.Context, endpoint string) (*StatusResponse, error) {
+	conn, err := m.c.Dial(endpoint)
+	if err != nil {
+		return nil, err
+	}
+	remote := pb.NewMaintenanceClient(conn)
+	resp, err := remote.Status(ctx, &pb.StatusRequest{})
+	if err != nil {
+		return nil, err
+	}
+	return (*StatusResponse)(resp), nil
+}
+
 func (m *maintenance) getRemote() pb.MaintenanceClient {
 	m.mu.Lock()
 	defer m.mu.Unlock()
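A hedged sketch of calling the new Status method from application code; the NewMaintenance constructor and the x/net context import are assumed (they are not part of this hunk), while Status, StatusResponse, and Close do appear in the diff:

```go
package main

import (
	"fmt"
	"log"
	"time"

	"github.com/coreos/etcd/clientv3"
	"golang.org/x/net/context"
)

func main() {
	cli, err := clientv3.New(clientv3.Config{
		Endpoints:   []string{"127.0.0.1:2379"},
		DialTimeout: 5 * time.Second,
	})
	if err != nil {
		log.Fatal(err)
	}
	defer cli.Close()

	m := clientv3.NewMaintenance(cli) // assumed constructor
	resp, err := m.Status(context.Background(), "127.0.0.1:2379")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("member version:", resp.Version)
}
```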
@@ -95,7 +95,7 @@ func OpDelete(key string, opts ...OpOption) Op {
 		panic("unexpected revision in delete")
 	case ret.sort != nil:
 		panic("unexpected sort in delete")
-	case ret.serializable != false:
+	case ret.serializable:
 		panic("unexpected serializable in delete")
 	}
 	return ret
@@ -113,7 +113,7 @@ func OpPut(key, val string, opts ...OpOption) Op {
 		panic("unexpected revision in put")
 	case ret.sort != nil:
 		panic("unexpected sort in put")
-	case ret.serializable != false:
+	case ret.serializable:
 		panic("unexpected serializable in delete")
 	}
 	return ret
@@ -129,7 +129,7 @@ func opWatch(key string, opts ...OpOption) Op {
 		panic("unexpected limit in watch")
 	case ret.sort != nil:
 		panic("unexpected sort in watch")
-	case ret.serializable != false:
+	case ret.serializable:
 		panic("unexpected serializable in watch")
 	}
 	return ret
@@ -521,7 +521,7 @@ func (w *watcher) resumeWatchers(wc pb.Watch_WatchClient) error {
 		resp, err := wc.Recv()
 		if err != nil {
 			return err
-		} else if len(resp.Events) != 0 || resp.Created != true {
+		} else if len(resp.Events) != 0 || !resp.Created {
 			return fmt.Errorf("watcher: unexpected response (%+v)", resp)
 		}
@@ -55,6 +55,8 @@
 	AlarmRequest
 	AlarmMember
 	AlarmResponse
+	StatusRequest
+	StatusResponse
 	AuthEnableRequest
 	AuthDisableRequest
 	AuthenticateRequest
@@ -1249,6 +1249,29 @@ func (m *AlarmResponse) GetAlarms() []*AlarmMember {
 	return nil
 }

+type StatusRequest struct {
+}
+
+func (m *StatusRequest) Reset()         { *m = StatusRequest{} }
+func (m *StatusRequest) String() string { return proto.CompactTextString(m) }
+func (*StatusRequest) ProtoMessage()    {}
+
+type StatusResponse struct {
+	Header  *ResponseHeader `protobuf:"bytes,1,opt,name=header" json:"header,omitempty"`
+	Version string          `protobuf:"bytes,2,opt,name=version,proto3" json:"version,omitempty"`
+}
+
+func (m *StatusResponse) Reset()         { *m = StatusResponse{} }
+func (m *StatusResponse) String() string { return proto.CompactTextString(m) }
+func (*StatusResponse) ProtoMessage()    {}
+
+func (m *StatusResponse) GetHeader() *ResponseHeader {
+	if m != nil {
+		return m.Header
+	}
+	return nil
+}
+
 type AuthEnableRequest struct {
 }
@ -1603,6 +1626,8 @@ func init() {
|
|||
proto.RegisterType((*AlarmRequest)(nil), "etcdserverpb.AlarmRequest")
|
||||
proto.RegisterType((*AlarmMember)(nil), "etcdserverpb.AlarmMember")
|
||||
proto.RegisterType((*AlarmResponse)(nil), "etcdserverpb.AlarmResponse")
|
||||
proto.RegisterType((*StatusRequest)(nil), "etcdserverpb.StatusRequest")
|
||||
proto.RegisterType((*StatusResponse)(nil), "etcdserverpb.StatusResponse")
|
||||
proto.RegisterType((*AuthEnableRequest)(nil), "etcdserverpb.AuthEnableRequest")
|
||||
proto.RegisterType((*AuthDisableRequest)(nil), "etcdserverpb.AuthDisableRequest")
|
||||
proto.RegisterType((*AuthenticateRequest)(nil), "etcdserverpb.AuthenticateRequest")
|
||||
|
|
@ -2250,6 +2275,8 @@ var _Cluster_serviceDesc = grpc.ServiceDesc{
|
|||
type MaintenanceClient interface {
|
||||
// Alarm activates, deactivates, and queries alarms regarding cluster health.
|
||||
Alarm(ctx context.Context, in *AlarmRequest, opts ...grpc.CallOption) (*AlarmResponse, error)
|
||||
// Status gets the status of the member.
|
||||
Status(ctx context.Context, in *StatusRequest, opts ...grpc.CallOption) (*StatusResponse, error)
|
||||
Defragment(ctx context.Context, in *DefragmentRequest, opts ...grpc.CallOption) (*DefragmentResponse, error)
|
||||
// Hash returns the hash of the local KV state for consistency checking purpose.
|
||||
// This is designed for testing; do not use this in production when there
|
||||
|
|
@ -2274,6 +2301,15 @@ func (c *maintenanceClient) Alarm(ctx context.Context, in *AlarmRequest, opts ..
|
|||
return out, nil
|
||||
}
|
||||
|
||||
func (c *maintenanceClient) Status(ctx context.Context, in *StatusRequest, opts ...grpc.CallOption) (*StatusResponse, error) {
|
||||
out := new(StatusResponse)
|
||||
err := grpc.Invoke(ctx, "/etcdserverpb.Maintenance/Status", in, out, c.cc, opts...)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return out, nil
|
||||
}
|
||||
|
||||
func (c *maintenanceClient) Defragment(ctx context.Context, in *DefragmentRequest, opts ...grpc.CallOption) (*DefragmentResponse, error) {
|
||||
out := new(DefragmentResponse)
|
||||
err := grpc.Invoke(ctx, "/etcdserverpb.Maintenance/Defragment", in, out, c.cc, opts...)
|
||||
|
|
@ -2297,6 +2333,8 @@ func (c *maintenanceClient) Hash(ctx context.Context, in *HashRequest, opts ...g
|
|||
type MaintenanceServer interface {
|
||||
// Alarm activates, deactivates, and queries alarms regarding cluster health.
|
||||
Alarm(context.Context, *AlarmRequest) (*AlarmResponse, error)
|
||||
// Status gets the status of the member.
|
||||
Status(context.Context, *StatusRequest) (*StatusResponse, error)
|
||||
Defragment(context.Context, *DefragmentRequest) (*DefragmentResponse, error)
|
||||
// Hash returns the hash of the local KV state for consistency checking purpose.
|
||||
// This is designed for testing; do not use this in production when there
|
||||
|
|
@ -2320,6 +2358,18 @@ func _Maintenance_Alarm_Handler(srv interface{}, ctx context.Context, dec func(i
|
|||
return out, nil
|
||||
}
|
||||
|
||||
func _Maintenance_Status_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error) (interface{}, error) {
|
||||
in := new(StatusRequest)
|
||||
if err := dec(in); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
out, err := srv.(MaintenanceServer).Status(ctx, in)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return out, nil
|
||||
}
|
||||
|
||||
func _Maintenance_Defragment_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error) (interface{}, error) {
|
||||
in := new(DefragmentRequest)
|
||||
if err := dec(in); err != nil {
|
||||
|
|
@ -2352,6 +2402,10 @@ var _Maintenance_serviceDesc = grpc.ServiceDesc{
|
|||
MethodName: "Alarm",
|
||||
Handler: _Maintenance_Alarm_Handler,
|
||||
},
|
||||
{
|
||||
MethodName: "Status",
|
||||
Handler: _Maintenance_Status_Handler,
|
||||
},
|
||||
{
|
||||
MethodName: "Defragment",
|
||||
Handler: _Maintenance_Defragment_Handler,
|
||||
|
|
@ -4351,6 +4405,58 @@ func (m *AlarmResponse) MarshalTo(data []byte) (int, error) {
|
|||
return i, nil
|
||||
}
|
||||
|
||||
func (m *StatusRequest) Marshal() (data []byte, err error) {
|
||||
size := m.Size()
|
||||
data = make([]byte, size)
|
||||
n, err := m.MarshalTo(data)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return data[:n], nil
|
||||
}
|
||||
|
||||
func (m *StatusRequest) MarshalTo(data []byte) (int, error) {
|
||||
var i int
|
||||
_ = i
|
||||
var l int
|
||||
_ = l
|
||||
return i, nil
|
||||
}
|
||||
|
||||
func (m *StatusResponse) Marshal() (data []byte, err error) {
|
||||
size := m.Size()
|
||||
data = make([]byte, size)
|
||||
n, err := m.MarshalTo(data)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return data[:n], nil
|
||||
}
|
||||
|
||||
func (m *StatusResponse) MarshalTo(data []byte) (int, error) {
|
||||
var i int
|
||||
_ = i
|
||||
var l int
|
||||
_ = l
|
||||
if m.Header != nil {
|
||||
data[i] = 0xa
|
||||
i++
|
||||
i = encodeVarintRpc(data, i, uint64(m.Header.Size()))
|
||||
n30, err := m.Header.MarshalTo(data[i:])
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
i += n30
|
||||
}
|
||||
if len(m.Version) > 0 {
|
||||
data[i] = 0x12
|
||||
i++
|
||||
i = encodeVarintRpc(data, i, uint64(len(m.Version)))
|
||||
i += copy(data[i:], m.Version)
|
||||
}
|
||||
return i, nil
|
||||
}
|
||||
|
||||
func (m *AuthEnableRequest) Marshal() (data []byte, err error) {
|
||||
size := m.Size()
|
||||
data = make([]byte, size)
|
||||
|
|
@ -4652,11 +4758,11 @@ func (m *AuthEnableResponse) MarshalTo(data []byte) (int, error) {
|
|||
data[i] = 0xa
|
||||
i++
|
||||
i = encodeVarintRpc(data, i, uint64(m.Header.Size()))
|
||||
n30, err := m.Header.MarshalTo(data[i:])
|
||||
n31, err := m.Header.MarshalTo(data[i:])
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
i += n30
|
||||
i += n31
|
||||
}
|
||||
return i, nil
|
||||
}
|
||||
|
|
@ -4680,11 +4786,11 @@ func (m *AuthDisableResponse) MarshalTo(data []byte) (int, error) {
|
|||
data[i] = 0xa
|
||||
i++
|
||||
i = encodeVarintRpc(data, i, uint64(m.Header.Size()))
|
||||
n31, err := m.Header.MarshalTo(data[i:])
|
||||
n32, err := m.Header.MarshalTo(data[i:])
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
i += n31
|
||||
i += n32
|
||||
}
|
||||
return i, nil
|
||||
}
|
||||
|
|
@ -4708,11 +4814,11 @@ func (m *AuthenticateResponse) MarshalTo(data []byte) (int, error) {
|
|||
data[i] = 0xa
|
||||
i++
|
||||
i = encodeVarintRpc(data, i, uint64(m.Header.Size()))
|
||||
n32, err := m.Header.MarshalTo(data[i:])
|
||||
n33, err := m.Header.MarshalTo(data[i:])
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
i += n32
|
||||
i += n33
|
||||
}
|
||||
return i, nil
|
||||
}
|
||||
|
|
@ -4736,11 +4842,11 @@ func (m *AuthUserAddResponse) MarshalTo(data []byte) (int, error) {
|
|||
data[i] = 0xa
|
||||
i++
|
||||
i = encodeVarintRpc(data, i, uint64(m.Header.Size()))
|
||||
n33, err := m.Header.MarshalTo(data[i:])
|
||||
n34, err := m.Header.MarshalTo(data[i:])
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
i += n33
|
||||
i += n34
|
||||
}
|
||||
return i, nil
|
||||
}
|
||||
|
|
@ -4764,11 +4870,11 @@ func (m *AuthUserGetResponse) MarshalTo(data []byte) (int, error) {
|
|||
data[i] = 0xa
|
||||
i++
|
||||
i = encodeVarintRpc(data, i, uint64(m.Header.Size()))
|
||||
n34, err := m.Header.MarshalTo(data[i:])
|
||||
n35, err := m.Header.MarshalTo(data[i:])
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
i += n34
|
||||
i += n35
|
||||
}
|
||||
return i, nil
|
||||
}
|
||||
|
|
@ -4792,11 +4898,11 @@ func (m *AuthUserDeleteResponse) MarshalTo(data []byte) (int, error) {
|
|||
data[i] = 0xa
|
||||
i++
|
||||
i = encodeVarintRpc(data, i, uint64(m.Header.Size()))
|
||||
n35, err := m.Header.MarshalTo(data[i:])
|
||||
n36, err := m.Header.MarshalTo(data[i:])
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
i += n35
|
||||
i += n36
|
||||
}
|
||||
return i, nil
|
||||
}
|
||||
|
|
@ -4820,11 +4926,11 @@ func (m *AuthUserChangePasswordResponse) MarshalTo(data []byte) (int, error) {
|
|||
data[i] = 0xa
|
||||
i++
|
||||
i = encodeVarintRpc(data, i, uint64(m.Header.Size()))
|
||||
n36, err := m.Header.MarshalTo(data[i:])
|
||||
n37, err := m.Header.MarshalTo(data[i:])
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
i += n36
|
||||
i += n37
|
||||
}
|
||||
return i, nil
|
||||
}
|
||||
|
|
@ -4848,11 +4954,11 @@ func (m *AuthUserGrantResponse) MarshalTo(data []byte) (int, error) {
|
|||
data[i] = 0xa
|
||||
i++
|
||||
i = encodeVarintRpc(data, i, uint64(m.Header.Size()))
|
||||
n37, err := m.Header.MarshalTo(data[i:])
|
||||
n38, err := m.Header.MarshalTo(data[i:])
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
i += n37
|
||||
i += n38
|
||||
}
|
||||
return i, nil
|
||||
}
|
||||
|
|
@ -4876,11 +4982,11 @@ func (m *AuthUserRevokeResponse) MarshalTo(data []byte) (int, error) {
|
|||
data[i] = 0xa
|
||||
i++
|
||||
i = encodeVarintRpc(data, i, uint64(m.Header.Size()))
|
||||
n38, err := m.Header.MarshalTo(data[i:])
|
||||
n39, err := m.Header.MarshalTo(data[i:])
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
i += n38
|
||||
i += n39
|
||||
}
|
||||
return i, nil
|
||||
}
|
||||
|
|
@ -4904,11 +5010,11 @@ func (m *AuthRoleAddResponse) MarshalTo(data []byte) (int, error) {
|
|||
data[i] = 0xa
|
||||
i++
|
||||
i = encodeVarintRpc(data, i, uint64(m.Header.Size()))
|
||||
n39, err := m.Header.MarshalTo(data[i:])
|
||||
n40, err := m.Header.MarshalTo(data[i:])
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
i += n39
|
||||
i += n40
|
||||
}
|
||||
return i, nil
|
||||
}
|
||||
|
|
@ -4932,11 +5038,11 @@ func (m *AuthRoleGetResponse) MarshalTo(data []byte) (int, error) {
|
|||
data[i] = 0xa
|
||||
i++
|
||||
i = encodeVarintRpc(data, i, uint64(m.Header.Size()))
|
||||
n40, err := m.Header.MarshalTo(data[i:])
|
||||
n41, err := m.Header.MarshalTo(data[i:])
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
i += n40
|
||||
i += n41
|
||||
}
|
||||
return i, nil
|
||||
}
|
||||
|
|
@ -4960,11 +5066,11 @@ func (m *AuthRoleDeleteResponse) MarshalTo(data []byte) (int, error) {
|
|||
data[i] = 0xa
|
||||
i++
|
||||
i = encodeVarintRpc(data, i, uint64(m.Header.Size()))
|
||||
n41, err := m.Header.MarshalTo(data[i:])
|
||||
n42, err := m.Header.MarshalTo(data[i:])
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
i += n41
|
||||
i += n42
|
||||
}
|
||||
return i, nil
|
||||
}
|
||||
|
|
@ -4988,11 +5094,11 @@ func (m *AuthRoleGrantResponse) MarshalTo(data []byte) (int, error) {
|
|||
data[i] = 0xa
|
||||
i++
|
||||
i = encodeVarintRpc(data, i, uint64(m.Header.Size()))
|
||||
n42, err := m.Header.MarshalTo(data[i:])
|
||||
n43, err := m.Header.MarshalTo(data[i:])
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
i += n42
|
||||
i += n43
|
||||
}
|
||||
return i, nil
|
||||
}
|
||||
|
|
@ -5016,11 +5122,11 @@ func (m *AuthRoleRevokeResponse) MarshalTo(data []byte) (int, error) {
|
|||
data[i] = 0xa
|
||||
i++
|
||||
i = encodeVarintRpc(data, i, uint64(m.Header.Size()))
|
||||
n43, err := m.Header.MarshalTo(data[i:])
|
||||
n44, err := m.Header.MarshalTo(data[i:])
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
i += n43
|
||||
i += n44
|
||||
}
|
||||
return i, nil
|
||||
}
|
||||
|
|
@ -5731,6 +5837,26 @@ func (m *AlarmResponse) Size() (n int) {
|
|||
return n
|
||||
}
|
||||
|
||||
func (m *StatusRequest) Size() (n int) {
|
||||
var l int
|
||||
_ = l
|
||||
return n
|
||||
}
|
||||
|
||||
func (m *StatusResponse) Size() (n int) {
|
||||
var l int
|
||||
_ = l
|
||||
if m.Header != nil {
|
||||
l = m.Header.Size()
|
||||
n += 1 + l + sovRpc(uint64(l))
|
||||
}
|
||||
l = len(m.Version)
|
||||
if l > 0 {
|
||||
n += 1 + l + sovRpc(uint64(l))
|
||||
}
|
||||
return n
|
||||
}
|
||||
|
||||
func (m *AuthEnableRequest) Size() (n int) {
|
||||
var l int
|
||||
_ = l
|
||||
|
|
@ -10402,6 +10528,168 @@ func (m *AlarmResponse) Unmarshal(data []byte) error {
|
|||
}
|
||||
return nil
|
||||
}
|
||||
func (m *StatusRequest) Unmarshal(data []byte) error {
|
||||
l := len(data)
|
||||
iNdEx := 0
|
||||
for iNdEx < l {
|
||||
preIndex := iNdEx
|
||||
var wire uint64
|
||||
for shift := uint(0); ; shift += 7 {
|
||||
if shift >= 64 {
|
||||
return ErrIntOverflowRpc
|
||||
}
|
||||
if iNdEx >= l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
b := data[iNdEx]
|
||||
iNdEx++
|
||||
wire |= (uint64(b) & 0x7F) << shift
|
||||
if b < 0x80 {
|
||||
break
|
||||
}
|
||||
}
|
||||
fieldNum := int32(wire >> 3)
|
||||
wireType := int(wire & 0x7)
|
||||
if wireType == 4 {
|
||||
return fmt.Errorf("proto: StatusRequest: wiretype end group for non-group")
|
||||
}
|
||||
if fieldNum <= 0 {
|
||||
return fmt.Errorf("proto: StatusRequest: illegal tag %d (wire type %d)", fieldNum, wire)
|
||||
}
|
||||
switch fieldNum {
|
||||
default:
|
||||
iNdEx = preIndex
|
||||
skippy, err := skipRpc(data[iNdEx:])
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if skippy < 0 {
|
||||
return ErrInvalidLengthRpc
|
||||
}
|
||||
if (iNdEx + skippy) > l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
iNdEx += skippy
|
||||
}
|
||||
}
|
||||
|
||||
if iNdEx > l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
return nil
|
||||
}
|
||||
func (m *StatusResponse) Unmarshal(data []byte) error {
|
||||
l := len(data)
|
||||
iNdEx := 0
|
||||
for iNdEx < l {
|
||||
preIndex := iNdEx
|
||||
var wire uint64
|
||||
for shift := uint(0); ; shift += 7 {
|
||||
if shift >= 64 {
|
||||
return ErrIntOverflowRpc
|
||||
}
|
||||
if iNdEx >= l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
b := data[iNdEx]
|
||||
iNdEx++
|
||||
wire |= (uint64(b) & 0x7F) << shift
|
||||
if b < 0x80 {
|
||||
break
|
||||
}
|
||||
}
|
||||
fieldNum := int32(wire >> 3)
|
||||
wireType := int(wire & 0x7)
|
||||
if wireType == 4 {
|
||||
return fmt.Errorf("proto: StatusResponse: wiretype end group for non-group")
|
||||
}
|
||||
if fieldNum <= 0 {
|
||||
return fmt.Errorf("proto: StatusResponse: illegal tag %d (wire type %d)", fieldNum, wire)
|
||||
}
|
||||
switch fieldNum {
|
||||
case 1:
|
||||
if wireType != 2 {
|
||||
return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType)
|
||||
}
|
||||
var msglen int
|
||||
for shift := uint(0); ; shift += 7 {
|
||||
if shift >= 64 {
|
||||
return ErrIntOverflowRpc
|
||||
}
|
||||
if iNdEx >= l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
b := data[iNdEx]
|
||||
iNdEx++
|
||||
msglen |= (int(b) & 0x7F) << shift
|
||||
if b < 0x80 {
|
||||
break
|
||||
}
|
||||
}
|
||||
if msglen < 0 {
|
||||
return ErrInvalidLengthRpc
|
||||
}
|
||||
postIndex := iNdEx + msglen
|
||||
if postIndex > l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
if m.Header == nil {
|
||||
m.Header = &ResponseHeader{}
|
||||
}
|
||||
if err := m.Header.Unmarshal(data[iNdEx:postIndex]); err != nil {
|
||||
return err
|
||||
}
|
||||
iNdEx = postIndex
|
||||
case 2:
|
||||
if wireType != 2 {
|
||||
return fmt.Errorf("proto: wrong wireType = %d for field Version", wireType)
|
||||
}
|
||||
var stringLen uint64
|
||||
for shift := uint(0); ; shift += 7 {
|
||||
if shift >= 64 {
|
||||
return ErrIntOverflowRpc
|
||||
}
|
||||
if iNdEx >= l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
b := data[iNdEx]
|
||||
iNdEx++
|
||||
stringLen |= (uint64(b) & 0x7F) << shift
|
||||
if b < 0x80 {
|
||||
break
|
||||
}
|
||||
}
|
||||
intStringLen := int(stringLen)
|
||||
if intStringLen < 0 {
|
||||
return ErrInvalidLengthRpc
|
||||
}
|
||||
postIndex := iNdEx + intStringLen
|
||||
if postIndex > l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
m.Version = string(data[iNdEx:postIndex])
|
||||
iNdEx = postIndex
|
||||
default:
|
||||
iNdEx = preIndex
|
||||
skippy, err := skipRpc(data[iNdEx:])
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if skippy < 0 {
|
||||
return ErrInvalidLengthRpc
|
||||
}
|
||||
if (iNdEx + skippy) > l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
iNdEx += skippy
|
||||
}
|
||||
}
|
||||
|
||||
if iNdEx > l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
return nil
|
||||
}
|
||||
func (m *AuthEnableRequest) Unmarshal(data []byte) error {
|
||||
l := len(data)
|
||||
iNdEx := 0
|
||||
|
|
|
@@ -75,6 +75,9 @@ service Maintenance {
   // Alarm activates, deactivates, and queries alarms regarding cluster health.
   rpc Alarm(AlarmRequest) returns (AlarmResponse) {}

+  // Status gets the status of the member.
+  rpc Status(StatusRequest) returns (StatusResponse) {}
+
   rpc Defragment(DefragmentRequest) returns (DefragmentResponse) {}

   // Hash returns the hash of the local KV state for consistency checking purpose.
@@ -471,6 +474,14 @@ message AlarmResponse {
   repeated AlarmMember alarms = 2;
 }

+message StatusRequest {
+}
+
+message StatusResponse {
+  ResponseHeader header = 1;
+  string version = 2;
+}
+
 message AuthEnableRequest {
 }
@@ -0,0 +1,72 @@
// Copyright 2016 CoreOS, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package tlsutil

import (
	"crypto/tls"
	"crypto/x509"
	"encoding/pem"
	"io/ioutil"
)

// NewCertPool creates x509 certPool with provided CA files.
func NewCertPool(CAFiles []string) (*x509.CertPool, error) {
	certPool := x509.NewCertPool()

	for _, CAFile := range CAFiles {
		pemByte, err := ioutil.ReadFile(CAFile)
		if err != nil {
			return nil, err
		}

		for {
			var block *pem.Block
			block, pemByte = pem.Decode(pemByte)
			if block == nil {
				break
			}
			cert, err := x509.ParseCertificate(block.Bytes)
			if err != nil {
				return nil, err
			}
			certPool.AddCert(cert)
		}
	}

	return certPool, nil
}

// NewCert generates TLS cert by using the given cert,key and parse function.
func NewCert(certfile, keyfile string, parseFunc func([]byte, []byte) (tls.Certificate, error)) (*tls.Certificate, error) {
	cert, err := ioutil.ReadFile(certfile)
	if err != nil {
		return nil, err
	}

	key, err := ioutil.ReadFile(keyfile)
	if err != nil {
		return nil, err
	}

	if parseFunc == nil {
		parseFunc = tls.X509KeyPair
	}

	tlsCert, err := parseFunc(cert, key)
	if err != nil {
		return nil, err
	}
	return &tlsCert, nil
}
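A hedged example of combining the two helpers into a tls.Config, much as the clientv3 config loader earlier in this commit does; the file paths are placeholders:

```go
package main

import (
	"crypto/tls"
	"log"

	"github.com/coreos/etcd/pkg/tlsutil"
)

func main() {
	// Placeholder paths for a client certificate, its key, and a CA bundle.
	cert, err := tlsutil.NewCert("client.crt", "client.key", nil) // nil parseFunc falls back to tls.X509KeyPair
	if err != nil {
		log.Fatal(err)
	}
	pool, err := tlsutil.NewCertPool([]string{"ca.crt"})
	if err != nil {
		log.Fatal(err)
	}
	cfg := &tls.Config{
		Certificates: []tls.Certificate{*cert},
		RootCAs:      pool,
		MinVersion:   tls.VersionTLS10,
	}
	_ = cfg // hand this to an HTTP transport or gRPC credentials as needed
	log.Println("TLS config ready")
}
```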
@@ -0,0 +1,20 @@
# OSX leaves these everywhere on SMB shares
._*

# Eclipse files
.classpath
.project
.settings/**

# Emacs save files
*~

# Vim-related files
[._]*.s[a-w][a-z]
[._]s[a-w][a-z]
*.un~
Session.vim
.netrwhist

# Go test binaries
*.test

@@ -0,0 +1,7 @@
language: go
go:
  - 1.3
  - 1.4
script:
  - go test
  - go build
@ -0,0 +1,50 @@
|
|||
The MIT License (MIT)
|
||||
|
||||
Copyright (c) 2014 Sam Ghods
|
||||
|
||||
Permission is hereby granted, free of charge, to any person obtaining a copy
|
||||
of this software and associated documentation files (the "Software"), to deal
|
||||
in the Software without restriction, including without limitation the rights
|
||||
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
||||
copies of the Software, and to permit persons to whom the Software is
|
||||
furnished to do so, subject to the following conditions:
|
||||
|
||||
The above copyright notice and this permission notice shall be included in all
|
||||
copies or substantial portions of the Software.
|
||||
|
||||
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
||||
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
||||
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
||||
SOFTWARE.
|
||||
|
||||
|
||||
Copyright (c) 2012 The Go Authors. All rights reserved.
|
||||
|
||||
Redistribution and use in source and binary forms, with or without
|
||||
modification, are permitted provided that the following conditions are
|
||||
met:
|
||||
|
||||
* Redistributions of source code must retain the above copyright
|
||||
notice, this list of conditions and the following disclaimer.
|
||||
* Redistributions in binary form must reproduce the above
|
||||
copyright notice, this list of conditions and the following disclaimer
|
||||
in the documentation and/or other materials provided with the
|
||||
distribution.
|
||||
* Neither the name of Google Inc. nor the names of its
|
||||
contributors may be used to endorse or promote products derived from
|
||||
this software without specific prior written permission.
|
||||
|
||||
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
|
|
@ -0,0 +1,116 @@
|
|||
# YAML marshaling and unmarshaling support for Go
|
||||
|
||||
[](https://travis-ci.org/ghodss/yaml)
|
||||
|
||||
## Introduction
|
||||
|
||||
A wrapper around [go-yaml](https://github.com/go-yaml/yaml) designed to enable a better way of handling YAML when marshaling to and from structs.
|
||||
|
||||
In short, this library first converts YAML to JSON using go-yaml and then uses `json.Marshal` and `json.Unmarshal` to convert to or from the struct. This means that it effectively reuses the JSON struct tags as well as the custom JSON methods `MarshalJSON` and `UnmarshalJSON`, unlike go-yaml. For a detailed overview of the rationale behind this method, [see this blog post](http://ghodss.com/2014/the-right-way-to-handle-yaml-in-golang/).
|
||||
|
||||
## Compatibility
|
||||
|
||||
This package uses [go-yaml v2](https://github.com/go-yaml/yaml) and therefore supports [everything go-yaml supports](https://github.com/go-yaml/yaml#compatibility).
|
||||
|
||||
## Caveats
|
||||
|
||||
**Caveat #1:** When using `yaml.Marshal` and `yaml.Unmarshal`, binary data should NOT be preceded with the `!!binary` YAML tag. If you do, go-yaml will convert the binary data from base64 to native binary data, which is not compatible with JSON. You can still use binary data in your YAML files, though: just store it without the `!!binary` tag and decode the base64 in your code (e.g. in the custom JSON methods `MarshalJSON` and `UnmarshalJSON`). This also has the benefit that your YAML and your JSON binary data will be decoded exactly the same way. As an example:
|
||||
|
||||
```
|
||||
BAD:
|
||||
exampleKey: !!binary gIGC
|
||||
|
||||
GOOD:
|
||||
exampleKey: gIGC
|
||||
... and decode the base64 data in your code.
|
||||
```
|
||||
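For reference, a hedged sketch of what such custom JSON methods might look like; the `Blob` and `Config` types below are invented for illustration and are not part of this library:

```go
package main

import (
	"encoding/base64"
	"encoding/json"
	"fmt"

	"github.com/ghodss/yaml"
)

// Blob stores raw bytes but marshals to and from a plain base64 string,
// so the same representation works for both YAML and JSON.
type Blob struct {
	Data []byte
}

// MarshalJSON encodes the raw bytes as a base64 JSON string.
func (b Blob) MarshalJSON() ([]byte, error) {
	return json.Marshal(base64.StdEncoding.EncodeToString(b.Data))
}

// UnmarshalJSON decodes a base64 JSON string back into raw bytes.
func (b *Blob) UnmarshalJSON(data []byte) error {
	var s string
	if err := json.Unmarshal(data, &s); err != nil {
		return err
	}
	raw, err := base64.StdEncoding.DecodeString(s)
	if err != nil {
		return err
	}
	b.Data = raw
	return nil
}

type Config struct {
	ExampleKey Blob `json:"exampleKey"`
}

func main() {
	var c Config
	// No !!binary tag: the value stays a plain string until UnmarshalJSON decodes it.
	if err := yaml.Unmarshal([]byte("exampleKey: gIGC\n"), &c); err != nil {
		fmt.Printf("err: %v\n", err)
		return
	}
	fmt.Printf("%x\n", c.ExampleKey.Data)
	/* Output:
	808182
	*/
}
```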
|
||||
**Caveat #2:** When using `YAMLToJSON` directly, maps whose keys are themselves maps will result in an error, since JSON does not support such keys. The same error occurs in `Unmarshal`, because map keys cannot be unmarshaled into struct fields anyway. Scalar (int, float, bool) keys, by contrast, are converted to strings, as the sketch below illustrates.
|
||||
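A minimal hedged sketch of that key conversion (the input and output shown are assumptions based on the behavior described above, not taken from this repository):

```go
package main

import (
	"fmt"

	"github.com/ghodss/yaml"
)

func main() {
	// Integer and float keys are legal YAML but not legal JSON;
	// YAMLToJSON converts them to string keys.
	j, err := yaml.YAMLToJSON([]byte("1: one\n2.5: two\n"))
	if err != nil {
		fmt.Printf("err: %v\n", err)
		return
	}
	fmt.Println(string(j))
	/* Output:
	{"1":"one","2.5":"two"}
	*/
}
```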
|
||||
## Installation and usage
|
||||
|
||||
To install, run:
|
||||
|
||||
```
|
||||
$ go get github.com/ghodss/yaml
|
||||
```
|
||||
|
||||
And import using:
|
||||
|
||||
```
|
||||
import "github.com/ghodss/yaml"
|
||||
```
|
||||
|
||||
Usage is very similar to the JSON library:
|
||||
|
||||
```go
|
||||
import (
|
||||
"fmt"
|
||||
|
||||
"github.com/ghodss/yaml"
|
||||
)
|
||||
|
||||
type Person struct {
|
||||
Name string `json:"name"` // Affects YAML field names too.
|
||||
Age int `json:"age"`
|
||||
}
|
||||
|
||||
func main() {
|
||||
// Marshal a Person struct to YAML.
|
||||
p := Person{"John", 30}
|
||||
y, err := yaml.Marshal(p)
|
||||
if err != nil {
|
||||
fmt.Printf("err: %v\n", err)
|
||||
return
|
||||
}
|
||||
fmt.Println(string(y))
|
||||
/* Output:
|
||||
name: John
|
||||
age: 30
|
||||
*/
|
||||
|
||||
// Unmarshal the YAML back into a Person struct.
|
||||
var p2 Person
|
||||
err = yaml.Unmarshal(y, &p2)
|
||||
if err != nil {
|
||||
fmt.Printf("err: %v\n", err)
|
||||
return
|
||||
}
|
||||
fmt.Println(p2)
|
||||
/* Output:
|
||||
{John 30}
|
||||
*/
|
||||
}
|
||||
```
|
||||
|
||||
`yaml.YAMLToJSON` and `yaml.JSONToYAML` methods are also available:
|
||||
|
||||
```go
|
||||
import (
|
||||
"fmt"
|
||||
|
||||
"github.com/ghodss/yaml"
|
||||
)
|
||||
func main() {
|
||||
j := []byte(`{"name": "John", "age": 30}`)
|
||||
y, err := yaml.JSONToYAML(j)
|
||||
if err != nil {
|
||||
fmt.Printf("err: %v\n", err)
|
||||
return
|
||||
}
|
||||
fmt.Println(string(y))
|
||||
/* Output:
|
||||
name: John
|
||||
age: 30
|
||||
*/
|
||||
j2, err := yaml.YAMLToJSON(y)
|
||||
if err != nil {
|
||||
fmt.Printf("err: %v\n", err)
|
||||
return
|
||||
}
|
||||
fmt.Println(string(j2))
|
||||
/* Output:
|
||||
{"age":30,"name":"John"}
|
||||
*/
|
||||
}
|
||||
```
|
||||
|
|
@ -0,0 +1,497 @@
|
|||
// Copyright 2013 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
package yaml
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"encoding"
|
||||
"encoding/json"
|
||||
"reflect"
|
||||
"sort"
|
||||
"strings"
|
||||
"sync"
|
||||
"unicode"
|
||||
"unicode/utf8"
|
||||
)
|
||||
|
||||
// indirect walks down v allocating pointers as needed,
|
||||
// until it gets to a non-pointer.
|
||||
// if it encounters an Unmarshaler, indirect stops and returns that.
|
||||
// if decodingNull is true, indirect stops at the last pointer so it can be set to nil.
|
||||
func indirect(v reflect.Value, decodingNull bool) (json.Unmarshaler, encoding.TextUnmarshaler, reflect.Value) {
|
||||
// If v is a named type and is addressable,
|
||||
// start with its address, so that if the type has pointer methods,
|
||||
// we find them.
|
||||
if v.Kind() != reflect.Ptr && v.Type().Name() != "" && v.CanAddr() {
|
||||
v = v.Addr()
|
||||
}
|
||||
for {
|
||||
// Load value from interface, but only if the result will be
|
||||
// usefully addressable.
|
||||
if v.Kind() == reflect.Interface && !v.IsNil() {
|
||||
e := v.Elem()
|
||||
if e.Kind() == reflect.Ptr && !e.IsNil() && (!decodingNull || e.Elem().Kind() == reflect.Ptr) {
|
||||
v = e
|
||||
continue
|
||||
}
|
||||
}
|
||||
|
||||
if v.Kind() != reflect.Ptr {
|
||||
break
|
||||
}
|
||||
|
||||
if v.Elem().Kind() != reflect.Ptr && decodingNull && v.CanSet() {
|
||||
break
|
||||
}
|
||||
if v.IsNil() {
|
||||
v.Set(reflect.New(v.Type().Elem()))
|
||||
}
|
||||
if v.Type().NumMethod() > 0 {
|
||||
if u, ok := v.Interface().(json.Unmarshaler); ok {
|
||||
return u, nil, reflect.Value{}
|
||||
}
|
||||
if u, ok := v.Interface().(encoding.TextUnmarshaler); ok {
|
||||
return nil, u, reflect.Value{}
|
||||
}
|
||||
}
|
||||
v = v.Elem()
|
||||
}
|
||||
return nil, nil, v
|
||||
}
|
||||
|
||||
// A field represents a single field found in a struct.
|
||||
type field struct {
|
||||
name string
|
||||
nameBytes []byte // []byte(name)
|
||||
equalFold func(s, t []byte) bool // bytes.EqualFold or equivalent
|
||||
|
||||
tag bool
|
||||
index []int
|
||||
typ reflect.Type
|
||||
omitEmpty bool
|
||||
quoted bool
|
||||
}
|
||||
|
||||
func fillField(f field) field {
|
||||
f.nameBytes = []byte(f.name)
|
||||
f.equalFold = foldFunc(f.nameBytes)
|
||||
return f
|
||||
}
|
||||
|
||||
// byName sorts field by name, breaking ties with depth,
|
||||
// then breaking ties with "name came from json tag", then
|
||||
// breaking ties with index sequence.
|
||||
type byName []field
|
||||
|
||||
func (x byName) Len() int { return len(x) }
|
||||
|
||||
func (x byName) Swap(i, j int) { x[i], x[j] = x[j], x[i] }
|
||||
|
||||
func (x byName) Less(i, j int) bool {
|
||||
if x[i].name != x[j].name {
|
||||
return x[i].name < x[j].name
|
||||
}
|
||||
if len(x[i].index) != len(x[j].index) {
|
||||
return len(x[i].index) < len(x[j].index)
|
||||
}
|
||||
if x[i].tag != x[j].tag {
|
||||
return x[i].tag
|
||||
}
|
||||
return byIndex(x).Less(i, j)
|
||||
}
|
||||
|
||||
// byIndex sorts field by index sequence.
|
||||
type byIndex []field
|
||||
|
||||
func (x byIndex) Len() int { return len(x) }
|
||||
|
||||
func (x byIndex) Swap(i, j int) { x[i], x[j] = x[j], x[i] }
|
||||
|
||||
func (x byIndex) Less(i, j int) bool {
|
||||
for k, xik := range x[i].index {
|
||||
if k >= len(x[j].index) {
|
||||
return false
|
||||
}
|
||||
if xik != x[j].index[k] {
|
||||
return xik < x[j].index[k]
|
||||
}
|
||||
}
|
||||
return len(x[i].index) < len(x[j].index)
|
||||
}
|
||||
|
||||
// typeFields returns a list of fields that JSON should recognize for the given type.
|
||||
// The algorithm is breadth-first search over the set of structs to include - the top struct
|
||||
// and then any reachable anonymous structs.
|
||||
func typeFields(t reflect.Type) []field {
|
||||
// Anonymous fields to explore at the current level and the next.
|
||||
current := []field{}
|
||||
next := []field{{typ: t}}
|
||||
|
||||
// Count of queued names for current level and the next.
|
||||
count := map[reflect.Type]int{}
|
||||
nextCount := map[reflect.Type]int{}
|
||||
|
||||
// Types already visited at an earlier level.
|
||||
visited := map[reflect.Type]bool{}
|
||||
|
||||
// Fields found.
|
||||
var fields []field
|
||||
|
||||
for len(next) > 0 {
|
||||
current, next = next, current[:0]
|
||||
count, nextCount = nextCount, map[reflect.Type]int{}
|
||||
|
||||
for _, f := range current {
|
||||
if visited[f.typ] {
|
||||
continue
|
||||
}
|
||||
visited[f.typ] = true
|
||||
|
||||
// Scan f.typ for fields to include.
|
||||
for i := 0; i < f.typ.NumField(); i++ {
|
||||
sf := f.typ.Field(i)
|
||||
if sf.PkgPath != "" { // unexported
|
||||
continue
|
||||
}
|
||||
tag := sf.Tag.Get("json")
|
||||
if tag == "-" {
|
||||
continue
|
||||
}
|
||||
name, opts := parseTag(tag)
|
||||
if !isValidTag(name) {
|
||||
name = ""
|
||||
}
|
||||
index := make([]int, len(f.index)+1)
|
||||
copy(index, f.index)
|
||||
index[len(f.index)] = i
|
||||
|
||||
ft := sf.Type
|
||||
if ft.Name() == "" && ft.Kind() == reflect.Ptr {
|
||||
// Follow pointer.
|
||||
ft = ft.Elem()
|
||||
}
|
||||
|
||||
// Record found field and index sequence.
|
||||
if name != "" || !sf.Anonymous || ft.Kind() != reflect.Struct {
|
||||
tagged := name != ""
|
||||
if name == "" {
|
||||
name = sf.Name
|
||||
}
|
||||
fields = append(fields, fillField(field{
|
||||
name: name,
|
||||
tag: tagged,
|
||||
index: index,
|
||||
typ: ft,
|
||||
omitEmpty: opts.Contains("omitempty"),
|
||||
quoted: opts.Contains("string"),
|
||||
}))
|
||||
if count[f.typ] > 1 {
|
||||
// If there were multiple instances, add a second,
|
||||
// so that the annihilation code will see a duplicate.
|
||||
// It only cares about the distinction between 1 or 2,
|
||||
// so don't bother generating any more copies.
|
||||
fields = append(fields, fields[len(fields)-1])
|
||||
}
|
||||
continue
|
||||
}
|
||||
|
||||
// Record new anonymous struct to explore in next round.
|
||||
nextCount[ft]++
|
||||
if nextCount[ft] == 1 {
|
||||
next = append(next, fillField(field{name: ft.Name(), index: index, typ: ft}))
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
sort.Sort(byName(fields))
|
||||
|
||||
// Delete all fields that are hidden by the Go rules for embedded fields,
|
||||
// except that fields with JSON tags are promoted.
|
||||
|
||||
// The fields are sorted in primary order of name, secondary order
|
||||
// of field index length. Loop over names; for each name, delete
|
||||
// hidden fields by choosing the one dominant field that survives.
|
||||
out := fields[:0]
|
||||
for advance, i := 0, 0; i < len(fields); i += advance {
|
||||
// One iteration per name.
|
||||
// Find the sequence of fields with the name of this first field.
|
||||
fi := fields[i]
|
||||
name := fi.name
|
||||
for advance = 1; i+advance < len(fields); advance++ {
|
||||
fj := fields[i+advance]
|
||||
if fj.name != name {
|
||||
break
|
||||
}
|
||||
}
|
||||
if advance == 1 { // Only one field with this name
|
||||
out = append(out, fi)
|
||||
continue
|
||||
}
|
||||
dominant, ok := dominantField(fields[i : i+advance])
|
||||
if ok {
|
||||
out = append(out, dominant)
|
||||
}
|
||||
}
|
||||
|
||||
fields = out
|
||||
sort.Sort(byIndex(fields))
|
||||
|
||||
return fields
|
||||
}
|
||||
|
||||
// dominantField looks through the fields, all of which are known to
|
||||
// have the same name, to find the single field that dominates the
|
||||
// others using Go's embedding rules, modified by the presence of
|
||||
// JSON tags. If there are multiple top-level fields, the boolean
|
||||
// will be false: This condition is an error in Go and we skip all
|
||||
// the fields.
|
||||
func dominantField(fields []field) (field, bool) {
|
||||
// The fields are sorted in increasing index-length order. The winner
|
||||
// must therefore be one with the shortest index length. Drop all
|
||||
// longer entries, which is easy: just truncate the slice.
|
||||
length := len(fields[0].index)
|
||||
tagged := -1 // Index of first tagged field.
|
||||
for i, f := range fields {
|
||||
if len(f.index) > length {
|
||||
fields = fields[:i]
|
||||
break
|
||||
}
|
||||
if f.tag {
|
||||
if tagged >= 0 {
|
||||
// Multiple tagged fields at the same level: conflict.
|
||||
// Return no field.
|
||||
return field{}, false
|
||||
}
|
||||
tagged = i
|
||||
}
|
||||
}
|
||||
if tagged >= 0 {
|
||||
return fields[tagged], true
|
||||
}
|
||||
// All remaining fields have the same length. If there's more than one,
|
||||
// we have a conflict (two fields named "X" at the same level) and we
|
||||
// return no field.
|
||||
if len(fields) > 1 {
|
||||
return field{}, false
|
||||
}
|
||||
return fields[0], true
|
||||
}
|
||||
|
||||
var fieldCache struct {
|
||||
sync.RWMutex
|
||||
m map[reflect.Type][]field
|
||||
}
|
||||
|
||||
// cachedTypeFields is like typeFields but uses a cache to avoid repeated work.
|
||||
func cachedTypeFields(t reflect.Type) []field {
|
||||
fieldCache.RLock()
|
||||
f := fieldCache.m[t]
|
||||
fieldCache.RUnlock()
|
||||
if f != nil {
|
||||
return f
|
||||
}
|
||||
|
||||
// Compute fields without lock.
|
||||
// Might duplicate effort but won't hold other computations back.
|
||||
f = typeFields(t)
|
||||
if f == nil {
|
||||
f = []field{}
|
||||
}
|
||||
|
||||
fieldCache.Lock()
|
||||
if fieldCache.m == nil {
|
||||
fieldCache.m = map[reflect.Type][]field{}
|
||||
}
|
||||
fieldCache.m[t] = f
|
||||
fieldCache.Unlock()
|
||||
return f
|
||||
}
|
||||
|
||||
func isValidTag(s string) bool {
|
||||
if s == "" {
|
||||
return false
|
||||
}
|
||||
for _, c := range s {
|
||||
switch {
|
||||
case strings.ContainsRune("!#$%&()*+-./:<=>?@[]^_{|}~ ", c):
|
||||
// Backslash and quote chars are reserved, but
|
||||
// otherwise any punctuation chars are allowed
|
||||
// in a tag name.
|
||||
default:
|
||||
if !unicode.IsLetter(c) && !unicode.IsDigit(c) {
|
||||
return false
|
||||
}
|
||||
}
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
const (
|
||||
caseMask = ^byte(0x20) // Mask to ignore case in ASCII.
|
||||
kelvin = '\u212a'
|
||||
smallLongEss = '\u017f'
|
||||
)
|
||||
|
||||
// foldFunc returns one of four different case folding equivalence
|
||||
// functions, from most general (and slow) to fastest:
|
||||
//
|
||||
// 1) bytes.EqualFold, if the key s contains any non-ASCII UTF-8
|
||||
// 2) equalFoldRight, if s contains special folding ASCII ('k', 'K', 's', 'S')
|
||||
// 3) asciiEqualFold, no special, but includes non-letters (including _)
|
||||
// 4) simpleLetterEqualFold, no specials, no non-letters.
|
||||
//
|
||||
// The letters S and K are special because they map to 3 runes, not just 2:
|
||||
// * S maps to s and to U+017F 'ſ' Latin small letter long s
|
||||
// * k maps to K and to U+212A 'K' Kelvin sign
|
||||
// See http://play.golang.org/p/tTxjOc0OGo
|
||||
//
|
||||
// The returned function is specialized for matching against s and
|
||||
// should only be given s. It's not curried for performance reasons.
|
||||
func foldFunc(s []byte) func(s, t []byte) bool {
|
||||
nonLetter := false
|
||||
special := false // special letter
|
||||
for _, b := range s {
|
||||
if b >= utf8.RuneSelf {
|
||||
return bytes.EqualFold
|
||||
}
|
||||
upper := b & caseMask
|
||||
if upper < 'A' || upper > 'Z' {
|
||||
nonLetter = true
|
||||
} else if upper == 'K' || upper == 'S' {
|
||||
// See above for why these letters are special.
|
||||
special = true
|
||||
}
|
||||
}
|
||||
if special {
|
||||
return equalFoldRight
|
||||
}
|
||||
if nonLetter {
|
||||
return asciiEqualFold
|
||||
}
|
||||
return simpleLetterEqualFold
|
||||
}
|
||||
|
||||
// equalFoldRight is a specialization of bytes.EqualFold when s is
|
||||
// known to be all ASCII (including punctuation), but contains an 's',
|
||||
// 'S', 'k', or 'K', requiring a Unicode fold on the bytes in t.
|
||||
// See comments on foldFunc.
|
||||
func equalFoldRight(s, t []byte) bool {
|
||||
for _, sb := range s {
|
||||
if len(t) == 0 {
|
||||
return false
|
||||
}
|
||||
tb := t[0]
|
||||
if tb < utf8.RuneSelf {
|
||||
if sb != tb {
|
||||
sbUpper := sb & caseMask
|
||||
if 'A' <= sbUpper && sbUpper <= 'Z' {
|
||||
if sbUpper != tb&caseMask {
|
||||
return false
|
||||
}
|
||||
} else {
|
||||
return false
|
||||
}
|
||||
}
|
||||
t = t[1:]
|
||||
continue
|
||||
}
|
||||
// sb is ASCII and t is not. t must be either kelvin
|
||||
// sign or long s; sb must be s, S, k, or K.
|
||||
tr, size := utf8.DecodeRune(t)
|
||||
switch sb {
|
||||
case 's', 'S':
|
||||
if tr != smallLongEss {
|
||||
return false
|
||||
}
|
||||
case 'k', 'K':
|
||||
if tr != kelvin {
|
||||
return false
|
||||
}
|
||||
default:
|
||||
return false
|
||||
}
|
||||
t = t[size:]
|
||||
|
||||
}
|
||||
if len(t) > 0 {
|
||||
return false
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
// asciiEqualFold is a specialization of bytes.EqualFold for use when
|
||||
// s is all ASCII (but may contain non-letters) and contains no
|
||||
// special-folding letters.
|
||||
// See comments on foldFunc.
|
||||
func asciiEqualFold(s, t []byte) bool {
|
||||
if len(s) != len(t) {
|
||||
return false
|
||||
}
|
||||
for i, sb := range s {
|
||||
tb := t[i]
|
||||
if sb == tb {
|
||||
continue
|
||||
}
|
||||
if ('a' <= sb && sb <= 'z') || ('A' <= sb && sb <= 'Z') {
|
||||
if sb&caseMask != tb&caseMask {
|
||||
return false
|
||||
}
|
||||
} else {
|
||||
return false
|
||||
}
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
// simpleLetterEqualFold is a specialization of bytes.EqualFold for
|
||||
// use when s is all ASCII letters (no underscores, etc) and also
|
||||
// doesn't contain 'k', 'K', 's', or 'S'.
|
||||
// See comments on foldFunc.
|
||||
func simpleLetterEqualFold(s, t []byte) bool {
|
||||
if len(s) != len(t) {
|
||||
return false
|
||||
}
|
||||
for i, b := range s {
|
||||
if b&caseMask != t[i]&caseMask {
|
||||
return false
|
||||
}
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
// tagOptions is the string following a comma in a struct field's "json"
|
||||
// tag, or the empty string. It does not include the leading comma.
|
||||
type tagOptions string
|
||||
|
||||
// parseTag splits a struct field's json tag into its name and
|
||||
// comma-separated options.
|
||||
func parseTag(tag string) (string, tagOptions) {
|
||||
if idx := strings.Index(tag, ","); idx != -1 {
|
||||
return tag[:idx], tagOptions(tag[idx+1:])
|
||||
}
|
||||
return tag, tagOptions("")
|
||||
}
|
||||
|
||||
// Contains reports whether a comma-separated list of options
|
||||
// contains a particular substr flag. substr must be surrounded by a
|
||||
// string boundary or commas.
|
||||
func (o tagOptions) Contains(optionName string) bool {
|
||||
if len(o) == 0 {
|
||||
return false
|
||||
}
|
||||
s := string(o)
|
||||
for s != "" {
|
||||
var next string
|
||||
i := strings.Index(s, ",")
|
||||
if i >= 0 {
|
||||
s, next = s[:i], s[i+1:]
|
||||
}
|
||||
if s == optionName {
|
||||
return true
|
||||
}
|
||||
s = next
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
|
@ -0,0 +1,277 @@
|
|||
package yaml
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"reflect"
|
||||
"strconv"
|
||||
|
||||
"gopkg.in/yaml.v2"
|
||||
)
|
||||
|
||||
// Marshals the object into JSON then converts JSON to YAML and returns the
|
||||
// YAML.
|
||||
func Marshal(o interface{}) ([]byte, error) {
|
||||
j, err := json.Marshal(o)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("error marshaling into JSON: %v", err)
|
||||
}
|
||||
|
||||
y, err := JSONToYAML(j)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("error converting JSON to YAML: %v", err)
|
||||
}
|
||||
|
||||
return y, nil
|
||||
}
|
||||
|
||||
// Converts YAML to JSON then uses JSON to unmarshal into an object.
|
||||
func Unmarshal(y []byte, o interface{}) error {
|
||||
vo := reflect.ValueOf(o)
|
||||
j, err := yamlToJSON(y, &vo)
|
||||
if err != nil {
|
||||
return fmt.Errorf("error converting YAML to JSON: %v", err)
|
||||
}
|
||||
|
||||
err = json.Unmarshal(j, o)
|
||||
if err != nil {
|
||||
return fmt.Errorf("error unmarshaling JSON: %v", err)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// Convert JSON to YAML.
|
||||
func JSONToYAML(j []byte) ([]byte, error) {
|
||||
// Convert the JSON to an object.
|
||||
var jsonObj interface{}
|
||||
// We are using yaml.Unmarshal here (instead of json.Unmarshal) because the
|
||||
// Go JSON library doesn't try to pick the right number type (int, float,
|
||||
// etc.) when unmarshaling to interface{}, it just picks float64
|
||||
// universally. go-yaml does go through the effort of picking the right
|
||||
// number type, so we can preserve number type throughout this process.
|
||||
err := yaml.Unmarshal(j, &jsonObj)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// Marshal this object into YAML.
|
||||
return yaml.Marshal(jsonObj)
|
||||
}
|
||||
|
||||
// Convert YAML to JSON. Since JSON is a subset of YAML, passing JSON through
|
||||
// this method should be a no-op.
|
||||
//
|
||||
// Things YAML can do that are not supported by JSON:
|
||||
// * In YAML you can have binary and null keys in your maps. These are invalid
|
||||
// in JSON. (int and float keys are converted to strings.)
|
||||
// * Binary data in YAML with the !!binary tag is not supported. If you want to
|
||||
// use binary data with this library, encode the data as base64 as usual but do
|
||||
// not use the !!binary tag in your YAML. This will ensure the original base64
|
||||
// encoded data makes it all the way through to the JSON.
|
||||
func YAMLToJSON(y []byte) ([]byte, error) {
|
||||
return yamlToJSON(y, nil)
|
||||
}
|
||||
|
||||
func yamlToJSON(y []byte, jsonTarget *reflect.Value) ([]byte, error) {
|
||||
// Convert the YAML to an object.
|
||||
var yamlObj interface{}
|
||||
err := yaml.Unmarshal(y, &yamlObj)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// YAML objects are not completely compatible with JSON objects (e.g. you
|
||||
// can have non-string keys in YAML). So, convert the YAML-compatible object
|
||||
// to a JSON-compatible object, failing with an error if irrecoverable
|
||||
// incompatibilities happen along the way.
|
||||
jsonObj, err := convertToJSONableObject(yamlObj, jsonTarget)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// Convert this object to JSON and return the data.
|
||||
return json.Marshal(jsonObj)
|
||||
}
|
||||
|
||||
func convertToJSONableObject(yamlObj interface{}, jsonTarget *reflect.Value) (interface{}, error) {
|
||||
var err error
|
||||
|
||||
// Resolve jsonTarget to a concrete value (i.e. not a pointer or an
|
||||
// interface). We pass decodingNull as false because we're not actually
|
||||
// decoding into the value, we're just checking if the ultimate target is a
|
||||
// string.
|
||||
if jsonTarget != nil {
|
||||
ju, tu, pv := indirect(*jsonTarget, false)
|
||||
// We have a JSON or Text Unmarshaler at this level, so we can't be trying
|
||||
// to decode into a string.
|
||||
if ju != nil || tu != nil {
|
||||
jsonTarget = nil
|
||||
} else {
|
||||
jsonTarget = &pv
|
||||
}
|
||||
}
|
||||
|
||||
// If yamlObj is a number or a boolean, check if jsonTarget is a string -
|
||||
// if so, coerce. Else return normal.
|
||||
// If yamlObj is a map or array, find the field that each key is
|
||||
// unmarshaling to, and when you recurse pass the reflect.Value for that
|
||||
// field back into this function.
|
||||
switch typedYAMLObj := yamlObj.(type) {
|
||||
case map[interface{}]interface{}:
|
||||
// JSON does not support arbitrary keys in a map, so we must convert
|
||||
// these keys to strings.
|
||||
//
|
||||
// From my reading of go-yaml v2 (specifically the resolve function),
|
||||
// keys can only have the types string, int, int64, float64, binary
|
||||
// (unsupported), or null (unsupported).
|
||||
strMap := make(map[string]interface{})
|
||||
for k, v := range typedYAMLObj {
|
||||
// Resolve the key to a string first.
|
||||
var keyString string
|
||||
switch typedKey := k.(type) {
|
||||
case string:
|
||||
keyString = typedKey
|
||||
case int:
|
||||
keyString = strconv.Itoa(typedKey)
|
||||
case int64:
|
||||
// go-yaml will only return an int64 as a key if the system
|
||||
// architecture is 32-bit and the key's value is between 32-bit
|
||||
// and 64-bit. Otherwise the key type will simply be int.
|
||||
keyString = strconv.FormatInt(typedKey, 10)
|
||||
case float64:
|
||||
// Stolen from go-yaml to use the same conversion to string as
|
||||
// the go-yaml library uses to convert float to string when
|
||||
// Marshaling.
|
||||
s := strconv.FormatFloat(typedKey, 'g', -1, 32)
|
||||
switch s {
|
||||
case "+Inf":
|
||||
s = ".inf"
|
||||
case "-Inf":
|
||||
s = "-.inf"
|
||||
case "NaN":
|
||||
s = ".nan"
|
||||
}
|
||||
keyString = s
|
||||
case bool:
|
||||
if typedKey {
|
||||
keyString = "true"
|
||||
} else {
|
||||
keyString = "false"
|
||||
}
|
||||
default:
|
||||
return nil, fmt.Errorf("Unsupported map key of type: %s, key: %+#v, value: %+#v",
|
||||
reflect.TypeOf(k), k, v)
|
||||
}
|
||||
|
||||
// jsonTarget should be a struct or a map. If it's a struct, find
|
||||
// the field it's going to map to and pass its reflect.Value. If
|
||||
// it's a map, find the element type of the map and pass the
|
||||
// reflect.Value created from that type. If it's neither, just pass
|
||||
// nil - JSON conversion will error for us if it's a real issue.
|
||||
if jsonTarget != nil {
|
||||
t := *jsonTarget
|
||||
if t.Kind() == reflect.Struct {
|
||||
keyBytes := []byte(keyString)
|
||||
// Find the field that the JSON library would use.
|
||||
var f *field
|
||||
fields := cachedTypeFields(t.Type())
|
||||
for i := range fields {
|
||||
ff := &fields[i]
|
||||
if bytes.Equal(ff.nameBytes, keyBytes) {
|
||||
f = ff
|
||||
break
|
||||
}
|
||||
// Do case-insensitive comparison.
|
||||
if f == nil && ff.equalFold(ff.nameBytes, keyBytes) {
|
||||
f = ff
|
||||
}
|
||||
}
|
||||
if f != nil {
|
||||
// Find the reflect.Value of the most preferential
|
||||
// struct field.
|
||||
jtf := t.Field(f.index[0])
|
||||
strMap[keyString], err = convertToJSONableObject(v, &jtf)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
continue
|
||||
}
|
||||
} else if t.Kind() == reflect.Map {
|
||||
// Create a zero value of the map's element type to use as
|
||||
// the JSON target.
|
||||
jtv := reflect.Zero(t.Type().Elem())
|
||||
strMap[keyString], err = convertToJSONableObject(v, &jtv)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
continue
|
||||
}
|
||||
}
|
||||
strMap[keyString], err = convertToJSONableObject(v, nil)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
return strMap, nil
|
||||
case []interface{}:
|
||||
// We need to recurse into arrays in case there are any
|
||||
// map[interface{}]interface{}'s inside and to convert any
|
||||
// numbers to strings.
|
||||
|
||||
// If jsonTarget is a slice (which it really should be), find the
|
||||
// thing it's going to map to. If it's not a slice, just pass nil
|
||||
// - JSON conversion will error for us if it's a real issue.
|
||||
var jsonSliceElemValue *reflect.Value
|
||||
if jsonTarget != nil {
|
||||
t := *jsonTarget
|
||||
if t.Kind() == reflect.Slice {
|
||||
// By default slices point to nil, but we need a reflect.Value
|
||||
// pointing to a value of the slice type, so we create one here.
|
||||
ev := reflect.Indirect(reflect.New(t.Type().Elem()))
|
||||
jsonSliceElemValue = &ev
|
||||
}
|
||||
}
|
||||
|
||||
// Make and use a new array.
|
||||
arr := make([]interface{}, len(typedYAMLObj))
|
||||
for i, v := range typedYAMLObj {
|
||||
arr[i], err = convertToJSONableObject(v, jsonSliceElemValue)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
return arr, nil
|
||||
default:
|
||||
// If the target type is a string and the YAML type is a number,
|
||||
// convert the YAML type to a string.
|
||||
if jsonTarget != nil && (*jsonTarget).Kind() == reflect.String {
|
||||
// Based on my reading of go-yaml, it may return int, int64,
|
||||
// float64, or uint64.
|
||||
var s string
|
||||
switch typedVal := typedYAMLObj.(type) {
|
||||
case int:
|
||||
s = strconv.FormatInt(int64(typedVal), 10)
|
||||
case int64:
|
||||
s = strconv.FormatInt(typedVal, 10)
|
||||
case float64:
|
||||
s = strconv.FormatFloat(typedVal, 'g', -1, 32)
|
||||
case uint64:
|
||||
s = strconv.FormatUint(typedVal, 10)
|
||||
case bool:
|
||||
if typedVal {
|
||||
s = "true"
|
||||
} else {
|
||||
s = "false"
|
||||
}
|
||||
}
|
||||
if len(s) > 0 {
|
||||
yamlObj = interface{}(s)
|
||||
}
|
||||
}
|
||||
return yamlObj, nil
|
||||
}
|
||||
|
||||
return nil, nil
|
||||
}
|
||||
|
|
@ -185,6 +185,17 @@ func NewExtension(e []byte) Extension {
|
|||
return ee
|
||||
}
|
||||
|
||||
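// AppendExtension appends the already-encoded bytes in buf to the extension
// identified by tag, for both map-backed and byte-slice-backed extension storage.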
func AppendExtension(e extendableProto, tag int32, buf []byte) {
|
||||
if ee, eok := e.(extensionsMap); eok {
|
||||
ext := ee.ExtensionMap()[int32(tag)] // may be missing
|
||||
ext.enc = append(ext.enc, buf...)
|
||||
ee.ExtensionMap()[int32(tag)] = ext
|
||||
} else if ee, eok := e.(extensionsBytes); eok {
|
||||
ext := ee.GetExtensions()
|
||||
*ext = append(*ext, buf...)
|
||||
}
|
||||
}
|
||||
|
||||
func (this Extension) GoString() string {
|
||||
if this.enc == nil {
|
||||
if err := encodeExtension(&this); err != nil {
|
||||
|
|
|
|||