Gyu-Ho Lee 2016-04-11 03:13:56 -07:00
parent 8c07144a35
commit fcdae397da
24 changed files with 1414 additions and 2239 deletions

68
Godeps/Godeps.json generated
View File

@ -29,48 +29,48 @@
},
{
"ImportPath": "github.com/coreos/etcd/auth/authpb",
"Comment": "v2.3.0-320-g6c8428c",
"Rev": "6c8428c3939a7fa224ac8e97005b1967f3fd87f1"
"Comment": "v2.3.0-322-gaf1b3f0",
"Rev": "af1b3f061ae8073610a1af285b4b088743eedbe0"
},
{
"ImportPath": "github.com/coreos/etcd/client",
"Comment": "v2.3.0-320-g6c8428c",
"Rev": "6c8428c3939a7fa224ac8e97005b1967f3fd87f1"
"Comment": "v2.3.0-322-gaf1b3f0",
"Rev": "af1b3f061ae8073610a1af285b4b088743eedbe0"
},
{
"ImportPath": "github.com/coreos/etcd/clientv3",
"Comment": "v2.3.0-320-g6c8428c",
"Rev": "6c8428c3939a7fa224ac8e97005b1967f3fd87f1"
"Comment": "v2.3.0-322-gaf1b3f0",
"Rev": "af1b3f061ae8073610a1af285b4b088743eedbe0"
},
{
"ImportPath": "github.com/coreos/etcd/etcdserver/api/v3rpc/rpctypes",
"Comment": "v2.3.0-320-g6c8428c",
"Rev": "6c8428c3939a7fa224ac8e97005b1967f3fd87f1"
"Comment": "v2.3.0-322-gaf1b3f0",
"Rev": "af1b3f061ae8073610a1af285b4b088743eedbe0"
},
{
"ImportPath": "github.com/coreos/etcd/etcdserver/etcdserverpb",
"Comment": "v2.3.0-320-g6c8428c",
"Rev": "6c8428c3939a7fa224ac8e97005b1967f3fd87f1"
"Comment": "v2.3.0-322-gaf1b3f0",
"Rev": "af1b3f061ae8073610a1af285b4b088743eedbe0"
},
{
"ImportPath": "github.com/coreos/etcd/pkg/pathutil",
"Comment": "v2.3.0-320-g6c8428c",
"Rev": "6c8428c3939a7fa224ac8e97005b1967f3fd87f1"
"Comment": "v2.3.0-322-gaf1b3f0",
"Rev": "af1b3f061ae8073610a1af285b4b088743eedbe0"
},
{
"ImportPath": "github.com/coreos/etcd/pkg/tlsutil",
"Comment": "v2.3.0-320-g6c8428c",
"Rev": "6c8428c3939a7fa224ac8e97005b1967f3fd87f1"
"Comment": "v2.3.0-322-gaf1b3f0",
"Rev": "af1b3f061ae8073610a1af285b4b088743eedbe0"
},
{
"ImportPath": "github.com/coreos/etcd/pkg/types",
"Comment": "v2.3.0-320-g6c8428c",
"Rev": "6c8428c3939a7fa224ac8e97005b1967f3fd87f1"
"Comment": "v2.3.0-322-gaf1b3f0",
"Rev": "af1b3f061ae8073610a1af285b4b088743eedbe0"
},
{
"ImportPath": "github.com/coreos/etcd/storage/storagepb",
"Comment": "v2.3.0-320-g6c8428c",
"Rev": "6c8428c3939a7fa224ac8e97005b1967f3fd87f1"
"Comment": "v2.3.0-322-gaf1b3f0",
"Rev": "af1b3f061ae8073610a1af285b4b088743eedbe0"
},
{
"ImportPath": "github.com/dustin/go-humanize",
@ -87,18 +87,18 @@
},
{
"ImportPath": "github.com/gogo/protobuf/gogoproto",
"Comment": "v0.2-9-g4365f75",
"Rev": "4365f750fe246471f2a03ef5da5231c3565c5628"
"Comment": "v0.2-10-g74b6e9d",
"Rev": "74b6e9deaff6ba6da1389ec97351d337f0d08b06"
},
{
"ImportPath": "github.com/gogo/protobuf/proto",
"Comment": "v0.2-9-g4365f75",
"Rev": "4365f750fe246471f2a03ef5da5231c3565c5628"
"Comment": "v0.2-10-g74b6e9d",
"Rev": "74b6e9deaff6ba6da1389ec97351d337f0d08b06"
},
{
"ImportPath": "github.com/gogo/protobuf/protoc-gen-gogo/descriptor",
"Comment": "v0.2-9-g4365f75",
"Rev": "4365f750fe246471f2a03ef5da5231c3565c5628"
"Comment": "v0.2-10-g74b6e9d",
"Rev": "74b6e9deaff6ba6da1389ec97351d337f0d08b06"
},
{
"ImportPath": "github.com/golang/freetype",
@ -387,43 +387,43 @@
},
{
"ImportPath": "google.golang.org/grpc",
"Rev": "ecd00d52ac82a2cd37e17bf91d9c6ca228b71745"
"Rev": "7834b974e55fbf85a5b01afb5821391c71084efd"
},
{
"ImportPath": "google.golang.org/grpc/codes",
"Rev": "ecd00d52ac82a2cd37e17bf91d9c6ca228b71745"
"Rev": "7834b974e55fbf85a5b01afb5821391c71084efd"
},
{
"ImportPath": "google.golang.org/grpc/credentials",
"Rev": "ecd00d52ac82a2cd37e17bf91d9c6ca228b71745"
"Rev": "7834b974e55fbf85a5b01afb5821391c71084efd"
},
{
"ImportPath": "google.golang.org/grpc/credentials/oauth",
"Rev": "ecd00d52ac82a2cd37e17bf91d9c6ca228b71745"
"Rev": "7834b974e55fbf85a5b01afb5821391c71084efd"
},
{
"ImportPath": "google.golang.org/grpc/grpclog",
"Rev": "ecd00d52ac82a2cd37e17bf91d9c6ca228b71745"
"Rev": "7834b974e55fbf85a5b01afb5821391c71084efd"
},
{
"ImportPath": "google.golang.org/grpc/internal",
"Rev": "ecd00d52ac82a2cd37e17bf91d9c6ca228b71745"
"Rev": "7834b974e55fbf85a5b01afb5821391c71084efd"
},
{
"ImportPath": "google.golang.org/grpc/metadata",
"Rev": "ecd00d52ac82a2cd37e17bf91d9c6ca228b71745"
"Rev": "7834b974e55fbf85a5b01afb5821391c71084efd"
},
{
"ImportPath": "google.golang.org/grpc/naming",
"Rev": "ecd00d52ac82a2cd37e17bf91d9c6ca228b71745"
"Rev": "7834b974e55fbf85a5b01afb5821391c71084efd"
},
{
"ImportPath": "google.golang.org/grpc/peer",
"Rev": "ecd00d52ac82a2cd37e17bf91d9c6ca228b71745"
"Rev": "7834b974e55fbf85a5b01afb5821391c71084efd"
},
{
"ImportPath": "google.golang.org/grpc/transport",
"Rev": "ecd00d52ac82a2cd37e17bf91d9c6ca228b71745"
"Rev": "7834b974e55fbf85a5b01afb5821391c71084efd"
},
{
"ImportPath": "gopkg.in/yaml.v2",

View File

@ -8,3 +8,4 @@ Distributed database tester.
[cistat]: https://travis-ci.org/coreos/dbtester
[dbtester-godoc]: https://godoc.org/github.com/coreos/dbtester

View File

@ -49,7 +49,6 @@ type (
ClientPort string
InitLimit int
SyncLimit int
PreAllocSize int64
MaxClientCnxns int64
Peers []ZookeeperPeer
}
@ -57,20 +56,6 @@ type (
MyID int
IP string
}
ConsulConfig struct {
Bootstrap bool `json:"bootstrap"`
Server bool `json:"server,omitempty"`
AdvertiseAddr string `json:"advertise_addr,omitempty"`
DataCenter string `json:"datacenter,omitempty"`
DataDir string `json:"data_dir,omitempty"`
Encrypt string `json:"encrypt,omitempty"`
LogLevel string `json:"log_level,omitempty"`
EnableSyslog bool `json:"enable_syslog,omitempty"`
StartJoin []string `json:"start_join,omitempty"`
RetryJoin []string `json:"retry_join,omitempty"`
RetryInterval string `json:"retry_interval,omitempty"`
}
)
var (
@ -94,7 +79,6 @@ dataDir={{.DataDir}}
clientPort={{.ClientPort}}
initLimit={{.InitLimit}}
syncLimit={{.SyncLimit}}
preAllocSize={{.PreAllocSize}}
maxClientCnxns={{.MaxClientCnxns}}
{{range .Peers}}server.{{.MyID}}={{.IP}}:2888:3888
{{end}}
@ -104,7 +88,6 @@ maxClientCnxns={{.MaxClientCnxns}}
ClientPort: "2181",
InitLimit: 5,
SyncLimit: 5,
PreAllocSize: 65536 * 1024,
MaxClientCnxns: 60,
Peers: []ZookeeperPeer{
{MyID: 1, IP: ""},
@ -160,10 +143,7 @@ func CommandFunc(cmd *cobra.Command, args []string) error {
RegisterTransporterServer(grpcServer, sender)
if err := grpcServer.Serve(ln); err != nil {
return err
}
return nil
return grpcServer.Serve(ln)
}
type transporterServer struct { // satisfy TransporterServer
@ -176,7 +156,7 @@ type transporterServer struct { // satisfy TransporterServer
var databaseStopped = make(chan struct{})
func (t *transporterServer) Transfer(ctx context.Context, r *Request) (*Response, error) {
peerIPs := strings.Split(r.PeerIPs, "___")
peerIPs := strings.Split(r.PeerIPString, "___")
if r.Operation == Request_Start || r.Operation == Request_Restart {
if !filepath.HasPrefix(etcdDataDir, globalFlags.WorkingDirectory) {
etcdDataDir = filepath.Join(globalFlags.WorkingDirectory, etcdDataDir)
@ -193,8 +173,8 @@ func (t *transporterServer) Transfer(ctx context.Context, r *Request) (*Response
if !filepath.HasPrefix(r.DatabaseLogPath, globalFlags.WorkingDirectory) {
r.DatabaseLogPath = filepath.Join(globalFlags.WorkingDirectory, r.DatabaseLogPath)
}
if !filepath.HasPrefix(r.MonitorResultPath, globalFlags.WorkingDirectory) {
r.MonitorResultPath = filepath.Join(globalFlags.WorkingDirectory, r.MonitorResultPath)
if !filepath.HasPrefix(r.MonitorLogPath, globalFlags.WorkingDirectory) {
r.MonitorLogPath = filepath.Join(globalFlags.WorkingDirectory, r.MonitorLogPath)
}
log.Printf("Working directory: %s", globalFlags.WorkingDirectory)
@ -203,14 +183,14 @@ func (t *transporterServer) Transfer(ctx context.Context, r *Request) (*Response
log.Printf("Zookeeper working directory: %s", zkWorkingDir)
log.Printf("Zookeeper data directory: %s", zkDataDir)
log.Printf("Database log path: %s", r.DatabaseLogPath)
log.Printf("Monitor result path: %s", r.MonitorResultPath)
log.Printf("Monitor result path: %s", r.MonitorLogPath)
}
if r.Operation == Request_Start {
t.req = *r
}
if t.req.StorageKey != "" {
if err := toFile(t.req.StorageKey, filepath.Join(globalFlags.WorkingDirectory, "key.json")); err != nil {
if t.req.GoogleCloudStorageKey != "" {
if err := toFile(t.req.GoogleCloudStorageKey, filepath.Join(globalFlags.WorkingDirectory, "key.json")); err != nil {
return nil, err
}
}
@ -310,7 +290,6 @@ func (t *transporterServer) Transfer(ctx context.Context, r *Request) (*Response
peers = append(peers, ZookeeperPeer{MyID: i + 1, IP: peerIPs[i]})
}
zkCfg.Peers = peers
zkCfg.PreAllocSize = t.req.ZookeeperPreAllocSize
zkCfg.MaxClientCnxns = t.req.ZookeeperMaxClientCnxns
tpl := template.Must(template.New("zkTemplate").Parse(zkTemplate))
buf := new(bytes.Buffer)
@ -369,7 +348,8 @@ func (t *transporterServer) Transfer(ctx context.Context, r *Request) (*Response
t.logfile = f
var flags []string
if t.req.ServerIndex == 0 { // leader
switch t.req.ServerIndex {
case 0: // leader
flags = []string{
"agent",
"-server",
@ -378,7 +358,8 @@ func (t *transporterServer) Transfer(ctx context.Context, r *Request) (*Response
"-client", peerIPs[t.req.ServerIndex],
"-bootstrap-expect", "3",
}
} else {
default:
flags = []string{
"agent",
"-server",
@ -480,7 +461,7 @@ func (t *transporterServer) Transfer(ctx context.Context, r *Request) (*Response
return err
}
f, err := openToAppend(t.req.MonitorResultPath)
f, err := openToAppend(t.req.MonitorLogPath)
if err != nil {
return err
}
@ -489,7 +470,7 @@ func (t *transporterServer) Transfer(ctx context.Context, r *Request) (*Response
return ps.WriteToCSV(f, pss...)
}
log.Printf("%s monitor saved at %s", t.req.Database, t.req.MonitorResultPath)
log.Printf("%s monitor saved at %s", t.req.Database, t.req.MonitorLogPath)
var err error
if err = rFunc(); err != nil {
log.Warningln("error:", err)
@ -504,12 +485,14 @@ func (t *transporterServer) Transfer(ctx context.Context, r *Request) (*Response
log.Warnf("Monitoring error %v", err)
break escape
}
case sig := <-notifier:
log.Printf("Received %v", sig)
return
case <-databaseStopped:
log.Println("Monitoring stopped. Uploading data to cloud storage...")
u, err := remotestorage.NewGoogleCloudStorage([]byte(t.req.StorageKey), t.req.GoogleCloudProjectName)
u, err := remotestorage.NewGoogleCloudStorage([]byte(t.req.GoogleCloudStorageKey), t.req.GoogleCloudProjectName)
if err != nil {
log.Warnf("error (%v)", err)
return
@ -518,13 +501,13 @@ func (t *transporterServer) Transfer(ctx context.Context, r *Request) (*Response
// set up file names
srcDatabaseLogPath := t.req.DatabaseLogPath
dstDatabaseLogPath := filepath.Base(t.req.DatabaseLogPath)
if !strings.HasPrefix(filepath.Base(t.req.DatabaseLogPath), t.req.LogPrefix) {
dstDatabaseLogPath = fmt.Sprintf("%s-%d-%s", t.req.LogPrefix, t.req.ServerIndex+1, filepath.Base(t.req.DatabaseLogPath))
if !strings.HasPrefix(filepath.Base(t.req.DatabaseLogPath), t.req.TestName) {
dstDatabaseLogPath = fmt.Sprintf("%s-%d-%s", t.req.TestName, t.req.ServerIndex+1, filepath.Base(t.req.DatabaseLogPath))
}
log.Printf("Uploading %s to %s", srcDatabaseLogPath, dstDatabaseLogPath)
var uerr error
for k := 0; k < 5; k++ {
if uerr = u.UploadFile(t.req.Bucket, srcDatabaseLogPath, dstDatabaseLogPath); uerr != nil {
if uerr = u.UploadFile(t.req.GoogleCloudStorageBucketName, srcDatabaseLogPath, dstDatabaseLogPath); uerr != nil {
log.Println(uerr)
continue
} else {
@ -532,14 +515,14 @@ func (t *transporterServer) Transfer(ctx context.Context, r *Request) (*Response
}
}
srcMonitorResultPath := t.req.MonitorResultPath
dstMonitorResultPath := filepath.Base(t.req.MonitorResultPath)
if !strings.HasPrefix(filepath.Base(t.req.MonitorResultPath), t.req.LogPrefix) {
dstMonitorResultPath = fmt.Sprintf("%s-%d-%s", t.req.LogPrefix, t.req.ServerIndex+1, filepath.Base(t.req.MonitorResultPath))
srcMonitorResultPath := t.req.MonitorLogPath
dstMonitorResultPath := filepath.Base(t.req.MonitorLogPath)
if !strings.HasPrefix(filepath.Base(t.req.MonitorLogPath), t.req.TestName) {
dstMonitorResultPath = fmt.Sprintf("%s-%d-%s", t.req.TestName, t.req.ServerIndex+1, filepath.Base(t.req.MonitorLogPath))
}
log.Printf("Uploading %s to %s", srcMonitorResultPath, dstMonitorResultPath)
for k := 0; k < 5; k++ {
if uerr = u.UploadFile(t.req.Bucket, srcMonitorResultPath, dstMonitorResultPath); uerr != nil {
if uerr = u.UploadFile(t.req.GoogleCloudStorageBucketName, srcMonitorResultPath, dstMonitorResultPath); uerr != nil {
log.Println(uerr)
continue
} else {
@ -549,12 +532,12 @@ func (t *transporterServer) Transfer(ctx context.Context, r *Request) (*Response
srcAgentLogPath := agentLogPath
dstAgentLogPath := filepath.Base(agentLogPath)
if !strings.HasPrefix(filepath.Base(agentLogPath), t.req.LogPrefix) {
dstAgentLogPath = fmt.Sprintf("%s-%d-%s", t.req.LogPrefix, t.req.ServerIndex+1, filepath.Base(agentLogPath))
if !strings.HasPrefix(filepath.Base(agentLogPath), t.req.TestName) {
dstAgentLogPath = fmt.Sprintf("%s-%d-%s", t.req.TestName, t.req.ServerIndex+1, filepath.Base(agentLogPath))
}
log.Printf("Uploading %s to %s", srcAgentLogPath, dstAgentLogPath)
for k := 0; k < 5; k++ {
if uerr = u.UploadFile(t.req.Bucket, srcAgentLogPath, dstAgentLogPath); uerr != nil {
if uerr = u.UploadFile(t.req.GoogleCloudStorageBucketName, srcAgentLogPath, dstAgentLogPath); uerr != nil {
log.Println(uerr)
continue
} else {

View File

@ -89,33 +89,29 @@ func (Request_Database) EnumDescriptor() ([]byte, []int) { return fileDescriptor
type Request struct {
Operation Request_Operation `protobuf:"varint,1,opt,name=operation,proto3,enum=agent.Request_Operation" json:"operation,omitempty"`
Database Request_Database `protobuf:"varint,2,opt,name=database,proto3,enum=agent.Request_Database" json:"database,omitempty"`
PeerIPs string `protobuf:"bytes,3,opt,name=peerIPs,proto3" json:"peerIPs,omitempty"`
PeerIPString string `protobuf:"bytes,3,opt,name=peerIPString,proto3" json:"peerIPString,omitempty"`
// ServerIPIndex is the index in peerIPs that points to the
// corresponding remote IP.
ServerIndex uint32 `protobuf:"varint,4,opt,name=serverIndex,proto3" json:"serverIndex,omitempty"`
// LogPrefix prefixes all logs to be generated in agent.
LogPrefix string `protobuf:"bytes,5,opt,name=logPrefix,proto3" json:"logPrefix,omitempty"`
// TestName prefixes all logs to be generated in agent.
TestName string `protobuf:"bytes,5,opt,name=testName,proto3" json:"testName,omitempty"`
// DatabaseLogPath is the file path to store the database logs.
DatabaseLogPath string `protobuf:"bytes,6,opt,name=databaseLogPath,proto3" json:"databaseLogPath,omitempty"`
// MonitorResultPath is the file path to store monitoring results.
MonitorResultPath string `protobuf:"bytes,7,opt,name=monitorResultPath,proto3" json:"monitorResultPath,omitempty"`
// MonitorLogPath is the file path to store monitoring results.
MonitorLogPath string `protobuf:"bytes,7,opt,name=monitorLogPath,proto3" json:"monitorLogPath,omitempty"`
// GoogleCloudProjectName is the project name to use
// to upload logs.
GoogleCloudProjectName string `protobuf:"bytes,8,opt,name=googleCloudProjectName,proto3" json:"googleCloudProjectName,omitempty"`
// StorageKey is the key to be used to upload
// GoogleCloudStorageKey is the key to be used to upload
// data and logs to Google Cloud Storage and others.
StorageKey string `protobuf:"bytes,9,opt,name=storageKey,proto3" json:"storageKey,omitempty"`
// Bucket is the bucket name to store all data and logs.
Bucket string `protobuf:"bytes,10,opt,name=bucket,proto3" json:"bucket,omitempty"`
GoogleCloudStorageKey string `protobuf:"bytes,9,opt,name=googleCloudStorageKey,proto3" json:"googleCloudStorageKey,omitempty"`
// GoogleCloudStorageBucketName is the bucket name to store all data and logs.
GoogleCloudStorageBucketName string `protobuf:"bytes,10,opt,name=googleCloudStorageBucketName,proto3" json:"googleCloudStorageBucketName,omitempty"`
// ZookeeperMyID is myid that needs to be stored as a file in the remote machine.
ZookeeperMyID uint32 `protobuf:"varint,11,opt,name=zookeeperMyID,proto3" json:"zookeeperMyID,omitempty"`
// ZookeeperPreAllocSize avoid seeks ZooKeeper allocates space
// in the transaction log file in blocks of PreAllocSize kilobytes.
// Default value is 65536 * 1024.
ZookeeperPreAllocSize int64 `protobuf:"varint,12,opt,name=zookeeperPreAllocSize,proto3" json:"zookeeperPreAllocSize,omitempty"`
// ZookeeperMaxClientCnxns limits the number of concurrent connections
// (at the socket level) that a single client, identified by IP address.
ZookeeperMaxClientCnxns int64 `protobuf:"varint,13,opt,name=zookeeperMaxClientCnxns,proto3" json:"zookeeperMaxClientCnxns,omitempty"`
ZookeeperMaxClientCnxns int64 `protobuf:"varint,12,opt,name=zookeeperMaxClientCnxns,proto3" json:"zookeeperMaxClientCnxns,omitempty"`
}
func (m *Request) Reset() { *m = Request{} }
@ -225,22 +221,22 @@ func (m *Request) MarshalTo(data []byte) (int, error) {
i++
i = encodeVarintMessage(data, i, uint64(m.Database))
}
if len(m.PeerIPs) > 0 {
if len(m.PeerIPString) > 0 {
data[i] = 0x1a
i++
i = encodeVarintMessage(data, i, uint64(len(m.PeerIPs)))
i += copy(data[i:], m.PeerIPs)
i = encodeVarintMessage(data, i, uint64(len(m.PeerIPString)))
i += copy(data[i:], m.PeerIPString)
}
if m.ServerIndex != 0 {
data[i] = 0x20
i++
i = encodeVarintMessage(data, i, uint64(m.ServerIndex))
}
if len(m.LogPrefix) > 0 {
if len(m.TestName) > 0 {
data[i] = 0x2a
i++
i = encodeVarintMessage(data, i, uint64(len(m.LogPrefix)))
i += copy(data[i:], m.LogPrefix)
i = encodeVarintMessage(data, i, uint64(len(m.TestName)))
i += copy(data[i:], m.TestName)
}
if len(m.DatabaseLogPath) > 0 {
data[i] = 0x32
@ -248,11 +244,11 @@ func (m *Request) MarshalTo(data []byte) (int, error) {
i = encodeVarintMessage(data, i, uint64(len(m.DatabaseLogPath)))
i += copy(data[i:], m.DatabaseLogPath)
}
if len(m.MonitorResultPath) > 0 {
if len(m.MonitorLogPath) > 0 {
data[i] = 0x3a
i++
i = encodeVarintMessage(data, i, uint64(len(m.MonitorResultPath)))
i += copy(data[i:], m.MonitorResultPath)
i = encodeVarintMessage(data, i, uint64(len(m.MonitorLogPath)))
i += copy(data[i:], m.MonitorLogPath)
}
if len(m.GoogleCloudProjectName) > 0 {
data[i] = 0x42
@ -260,30 +256,25 @@ func (m *Request) MarshalTo(data []byte) (int, error) {
i = encodeVarintMessage(data, i, uint64(len(m.GoogleCloudProjectName)))
i += copy(data[i:], m.GoogleCloudProjectName)
}
if len(m.StorageKey) > 0 {
if len(m.GoogleCloudStorageKey) > 0 {
data[i] = 0x4a
i++
i = encodeVarintMessage(data, i, uint64(len(m.StorageKey)))
i += copy(data[i:], m.StorageKey)
i = encodeVarintMessage(data, i, uint64(len(m.GoogleCloudStorageKey)))
i += copy(data[i:], m.GoogleCloudStorageKey)
}
if len(m.Bucket) > 0 {
if len(m.GoogleCloudStorageBucketName) > 0 {
data[i] = 0x52
i++
i = encodeVarintMessage(data, i, uint64(len(m.Bucket)))
i += copy(data[i:], m.Bucket)
i = encodeVarintMessage(data, i, uint64(len(m.GoogleCloudStorageBucketName)))
i += copy(data[i:], m.GoogleCloudStorageBucketName)
}
if m.ZookeeperMyID != 0 {
data[i] = 0x58
i++
i = encodeVarintMessage(data, i, uint64(m.ZookeeperMyID))
}
if m.ZookeeperPreAllocSize != 0 {
data[i] = 0x60
i++
i = encodeVarintMessage(data, i, uint64(m.ZookeeperPreAllocSize))
}
if m.ZookeeperMaxClientCnxns != 0 {
data[i] = 0x68
data[i] = 0x60
i++
i = encodeVarintMessage(data, i, uint64(m.ZookeeperMaxClientCnxns))
}
@ -354,14 +345,14 @@ func (m *Request) Size() (n int) {
if m.Database != 0 {
n += 1 + sovMessage(uint64(m.Database))
}
l = len(m.PeerIPs)
l = len(m.PeerIPString)
if l > 0 {
n += 1 + l + sovMessage(uint64(l))
}
if m.ServerIndex != 0 {
n += 1 + sovMessage(uint64(m.ServerIndex))
}
l = len(m.LogPrefix)
l = len(m.TestName)
if l > 0 {
n += 1 + l + sovMessage(uint64(l))
}
@ -369,7 +360,7 @@ func (m *Request) Size() (n int) {
if l > 0 {
n += 1 + l + sovMessage(uint64(l))
}
l = len(m.MonitorResultPath)
l = len(m.MonitorLogPath)
if l > 0 {
n += 1 + l + sovMessage(uint64(l))
}
@ -377,20 +368,17 @@ func (m *Request) Size() (n int) {
if l > 0 {
n += 1 + l + sovMessage(uint64(l))
}
l = len(m.StorageKey)
l = len(m.GoogleCloudStorageKey)
if l > 0 {
n += 1 + l + sovMessage(uint64(l))
}
l = len(m.Bucket)
l = len(m.GoogleCloudStorageBucketName)
if l > 0 {
n += 1 + l + sovMessage(uint64(l))
}
if m.ZookeeperMyID != 0 {
n += 1 + sovMessage(uint64(m.ZookeeperMyID))
}
if m.ZookeeperPreAllocSize != 0 {
n += 1 + sovMessage(uint64(m.ZookeeperPreAllocSize))
}
if m.ZookeeperMaxClientCnxns != 0 {
n += 1 + sovMessage(uint64(m.ZookeeperMaxClientCnxns))
}
@ -488,7 +476,7 @@ func (m *Request) Unmarshal(data []byte) error {
}
case 3:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field PeerIPs", wireType)
return fmt.Errorf("proto: wrong wireType = %d for field PeerIPString", wireType)
}
var stringLen uint64
for shift := uint(0); ; shift += 7 {
@ -513,7 +501,7 @@ func (m *Request) Unmarshal(data []byte) error {
if postIndex > l {
return io.ErrUnexpectedEOF
}
m.PeerIPs = string(data[iNdEx:postIndex])
m.PeerIPString = string(data[iNdEx:postIndex])
iNdEx = postIndex
case 4:
if wireType != 0 {
@ -536,7 +524,7 @@ func (m *Request) Unmarshal(data []byte) error {
}
case 5:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field LogPrefix", wireType)
return fmt.Errorf("proto: wrong wireType = %d for field TestName", wireType)
}
var stringLen uint64
for shift := uint(0); ; shift += 7 {
@ -561,7 +549,7 @@ func (m *Request) Unmarshal(data []byte) error {
if postIndex > l {
return io.ErrUnexpectedEOF
}
m.LogPrefix = string(data[iNdEx:postIndex])
m.TestName = string(data[iNdEx:postIndex])
iNdEx = postIndex
case 6:
if wireType != 2 {
@ -594,7 +582,7 @@ func (m *Request) Unmarshal(data []byte) error {
iNdEx = postIndex
case 7:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field MonitorResultPath", wireType)
return fmt.Errorf("proto: wrong wireType = %d for field MonitorLogPath", wireType)
}
var stringLen uint64
for shift := uint(0); ; shift += 7 {
@ -619,7 +607,7 @@ func (m *Request) Unmarshal(data []byte) error {
if postIndex > l {
return io.ErrUnexpectedEOF
}
m.MonitorResultPath = string(data[iNdEx:postIndex])
m.MonitorLogPath = string(data[iNdEx:postIndex])
iNdEx = postIndex
case 8:
if wireType != 2 {
@ -652,7 +640,7 @@ func (m *Request) Unmarshal(data []byte) error {
iNdEx = postIndex
case 9:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field StorageKey", wireType)
return fmt.Errorf("proto: wrong wireType = %d for field GoogleCloudStorageKey", wireType)
}
var stringLen uint64
for shift := uint(0); ; shift += 7 {
@ -677,11 +665,11 @@ func (m *Request) Unmarshal(data []byte) error {
if postIndex > l {
return io.ErrUnexpectedEOF
}
m.StorageKey = string(data[iNdEx:postIndex])
m.GoogleCloudStorageKey = string(data[iNdEx:postIndex])
iNdEx = postIndex
case 10:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field Bucket", wireType)
return fmt.Errorf("proto: wrong wireType = %d for field GoogleCloudStorageBucketName", wireType)
}
var stringLen uint64
for shift := uint(0); ; shift += 7 {
@ -706,7 +694,7 @@ func (m *Request) Unmarshal(data []byte) error {
if postIndex > l {
return io.ErrUnexpectedEOF
}
m.Bucket = string(data[iNdEx:postIndex])
m.GoogleCloudStorageBucketName = string(data[iNdEx:postIndex])
iNdEx = postIndex
case 11:
if wireType != 0 {
@ -728,25 +716,6 @@ func (m *Request) Unmarshal(data []byte) error {
}
}
case 12:
if wireType != 0 {
return fmt.Errorf("proto: wrong wireType = %d for field ZookeeperPreAllocSize", wireType)
}
m.ZookeeperPreAllocSize = 0
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowMessage
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := data[iNdEx]
iNdEx++
m.ZookeeperPreAllocSize |= (int64(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
case 13:
if wireType != 0 {
return fmt.Errorf("proto: wrong wireType = %d for field ZookeeperMaxClientCnxns", wireType)
}
@ -962,37 +931,36 @@ var (
)
var fileDescriptorMessage = []byte{
// 505 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0x74, 0x53, 0xcd, 0x4e, 0xdb, 0x4e,
0x10, 0xcf, 0x07, 0x49, 0xec, 0xc9, 0x3f, 0x90, 0xff, 0xaa, 0x85, 0x15, 0xaa, 0x22, 0x14, 0x71,
0xe0, 0xd0, 0x04, 0x29, 0xa9, 0xaa, 0x1e, 0xda, 0x43, 0x1b, 0x2e, 0x88, 0x7e, 0x44, 0x4e, 0x4f,
0xbd, 0x39, 0xce, 0xc4, 0xb8, 0x38, 0x9e, 0xd4, 0xbb, 0x46, 0x81, 0x27, 0xe9, 0xa1, 0x0f, 0xc4,
0xb1, 0x8f, 0xd0, 0x8f, 0x17, 0xe9, 0x30, 0xc4, 0x09, 0xd0, 0x72, 0x58, 0x69, 0x7e, 0x5f, 0xb3,
0xd2, 0xec, 0x2c, 0x34, 0x66, 0x68, 0x8c, 0x1f, 0x62, 0x77, 0x9e, 0x92, 0x25, 0x55, 0xe1, 0x32,
0xb1, 0xbb, 0x9d, 0x30, 0xb2, 0xa7, 0xd9, 0xb8, 0x1b, 0xd0, 0xec, 0x30, 0xa4, 0x90, 0x0e, 0x45,
0x1d, 0x67, 0x53, 0x41, 0x02, 0xa4, 0xba, 0x49, 0xb5, 0xbf, 0x55, 0xa0, 0xe6, 0xe1, 0x97, 0x0c,
0x8d, 0x55, 0xcf, 0xc1, 0xa5, 0x39, 0xa6, 0xbe, 0x8d, 0x28, 0xd1, 0xc5, 0xbd, 0xe2, 0xc1, 0x66,
0x4f, 0x77, 0xa5, 0x6b, 0x77, 0x69, 0xe9, 0x7e, 0xc8, 0x75, 0x6f, 0x6d, 0x55, 0x7d, 0x70, 0x26,
0xbe, 0xf5, 0xc7, 0xbe, 0x41, 0x5d, 0x92, 0xd8, 0xce, 0xbd, 0xd8, 0xd1, 0x52, 0xf6, 0x56, 0x46,
0xa5, 0xa1, 0x36, 0x47, 0x4c, 0x8f, 0x87, 0x46, 0x97, 0x39, 0xe3, 0x7a, 0x39, 0x54, 0x7b, 0x50,
0x37, 0x98, 0x9e, 0x33, 0x48, 0x26, 0xb8, 0xd0, 0x1b, 0xac, 0x36, 0xbc, 0xdb, 0x94, 0x7a, 0x02,
0x6e, 0x4c, 0xe1, 0x30, 0xc5, 0x69, 0xb4, 0xd0, 0x15, 0x49, 0xaf, 0x09, 0x75, 0x00, 0x5b, 0xf9,
0x2d, 0x6f, 0x99, 0xf4, 0xed, 0xa9, 0xae, 0x8a, 0xe7, 0x3e, 0xad, 0x9e, 0xc2, 0xff, 0x33, 0x4a,
0x22, 0x4b, 0xa9, 0x87, 0x26, 0x8b, 0xad, 0x78, 0x6b, 0xe2, 0xfd, 0x5b, 0xe0, 0xf1, 0x6c, 0x87,
0x44, 0x61, 0x8c, 0x83, 0x98, 0xb2, 0xc9, 0x30, 0xa5, 0xcf, 0x18, 0xd8, 0xf7, 0xfe, 0x0c, 0xb5,
0x23, 0x91, 0x07, 0x54, 0xd5, 0x02, 0x30, 0xdc, 0x89, 0x27, 0x72, 0x82, 0x17, 0xda, 0x15, 0xef,
0x2d, 0x46, 0x6d, 0x43, 0x75, 0x9c, 0x05, 0x67, 0x68, 0x35, 0x88, 0xb6, 0x44, 0x6a, 0x1f, 0x1a,
0x97, 0x44, 0x67, 0x88, 0x3c, 0xe8, 0x77, 0x17, 0xc7, 0x47, 0xba, 0x2e, 0x93, 0xb8, 0x4b, 0xaa,
0x67, 0xf0, 0x78, 0x45, 0xf0, 0x00, 0x5e, 0xc7, 0x31, 0x05, 0xa3, 0xe8, 0x12, 0xf5, 0x7f, 0xec,
0x2e, 0x7b, 0xff, 0x16, 0xd5, 0x0b, 0xd8, 0x59, 0xb7, 0xf1, 0x17, 0x83, 0x38, 0xe2, 0xe7, 0x1a,
0x24, 0x8b, 0xc4, 0xe8, 0x86, 0xe4, 0x1e, 0x92, 0xdb, 0x1d, 0x70, 0x57, 0x4b, 0xa0, 0x5c, 0xa8,
0x8c, 0xac, 0x9f, 0xda, 0x66, 0x41, 0x39, 0xb0, 0x31, 0xb2, 0x34, 0x6f, 0x16, 0x55, 0xfd, 0x7a,
0xa3, 0x8c, 0xd0, 0xa5, 0xf6, 0x2b, 0x70, 0xf2, 0xc7, 0x57, 0x00, 0x55, 0xb4, 0xc1, 0xe4, 0xbc,
0xcf, 0xf6, 0xbc, 0xee, 0x71, 0xa0, 0x01, 0xee, 0x27, 0xa2, 0x13, 0xb9, 0xad, 0x59, 0xba, 0x96,
0x06, 0x94, 0xf0, 0xd8, 0x9b, 0xe5, 0xf6, 0x3e, 0x38, 0xdc, 0x6b, 0xce, 0x50, 0x36, 0xc6, 0x64,
0x41, 0xc0, 0x4b, 0x2f, 0xcb, 0xe9, 0x78, 0x39, 0xec, 0xbd, 0x84, 0xfa, 0xc7, 0xd4, 0x4f, 0xd8,
0x97, 0x5a, 0x4c, 0x55, 0x07, 0x1c, 0x81, 0x53, 0xae, 0x37, 0xef, 0x6e, 0xe2, 0xee, 0xd6, 0x0a,
0xdf, 0x74, 0x6d, 0x17, 0xde, 0x3c, 0xba, 0xfa, 0xd9, 0x2a, 0x5c, 0xfd, 0x6a, 0x15, 0xbf, 0xf3,
0xf9, 0xc1, 0xe7, 0xeb, 0xef, 0x56, 0x61, 0x5c, 0x95, 0xff, 0xd1, 0xff, 0x13, 0x00, 0x00, 0xff,
0xff, 0x9b, 0x3c, 0x4e, 0x34, 0x66, 0x03, 0x00, 0x00,
// 491 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0x7c, 0x53, 0xcd, 0x6e, 0xda, 0x40,
0x10, 0x86, 0x10, 0xc0, 0x1e, 0x02, 0x41, 0xab, 0xb6, 0x59, 0xa1, 0x0a, 0x45, 0x56, 0x54, 0xe5,
0x02, 0x91, 0xa0, 0xaa, 0x7a, 0x68, 0x2f, 0x90, 0x4b, 0x94, 0xfe, 0x20, 0xd3, 0x53, 0x6f, 0x8b,
0x99, 0x38, 0x6e, 0xc0, 0xe3, 0xae, 0xd7, 0x11, 0xe9, 0x93, 0xf4, 0x19, 0xfa, 0x24, 0x39, 0xf6,
0x11, 0xfa, 0xf3, 0x22, 0x9d, 0x2c, 0x31, 0x09, 0xb4, 0xe9, 0xc1, 0xd2, 0x7c, 0x7f, 0x33, 0xd6,
0xee, 0x2c, 0xd4, 0xe7, 0x98, 0xa6, 0x2a, 0xc4, 0x6e, 0xa2, 0xc9, 0x90, 0x28, 0x73, 0x19, 0x9b,
0x56, 0x27, 0x8c, 0xcc, 0x79, 0x36, 0xe9, 0x06, 0x34, 0x3f, 0x0a, 0x29, 0xa4, 0x23, 0xab, 0x4e,
0xb2, 0x33, 0x8b, 0x2c, 0xb0, 0xd5, 0x32, 0xe5, 0x7d, 0x2b, 0x43, 0xd5, 0xc7, 0xcf, 0x19, 0xa6,
0x46, 0xbc, 0x00, 0x97, 0x12, 0xd4, 0xca, 0x44, 0x14, 0xcb, 0xe2, 0x7e, 0xf1, 0xb0, 0xd1, 0x93,
0x5d, 0xdb, 0xb5, 0x7b, 0x6b, 0xe9, 0xbe, 0xcf, 0x75, 0xff, 0xce, 0x2a, 0xfa, 0xe0, 0x4c, 0x95,
0x51, 0x13, 0x95, 0xa2, 0xdc, 0xb2, 0xb1, 0xbd, 0x8d, 0xd8, 0xf1, 0xad, 0xec, 0xaf, 0x8c, 0xc2,
0x83, 0x9d, 0x04, 0x51, 0x9f, 0x8c, 0xc6, 0x46, 0x47, 0x71, 0x28, 0x4b, 0x1c, 0x74, 0xfd, 0x35,
0x4e, 0xec, 0x43, 0x2d, 0x45, 0x7d, 0xc9, 0x4c, 0x3c, 0xc5, 0x85, 0xdc, 0x66, 0x4b, 0xdd, 0xbf,
0x4f, 0x89, 0x16, 0x38, 0x86, 0x07, 0xbc, 0x53, 0x73, 0x94, 0x65, 0xdb, 0x61, 0x85, 0xc5, 0x21,
0xec, 0xe6, 0xd3, 0xde, 0x50, 0x38, 0x52, 0xe6, 0x5c, 0x56, 0xac, 0x65, 0x93, 0x16, 0xcf, 0xa0,
0x31, 0xa7, 0x38, 0x32, 0xa4, 0x73, 0x63, 0xd5, 0x1a, 0x37, 0x58, 0x3e, 0xa0, 0x27, 0x21, 0x51,
0x38, 0xc3, 0xe1, 0x8c, 0xb2, 0xe9, 0x48, 0xd3, 0x27, 0x0c, 0x96, 0xb3, 0x1d, 0xeb, 0x7f, 0x40,
0x15, 0xcf, 0xe1, 0xf1, 0x3d, 0x65, 0xcc, 0x1d, 0xf9, 0x78, 0x4e, 0xf1, 0x4a, 0xba, 0x36, 0xf6,
0x6f, 0x51, 0x0c, 0xe0, 0xe9, 0xdf, 0xc2, 0x20, 0x0b, 0x2e, 0x70, 0x39, 0x13, 0x6c, 0xf8, 0xbf,
0x1e, 0x71, 0x00, 0xf5, 0x2f, 0x44, 0x17, 0x88, 0x7c, 0x59, 0x6f, 0xaf, 0x4e, 0x8e, 0x65, 0xcd,
0x9e, 0xe1, 0x3a, 0x29, 0x5e, 0xc2, 0xde, 0x1d, 0xa1, 0x16, 0xc3, 0x59, 0xc4, 0x97, 0x37, 0x8c,
0x17, 0x71, 0x2a, 0x77, 0xd8, 0x5f, 0xf2, 0x1f, 0x92, 0xbd, 0x0e, 0xb8, 0xab, 0x95, 0x10, 0x2e,
0x94, 0xc7, 0x46, 0x69, 0xd3, 0x2c, 0x08, 0x07, 0xb6, 0xf9, 0x67, 0x92, 0x66, 0x51, 0xd4, 0x6e,
0xf6, 0x2b, 0xb5, 0xf4, 0x96, 0xf7, 0x1a, 0x9c, 0x7c, 0x15, 0x04, 0x40, 0x05, 0x4d, 0x30, 0xbd,
0xec, 0xb3, 0x3d, 0xaf, 0x7b, 0x1c, 0xa8, 0x83, 0xfb, 0x91, 0xe8, 0xd4, 0x4e, 0x6b, 0x6e, 0xdd,
0x48, 0x43, 0x8a, 0xd3, 0x6c, 0xd6, 0x2c, 0x79, 0x07, 0xe0, 0x70, 0xaf, 0x84, 0x21, 0x0a, 0x09,
0xd5, 0x34, 0x0b, 0x02, 0x7e, 0x02, 0x76, 0x55, 0x1d, 0x3f, 0x87, 0xbd, 0x57, 0x50, 0xfb, 0xa0,
0x55, 0xcc, 0x3e, 0x6d, 0x50, 0x8b, 0x0e, 0x38, 0x16, 0x9e, 0x71, 0xdd, 0x58, 0xdf, 0xcb, 0xd6,
0xee, 0x0a, 0x2f, 0xbb, 0x7a, 0x85, 0xc1, 0xa3, 0xeb, 0x9f, 0xed, 0xc2, 0xf5, 0xaf, 0x76, 0xf1,
0x3b, 0x7f, 0x3f, 0xf8, 0xfb, 0xfa, 0xbb, 0x5d, 0x98, 0x54, 0xec, 0x6b, 0xe9, 0xff, 0x09, 0x00,
0x00, 0xff, 0xff, 0x69, 0x13, 0x1f, 0x38, 0x74, 0x03, 0x00, 0x00,
}

View File

@ -27,43 +27,38 @@ message Request {
Operation operation = 1;
Database database = 2;
string peerIPs = 3;
string peerIPString = 3;
// ServerIPIndex is the index in peerIPs that points to the
// corresponding remote IP.
uint32 serverIndex = 4;
// LogPrefix prefixes all logs to be generated in agent.
string logPrefix = 5;
// TestName prefixes all logs to be generated in agent.
string testName = 5;
// DatabaseLogPath is the file path to store the database logs.
string databaseLogPath = 6;
// MonitorResultPath is the file path to store monitoring results.
string monitorResultPath = 7;
// MonitorLogPath is the file path to store monitoring results.
string monitorLogPath = 7;
// GoogleCloudProjectName is the project name to use
// to upload logs.
string googleCloudProjectName = 8;
// StorageKey is the key to be used to upload
// GoogleCloudStorageKey is the key to be used to upload
// data and logs to Google Cloud Storage and others.
string storageKey = 9;
string googleCloudStorageKey = 9;
// Bucket is the bucket name to store all data and logs.
string bucket = 10;
// GoogleCloudStorageBucketName is the bucket name to store all data and logs.
string googleCloudStorageBucketName = 10;
// ZookeeperMyID is myid that needs to be stored as a file in the remote machine.
uint32 zookeeperMyID = 11;
// ZookeeperPreAllocSize avoid seeks ZooKeeper allocates space
// in the transaction log file in blocks of PreAllocSize kilobytes.
// Default value is 65536 * 1024.
int64 zookeeperPreAllocSize = 12;
// ZookeeperMaxClientCnxns limits the number of concurrent connections
// (at the socket level) that a single client, identified by IP address.
int64 zookeeperMaxClientCnxns = 13;
int64 zookeeperMaxClientCnxns = 12;
}
message Response {

View File

@ -19,14 +19,6 @@ import (
"runtime"
)
func openToRead(fpath string) (*os.File, error) {
f, err := os.OpenFile(fpath, os.O_RDONLY, 0444)
if err != nil {
return f, err
}
return f, nil
}
func openToAppend(fpath string) (*os.File, error) {
f, err := os.OpenFile(fpath, os.O_RDWR|os.O_APPEND, 0777)
if err != nil {
@ -47,10 +39,8 @@ func toFile(txt, fpath string) error {
}
}
defer f.Close()
if _, err := f.WriteString(txt); err != nil {
_, err = f.WriteString(txt)
return err
}
return nil
}
func homeDir() string {

View File

@ -1,3 +1,4 @@
# aggregate the data from each machine
step1:
- data_path_list:
- bench-20160330/bench-01-etcd-1-monitor.csv
@ -24,6 +25,7 @@ step1:
data_benchmark_path: bench-20160330/bench-02-zk-timeseries.csv
output_path: bench-20160330/bench-02-zk-aggregated.csv
# aggregate the data of each aggregated from step1
step2:
- data_list:
- path: bench-20160330/bench-01-etcd-aggregated.csv
@ -38,6 +40,7 @@ step2:
name: zookeeper_3.4.8
output_path: bench-20160330/bench-02-all-aggregated.csv
# plot graphs
step3:
- data_path: bench-20160330/bench-01-all-aggregated.csv
title: Write 300K keys, 1 client, key 64 bytes, value 256 bytes

View File

@ -1,562 +0,0 @@
step1:
- data_path_list:
- bench-20160325/bench-01-consul-1-monitor.csv
- bench-20160325/bench-01-consul-2-monitor.csv
- bench-20160325/bench-01-consul-3-monitor.csv
data_benchmark_path: bench-20160325/bench-01-consul-timeseries.csv
output_path: bench-20160325/bench-01-consul-aggregated.csv
- data_path_list:
- bench-20160325/bench-01-etcd-1-monitor.csv
- bench-20160325/bench-01-etcd-2-monitor.csv
- bench-20160325/bench-01-etcd-3-monitor.csv
data_benchmark_path: bench-20160325/bench-01-etcd-timeseries.csv
output_path: bench-20160325/bench-01-etcd-aggregated.csv
- data_path_list:
- bench-20160325/bench-01-zk-1-monitor.csv
- bench-20160325/bench-01-zk-2-monitor.csv
- bench-20160325/bench-01-zk-3-monitor.csv
data_benchmark_path: bench-20160325/bench-01-zk-timeseries.csv
output_path: bench-20160325/bench-01-zk-aggregated.csv
- data_path_list:
- bench-20160325/bench-02-consul-1-monitor.csv
- bench-20160325/bench-02-consul-2-monitor.csv
- bench-20160325/bench-02-consul-3-monitor.csv
data_benchmark_path: bench-20160325/bench-02-consul-timeseries.csv
output_path: bench-20160325/bench-02-consul-aggregated.csv
- data_path_list:
- bench-20160325/bench-02-etcd-1-monitor.csv
- bench-20160325/bench-02-etcd-2-monitor.csv
- bench-20160325/bench-02-etcd-3-monitor.csv
data_benchmark_path: bench-20160325/bench-02-etcd-timeseries.csv
output_path: bench-20160325/bench-02-etcd-aggregated.csv
- data_path_list:
- bench-20160325/bench-02-zk-1-monitor.csv
- bench-20160325/bench-02-zk-2-monitor.csv
- bench-20160325/bench-02-zk-3-monitor.csv
data_benchmark_path: bench-20160325/bench-02-zk-timeseries.csv
output_path: bench-20160325/bench-02-zk-aggregated.csv
- data_path_list:
- bench-20160325/bench-03-consul-1-monitor.csv
- bench-20160325/bench-03-consul-2-monitor.csv
- bench-20160325/bench-03-consul-3-monitor.csv
data_benchmark_path: bench-20160325/bench-03-consul-timeseries.csv
output_path: bench-20160325/bench-03-consul-aggregated.csv
- data_path_list:
- bench-20160325/bench-03-etcd-1-monitor.csv
- bench-20160325/bench-03-etcd-2-monitor.csv
- bench-20160325/bench-03-etcd-3-monitor.csv
data_benchmark_path: bench-20160325/bench-03-etcd-timeseries.csv
output_path: bench-20160325/bench-03-etcd-aggregated.csv
- data_path_list:
- bench-20160325/bench-03-zk-1-monitor.csv
- bench-20160325/bench-03-zk-2-monitor.csv
- bench-20160325/bench-03-zk-3-monitor.csv
data_benchmark_path: bench-20160325/bench-03-zk-timeseries.csv
output_path: bench-20160325/bench-03-zk-aggregated.csv
- data_path_list:
- bench-20160325/bench-04-consul-1-monitor.csv
- bench-20160325/bench-04-consul-2-monitor.csv
- bench-20160325/bench-04-consul-3-monitor.csv
data_benchmark_path: bench-20160325/bench-04-consul-timeseries.csv
output_path: bench-20160325/bench-04-consul-aggregated.csv
- data_path_list:
- bench-20160325/bench-04-etcd-1-monitor.csv
- bench-20160325/bench-04-etcd-2-monitor.csv
- bench-20160325/bench-04-etcd-3-monitor.csv
data_benchmark_path: bench-20160325/bench-04-etcd-timeseries.csv
output_path: bench-20160325/bench-04-etcd-aggregated.csv
- data_path_list:
- bench-20160325/bench-04-zk-1-monitor.csv
- bench-20160325/bench-04-zk-2-monitor.csv
- bench-20160325/bench-04-zk-3-monitor.csv
data_benchmark_path: bench-20160325/bench-04-zk-timeseries.csv
output_path: bench-20160325/bench-04-zk-aggregated.csv
- data_path_list:
- bench-20160325/bench-05-consul-1-monitor.csv
- bench-20160325/bench-05-consul-2-monitor.csv
- bench-20160325/bench-05-consul-3-monitor.csv
data_benchmark_path: bench-20160325/bench-05-consul-timeseries.csv
output_path: bench-20160325/bench-05-consul-aggregated.csv
- data_path_list:
- bench-20160325/bench-05-etcd-1-monitor.csv
- bench-20160325/bench-05-etcd-2-monitor.csv
- bench-20160325/bench-05-etcd-3-monitor.csv
data_benchmark_path: bench-20160325/bench-05-etcd-timeseries.csv
output_path: bench-20160325/bench-05-etcd-aggregated.csv
- data_path_list:
- bench-20160325/bench-05-zk-1-monitor.csv
- bench-20160325/bench-05-zk-2-monitor.csv
- bench-20160325/bench-05-zk-3-monitor.csv
data_benchmark_path: bench-20160325/bench-05-zk-timeseries.csv
output_path: bench-20160325/bench-05-zk-aggregated.csv
- data_path_list:
- bench-20160325/bench-06-consul-1-monitor.csv
- bench-20160325/bench-06-consul-2-monitor.csv
- bench-20160325/bench-06-consul-3-monitor.csv
data_benchmark_path: bench-20160325/bench-06-consul-timeseries.csv
output_path: bench-20160325/bench-06-consul-aggregated.csv
- data_path_list:
- bench-20160325/bench-06-etcd-1-monitor.csv
- bench-20160325/bench-06-etcd-2-monitor.csv
- bench-20160325/bench-06-etcd-3-monitor.csv
data_benchmark_path: bench-20160325/bench-06-etcd-timeseries.csv
output_path: bench-20160325/bench-06-etcd-aggregated.csv
- data_path_list:
- bench-20160325/bench-06-zk-1-monitor.csv
- bench-20160325/bench-06-zk-2-monitor.csv
- bench-20160325/bench-06-zk-3-monitor.csv
data_benchmark_path: bench-20160325/bench-06-zk-timeseries.csv
output_path: bench-20160325/bench-06-zk-aggregated.csv
step2:
- data_list:
- path: bench-20160325/bench-01-etcd-aggregated.csv
name: etcd_v3
- path: bench-20160325/bench-01-zk-aggregated.csv
name: zookeeper_3.4.8
- path: bench-20160325/bench-01-consul-aggregated.csv
name: consul_0.6.4
output_path: bench-20160325/bench-01-all-aggregated.csv
- data_list:
- path: bench-20160325/bench-02-etcd-aggregated.csv
name: etcd_v3
- path: bench-20160325/bench-02-zk-aggregated.csv
name: zookeeper_3.4.8
- path: bench-20160325/bench-02-consul-aggregated.csv
name: consul_0.6.4
output_path: bench-20160325/bench-02-all-aggregated.csv
- data_list:
- path: bench-20160325/bench-03-etcd-aggregated.csv
name: etcd_v3
- path: bench-20160325/bench-03-zk-aggregated.csv
name: zookeeper_3.4.8
- path: bench-20160325/bench-03-consul-aggregated.csv
name: consul_0.6.4
output_path: bench-20160325/bench-03-all-aggregated.csv
- data_list:
- path: bench-20160325/bench-04-etcd-aggregated.csv
name: etcd_v3
- path: bench-20160325/bench-04-zk-aggregated.csv
name: zookeeper_3.4.8
- path: bench-20160325/bench-04-consul-aggregated.csv
name: consul_0.6.4
output_path: bench-20160325/bench-04-all-aggregated.csv
- data_list:
- path: bench-20160325/bench-05-etcd-aggregated.csv
name: etcd_v3
- path: bench-20160325/bench-05-zk-aggregated.csv
name: zookeeper_3.4.8
- path: bench-20160325/bench-05-consul-aggregated.csv
name: consul_0.6.4
output_path: bench-20160325/bench-05-all-aggregated.csv
- data_list:
- path: bench-20160325/bench-06-etcd-aggregated.csv
name: etcd_v3
- path: bench-20160325/bench-06-zk-aggregated.csv
name: zookeeper_3.4.8
- path: bench-20160325/bench-06-consul-aggregated.csv
name: consul_0.6.4
output_path: bench-20160325/bench-06-all-aggregated.csv
step3:
- data_path: bench-20160325/bench-01-all-aggregated.csv
title: Write 300K keys, 1 client, key 64 bytes, value 256 bytes
plot_list:
- lines:
- column: avg_latency_ms_etcd_v3
legend: etcd v3
- column: avg_latency_ms_zookeeper_3.4.8
legend: Zookeeper 3.4.8
- column: avg_latency_ms_consul_0.6.4
legend: Consul 0.6.4
x_axis: Second
y_axis: Latency(millisecond)
output_path_list:
- bench-20160325/bench-01-avg-latency-ms.svg
- bench-20160325/bench-01-avg-latency-ms.png
- lines:
- column: throughput_etcd_v3
legend: etcd v3
- column: throughput_zookeeper_3.4.8
legend: Zookeeper 3.4.8
- column: throughput_consul_0.6.4
legend: Consul 0.6.4
x_axis: Second
y_axis: Throughput
output_path_list:
- bench-20160325/bench-01-throughput.svg
- bench-20160325/bench-01-throughput.png
- lines:
- column: avg_cpu_etcd_v3
legend: etcd v3
- column: avg_cpu_zookeeper_3.4.8
legend: Zookeeper 3.4.8
- column: avg_cpu_consul_0.6.4
legend: Consul 0.6.4
x_axis: Second
y_axis: CPU
output_path_list:
- bench-20160325/bench-01-avg-cpu.svg
- bench-20160325/bench-01-avg-cpu.png
- lines:
- column: avg_memory_mb_etcd_v3
legend: etcd v3
- column: avg_memory_mb_zookeeper_3.4.8
legend: Zookeeper 3.4.8
- column: avg_memory_mb_consul_0.6.4
legend: Consul 0.6.4
x_axis: Second
y_axis: Memory(MB)
output_path_list:
- bench-20160325/bench-01-avg-memory.svg
- bench-20160325/bench-01-avg-memory.png
- data_path: bench-20160325/bench-02-all-aggregated.csv
title: Write 1M keys, 10 clients, key 64 bytes, value 256 bytes
plot_list:
- lines:
- column: avg_latency_ms_etcd_v3
legend: etcd v3
- column: avg_latency_ms_zookeeper_3.4.8
legend: Zookeeper 3.4.8
- column: avg_latency_ms_consul_0.6.4
legend: Consul 0.6.4
x_axis: Second
y_axis: Latency(millisecond)
output_path_list:
- bench-20160325/bench-02-avg-latency-ms.svg
- bench-20160325/bench-02-avg-latency-ms.png
- lines:
- column: throughput_etcd_v3
legend: etcd v3
- column: throughput_zookeeper_3.4.8
legend: Zookeeper 3.4.8
- column: throughput_consul_0.6.4
legend: Consul 0.6.4
x_axis: Second
y_axis: Throughput
output_path_list:
- bench-20160325/bench-02-throughput.svg
- bench-20160325/bench-02-throughput.png
- lines:
- column: avg_cpu_etcd_v3
legend: etcd v3
- column: avg_cpu_zookeeper_3.4.8
legend: Zookeeper 3.4.8
- column: avg_cpu_consul_0.6.4
legend: Consul 0.6.4
x_axis: Second
y_axis: CPU
output_path_list:
- bench-20160325/bench-02-avg-cpu.svg
- bench-20160325/bench-02-avg-cpu.png
- lines:
- column: avg_memory_mb_etcd_v3
legend: etcd v3
- column: avg_memory_mb_zookeeper_3.4.8
legend: Zookeeper 3.4.8
- column: avg_memory_mb_consul_0.6.4
legend: Consul 0.6.4
x_axis: Second
y_axis: Memory(MB)
output_path_list:
- bench-20160325/bench-02-avg-memory.svg
- bench-20160325/bench-02-avg-memory.png
- data_path: bench-20160325/bench-03-all-aggregated.csv
title: Write 3M keys, 500 clients, key 64 bytes, value 256 bytes
plot_list:
- lines:
- column: avg_latency_ms_etcd_v3
legend: etcd v3
- column: avg_latency_ms_zookeeper_3.4.8
legend: Zookeeper 3.4.8
- column: avg_latency_ms_consul_0.6.4
legend: Consul 0.6.4
x_axis: Second
y_axis: Latency(millisecond)
output_path_list:
- bench-20160325/bench-03-avg-latency-ms.svg
- bench-20160325/bench-03-avg-latency-ms.png
- lines:
- column: throughput_etcd_v3
legend: etcd v3
- column: throughput_zookeeper_3.4.8
legend: Zookeeper 3.4.8
- column: throughput_consul_0.6.4
legend: Consul 0.6.4
x_axis: Second
y_axis: Throughput
output_path_list:
- bench-20160325/bench-03-throughput.svg
- bench-20160325/bench-03-throughput.png
- lines:
- column: avg_cpu_etcd_v3
legend: etcd v3
- column: avg_cpu_zookeeper_3.4.8
legend: Zookeeper 3.4.8
- column: avg_cpu_consul_0.6.4
legend: Consul 0.6.4
x_axis: Second
y_axis: CPU
output_path_list:
- bench-20160325/bench-03-avg-cpu.svg
- bench-20160325/bench-03-avg-cpu.png
- lines:
- column: avg_memory_mb_etcd_v3
legend: etcd v3
- column: avg_memory_mb_zookeeper_3.4.8
legend: Zookeeper 3.4.8
- column: avg_memory_mb_consul_0.6.4
legend: Consul 0.6.4
x_axis: Second
y_axis: Memory(MB)
output_path_list:
- bench-20160325/bench-03-avg-memory.svg
- bench-20160325/bench-03-avg-memory.png
- data_path: bench-20160325/bench-04-all-aggregated.csv
title: Write 3M keys, 1K clients, key 64 bytes, value 256 bytes
plot_list:
- lines:
- column: avg_latency_ms_etcd_v3
legend: etcd v3
- column: avg_latency_ms_zookeeper_3.4.8
legend: Zookeeper 3.4.8
- column: avg_latency_ms_consul_0.6.4
legend: Consul 0.6.4
x_axis: Second
y_axis: Latency(millisecond)
output_path_list:
- bench-20160325/bench-04-avg-latency-ms.svg
- bench-20160325/bench-04-avg-latency-ms.png
- lines:
- column: throughput_etcd_v3
legend: etcd v3
- column: throughput_zookeeper_3.4.8
legend: Zookeeper 3.4.8
- column: throughput_consul_0.6.4
legend: Consul 0.6.4
x_axis: Second
y_axis: Throughput
output_path_list:
- bench-20160325/bench-04-throughput.svg
- bench-20160325/bench-04-throughput.png
- lines:
- column: avg_cpu_etcd_v3
legend: etcd v3
- column: avg_cpu_zookeeper_3.4.8
legend: Zookeeper 3.4.8
- column: avg_cpu_consul_0.6.4
legend: Consul 0.6.4
x_axis: Second
y_axis: CPU
output_path_list:
- bench-20160325/bench-04-avg-cpu.svg
- bench-20160325/bench-04-avg-cpu.png
- lines:
- column: avg_memory_mb_etcd_v3
legend: etcd v3
- column: avg_memory_mb_zookeeper_3.4.8
legend: Zookeeper 3.4.8
- column: avg_memory_mb_consul_0.6.4
legend: Consul 0.6.4
x_axis: Second
y_axis: Memory(MB)
output_path_list:
- bench-20160325/bench-04-avg-memory.svg
- bench-20160325/bench-04-avg-memory.png
- data_path: bench-20160325/bench-05-all-aggregated.csv
title: Read single key 1M times, 1 client, key 64 bytes, value 1 kb
plot_list:
- lines:
- column: avg_latency_ms_etcd_v3
legend: etcd v3
- column: avg_latency_ms_zookeeper_3.4.8
legend: Zookeeper 3.4.8
- column: avg_latency_ms_consul_0.6.4
legend: Consul 0.6.4
x_axis: Second
y_axis: Latency(millisecond)
output_path_list:
- bench-20160325/bench-05-avg-latency-ms.svg
- bench-20160325/bench-05-avg-latency-ms.png
- lines:
- column: throughput_etcd_v3
legend: etcd v3
- column: throughput_zookeeper_3.4.8
legend: Zookeeper 3.4.8
- column: throughput_consul_0.6.4
legend: Consul 0.6.4
x_axis: Second
y_axis: Throughput
output_path_list:
- bench-20160325/bench-05-throughput.svg
- bench-20160325/bench-05-throughput.png
- lines:
- column: avg_cpu_etcd_v3
legend: etcd v3
- column: avg_cpu_zookeeper_3.4.8
legend: Zookeeper 3.4.8
- column: avg_cpu_consul_0.6.4
legend: Consul 0.6.4
x_axis: Second
y_axis: CPU
output_path_list:
- bench-20160325/bench-05-avg-cpu.svg
- bench-20160325/bench-05-avg-cpu.png
- lines:
- column: avg_memory_mb_etcd_v3
legend: etcd v3
- column: avg_memory_mb_zookeeper_3.4.8
legend: Zookeeper 3.4.8
- column: avg_memory_mb_consul_0.6.4
legend: Consul 0.6.4
x_axis: Second
y_axis: Memory(MB)
output_path_list:
- bench-20160325/bench-05-avg-memory.svg
- bench-20160325/bench-05-avg-memory.png
- data_path: bench-20160325/bench-06-all-aggregated.csv
title: Read single key 1M times, 100 clients, key 64 bytes, value 1 kb
plot_list:
- lines:
- column: avg_latency_ms_etcd_v3
legend: etcd v3
- column: avg_latency_ms_zookeeper_3.4.8
legend: Zookeeper 3.4.8
- column: avg_latency_ms_consul_0.6.4
legend: Consul 0.6.4
x_axis: Second
y_axis: Latency(millisecond)
output_path_list:
- bench-20160325/bench-06-avg-latency-ms.svg
- bench-20160325/bench-06-avg-latency-ms.png
- lines:
- column: throughput_etcd_v3
legend: etcd v3
- column: throughput_zookeeper_3.4.8
legend: Zookeeper 3.4.8
- column: throughput_consul_0.6.4
legend: Consul 0.6.4
x_axis: Second
y_axis: Throughput
output_path_list:
- bench-20160325/bench-06-throughput.svg
- bench-20160325/bench-06-throughput.png
- lines:
- column: avg_cpu_etcd_v3
legend: etcd v3
- column: avg_cpu_zookeeper_3.4.8
legend: Zookeeper 3.4.8
- column: avg_cpu_consul_0.6.4
legend: Consul 0.6.4
x_axis: Second
y_axis: CPU
output_path_list:
- bench-20160325/bench-06-avg-cpu.svg
- bench-20160325/bench-06-avg-cpu.png
- lines:
- column: avg_memory_mb_etcd_v3
legend: etcd v3
- column: avg_memory_mb_zookeeper_3.4.8
legend: Zookeeper 3.4.8
- column: avg_memory_mb_consul_0.6.4
legend: Consul 0.6.4
x_axis: Second
y_axis: Memory(MB)
output_path_list:
- bench-20160325/bench-06-avg-memory.svg
- bench-20160325/bench-06-avg-memory.png
step4:
preface: |
- Google Cloud Compute Engine
- 3 machines(server) of 8 vCPUs + 16GB Memory + 375GB local SSD (SCSI)
- 1 machine(client) of 16 vCPUs + 30GB Memory + 50GB SSD
- Ubuntu 15.10
- Go 1.6 with etcd master branch as of testing date
- Java 8 with Zookeeper 3.4.8(current)
- Java(TM) SE Runtime Environment (build 1.8.0_74-b02)
- Java HotSpot(TM) 64-Bit Server VM (build 25.74-b02, mixed mode)
results:
- title: Write 300K keys, 1 client, key 64 bytes, value 256 bytes
images:
- bench-20160325/bench-01-avg-latency-ms.svg
- bench-20160325/bench-01-throughput.svg
- bench-20160325/bench-01-avg-cpu.svg
- bench-20160325/bench-01-avg-memory.svg
- title: Write 1M keys, 10 clients, key 64 bytes, value 256 bytes
images:
- bench-20160325/bench-02-avg-latency-ms.svg
- bench-20160325/bench-02-throughput.svg
- bench-20160325/bench-02-avg-cpu.svg
- bench-20160325/bench-02-avg-memory.svg
- title: Write 3M keys, 500 clients, key 64 bytes, value 256 bytes
images:
- bench-20160325/bench-03-avg-latency-ms.svg
- bench-20160325/bench-03-throughput.svg
- bench-20160325/bench-03-avg-cpu.svg
- bench-20160325/bench-03-avg-memory.svg
- title: Write 3M keys, 1K clients, key 64 bytes, value 256 bytes
images:
- bench-20160325/bench-04-avg-latency-ms.svg
- bench-20160325/bench-04-throughput.svg
- bench-20160325/bench-04-avg-cpu.svg
- bench-20160325/bench-04-avg-memory.svg
- title: Read single key 1M times, 1 client, key 64 bytes, value 1 kb
images:
- bench-20160325/bench-05-avg-latency-ms.svg
- bench-20160325/bench-05-throughput.svg
- bench-20160325/bench-05-avg-cpu.svg
- bench-20160325/bench-05-avg-memory.svg
- title: Read single key 1M times, 100 clients, key 64 bytes, value 1 kb
images:
- bench-20160325/bench-06-avg-latency-ms.svg
- bench-20160325/bench-06-throughput.svg
- bench-20160325/bench-06-avg-cpu.svg
- bench-20160325/bench-06-avg-memory.svg
output_path: bench-20160325/README.md

View File

@ -1,161 +0,0 @@
step1:
- data_path_list:
- bench-20160330/bench-01-etcd-1-monitor.csv
- bench-20160330/bench-01-etcd-2-monitor.csv
- bench-20160330/bench-01-etcd-3-monitor.csv
data_benchmark_path: bench-20160330/bench-01-etcd-timeseries.csv
output_path: bench-20160330/bench-01-etcd-aggregated.csv
- data_path_list:
- bench-20160330/bench-01-zk-1-monitor.csv
- bench-20160330/bench-01-zk-2-monitor.csv
- bench-20160330/bench-01-zk-3-monitor.csv
data_benchmark_path: bench-20160330/bench-01-zk-timeseries.csv
output_path: bench-20160330/bench-01-zk-aggregated.csv
- data_path_list:
- bench-20160330/bench-02-etcd-1-monitor.csv
- bench-20160330/bench-02-etcd-2-monitor.csv
- bench-20160330/bench-02-etcd-3-monitor.csv
data_benchmark_path: bench-20160330/bench-02-etcd-timeseries.csv
output_path: bench-20160330/bench-02-etcd-aggregated.csv
- data_path_list:
- bench-20160330/bench-02-zk-1-monitor.csv
- bench-20160330/bench-02-zk-2-monitor.csv
- bench-20160330/bench-02-zk-3-monitor.csv
data_benchmark_path: bench-20160330/bench-02-zk-timeseries.csv
output_path: bench-20160330/bench-02-zk-aggregated.csv
step2:
- data_list:
- path: bench-20160330/bench-01-etcd-aggregated.csv
name: etcd_v3
- path: bench-20160330/bench-01-zk-aggregated.csv
name: zookeeper_3.4.8
output_path: bench-20160330/bench-01-all-aggregated.csv
- data_list:
- path: bench-20160330/bench-02-etcd-aggregated.csv
name: etcd_v3
- path: bench-20160330/bench-02-zk-aggregated.csv
name: zookeeper_3.4.8
output_path: bench-20160330/bench-02-all-aggregated.csv
step3:
- data_path: bench-20160330/bench-01-all-aggregated.csv
title: Write 300K keys, 1 client, key 64 bytes, value 256 bytes
plot_list:
- lines:
- column: avg_latency_ms_etcd_v3
legend: etcd v3
- column: avg_latency_ms_zookeeper_3.4.8
legend: Zookeeper 3.4.8
x_axis: Second
y_axis: Latency(millisecond)
output_path_list:
- bench-20160330/bench-01-avg-latency-ms.svg
- bench-20160330/bench-01-avg-latency-ms.png
- lines:
- column: throughput_etcd_v3
legend: etcd v3
- column: throughput_zookeeper_3.4.8
legend: Zookeeper 3.4.8
x_axis: Second
y_axis: Throughput
output_path_list:
- bench-20160330/bench-01-throughput.svg
- bench-20160330/bench-01-throughput.png
- lines:
- column: avg_cpu_etcd_v3
legend: etcd v3
- column: avg_cpu_zookeeper_3.4.8
legend: Zookeeper 3.4.8
x_axis: Second
y_axis: CPU
output_path_list:
- bench-20160330/bench-01-avg-cpu.svg
- bench-20160330/bench-01-avg-cpu.png
- lines:
- column: avg_memory_mb_etcd_v3
legend: etcd v3
- column: avg_memory_mb_zookeeper_3.4.8
legend: Zookeeper 3.4.8
x_axis: Second
y_axis: Memory(MB)
output_path_list:
- bench-20160330/bench-01-avg-memory.svg
- bench-20160330/bench-01-avg-memory.png
- data_path: bench-20160330/bench-02-all-aggregated.csv
title: Write 3M keys, 1K clients, key 64 bytes, value 256 bytes
plot_list:
- lines:
- column: avg_latency_ms_etcd_v3
legend: etcd v3
- column: avg_latency_ms_zookeeper_3.4.8
legend: Zookeeper 3.4.8
x_axis: Second
y_axis: Latency(millisecond)
output_path_list:
- bench-20160330/bench-02-avg-latency-ms.svg
- bench-20160330/bench-02-avg-latency-ms.png
- lines:
- column: throughput_etcd_v3
legend: etcd v3
- column: throughput_zookeeper_3.4.8
legend: Zookeeper 3.4.8
x_axis: Second
y_axis: Throughput
output_path_list:
- bench-20160330/bench-02-throughput.svg
- bench-20160330/bench-02-throughput.png
- lines:
- column: avg_cpu_etcd_v3
legend: etcd v3
- column: avg_cpu_zookeeper_3.4.8
legend: Zookeeper 3.4.8
x_axis: Second
y_axis: CPU
output_path_list:
- bench-20160330/bench-02-avg-cpu.svg
- bench-20160330/bench-02-avg-cpu.png
- lines:
- column: avg_memory_mb_etcd_v3
legend: etcd v3
- column: avg_memory_mb_zookeeper_3.4.8
legend: Zookeeper 3.4.8
x_axis: Second
y_axis: Memory(MB)
output_path_list:
- bench-20160330/bench-02-avg-memory.svg
- bench-20160330/bench-02-avg-memory.png
step4:
preface: |
- Google Cloud Compute Engine
- 3 machines(server) of 8 vCPUs + 16GB Memory + 375GB local SSD (SCSI)
- 1 machine(client) of 16 vCPUs + 30GB Memory + 50GB SSD
- Ubuntu 15.10
- Go 1.6 with etcd master branch as of testing date
- Java 8 with Zookeeper 3.4.8(current)
- Java(TM) SE Runtime Environment (build 1.8.0_74-b02)
- Java HotSpot(TM) 64-Bit Server VM (build 25.74-b02, mixed mode)
results:
- title: Write 300K keys, 1 client, key 64 bytes, value 256 bytes
images:
- bench-20160330/bench-01-avg-latency-ms.png
- bench-20160330/bench-01-throughput.png
- bench-20160330/bench-01-avg-cpu.png
- bench-20160330/bench-01-avg-memory.png
- title: Write 3M keys, 1K clients, key 64 bytes, value 256 bytes
images:
- bench-20160330/bench-02-avg-latency-ms.png
- bench-20160330/bench-02-throughput.png
- bench-20160330/bench-02-avg-cpu.png
- bench-20160330/bench-02-avg-memory.png
output_path: bench-20160330/README.md

View File

@ -1,63 +0,0 @@
// Copyright 2015 CoreOS, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package bench
import (
"sync"
"github.com/cheggaaa/pb"
"github.com/spf13/cobra"
)
var (
	// Command is the root "bench" command; subcommands (put, range, ...)
	// register themselves on it in their init functions.
	Command = &cobra.Command{
		Use:   "bench",
		Short: "Low-level benchmark tool for etcdv2, etcdv3, Zookeeper, Consul.",
	}

	// Flag-backed configuration shared by all bench subcommands.
	database     string   // target database: etcdv2, etcdv3, zk, consul
	endpoints    []string // server endpoints to benchmark against
	totalConns   uint     // number of gRPC or Zookeeper connections
	totalClients uint     // number of gRPC clients (etcd only)

	sample      bool // sample requests every second
	noHistogram bool // suppress histogram output

	csvResultPath string // file path for csv results

	googleCloudProjectName string // Google Cloud project name for uploads
	keyPath                string // path of the key file
	bucket                 string // bucket name in cloud storage

	// Shared benchmark state: progress bar, per-request results stream,
	// and the WaitGroup tracking worker goroutines.
	bar     *pb.ProgressBar
	results chan result
	wg      sync.WaitGroup
)
// init configures cobra and registers the persistent flags shared by
// every bench subcommand. The two original init functions were merged
// so the setup ordering is explicit in a single place.
func init() {
	// Allow unambiguous command prefixes, e.g. "bench p" for "bench put".
	cobra.EnablePrefixMatching = true

	Command.PersistentFlags().StringVarP(&database, "database", "d", "etcdv3", "etcdv2, etcdv3, zk, consul")
	Command.PersistentFlags().StringSliceVar(&endpoints, "endpoints", []string{"10.240.0.9:2181", "10.240.0.10:2181", "10.240.0.14:2181"}, "gRPC endpoints")
	Command.PersistentFlags().UintVar(&totalConns, "conns", 1, "Total number of gRPC connections or Zookeeper connections")
	Command.PersistentFlags().UintVar(&totalClients, "clients", 1, "Total number of gRPC clients (only for etcd)")
	Command.PersistentFlags().BoolVar(&sample, "sample", false, "'true' to sample requests for every second.")
	Command.PersistentFlags().BoolVar(&noHistogram, "no-histogram", false, "'true' to not show results in histogram.")
	Command.PersistentFlags().StringVar(&csvResultPath, "csv-result-path", "timeseries.csv", "path to store csv results.")
	Command.PersistentFlags().StringVar(&googleCloudProjectName, "google-cloud-project-name", "", "Google cloud project name.")
	Command.PersistentFlags().StringVar(&keyPath, "key-path", "", "Path of key file.")
	Command.PersistentFlags().StringVar(&bucket, "bucket", "", "Bucket name in cloud storage.")
}

View File

@ -1,17 +0,0 @@
// Copyright 2015 CoreOS, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Package bench implements benchmark commands
// based on https://github.com/coreos/etcd/tree/master/tools/benchmark.
package bench

View File

@ -1,347 +0,0 @@
// Copyright 2015 CoreOS, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package bench
import (
"bufio"
"fmt"
"log"
"net/http"
"strconv"
"strings"
"time"
"github.com/cheggaaa/pb"
clientv2 "github.com/coreos/etcd/client"
"github.com/coreos/etcd/clientv3"
consulapi "github.com/hashicorp/consul/api"
"github.com/samuel/go-zookeeper/zk"
"github.com/spf13/cobra"
"golang.org/x/net/context"
)
// putCmd is the "bench put" subcommand: it benchmarks put (write)
// requests against the selected database (run by putFunc).
var putCmd = &cobra.Command{
	Use:   "put",
	Short: "Benchmark put",
	Run:   putFunc,
}
var (
	// Zookeeper create options: plain (non-ephemeral, non-sequential)
	// nodes with a fully open (world, PermAll) ACL.
	zkCreateFlags = int32(0)
	zkCreateAcl   = zk.WorldACL(zk.PermAll)

	keySize  int // key size of each put request, in bytes
	valSize  int // value size of each put request, in bytes
	putTotal int // total number of put requests to issue

	// etcdCompactionCycle compacts every N put requests; 0 disables it.
	etcdCompactionCycle int64
)
// init registers the put subcommand and its flags on the root bench
// command.
func init() {
	Command.AddCommand(putCmd)
	putCmd.Flags().IntVar(&keySize, "key-size", 8, "Key size of put request")
	putCmd.Flags().IntVar(&valSize, "val-size", 8, "Value size of put request")
	putCmd.Flags().IntVar(&putTotal, "total", 10000, "Total number of put requests")
	putCmd.Flags().Int64Var(&etcdCompactionCycle, "etcd-compaction-cycle", 0, "Compact every X number of put requests. 0 means no compaction.")
}
// putFunc runs the "put" benchmark: it fans requests out to worker
// goroutines for the configured database ("etcdv2", "etcdv3", "zk" or
// "consul"), tracks progress on the shared bar, and when all putTotal
// requests complete prints the latency report and the per-endpoint key
// counts. It exits the process (log.Fatalf) on an unknown database.
func putFunc(cmd *cobra.Command, args []string) {
	results = make(chan result)
	requests := make(chan request, totalClients)
	bar = pb.New(putTotal)

	keys := multiRandStrings(keySize, putTotal)
	value := string(mustRandBytes(valSize))

	bar.Format("Bom !")
	bar.Start()

	var etcdClients []*clientv3.Client
	switch database {
	case "etcdv2":
		conns := mustCreateClientsEtcdv2(totalConns)
		for i := range conns {
			wg.Add(1)
			go doPutEtcdv2(context.Background(), conns[i], requests)
		}
	case "etcdv3":
		etcdClients = mustCreateClientsEtcdv3(totalClients, totalConns)
		for i := range etcdClients {
			wg.Add(1)
			go doPutEtcdv3(context.Background(), etcdClients[i], requests)
		}
		defer func() {
			for i := range etcdClients {
				etcdClients[i].Close()
			}
		}()
	case "zk":
		conns := mustCreateConnsZk(totalConns)
		defer func() {
			for i := range conns {
				conns[i].Close()
			}
		}()
		for i := range conns {
			wg.Add(1)
			go doPutZk(conns[i], requests)
		}
	case "consul":
		conns := mustCreateConnsConsul(totalConns)
		for i := range conns {
			wg.Add(1)
			go doPutConsul(conns[i], requests)
		}
	default:
		log.Fatalf("unknown database %s", database)
	}

	pdoneC := printReport(results)

	go func() {
		for i := 0; i < putTotal; i++ {
			// BUG FIX: the flag value is "etcdv3", never "etcd"; the old
			// comparison made periodic compaction unreachable.
			if database == "etcdv3" && etcdCompactionCycle > 0 && int64(i)%etcdCompactionCycle == 0 {
				log.Printf("etcd starting compaction at %d put request", i)
				go func() {
					compactKV(etcdClients)
				}()
			}
			key := keys[i]
			switch database {
			case "etcdv2":
				requests <- request{etcdv2Op: etcdv2Op{key: key, value: value}}
			case "etcdv3":
				requests <- request{etcdv3Op: clientv3.OpPut(key, value)}
			case "zk":
				// Zookeeper node paths must be absolute.
				requests <- request{zkOp: zkOp{key: "/" + key, value: []byte(value)}}
			case "consul":
				requests <- request{consulOp: consulOp{key: key, value: []byte(value)}}
			}
		}
		close(requests)
	}()

	wg.Wait()
	bar.Finish()
	close(results)
	<-pdoneC

	fmt.Println("Expected Put Total:", putTotal)
	// Fetch the per-endpoint key counts once, then print them uniformly
	// (the original repeated the same print loop in every case).
	var counts map[string]int64
	switch database {
	case "etcdv2":
		counts = getTotalKeysEtcdv2(endpoints)
	case "etcdv3":
		counts = getTotalKeysEtcdv3(endpoints)
	case "zk":
		counts = getTotalKeysZk(endpoints)
	case "consul":
		counts = getTotalKeysConsul(endpoints)
	}
	for k, v := range counts {
		fmt.Println("Endpoint :", k)
		fmt.Println("Number of Keys:", v)
		fmt.Println()
	}
}
// doPutEtcdv2 is a put worker for the etcd v2 API: it drains requests,
// issues Set calls on conn, and reports each call's latency (plus the
// error text, if any) on the shared results channel.
func doPutEtcdv2(ctx context.Context, conn clientv2.KeysAPI, requests <-chan request) {
	defer wg.Done()
	for req := range requests {
		op := req.etcdv2Op
		st := time.Now()
		// BUG FIX: honor the caller-supplied ctx instead of ignoring it
		// with context.Background().
		_, err := conn.Set(ctx, op.key, op.value, nil)
		var errStr string
		if err != nil {
			errStr = err.Error()
		}
		results <- result{errStr: errStr, duration: time.Since(st), happened: time.Now()}
		bar.Increment()
	}
}
// getTotalKeysEtcdv2 reports the key count per endpoint. The etcd v2
// metrics do not expose a total-keys figure, so every endpoint maps
// to zero.
func getTotalKeysEtcdv2(endpoints []string) map[string]int64 {
	counts := make(map[string]int64, len(endpoints))
	for _, endpoint := range endpoints {
		counts[endpoint] = 0 // not supported in metrics
	}
	return counts
}
// doPutEtcdv3 is a put worker for the etcd v3 API: it drains the
// requests channel, executes each op on client, and pushes a timing
// result (with the error text, if any) onto the shared results channel.
func doPutEtcdv3(ctx context.Context, client clientv3.KV, requests <-chan request) {
	defer wg.Done()
	for r := range requests {
		started := time.Now()
		_, err := client.Do(ctx, r.etcdv3Op)
		msg := ""
		if err != nil {
			msg = err.Error()
		}
		results <- result{errStr: msg, duration: time.Since(started), happened: time.Now()}
		bar.Increment()
	}
}
// getTotalKeysEtcdv3 scrapes each endpoint's /metrics page and returns
// the value of the etcd_storage_keys_total gauge per endpoint. An
// endpoint that cannot be reached is reported as 0.
func getTotalKeysEtcdv3(endpoints []string) map[string]int64 {
	rs := make(map[string]int64)
	for _, ep := range endpoints {
		if !strings.HasPrefix(ep, "http://") {
			ep = "http://" + ep
		} // TODO: support https
		resp, err := http.Get(ep + "/metrics")
		if err != nil {
			// BUG FIX: previously fell through and dereferenced the nil
			// response; skip to the next endpoint instead.
			log.Println(err)
			rs[ep] = 0
			continue
		}
		scanner := bufio.NewScanner(resp.Body)
		for scanner.Scan() {
			txt := scanner.Text()
			if strings.HasPrefix(txt, "#") {
				continue // Prometheus comment/help line
			}
			// Metric lines are "name value"; parse the value if present.
			ts := strings.SplitN(txt, " ", 2)
			fv := 0.0
			if len(ts) == 2 {
				v, err := strconv.ParseFloat(ts[1], 64)
				if err == nil {
					fv = v
				}
			}
			if ts[0] == "etcd_storage_keys_total" {
				rs[ep] = int64(fv)
				break
			}
		}
		gracefulClose(resp)
	}
	return rs
}
// doPutZk is a put worker for Zookeeper: it drains requests, creates
// each node via conn.Create with the package-level create flags/ACL,
// and reports per-call latency (and error text, if any) on the shared
// results channel.
func doPutZk(conn *zk.Conn, requests <-chan request) {
	defer wg.Done()
	for r := range requests {
		started := time.Now()
		_, err := conn.Create(r.zkOp.key, r.zkOp.value, zkCreateFlags, zkCreateAcl)
		msg := ""
		if err != nil {
			msg = err.Error()
		}
		results <- result{errStr: msg, duration: time.Since(started), happened: time.Now()}
		bar.Increment()
	}
}
// getTotalKeysZk queries every Zookeeper endpoint's server stats
// (5-second timeout) and returns each endpoint's node count. If the
// query fails, it logs the partial stats and reports 0 for every
// endpoint.
func getTotalKeysZk(endpoints []string) map[string]int64 {
	counts := make(map[string]int64)
	stats, ok := zk.FLWSrvr(endpoints, 5*time.Second)
	if !ok {
		log.Printf("getTotalKeysZk failed with %+v", stats)
		for _, endpoint := range endpoints {
			counts[endpoint] = 0
		}
		return counts
	}
	// stats are positionally aligned with endpoints.
	for i, stat := range stats {
		counts[endpoints[i]] = stat.NodeCount
	}
	return counts
}
// doPutConsul is a put worker for Consul: it drains requests, writes
// each key/value pair through the KV API, and reports per-call latency
// (and error text, if any) on the shared results channel.
func doPutConsul(conn *consulapi.KV, requests <-chan request) {
	defer wg.Done()
	for r := range requests {
		started := time.Now()
		_, err := conn.Put(&consulapi.KVPair{Key: r.consulOp.key, Value: r.consulOp.value}, nil)
		msg := ""
		if err != nil {
			msg = err.Error()
		}
		results <- result{errStr: msg, duration: time.Since(started), happened: time.Now()}
		bar.Increment()
	}
}
// getTotalKeysConsul reports the key count per endpoint. Consul does
// not expose a total-keys figure here, so every endpoint maps to zero.
func getTotalKeysConsul(endpoints []string) map[string]int64 {
	counts := make(map[string]int64, len(endpoints))
	for _, endpoint := range endpoints {
		counts[endpoint] = 0 // not supported in consul
	}
	return counts
}
// compactKV reads the current revision via the first client, then asks
// the cluster (again via the first client) to compact history to 1000
// revisions behind it. Each RPC gets a 5-second timeout; any RPC error
// panics, since this is benchmark tooling. No-op when clients is empty.
func compactKV(clients []*clientv3.Client) {
	if len(clients) == 0 {
		return
	}
	c := clients[0]

	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
	resp, err := c.KV.Get(ctx, "foo")
	cancel()
	if err != nil {
		panic(err)
	}
	curRev := resp.Header.Revision

	// Keep the most recent 1000 revisions; never compact below 0.
	revToCompact := max(0, curRev-1000)

	ctx, cancel = context.WithTimeout(context.Background(), 5*time.Second)
	err = c.KV.Compact(ctx, revToCompact)
	cancel()
	if err != nil {
		panic(err)
	}
}
// max returns the larger of the two int64 values.
func max(n1, n2 int64) int64 {
	if n2 > n1 {
		return n2
	}
	return n1
}

View File

@ -1,312 +0,0 @@
// Copyright 2015 CoreOS, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package bench
import (
"fmt"
"log"
"os"
"time"
"github.com/cheggaaa/pb"
clientv2 "github.com/coreos/etcd/client"
"github.com/coreos/etcd/clientv3"
consulapi "github.com/hashicorp/consul/api"
"github.com/samuel/go-zookeeper/zk"
"github.com/spf13/cobra"
"golang.org/x/net/context"
)
// rangeCmd benchmarks range (read) requests against the configured
// database; it is registered on the parent bench command in init below.
var rangeCmd = &cobra.Command{
	Use:   "range key [end-range]",
	Short: "Benchmark range",
	Run:   rangeFunc,
}
var (
	// rangeTotal is the total number of range requests to issue.
	rangeTotal int
	// rangeConsistency selects linearizable ("l") or serializable ("s")
	// reads; only consulted when database == "etcdv3".
	rangeConsistency string
	// singleKey makes the benchmark write one random key up front and
	// then read only that key.
	singleKey bool
)
// init wires rangeCmd and its flags into the parent bench command.
func init() {
	Command.AddCommand(rangeCmd)
	rangeCmd.Flags().IntVar(&rangeTotal, "total", 10000, "Total number of range requests")
	rangeCmd.Flags().StringVar(&rangeConsistency, "consistency", "l", "Linearizable(l) or Serializable(s)")
	rangeCmd.Flags().BoolVar(&singleKey, "single-key", false, "'true' to get only one single key (automatic put before test)")
	rangeCmd.Flags().IntVar(&keySize, "key-size", 64, "key size")
	rangeCmd.Flags().IntVar(&valSize, "val-size", 128, "value size")
}
// rangeFunc runs the range (read) benchmark. With --single-key it first
// writes one random key to the target database (retrying up to 5 times)
// and then reads only that key; otherwise it reads the key (and optional
// end-range) given as command-line arguments. Per-request results stream
// through the shared results channel into the report printer.
func rangeFunc(cmd *cobra.Command, args []string) {
	var k string
	if singleKey { // write 'foo'
		k = string(randBytes(keySize))
		v := randBytes(valSize)
		vs := string(v)
		// Prime the store with the single key, retrying the write up to
		// five times with a fresh client each attempt.
		switch database {
		case "etcdv2":
			fmt.Printf("PUT '%s' to etcdv2\n", k)
			var err error
			for i := 0; i < 5; i++ {
				clients := mustCreateClientsEtcdv2(totalConns)
				_, err = clients[0].Set(context.Background(), k, vs, nil)
				if err != nil {
					continue
				}
				fmt.Printf("Done with PUT '%s' to etcdv2\n", k)
				break
			}
			if err != nil {
				fmt.Fprintln(os.Stderr, err)
				os.Exit(1)
			}
		case "etcdv3":
			fmt.Printf("PUT '%s' to etcd\n", k)
			var err error
			for i := 0; i < 5; i++ {
				clients := mustCreateClientsEtcdv3(1, 1)
				_, err = clients[0].Do(context.Background(), clientv3.OpPut(k, vs))
				if err != nil {
					continue
				}
				fmt.Printf("Done with PUT '%s' to etcd\n", k)
				break
			}
			if err != nil {
				fmt.Fprintln(os.Stderr, err)
				os.Exit(1)
			}
		case "zk":
			// Zookeeper paths must be absolute.
			k = "/" + k
			fmt.Printf("PUT '%s' to Zookeeper\n", k)
			var err error
			for i := 0; i < 5; i++ {
				conns := mustCreateConnsZk(totalConns)
				_, err = conns[0].Create(k, v, zkCreateFlags, zkCreateAcl)
				if err != nil {
					continue
				}
				fmt.Printf("Done with PUT '%s' to Zookeeper\n", k)
				break
			}
			if err != nil {
				fmt.Fprintln(os.Stderr, err)
				os.Exit(1)
			}
		case "consul":
			fmt.Printf("PUT '%s' to Consul\n", k)
			var err error
			for i := 0; i < 5; i++ {
				clients := mustCreateConnsConsul(totalConns)
				_, err = clients[0].Put(&consulapi.KVPair{Key: k, Value: v}, nil)
				if err != nil {
					continue
				}
				fmt.Printf("Done with PUT '%s' to Consul\n", k)
				break
			}
			if err != nil {
				fmt.Fprintln(os.Stderr, err)
				os.Exit(1)
			}
		}
	} else if len(args) == 0 || len(args) > 2 {
		fmt.Fprintln(os.Stderr, cmd.Usage())
		os.Exit(1)
	}
	// Without --single-key, the key (and optional end of range) come
	// from the command-line arguments.
	var end string
	if !singleKey {
		k = args[0]
		if len(args) == 2 {
			end = args[1]
		}
	}
	if database == "etcdv3" { // etcdv2 quorum false by default
		if rangeConsistency == "l" {
			fmt.Println("bench with linearizable range")
		} else if rangeConsistency == "s" {
			fmt.Println("bench with serializable range")
		} else {
			fmt.Fprintln(os.Stderr, cmd.Usage())
			os.Exit(1)
		}
	} else {
		fmt.Println("bench with serializable range")
	}
	results = make(chan result)
	requests := make(chan request, totalClients)
	bar = pb.New(rangeTotal)
	bar.Format("Bom !")
	bar.Start()
	// Start one reader goroutine per connection/client for the chosen
	// database; each drains the shared requests channel.
	switch database {
	case "etcdv2":
		conns := mustCreateClientsEtcdv2(totalConns)
		for i := range conns {
			wg.Add(1)
			go doRangeEtcdv2(conns[i], requests)
		}
	case "etcdv3":
		clients := mustCreateClientsEtcdv3(totalClients, totalConns)
		for i := range clients {
			wg.Add(1)
			go doRangeEtcdv3(clients[i].KV, requests)
		}
		defer func() {
			for i := range clients {
				clients[i].Close()
			}
		}()
	case "zk":
		conns := mustCreateConnsZk(totalConns)
		defer func() {
			for i := range conns {
				conns[i].Close()
			}
		}()
		for i := range conns {
			wg.Add(1)
			go doRangeZk(conns[i], requests)
		}
	case "consul":
		conns := mustCreateConnsConsul(totalConns)
		for i := range conns {
			wg.Add(1)
			go doRangeConsul(conns[i], requests)
		}
	default:
		log.Fatalf("unknown database %s", database)
	}
	pdoneC := printReport(results)
	// Feed rangeTotal read requests, then close the channel so the
	// worker goroutines exit.
	go func() {
		for i := 0; i < rangeTotal; i++ {
			switch database {
			case "etcdv2":
				// serializable read by default
				requests <- request{etcdv2Op: etcdv2Op{key: k}}
			case "etcdv3":
				opts := []clientv3.OpOption{clientv3.WithRange(end)}
				if rangeConsistency == "s" {
					opts = append(opts, clientv3.WithSerializable())
				}
				requests <- request{etcdv3Op: clientv3.OpGet(k, opts...)}
			case "zk":
				// serializable read by default
				requests <- request{zkOp: zkOp{key: k}}
			case "consul":
				// serializable read by default
				requests <- request{consulOp: consulOp{key: k}}
			}
		}
		close(requests)
	}()
	wg.Wait()
	bar.Finish()
	close(results)
	// Wait for the report printer to finish consuming results.
	<-pdoneC
}
// doRangeEtcdv2 consumes read requests and issues each one as an etcd v2
// GET, reporting per-request latency on the shared results channel.
func doRangeEtcdv2(conn clientv2.KeysAPI, requests <-chan request) {
	defer wg.Done()
	for r := range requests {
		begin := time.Now()
		_, gerr := conn.Get(context.Background(), r.etcdv2Op.key, nil)
		errStr := ""
		if gerr != nil {
			errStr = gerr.Error()
		}
		results <- result{errStr: errStr, duration: time.Since(begin), happened: time.Now()}
		bar.Increment()
	}
}
// doRangeEtcdv3 consumes read requests and issues each one as an etcd v3
// range op, reporting per-request latency on the shared results channel.
func doRangeEtcdv3(client clientv3.KV, requests <-chan request) {
	defer wg.Done()
	for r := range requests {
		begin := time.Now()
		_, derr := client.Do(context.Background(), r.etcdv3Op)
		errStr := ""
		if derr != nil {
			errStr = derr.Error()
		}
		results <- result{errStr: errStr, duration: time.Since(begin), happened: time.Now()}
		bar.Increment()
	}
}
// doRangeZk consumes read requests and issues each one as a Zookeeper
// znode get, reporting per-request latency on the shared results channel.
func doRangeZk(conn *zk.Conn, requests <-chan request) {
	defer wg.Done()
	for r := range requests {
		begin := time.Now()
		_, _, gerr := conn.Get(r.zkOp.key)
		errStr := ""
		if gerr != nil {
			errStr = gerr.Error()
		}
		results <- result{errStr: errStr, duration: time.Since(begin), happened: time.Now()}
		bar.Increment()
	}
}
// doRangeConsul consumes read requests and issues each one as a stale-
// allowed Consul KV get, reporting per-request latency on the shared
// results channel.
func doRangeConsul(conn *consulapi.KV, requests <-chan request) {
	defer wg.Done()
	for r := range requests {
		begin := time.Now()
		_, _, gerr := conn.Get(r.consulOp.key, &consulapi.QueryOptions{AllowStale: true})
		errStr := ""
		if gerr != nil {
			errStr = gerr.Error()
		}
		results <- result{errStr: errStr, duration: time.Since(begin), happened: time.Now()}
		bar.Increment()
	}
}

View File

@ -1,206 +0,0 @@
// Copyright 2014 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package bench
import (
"fmt"
"log"
"math"
"sort"
"strings"
"time"
)
const (
	// barChar is the glyph used to draw histogram bars.
	barChar = "∎"
)
// result records the outcome of a single benchmark request.
type result struct {
	errStr   string        // empty on success, error text otherwise
	duration time.Duration // request round-trip latency
	happened time.Time     // when the request completed
}
// report aggregates benchmark results into summary statistics;
// finalize fills the numeric fields from the results channel.
type report struct {
	avgTotal float64 // running sum of latencies in seconds
	fastest  float64
	slowest  float64
	average  float64
	stddev   float64
	rps      float64 // requests per second over the drain window

	results chan result   // input channel drained by finalize
	total   time.Duration // wall time spent draining results

	errorDist map[string]int // error message -> occurrence count
	lats      []float64      // per-request latencies in seconds

	sps *secondPoints // per-second latency samples
}
// printReport drains the results channel in the background and prints
// the full summary report; the returned channel is closed when done.
func printReport(results chan result) <-chan struct{} {
	return wrapReport(func() {
		rep := &report{
			results:   results,
			errorDist: make(map[string]int),
			sps:       newSecondPoints(),
		}
		rep.finalize()
		rep.print()
	})
}
// printRate drains the results channel in the background and prints only
// the requests-per-second figure; the returned channel is closed when done.
func printRate(results chan result) <-chan struct{} {
	return wrapReport(func() {
		rep := &report{
			results:   results,
			errorDist: make(map[string]int),
			sps:       newSecondPoints(),
		}
		rep.finalize()
		fmt.Printf(" Requests/sec:\t%4.4f\n", rep.rps)
	})
}
// wrapReport runs f on its own goroutine and returns a channel that is
// closed once f has returned (even if f panics, via defer).
func wrapReport(f func()) <-chan struct{} {
	done := make(chan struct{})
	go func() {
		defer close(done)
		f()
	}()
	return done
}
// finalize drains r.results, bucketing errors by message and feeding
// successful latencies into the per-second sampler; it then computes
// total drain time, rps, average, and stddev. Latency collection is
// skipped when noHistogram is set, and with sample also set the summary
// statistics are skipped entirely.
func (r *report) finalize() {
	log.Printf("finalize has started\n")
	st := time.Now()
	for res := range r.results {
		if res.errStr != "" {
			r.errorDist[res.errStr]++
		} else {
			r.sps.Add(res.happened, res.duration)
			if !noHistogram {
				r.lats = append(r.lats, res.duration.Seconds())
				r.avgTotal += res.duration.Seconds()
			}
		}
	}
	// total measures wall time until the producer closes the channel.
	r.total = time.Since(st)
	if sample && noHistogram {
		return
	}
	r.rps = float64(len(r.lats)) / r.total.Seconds()
	r.average = r.avgTotal / float64(len(r.lats))
	for i := range r.lats {
		dev := r.lats[i] - r.average
		r.stddev += dev * dev
	}
	r.stddev = math.Sqrt(r.stddev / float64(len(r.lats)))
}
// print writes the full benchmark report: the per-second sample (when
// sample && noHistogram), then — if any latencies were collected — the
// summary block, histogram, percentile table, and optional second
// sample, and finally the error distribution when errors occurred.
func (r *report) print() {
	log.Printf("print has started\n")
	if sample && noHistogram {
		r.printSecondSample()
	}
	if len(r.lats) > 0 {
		// sorting enables fastest/slowest and percentile extraction
		sort.Float64s(r.lats)
		r.fastest = r.lats[0]
		r.slowest = r.lats[len(r.lats)-1]
		fmt.Printf("\nSummary:\n")
		fmt.Printf(" Total:\t%4.4f secs.\n", r.total.Seconds())
		fmt.Printf(" Slowest:\t%4.4f secs.\n", r.slowest)
		fmt.Printf(" Fastest:\t%4.4f secs.\n", r.fastest)
		fmt.Printf(" Average:\t%4.4f secs.\n", r.average)
		fmt.Printf(" Stddev:\t%4.4f secs.\n", r.stddev)
		fmt.Printf(" Requests/sec:\t%4.4f\n", r.rps)
		r.printHistogram()
		r.printLatencies()
		if sample {
			r.printSecondSample()
		}
	}
	if len(r.errorDist) > 0 {
		r.printErrors()
	}
}
// printLatencies prints the request latency observed at each of a fixed
// set of percentiles.
func (r *report) printLatencies() {
	pctls := []int{10, 25, 50, 75, 90, 95, 99}
	data := make([]float64, len(pctls))
	j := 0
	for i := 0; i < len(r.lats) && j < len(pctls); i++ {
		if i*100/len(r.lats) >= pctls[j] {
			data[j] = r.lats[i]
			j++
		}
	}
	fmt.Printf("\nLatency distribution:\n")
	for i, d := range data {
		if d > 0 {
			fmt.Printf(" %v%% in %4.4f secs.\n", pctls[i], d)
		}
	}
}
// printSecondSample prints the per-second latency time series collected
// by the secondPoints sampler.
func (r *report) printSecondSample() {
	fmt.Println(r.sps.getTimeSeries())
}
// printHistogram renders a 10-bucket response-time histogram; bar
// lengths are normalized against the fullest bucket.
func (r *report) printHistogram() {
	const numBuckets = 10
	buckets := make([]float64, numBuckets+1)
	counts := make([]int, numBuckets+1)
	width := (r.slowest - r.fastest) / float64(numBuckets)
	for i := 0; i < numBuckets; i++ {
		buckets[i] = r.fastest + width*float64(i)
	}
	buckets[numBuckets] = r.slowest
	// Walk the sorted latencies once, advancing the bucket index when a
	// latency exceeds the current bucket's upper bound.
	// (renamed from "max" to avoid shadowing the package-level max func)
	var bi, biggest int
	for i := 0; i < len(r.lats); {
		if r.lats[i] <= buckets[bi] {
			counts[bi]++
			if counts[bi] > biggest {
				biggest = counts[bi]
			}
			i++
		} else if bi < len(buckets)-1 {
			bi++
		}
	}
	fmt.Printf("\nResponse time histogram:\n")
	for i := range buckets {
		// Normalize bar lengths.
		var barLen int
		if biggest > 0 {
			barLen = counts[i] * 40 / biggest
		}
		fmt.Printf(" %4.3f [%v]\t|%v\n", buckets[i], counts[i], strings.Repeat(barChar, barLen))
	}
}
// printErrors prints how many times each distinct error message occurred.
func (r *report) printErrors() {
	fmt.Printf("\nError distribution:\n")
	for msg, count := range r.errorDist {
		fmt.Printf(" [%d]\t%s\n", count, msg)
	}
}

View File

@ -1,218 +0,0 @@
// Copyright 2015 CoreOS, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package bench
import (
"crypto/rand"
"fmt"
"io"
"io/ioutil"
"log"
"net/http"
"os"
"strings"
"time"
mrand "math/rand"
clientv2 "github.com/coreos/etcd/client"
"github.com/coreos/etcd/clientv3"
consulapi "github.com/hashicorp/consul/api"
"github.com/samuel/go-zookeeper/zk"
)
// request is a tagged union of one benchmark operation; exactly one
// field is populated, matching the database under test.
type request struct {
	etcdv2Op etcdv2Op
	etcdv3Op clientv3.Op
	zkOp     zkOp
	consulOp consulOp
}
// etcdv2Op describes a single etcd v2 key-value operation.
type etcdv2Op struct {
	key   string
	value string
}
// zkOp describes a single Zookeeper znode operation.
type zkOp struct {
	key   string
	value []byte
}
// consulOp describes a single Consul KV operation.
type consulOp struct {
	key   string
	value []byte
}
var (
	// dialTotal counts the number of mustCreateConn calls so that endpoint
	// connections can be handed out in round-robin order.
	dialTotal int
)
// mustCreateConnEtcdv3 dials one etcd v3 endpoint, chosen round-robin
// via dialTotal, and exits the process on failure.
func mustCreateConnEtcdv3() *clientv3.Client {
	ep := endpoints[dialTotal%len(endpoints)]
	dialTotal++
	cli, err := clientv3.New(clientv3.Config{Endpoints: []string{ep}})
	if err != nil {
		fmt.Fprintf(os.Stderr, "dial error: %v\n", err)
		os.Exit(1)
	}
	return cli
}
// mustCreateClientsEtcdv3 opens totalConns etcd v3 connections and hands
// them out round-robin across totalClients client slots (clients may
// share underlying connections when totalClients > totalConns).
func mustCreateClientsEtcdv3(totalClients, totalConns uint) []*clientv3.Client {
	conns := make([]*clientv3.Client, totalConns)
	for i := 0; i < int(totalConns); i++ {
		conns[i] = mustCreateConnEtcdv3()
	}
	clients := make([]*clientv3.Client, totalClients)
	for i := 0; i < int(totalClients); i++ {
		clients[i] = conns[i%int(totalConns)]
	}
	return clients
}
// mustCreateClientsEtcdv2 builds total etcd v2 KeysAPI clients, one per
// round-robin endpoint, normalizing bare host:port endpoints to http://
// URLs; fatal on any client-construction error.
func mustCreateClientsEtcdv2(total uint) []clientv2.KeysAPI {
	apis := make([]clientv2.KeysAPI, total)
	for i := range apis {
		ep := endpoints[dialTotal%len(endpoints)]
		dialTotal++
		if !strings.HasPrefix(ep, "http://") {
			ep = "http://" + ep
		}
		c, err := clientv2.New(clientv2.Config{
			Endpoints:               []string{ep},
			Transport:               clientv2.DefaultTransport,
			HeaderTimeoutPerRequest: time.Second,
		})
		if err != nil {
			log.Fatal(err)
		}
		apis[i] = clientv2.NewKeysAPI(c)
	}
	return apis
}
// mustCreateConnsZk opens total Zookeeper connections, one per
// round-robin endpoint; fatal on any dial error.
func mustCreateConnsZk(total uint) []*zk.Conn {
	conns := make([]*zk.Conn, total)
	for i := range conns {
		ep := endpoints[dialTotal%len(endpoints)]
		dialTotal++
		c, _, err := zk.Connect([]string{ep}, time.Second)
		if err != nil {
			log.Fatal(err)
		}
		conns[i] = c
	}
	return conns
}
// mustCreateConnsConsul builds total Consul KV clients, one per
// round-robin endpoint; fatal on any client-construction error.
func mustCreateConnsConsul(total uint) []*consulapi.KV {
	kvs := make([]*consulapi.KV, total)
	for i := range kvs {
		ep := endpoints[dialTotal%len(endpoints)]
		dialTotal++
		dcfg := consulapi.DefaultConfig()
		dcfg.Address = ep // x.x.x.x:8500
		cli, err := consulapi.NewClient(dcfg)
		if err != nil {
			log.Fatal(err)
		}
		kvs[i] = cli.KV()
	}
	return kvs
}
// mustRandBytes returns n cryptographically random bytes, exiting the
// process if the system entropy source fails.
func mustRandBytes(n int) []byte {
	b := make([]byte, n)
	if _, err := rand.Read(b); err != nil {
		fmt.Fprintf(os.Stderr, "failed to generate value: %v\n", err)
		os.Exit(1)
	}
	return b
}
// randBytes returns bytesN pseudo-random ASCII letters drawn from
// [a-zA-Z]. It consumes each 63-bit Int63 value six bits at a time,
// discarding candidate indices that fall past the end of the alphabet.
func randBytes(bytesN int) []byte {
	const (
		letterBytes   = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ"
		letterIdxBits = 6                    // bits needed to index letterBytes
		letterIdxMask = 1<<letterIdxBits - 1 // mask selecting one index
		letterIdxMax  = 63 / letterIdxBits   // indices available per Int63
	)
	src := mrand.NewSource(time.Now().UnixNano())
	out := make([]byte, bytesN)
	cache, remain := src.Int63(), letterIdxMax
	for i := bytesN - 1; i >= 0; {
		if remain == 0 {
			cache, remain = src.Int63(), letterIdxMax
		}
		if idx := int(cache & letterIdxMask); idx < len(letterBytes) {
			out[i] = letterBytes[idx]
			i--
		}
		cache >>= letterIdxBits
		remain--
	}
	return out
}
// multiRandStrings returns sliceN distinct random strings of keyN
// letters each, using a set to deduplicate until enough are collected.
func multiRandStrings(keyN, sliceN int) []string {
	seen := make(map[string]struct{})
	for len(seen) != sliceN {
		seen[string(randBytes(keyN))] = struct{}{}
	}
	out := make([]string, 0, sliceN)
	for s := range seen {
		out = append(out, s)
	}
	return out
}
// toFile writes txt to fpath, truncating an existing file or creating a
// new one when it does not yet exist.
func toFile(txt, fpath string) error {
	f, err := os.OpenFile(fpath, os.O_RDWR|os.O_TRUNC, 0777)
	if err != nil {
		// open failed (typically: file does not exist) — create instead
		if f, err = os.Create(fpath); err != nil {
			return err
		}
	}
	defer f.Close()
	_, werr := f.WriteString(txt)
	return werr
}
// toMillisecond converts a duration to fractional milliseconds.
func toMillisecond(d time.Duration) float64 {
	secs := d.Seconds()
	return secs * 1000
}
// gracefulClose drains http.Response.Body until it hits EOF and closes
// it. This prevents TCP/TLS connections from closing, therefore
// available for reuse.
func gracefulClose(resp *http.Response) {
	defer resp.Body.Close()
	io.Copy(ioutil.Discard, resp.Body)
}

75
control/config.go Normal file
View File

@ -0,0 +1,75 @@
// Copyright 2016 CoreOS, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package control
import (
"io/ioutil"
"gopkg.in/yaml.v2"
)
// Config is the top-level YAML configuration consumed by the control
// command; ReadConfig populates it from a file.
type Config struct {
	Database string `yaml:"database"`  // etcdv2, etcdv3, zk/zookeeper, or consul
	TestName string `yaml:"test_name"` // label for this benchmark run

	GoogleCloudProjectName string `yaml:"google_cloud_project_name"`
	// GoogleCloudStorageKey is filled at runtime from the file at
	// GoogleCloudStorageKeyPath; it is not read from YAML itself.
	GoogleCloudStorageKey        string
	GoogleCloudStorageKeyPath    string `yaml:"google_cloud_storage_key_path"`
	GoogleCloudStorageBucketName string `yaml:"google_cloud_storage_bucket_name"`

	PeerIPs []string `yaml:"peer_ips"`
	// PeerIPString is derived at runtime (PeerIPs joined with "___").
	PeerIPString string

	AgentPort    int `yaml:"agent_port"`
	DatabasePort int `yaml:"database_port"`
	// AgentEndpoints and DatabaseEndpoints are derived at runtime from
	// PeerIPs plus the ports above; not read from YAML.
	AgentEndpoints    []string
	DatabaseEndpoints []string

	// Step1 configures database startup on the agents.
	Step1 struct {
		Skip                    bool   `yaml:"skip"`
		DatabaseLogPath         string `yaml:"database_log_path"`
		MonitorLogPath          string `yaml:"monitor_log_path"`
		ZookeeperMaxClientCnxns int64  `yaml:"zookeeper_max_client_connections"`
	} `yaml:"step1"`

	// Step2 configures the benchmark itself.
	Step2 struct {
		Skip                  bool   `yaml:"skip"`
		BenchType             string `yaml:"bench_type"` // "write" or "read"
		LocalRead             bool   `yaml:"local_read"`
		ResultPath            string `yaml:"result_path"`
		Connections           int    `yaml:"connections"`
		Clients               int    `yaml:"clients"`
		KeySize               int    `yaml:"key_size"`
		ValueSize             int    `yaml:"value_size"`
		TotalRequests         int    `yaml:"total_requests"`
		Etcdv3CompactionCycle int    `yaml:"etcdv3_compaction_cycle"`
	} `yaml:"step2"`

	// Step3 configures database shutdown. The explicit tag matches
	// yaml.v2's default lowercased key and is added for consistency
	// with Step1/Step2.
	Step3 struct {
		Skip bool `yaml:"skip"`
	} `yaml:"step3"`
}
// ReadConfig parses the YAML file at fpath into a Config.
func ReadConfig(fpath string) (Config, error) {
	var cfg Config
	bts, err := ioutil.ReadFile(fpath)
	if err != nil {
		return Config{}, err
	}
	if err = yaml.Unmarshal(bts, &cfg); err != nil {
		return Config{}, err
	}
	return cfg, nil
}

84
control/config_test.go Normal file
View File

@ -0,0 +1,84 @@
// Copyright 2016 CoreOS, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package control
import (
"reflect"
"testing"
)
// TestReadConfig verifies that the checked-in sample file test.yaml
// round-trips through ReadConfig with every expected field value.
func TestReadConfig(t *testing.T) {
	c, err := ReadConfig("test.yaml")
	if err != nil {
		t.Fatal(err)
	}
	if c.Database != "etcdv3" {
		t.Fatalf("unexpected %s", c.Database)
	}
	if c.TestName != "bench-01-etcdv3" {
		t.Fatalf("unexpected %s", c.TestName)
	}
	if !reflect.DeepEqual(c.PeerIPs, []string{"10.240.0.13", "10.240.0.14", "10.240.0.15"}) {
		t.Fatalf("unexpected %s", c.PeerIPs)
	}
	if c.AgentPort != 3500 {
		t.Fatalf("unexpected %d", c.AgentPort)
	}
	if c.DatabasePort != 2379 {
		t.Fatalf("unexpected %d", c.DatabasePort)
	}
	if c.GoogleCloudProjectName != "etcd-development" {
		t.Fatalf("unexpected %s", c.GoogleCloudProjectName)
	}
	// the key path is kept verbatim; env expansion happens elsewhere
	if c.GoogleCloudStorageKeyPath != "$HOME/gcloud-key.json" {
		t.Fatalf("unexpected %s", c.GoogleCloudStorageKeyPath)
	}
	if c.GoogleCloudStorageBucketName != "bench-20160411" {
		t.Fatalf("unexpected %s", c.GoogleCloudStorageBucketName)
	}
	if c.Step1.Skip {
		t.Fatalf("unexpected %v", c.Step1.Skip)
	}
	if c.Step1.DatabaseLogPath != "database.log" {
		t.Fatalf("unexpected %s", c.Step1.DatabaseLogPath)
	}
	if c.Step1.MonitorLogPath != "monitor.csv" {
		t.Fatalf("unexpected %s", c.Step1.MonitorLogPath)
	}
	if c.Step1.ZookeeperMaxClientCnxns != 5000 {
		t.Fatalf("unexpected %d", c.Step1.ZookeeperMaxClientCnxns)
	}
	if c.Step2.Skip {
		t.Fatalf("unexpected %v", c.Step2.Skip)
	}
	if c.Step2.BenchType != "write" {
		t.Fatalf("unexpected %s", c.Step2.BenchType)
	}
	if c.Step2.ResultPath != "bench-01-etcdv3-timeseries.csv" {
		t.Fatalf("unexpected %s", c.Step2.ResultPath)
	}
	if c.Step2.Connections != 100 {
		t.Fatalf("unexpected %d", c.Step2.Connections)
	}
	if !c.Step2.LocalRead {
		t.Fatalf("unexpected %v", c.Step2.LocalRead)
	}
	if c.Step2.TotalRequests != 3000000 {
		t.Fatalf("unexpected %d", c.Step2.TotalRequests)
	}
	if c.Step3.Skip {
		t.Fatalf("unexpected %v", c.Step3.Skip)
	}
}

View File

@ -18,134 +18,154 @@ import (
"fmt"
"io/ioutil"
"log"
"os"
"strings"
"sync"
"time"
"github.com/coreos/dbtester/agent"
"github.com/spf13/cobra"
"golang.org/x/net/context"
"google.golang.org/grpc"
)
type (
Flags struct {
Database string
AgentEndpoints []string
ZookeeperPreAllocSize int64
ZookeeperMaxClientCnxns int64
LogPrefix string
DatabaseLogPath string
MonitorResultPath string
GoogleCloudProjectName string
KeyPath string
Bucket string
}
"github.com/cheggaaa/pb"
"github.com/coreos/etcd/clientv3"
consulapi "github.com/hashicorp/consul/api"
)
var (
StartCommand = &cobra.Command{
Use: "start",
Short: "Starts database through RPC calls.",
Command = &cobra.Command{
Use: "control",
Short: "Controls tests.",
RunE: CommandFunc,
}
StopCommand = &cobra.Command{
Use: "stop",
Short: "Stops database through RPC calls.",
RunE: CommandFunc,
}
RestartCommand = &cobra.Command{
Use: "restart",
Short: "Restarts database through RPC calls.",
RunE: CommandFunc,
}
globalFlags = Flags{}
configPath string
)
func init() {
StartCommand.PersistentFlags().StringVar(&globalFlags.Database, "database", "", "etcdv2, etcdv3, zookeeper, zk, consul.")
StartCommand.PersistentFlags().StringSliceVar(&globalFlags.AgentEndpoints, "agent-endpoints", []string{""}, "Endpoints to send client requests to, then it automatically configures.")
StartCommand.PersistentFlags().Int64Var(&globalFlags.ZookeeperPreAllocSize, "zk-pre-alloc-size", 65536*1024, "Disk pre-allocation size in bytes.")
StartCommand.PersistentFlags().Int64Var(&globalFlags.ZookeeperMaxClientCnxns, "zk-max-client-conns", 5000, "Maximum number of concurrent Zookeeper connection.")
StartCommand.PersistentFlags().StringVar(&globalFlags.LogPrefix, "log-prefix", "", "Prefix to all logs to be generated in agents.")
StartCommand.PersistentFlags().StringVar(&globalFlags.DatabaseLogPath, "database-log-path", "database.log", "Path of database log.")
StartCommand.PersistentFlags().StringVar(&globalFlags.MonitorResultPath, "monitor-result-path", "monitor.csv", "CSV file path of monitoring results.")
StartCommand.PersistentFlags().StringVar(&globalFlags.GoogleCloudProjectName, "google-cloud-project-name", "", "Google cloud project name.")
StartCommand.PersistentFlags().StringVar(&globalFlags.KeyPath, "key-path", "", "Path of key file.")
StartCommand.PersistentFlags().StringVar(&globalFlags.Bucket, "bucket", "", "Bucket name in cloud storage.")
StopCommand.PersistentFlags().StringSliceVar(&globalFlags.AgentEndpoints, "agent-endpoints", []string{""}, "Endpoints to send client requests to, then it automatically configures.")
RestartCommand.PersistentFlags().StringSliceVar(&globalFlags.AgentEndpoints, "agent-endpoints", []string{""}, "Endpoints to send client requests to, then it automatically configures.")
Command.PersistentFlags().StringVarP(&configPath, "config", "c", "", "YAML configuration file path.")
}
func CommandFunc(cmd *cobra.Command, args []string) error {
if globalFlags.Database == "zk" {
globalFlags.Database = "zookeeper"
}
req := agent.Request{}
switch cmd.Use {
case "start":
req.Operation = agent.Request_Start
case "stop":
req.Operation = agent.Request_Stop
case "restart":
req.Operation = agent.Request_Restart
default:
return fmt.Errorf("Operation '%s' is not supported!\n", cmd.Use)
}
switch globalFlags.Database {
case "etcdv2":
req.Database = agent.Request_etcdv2
case "etcdv3":
req.Database = agent.Request_etcdv3
case "zookeeper":
req.Database = agent.Request_ZooKeeper
case "consul":
req.Database = agent.Request_Consul
default:
if req.Operation != agent.Request_Stop {
return fmt.Errorf("'%s' is not supported!\n", globalFlags.Database)
}
}
peerIPs := extractIPs(globalFlags.AgentEndpoints)
req.PeerIPs = strings.Join(peerIPs, "___") // because protoc mixes the order of 'repeated' type data
if cmd.Use == "start" {
req.ZookeeperPreAllocSize = globalFlags.ZookeeperPreAllocSize
req.ZookeeperMaxClientCnxns = globalFlags.ZookeeperMaxClientCnxns
req.LogPrefix = globalFlags.LogPrefix
req.DatabaseLogPath = globalFlags.DatabaseLogPath
req.MonitorResultPath = globalFlags.MonitorResultPath
req.GoogleCloudProjectName = globalFlags.GoogleCloudProjectName
bts, err := ioutil.ReadFile(globalFlags.KeyPath)
cfg, err := ReadConfig(configPath)
if err != nil {
return err
}
req.StorageKey = string(bts)
req.Bucket = globalFlags.Bucket
switch cfg.Database {
case "etcdv2":
case "etcdv3":
case "zk", "zookeeper":
case "consul":
default:
return fmt.Errorf("%q is not supported", cfg.Database)
}
if !cfg.Step2.Skip {
switch cfg.Step2.BenchType {
case "write":
case "read":
default:
return fmt.Errorf("%q is not supported", cfg.Step2.BenchType)
}
}
bts, err := ioutil.ReadFile(cfg.GoogleCloudStorageKeyPath)
if err != nil {
return err
}
cfg.GoogleCloudStorageKey = string(bts)
cfg.PeerIPString = strings.Join(cfg.PeerIPs, "___") // protoc sorts the 'repeated' type data
cfg.AgentEndpoints = make([]string, len(cfg.PeerIPs))
cfg.DatabaseEndpoints = make([]string, len(cfg.PeerIPs))
for i := range cfg.PeerIPs {
cfg.AgentEndpoints[i] = fmt.Sprintf("%s:%d", cfg.PeerIPs[i], cfg.AgentPort)
}
for i := range cfg.PeerIPs {
cfg.DatabaseEndpoints[i] = fmt.Sprintf("%s:%d", cfg.DatabaseEndpoints[i], cfg.DatabasePort)
}
println()
if !cfg.Step1.Skip {
log.Println("Step 1: starting databases...")
if err = step1(cfg); err != nil {
return err
}
}
println()
if !cfg.Step2.Skip {
log.Println("Step 2: starting tests...")
if err = step2(cfg); err != nil {
return err
}
}
println()
if !cfg.Step3.Skip {
log.Println("Step 3: stopping databases...")
if err = step3(cfg); err != nil {
return err
}
}
return nil
}
func step1(cfg Config) error {
req := agent.Request{}
req.Operation = agent.Request_Start
req.TestName = cfg.TestName
req.GoogleCloudProjectName = cfg.GoogleCloudProjectName
req.GoogleCloudStorageKey = cfg.GoogleCloudStorageKey
req.GoogleCloudStorageBucketName = cfg.GoogleCloudStorageBucketName
switch cfg.Database {
case "etcdv2":
req.Database = agent.Request_etcdv2
case "etcdv3":
req.Database = agent.Request_etcdv3
case "zk":
cfg.Database = "zookeeper"
req.Database = agent.Request_ZooKeeper
case "zookeeper":
req.Database = agent.Request_ZooKeeper
case "consul":
req.Database = agent.Request_Consul
}
req.DatabaseLogPath = cfg.Step1.DatabaseLogPath
req.MonitorLogPath = cfg.Step1.MonitorLogPath
req.PeerIPString = cfg.PeerIPString
req.ZookeeperMaxClientCnxns = cfg.Step1.ZookeeperMaxClientCnxns
donec, errc := make(chan struct{}), make(chan error)
for i := range peerIPs {
for i := range cfg.PeerIPs {
go func(i int) {
nreq := req
nreq.ServerIndex = uint32(i)
nreq.ZookeeperMyID = uint32(i + 1)
ep := globalFlags.AgentEndpoints[nreq.ServerIndex]
ep := cfg.AgentEndpoints[nreq.ServerIndex]
log.Printf("[%d] %s %s at %s", i, req.Operation, req.Database, ep)
log.Printf("[%d] %s %s at %s\n", i, req.Operation, req.Database, ep)
conn, err := grpc.Dial(ep, grpc.WithInsecure())
if err != nil {
log.Printf("[%d] error %v when connecting to %s\n", i, err, ep)
log.Printf("[%d] error %v when connecting to %s", i, err, ep)
errc <- err
return
}
defer conn.Close()
cli := agent.NewTransporterClient(conn)
@ -153,18 +173,20 @@ func CommandFunc(cmd *cobra.Command, args []string) error {
resp, err := cli.Transfer(ctx, &nreq)
cancel()
if err != nil {
log.Printf("[%d] error %v when transferring to %s\n", i, err, ep)
log.Printf("[%d] error %v when transferring to %s", i, err, ep)
errc <- err
return
}
log.Printf("[%d] Response from %s (%+v)\n", i, ep, resp)
log.Printf("[%d] Response from %s (%+v)", i, ep, resp)
donec <- struct{}{}
}(i)
time.Sleep(time.Second)
}
cnt := 0
for cnt != len(peerIPs) {
for cnt != len(cfg.PeerIPs) {
select {
case <-donec:
case err := <-errc:
@ -172,14 +194,360 @@ func CommandFunc(cmd *cobra.Command, args []string) error {
}
cnt++
}
return nil
}
func extractIPs(es []string) []string {
var rs []string
for _, v := range es {
sl := strings.Split(v, ":")
rs = append(rs, sl[0])
var (
bar *pb.ProgressBar
results chan result
wg sync.WaitGroup
)
func step2(cfg Config) error {
valueBts := mustRandBytes(cfg.Step2.ValueSize)
value := string(valueBts)
switch cfg.Step2.BenchType {
case "write":
log.Printf("generating %d keys", cfg.Step2.TotalRequests)
keys := multiRandStrings(cfg.Step2.KeySize, cfg.Step2.TotalRequests)
results = make(chan result)
requests := make(chan request, cfg.Step2.Clients)
bar = pb.New(cfg.Step2.TotalRequests)
bar.Format("Bom !")
bar.Start()
var etcdClients []*clientv3.Client
switch cfg.Database {
case "etcdv2":
conns := mustCreateClientsEtcdv2(cfg.DatabaseEndpoints, cfg.Step2.Connections)
for i := range conns {
wg.Add(1)
go doPutEtcdv2(context.Background(), conns[i], requests)
}
return rs
case "etcdv3":
etcdClients = mustCreateClientsEtcdv3(cfg.DatabaseEndpoints, cfg.Step2.Clients, cfg.Step2.Connections)
for i := range etcdClients {
wg.Add(1)
go doPutEtcdv3(context.Background(), etcdClients[i], requests)
}
defer func() {
for i := range etcdClients {
etcdClients[i].Close()
}
}()
case "zk", "zookeeper":
conns := mustCreateConnsZk(cfg.DatabaseEndpoints, cfg.Step2.Connections)
defer func() {
for i := range conns {
conns[i].Close()
}
}()
for i := range conns {
wg.Add(1)
go doPutZk(conns[i], requests)
}
case "consul":
conns := mustCreateConnsConsul(cfg.DatabaseEndpoints, cfg.Step2.Connections)
for i := range conns {
wg.Add(1)
go doPutConsul(conns[i], requests)
}
}
pdoneC := printReport(results, cfg)
go func() {
for i := 0; i < cfg.Step2.TotalRequests; i++ {
if cfg.Database == "etcdv3" && cfg.Step2.Etcdv3CompactionCycle > 0 && i%cfg.Step2.Etcdv3CompactionCycle == 0 {
log.Printf("etcdv3 starting compaction at %d put request", i)
go func() {
compactKV(etcdClients)
}()
}
key := keys[i]
switch cfg.Database {
case "etcdv2":
requests <- request{etcdv2Op: etcdv2Op{key: key, value: value}}
case "etcdv3":
requests <- request{etcdv3Op: clientv3.OpPut(key, value)}
case "zk", "zookeeper":
requests <- request{zkOp: zkOp{key: "/" + key, value: []byte(value)}}
case "consul":
requests <- request{consulOp: consulOp{key: key, value: []byte(value)}}
}
}
close(requests)
}()
wg.Wait()
bar.Finish()
close(results)
<-pdoneC
log.Println("Expected Write Total:", cfg.Step2.TotalRequests)
switch cfg.Database {
case "etcdv2":
for k, v := range getTotalKeysEtcdv2(cfg.DatabaseEndpoints) {
fmt.Println("Endpoint :", k)
fmt.Println("Number of Keys:", v)
fmt.Println()
}
case "etcdv3":
for k, v := range getTotalKeysEtcdv3(cfg.DatabaseEndpoints) {
fmt.Println("Endpoint :", k)
fmt.Println("Number of Keys:", v)
fmt.Println()
}
case "zk", "zookeeper":
for k, v := range getTotalKeysZk(cfg.DatabaseEndpoints) {
fmt.Println("Endpoint :", k)
fmt.Println("Number of Keys:", v)
fmt.Println()
}
case "consul":
for k, v := range getTotalKeysConsul(cfg.DatabaseEndpoints) {
fmt.Println("Endpoint :", k)
fmt.Println("Number of Keys:", v)
fmt.Println()
}
}
case "read":
key := string(randBytes(cfg.Step2.KeySize))
switch cfg.Database {
case "etcdv2":
log.Printf("PUT '%s' to etcdv2", key)
var err error
for i := 0; i < 5; i++ {
clients := mustCreateClientsEtcdv2(cfg.DatabaseEndpoints, cfg.Step2.Connections)
_, err = clients[0].Set(context.Background(), key, value, nil)
if err != nil {
continue
}
log.Printf("Done with PUT '%s' to etcdv2", key)
break
}
if err != nil {
fmt.Fprintln(os.Stderr, err)
os.Exit(1)
}
case "etcdv3":
log.Printf("PUT '%s' to etcd", key)
var err error
for i := 0; i < 5; i++ {
clients := mustCreateClientsEtcdv3(cfg.DatabaseEndpoints, 1, 1)
_, err = clients[0].Do(context.Background(), clientv3.OpPut(key, value))
if err != nil {
continue
}
log.Printf("Done with PUT '%s' to etcd", key)
break
}
if err != nil {
fmt.Fprintln(os.Stderr, err)
os.Exit(1)
}
case "zk", "zookeeper":
log.Printf("PUT '/%s' to Zookeeper", key)
var err error
for i := 0; i < 5; i++ {
conns := mustCreateConnsZk(cfg.DatabaseEndpoints, cfg.Step2.Connections)
_, err = conns[0].Create("/"+key, valueBts, zkCreateFlags, zkCreateAcl)
if err != nil {
continue
}
log.Printf("Done with PUT '/%s' to Zookeeper", key)
break
}
if err != nil {
fmt.Fprintln(os.Stderr, err)
os.Exit(1)
}
case "consul":
log.Printf("PUT '%s' to Consul", key)
var err error
for i := 0; i < 5; i++ {
clients := mustCreateConnsConsul(cfg.DatabaseEndpoints, cfg.Step2.Connections)
_, err = clients[0].Put(&consulapi.KVPair{Key: key, Value: valueBts}, nil)
if err != nil {
continue
}
log.Printf("Done with PUT '%s' to Consul", key)
break
}
if err != nil {
fmt.Fprintln(os.Stderr, err)
os.Exit(1)
}
}
results = make(chan result)
requests := make(chan request, cfg.Step2.Clients)
bar = pb.New(cfg.Step2.TotalRequests)
bar.Format("Bom !")
bar.Start()
switch cfg.Database {
case "etcdv2":
conns := mustCreateClientsEtcdv2(cfg.DatabaseEndpoints, cfg.Step2.Connections)
for i := range conns {
wg.Add(1)
go doRangeEtcdv2(conns[i], requests)
}
case "etcdv3":
clients := mustCreateClientsEtcdv3(cfg.DatabaseEndpoints, cfg.Step2.Clients, cfg.Step2.Connections)
for i := range clients {
wg.Add(1)
go doRangeEtcdv3(clients[i].KV, requests)
}
defer func() {
for i := range clients {
clients[i].Close()
}
}()
case "zk", "zookeeper":
conns := mustCreateConnsZk(cfg.DatabaseEndpoints, cfg.Step2.Connections)
defer func() {
for i := range conns {
conns[i].Close()
}
}()
for i := range conns {
wg.Add(1)
go doRangeZk(conns[i], requests)
}
case "consul":
conns := mustCreateConnsConsul(cfg.DatabaseEndpoints, cfg.Step2.Connections)
for i := range conns {
wg.Add(1)
go doRangeConsul(conns[i], requests)
}
}
pdoneC := printReport(results, cfg)
go func() {
for i := 0; i < cfg.Step2.TotalRequests; i++ {
switch cfg.Database {
case "etcdv2":
// serializable read by default
requests <- request{etcdv2Op: etcdv2Op{key: key}}
case "etcdv3":
opts := []clientv3.OpOption{clientv3.WithRange("")}
if cfg.Step2.LocalRead {
opts = append(opts, clientv3.WithSerializable())
}
requests <- request{etcdv3Op: clientv3.OpGet(key, opts...)}
case "zk", "zookeeper":
// serializable read by default
requests <- request{zkOp: zkOp{key: key}}
case "consul":
// serializable read by default
requests <- request{consulOp: consulOp{key: key}}
}
}
close(requests)
}()
wg.Wait()
bar.Finish()
close(results)
<-pdoneC
}
return nil
}
// step3 stops the database processes on every remote machine by sending a
// Stop RPC to each agent endpoint. It returns the first RPC error, or nil
// once every agent has acknowledged.
func step3(cfg Config) error {
	req := agent.Request{}
	req.Operation = agent.Request_Stop
	switch cfg.Database {
	case "etcdv2":
		req.Database = agent.Request_etcdv2
	case "etcdv3":
		req.Database = agent.Request_etcdv3
	case "zk":
		cfg.Database = "zookeeper"
		req.Database = agent.Request_ZooKeeper
	case "zookeeper":
		req.Database = agent.Request_ZooKeeper
	case "consul":
		req.Database = agent.Request_Consul
	}

	// Buffered so that goroutines finishing after an early error return do
	// not block forever on the send (prevents a goroutine leak).
	donec := make(chan struct{}, len(cfg.PeerIPs))
	errc := make(chan error, len(cfg.PeerIPs))
	for i := range cfg.PeerIPs {
		go func(i int) {
			// Each peer is stopped through its own agent endpoint. The
			// original indexed with req.ServerIndex (always zero), which
			// sent every Stop request to the first agent only.
			ep := cfg.AgentEndpoints[i]
			log.Printf("[%d] %s %s at %s", i, req.Operation, req.Database, ep)
			conn, err := grpc.Dial(ep, grpc.WithInsecure())
			if err != nil {
				log.Printf("[%d] error %v when connecting to %s", i, err, ep)
				errc <- err
				return
			}
			defer conn.Close()
			cli := agent.NewTransporterClient(conn)
			ctx, cancel := context.WithTimeout(context.Background(), 15*time.Second) // Consul takes longer
			resp, err := cli.Transfer(ctx, &req)
			cancel()
			if err != nil {
				log.Printf("[%d] error %v when transferring to %s", i, err, ep)
				errc <- err
				return
			}
			log.Printf("[%d] Response from %s (%+v)", i, ep, resp)
			donec <- struct{}{}
		}(i)
		// Stagger the RPCs so the agents are not all hit at once.
		time.Sleep(time.Second)
	}

	// Wait for every peer; bail out on the first reported error.
	for cnt := 0; cnt != len(cfg.PeerIPs); cnt++ {
		select {
		case <-donec:
		case err := <-errc:
			return err
		}
	}
	return nil
}

115
control/report.go Normal file
View File

@ -0,0 +1,115 @@
// Copyright 2014 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package control
import (
"fmt"
"log"
"path/filepath"
"time"
"github.com/coreos/dbtester/remotestorage"
)
const (
	// barChar is the glyph used to draw histogram bars in printed reports.
	barChar = "∎"
)
// result records the outcome of a single benchmark request.
type result struct {
	errStr   string        // empty on success, otherwise the error message
	duration time.Duration // how long the request took
	happened time.Time     // when the request completed
}
// report aggregates benchmark results for final printing.
type report struct {
	results   chan result    // stream of per-request outcomes
	sps       *secondPoints  // per-second latency samples
	errorDist map[string]int // count of each distinct error string
}
// printReport consumes results in the background, prints the per-second
// time series (uploading it per cfg), and prints an error distribution if
// any request failed. The returned channel is closed when reporting is done.
func printReport(results chan result, cfg Config) <-chan struct{} {
	return wrapReport(func() {
		rep := &report{
			results:   results,
			sps:       newSecondPoints(),
			errorDist: map[string]int{},
		}
		rep.finalize()
		rep.printSecondSample(cfg)
		if len(rep.errorDist) != 0 {
			rep.printErrors()
		}
	})
}
// wrapReport runs f in its own goroutine and returns a channel that is
// closed once f has returned.
func wrapReport(f func()) <-chan struct{} {
	done := make(chan struct{})
	go func() {
		f()
		close(done)
	}()
	return done
}
// finalize drains the results channel, tallying error messages and feeding
// successful samples into the per-second aggregator. It returns only after
// the results channel is closed.
func (r *report) finalize() {
	log.Printf("finalize has started")
	start := time.Now()
	for res := range r.results {
		if res.errStr == "" {
			r.sps.Add(res.happened, res.duration)
			continue
		}
		r.errorDist[res.errStr]++
	}
	log.Println("finalize took:", time.Since(start))
}
// printSecondSample prints the per-second time series, saves it to the
// configured result path, and uploads the CSV to Google Cloud Storage,
// retrying the upload up to five times. File-save and storage-client
// failures are fatal; a persistent upload failure is only logged.
func (r *report) printSecondSample(cfg Config) {
	txt := r.sps.getTimeSeries().String()
	fmt.Println(txt)
	if err := toFile(txt, cfg.Step2.ResultPath); err != nil {
		log.Fatal(err)
	}
	log.Println("time series saved... Uploading to Google cloud storage...")

	u, err := remotestorage.NewGoogleCloudStorage([]byte(cfg.GoogleCloudStorageKey), cfg.GoogleCloudProjectName)
	if err != nil {
		log.Fatal(err)
	}

	src := cfg.Step2.ResultPath
	dst := filepath.Base(src)
	log.Printf("Uploading %s to %s", src, dst)
	for attempt := 0; attempt < 5; attempt++ {
		uerr := u.UploadFile(cfg.GoogleCloudStorageBucketName, src, dst)
		if uerr == nil {
			break
		}
		log.Println(uerr)
	}
	// NOTE(review): if all five upload attempts fail, the error is dropped —
	// confirm this best-effort behavior is intended.
}
// printErrors writes the distribution of error messages to stdout.
func (r *report) printErrors() {
	fmt.Printf("\nError distribution:\n")
	for msg, count := range r.errorDist {
		fmt.Printf(" [%d]\t%s\n", count, msg)
	}
}

40
control/test.yaml Normal file
View File

@ -0,0 +1,40 @@
database: etcdv3
test_name: bench-01-etcdv3
google_cloud_project_name: etcd-development
google_cloud_storage_key_path: $HOME/gcloud-key.json
google_cloud_storage_bucket_name: bench-20160411
peer_ips:
- 10.240.0.13
- 10.240.0.14
- 10.240.0.15
agent_port: 3500
database_port: 2379
# start database by sending RPC calls to agents
step1:
skip: false
database_log_path: database.log
monitor_log_path: monitor.csv
zookeeper_max_client_connections: 5000
# start benchmark
step2:
skip: false
bench_type: write
local_read: true
result_path: bench-01-etcdv3-timeseries.csv
connections: 100
clients: 100
key_size: 64
value_size: 256
total_requests: 3000000
etcdv3_compaction_cycle: 0
# after benchmark
step3:
skip: false

View File

@ -12,20 +12,16 @@
// See the License for the specific language governing permissions and
// limitations under the License.
package bench
package control
import (
"bytes"
"encoding/csv"
"fmt"
"io/ioutil"
"log"
"path/filepath"
"sort"
"sync"
"time"
"github.com/coreos/dbtester/remotestorage"
)
type timeSeries struct {
@ -112,31 +108,5 @@ func (ts TimeSeries) String() string {
if err := wr.Error(); err != nil {
log.Fatal(err)
}
txt := buf.String()
if err := toFile(txt, csvResultPath); err != nil {
log.Fatal(err)
} else {
log.Println("time series saved... Uploading to Google cloud storage...")
kbts, err := ioutil.ReadFile(keyPath)
if err != nil {
log.Fatal(err)
}
u, err := remotestorage.NewGoogleCloudStorage(kbts, googleCloudProjectName)
if err != nil {
log.Fatal(err)
}
srcCSVResultPath := csvResultPath
dstCSVResultPath := filepath.Base(csvResultPath)
log.Printf("Uploading %s to %s", srcCSVResultPath, dstCSVResultPath)
var uerr error
for k := 0; k < 5; k++ {
if uerr = u.UploadFile(bucket, srcCSVResultPath, dstCSVResultPath); uerr != nil {
log.Println(uerr)
continue
} else {
break
}
}
}
return fmt.Sprintf("\nSample in one second (unix latency throughput):\n%s", txt)
return buf.String()
}

467
control/util.go Normal file
View File

@ -0,0 +1,467 @@
// Copyright 2015 CoreOS, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package control
import (
"bufio"
"crypto/rand"
"fmt"
"io"
"io/ioutil"
"log"
"net/http"
"os"
"strconv"
"strings"
"time"
"golang.org/x/net/context"
mrand "math/rand"
clientv2 "github.com/coreos/etcd/client"
"github.com/coreos/etcd/clientv3"
consulapi "github.com/hashicorp/consul/api"
"github.com/samuel/go-zookeeper/zk"
)
var (
	// zkCreateFlags is passed to every Zookeeper Create call (no flag bits set).
	zkCreateFlags = int32(0)
	// zkCreateAcl is the open ACL (all permissions for everyone) applied to
	// every created znode.
	zkCreateAcl = zk.WorldACL(zk.PermAll)
)
// request is a union of one pending operation for whichever database is
// under test; only the field matching cfg.Database is populated.
type request struct {
	etcdv2Op etcdv2Op
	etcdv3Op clientv3.Op
	zkOp     zkOp
	consulOp consulOp
}

// etcdv2Op describes a single etcd v2 key/value operation.
type etcdv2Op struct {
	key   string
	value string
}

// zkOp describes a single Zookeeper znode operation.
type zkOp struct {
	key   string
	value []byte
}

// consulOp describes a single Consul KV operation.
type consulOp struct {
	key   string
	value []byte
}
var (
	// dialTotal counts the number of mustCreateConn* calls so that endpoint
	// connections can be handed out in round-robin order. Not goroutine-safe;
	// all connections are created before workers start.
	dialTotal int
)
// mustCreateConnEtcdv3 dials one etcd v3 endpoint, chosen round-robin via
// the package-level dialTotal counter, exiting the process on failure.
func mustCreateConnEtcdv3(endpoints []string) *clientv3.Client {
	ep := endpoints[dialTotal%len(endpoints)]
	dialTotal++
	client, err := clientv3.New(clientv3.Config{Endpoints: []string{ep}})
	if err != nil {
		fmt.Fprintf(os.Stderr, "dial error: %v\n", err)
		os.Exit(1)
	}
	return client
}
// mustCreateClientsEtcdv3 creates totalConns etcd v3 connections and hands
// them out to totalClients clients in round-robin order, so several clients
// may share one underlying connection when totalClients > totalConns.
func mustCreateClientsEtcdv3(endpoints []string, totalClients, totalConns int) []*clientv3.Client {
	conns := make([]*clientv3.Client, totalConns)
	for i := range conns {
		conns[i] = mustCreateConnEtcdv3(endpoints)
	}
	clients := make([]*clientv3.Client, totalClients)
	for i := range clients {
		// totalConns is already an int; the original's int(totalConns)
		// conversion was a no-op.
		clients[i] = conns[i%totalConns]
	}
	return clients
}
// mustCreateClientsEtcdv2 creates total etcd v2 KeysAPI clients, assigning
// endpoints round-robin and defaulting the scheme to http://. Client
// construction errors are fatal.
func mustCreateClientsEtcdv2(endpoints []string, total int) []clientv2.KeysAPI {
	apis := make([]clientv2.KeysAPI, total)
	for i := range apis {
		ep := endpoints[dialTotal%len(endpoints)]
		dialTotal++
		if !strings.HasPrefix(ep, "http://") {
			ep = "http://" + ep
		}
		c, err := clientv2.New(clientv2.Config{
			Endpoints:               []string{ep},
			Transport:               clientv2.DefaultTransport,
			HeaderTimeoutPerRequest: time.Second,
		})
		if err != nil {
			log.Fatal(err)
		}
		apis[i] = clientv2.NewKeysAPI(c)
	}
	return apis
}
// mustCreateConnsZk opens total Zookeeper connections, one endpoint each,
// assigned round-robin; any connection error is fatal.
func mustCreateConnsZk(endpoints []string, total int) []*zk.Conn {
	conns := make([]*zk.Conn, total)
	for i := range conns {
		ep := endpoints[dialTotal%len(endpoints)]
		dialTotal++
		c, _, err := zk.Connect([]string{ep}, time.Second)
		if err != nil {
			log.Fatal(err)
		}
		conns[i] = c
	}
	return conns
}
// mustCreateConnsConsul creates total Consul KV clients, assigning
// endpoints (host:port) round-robin; any client-creation error is fatal.
func mustCreateConnsConsul(endpoints []string, total int) []*consulapi.KV {
	kvs := make([]*consulapi.KV, total)
	for i := range kvs {
		cfg := consulapi.DefaultConfig()
		cfg.Address = endpoints[dialTotal%len(endpoints)] // x.x.x.x:8500
		dialTotal++
		cli, err := consulapi.NewClient(cfg)
		if err != nil {
			log.Fatal(err)
		}
		kvs[i] = cli.KV()
	}
	return kvs
}
// mustRandBytes returns n cryptographically random bytes, exiting the
// process if the system entropy source fails.
func mustRandBytes(n int) []byte {
	b := make([]byte, n)
	if _, err := rand.Read(b); err != nil {
		fmt.Fprintf(os.Stderr, "failed to generate value: %v\n", err)
		os.Exit(1)
	}
	return b
}
// doPutEtcdv2 is a worker that serves PUT requests against etcd v2 until
// the requests channel is closed, reporting each outcome on the
// package-level results channel and progress bar.
func doPutEtcdv2(ctx context.Context, conn clientv2.KeysAPI, requests <-chan request) {
	defer wg.Done()
	for req := range requests {
		op := req.etcdv2Op
		st := time.Now()
		// Use the caller's ctx so cancellation/timeouts propagate; the
		// original accepted ctx but ignored it in favor of
		// context.Background().
		_, err := conn.Set(ctx, op.key, op.value, nil)
		var errStr string
		if err != nil {
			errStr = err.Error()
		}
		results <- result{errStr: errStr, duration: time.Since(st), happened: time.Now()}
		bar.Increment()
	}
}
// getTotalKeysEtcdv2 returns a zero key count for every endpoint; etcd v2
// does not expose a total-keys metric.
func getTotalKeysEtcdv2(endpoints []string) map[string]int64 {
	counts := make(map[string]int64, len(endpoints))
	for _, ep := range endpoints {
		counts[ep] = 0 // not supported in metrics
	}
	return counts
}
// doPutEtcdv3 is a worker that executes etcd v3 put operations from the
// requests channel until it is closed, reporting each outcome on the
// package-level results channel and progress bar.
func doPutEtcdv3(ctx context.Context, client clientv3.KV, requests <-chan request) {
	defer wg.Done()
	for req := range requests {
		start := time.Now()
		_, err := client.Do(ctx, req.etcdv3Op)
		errStr := ""
		if err != nil {
			errStr = err.Error()
		}
		results <- result{errStr: errStr, duration: time.Since(start), happened: time.Now()}
		bar.Increment()
	}
}
// getTotalKeysEtcdv3 scrapes each endpoint's /metrics page and returns the
// value of etcd_storage_keys_total per endpoint (0 when unavailable).
func getTotalKeysEtcdv3(endpoints []string) map[string]int64 {
	rs := make(map[string]int64, len(endpoints))
	for _, ep := range endpoints {
		if !strings.HasPrefix(ep, "http://") {
			ep = "http://" + ep
		}
		resp, err := http.Get(ep + "/metrics")
		if err != nil {
			log.Println(err)
			rs[ep] = 0
			// The original fell through here and dereferenced the nil resp
			// (bufio.NewScanner(resp.Body)), panicking on any unreachable
			// endpoint.
			continue
		}
		scanner := bufio.NewScanner(resp.Body)
		for scanner.Scan() {
			txt := scanner.Text()
			if strings.HasPrefix(txt, "#") {
				continue
			}
			// Metric lines are "name value"; parse the value if present.
			ts := strings.SplitN(txt, " ", 2)
			fv := 0.0
			if len(ts) == 2 {
				if v, perr := strconv.ParseFloat(ts[1], 64); perr == nil {
					fv = v
				}
			}
			if ts[0] == "etcd_storage_keys_total" {
				rs[ep] = int64(fv)
				break
			}
		}
		// Drain and close the body so the connection can be reused.
		gracefulClose(resp)
	}
	return rs
}
// doPutZk is a worker that creates znodes for queued requests until the
// channel closes, reporting each outcome on the package-level results
// channel and progress bar.
func doPutZk(conn *zk.Conn, requests <-chan request) {
	defer wg.Done()
	for req := range requests {
		start := time.Now()
		_, err := conn.Create(req.zkOp.key, req.zkOp.value, zkCreateFlags, zkCreateAcl)
		errStr := ""
		if err != nil {
			errStr = err.Error()
		}
		results <- result{errStr: errStr, duration: time.Since(start), happened: time.Now()}
		bar.Increment()
	}
}
// getTotalKeysZk queries each Zookeeper endpoint's server stats and returns
// its node count per endpoint; if the stats call fails, every endpoint
// reports zero.
func getTotalKeysZk(endpoints []string) map[string]int64 {
	counts := make(map[string]int64, len(endpoints))
	stats, ok := zk.FLWSrvr(endpoints, 5*time.Second)
	if !ok {
		log.Printf("getTotalKeysZk failed with %+v", stats)
		for _, ep := range endpoints {
			counts[ep] = 0
		}
		return counts
	}
	for i, s := range stats {
		counts[endpoints[i]] = s.NodeCount
	}
	return counts
}
// doPutConsul is a worker that writes queued KV pairs to Consul until the
// requests channel closes, reporting each outcome on the package-level
// results channel and progress bar.
func doPutConsul(conn *consulapi.KV, requests <-chan request) {
	defer wg.Done()
	for req := range requests {
		start := time.Now()
		_, err := conn.Put(&consulapi.KVPair{Key: req.consulOp.key, Value: req.consulOp.value}, nil)
		errStr := ""
		if err != nil {
			errStr = err.Error()
		}
		results <- result{errStr: errStr, duration: time.Since(start), happened: time.Now()}
		bar.Increment()
	}
}
// getTotalKeysConsul returns a zero key count for every endpoint; no
// total-keys metric is read from Consul here.
func getTotalKeysConsul(endpoints []string) map[string]int64 {
	counts := make(map[string]int64, len(endpoints))
	for _, ep := range endpoints {
		counts[ep] = 0 // not supported in consul
	}
	return counts
}
// compactKV compacts the etcd v3 key space down to (current revision - 1000),
// using only the first client in the slice. Any RPC error panics; this is a
// benchmark tool, so a failed compaction aborts the run.
func compactKV(clients []*clientv3.Client) {
	var curRev int64
	// Read the current store revision from the first client only — the
	// unconditional break exits after one iteration; the loop form just
	// tolerates an empty slice (curRev stays 0).
	for _, c := range clients {
		ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
		resp, err := c.KV.Get(ctx, "foo")
		cancel()
		if err != nil {
			panic(err)
		}
		curRev = resp.Header.Revision
		break
	}
	// Keep the most recent 1000 revisions; never request a negative revision.
	revToCompact := max(0, curRev-1000)
	// Issue the compaction through the first client only (same break pattern).
	for _, c := range clients {
		ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
		err := c.KV.Compact(ctx, revToCompact)
		cancel()
		if err != nil {
			panic(err)
		}
		break
	}
}
// doRangeEtcdv2 is a worker that serves GET requests against etcd v2 until
// the requests channel closes, reporting each outcome on the package-level
// results channel and progress bar.
func doRangeEtcdv2(conn clientv2.KeysAPI, requests <-chan request) {
	defer wg.Done()
	for req := range requests {
		start := time.Now()
		_, err := conn.Get(context.Background(), req.etcdv2Op.key, nil)
		errStr := ""
		if err != nil {
			errStr = err.Error()
		}
		results <- result{errStr: errStr, duration: time.Since(start), happened: time.Now()}
		bar.Increment()
	}
}
// doRangeEtcdv3 is a worker that executes etcd v3 range operations from the
// requests channel until it closes, reporting each outcome on the
// package-level results channel and progress bar.
func doRangeEtcdv3(client clientv3.KV, requests <-chan request) {
	defer wg.Done()
	for req := range requests {
		start := time.Now()
		_, err := client.Do(context.Background(), req.etcdv3Op)
		errStr := ""
		if err != nil {
			errStr = err.Error()
		}
		results <- result{errStr: errStr, duration: time.Since(start), happened: time.Now()}
		bar.Increment()
	}
}
// doRangeZk is a worker that reads znodes for queued requests until the
// channel closes, reporting each outcome on the package-level results
// channel and progress bar.
func doRangeZk(conn *zk.Conn, requests <-chan request) {
	defer wg.Done()
	for req := range requests {
		start := time.Now()
		_, _, err := conn.Get(req.zkOp.key)
		errStr := ""
		if err != nil {
			errStr = err.Error()
		}
		results <- result{errStr: errStr, duration: time.Since(start), happened: time.Now()}
		bar.Increment()
	}
}
// doRangeConsul is a worker that reads queued keys from Consul (stale reads
// allowed) until the requests channel closes, reporting each outcome on the
// package-level results channel and progress bar.
func doRangeConsul(conn *consulapi.KV, requests <-chan request) {
	defer wg.Done()
	for req := range requests {
		start := time.Now()
		_, _, err := conn.Get(req.consulOp.key, &consulapi.QueryOptions{AllowStale: true})
		errStr := ""
		if err != nil {
			errStr = err.Error()
		}
		results <- result{errStr: errStr, duration: time.Since(start), happened: time.Now()}
		bar.Increment()
	}
}
// max returns the larger of two int64 values.
func max(n1, n2 int64) int64 {
	if n2 > n1 {
		return n2
	}
	return n1
}
// randBytes returns bytesN pseudo-random ASCII letters. Each call seeds a
// fresh math/rand source from the wall clock and draws 6-bit indices from
// each 63-bit random word, rejecting indices past the end of the alphabet.
func randBytes(bytesN int) []byte {
	const (
		letterBytes   = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ"
		letterIdxBits = 6                    // bits needed per letter index
		letterIdxMask = 1<<letterIdxBits - 1 // low 6 bits set
		letterIdxMax  = 63 / letterIdxBits   // indices extractable per Int63
	)
	src := mrand.NewSource(time.Now().UnixNano())
	out := make([]byte, bytesN)
	cache, remain := src.Int63(), letterIdxMax
	for i := bytesN - 1; i >= 0; {
		if remain == 0 {
			cache, remain = src.Int63(), letterIdxMax
		}
		if idx := int(cache & letterIdxMask); idx < len(letterBytes) {
			out[i] = letterBytes[idx]
			i--
		}
		cache >>= letterIdxBits
		remain--
	}
	return out
}
// multiRandStrings returns sliceN distinct random strings of keyN letters
// each, drawing candidates until the set is full. NOTE(review): it will
// spin forever if sliceN exceeds the number of possible keyN-letter strings.
func multiRandStrings(keyN, sliceN int) []string {
	seen := make(map[string]struct{}, sliceN)
	for len(seen) < sliceN {
		seen[string(randBytes(keyN))] = struct{}{}
	}
	out := make([]string, 0, sliceN)
	for s := range seen {
		out = append(out, s)
	}
	return out
}
// toFile writes txt to fpath, truncating an existing file or creating a new
// one (mode 0666, matching os.Create in the original), and returns any
// open/write error.
func toFile(txt, fpath string) error {
	// ioutil.WriteFile opens with O_WRONLY|O_CREATE|O_TRUNC, which covers
	// both branches of the original two-step open (truncate-existing, then
	// create-on-error) and guarantees the file is closed even on write error.
	return ioutil.WriteFile(fpath, []byte(txt), 0666)
}
// toMillisecond converts a duration to fractional milliseconds
// (e.g. 1.5s -> 1500).
func toMillisecond(d time.Duration) float64 {
	return d.Seconds() * 1000
}
// gracefulClose drains resp.Body to EOF and closes it. This keeps the
// underlying TCP/TLS connection alive so the transport can reuse it.
func gracefulClose(resp *http.Response) {
	defer resp.Body.Close()
	io.Copy(ioutil.Discard, resp.Body)
}

11
main.go
View File

@ -20,10 +20,7 @@
// Available Commands:
// agent Database agent in remote servers.
// analyze Analyzes test results specific to dbtester.
// bench Low-level benchmark tool for etcdv2, etcdv3, Zookeeper, Consul.
// start Starts database through RPC calls.
// stop Stops database through RPC calls.
// restart Restarts database through RPC calls.
// control Controls tests.
// upload Uploads to cloud storage.
//
// Flags:
@ -39,7 +36,6 @@ import (
"github.com/coreos/dbtester/agent"
"github.com/coreos/dbtester/analyze"
"github.com/coreos/dbtester/bench"
"github.com/coreos/dbtester/control"
"github.com/coreos/dbtester/upload"
@ -61,10 +57,7 @@ func init() {
func init() {
rootCommand.AddCommand(agent.Command)
rootCommand.AddCommand(analyze.Command)
rootCommand.AddCommand(bench.Command)
rootCommand.AddCommand(control.StartCommand)
rootCommand.AddCommand(control.StopCommand)
rootCommand.AddCommand(control.RestartCommand)
rootCommand.AddCommand(control.Command)
rootCommand.AddCommand(upload.Command)
}

View File

@ -127,13 +127,22 @@ func (file *FileDescriptorProto) GetMessage(typeName string) *DescriptorProto {
if msg.GetName() == typeName {
return msg
}
nes := file.GetNestedMessage(msg, strings.TrimPrefix(typeName, msg.GetName()+"."))
if nes != nil {
return nes
}
}
return nil
}
func (file *FileDescriptorProto) GetNestedMessage(msg *DescriptorProto, typeName string) *DescriptorProto {
for _, nes := range msg.GetNestedType() {
if nes.GetName() == typeName {
return nes
}
if msg.GetName()+"."+nes.GetName() == typeName {
return nes
}
res := file.GetNestedMessage(nes, strings.TrimPrefix(typeName, nes.GetName()+"."))
if res != nil {
return res
}
}
return nil